diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md
index 1fdf99045488..994f506e0f09 100644
--- a/.github/ISSUE_TEMPLATE/feature-request.md
+++ b/.github/ISSUE_TEMPLATE/feature-request.md
@@ -13,7 +13,7 @@ assignees: ''
## Motivation
-
## Pitch
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index c489a753aa95..c1b3d5d514c3 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -10,3 +10,14 @@ updates:
- glenn-jocher
labels:
- dependencies
+
+ - package-ecosystem: github-actions
+ directory: "/"
+ schedule:
+ interval: weekly
+ time: "04:00"
+ open-pull-requests-limit: 5
+ reviewers:
+ - glenn-jocher
+ labels:
+ - dependencies
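This new entry mirrors the pip ecosystem block above, so pinned GitHub Actions (such as the `actions/cache@v2.1.6` pin below) also receive weekly update PRs with the same reviewer and label settings.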
diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml
index 1b29173b83ad..8ebfdeca8d74 100644
--- a/.github/workflows/ci-testing.yml
+++ b/.github/workflows/ci-testing.yml
@@ -39,7 +39,7 @@ jobs:
python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)"
- name: Cache pip
- uses: actions/cache@v1
+ uses: actions/cache@v2.1.6
with:
path: ${{ steps.pip-cache.outputs.dir }}
key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }}
@@ -83,7 +83,7 @@ jobs:
# Python
python - <<EOF
+      - name: set PY
+        run: echo "PY=$(python -VV | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV
+ - uses: actions/cache@v2
+ with:
+ path: ~/.cache/pre-commit
+ key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }}
+
+ - uses: pre-commit/action@v2.0.3
+ # this action also provides an additional behaviour when used in private repositories
+ # when configured with a github token, the action will push back fixes to the pull request branch
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
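The cache key above concatenates a fingerprint of the Python version (`env.PY`) with a hash of `.pre-commit-config.yaml`, so the hook environments cached under `~/.cache/pre-commit` are rebuilt whenever either the interpreter or the hook set changes.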
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 2305ea07e902..67f51f0e8bce 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -1,4 +1,4 @@
-# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities.
+# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities.
# https://github.com/github/codeql-action
name: "CodeQL"
diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml
index 0bbc49ba2508..0daf9514d3c5 100644
--- a/.github/workflows/greetings.yml
+++ b/.github/workflows/greetings.yml
@@ -13,7 +13,7 @@ jobs:
repo-token: ${{ secrets.GITHUB_TOKEN }}
pr-message: |
👋 Hello @${{ github.actor }}, thank you for submitting a 🚀 PR! To allow your work to be integrated as seamlessly as possible, we advise you to:
- - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch:
+ - ✅ Verify your PR is **up-to-date with upstream/master.** If your PR is behind upstream/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch:
```bash
git remote add upstream https://github.com/ultralytics/yolov5.git
git fetch upstream
@@ -57,4 +57,3 @@ jobs:
If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.
-
diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml
index e86c57744b84..a4db1efb2971 100644
--- a/.github/workflows/rebase.yml
+++ b/.github/workflows/rebase.yml
@@ -1,10 +1,9 @@
-name: Automatic Rebase
# https://github.com/marketplace/actions/automatic-rebase
+name: Automatic Rebase
on:
issue_comment:
types: [created]
-
jobs:
rebase:
name: Rebase
@@ -14,8 +13,9 @@ jobs:
- name: Checkout the latest code
uses: actions/checkout@v2
with:
- fetch-depth: 0
+ token: ${{ secrets.ACTIONS_TOKEN }}
+ fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
- name: Automatic Rebase
- uses: cirrus-actions/rebase@1.3.1
+ uses: cirrus-actions/rebase@1.5
env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GITHUB_TOKEN: ${{ secrets.ACTIONS_TOKEN }}
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 34ff0c94730a..b046dc949d1c 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -9,7 +9,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- - uses: actions/stale@v3
+ - uses: actions/stale@v4
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: |
diff --git a/.gitignore b/.gitignore
index 375b71807588..5f8cab550021 100755
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,7 @@
*.data
*.json
*.cfg
+!setup.cfg
!cfg/yolov3*.cfg
storage.googleapis.com
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 000000000000..2eb78aa17ef4
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,67 @@
+# Define hooks for code formatting
+# Will be applied to any updated files on commit if a user has installed and linked the commit hook
+
+default_language_version:
+ python: python3.8
+
+# Define bot property if installed via https://github.com/marketplace/pre-commit-ci
+ci:
+ autofix_prs: true
+ autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
+ autoupdate_schedule: quarterly
+ # submodules: true
+
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.0.1
+ hooks:
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
+ - id: check-case-conflict
+ - id: check-yaml
+ - id: check-toml
+ - id: pretty-format-json
+ - id: check-docstring-first
+
+ - repo: https://github.com/asottile/pyupgrade
+ rev: v2.23.1
+ hooks:
+ - id: pyupgrade
+ args: [--py36-plus]
+ name: Upgrade code
+
+ # TODO
+ #- repo: https://github.com/PyCQA/isort
+ # rev: 5.9.3
+ # hooks:
+ # - id: isort
+ # name: imports
+
+ # TODO
+ #- repo: https://github.com/pre-commit/mirrors-yapf
+ # rev: v0.31.0
+ # hooks:
+ # - id: yapf
+ # name: formatting
+
+ # TODO
+ #- repo: https://github.com/executablebooks/mdformat
+ # rev: 0.7.7
+ # hooks:
+ # - id: mdformat
+ # additional_dependencies:
+ # - mdformat-gfm
+ # - mdformat-black
+ # - mdformat_frontmatter
+
+ # TODO
+ #- repo: https://github.com/asottile/yesqa
+ # rev: v1.2.3
+ # hooks:
+ # - id: yesqa
+
+ - repo: https://github.com/PyCQA/flake8
+ rev: 3.9.2
+ hooks:
+ - id: flake8
+ name: PEP8
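To run these same hooks locally, the standard pre-commit workflow applies: `pip install pre-commit`, then `pre-commit install` in the repository root to attach the git hook, or `pre-commit run --all-files` for a one-off pass over the whole tree.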
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 38601775caeb..1b46b5968e41 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -41,22 +41,22 @@ changes** button. All done, your PR is now submitted to YOLOv5 for review and ap
To allow your work to be integrated as seamlessly as possible, we advise you to:
-- ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an
+- ✅ Verify your PR is **up-to-date with upstream/master.** If your PR is behind upstream/master an
automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may
be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature'
with the name of your local branch:
-```bash
-git remote add upstream https://github.com/ultralytics/yolov5.git
-git fetch upstream
-git checkout feature # <----- replace 'feature' with local branch name
-git merge upstream/master
-git push -u origin -f
-```
+ ```bash
+ git remote add upstream https://github.com/ultralytics/yolov5.git
+ git fetch upstream
+ git checkout feature # <----- replace 'feature' with local branch name
+ git merge upstream/master
+ git push -u origin -f
+ ```
- ✅ Verify all Continuous Integration (CI) **checks are passing**.
- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
- but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee
+ but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
## Submitting a Bug Report 🐛
diff --git a/LICENSE b/LICENSE
index 9e419e042146..92b370f0e0e1 100644
--- a/LICENSE
+++ b/LICENSE
@@ -671,4 +671,4 @@ into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
\ No newline at end of file
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/README.md b/README.md
index 0d474cb4a09b..d3fd7e9a92f5 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ YOLOv5 🚀 is a family of object detection architectures and models pretrained
open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
-
@@ -109,7 +109,7 @@ the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and
```bash
$ python detect.py --source 0 # webcam
- file.jpg # image
+ file.jpg # image
file.mp4 # video
path/ # directory
path/*.jpg # glob
@@ -136,7 +136,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size
-
+
Tutorials
@@ -178,7 +178,7 @@ Get started in seconds with our verified environments. Click each icon below for
-
+
## Integrations
@@ -239,7 +239,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi
|[YOLOv5s6][assets] |1280 |44.5 |63.0 |385 |8.2 |3.6 |16.8 |12.6
|[YOLOv5m6][assets] |1280 |51.0 |69.0 |887 |11.1 |6.8 |35.7 |50.0
|[YOLOv5l6][assets] |1280 |53.6 |71.6 |1784 |15.8 |10.5 |76.8 |111.4
-|[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
+|[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
Table Notes (click to expand)
diff --git a/data/Objects365.yaml b/data/Objects365.yaml
index 5c0a732253e3..b10c28e764c1 100644
--- a/data/Objects365.yaml
+++ b/data/Objects365.yaml
@@ -62,21 +62,21 @@ names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Gla
download: |
from pycocotools.coco import COCO
from tqdm import tqdm
-
- from utils.general import download, Path
-
+
+ from utils.general import Path, download, np, xyxy2xywhn
+
# Make Directories
dir = Path(yaml['path']) # dataset root dir
for p in 'images', 'labels':
(dir / p).mkdir(parents=True, exist_ok=True)
for q in 'train', 'val':
(dir / p / q).mkdir(parents=True, exist_ok=True)
-
+
# Train, Val Splits
for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:
print(f"Processing {split} in {patches} patches ...")
images, labels = dir / 'images' / split, dir / 'labels' / split
-
+
# Download
url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
if split == 'train':
@@ -86,11 +86,11 @@ download: |
download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json
download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8)
download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8)
-
+
# Move
for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):
f.rename(images / f.name) # move to /images/{split}
-
+
# Labels
coco = COCO(dir / f'zhiyuan_objv2_{split}.json')
names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
@@ -105,7 +105,8 @@ download: |
annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
for a in coco.loadAnns(annIds):
x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner)
- x, y = x + w / 2, y + h / 2 # xy to center
- file.write(f"{cid} {x / width:.5f} {y / height:.5f} {w / width:.5f} {h / height:.5f}\n")
+ xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4)
+ x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0] # normalized and clipped
+ file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
except Exception as e:
print(e)
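For context, the `xyxy2xywhn` helper imported above converts pixel-space corner boxes to the normalized center format used by YOLO label files, optionally clipping coordinates to the image first. A minimal sketch of that conversion (the actual helper in `utils/general.py` may differ in details such as torch-tensor support):

```python
import numpy as np

def xyxy2xywhn_sketch(x, w=640, h=640, clip=False):
    # x: (n, 4) array of [x1, y1, x2, y2] boxes in pixels
    x = np.asarray(x, dtype=float).copy()
    if clip:
        x[:, [0, 2]] = x[:, [0, 2]].clip(0, w)  # keep x coordinates inside the image
        x[:, [1, 3]] = x[:, [1, 3]].clip(0, h)  # keep y coordinates inside the image
    y = np.empty_like(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2 / w  # x center, normalized to [0, 1]
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2 / h  # y center, normalized to [0, 1]
    y[:, 2] = (x[:, 2] - x[:, 0]) / w      # width, normalized
    y[:, 3] = (x[:, 3] - x[:, 1]) / h      # height, normalized
    return y

# e.g. a 100x50 box at the top-left of a 640x480 image:
# xyxy2xywhn_sketch(np.array([[0, 0, 100, 50]]), w=640, h=480)
# -> [[0.078125, 0.0520833, 0.15625, 0.1041667]]
```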
diff --git a/data/coco128.yaml b/data/coco128.yaml
index 70cf52c397af..b1dfb004afa1 100644
--- a/data/coco128.yaml
+++ b/data/coco128.yaml
@@ -27,4 +27,4 @@ names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 't
# Download script/URL (optional)
-download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
\ No newline at end of file
+download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml
index 519c82687e09..5a586cc63fae 100644
--- a/data/hyps/hyp.scratch-high.yaml
+++ b/data/hyps/hyp.scratch-high.yaml
@@ -31,4 +31,4 @@ flipud: 0.0 # image flip up-down (probability)
fliplr: 0.5 # image flip left-right (probability)
mosaic: 1.0 # image mosaic (probability)
mixup: 0.1 # image mixup (probability)
-copy_paste: 0.1 # segment copy-paste (probability)
\ No newline at end of file
+copy_paste: 0.1 # segment copy-paste (probability)
diff --git a/data/hyps/hyp.scratch-low.yaml b/data/hyps/hyp.scratch-low.yaml
index b093a95ac53b..b9ef1d55a3b6 100644
--- a/data/hyps/hyp.scratch-low.yaml
+++ b/data/hyps/hyp.scratch-low.yaml
@@ -31,4 +31,4 @@ flipud: 0.0 # image flip up-down (probability)
fliplr: 0.5 # image flip left-right (probability)
mosaic: 1.0 # image mosaic (probability)
mixup: 0.0 # image mixup (probability)
-copy_paste: 0.0 # segment copy-paste (probability)
\ No newline at end of file
+copy_paste: 0.0 # segment copy-paste (probability)
diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh
index b4b0ccd7857e..e9fa65394178 100755
--- a/data/scripts/download_weights.sh
+++ b/data/scripts/download_weights.sh
@@ -11,7 +11,10 @@
python - <<EOF
diff --git a/detect.py b/detect.py
--- a/detect.py
+++ b/detect.py
-            check_requirements(('opencv-python>=4.5.4',))
+        check_requirements(('opencv-python>=4.5.4',))
net = cv2.dnn.readNetFromONNX(w)
else:
check_requirements(('onnx', 'onnxruntime-gpu' if torch.has_cuda else 'onnxruntime'))
@@ -139,7 +139,7 @@ def wrap_frozen_graph(gd, inputs, outputs):
else:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
- img = img / 255.0 # 0 - 255 to 0.0 - 1.0
+ img /= 255.0 # 0 - 255 to 0.0 - 1.0
if len(img.shape) == 3:
img = img[None] # expand for batch dim
t2 = time_sync()
diff --git a/models/common.py b/models/common.py
index f7f35c0f9f5c..d0fb0e8596ed 100644
--- a/models/common.py
+++ b/models/common.py
@@ -79,7 +79,7 @@ def __init__(self, c1, c2, num_heads, num_layers):
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
- self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
+ self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
self.c2 = c2
def forward(self, x):
@@ -114,7 +114,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
- self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
+ self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
@@ -130,7 +130,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)
- self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
+ self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
# self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
def forward(self, x):
@@ -158,7 +158,7 @@ class C3Ghost(C3):
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[GhostBottleneck(c_, c_) for _ in range(n)])
+ self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
class SPP(nn.Module):
@@ -277,7 +277,7 @@ class AutoShape(nn.Module):
# YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
- classes = None # (optional list) filter by class
+ classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
multi_label = False # NMS multiple labels per box
max_det = 1000 # maximum number of detections per image
@@ -362,7 +362,7 @@ class Detections:
def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
super().__init__()
d = pred[0].device # device
- gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
+ gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1., 1.], device=d) for im in imgs] # normalizations
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
diff --git a/models/experimental.py b/models/experimental.py
index edccc9632fb5..adb86c81fc06 100644
--- a/models/experimental.py
+++ b/models/experimental.py
@@ -97,7 +97,6 @@ def attempt_load(weights, map_location=None, inplace=True, fuse=True):
else:
model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse
-
# Compatibility updates
for m in model.modules():
if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:
diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml
index 119aebb1523a..2f2c82c70122 100644
--- a/models/hub/yolov5-bifpn.yaml
+++ b/models/hub/yolov5-bifpn.yaml
@@ -18,7 +18,7 @@ backbone:
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 9, C3, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
- [-1, 9, C3, [512]]
+ [-1, 9, C3, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 1, SPP, [1024, [5, 9, 13]]],
[-1, 3, C3, [1024, False]], # 9
diff --git a/models/tf.py b/models/tf.py
index 1c6da43adaac..5599ff5cce91 100644
--- a/models/tf.py
+++ b/models/tf.py
@@ -40,7 +40,7 @@
class TFBN(keras.layers.Layer):
# TensorFlow BatchNormalization wrapper
def __init__(self, w=None):
- super(TFBN, self).__init__()
+ super().__init__()
self.bn = keras.layers.BatchNormalization(
beta_initializer=keras.initializers.Constant(w.bias.numpy()),
gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
@@ -54,7 +54,7 @@ def call(self, inputs):
class TFPad(keras.layers.Layer):
def __init__(self, pad):
- super(TFPad, self).__init__()
+ super().__init__()
self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
def call(self, inputs):
@@ -65,7 +65,7 @@ class TFConv(keras.layers.Layer):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
# ch_in, ch_out, weights, kernel, stride, padding, groups
- super(TFConv, self).__init__()
+ super().__init__()
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
assert isinstance(k, int), "Convolution with multiple kernels are not allowed."
# TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
@@ -96,7 +96,7 @@ class TFFocus(keras.layers.Layer):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
# ch_in, ch_out, kernel, stride, padding, groups
- super(TFFocus, self).__init__()
+ super().__init__()
self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)
def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c)
@@ -110,7 +110,7 @@ def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c)
class TFBottleneck(keras.layers.Layer):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion
- super(TFBottleneck, self).__init__()
+ super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
@@ -123,7 +123,7 @@ def call(self, inputs):
class TFConv2d(keras.layers.Layer):
# Substitution for PyTorch nn.Conv2D
def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
- super(TFConv2d, self).__init__()
+ super().__init__()
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
self.conv = keras.layers.Conv2D(
c2, k, s, 'VALID', use_bias=bias,
@@ -138,7 +138,7 @@ class TFBottleneckCSP(keras.layers.Layer):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
# ch_in, ch_out, number, shortcut, groups, expansion
- super(TFBottleneckCSP, self).__init__()
+ super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
@@ -158,7 +158,7 @@ class TFC3(keras.layers.Layer):
# CSP Bottleneck with 3 convolutions
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
# ch_in, ch_out, number, shortcut, groups, expansion
- super(TFC3, self).__init__()
+ super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
@@ -172,7 +172,7 @@ def call(self, inputs):
class TFSPP(keras.layers.Layer):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, c1, c2, k=(5, 9, 13), w=None):
- super(TFSPP, self).__init__()
+ super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
@@ -186,7 +186,7 @@ def call(self, inputs):
class TFSPPF(keras.layers.Layer):
# Spatial pyramid pooling-Fast layer
def __init__(self, c1, c2, k=5, w=None):
- super(TFSPPF, self).__init__()
+ super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
@@ -201,7 +201,7 @@ def call(self, inputs):
class TFDetect(keras.layers.Layer):
def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer
- super(TFDetect, self).__init__()
+ super().__init__()
self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
@@ -249,7 +249,7 @@ def _make_grid(nx=20, ny=20):
class TFUpsample(keras.layers.Layer):
def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w'
- super(TFUpsample, self).__init__()
+ super().__init__()
assert scale_factor == 2, "scale_factor must be 2"
self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode)
# self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
@@ -263,7 +263,7 @@ def call(self, inputs):
class TFConcat(keras.layers.Layer):
def __init__(self, dimension=1, w=None):
- super(TFConcat, self).__init__()
+ super().__init__()
assert dimension == 1, "convert only NCHW to NHWC concat"
self.d = 3
@@ -272,7 +272,7 @@ def call(self, inputs):
def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
- LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
+ LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
@@ -299,7 +299,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
- c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])
+ c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
elif m is Detect:
args.append([ch[x + 1] for x in f])
if isinstance(args[1], int): # number of anchors
@@ -312,11 +312,11 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
else tf_m(*args, w=model.model[i]) # module
- torch_m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
+ torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
- np = sum([x.numel() for x in torch_m_.parameters()]) # number params
+ np = sum(x.numel() for x in torch_m_.parameters()) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
- LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
+ LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
ch.append(c2)
@@ -325,7 +325,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
class TFModel:
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes
- super(TFModel, self).__init__()
+ super().__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
else: # is *.yaml
@@ -336,7 +336,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 64
# Define model
if nc and nc != self.yaml['nc']:
- print('Overriding %s nc=%g with nc=%g' % (cfg, self.yaml['nc'], nc))
+ print(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
self.yaml['nc'] = nc # override yaml value
self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)
diff --git a/models/yolo.py b/models/yolo.py
index 497a0e9c24e6..0fa2db91e82b 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -247,7 +247,7 @@ def _apply(self, fn):
def parse_model(d, ch): # model_dict, input_channels(3)
- LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
+ LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
@@ -275,7 +275,7 @@ def parse_model(d, ch): # model_dict, input_channels(3)
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
- c2 = sum([ch[x] for x in f])
+ c2 = sum(ch[x] for x in f)
elif m is Detect:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
@@ -287,11 +287,11 @@ def parse_model(d, ch): # model_dict, input_channels(3)
else:
c2 = ch[f]
- m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
+ m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
- np = sum([x.numel() for x in m_.parameters()]) # number params
+ np = sum(x.numel() for x in m_.parameters()) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
- LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n_, np, t, args)) # print
+ LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 000000000000..7d25200cdb33
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,45 @@
+# Project-wide configuration file, can be used for package metadata and other tool configurations
+# Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments
+
+[metadata]
+license_file = LICENSE
+description-file = README.md
+
+
+[tool:pytest]
+norecursedirs =
+ .git
+ dist
+ build
+addopts =
+ --doctest-modules
+ --durations=25
+ --color=yes
+
+
+[flake8]
+max-line-length = 120
+exclude = .tox,*.egg,build,temp
+select = E,W,F
+doctests = True
+verbose = 2
+# https://pep8.readthedocs.io/en/latest/intro.html#error-codes
+format = pylint
+# see: https://www.flake8rules.com/
+ignore =
+ E731 # Do not assign a lambda expression, use a def
+ F405
+ E402
+ F841
+ E741
+ F821
+ E722
+ F401
W504
E127
+ E231
+ E501
+ F403
+ E302
+ F541
diff --git a/train.py b/train.py
index 29ae43e3bd37..292f2da965f0 100644
--- a/train.py
+++ b/train.py
@@ -36,6 +36,7 @@
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
+from utils.autobatch import check_train_batch_size
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \
@@ -131,6 +132,14 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary
print(f'freezing {k}')
v.requires_grad = False
+ # Image size
+ gs = max(int(model.stride.max()), 32) # grid size (max stride)
+ imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
+
+ # Batch size
+ if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size
+ batch_size = check_train_batch_size(model, imgsz)
+
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
@@ -190,11 +199,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary
del ckpt, csd
- # Image sizes
- gs = max(int(model.stride.max()), 32) # grid size (max stride)
- nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
- imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
-
# DP mode
if cuda and RANK == -1 and torch.cuda.device_count() > 1:
logging.warning('DP not recommended, instead use torch.distributed.run for best DDP Multi-GPU results.\n'
@@ -242,6 +246,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary
model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
# Model parameters
+ nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
hyp['box'] *= 3. / nl # scale to layers
hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
@@ -423,8 +428,10 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary
plots=True,
callbacks=callbacks,
compute_loss=compute_loss) # val best model with plots
+ if is_coco:
+ callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
- callbacks.run('on_train_end', last, best, plots, epoch)
+ callbacks.run('on_train_end', last, best, plots, epoch, results)
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
torch.cuda.empty_cache()
@@ -438,7 +445,7 @@ def parse_opt(known=False):
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch.yaml', help='hyperparameters path')
parser.add_argument('--epochs', type=int, default=300)
- parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
+ parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
parser.add_argument('--rect', action='store_true', help='rectangular training')
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
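With the autobatch wiring above, a single-GPU run can delegate batch-size selection entirely, e.g. `python train.py --data coco128.yaml --batch-size -1`; the `RANK == -1` guard keeps the estimate out of DDP runs, as the comment in the diff notes.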
diff --git a/tutorial.ipynb b/tutorial.ipynb
index 421ddbeaa15f..115d767a70bf 100644
--- a/tutorial.ipynb
+++ b/tutorial.ipynb
@@ -505,7 +505,7 @@
"id": "eyTZYGgRjnMc"
},
"source": [
- "## COCO val2017\n",
+ "## COCO val\n",
"Download [COCO val 2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L14) dataset (1GB - 5000 images), and test model accuracy."
]
},
@@ -533,8 +533,8 @@
"outputId": "7e6f5c96-c819-43e1-cd03-d3b9878cf8de"
},
"source": [
- "# Download COCO val2017\n",
- "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n",
+ "# Download COCO val\n",
+ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n",
"!unzip -q tmp.zip -d ../datasets && rm tmp.zip"
],
"execution_count": null,
@@ -567,7 +567,7 @@
"outputId": "3dd0e2fc-aecf-4108-91b1-6392da1863cb"
},
"source": [
- "# Run YOLOv5x on COCO val2017\n",
+ "# Run YOLOv5x on COCO val\n",
"!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half"
],
"execution_count": null,
@@ -627,7 +627,7 @@
"id": "rc_KbFk0juX2"
},
"source": [
- "## COCO test-dev2017\n",
+ "## COCO test\n",
"Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (**20,000 images, no labels**). Results are saved to a `*.json` file which should be **zipped** and submitted to the evaluation server at https://competitions.codalab.org/competitions/20794."
]
},
@@ -638,10 +638,9 @@
},
"source": [
"# Download COCO test-dev2017\n",
- "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n",
- "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n",
- "!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n",
- "%mv ./test2017 ../coco/images # move to /coco"
+ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017labels.zip', 'tmp.zip')\n",
+ "!unzip -q tmp.zip -d ../datasets && rm tmp.zip\n",
+ "!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f -d ../datasets/coco/images"
],
"execution_count": null,
"outputs": []
@@ -652,8 +651,8 @@
"id": "29GJXAP_lPrt"
},
"source": [
- "# Run YOLOv5s on COCO test-dev2017 using --task test\n",
- "!python val.py --weights yolov5s.pt --data coco.yaml --task test"
+ "# Run YOLOv5x on COCO test\n",
+ "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test"
],
"execution_count": null,
"outputs": []
@@ -1015,4 +1014,4 @@
"outputs": []
}
]
-}
\ No newline at end of file
+}
diff --git a/utils/autobatch.py b/utils/autobatch.py
new file mode 100644
index 000000000000..168b16f691ab
--- /dev/null
+++ b/utils/autobatch.py
@@ -0,0 +1,56 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Auto-batch utils
+"""
+
+from copy import deepcopy
+
+import numpy as np
+import torch
+from torch.cuda import amp
+
+from utils.general import colorstr
+from utils.torch_utils import profile
+
+
+def check_train_batch_size(model, imgsz=640):
+ # Check YOLOv5 training batch size
+ with amp.autocast():
+ return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size
+
+
+def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
+ # Automatically estimate best batch size to use `fraction` of available CUDA memory
+ # Usage:
+ # import torch
+ # from utils.autobatch import autobatch
+ # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
+ # print(autobatch(model))
+
+ prefix = colorstr('autobatch: ')
+ print(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
+ device = next(model.parameters()).device # get model device
+ if device.type == 'cpu':
+ print(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
+ return batch_size
+
+ d = str(device).upper() # 'CUDA:0'
+ t = torch.cuda.get_device_properties(device).total_memory / 1024 ** 3 # (GB)
+ r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GB)
+ a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GB)
+ f = t - (r + a) # free inside reserved
+ print(f'{prefix}{d} {t:.3g}G total, {r:.3g}G reserved, {a:.3g}G allocated, {f:.3g}G free')
+
+ batch_sizes = [1, 2, 4, 8, 16]
+ try:
+ img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes]
+ y = profile(img, model, n=3, device=device)
+ except Exception as e:
+ print(f'{prefix}{e}')
+
+ y = [x[2] for x in y if x] # memory [2]
+ batch_sizes = batch_sizes[:len(y)]
+ p = np.polyfit(batch_sizes, y, deg=1) # first degree polynomial fit
+    b = int((f * fraction - p[1]) / p[0])  # solve fitted line for batch size at target memory fraction
+    print(f'{prefix}Using batch-size {b} for {d} {t * fraction:.3g}G/{t:.3g}G ({fraction * 100:.0f}%)')
+ return b
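The estimate returned above is a straight linear fit: CUDA memory use is profiled at a few probe batch sizes, a first-degree polynomial is fitted, and the fitted line is solved for the batch size that lands at the requested fraction of free memory. A minimal numeric sketch of that arithmetic, using made-up measurements:

```python
import numpy as np

# Hypothetical profiling results: CUDA memory (GB) measured at each probe batch size
batch_sizes = [1, 2, 4, 8, 16]
mem = [0.5, 0.9, 1.7, 3.3, 6.5]          # assumed measurements, roughly linear in b

p = np.polyfit(batch_sizes, mem, deg=1)  # fit mem ≈ p[0] * b + p[1]
f, fraction = 16.0, 0.9                  # say 16 GB free, target 90% utilization
b = int((f * fraction - p[1]) / p[0])    # solve the fitted line for b
print(b)  # -> 35 with these numbers
```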
diff --git a/utils/datasets.py b/utils/datasets.py
index 091d65336fb1..fce005bd597c 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -22,7 +22,7 @@
import torch
import torch.nn.functional as F
import yaml
-from PIL import Image, ExifTags
+from PIL import Image, ImageOps, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
@@ -69,7 +69,7 @@ def exif_size(img):
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
- From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py
+ Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
:param image: The image to transpose.
:return: An image.
@@ -140,7 +140,7 @@ def __iter__(self):
yield next(self.iterator)
-class _RepeatSampler(object):
+class _RepeatSampler:
""" Sampler that repeats forever
Args:
@@ -287,7 +287,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
self.stride = stride
if os.path.isfile(sources):
- with open(sources, 'r') as f:
+ with open(sources) as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
@@ -396,17 +396,17 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
- # f = list(p.rglob('**/*.*')) # pathlib
+ # f = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
- with open(p, 'r') as t:
+ with open(p) as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
- self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])
- # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib
+ self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
+ # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
@@ -681,7 +681,7 @@ def load_mosaic(self, index):
# YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
labels4, segments4 = [], []
s = self.img_size
- yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
+ yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
@@ -767,7 +767,7 @@ def load_mosaic9(self, index):
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
- x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
+ x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
@@ -782,7 +782,7 @@ def load_mosaic9(self, index):
hp, wp = h, w # height, width previous
# Offset
- yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
+ yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
@@ -838,7 +838,7 @@ def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *;
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
- with open(lb_file, 'r') as f:
+ with open(lb_file) as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
@@ -866,7 +866,7 @@ def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annota
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
- files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in IMG_FORMATS], []) # image files only
+ files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
@@ -896,13 +896,13 @@ def verify_image_label(args):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
- Image.open(im_file).save(im_file, format='JPEG', subsampling=0, quality=100) # re-save image
+ ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
- with open(lb_file, 'r') as f:
+ with open(lb_file) as f:
l = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
@@ -944,7 +944,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profil
def round_labels(labels):
# Update labels to integer class and 6 decimal place floats
- return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels]
+ return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
def unzip(path):
# Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
@@ -1019,7 +1019,7 @@ def hub_ops(f, max_dim=1920):
with open(file, 'w') as f:
json.dump(stats, f) # save stats *.json
t2 = time.time()
- with open(file, 'r') as f:
+ with open(file) as f:
x = json.load(f) # load hyps dict
print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
diff --git a/utils/general.py b/utils/general.py
index 02bc741ca3ba..f22908907fd0 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -136,7 +136,7 @@ def is_writeable(dir, test=False):
pass
file.unlink() # remove file
return True
- except IOError:
+ except OSError:
return False
else: # method 2
return os.access(dir, os.R_OK) # possible issues on Windows
@@ -355,7 +355,7 @@ def check_dataset(data, autodownload=True):
assert 'nc' in data, "Dataset 'nc' key missing."
if 'names' not in data:
data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing
- train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')]
+ train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))
if val:
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml
index ac29d104b144..5056b7c1186d 100644
--- a/utils/google_app_engine/app.yaml
+++ b/utils/google_app_engine/app.yaml
@@ -11,4 +11,4 @@ manual_scaling:
resources:
cpu: 1
memory_gb: 4
- disk_size_gb: 20
\ No newline at end of file
+ disk_size_gb: 20
diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py
index b698c3d2db45..ae2d98bdc36d 100644
--- a/utils/loggers/__init__.py
+++ b/utils/loggers/__init__.py
@@ -131,11 +131,11 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
- def on_train_end(self, last, best, plots, epoch):
+ def on_train_end(self, last, best, plots, epoch, results):
# Callback runs on training end
if plots:
plot_results(file=self.save_dir / 'results.csv') # save results.png
- files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
+ files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter
if self.tb:
diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md
index dd7dc1e46d45..d787fb7a5a0e 100644
--- a/utils/loggers/wandb/README.md
+++ b/utils/loggers/wandb/README.md
@@ -61,10 +61,10 @@ You can leverage W&B artifacts and Tables integration to easily visualize and ma
Usage
Code $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data ..
-
+
![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
-
+
2: Train and Log Evaluation simultaneously
This is an extension of the previous section, but it'll also start training after uploading the dataset. This also logs the evaluation Table.
Evaluation table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets,
@@ -72,31 +72,31 @@ You can leverage W&B artifacts and Tables integration to easily visualize and ma
Usage
Code $ python utils/logger/wandb/log_dataset.py --data .. --upload_data
-
+
![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
-
+
3: Train using dataset artifact
- When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that
+ When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that
can be used to train a model directly from the dataset artifact. This also logs evaluation
Usage
Code $ python utils/logger/wandb/log_dataset.py --data {data}_wandb.yaml
-
+
![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
-
+
4: Save model checkpoints as artifacts
- To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base cammand, where `n` represents checkpoint interval.
+ To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval.
You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged
Usage
Code $ python train.py --save_period 1
-
+
![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)
-
+
5: Resume runs from checkpoint artifacts.
@@ -105,28 +105,28 @@ Any run can be resumed using artifacts if the --resume
argument sta
Usage
Code $ python train.py --resume wandb-artifact://{run_path}
-
+
![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
-
+
6: Resume runs from dataset artifact & checkpoint artifacts.
Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device
- The syntax is same as the previous section, but you'll need to lof both the dataset and model checkpoints as artifacts, i.e, set bot --upload_dataset or train from _wandb.yaml file and set --save_period
+ The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e. set both --upload_dataset or train from _wandb.yaml file and set --save_period
Usage
Code $ python train.py --resume wandb-artifact://{run_path}
-
+
![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
-
+
Reports
W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).
-
+
diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml
index c3727de82d4a..c7790d75f6b2 100644
--- a/utils/loggers/wandb/sweep.yaml
+++ b/utils/loggers/wandb/sweep.yaml
@@ -1,17 +1,17 @@
# Hyperparameters for training
-# To set range-
+# To set range-
# Provide min and max values as:
# parameter:
-#
+#
# min: scalar
# max: scalar
# OR
#
# Set a specific list of search space-
-# parameter:
+# parameter:
# values: [scalar1, scalar2, scalar3...]
-#
-# You can use grid, bayesian and hyperopt search strategy
+#
+# You can use grid, bayesian and hyperopt search strategies
# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration
program: utils/loggers/wandb/sweep.py
diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py
index 5c92ed947c7b..8546ec6c63cb 100644
--- a/utils/loggers/wandb/wandb_utils.py
+++ b/utils/loggers/wandb/wandb_utils.py
@@ -5,6 +5,7 @@
import sys
from contextlib import contextmanager
from pathlib import Path
+from typing import Dict
import pkg_resources as pkg
import yaml
@@ -25,7 +26,7 @@
assert hasattr(wandb, '__version__') # verify package import not local dir
except (ImportError, AssertionError):
wandb = None
-
+
RANK = int(os.getenv('RANK', -1))
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
@@ -127,7 +128,7 @@ def __init__(self, opt, run_id=None, job_type='Training'):
arguments:
opt (namespace) -- Commandline arguments for this run
run_id (str) -- Run ID of W&B run to be resumed
- job_type (str) -- To set the job_type for this run
+ job_type (str) -- To set the job_type for this run
"""
# Pre-training routine --
@@ -142,7 +143,8 @@ def __init__(self, opt, run_id=None, job_type='Training'):
self.max_imgs_to_log = 16
self.wandb_artifact_data_dict = None
self.data_dict = None
- # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call
+ # It's more elegant to stick to 1 wandb.init call,
+ # but useful config data is overwritten in the WandbLogger's wandb.init call
if isinstance(opt.resume, str): # checks resume from artifact
if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
@@ -212,7 +214,7 @@ def setup_training(self, opt):
Setup the necessary processes for training YOLO models:
- Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
- Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
- - Setup log_dict, initialize bbox_interval
+ - Setup log_dict, initialize bbox_interval
arguments:
opt (namespace) -- commandline arguments for this run
@@ -301,7 +303,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False):
path (Path) -- Path of directory containing the checkpoints
opt (namespace) -- Command line arguments for this run
epoch (int) -- Current epoch number
- fitness_score (float) -- fitness score for current epoch
+ fitness_score (float) -- fitness score for current epoch
best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
"""
model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
@@ -325,7 +327,7 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=
data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
single_class (boolean) -- train multi-class data as single-class
project (str) -- project name. Used to construct the artifact path
- overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
+ overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
file with _wandb postfix. Eg -> data_wandb.yaml
returns:
@@ -371,14 +373,14 @@ def map_val_table_path(self):
for i, data in enumerate(tqdm(self.val_table.data)):
self.val_table_path_map[data[3]] = data[0]
- def create_dataset_table(self, dataset, class_to_id, name='dataset'):
+    def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'):
"""
Create and return W&B artifact containing W&B Table of the dataset.
arguments:
- dataset (LoadImagesAndLabels) -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
- class_to_id (dict(int, str)) -- hash map that maps class ids to labels
- name (str) -- name of the artifact
+ dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
+ class_to_id -- hash map that maps class ids to labels
+ name -- name of the artifact
returns:
dataset artifact to be logged or used
@@ -419,7 +421,7 @@ def log_training_progress(self, predn, path, names):
arguments:
predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
- path (str): local path of the current evaluation image
+ path (str): local path of the current evaluation image
names (dict(int, str)): hash map that maps class ids to labels
"""
class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
@@ -430,10 +432,10 @@ def log_training_progress(self, predn, path, names):
box_data.append(
{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
- "box_caption": "%s %.3f" % (names[cls], conf),
+ "box_caption": f"{names[cls]} {conf:.3f}",
"scores": {"class_score": conf},
"domain": "pixel"})
- total_conf = total_conf + conf
+ total_conf += conf
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
id = self.val_table_path_map[Path(path).name]
self.result_table.add_data(self.current_epoch,
@@ -450,7 +452,7 @@ def val_one_image(self, pred, predn, path, names, im):
arguments:
pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
- path (str): local path of the current evaluation image
+ path (str): local path of the current evaluation image
"""
if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact
self.log_training_progress(predn, path, names)
@@ -459,7 +461,7 @@ def val_one_image(self, pred, predn, path, names, im):
if self.current_epoch % self.bbox_interval == 0:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
- "box_caption": "%s %.3f" % (names[cls], conf),
+ "box_caption": f"{names[cls]} {conf:.3f}",
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
@@ -486,7 +488,7 @@ def end_epoch(self, best_result=False):
if self.wandb_run:
with all_logging_disabled():
if self.bbox_media_panel_images:
- self.log_dict["Bounding Box Debugger/Images"] = self.bbox_media_panel_images
+ self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images
wandb.log(self.log_dict)
self.log_dict = {}
self.bbox_media_panel_images = []
diff --git a/utils/loss.py b/utils/loss.py
index fac432d0edc3..e8ce42ad994a 100644
--- a/utils/loss.py
+++ b/utils/loss.py
@@ -18,7 +18,7 @@ def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#iss
class BCEBlurWithLogitsLoss(nn.Module):
# BCEwithLogitLoss() with reduced missing label effects.
def __init__(self, alpha=0.05):
- super(BCEBlurWithLogitsLoss, self).__init__()
+ super().__init__()
self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
self.alpha = alpha
@@ -35,7 +35,7 @@ def forward(self, pred, true):
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
- super(FocalLoss, self).__init__()
+ super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
@@ -65,7 +65,7 @@ def forward(self, pred, true):
class QFocalLoss(nn.Module):
# Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
- super(QFocalLoss, self).__init__()
+ super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
diff --git a/utils/plots.py b/utils/plots.py
index 00b8f88811e2..00cda6d8d986 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -250,7 +250,7 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
ax = ax.ravel()
for i in range(4):
- ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
+ ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
ax[i].legend()
ax[i].set_title(s[i])
plt.savefig('targets.jpg', dpi=200)
@@ -363,7 +363,7 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
else:
a.remove()
except Exception as e:
- print('Warning: Plotting error for %s; %s' % (f, e))
+ print(f'Warning: Plotting error for {f}; {e}')
ax[1].legend()
plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
@@ -384,10 +384,10 @@ def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *;
plt.subplot(6, 5, i + 1)
plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
plt.plot(mu, f.max(), 'k+', markersize=15)
- plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
+ plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters
if i % 5 != 0:
plt.yticks([])
- print('%15s: %.3g' % (k, mu))
+ print(f'{k:>15}: {mu:.3g}')
f = evolve_csv.with_suffix('.png') # filename
plt.savefig(f, dpi=200)
plt.close()
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 352ecf572c9f..e6d8ebd743bf 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -123,10 +123,10 @@ def profile(input, ops, n=10, device=None):
y = m(x)
t[1] = time_sync()
try:
- _ = (sum([yi.sum() for yi in y]) if isinstance(y, list) else y).sum().backward()
+ _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()
t[2] = time_sync()
except Exception as e: # no backward method
- print(e)
+ # print(e) # for debug
t[2] = float('nan')
tf += (t[1] - t[0]) * 1000 / n # ms per op forward
tb += (t[2] - t[1]) * 1000 / n # ms per op backward
@@ -223,7 +223,7 @@ def model_info(model, verbose=False, img_size=640):
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
- print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
+ print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}")
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
@@ -270,7 +270,7 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
if not same_shape: # pad/crop img
- h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
+ h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
@@ -299,7 +299,10 @@ def __call__(self, epoch, fitness):
self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch
stop = delta >= self.patience # stop training if patience exceeded
if stop:
- LOGGER.info(f'EarlyStopping patience {self.patience} exceeded, stopping training.')
+ LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. '
+ f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n'
+ f'To update EarlyStopping(patience={self.patience}) pass a new patience value, '
+ f'i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.')
return stop
diff --git a/val.py b/val.py
index 2fc547322a0a..9a26b3b61163 100644
--- a/val.py
+++ b/val.py
@@ -276,13 +276,13 @@ def run(data,
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
- eval = COCOeval(anno, pred, 'bbox')
+ evaluation = COCOeval(anno, pred, 'bbox')
if is_coco:
- eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
- eval.evaluate()
- eval.accumulate()
- eval.summarize()
- map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
+ evaluation.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
+ evaluation.evaluate()
+ evaluation.accumulate()
+ evaluation.summarize()
+ map, map50 = evaluation.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')