From 5d0c8271ed337a1cee20cd5e0b8faf57c75eb1e3 Mon Sep 17 00:00:00 2001
From: DefectDojo release bot
Date: Mon, 1 Jul 2024 15:39:40 +0000
Subject: [PATCH 001/111] Update versions in application files
---
components/package.json | 2 +-
docs/content/en/getting_started/upgrading/2.37.md | 7 +++++++
dojo/__init__.py | 2 +-
helm/defectdojo/Chart.yaml | 4 ++--
4 files changed, 11 insertions(+), 4 deletions(-)
create mode 100644 docs/content/en/getting_started/upgrading/2.37.md
diff --git a/components/package.json b/components/package.json
index ae18f7927c..ab3201e6a4 100644
--- a/components/package.json
+++ b/components/package.json
@@ -1,6 +1,6 @@
{
"name": "defectdojo",
- "version": "2.36.0",
+ "version": "2.37.0-dev",
"license" : "BSD-3-Clause",
"private": true,
"dependencies": {
diff --git a/docs/content/en/getting_started/upgrading/2.37.md b/docs/content/en/getting_started/upgrading/2.37.md
new file mode 100644
index 0000000000..6445a74c76
--- /dev/null
+++ b/docs/content/en/getting_started/upgrading/2.37.md
@@ -0,0 +1,7 @@
+---
+title: 'Upgrading to DefectDojo Version 2.37.x'
+toc_hide: true
+weight: -20240701
+description: No special instructions.
+---
+There are no special instructions for upgrading to 2.37.x. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.37.0) for the contents of the release.
diff --git a/dojo/__init__.py b/dojo/__init__.py
index b6ae12017f..707177ee3e 100644
--- a/dojo/__init__.py
+++ b/dojo/__init__.py
@@ -4,6 +4,6 @@
# Django starts so that shared_task will use this app.
from .celery import app as celery_app # noqa: F401
-__version__ = '2.36.0'
+__version__ = '2.37.0-dev'
__url__ = 'https://github.com/DefectDojo/django-DefectDojo'
__docs__ = 'https://documentation.defectdojo.com'
diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml
index 3ede21350c..ab21a0409c 100644
--- a/helm/defectdojo/Chart.yaml
+++ b/helm/defectdojo/Chart.yaml
@@ -1,8 +1,8 @@
apiVersion: v2
-appVersion: "2.36.0"
+appVersion: "2.37.0-dev"
description: A Helm chart for Kubernetes to install DefectDojo
name: defectdojo
-version: 1.6.137
+version: 1.6.138-dev
icon: https://www.defectdojo.org/img/favicon.ico
maintainers:
- name: madchap
From cb37e7a4afcbb432ccf4a3a8f3bc58e343fc413d Mon Sep 17 00:00:00 2001
From: DefectDojo
Date: Mon, 1 Jul 2024 15:46:55 +0000
Subject: [PATCH 002/111] Update helm lock file
Signed-off-by: DefectDojo
---
helm/defectdojo/Chart.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/helm/defectdojo/Chart.lock b/helm/defectdojo/Chart.lock
index 74468c83ba..83c44cb3c1 100644
--- a/helm/defectdojo/Chart.lock
+++ b/helm/defectdojo/Chart.lock
@@ -4,7 +4,7 @@ dependencies:
version: 9.19.1
- name: postgresql
repository: https://charts.bitnami.com/bitnami
- version: 15.5.9
+ version: 15.5.11
- name: postgresql-ha
repository: https://charts.bitnami.com/bitnami
version: 9.4.11
@@ -14,5 +14,5 @@ dependencies:
- name: redis
repository: https://charts.bitnami.com/bitnami
version: 19.5.5
-digest: sha256:7ad88ea953ebef3acbd1270eeae206e4e650f2fb20f754e0d912688795500b18
-generated: "2024-06-24T18:56:55.876075791Z"
+digest: sha256:19929ba605dd8c095aaa65be37fef1330044dace57397aa023a0b73764b32599
+generated: "2024-07-01T15:46:45.361963299Z"
From b43d8f06fc04801d3ca0b0a2c6ee40e70b7cd9dc Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 1 Jul 2024 12:03:58 -0500
Subject: [PATCH 003/111] Update Helm release redis from 19.5.5 to ~19.6.0
(helm/defectdojo/Chart.yaml) (#10461)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
helm/defectdojo/Chart.lock | 6 +++---
helm/defectdojo/Chart.yaml | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/helm/defectdojo/Chart.lock b/helm/defectdojo/Chart.lock
index 83c44cb3c1..9bd08b08b0 100644
--- a/helm/defectdojo/Chart.lock
+++ b/helm/defectdojo/Chart.lock
@@ -13,6 +13,6 @@ dependencies:
version: 14.4.4
- name: redis
repository: https://charts.bitnami.com/bitnami
- version: 19.5.5
-digest: sha256:19929ba605dd8c095aaa65be37fef1330044dace57397aa023a0b73764b32599
-generated: "2024-07-01T15:46:45.361963299Z"
+ version: 19.6.0
+digest: sha256:d00f56b5b3cf6525a4e06c82789ec7dd68526959ce38ea50e5251151535dcd8b
+generated: "2024-07-01T16:26:01.747085461Z"
diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml
index ab21a0409c..58e421ed49 100644
--- a/helm/defectdojo/Chart.yaml
+++ b/helm/defectdojo/Chart.yaml
@@ -27,6 +27,6 @@ dependencies:
repository: "https://charts.bitnami.com/bitnami"
condition: rabbitmq.enabled
- name: redis
- version: ~19.5.0
+ version: ~19.6.0
repository: "https://charts.bitnami.com/bitnami"
condition: redis.enabled
From 569c82654130f70051918982c240d8c95ef422ea Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 1 Jul 2024 16:07:21 -0500
Subject: [PATCH 004/111] Bump python-gitlab from 4.6.0 to 4.7.0 (#10469)
Bumps [python-gitlab](https://github.com/python-gitlab/python-gitlab) from 4.6.0 to 4.7.0.
- [Release notes](https://github.com/python-gitlab/python-gitlab/releases)
- [Changelog](https://github.com/python-gitlab/python-gitlab/blob/main/CHANGELOG.md)
- [Commits](https://github.com/python-gitlab/python-gitlab/compare/v4.6.0...v4.7.0)
---
updated-dependencies:
- dependency-name: python-gitlab
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 27eb0b2603..5fb1370338 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -52,7 +52,7 @@ social-auth-core==4.5.4
Python-jose==3.3.0
gitpython==3.1.43
debugpy==1.8.2
-python-gitlab==4.6.0
+python-gitlab==4.7.0
cpe==1.2.1
packageurl-python==0.15.1
django-crum==0.7.9
From be7051ab9b6ba7091193338ef61d58d402ba8457 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 1 Jul 2024 16:25:54 -0500
Subject: [PATCH 005/111] Update dependency ruff from 0.4.10 to v0.5.0
(requirements-lint.txt) (#10466)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
requirements-lint.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements-lint.txt b/requirements-lint.txt
index e022cdb619..0eceab328e 100644
--- a/requirements-lint.txt
+++ b/requirements-lint.txt
@@ -1 +1 @@
-ruff==0.4.10
\ No newline at end of file
+ruff==0.5.0
\ No newline at end of file
From dff84bdfead2269ee62f717931681c31be5f3b7f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 1 Jul 2024 17:00:46 -0500
Subject: [PATCH 006/111] Bump boto3 from 1.34.135 to 1.34.136 (#10480)
Bumps [boto3](https://github.com/boto/boto3) from 1.34.135 to 1.34.136.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.34.135...1.34.136)
---
updated-dependencies:
- dependency-name: boto3
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 5fb1370338..17efe04d4c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -75,7 +75,7 @@ django-ratelimit==4.1.0
argon2-cffi==23.1.0
blackduck==1.1.3
pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
-boto3==1.34.135 # Required for Celery Broker AWS (SQS) support
+boto3==1.34.136 # Required for Celery Broker AWS (SQS) support
netaddr==1.3.0
vulners==2.1.7
fontawesomefree==6.5.1
From 039f3cb87d88fee817e330238b5740c28d43b83b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 1 Jul 2024 17:02:48 -0500
Subject: [PATCH 007/111] Bump django-test-migrations from 1.3.0 to 1.4.0
(#10481)
Bumps [django-test-migrations](https://github.com/wemake-services/django-test-migrations) from 1.3.0 to 1.4.0.
- [Release notes](https://github.com/wemake-services/django-test-migrations/releases)
- [Changelog](https://github.com/wemake-services/django-test-migrations/blob/master/CHANGELOG.md)
- [Commits](https://github.com/wemake-services/django-test-migrations/compare/1.3.0...1.4.0)
---
updated-dependencies:
- dependency-name: django-test-migrations
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 17efe04d4c..344f2a4e75 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -67,7 +67,7 @@ PyJWT==2.8.0
cvss==3.1
django-fieldsignals==0.7.0
hyperlink==21.0.0
-django-test-migrations==1.3.0
+django-test-migrations==1.4.0
djangosaml2==1.9.3
drf-spectacular==0.27.2
drf-spectacular-sidecar==2024.6.1
From ed1551adf527adb412cf908926a3d005167fe0ce Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 1 Jul 2024 17:03:34 -0500
Subject: [PATCH 008/111] Bump openpyxl from 3.1.4 to 3.1.5 (#10478)
Bumps [openpyxl](https://openpyxl.readthedocs.io) from 3.1.4 to 3.1.5.
---
updated-dependencies:
- dependency-name: openpyxl
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 344f2a4e75..b867d11e39 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -33,7 +33,7 @@ PyGithub==1.58.2
lxml==5.2.2
Markdown==3.6
mysqlclient==2.1.1
-openpyxl==3.1.4
+openpyxl==3.1.5
Pillow==10.3.0 # required by django-imagekit
psycopg2-binary==2.9.9
cryptography==42.0.8
From aae8b00badac520e4b7b7eb7deec21ee77f26297 Mon Sep 17 00:00:00 2001
From: kiblik <5609770+kiblik@users.noreply.github.com>
Date: Tue, 2 Jul 2024 04:23:43 +0200
Subject: [PATCH 009/111] Ruff: add and fix TID (#10113)
---
ruff.toml | 1 +
unittests/authorization/test_authorization.py | 3 +--
unittests/authorization/test_authorization_decorators.py | 3 +--
unittests/authorization/test_authorization_tags.py | 3 +--
unittests/tools/test_acunetix_parser.py | 3 +--
unittests/tools/test_anchore_engine_parser.py | 3 +--
unittests/tools/test_anchore_enterprise_parser.py | 3 +--
unittests/tools/test_anchore_grype_parser.py | 3 +--
unittests/tools/test_anchorectl_policies_parser.py | 3 +--
unittests/tools/test_anchorectl_vulns_parser.py | 3 +--
unittests/tools/test_api_blackduck_parser.py | 3 +--
unittests/tools/test_api_cobalt_importer.py | 3 +--
unittests/tools/test_api_cobalt_parser.py | 3 +--
unittests/tools/test_api_sonarqube_importer.py | 3 +--
unittests/tools/test_api_sonarqube_parser.py | 3 +--
unittests/tools/test_api_vulners_parser.py | 3 +--
unittests/tools/test_appspider_parser.py | 3 +--
unittests/tools/test_aqua_parser.py | 3 +--
unittests/tools/test_arachni_parser.py | 3 +--
unittests/tools/test_asff_parser.py | 3 +--
unittests/tools/test_auditjs_parser.py | 3 +--
unittests/tools/test_aws_prowler_parser.py | 3 +--
unittests/tools/test_aws_prowler_v3_parser.py | 3 +--
unittests/tools/test_awssecurityhub_parser.py | 3 +--
.../tools/test_azure_security_center_recommendations_parser.py | 3 +--
unittests/tools/test_bandit_parser.py | 3 +--
unittests/tools/test_blackduck_binary_analysis_parser.py | 3 +--
unittests/tools/test_blackduck_component_risk_parser.py | 3 +--
unittests/tools/test_blackduck_parser.py | 3 +--
unittests/tools/test_brakeman_parser.py | 3 +--
unittests/tools/test_bugcrowd_parser.py | 3 +--
unittests/tools/test_bundler_audit_parser.py | 3 +--
unittests/tools/test_burp_api_parser.py | 3 +--
unittests/tools/test_burp_dastardly_parser.py | 3 +--
unittests/tools/test_burp_enterprise_parser.py | 3 +--
unittests/tools/test_burp_graphql_parser.py | 3 +--
unittests/tools/test_burp_parser.py | 3 +--
unittests/tools/test_cargo_audit_parser.py | 3 +--
unittests/tools/test_checkmarx_one_parser.py | 3 +--
unittests/tools/test_checkmarx_osa_parser.py | 3 +--
unittests/tools/test_checkmarx_parser.py | 3 +--
unittests/tools/test_checkov_parser.py | 3 +--
unittests/tools/test_chefinspect_parser.py | 3 +--
unittests/tools/test_clair_parser.py | 3 +--
unittests/tools/test_cloudsploit_parser.py | 3 +--
unittests/tools/test_cobalt_parser.py | 3 +--
unittests/tools/test_codechecker_parser.py | 3 +--
unittests/tools/test_contrast_parser.py | 3 +--
unittests/tools/test_coverity_api_parser.py | 3 +--
unittests/tools/test_crashtest_security_parser.py | 3 +--
unittests/tools/test_cred_scan_parser.py | 3 +--
unittests/tools/test_crunch42_parser.py | 3 +--
unittests/tools/test_cyclonedx_parser.py | 3 +--
unittests/tools/test_dawnscanner_parser.py | 3 +--
unittests/tools/test_deepfence_threatmapper_parser.py | 3 +--
unittests/tools/test_dependency_check_parser.py | 3 +--
unittests/tools/test_dependency_track_parser.py | 3 +--
unittests/tools/test_detect_secrets_parser.py | 3 +--
unittests/tools/test_dockerbench_parser.py | 3 +--
unittests/tools/test_dockle_parser.py | 3 +--
unittests/tools/test_drheader_parser.py | 3 +--
unittests/tools/test_dsop_parser.py | 3 +--
unittests/tools/test_eslint_parser.py | 3 +--
unittests/tools/test_fortify_parser.py | 3 +--
unittests/tools/test_gcloud_artifact_scan_parser.py | 3 +--
unittests/tools/test_generic_parser.py | 3 +--
unittests/tools/test_ggshield_parser.py | 3 +--
unittests/tools/test_github_vulnerability_parser.py | 3 +--
unittests/tools/test_gitlab_api_fuzzing_parser.py | 3 +--
unittests/tools/test_gitlab_container_scan_parser.py | 3 +--
unittests/tools/test_gitlab_dast_parser.py | 3 +--
unittests/tools/test_gitlab_dep_scan_parser.py | 3 +--
unittests/tools/test_gitlab_sast_parser.py | 3 +--
unittests/tools/test_gitlab_secret_detection_report_parser.py | 3 +--
unittests/tools/test_gitleaks_parser.py | 3 +--
unittests/tools/test_gosec_parser.py | 3 +--
unittests/tools/test_h1_parser.py | 3 +--
unittests/tools/test_hadolint_parser.py | 3 +--
unittests/tools/test_harbor_vulnerability_parser.py | 3 +--
unittests/tools/test_hcl_appscan_parser.py | 3 +--
unittests/tools/test_horusec_parser.py | 3 +--
unittests/tools/test_huskyci_parser.py | 3 +--
unittests/tools/test_ibm_app_parser.py | 3 +--
unittests/tools/test_immuniweb_parser.py | 3 +--
unittests/tools/test_intsights_parser.py | 3 +--
unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py | 3 +--
.../tools/test_jfrog_xray_on_demand_binary_scan_parser.py | 3 +--
unittests/tools/test_jfrog_xray_unified_parser.py | 3 +--
unittests/tools/test_jfrogxray_parser.py | 3 +--
unittests/tools/test_kics_parser.py | 3 +--
unittests/tools/test_kiuwan_parser.py | 3 +--
unittests/tools/test_kubeaudit_parser.py | 3 +--
unittests/tools/test_kubebench_parser.py | 3 +--
unittests/tools/test_kubescape_parser.py | 3 +--
unittests/tools/test_mend_parser.py | 3 +--
unittests/tools/test_meterian_parser.py | 3 +--
unittests/tools/test_microfocus_webinspect_parser.py | 3 +--
unittests/tools/test_mobsf_parser.py | 3 +--
unittests/tools/test_mobsfscan_parser.py | 3 +--
unittests/tools/test_mozilla_observatory_parser.py | 3 +--
unittests/tools/test_ms_defender_parser.py | 3 +--
unittests/tools/test_nancy_parser.py | 3 +--
unittests/tools/test_netsparker_parser.py | 3 +--
unittests/tools/test_neuvector_compliance_parser.py | 3 +--
unittests/tools/test_neuvector_parser.py | 3 +--
unittests/tools/test_nexpose_parser.py | 3 +--
unittests/tools/test_nikto_parser.py | 3 +--
unittests/tools/test_nmap_parser.py | 3 +--
unittests/tools/test_npm_audit_7_plus_parser.py | 3 +--
unittests/tools/test_npm_audit_parser.py | 3 +--
unittests/tools/test_nsp_parser.py | 3 +--
unittests/tools/test_nuclei_parser.py | 3 +--
unittests/tools/test_openscap_parser.py | 3 +--
unittests/tools/test_openvas_parser.py | 3 +--
unittests/tools/test_ort_parser.py | 3 +--
unittests/tools/test_ossindex_devaudit_parser.py | 3 +--
unittests/tools/test_osv_scanner_parser.py | 3 +--
unittests/tools/test_outpost24_parser.py | 3 +--
unittests/tools/test_php_security_audit_v2_parser.py | 3 +--
unittests/tools/test_php_symfony_security_check_parser.py | 3 +--
unittests/tools/test_pip_audit_parser.py | 3 +--
unittests/tools/test_pmd_parser.py | 3 +--
unittests/tools/test_popeye_parser.py | 3 +--
unittests/tools/test_progpilot_parser.py | 3 +--
unittests/tools/test_pwn_sast_parser.py | 3 +--
unittests/tools/test_qualys_infrascan_webgui_parser.py | 3 +--
unittests/tools/test_qualys_parser.py | 3 +--
unittests/tools/test_qualys_webapp_parser.py | 3 +--
unittests/tools/test_redhatsatellite_parser.py | 3 +--
unittests/tools/test_retirejs_parser.py | 3 +--
unittests/tools/test_risk_recon_parser.py | 3 +--
unittests/tools/test_rubocop_parser.py | 3 +--
unittests/tools/test_rusty_hog_parser.py | 3 +--
unittests/tools/test_sarif_parser.py | 3 +--
unittests/tools/test_scantist_parser.py | 3 +--
unittests/tools/test_scout_suite_parser.py | 3 +--
unittests/tools/test_semgrep_parser.py | 3 +--
unittests/tools/test_skf_parser.py | 3 +--
unittests/tools/test_snyk_code_parser.py | 3 +--
unittests/tools/test_snyk_parser.py | 3 +--
unittests/tools/test_solar_appscreener_parser.py | 3 +--
unittests/tools/test_sonarqube_parser.py | 3 +--
unittests/tools/test_sonatype_parser.py | 3 +--
unittests/tools/test_spotbugs_parser.py | 3 +--
unittests/tools/test_ssh_audit_parser.py | 3 +--
unittests/tools/test_ssl_labs_parser.py | 3 +--
unittests/tools/test_sslscan_parser.py | 3 +--
unittests/tools/test_sslyze_parser.py | 3 +--
unittests/tools/test_talisman_parser.py | 3 +--
unittests/tools/test_tenable_parser.py | 3 +--
unittests/tools/test_terrascan_parser.py | 3 +--
unittests/tools/test_testssl_parser.py | 3 +--
unittests/tools/test_tfsec_parser.py | 3 +--
unittests/tools/test_trivy_operator_parser.py | 3 +--
unittests/tools/test_trivy_parser.py | 3 +--
unittests/tools/test_trufflehog3_parser.py | 3 +--
unittests/tools/test_trufflehog_parser.py | 3 +--
unittests/tools/test_trustwave_fusion_api_parser.py | 3 +--
unittests/tools/test_trustwave_parser.py | 3 +--
unittests/tools/test_twistlock_parser.py | 3 +--
unittests/tools/test_vcg_parser.py | 3 +--
unittests/tools/test_veracode_parser.py | 3 +--
unittests/tools/test_veracode_sca_parser.py | 3 +--
unittests/tools/test_wapiti_parser.py | 3 +--
unittests/tools/test_wazuh_parser.py | 3 +--
unittests/tools/test_wfuzz_parser.py | 3 +--
unittests/tools/test_whitehat_sentinel_parser.py | 3 +--
unittests/tools/test_wiz_parser.py | 3 +--
unittests/tools/test_wpscan_parser.py | 3 +--
unittests/tools/test_xanitizer_parser.py | 3 +--
unittests/tools/test_yarn_audit_parser.py | 3 +--
unittests/tools/test_zap_parser.py | 3 +--
172 files changed, 172 insertions(+), 342 deletions(-)
diff --git a/ruff.toml b/ruff.toml
index 1349d475e9..32ad177291 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -51,6 +51,7 @@ select = [
"INP",
"SLOT",
"RSE",
+ "TID",
"PD",
"PGH",
"TRY003",
diff --git a/unittests/authorization/test_authorization.py b/unittests/authorization/test_authorization.py
index 0240171152..726bfb544a 100644
--- a/unittests/authorization/test_authorization.py
+++ b/unittests/authorization/test_authorization.py
@@ -35,8 +35,7 @@
Stub_Finding,
Test,
)
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAuthorization(DojoTestCase):
diff --git a/unittests/authorization/test_authorization_decorators.py b/unittests/authorization/test_authorization_decorators.py
index ea5d76eee8..7e5b5d04a3 100644
--- a/unittests/authorization/test_authorization_decorators.py
+++ b/unittests/authorization/test_authorization_decorators.py
@@ -8,8 +8,7 @@
from dojo.authorization.authorization_decorators import user_is_authorized, user_is_configuration_authorized
from dojo.authorization.roles_permissions import Permissions
from dojo.models import Product_Type
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAuthorizationDecorators(DojoTestCase):
diff --git a/unittests/authorization/test_authorization_tags.py b/unittests/authorization/test_authorization_tags.py
index 6a31ca5666..8b49c25a18 100644
--- a/unittests/authorization/test_authorization_tags.py
+++ b/unittests/authorization/test_authorization_tags.py
@@ -10,8 +10,7 @@
has_object_permission,
user_has_configuration_permission_without_group,
)
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAuthorizationTags(DojoTestCase):
diff --git a/unittests/tools/test_acunetix_parser.py b/unittests/tools/test_acunetix_parser.py
index cd11c874fb..eabe5e2322 100644
--- a/unittests/tools/test_acunetix_parser.py
+++ b/unittests/tools/test_acunetix_parser.py
@@ -3,8 +3,7 @@
from dojo.models import Test
from dojo.tools.acunetix.parser import AcunetixParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAcunetixParser(DojoTestCase):
diff --git a/unittests/tools/test_anchore_engine_parser.py b/unittests/tools/test_anchore_engine_parser.py
index 29946312a8..ee4f2ae509 100644
--- a/unittests/tools/test_anchore_engine_parser.py
+++ b/unittests/tools/test_anchore_engine_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.anchore_engine.parser import AnchoreEngineParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAnchoreEngineParser(DojoTestCase):
diff --git a/unittests/tools/test_anchore_enterprise_parser.py b/unittests/tools/test_anchore_enterprise_parser.py
index 3ea2286a4c..1d286d8a84 100644
--- a/unittests/tools/test_anchore_enterprise_parser.py
+++ b/unittests/tools/test_anchore_enterprise_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.anchore_enterprise.parser import AnchoreEnterpriseParser, extract_vulnerability_id, search_filepath
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAnchoreEnterpriseParser(DojoTestCase):
diff --git a/unittests/tools/test_anchore_grype_parser.py b/unittests/tools/test_anchore_grype_parser.py
index 0b41180de1..f20b950913 100644
--- a/unittests/tools/test_anchore_grype_parser.py
+++ b/unittests/tools/test_anchore_grype_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Finding, Test
from dojo.tools.anchore_grype.parser import AnchoreGrypeParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAnchoreGrypeParser(DojoTestCase):
diff --git a/unittests/tools/test_anchorectl_policies_parser.py b/unittests/tools/test_anchorectl_policies_parser.py
index 87968ea792..0bb48d0521 100644
--- a/unittests/tools/test_anchorectl_policies_parser.py
+++ b/unittests/tools/test_anchorectl_policies_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.anchorectl_policies.parser import AnchoreCTLPoliciesParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAnchoreCTLPoliciesParser(DojoTestCase):
diff --git a/unittests/tools/test_anchorectl_vulns_parser.py b/unittests/tools/test_anchorectl_vulns_parser.py
index 40295ec027..384de7aba8 100644
--- a/unittests/tools/test_anchorectl_vulns_parser.py
+++ b/unittests/tools/test_anchorectl_vulns_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.anchorectl_vulns.parser import AnchoreCTLVulnsParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAnchoreCTLVulnsParser(DojoTestCase):
diff --git a/unittests/tools/test_api_blackduck_parser.py b/unittests/tools/test_api_blackduck_parser.py
index d97b301f13..a8922ca99b 100644
--- a/unittests/tools/test_api_blackduck_parser.py
+++ b/unittests/tools/test_api_blackduck_parser.py
@@ -1,8 +1,7 @@
from dojo.models import SEVERITIES, Test
from dojo.tools.api_blackduck.parser import ApiBlackduckParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestApiBlackduckParser(DojoTestCase):
diff --git a/unittests/tools/test_api_cobalt_importer.py b/unittests/tools/test_api_cobalt_importer.py
index 9eb78e373b..8361ac9b10 100644
--- a/unittests/tools/test_api_cobalt_importer.py
+++ b/unittests/tools/test_api_cobalt_importer.py
@@ -5,8 +5,7 @@
from dojo.models import Engagement, Product, Product_API_Scan_Configuration, Test, Tool_Configuration, Tool_Type
from dojo.tools.api_cobalt.importer import CobaltApiImporter
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestCobaltApiImporter(DojoTestCase):
diff --git a/unittests/tools/test_api_cobalt_parser.py b/unittests/tools/test_api_cobalt_parser.py
index 956bce388a..644dec0e5f 100644
--- a/unittests/tools/test_api_cobalt_parser.py
+++ b/unittests/tools/test_api_cobalt_parser.py
@@ -3,8 +3,7 @@
from dojo.models import Test, Test_Type
from dojo.tools.api_cobalt.parser import ApiCobaltParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestApiCobaltParser(DojoTestCase):
diff --git a/unittests/tools/test_api_sonarqube_importer.py b/unittests/tools/test_api_sonarqube_importer.py
index 44e29f011b..0b30008c1c 100644
--- a/unittests/tools/test_api_sonarqube_importer.py
+++ b/unittests/tools/test_api_sonarqube_importer.py
@@ -5,8 +5,7 @@
from dojo.models import Engagement, Product, Product_API_Scan_Configuration, Test
from dojo.tools.api_sonarqube.importer import SonarQubeApiImporter
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
def dummy_product(self, *args, **kwargs):
diff --git a/unittests/tools/test_api_sonarqube_parser.py b/unittests/tools/test_api_sonarqube_parser.py
index dd8de04c3b..ffb33b76ee 100644
--- a/unittests/tools/test_api_sonarqube_parser.py
+++ b/unittests/tools/test_api_sonarqube_parser.py
@@ -11,8 +11,7 @@
Tool_Type,
)
from dojo.tools.api_sonarqube.parser import ApiSonarQubeParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
def dummy_product(self, *args, **kwargs):
diff --git a/unittests/tools/test_api_vulners_parser.py b/unittests/tools/test_api_vulners_parser.py
index ea370389ba..bd2e4df212 100644
--- a/unittests/tools/test_api_vulners_parser.py
+++ b/unittests/tools/test_api_vulners_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.api_vulners.parser import ApiVulnersParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestApiVulnersParser(DojoTestCase):
diff --git a/unittests/tools/test_appspider_parser.py b/unittests/tools/test_appspider_parser.py
index 094ca3e01e..0d94c4b719 100644
--- a/unittests/tools/test_appspider_parser.py
+++ b/unittests/tools/test_appspider_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Engagement, Finding, Product, Test
from dojo.tools.appspider.parser import AppSpiderParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAppSpiderParser(DojoTestCase):
diff --git a/unittests/tools/test_aqua_parser.py b/unittests/tools/test_aqua_parser.py
index 89f16334fa..39191a7e3d 100644
--- a/unittests/tools/test_aqua_parser.py
+++ b/unittests/tools/test_aqua_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.aqua.parser import AquaParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAquaParser(DojoTestCase):
diff --git a/unittests/tools/test_arachni_parser.py b/unittests/tools/test_arachni_parser.py
index ca28de74b4..ca75a6ed43 100644
--- a/unittests/tools/test_arachni_parser.py
+++ b/unittests/tools/test_arachni_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.arachni.parser import ArachniParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestArachniParser(DojoTestCase):
diff --git a/unittests/tools/test_asff_parser.py b/unittests/tools/test_asff_parser.py
index 666e7bf8e7..68c409c166 100644
--- a/unittests/tools/test_asff_parser.py
+++ b/unittests/tools/test_asff_parser.py
@@ -4,8 +4,7 @@
from dojo.models import Endpoint, Test
from dojo.tools.asff.parser import AsffParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
def sample_path(file_name):
diff --git a/unittests/tools/test_auditjs_parser.py b/unittests/tools/test_auditjs_parser.py
index 629a6c9012..789efc73d0 100644
--- a/unittests/tools/test_auditjs_parser.py
+++ b/unittests/tools/test_auditjs_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.auditjs.parser import AuditJSParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAuditJSParser(DojoTestCase):
diff --git a/unittests/tools/test_aws_prowler_parser.py b/unittests/tools/test_aws_prowler_parser.py
index e4e1ee8392..2c33d706bd 100644
--- a/unittests/tools/test_aws_prowler_parser.py
+++ b/unittests/tools/test_aws_prowler_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.aws_prowler.parser import AWSProwlerParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAwsProwlerParser(DojoTestCase):
diff --git a/unittests/tools/test_aws_prowler_v3_parser.py b/unittests/tools/test_aws_prowler_v3_parser.py
index abbecc578e..c92ff3f922 100644
--- a/unittests/tools/test_aws_prowler_v3_parser.py
+++ b/unittests/tools/test_aws_prowler_v3_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.aws_prowler_v3.parser import AWSProwlerV3Parser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAwsProwlerV3Parser(DojoTestCase):
diff --git a/unittests/tools/test_awssecurityhub_parser.py b/unittests/tools/test_awssecurityhub_parser.py
index 3734f2e440..7993b065a5 100644
--- a/unittests/tools/test_awssecurityhub_parser.py
+++ b/unittests/tools/test_awssecurityhub_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.awssecurityhub.parser import AwsSecurityHubParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
def sample_path(file_name: str):
diff --git a/unittests/tools/test_azure_security_center_recommendations_parser.py b/unittests/tools/test_azure_security_center_recommendations_parser.py
index 5ce988dfed..e5c9823b14 100644
--- a/unittests/tools/test_azure_security_center_recommendations_parser.py
+++ b/unittests/tools/test_azure_security_center_recommendations_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.azure_security_center_recommendations.parser import AzureSecurityCenterRecommendationsParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestAzureSecurityCenterRecommendationsParser(DojoTestCase):
diff --git a/unittests/tools/test_bandit_parser.py b/unittests/tools/test_bandit_parser.py
index f52a318738..dc4a3f34c8 100644
--- a/unittests/tools/test_bandit_parser.py
+++ b/unittests/tools/test_bandit_parser.py
@@ -4,8 +4,7 @@
from dojo.models import Test
from dojo.tools.bandit.parser import BanditParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestBanditParser(DojoTestCase):
diff --git a/unittests/tools/test_blackduck_binary_analysis_parser.py b/unittests/tools/test_blackduck_binary_analysis_parser.py
index 89d237c872..29c4130294 100644
--- a/unittests/tools/test_blackduck_binary_analysis_parser.py
+++ b/unittests/tools/test_blackduck_binary_analysis_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.blackduck_binary_analysis.parser import BlackduckBinaryAnalysisParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestBlackduckBinaryAnalysisParser(DojoTestCase):
diff --git a/unittests/tools/test_blackduck_component_risk_parser.py b/unittests/tools/test_blackduck_component_risk_parser.py
index 7ad8db6230..2a520c33aa 100644
--- a/unittests/tools/test_blackduck_component_risk_parser.py
+++ b/unittests/tools/test_blackduck_component_risk_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.blackduck_component_risk.parser import BlackduckComponentRiskParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestBlackduckComponentRiskParser(DojoTestCase):
diff --git a/unittests/tools/test_blackduck_parser.py b/unittests/tools/test_blackduck_parser.py
index 1eaacd157a..96c0f2eac3 100644
--- a/unittests/tools/test_blackduck_parser.py
+++ b/unittests/tools/test_blackduck_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.blackduck.parser import BlackduckParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestBlackduckHubParser(DojoTestCase):
diff --git a/unittests/tools/test_brakeman_parser.py b/unittests/tools/test_brakeman_parser.py
index c8e3f195b6..0c8e4a43c8 100644
--- a/unittests/tools/test_brakeman_parser.py
+++ b/unittests/tools/test_brakeman_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.brakeman.parser import BrakemanParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestBrakemanParser(DojoTestCase):
diff --git a/unittests/tools/test_bugcrowd_parser.py b/unittests/tools/test_bugcrowd_parser.py
index f21451e7c2..dc39110fb9 100644
--- a/unittests/tools/test_bugcrowd_parser.py
+++ b/unittests/tools/test_bugcrowd_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.bugcrowd.parser import BugCrowdParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestBugCrowdParser(DojoTestCase):
diff --git a/unittests/tools/test_bundler_audit_parser.py b/unittests/tools/test_bundler_audit_parser.py
index 612f48007e..b08834c38b 100644
--- a/unittests/tools/test_bundler_audit_parser.py
+++ b/unittests/tools/test_bundler_audit_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.bundler_audit.parser import BundlerAuditParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestBundlerAuditParser(DojoTestCase):
diff --git a/unittests/tools/test_burp_api_parser.py b/unittests/tools/test_burp_api_parser.py
index 3217261faa..bbd36e634a 100644
--- a/unittests/tools/test_burp_api_parser.py
+++ b/unittests/tools/test_burp_api_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.burp_api.parser import BurpApiParser, convert_confidence, convert_severity
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestParser(DojoTestCase):
diff --git a/unittests/tools/test_burp_dastardly_parser.py b/unittests/tools/test_burp_dastardly_parser.py
index d7e16b48ab..3c17bcb091 100644
--- a/unittests/tools/test_burp_dastardly_parser.py
+++ b/unittests/tools/test_burp_dastardly_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.burp_dastardly.parser import BurpDastardlyParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestBurpParser(DojoTestCase):
diff --git a/unittests/tools/test_burp_enterprise_parser.py b/unittests/tools/test_burp_enterprise_parser.py
index 0aeb7c0302..cbbdec18ac 100644
--- a/unittests/tools/test_burp_enterprise_parser.py
+++ b/unittests/tools/test_burp_enterprise_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.burp_enterprise.parser import BurpEnterpriseParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestBurpEnterpriseParser(DojoTestCase):
diff --git a/unittests/tools/test_burp_graphql_parser.py b/unittests/tools/test_burp_graphql_parser.py
index cbc1ca8d5a..1574d4f3e0 100644
--- a/unittests/tools/test_burp_graphql_parser.py
+++ b/unittests/tools/test_burp_graphql_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.burp_graphql.parser import BurpGraphQLParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestBurpGraphQLParser(DojoTestCase):
diff --git a/unittests/tools/test_burp_parser.py b/unittests/tools/test_burp_parser.py
index f29aea11ad..12aad2405a 100644
--- a/unittests/tools/test_burp_parser.py
+++ b/unittests/tools/test_burp_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.burp.parser import BurpParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestBurpParser(DojoTestCase):
diff --git a/unittests/tools/test_cargo_audit_parser.py b/unittests/tools/test_cargo_audit_parser.py
index 5a1bb6aceb..c1f3c62291 100644
--- a/unittests/tools/test_cargo_audit_parser.py
+++ b/unittests/tools/test_cargo_audit_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.cargo_audit.parser import CargoAuditParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestCargoAuditParser(DojoTestCase):
diff --git a/unittests/tools/test_checkmarx_one_parser.py b/unittests/tools/test_checkmarx_one_parser.py
index 74a112a9ec..bc234f8e7b 100644
--- a/unittests/tools/test_checkmarx_one_parser.py
+++ b/unittests/tools/test_checkmarx_one_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.checkmarx_one.parser import CheckmarxOneParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestCheckmarxOneParser(DojoTestCase):
diff --git a/unittests/tools/test_checkmarx_osa_parser.py b/unittests/tools/test_checkmarx_osa_parser.py
index afefb96821..bfe1590c77 100644
--- a/unittests/tools/test_checkmarx_osa_parser.py
+++ b/unittests/tools/test_checkmarx_osa_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Engagement, Product, Test
from dojo.tools.checkmarx_osa.parser import CheckmarxOsaParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestCheckmarxOsaParser(DojoTestCase):
diff --git a/unittests/tools/test_checkmarx_parser.py b/unittests/tools/test_checkmarx_parser.py
index 8fb37eff9b..88e5cc965b 100644
--- a/unittests/tools/test_checkmarx_parser.py
+++ b/unittests/tools/test_checkmarx_parser.py
@@ -3,8 +3,7 @@
from dojo.models import Engagement, Product, Test
from dojo.tools.checkmarx.parser import CheckmarxParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestCheckmarxParser(DojoTestCase):
diff --git a/unittests/tools/test_checkov_parser.py b/unittests/tools/test_checkov_parser.py
index ef9f67dc1c..b626fc952e 100644
--- a/unittests/tools/test_checkov_parser.py
+++ b/unittests/tools/test_checkov_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.checkov.parser import CheckovParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestCheckovParser(DojoTestCase):
diff --git a/unittests/tools/test_chefinspect_parser.py b/unittests/tools/test_chefinspect_parser.py
index 9455020257..d979b4b313 100644
--- a/unittests/tools/test_chefinspect_parser.py
+++ b/unittests/tools/test_chefinspect_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.chefinspect.parser import ChefInspectParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestChefInspectParser(DojoTestCase):
diff --git a/unittests/tools/test_clair_parser.py b/unittests/tools/test_clair_parser.py
index 745b92003d..6ecdbfcd59 100644
--- a/unittests/tools/test_clair_parser.py
+++ b/unittests/tools/test_clair_parser.py
@@ -1,6 +1,5 @@
from dojo.tools.clair.parser import ClairParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestClairParser(DojoTestCase):
diff --git a/unittests/tools/test_cloudsploit_parser.py b/unittests/tools/test_cloudsploit_parser.py
index a054848a1b..0e1564390c 100644
--- a/unittests/tools/test_cloudsploit_parser.py
+++ b/unittests/tools/test_cloudsploit_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.cloudsploit.parser import CloudsploitParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestCloudsploitParser(DojoTestCase):
diff --git a/unittests/tools/test_cobalt_parser.py b/unittests/tools/test_cobalt_parser.py
index d3bca067c1..f8f3f90815 100644
--- a/unittests/tools/test_cobalt_parser.py
+++ b/unittests/tools/test_cobalt_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.cobalt.parser import CobaltParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestCobaltParser(DojoTestCase):
diff --git a/unittests/tools/test_codechecker_parser.py b/unittests/tools/test_codechecker_parser.py
index fb118215c7..8c6d9e6cc6 100644
--- a/unittests/tools/test_codechecker_parser.py
+++ b/unittests/tools/test_codechecker_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.codechecker.parser import CodeCheckerParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestCodeCheckerParser(DojoTestCase):
diff --git a/unittests/tools/test_contrast_parser.py b/unittests/tools/test_contrast_parser.py
index 70821628b4..2d48445cd5 100644
--- a/unittests/tools/test_contrast_parser.py
+++ b/unittests/tools/test_contrast_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Engagement, Product, Test
from dojo.tools.contrast.parser import ContrastParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestContrastParser(DojoTestCase):
diff --git a/unittests/tools/test_coverity_api_parser.py b/unittests/tools/test_coverity_api_parser.py
index 8a480a230d..fd1a268420 100644
--- a/unittests/tools/test_coverity_api_parser.py
+++ b/unittests/tools/test_coverity_api_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.coverity_api.parser import CoverityApiParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestZapParser(DojoTestCase):
diff --git a/unittests/tools/test_crashtest_security_parser.py b/unittests/tools/test_crashtest_security_parser.py
index da05803f97..2eaa5211cd 100644
--- a/unittests/tools/test_crashtest_security_parser.py
+++ b/unittests/tools/test_crashtest_security_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.crashtest_security.parser import CrashtestSecurityParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestCrashtestSecurityParser(DojoTestCase):
diff --git a/unittests/tools/test_cred_scan_parser.py b/unittests/tools/test_cred_scan_parser.py
index c8aebcbdca..af49941f4d 100644
--- a/unittests/tools/test_cred_scan_parser.py
+++ b/unittests/tools/test_cred_scan_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.cred_scan.parser import CredScanParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestCredScanParser(DojoTestCase):
diff --git a/unittests/tools/test_crunch42_parser.py b/unittests/tools/test_crunch42_parser.py
index 3f203bd3c5..57cab2f32e 100644
--- a/unittests/tools/test_crunch42_parser.py
+++ b/unittests/tools/test_crunch42_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.crunch42.parser import Crunch42Parser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestCrunch42Parser(DojoTestCase):
diff --git a/unittests/tools/test_cyclonedx_parser.py b/unittests/tools/test_cyclonedx_parser.py
index 5aa5cd218b..16a346ce16 100644
--- a/unittests/tools/test_cyclonedx_parser.py
+++ b/unittests/tools/test_cyclonedx_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Finding, Test
from dojo.tools.cyclonedx.parser import CycloneDXParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestCyclonedxParser(DojoTestCase):
diff --git a/unittests/tools/test_dawnscanner_parser.py b/unittests/tools/test_dawnscanner_parser.py
index f0833b61fd..62ccc11c44 100644
--- a/unittests/tools/test_dawnscanner_parser.py
+++ b/unittests/tools/test_dawnscanner_parser.py
@@ -3,8 +3,7 @@
from dojo.models import Test
from dojo.tools.dawnscanner.parser import DawnScannerParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestDawnScannerParser(DojoTestCase):
diff --git a/unittests/tools/test_deepfence_threatmapper_parser.py b/unittests/tools/test_deepfence_threatmapper_parser.py
index 2dc584b225..8e899a5f3a 100644
--- a/unittests/tools/test_deepfence_threatmapper_parser.py
+++ b/unittests/tools/test_deepfence_threatmapper_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.deepfence_threatmapper.parser import DeepfenceThreatmapperParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestDeepfenceThreatmapperParser(DojoTestCase):
diff --git a/unittests/tools/test_dependency_check_parser.py b/unittests/tools/test_dependency_check_parser.py
index c6a26d1e78..0bf78406f4 100644
--- a/unittests/tools/test_dependency_check_parser.py
+++ b/unittests/tools/test_dependency_check_parser.py
@@ -5,8 +5,7 @@
from dojo.models import Test
from dojo.tools.dependency_check.parser import DependencyCheckParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestFile:
diff --git a/unittests/tools/test_dependency_track_parser.py b/unittests/tools/test_dependency_track_parser.py
index f2783229d9..41fb2591fa 100644
--- a/unittests/tools/test_dependency_track_parser.py
+++ b/unittests/tools/test_dependency_track_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.dependency_track.parser import DependencyTrackParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestDependencyTrackParser(DojoTestCase):
diff --git a/unittests/tools/test_detect_secrets_parser.py b/unittests/tools/test_detect_secrets_parser.py
index 0c11f06b8f..d403cf7af7 100644
--- a/unittests/tools/test_detect_secrets_parser.py
+++ b/unittests/tools/test_detect_secrets_parser.py
@@ -4,8 +4,7 @@
from dojo.models import Test
from dojo.tools.detect_secrets.parser import DetectSecretsParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestDetectSecretsParser(DojoTestCase):
diff --git a/unittests/tools/test_dockerbench_parser.py b/unittests/tools/test_dockerbench_parser.py
index 6a52066172..8a2ec6137d 100644
--- a/unittests/tools/test_dockerbench_parser.py
+++ b/unittests/tools/test_dockerbench_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.dockerbench.parser import DockerBenchParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestDockerBenchParser(DojoTestCase):
diff --git a/unittests/tools/test_dockle_parser.py b/unittests/tools/test_dockle_parser.py
index 5baed6455c..1ecd8b2a7b 100644
--- a/unittests/tools/test_dockle_parser.py
+++ b/unittests/tools/test_dockle_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.dockle.parser import DockleParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestDockleParser(DojoTestCase):
diff --git a/unittests/tools/test_drheader_parser.py b/unittests/tools/test_drheader_parser.py
index 9ae187497c..ccda65e286 100644
--- a/unittests/tools/test_drheader_parser.py
+++ b/unittests/tools/test_drheader_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.drheader.parser import DrHeaderParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestDrHeaderParser(DojoTestCase):
diff --git a/unittests/tools/test_dsop_parser.py b/unittests/tools/test_dsop_parser.py
index d4ffd6ff5f..e3a1b8d984 100644
--- a/unittests/tools/test_dsop_parser.py
+++ b/unittests/tools/test_dsop_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.dsop.parser import DsopParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestDsopParser(DojoTestCase):
diff --git a/unittests/tools/test_eslint_parser.py b/unittests/tools/test_eslint_parser.py
index f04d446496..0266860890 100644
--- a/unittests/tools/test_eslint_parser.py
+++ b/unittests/tools/test_eslint_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.eslint.parser import ESLintParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestESLintParser(DojoTestCase):
diff --git a/unittests/tools/test_fortify_parser.py b/unittests/tools/test_fortify_parser.py
index 52140f66a5..c65531fcf5 100644
--- a/unittests/tools/test_fortify_parser.py
+++ b/unittests/tools/test_fortify_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.fortify.parser import FortifyParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestFortifyParser(DojoTestCase):
diff --git a/unittests/tools/test_gcloud_artifact_scan_parser.py b/unittests/tools/test_gcloud_artifact_scan_parser.py
index 762fbbb3b9..c581d9f8f6 100644
--- a/unittests/tools/test_gcloud_artifact_scan_parser.py
+++ b/unittests/tools/test_gcloud_artifact_scan_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.gcloud_artifact_scan.parser import GCloudArtifactScanParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestGCloudArtifactScanParser(DojoTestCase):
diff --git a/unittests/tools/test_generic_parser.py b/unittests/tools/test_generic_parser.py
index 5951f9a413..f8a3f467a7 100644
--- a/unittests/tools/test_generic_parser.py
+++ b/unittests/tools/test_generic_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Engagement, Finding, Product, Test
from dojo.tools.generic.parser import GenericParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestFile:
diff --git a/unittests/tools/test_ggshield_parser.py b/unittests/tools/test_ggshield_parser.py
index c95a70b8d0..e4163900f1 100644
--- a/unittests/tools/test_ggshield_parser.py
+++ b/unittests/tools/test_ggshield_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.ggshield.parser import GgshieldParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestGgshieldParser(DojoTestCase):
diff --git a/unittests/tools/test_github_vulnerability_parser.py b/unittests/tools/test_github_vulnerability_parser.py
index cff3956675..857d665ebf 100644
--- a/unittests/tools/test_github_vulnerability_parser.py
+++ b/unittests/tools/test_github_vulnerability_parser.py
@@ -4,8 +4,7 @@
from dojo.models import Test
from dojo.tools.github_vulnerability.parser import GithubVulnerabilityParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestGithubVulnerabilityParser(DojoTestCase):
diff --git a/unittests/tools/test_gitlab_api_fuzzing_parser.py b/unittests/tools/test_gitlab_api_fuzzing_parser.py
index 33698d5d4a..2948134d72 100644
--- a/unittests/tools/test_gitlab_api_fuzzing_parser.py
+++ b/unittests/tools/test_gitlab_api_fuzzing_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.gitlab_api_fuzzing.parser import GitlabAPIFuzzingParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestGitlabAPIFuzzingParser(DojoTestCase):
diff --git a/unittests/tools/test_gitlab_container_scan_parser.py b/unittests/tools/test_gitlab_container_scan_parser.py
index 8aaaca7f18..e4da366286 100644
--- a/unittests/tools/test_gitlab_container_scan_parser.py
+++ b/unittests/tools/test_gitlab_container_scan_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.gitlab_container_scan.parser import GitlabContainerScanParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestGitlabContainerScanParser(DojoTestCase):
diff --git a/unittests/tools/test_gitlab_dast_parser.py b/unittests/tools/test_gitlab_dast_parser.py
index 7d778adc40..4e6cc5d41c 100644
--- a/unittests/tools/test_gitlab_dast_parser.py
+++ b/unittests/tools/test_gitlab_dast_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.gitlab_dast.parser import GitlabDastParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestGitlabDastParser(DojoTestCase):
diff --git a/unittests/tools/test_gitlab_dep_scan_parser.py b/unittests/tools/test_gitlab_dep_scan_parser.py
index 7601438490..a31b98051f 100644
--- a/unittests/tools/test_gitlab_dep_scan_parser.py
+++ b/unittests/tools/test_gitlab_dep_scan_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.gitlab_dep_scan.parser import GitlabDepScanParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestGitlabDepScanParser(DojoTestCase):
diff --git a/unittests/tools/test_gitlab_sast_parser.py b/unittests/tools/test_gitlab_sast_parser.py
index b0acbe3421..16e9ef4ad9 100644
--- a/unittests/tools/test_gitlab_sast_parser.py
+++ b/unittests/tools/test_gitlab_sast_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.gitlab_sast.parser import GitlabSastParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestGitlabSastParser(DojoTestCase):
diff --git a/unittests/tools/test_gitlab_secret_detection_report_parser.py b/unittests/tools/test_gitlab_secret_detection_report_parser.py
index 4c1d60922d..32747f7b88 100644
--- a/unittests/tools/test_gitlab_secret_detection_report_parser.py
+++ b/unittests/tools/test_gitlab_secret_detection_report_parser.py
@@ -4,8 +4,7 @@
from dojo.tools.gitlab_secret_detection_report.parser import (
GitlabSecretDetectionReportParser,
)
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestGitlabSecretDetectionReportParser(DojoTestCase):
diff --git a/unittests/tools/test_gitleaks_parser.py b/unittests/tools/test_gitleaks_parser.py
index f3ba72907b..b712777696 100644
--- a/unittests/tools/test_gitleaks_parser.py
+++ b/unittests/tools/test_gitleaks_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.gitleaks.parser import GitleaksParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestGitleaksParser(DojoTestCase):
diff --git a/unittests/tools/test_gosec_parser.py b/unittests/tools/test_gosec_parser.py
index a4274301da..53c79a71ad 100644
--- a/unittests/tools/test_gosec_parser.py
+++ b/unittests/tools/test_gosec_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.gosec.parser import GosecParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestGosecParser(DojoTestCase):
diff --git a/unittests/tools/test_h1_parser.py b/unittests/tools/test_h1_parser.py
index d216c0498e..079404cd22 100644
--- a/unittests/tools/test_h1_parser.py
+++ b/unittests/tools/test_h1_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.h1.parser import H1Parser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestHackerOneParser(DojoTestCase):
diff --git a/unittests/tools/test_hadolint_parser.py b/unittests/tools/test_hadolint_parser.py
index 48cafebb9d..d19ed046b7 100644
--- a/unittests/tools/test_hadolint_parser.py
+++ b/unittests/tools/test_hadolint_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.hadolint.parser import HadolintParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TesthadolintParser(DojoTestCase):
diff --git a/unittests/tools/test_harbor_vulnerability_parser.py b/unittests/tools/test_harbor_vulnerability_parser.py
index 6a947d2d84..14994db1fd 100644
--- a/unittests/tools/test_harbor_vulnerability_parser.py
+++ b/unittests/tools/test_harbor_vulnerability_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.harbor_vulnerability.parser import HarborVulnerabilityParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestHarborVulnerabilityParser(DojoTestCase):
diff --git a/unittests/tools/test_hcl_appscan_parser.py b/unittests/tools/test_hcl_appscan_parser.py
index ee33da0046..5539260fa7 100644
--- a/unittests/tools/test_hcl_appscan_parser.py
+++ b/unittests/tools/test_hcl_appscan_parser.py
@@ -1,6 +1,5 @@
from dojo.tools.hcl_appscan.parser import HCLAppScanParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestHCLAppScanParser(DojoTestCase):
diff --git a/unittests/tools/test_horusec_parser.py b/unittests/tools/test_horusec_parser.py
index 308738f779..5353acdd79 100644
--- a/unittests/tools/test_horusec_parser.py
+++ b/unittests/tools/test_horusec_parser.py
@@ -3,8 +3,7 @@
from dojo.models import Test
from dojo.tools.horusec.parser import HorusecParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestHorusecParser(DojoTestCase):
diff --git a/unittests/tools/test_huskyci_parser.py b/unittests/tools/test_huskyci_parser.py
index 76aad5a15c..d0b76d7313 100644
--- a/unittests/tools/test_huskyci_parser.py
+++ b/unittests/tools/test_huskyci_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.huskyci.parser import HuskyCIParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestHuskyCIParser(DojoTestCase):
diff --git a/unittests/tools/test_ibm_app_parser.py b/unittests/tools/test_ibm_app_parser.py
index 6755d294c0..cc58e1b014 100644
--- a/unittests/tools/test_ibm_app_parser.py
+++ b/unittests/tools/test_ibm_app_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.ibm_app.parser import IbmAppParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestIbmAppParser(DojoTestCase):
diff --git a/unittests/tools/test_immuniweb_parser.py b/unittests/tools/test_immuniweb_parser.py
index ed79494c67..97de358c91 100644
--- a/unittests/tools/test_immuniweb_parser.py
+++ b/unittests/tools/test_immuniweb_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.immuniweb.parser import ImmuniwebParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestImmuniwebParser(DojoTestCase):
diff --git a/unittests/tools/test_intsights_parser.py b/unittests/tools/test_intsights_parser.py
index 7afd0e33dd..79f367cfaf 100644
--- a/unittests/tools/test_intsights_parser.py
+++ b/unittests/tools/test_intsights_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.intsights.parser import IntSightsParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestIntSightsParser(DojoTestCase):
diff --git a/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py b/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py
index d191ca7b43..615cad2595 100644
--- a/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py
+++ b/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py
@@ -4,8 +4,7 @@
from dojo.tools.jfrog_xray_api_summary_artifact.parser import (
JFrogXrayApiSummaryArtifactParser,
)
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestJFrogXrayApiSummaryArtifactParser(DojoTestCase):
diff --git a/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py b/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py
index 8109d7c48a..b88bc87d23 100644
--- a/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py
+++ b/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py
@@ -4,8 +4,7 @@
clean_title,
get_component_name_version,
)
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestJFrogXrayOnDemandBinaryScanParser(DojoTestCase):
diff --git a/unittests/tools/test_jfrog_xray_unified_parser.py b/unittests/tools/test_jfrog_xray_unified_parser.py
index 9b8465e89a..914d3fb073 100644
--- a/unittests/tools/test_jfrog_xray_unified_parser.py
+++ b/unittests/tools/test_jfrog_xray_unified_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.jfrog_xray_unified.parser import JFrogXrayUnifiedParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestJFrogXrayUnifiedParser(DojoTestCase):
diff --git a/unittests/tools/test_jfrogxray_parser.py b/unittests/tools/test_jfrogxray_parser.py
index d48742b9ff..267ebeed73 100644
--- a/unittests/tools/test_jfrogxray_parser.py
+++ b/unittests/tools/test_jfrogxray_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.jfrogxray.parser import JFrogXrayParser, decode_cwe_number
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestJfrogJFrogXrayParser(DojoTestCase):
diff --git a/unittests/tools/test_kics_parser.py b/unittests/tools/test_kics_parser.py
index 3e814dc87f..9d3f064dcb 100644
--- a/unittests/tools/test_kics_parser.py
+++ b/unittests/tools/test_kics_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.kics.parser import KICSParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestKICSParser(DojoTestCase):
diff --git a/unittests/tools/test_kiuwan_parser.py b/unittests/tools/test_kiuwan_parser.py
index 0d8a458719..f3c71124b4 100644
--- a/unittests/tools/test_kiuwan_parser.py
+++ b/unittests/tools/test_kiuwan_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.kiuwan.parser import KiuwanParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestKiuwanParser(DojoTestCase):
diff --git a/unittests/tools/test_kubeaudit_parser.py b/unittests/tools/test_kubeaudit_parser.py
index 65b52378a2..1ec26a5729 100644
--- a/unittests/tools/test_kubeaudit_parser.py
+++ b/unittests/tools/test_kubeaudit_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.kubeaudit.parser import KubeAuditParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestKubeAuditParser(DojoTestCase):
diff --git a/unittests/tools/test_kubebench_parser.py b/unittests/tools/test_kubebench_parser.py
index e0c7b6181a..601db2707d 100644
--- a/unittests/tools/test_kubebench_parser.py
+++ b/unittests/tools/test_kubebench_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.kubebench.parser import KubeBenchParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestKubeBenchParser(DojoTestCase):
diff --git a/unittests/tools/test_kubescape_parser.py b/unittests/tools/test_kubescape_parser.py
index c68cb2f1f7..6437b705ef 100644
--- a/unittests/tools/test_kubescape_parser.py
+++ b/unittests/tools/test_kubescape_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.kubescape.parser import KubescapeParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestKubescapeParser(DojoTestCase):
diff --git a/unittests/tools/test_mend_parser.py b/unittests/tools/test_mend_parser.py
index d30a35752a..8deaf6f2be 100644
--- a/unittests/tools/test_mend_parser.py
+++ b/unittests/tools/test_mend_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.mend.parser import MendParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestMendParser(DojoTestCase):
diff --git a/unittests/tools/test_meterian_parser.py b/unittests/tools/test_meterian_parser.py
index 728c669020..d02b877aba 100644
--- a/unittests/tools/test_meterian_parser.py
+++ b/unittests/tools/test_meterian_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.meterian.parser import MeterianParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestMeterianParser(DojoTestCase):
diff --git a/unittests/tools/test_microfocus_webinspect_parser.py b/unittests/tools/test_microfocus_webinspect_parser.py
index b44678cb4d..0d2dd131c1 100644
--- a/unittests/tools/test_microfocus_webinspect_parser.py
+++ b/unittests/tools/test_microfocus_webinspect_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Engagement, Product, Test
from dojo.tools.microfocus_webinspect.parser import MicrofocusWebinspectParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestMicrofocusWebinspectParser(DojoTestCase):
diff --git a/unittests/tools/test_mobsf_parser.py b/unittests/tools/test_mobsf_parser.py
index e5eb2a48e0..2cd4ed7363 100644
--- a/unittests/tools/test_mobsf_parser.py
+++ b/unittests/tools/test_mobsf_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Engagement, Product, Test
from dojo.tools.mobsf.parser import MobSFParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestMobSFParser(DojoTestCase):
diff --git a/unittests/tools/test_mobsfscan_parser.py b/unittests/tools/test_mobsfscan_parser.py
index 038bc09091..c0822bbf79 100644
--- a/unittests/tools/test_mobsfscan_parser.py
+++ b/unittests/tools/test_mobsfscan_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.mobsfscan.parser import MobsfscanParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestMobsfscanParser(DojoTestCase):
diff --git a/unittests/tools/test_mozilla_observatory_parser.py b/unittests/tools/test_mozilla_observatory_parser.py
index 147eff5f2b..a84bc8c122 100644
--- a/unittests/tools/test_mozilla_observatory_parser.py
+++ b/unittests/tools/test_mozilla_observatory_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.mozilla_observatory.parser import MozillaObservatoryParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestMozillaObservatoryParser(DojoTestCase):
diff --git a/unittests/tools/test_ms_defender_parser.py b/unittests/tools/test_ms_defender_parser.py
index 1f8f73ee3f..9d22621096 100644
--- a/unittests/tools/test_ms_defender_parser.py
+++ b/unittests/tools/test_ms_defender_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.ms_defender.parser import MSDefenderParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestMSDefenderParser(DojoTestCase):
diff --git a/unittests/tools/test_nancy_parser.py b/unittests/tools/test_nancy_parser.py
index 5918f12ce9..39baa59297 100644
--- a/unittests/tools/test_nancy_parser.py
+++ b/unittests/tools/test_nancy_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.nancy.parser import NancyParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestNancyParser(DojoTestCase):
diff --git a/unittests/tools/test_netsparker_parser.py b/unittests/tools/test_netsparker_parser.py
index c3c3f33604..0dbc95ff4b 100644
--- a/unittests/tools/test_netsparker_parser.py
+++ b/unittests/tools/test_netsparker_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.netsparker.parser import NetsparkerParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestNetsparkerParser(DojoTestCase):
diff --git a/unittests/tools/test_neuvector_compliance_parser.py b/unittests/tools/test_neuvector_compliance_parser.py
index 0d41a19a99..30de36ea32 100644
--- a/unittests/tools/test_neuvector_compliance_parser.py
+++ b/unittests/tools/test_neuvector_compliance_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.neuvector_compliance.parser import NeuVectorComplianceParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestNeuVectorComplianceParser(DojoTestCase):
diff --git a/unittests/tools/test_neuvector_parser.py b/unittests/tools/test_neuvector_parser.py
index ecb2faba98..c2bdd07af7 100644
--- a/unittests/tools/test_neuvector_parser.py
+++ b/unittests/tools/test_neuvector_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.neuvector.parser import NeuVectorParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestNeuVectorParser(DojoTestCase):
diff --git a/unittests/tools/test_nexpose_parser.py b/unittests/tools/test_nexpose_parser.py
index 7b5afc112e..e53e90ce0e 100644
--- a/unittests/tools/test_nexpose_parser.py
+++ b/unittests/tools/test_nexpose_parser.py
@@ -4,8 +4,7 @@
from dojo.models import Engagement, Product, Test
from dojo.tools.nexpose.parser import NexposeParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestNexposeParser(DojoTestCase):
diff --git a/unittests/tools/test_nikto_parser.py b/unittests/tools/test_nikto_parser.py
index 9524fab549..290a3a8a37 100644
--- a/unittests/tools/test_nikto_parser.py
+++ b/unittests/tools/test_nikto_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Engagement, Product, Test
from dojo.tools.nikto.parser import NiktoParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestNiktoParser(DojoTestCase):
diff --git a/unittests/tools/test_nmap_parser.py b/unittests/tools/test_nmap_parser.py
index a78474d89a..fa5efd00f8 100644
--- a/unittests/tools/test_nmap_parser.py
+++ b/unittests/tools/test_nmap_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.nmap.parser import NmapParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestNmapParser(DojoTestCase):
diff --git a/unittests/tools/test_npm_audit_7_plus_parser.py b/unittests/tools/test_npm_audit_7_plus_parser.py
index 1c3c888edf..e551c5d77b 100644
--- a/unittests/tools/test_npm_audit_7_plus_parser.py
+++ b/unittests/tools/test_npm_audit_7_plus_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.npm_audit_7_plus.parser import NpmAudit7PlusParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestNpmAudit7PlusParser(DojoTestCase):
diff --git a/unittests/tools/test_npm_audit_parser.py b/unittests/tools/test_npm_audit_parser.py
index fac2473b51..0793f0cfd4 100644
--- a/unittests/tools/test_npm_audit_parser.py
+++ b/unittests/tools/test_npm_audit_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.npm_audit.parser import NpmAuditParser, censor_path_hashes
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestNpmAuditParser(DojoTestCase):
diff --git a/unittests/tools/test_nsp_parser.py b/unittests/tools/test_nsp_parser.py
index 0901681343..571adb41cd 100644
--- a/unittests/tools/test_nsp_parser.py
+++ b/unittests/tools/test_nsp_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.nsp.parser import NspParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestNspParser(DojoTestCase):
diff --git a/unittests/tools/test_nuclei_parser.py b/unittests/tools/test_nuclei_parser.py
index 2ffde1fb02..131694523e 100644
--- a/unittests/tools/test_nuclei_parser.py
+++ b/unittests/tools/test_nuclei_parser.py
@@ -4,8 +4,7 @@
from dojo.models import Test
from dojo.tools.nuclei.parser import NucleiParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestNucleiParser(DojoTestCase):
diff --git a/unittests/tools/test_openscap_parser.py b/unittests/tools/test_openscap_parser.py
index 5e765b6986..2fd718a1ee 100644
--- a/unittests/tools/test_openscap_parser.py
+++ b/unittests/tools/test_openscap_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.openscap.parser import OpenscapParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestOpenscapParser(DojoTestCase):
diff --git a/unittests/tools/test_openvas_parser.py b/unittests/tools/test_openvas_parser.py
index edefaac12f..d7906896e3 100644
--- a/unittests/tools/test_openvas_parser.py
+++ b/unittests/tools/test_openvas_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Engagement, Product, Test
from dojo.tools.openvas.parser import OpenVASParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestOpenVASParser(DojoTestCase):
diff --git a/unittests/tools/test_ort_parser.py b/unittests/tools/test_ort_parser.py
index 823c611774..f523d35626 100644
--- a/unittests/tools/test_ort_parser.py
+++ b/unittests/tools/test_ort_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.ort.parser import OrtParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestOrtParser(DojoTestCase):
diff --git a/unittests/tools/test_ossindex_devaudit_parser.py b/unittests/tools/test_ossindex_devaudit_parser.py
index e6eb38f4de..e617654a20 100644
--- a/unittests/tools/test_ossindex_devaudit_parser.py
+++ b/unittests/tools/test_ossindex_devaudit_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.ossindex_devaudit.parser import OssIndexDevauditParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestOssIndexDevauditParser(DojoTestCase):
diff --git a/unittests/tools/test_osv_scanner_parser.py b/unittests/tools/test_osv_scanner_parser.py
index 46ba8077dd..794d429bd0 100644
--- a/unittests/tools/test_osv_scanner_parser.py
+++ b/unittests/tools/test_osv_scanner_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.osv_scanner.parser import OSVScannerParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestOSVScannerParser(DojoTestCase):
diff --git a/unittests/tools/test_outpost24_parser.py b/unittests/tools/test_outpost24_parser.py
index 1e8ca61b91..39e44d8220 100644
--- a/unittests/tools/test_outpost24_parser.py
+++ b/unittests/tools/test_outpost24_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.outpost24.parser import Outpost24Parser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestOutpost24Parser(DojoTestCase):
diff --git a/unittests/tools/test_php_security_audit_v2_parser.py b/unittests/tools/test_php_security_audit_v2_parser.py
index e684fabcc9..c8d4fd2091 100644
--- a/unittests/tools/test_php_security_audit_v2_parser.py
+++ b/unittests/tools/test_php_security_audit_v2_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.php_security_audit_v2.parser import PhpSecurityAuditV2Parser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestPhpSecurityAuditV2ParserParser(DojoTestCase):
diff --git a/unittests/tools/test_php_symfony_security_check_parser.py b/unittests/tools/test_php_symfony_security_check_parser.py
index b191038d98..6566c02ebe 100644
--- a/unittests/tools/test_php_symfony_security_check_parser.py
+++ b/unittests/tools/test_php_symfony_security_check_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.php_symfony_security_check.parser import PhpSymfonySecurityCheckParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestPhpSymfonySecurityCheckerParser(DojoTestCase):
diff --git a/unittests/tools/test_pip_audit_parser.py b/unittests/tools/test_pip_audit_parser.py
index 2f765ca9c3..679fe10590 100644
--- a/unittests/tools/test_pip_audit_parser.py
+++ b/unittests/tools/test_pip_audit_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.pip_audit.parser import PipAuditParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestPipAuditParser(DojoTestCase):
diff --git a/unittests/tools/test_pmd_parser.py b/unittests/tools/test_pmd_parser.py
index e876d2c700..9a23238402 100644
--- a/unittests/tools/test_pmd_parser.py
+++ b/unittests/tools/test_pmd_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.pmd.parser import PmdParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestPMDParser(DojoTestCase):
diff --git a/unittests/tools/test_popeye_parser.py b/unittests/tools/test_popeye_parser.py
index ee029855df..04a553679b 100644
--- a/unittests/tools/test_popeye_parser.py
+++ b/unittests/tools/test_popeye_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.popeye.parser import PopeyeParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestPopeyeParser(DojoTestCase):
diff --git a/unittests/tools/test_progpilot_parser.py b/unittests/tools/test_progpilot_parser.py
index 0cfbaf1804..b1f6557b20 100644
--- a/unittests/tools/test_progpilot_parser.py
+++ b/unittests/tools/test_progpilot_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.progpilot.parser import ProgpilotParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestProgpilotParser(DojoTestCase):
diff --git a/unittests/tools/test_pwn_sast_parser.py b/unittests/tools/test_pwn_sast_parser.py
index e24bdaaca3..140aa761c3 100644
--- a/unittests/tools/test_pwn_sast_parser.py
+++ b/unittests/tools/test_pwn_sast_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.pwn_sast.parser import PWNSASTParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestPWNSASTParser(DojoTestCase):
diff --git a/unittests/tools/test_qualys_infrascan_webgui_parser.py b/unittests/tools/test_qualys_infrascan_webgui_parser.py
index 9605e814c3..78e57188a6 100644
--- a/unittests/tools/test_qualys_infrascan_webgui_parser.py
+++ b/unittests/tools/test_qualys_infrascan_webgui_parser.py
@@ -4,8 +4,7 @@
from dojo.models import Test
from dojo.tools.qualys_infrascan_webgui.parser import QualysInfrascanWebguiParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestQualysInfrascanWebguiParser(DojoTestCase):
diff --git a/unittests/tools/test_qualys_parser.py b/unittests/tools/test_qualys_parser.py
index a1317eaf49..075b9d4a3b 100644
--- a/unittests/tools/test_qualys_parser.py
+++ b/unittests/tools/test_qualys_parser.py
@@ -4,8 +4,7 @@
from dojo.models import Test
from dojo.tools.qualys.parser import QualysParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestQualysParser(DojoTestCase):
diff --git a/unittests/tools/test_qualys_webapp_parser.py b/unittests/tools/test_qualys_webapp_parser.py
index 456b8f5a80..2df655e36b 100644
--- a/unittests/tools/test_qualys_webapp_parser.py
+++ b/unittests/tools/test_qualys_webapp_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.qualys_webapp.parser import QualysWebAppParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestQualysWebAppParser(DojoTestCase):
diff --git a/unittests/tools/test_redhatsatellite_parser.py b/unittests/tools/test_redhatsatellite_parser.py
index da1175a8df..bfaabbd326 100644
--- a/unittests/tools/test_redhatsatellite_parser.py
+++ b/unittests/tools/test_redhatsatellite_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.redhatsatellite.parser import RedHatSatelliteParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestRedHatSatelliteParser(DojoTestCase):
diff --git a/unittests/tools/test_retirejs_parser.py b/unittests/tools/test_retirejs_parser.py
index 2802d3667a..d26c8af1c1 100644
--- a/unittests/tools/test_retirejs_parser.py
+++ b/unittests/tools/test_retirejs_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.retirejs.parser import RetireJsParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestRetireJsParser(DojoTestCase):
diff --git a/unittests/tools/test_risk_recon_parser.py b/unittests/tools/test_risk_recon_parser.py
index 839c6cf80d..dde31a77ca 100644
--- a/unittests/tools/test_risk_recon_parser.py
+++ b/unittests/tools/test_risk_recon_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.risk_recon.parser import RiskReconParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestRiskReconAPIParser(DojoTestCase):
diff --git a/unittests/tools/test_rubocop_parser.py b/unittests/tools/test_rubocop_parser.py
index 1dba608f01..e581366ba6 100644
--- a/unittests/tools/test_rubocop_parser.py
+++ b/unittests/tools/test_rubocop_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.rubocop.parser import RubocopParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestRubocopParser(DojoTestCase):
diff --git a/unittests/tools/test_rusty_hog_parser.py b/unittests/tools/test_rusty_hog_parser.py
index 472e15b822..b9aca9a65c 100644
--- a/unittests/tools/test_rusty_hog_parser.py
+++ b/unittests/tools/test_rusty_hog_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.rusty_hog.parser import RustyhogParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestRustyhogParser(DojoTestCase):
diff --git a/unittests/tools/test_sarif_parser.py b/unittests/tools/test_sarif_parser.py
index a3443e51bc..eb3dd05332 100644
--- a/unittests/tools/test_sarif_parser.py
+++ b/unittests/tools/test_sarif_parser.py
@@ -3,8 +3,7 @@
from dojo.models import Finding, Test
from dojo.tools.sarif.parser import SarifParser, get_fingerprints_hashes
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestSarifParser(DojoTestCase):
diff --git a/unittests/tools/test_scantist_parser.py b/unittests/tools/test_scantist_parser.py
index befdf404c8..7b8e0b0d4c 100644
--- a/unittests/tools/test_scantist_parser.py
+++ b/unittests/tools/test_scantist_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.scantist.parser import ScantistParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestScantistParser(DojoTestCase):
diff --git a/unittests/tools/test_scout_suite_parser.py b/unittests/tools/test_scout_suite_parser.py
index 40e6761f84..1cab703c17 100644
--- a/unittests/tools/test_scout_suite_parser.py
+++ b/unittests/tools/test_scout_suite_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.scout_suite.parser import ScoutSuiteParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestScoutSuiteParser(DojoTestCase):
diff --git a/unittests/tools/test_semgrep_parser.py b/unittests/tools/test_semgrep_parser.py
index 27a6d1b755..f2f329a15a 100644
--- a/unittests/tools/test_semgrep_parser.py
+++ b/unittests/tools/test_semgrep_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.semgrep.parser import SemgrepParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestSemgrepParser(DojoTestCase):
diff --git a/unittests/tools/test_skf_parser.py b/unittests/tools/test_skf_parser.py
index b39ac83257..f0f197abe5 100644
--- a/unittests/tools/test_skf_parser.py
+++ b/unittests/tools/test_skf_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.skf.parser import SKFParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestSkfParser(DojoTestCase):
diff --git a/unittests/tools/test_snyk_code_parser.py b/unittests/tools/test_snyk_code_parser.py
index 37524acefa..20d3109e1e 100644
--- a/unittests/tools/test_snyk_code_parser.py
+++ b/unittests/tools/test_snyk_code_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.snyk_code.parser import SnykCodeParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestSnykCodeParser(DojoTestCase):
diff --git a/unittests/tools/test_snyk_parser.py b/unittests/tools/test_snyk_parser.py
index d79dc0db0d..59fde5a85f 100644
--- a/unittests/tools/test_snyk_parser.py
+++ b/unittests/tools/test_snyk_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.snyk.parser import SnykParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestSnykParser(DojoTestCase):
diff --git a/unittests/tools/test_solar_appscreener_parser.py b/unittests/tools/test_solar_appscreener_parser.py
index 1d4c38c7c0..3e2284ee80 100644
--- a/unittests/tools/test_solar_appscreener_parser.py
+++ b/unittests/tools/test_solar_appscreener_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.solar_appscreener.parser import SolarAppscreenerParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestSolarAppscreenerParser(DojoTestCase):
diff --git a/unittests/tools/test_sonarqube_parser.py b/unittests/tools/test_sonarqube_parser.py
index 95c1d3532c..cf72d020e5 100644
--- a/unittests/tools/test_sonarqube_parser.py
+++ b/unittests/tools/test_sonarqube_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Engagement, Product, Test
from dojo.tools.sonarqube.parser import SonarQubeParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestSonarQubeParser(DojoTestCase):
diff --git a/unittests/tools/test_sonatype_parser.py b/unittests/tools/test_sonatype_parser.py
index 6bf57e041c..232f4dfec0 100644
--- a/unittests/tools/test_sonatype_parser.py
+++ b/unittests/tools/test_sonatype_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.sonatype.parser import SonatypeParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestSonatypeParser(DojoTestCase):
diff --git a/unittests/tools/test_spotbugs_parser.py b/unittests/tools/test_spotbugs_parser.py
index b105ebb20a..879c971312 100644
--- a/unittests/tools/test_spotbugs_parser.py
+++ b/unittests/tools/test_spotbugs_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.spotbugs.parser import SpotbugsParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestSpotbugsParser(DojoTestCase):
diff --git a/unittests/tools/test_ssh_audit_parser.py b/unittests/tools/test_ssh_audit_parser.py
index a29d36c1a9..e189ea81e7 100644
--- a/unittests/tools/test_ssh_audit_parser.py
+++ b/unittests/tools/test_ssh_audit_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.ssh_audit.parser import SSHAuditParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestSSHAuditParser(DojoTestCase):
diff --git a/unittests/tools/test_ssl_labs_parser.py b/unittests/tools/test_ssl_labs_parser.py
index b28d647dd7..125965ea13 100644
--- a/unittests/tools/test_ssl_labs_parser.py
+++ b/unittests/tools/test_ssl_labs_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.ssl_labs.parser import SslLabsParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestSslLabsParser(DojoTestCase):
diff --git a/unittests/tools/test_sslscan_parser.py b/unittests/tools/test_sslscan_parser.py
index b685178247..157046e3e1 100644
--- a/unittests/tools/test_sslscan_parser.py
+++ b/unittests/tools/test_sslscan_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.sslscan.parser import SslscanParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestSslscanParser(DojoTestCase):
diff --git a/unittests/tools/test_sslyze_parser.py b/unittests/tools/test_sslyze_parser.py
index 4493d4d180..0e961f4531 100644
--- a/unittests/tools/test_sslyze_parser.py
+++ b/unittests/tools/test_sslyze_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.sslyze.parser import SslyzeParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestSslyzeJSONParser(DojoTestCase):
diff --git a/unittests/tools/test_talisman_parser.py b/unittests/tools/test_talisman_parser.py
index 89221d212b..0f05b83d71 100644
--- a/unittests/tools/test_talisman_parser.py
+++ b/unittests/tools/test_talisman_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.talisman.parser import TalismanParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestTalismanParser(DojoTestCase):
diff --git a/unittests/tools/test_tenable_parser.py b/unittests/tools/test_tenable_parser.py
index c8468e3222..29922a3f64 100644
--- a/unittests/tools/test_tenable_parser.py
+++ b/unittests/tools/test_tenable_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Engagement, Finding, Product, Test
from dojo.tools.tenable.parser import TenableParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestTenableParser(DojoTestCase):
diff --git a/unittests/tools/test_terrascan_parser.py b/unittests/tools/test_terrascan_parser.py
index 4fa717fccd..8201b65c31 100644
--- a/unittests/tools/test_terrascan_parser.py
+++ b/unittests/tools/test_terrascan_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.terrascan.parser import TerrascanParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestTerrascanParser(DojoTestCase):
diff --git a/unittests/tools/test_testssl_parser.py b/unittests/tools/test_testssl_parser.py
index 785c1b51f7..deae821733 100644
--- a/unittests/tools/test_testssl_parser.py
+++ b/unittests/tools/test_testssl_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.testssl.parser import TestsslParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestTestsslParser(DojoTestCase):
diff --git a/unittests/tools/test_tfsec_parser.py b/unittests/tools/test_tfsec_parser.py
index f3974c20d0..23a88fd895 100644
--- a/unittests/tools/test_tfsec_parser.py
+++ b/unittests/tools/test_tfsec_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.tfsec.parser import TFSecParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestTFSecParser(DojoTestCase):
diff --git a/unittests/tools/test_trivy_operator_parser.py b/unittests/tools/test_trivy_operator_parser.py
index 705a9bae29..a5a52f1ded 100644
--- a/unittests/tools/test_trivy_operator_parser.py
+++ b/unittests/tools/test_trivy_operator_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.trivy_operator.parser import TrivyOperatorParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
def sample_path(file_name):
diff --git a/unittests/tools/test_trivy_parser.py b/unittests/tools/test_trivy_parser.py
index 6e53d9b756..e61cd70cd8 100644
--- a/unittests/tools/test_trivy_parser.py
+++ b/unittests/tools/test_trivy_parser.py
@@ -3,8 +3,7 @@
from dojo.models import Test
from dojo.tools.trivy.parser import TrivyParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
def sample_path(file_name):
diff --git a/unittests/tools/test_trufflehog3_parser.py b/unittests/tools/test_trufflehog3_parser.py
index 25215ada85..1b24c35e2b 100644
--- a/unittests/tools/test_trufflehog3_parser.py
+++ b/unittests/tools/test_trufflehog3_parser.py
@@ -3,8 +3,7 @@
from dojo.models import Test
from dojo.tools.trufflehog3.parser import TruffleHog3Parser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
def sample_path(file_name):
diff --git a/unittests/tools/test_trufflehog_parser.py b/unittests/tools/test_trufflehog_parser.py
index a3820ab458..11aa8b55a3 100644
--- a/unittests/tools/test_trufflehog_parser.py
+++ b/unittests/tools/test_trufflehog_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.trufflehog.parser import TruffleHogParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
def sample_path(file_name):
diff --git a/unittests/tools/test_trustwave_fusion_api_parser.py b/unittests/tools/test_trustwave_fusion_api_parser.py
index f09272e445..7773af5cb2 100644
--- a/unittests/tools/test_trustwave_fusion_api_parser.py
+++ b/unittests/tools/test_trustwave_fusion_api_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.trustwave_fusion_api.parser import TrustwaveFusionAPIParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestTrustwaveFusionAPIParser(DojoTestCase):
diff --git a/unittests/tools/test_trustwave_parser.py b/unittests/tools/test_trustwave_parser.py
index 328213743b..b0931d980b 100644
--- a/unittests/tools/test_trustwave_parser.py
+++ b/unittests/tools/test_trustwave_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Engagement, Product, Test
from dojo.tools.trustwave.parser import TrustwaveParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
def sample_path(file_name):
diff --git a/unittests/tools/test_twistlock_parser.py b/unittests/tools/test_twistlock_parser.py
index e972b0700f..8d8121305b 100644
--- a/unittests/tools/test_twistlock_parser.py
+++ b/unittests/tools/test_twistlock_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.twistlock.parser import TwistlockParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestTwistlockParser(DojoTestCase):
diff --git a/unittests/tools/test_vcg_parser.py b/unittests/tools/test_vcg_parser.py
index 59c5a7bc1e..3900a7ad04 100644
--- a/unittests/tools/test_vcg_parser.py
+++ b/unittests/tools/test_vcg_parser.py
@@ -5,8 +5,7 @@
from dojo.models import Test
from dojo.tools.vcg.parser import VCGCsvParser, VCGParser, VCGXmlParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestFile:
diff --git a/unittests/tools/test_veracode_parser.py b/unittests/tools/test_veracode_parser.py
index 1149daddba..9a00b0d646 100644
--- a/unittests/tools/test_veracode_parser.py
+++ b/unittests/tools/test_veracode_parser.py
@@ -4,8 +4,7 @@
from dojo.models import Endpoint, Engagement, Product, Product_Type, Test
from dojo.tools.veracode.parser import VeracodeParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestVeracodeScannerParser(DojoTestCase):
diff --git a/unittests/tools/test_veracode_sca_parser.py b/unittests/tools/test_veracode_sca_parser.py
index fdcfa0b60f..03c70e50a1 100644
--- a/unittests/tools/test_veracode_sca_parser.py
+++ b/unittests/tools/test_veracode_sca_parser.py
@@ -5,8 +5,7 @@
from dojo.models import Test
from dojo.tools.veracode_sca.parser import VeracodeScaParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestVeracodeScaScannerParser(DojoTestCase):
diff --git a/unittests/tools/test_wapiti_parser.py b/unittests/tools/test_wapiti_parser.py
index 3b5bc74235..fa7dd8592a 100644
--- a/unittests/tools/test_wapiti_parser.py
+++ b/unittests/tools/test_wapiti_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.wapiti.parser import WapitiParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestWapitiParser(DojoTestCase):
diff --git a/unittests/tools/test_wazuh_parser.py b/unittests/tools/test_wazuh_parser.py
index 1e881e1d02..ec48ac6335 100644
--- a/unittests/tools/test_wazuh_parser.py
+++ b/unittests/tools/test_wazuh_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.wazuh.parser import WazuhParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestWazuhParser(DojoTestCase):
diff --git a/unittests/tools/test_wfuzz_parser.py b/unittests/tools/test_wfuzz_parser.py
index e9f1e69411..ce140a9c11 100644
--- a/unittests/tools/test_wfuzz_parser.py
+++ b/unittests/tools/test_wfuzz_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.wfuzz.parser import WFuzzParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestWFuzzParser(DojoTestCase):
diff --git a/unittests/tools/test_whitehat_sentinel_parser.py b/unittests/tools/test_whitehat_sentinel_parser.py
index 65e91fc23b..9e957333e0 100644
--- a/unittests/tools/test_whitehat_sentinel_parser.py
+++ b/unittests/tools/test_whitehat_sentinel_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.whitehat_sentinel.parser import WhiteHatSentinelParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestWhiteHatSentinelParser(DojoTestCase):
diff --git a/unittests/tools/test_wiz_parser.py b/unittests/tools/test_wiz_parser.py
index 040d7791e1..38a350318c 100644
--- a/unittests/tools/test_wiz_parser.py
+++ b/unittests/tools/test_wiz_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.wiz.parser import WizParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestWizParser(DojoTestCase):
diff --git a/unittests/tools/test_wpscan_parser.py b/unittests/tools/test_wpscan_parser.py
index c3932ebe75..1e70aa4e51 100644
--- a/unittests/tools/test_wpscan_parser.py
+++ b/unittests/tools/test_wpscan_parser.py
@@ -2,8 +2,7 @@
from dojo.models import Test
from dojo.tools.wpscan.parser import WpscanParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestWpscanParser(DojoTestCase):
diff --git a/unittests/tools/test_xanitizer_parser.py b/unittests/tools/test_xanitizer_parser.py
index 8b46a61dea..0b27b985a5 100644
--- a/unittests/tools/test_xanitizer_parser.py
+++ b/unittests/tools/test_xanitizer_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Test
from dojo.tools.xanitizer.parser import XanitizerParser
-
-from ..dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
class TestXanitizerParser(DojoTestCase):
diff --git a/unittests/tools/test_yarn_audit_parser.py b/unittests/tools/test_yarn_audit_parser.py
index 2a43e6222a..1206f88b43 100644
--- a/unittests/tools/test_yarn_audit_parser.py
+++ b/unittests/tools/test_yarn_audit_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Engagement, Product, Test
from dojo.tools.yarn_audit.parser import YarnAuditParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestYarnAuditParser(DojoTestCase):
diff --git a/unittests/tools/test_zap_parser.py b/unittests/tools/test_zap_parser.py
index c7b738155c..325e920598 100644
--- a/unittests/tools/test_zap_parser.py
+++ b/unittests/tools/test_zap_parser.py
@@ -1,7 +1,6 @@
from dojo.models import Finding, Test
from dojo.tools.zap.parser import ZapParser
-
-from ..dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase
class TestZapParser(DojoTestCase):
From 36d6700cd2b3d74741e1eef06fd23c2359a92cf2 Mon Sep 17 00:00:00 2001
From: kiblik <5609770+kiblik@users.noreply.github.com>
Date: Tue, 2 Jul 2024 04:24:45 +0200
Subject: [PATCH 010/111] Ruff: add and fix PIE (#10090)
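Enables the flake8-pie ("PIE") ruleset in ruff.toml and applies its autofixes
across the codebase, among them: redundant `pass` statements after real work in
a block are dropped (PIE790), `range(0, len(x))` becomes `range(len(x))`
(PIE808), and chained `startswith()` checks collapse into a single call with a
tuple of prefixes (PIE810). A minimal, self-contained sketch of these rewrites,
using hypothetical helper names rather than the actual DefectDojo code paths:

    def normalize_cve(vuln_id: str) -> str:
        # PIE810: one startswith() call with a tuple replaces
        # `x.startswith("cve") or x.startswith("CVE")`
        if vuln_id and not vuln_id.startswith(("cve", "CVE")):
            vuln_id = "CVE-" + vuln_id
        return vuln_id

    def pair_requests_responses(requests: list, responses: list) -> list:
        pairs = []
        # PIE808: range() starts at 0 by default, so the explicit 0 start is dropped
        for i in range(len(requests)):
            pairs.append({"req": requests[i], "resp": responses[i]})
        return pairs

    def load_checklist(lookup: dict, key: str):
        try:
            checklist = lookup[key]
        except KeyError:
            # PIE790: a trailing `pass` after this assignment would be flagged as redundant
            checklist = None
        return checklist

    print(normalize_cve("2024-1234"))                      # CVE-2024-1234
    print(pair_requests_responses(["GET /"], ["200 OK"]))  # [{'req': 'GET /', 'resp': '200 OK'}]
    print(load_checklist({}, "missing"))                   # None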
---
dojo/endpoint/utils.py | 2 +-
dojo/engagement/views.py | 5 -----
dojo/filters.py | 10 ----------
dojo/importers/base_importer.py | 1 -
dojo/importers/options.py | 1 -
dojo/jira_link/helper.py | 1 -
dojo/models.py | 2 --
dojo/notifications/helper.py | 1 -
dojo/okta.py | 1 -
dojo/product/views.py | 3 +--
dojo/templatetags/display_tags.py | 2 +-
dojo/tools/burp_enterprise/parser.py | 4 ++--
dojo/tools/jfrog_xray_unified/parser.py | 2 +-
dojo/tools/qualys_webapp/parser.py | 4 ++--
dojo/tools/veracode/json_parser.py | 2 +-
dojo/tools/veracode_sca/parser.py | 6 +++---
dojo/tools/xanitizer/parser.py | 1 -
dojo/utils.py | 3 +--
ruff.toml | 1 +
tests/close_old_findings_dedupe_test.py | 2 +-
tests/dedupe_test.py | 2 +-
21 files changed, 16 insertions(+), 40 deletions(-)
diff --git a/dojo/endpoint/utils.py b/dojo/endpoint/utils.py
index dea5db9e07..e40de5c5e1 100644
--- a/dojo/endpoint/utils.py
+++ b/dojo/endpoint/utils.py
@@ -93,7 +93,7 @@ def endpoint_get_or_create(**kwargs):
def clean_hosts_run(apps, change):
def err_log(message, html_log, endpoint_html_log, endpoint):
error_suffix = 'It is not possible to migrate it. Delete or edit this endpoint.'
- html_log.append({**endpoint_html_log, **{'message': message}})
+ html_log.append({**endpoint_html_log, 'message': message})
logger.error(f'Endpoint (id={endpoint.pk}) {message}. {error_suffix}')
broken_endpoints.add(endpoint.pk)
html_log = []
diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py
index f0c542e2d9..2fdc7e34a4 100644
--- a/dojo/engagement/views.py
+++ b/dojo/engagement/views.py
@@ -459,7 +459,6 @@ def get(self, request, eid, *args, **kwargs):
check = Check_List.objects.get(engagement=eng)
except:
check = None
- pass
notes = eng.notes.all()
note_type_activation = Note_Type.objects.filter(is_active=True).count()
if note_type_activation:
@@ -531,7 +530,6 @@ def post(self, request, eid, *args, **kwargs):
check = Check_List.objects.get(engagement=eng)
except:
check = None
- pass
notes = eng.notes.all()
note_type_activation = Note_Type.objects.filter(is_active=True).count()
if note_type_activation:
@@ -636,7 +634,6 @@ def add_tests(request, eid):
new_test.lead = User.objects.get(id=form['lead'].value())
except:
new_test.lead = None
- pass
# Set status to in progress if a test is added
if eng.status != "In Progress" and eng.active is True:
@@ -1152,7 +1149,6 @@ def complete_checklist(request, eid):
checklist = Check_List.objects.get(engagement=eng)
except:
checklist = None
- pass
add_breadcrumb(
parent=eng,
@@ -1174,7 +1170,6 @@ def complete_checklist(request, eid):
cl.engagement = eng
cl.save()
form.save_m2m()
- pass
messages.add_message(
request,
messages.SUCCESS,
diff --git a/dojo/filters.py b/dojo/filters.py
index 9d3a94a43d..5eeced2f11 100644
--- a/dojo/filters.py
+++ b/dojo/filters.py
@@ -1999,14 +1999,6 @@ class Meta:
exclude = ['description', 'mitigation', 'impact',
'references', 'numerical_severity']
- not_tags = ModelMultipleChoiceFilter(
- field_name='tags__name',
- to_field_name='name',
- exclude=True,
- queryset=Finding.tags.tag_model.objects.all().order_by('name'),
- # label='tags', # doesn't work with tagulous, need to set in __init__ below
- )
-
not_test__tags = ModelMultipleChoiceFilter(
field_name='test__tags__name',
to_field_name='name',
@@ -2034,8 +2026,6 @@ class Meta:
# label='tags', # doesn't work with tagulous, need to set in __init__ below
)
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True)
-
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.form.fields['cwe'].choices = cwe_options(self.queryset)
diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py
index b2ff46e8f1..9eee547b9c 100644
--- a/dojo/importers/base_importer.py
+++ b/dojo/importers/base_importer.py
@@ -49,7 +49,6 @@ def get_findings(scan_type: str, test: Test) -> List[Finding]:
TODO This should be enforced in the future, but here is not the place
TODO once this enforced, this stub class should be removed
"""
- pass
class BaseImporter(ImporterOptions):
diff --git a/dojo/importers/options.py b/dojo/importers/options.py
index 52b291ecaf..5ae687085a 100644
--- a/dojo/importers/options.py
+++ b/dojo/importers/options.py
@@ -83,7 +83,6 @@ def load_additional_options(
An added hook for loading additional options
to be used by children classes for the BaseImporter
"""
- pass
def log_translation(
self,
diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py
index 0273e0a746..32329431d7 100644
--- a/dojo/jira_link/helper.py
+++ b/dojo/jira_link/helper.py
@@ -1540,7 +1540,6 @@ def process_jira_project_form(request, instance=None, target=None, product=None,
except Exception as e:
error = True
logger.exception(e)
- pass
else:
logger.debug(jform.errors)
error = True
diff --git a/dojo/models.py b/dojo/models.py
index 5de06d4274..040ffbe3dd 100644
--- a/dojo/models.py
+++ b/dojo/models.py
@@ -3055,7 +3055,6 @@ def github_conf(self):
github_conf = github_product_key.conf
except:
github_conf = None
- pass
return github_conf
# newer version that can work with prefetching
@@ -3064,7 +3063,6 @@ def github_conf_new(self):
return self.test.engagement.product.github_pkey_set.all()[0].git_conf
except:
return None
- pass
@property
def has_jira_issue(self):
diff --git a/dojo/notifications/helper.py b/dojo/notifications/helper.py
index c89d185554..0afb0d6b36 100644
--- a/dojo/notifications/helper.py
+++ b/dojo/notifications/helper.py
@@ -270,7 +270,6 @@ def send_msteams_notification(event, user=None, *args, **kwargs):
except Exception as e:
logger.exception(e)
log_alert(e, "Microsoft Teams Notification", title=kwargs['title'], description=str(e), url=kwargs['url'])
- pass
@dojo_async_task
diff --git a/dojo/okta.py b/dojo/okta.py
index 47fa718de6..68934e1d5e 100644
--- a/dojo/okta.py
+++ b/dojo/okta.py
@@ -88,7 +88,6 @@ def validate_and_return_id_token(self, id_token, access_token):
except JWTError:
if k is None and client_id == 'a-key':
k = self.get_jwks_keys()[0]
- pass
claims = jwt.decode(
id_token,
diff --git a/dojo/product/views.py b/dojo/product/views.py
index 9a70751ae1..47d984c833 100644
--- a/dojo/product/views.py
+++ b/dojo/product/views.py
@@ -226,7 +226,7 @@ def view_product(request, pid):
benchmark_type__enabled=True).order_by('benchmark_type__name')
sla = SLA_Configuration.objects.filter(id=prod.sla_configuration_id).first()
benchAndPercent = []
- for i in range(0, len(benchmarks)):
+ for i in range(len(benchmarks)):
desired_level, total, total_pass, total_wait, total_fail, _total_viewed = asvs_calc_level(benchmarks[i])
success_percent = round((float(total_pass) / float(total)) * 100, 2)
@@ -939,7 +939,6 @@ def edit_product(request, pid):
github_inst = GITHUB_PKey.objects.get(product=product)
except:
github_inst = None
- pass
if request.method == 'POST':
form = ProductForm(request.POST, instance=product)
diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py
index ed224d1b70..d7df4559bd 100644
--- a/dojo/templatetags/display_tags.py
+++ b/dojo/templatetags/display_tags.py
@@ -484,7 +484,7 @@ def not_specified_icon(tooltip):
def stars(filled, total, tooltip):
code = ''
- for i in range(0, total):
+ for i in range(total):
if i < filled:
code += ''
else:
diff --git a/dojo/tools/burp_enterprise/parser.py b/dojo/tools/burp_enterprise/parser.py
index 4dc08625fb..1984cc65d7 100644
--- a/dojo/tools/burp_enterprise/parser.py
+++ b/dojo/tools/burp_enterprise/parser.py
@@ -83,7 +83,7 @@ def pre_allocate_items(self, tree):
if ("Issues found" in "".join(endpoint.itertext()).strip())
]
- for index in range(0, len(severities)):
+ for index in range(len(severities)):
url = endpoint_text[index].text[16:]
sev_table = list(severities[index].iter("tr"))
@@ -235,7 +235,7 @@ def create_findings(self, items, test):
requests = details.get("Request").split("SPLITTER")[:-1]
responses = details.get("Response").split("SPLITTER")[:-1]
unsaved_req_resp = []
- for index in range(0, len(requests)):
+ for index in range(len(requests)):
unsaved_req_resp.append(
{"req": requests[index], "resp": responses[index]}
)
diff --git a/dojo/tools/jfrog_xray_unified/parser.py b/dojo/tools/jfrog_xray_unified/parser.py
index c7e48897a2..e8b36d1b34 100644
--- a/dojo/tools/jfrog_xray_unified/parser.py
+++ b/dojo/tools/jfrog_xray_unified/parser.py
@@ -40,7 +40,7 @@ def get_item(vulnerability, test):
highestCvssV3Index = 0
highestCvssV3Score = 0
- for thisCveIndex in range(0, len(vulnerability["cves"]) - 1):
+ for thisCveIndex in range(len(vulnerability["cves"]) - 1):
# not all cves have cvssv3 scores, so skip these. If no v3 scores,
# we'll default to index 0
if "cvss_v3_score" in vulnerability["cves"][thisCveIndex]:
diff --git a/dojo/tools/qualys_webapp/parser.py b/dojo/tools/qualys_webapp/parser.py
index 729fc76d28..4c8c595cf1 100644
--- a/dojo/tools/qualys_webapp/parser.py
+++ b/dojo/tools/qualys_webapp/parser.py
@@ -93,7 +93,7 @@ def attach_unique_extras(
)
)
- for i in range(0, len(requests)):
+ for i in range(len(requests)):
if requests[i] != "" or responses[i] != "":
finding.unsaved_req_resp.append(
{"req": requests[i], "resp": responses[i]}
@@ -130,7 +130,7 @@ def attach_extras(endpoints, requests, responses, finding, date, qid, test):
for endpoint in endpoints:
finding.unsaved_endpoints.append(Endpoint.from_uri(endpoint))
- for i in range(0, len(requests)):
+ for i in range(len(requests)):
if requests[i] != "" or responses[i] != "":
finding.unsaved_req_resp.append(
{"req": requests[i], "resp": responses[i]}
diff --git a/dojo/tools/veracode/json_parser.py b/dojo/tools/veracode/json_parser.py
index 7f2111eff0..9e6818effc 100644
--- a/dojo/tools/veracode/json_parser.py
+++ b/dojo/tools/veracode/json_parser.py
@@ -124,7 +124,7 @@ def create_finding_from_details(self, finding_details, scan_type, policy_violate
# Attempt to get the CVSS score
if uncleaned_cvss := finding_details.get("cvss"):
if isinstance(uncleaned_cvss, str):
- if uncleaned_cvss.startswith("CVSS:3.1/") or uncleaned_cvss.startswith("CVSS:3.0/"):
+ if uncleaned_cvss.startswith(("CVSS:3.1/", "CVSS:3.0/")):
finding.cvssv3 = CVSS3(str(uncleaned_cvss)).clean_vector(output_prefix=True)
elif not uncleaned_cvss.startswith("CVSS"):
finding.cvssv3 = CVSS3(f"CVSS:3.1/{str(uncleaned_cvss)}").clean_vector(output_prefix=True)
diff --git a/dojo/tools/veracode_sca/parser.py b/dojo/tools/veracode_sca/parser.py
index 7cb730801b..a37a08cf7e 100644
--- a/dojo/tools/veracode_sca/parser.py
+++ b/dojo/tools/veracode_sca/parser.py
@@ -69,7 +69,7 @@ def _get_findings_json(self, file, test):
vulnerability = issue.get("vulnerability")
vuln_id = vulnerability.get("cve")
if vuln_id and not (
- vuln_id.startswith("cve") or vuln_id.startswith("CVE")
+ vuln_id.startswith(("cve", "CVE"))
):
vuln_id = "CVE-" + vuln_id
cvss_score = issue.get("severity")
@@ -113,7 +113,7 @@ def _get_findings_json(self, file, test):
if vulnerability.get("cwe_id"):
cwe = vulnerability.get("cwe_id")
if cwe:
- if cwe.startswith("CWE-") or cwe.startswith("cwe-"):
+ if cwe.startswith(("CWE-", "cwe-")):
cwe = cwe[4:]
if cwe.isdigit():
finding.cwe = int(cwe)
@@ -168,7 +168,7 @@ def get_findings_csv(self, file, test):
version = row.get("Version in use", None)
vuln_id = row.get("CVE", None)
if vuln_id and not (
- vuln_id.startswith("cve") or vuln_id.startswith("CVE")
+ vuln_id.startswith(("cve", "CVE"))
):
vuln_id = "CVE-" + vuln_id
diff --git a/dojo/tools/xanitizer/parser.py b/dojo/tools/xanitizer/parser.py
index bed38e19f4..0486967517 100644
--- a/dojo/tools/xanitizer/parser.py
+++ b/dojo/tools/xanitizer/parser.py
@@ -156,7 +156,6 @@ def add_code(self, node, showline, description):
return description
def generate_file_path(self, finding):
- pass
if finding.find("endNode") is not None and finding.find("endNode").get(
"relativePath"
diff --git a/dojo/utils.py b/dojo/utils.py
index 6c0b16bbdf..09bea49ac3 100644
--- a/dojo/utils.py
+++ b/dojo/utils.py
@@ -883,7 +883,7 @@ def get_punchcard_data(objs, start_date, weeks, view='Finding'):
def get_week_data(week_start_date, tick, day_counts):
data = []
- for i in range(0, len(day_counts)):
+ for i in range(len(day_counts)):
data.append([tick, i, day_counts[i]])
label = [tick, week_start_date.strftime("%m/%d
%Y")]
return data, label
@@ -2308,7 +2308,6 @@ def delete_chunk(self, objects, **kwargs):
logger.debug('ASYNC_DELETE: object has already been deleted elsewhere. Skipping')
# The id must be None
# The object has already been deleted elsewhere
- pass
@dojo_async_task
@app.task
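
The xanitizer and utils.py hunks apply Ruff's PIE790 rule: pass is only needed as a placeholder for an otherwise empty block, so it can be dropped when the surrounding function or branch already contains real statements. A small sketch with simplified, hypothetical function names:

    # Before: the body already has statements, so "pass" is a no-op placeholder
    def generate_file_path(finding):
        pass
        if finding is not None:
            return str(finding)
        return ""

    # After: same behaviour without the redundant "pass" (PIE790)
    def generate_file_path_fixed(finding):
        if finding is not None:
            return str(finding)
        return ""

    print(generate_file_path_fixed("endNode"))
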
diff --git a/ruff.toml b/ruff.toml
index 32ad177291..3174bbcb9a 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -50,6 +50,7 @@ select = [
"LOG",
"INP",
"SLOT",
+ "PIE",
"RSE",
"TID",
"PD",
diff --git a/tests/close_old_findings_dedupe_test.py b/tests/close_old_findings_dedupe_test.py
index 26b39f39e1..cb7db1b836 100644
--- a/tests/close_old_findings_dedupe_test.py
+++ b/tests/close_old_findings_dedupe_test.py
@@ -29,7 +29,7 @@ def setUp(self):
def check_nb_duplicates(self, expected_number_of_duplicates):
logger.debug("checking duplicates...")
driver = self.driver
- for i in range(0, 18):
+ for i in range(18):
time.sleep(5) # wait bit for celery dedupe task which can be slow on travis
self.goto_all_findings_list(driver)
dupe_count = 0
diff --git a/tests/dedupe_test.py b/tests/dedupe_test.py
index 5a58edf064..73214cc06d 100644
--- a/tests/dedupe_test.py
+++ b/tests/dedupe_test.py
@@ -27,7 +27,7 @@ def setUp(self):
def check_nb_duplicates(self, expected_number_of_duplicates):
logger.debug("checking duplicates...")
driver = self.driver
- for i in range(0, 18):
+ for i in range(18):
time.sleep(5) # wait bit for celery dedupe task which can be slow on travis
self.goto_all_findings_list(driver)
dupe_count = 0
From 1502a3c6c63af6931887f55c3279a9fe99b549cf Mon Sep 17 00:00:00 2001
From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com>
Date: Tue, 2 Jul 2024 14:19:52 -0500
Subject: [PATCH 011/111] Revert "Shuffle tests (#10335)" (#10495)
This reverts commit c8e1b09547406a00bcb3a017475ff60630982955.
---
docker/entrypoint-unit-tests-devDocker.sh | 2 +-
docker/entrypoint-unit-tests.sh | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/docker/entrypoint-unit-tests-devDocker.sh b/docker/entrypoint-unit-tests-devDocker.sh
index 6872d8668f..c590974b1b 100755
--- a/docker/entrypoint-unit-tests-devDocker.sh
+++ b/docker/entrypoint-unit-tests-devDocker.sh
@@ -53,7 +53,7 @@ EOF
echo "Unit Tests"
echo "------------------------------------------------------------"
-python3 manage.py test unittests -v 3 --keepdb --no-input --shuffle
+python3 manage.py test unittests -v 3 --keepdb --no-input
# you can select a single file to "test" unit tests
# python3 manage.py test unittests.tools.test_npm_audit_scan_parser.TestNpmAuditParser --keepdb -v 3
diff --git a/docker/entrypoint-unit-tests.sh b/docker/entrypoint-unit-tests.sh
index a356283c37..6c45ce489d 100755
--- a/docker/entrypoint-unit-tests.sh
+++ b/docker/entrypoint-unit-tests.sh
@@ -79,4 +79,4 @@ python3 manage.py migrate
echo "Unit Tests"
echo "------------------------------------------------------------"
-python3 manage.py test unittests -v 3 --keepdb --no-input --shuffle
+python3 manage.py test unittests -v 3 --keepdb --no-input
From 24c989af5df48064906b39b64e608189717932bf Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 2 Jul 2024 16:02:59 -0500
Subject: [PATCH 012/111] Update dependency postcss from 8.4.38 to v8.4.39
(docs/package.json) (#10476)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
docs/package-lock.json | 30 +++++++++++++++---------------
docs/package.json | 2 +-
2 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/docs/package-lock.json b/docs/package-lock.json
index 3493f14261..d29ed4ff30 100644
--- a/docs/package-lock.json
+++ b/docs/package-lock.json
@@ -6,7 +6,7 @@
"": {
"devDependencies": {
"autoprefixer": "10.4.19",
- "postcss": "8.4.38",
+ "postcss": "8.4.39",
"postcss-cli": "11.0.0"
}
},
@@ -585,9 +585,9 @@
}
},
"node_modules/picocolors": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
- "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==",
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz",
+ "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==",
"dev": true
},
"node_modules/picomatch": {
@@ -612,9 +612,9 @@
}
},
"node_modules/postcss": {
- "version": "8.4.38",
- "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz",
- "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==",
+ "version": "8.4.39",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.39.tgz",
+ "integrity": "sha512-0vzE+lAiG7hZl1/9I8yzKLx3aR9Xbof3fBHKunvMfOCYAtMhrsnccJY2iTURb9EZd5+pLuiNV9/c/GZJOHsgIw==",
"dev": true,
"funding": [
{
@@ -632,7 +632,7 @@
],
"dependencies": {
"nanoid": "^3.3.7",
- "picocolors": "^1.0.0",
+ "picocolors": "^1.0.1",
"source-map-js": "^1.2.0"
},
"engines": {
@@ -1372,9 +1372,9 @@
"dev": true
},
"picocolors": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
- "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==",
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz",
+ "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==",
"dev": true
},
"picomatch": {
@@ -1390,13 +1390,13 @@
"dev": true
},
"postcss": {
- "version": "8.4.38",
- "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz",
- "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==",
+ "version": "8.4.39",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.39.tgz",
+ "integrity": "sha512-0vzE+lAiG7hZl1/9I8yzKLx3aR9Xbof3fBHKunvMfOCYAtMhrsnccJY2iTURb9EZd5+pLuiNV9/c/GZJOHsgIw==",
"dev": true,
"requires": {
"nanoid": "^3.3.7",
- "picocolors": "^1.0.0",
+ "picocolors": "^1.0.1",
"source-map-js": "^1.2.0"
}
},
diff --git a/docs/package.json b/docs/package.json
index e320d75356..a7df81281f 100644
--- a/docs/package.json
+++ b/docs/package.json
@@ -1,6 +1,6 @@
{
"devDependencies": {
- "postcss": "8.4.38",
+ "postcss": "8.4.39",
"autoprefixer": "10.4.19",
"postcss-cli": "11.0.0"
}
From f90805ccc9e3da92650f4a334e1cb34b7d8e500a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 2 Jul 2024 16:27:35 -0500
Subject: [PATCH 013/111] Bump pillow from 10.3.0 to 10.4.0 (#10482)
Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.3.0 to 10.4.0.
- [Release notes](https://github.com/python-pillow/Pillow/releases)
- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
- [Commits](https://github.com/python-pillow/Pillow/compare/10.3.0...10.4.0)
---
updated-dependencies:
- dependency-name: pillow
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index b867d11e39..1a50464b22 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -34,7 +34,7 @@ lxml==5.2.2
Markdown==3.6
mysqlclient==2.1.1
openpyxl==3.1.5
-Pillow==10.3.0 # required by django-imagekit
+Pillow==10.4.0 # required by django-imagekit
psycopg2-binary==2.9.9
cryptography==42.0.8
python-dateutil==2.9.0.post0
From 4a5d2a6869f18794a9232ba52a31649f8cf94d88 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 2 Jul 2024 17:13:21 -0500
Subject: [PATCH 014/111] Bump drf-spectacular-sidecar from 2024.6.1 to
2024.7.1 (#10479)
Bumps [drf-spectacular-sidecar](https://github.com/tfranzel/drf-spectacular-sidecar) from 2024.6.1 to 2024.7.1.
- [Commits](https://github.com/tfranzel/drf-spectacular-sidecar/compare/2024.6.1...2024.7.1)
---
updated-dependencies:
- dependency-name: drf-spectacular-sidecar
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 1a50464b22..86cbdc861a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -70,7 +70,7 @@ hyperlink==21.0.0
django-test-migrations==1.4.0
djangosaml2==1.9.3
drf-spectacular==0.27.2
-drf-spectacular-sidecar==2024.6.1
+drf-spectacular-sidecar==2024.7.1
django-ratelimit==4.1.0
argon2-cffi==23.1.0
blackduck==1.1.3
From d52044e4ccce7c93fd8c16f22e395d746db7f6e7 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 2 Jul 2024 17:14:50 -0500
Subject: [PATCH 015/111] Bump asteval from 0.9.33 to 1.0.0 (#10488)
Bumps [asteval](https://github.com/lmfit/asteval) from 0.9.33 to 1.0.0.
- [Release notes](https://github.com/lmfit/asteval/releases)
- [Commits](https://github.com/lmfit/asteval/compare/0.9.33...1.0.0)
---
updated-dependencies:
- dependency-name: asteval
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 86cbdc861a..4150fd372f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
# requirements.txt for DefectDojo using Python 3.x
-asteval==0.9.33
+asteval==1.0.0
bleach==6.1.0
bleach[css]
celery==5.4.0
From 39d6963c6842658e690985f5a42a66d5b7243a15 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 2 Jul 2024 17:15:31 -0500
Subject: [PATCH 016/111] Bump boto3 from 1.34.136 to 1.34.137 (#10489)
Bumps [boto3](https://github.com/boto/boto3) from 1.34.136 to 1.34.137.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.34.136...1.34.137)
---
updated-dependencies:
- dependency-name: boto3
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 4150fd372f..15799fbde8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -75,7 +75,7 @@ django-ratelimit==4.1.0
argon2-cffi==23.1.0
blackduck==1.1.3
pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
-boto3==1.34.136 # Required for Celery Broker AWS (SQS) support
+boto3==1.34.137 # Required for Celery Broker AWS (SQS) support
netaddr==1.3.0
vulners==2.1.7
fontawesomefree==6.5.1
From 6fdd46d35f88975f082316be0385f53c2a7009bc Mon Sep 17 00:00:00 2001
From: kiblik <5609770+kiblik@users.noreply.github.com>
Date: Wed, 3 Jul 2024 00:35:09 +0200
Subject: [PATCH 017/111] Ruff: add and fix Q (except Q000) (#10094)
* Ruff: add Q001
* Ruff: fix Q001
* Ruff: add Q002
* Ruff: fix Q002
* Ruff: add Q003
* Ruff: fix Q003
* Ruff: add Q004
* Ruff: fix Q004
---
dojo/api_v2/permissions.py | 28 ++++++------
dojo/filters.py | 4 +-
dojo/finding/views.py | 2 +-
dojo/forms.py | 4 +-
dojo/importers/base_importer.py | 2 +-
dojo/importers/default_reimporter.py | 2 +-
dojo/jira_link/views.py | 2 +-
dojo/management/commands/dedupe.py | 2 +-
dojo/models.py | 32 +++++++-------
dojo/product/views.py | 4 +-
dojo/search/views.py | 4 +-
dojo/sla_config/views.py | 2 +-
dojo/survey/urls.py | 4 +-
dojo/templatetags/display_tags.py | 2 +-
dojo/templatetags/survey_tags.py | 4 +-
dojo/tools/api_sonarqube/api_client.py | 2 +-
dojo/tools/npm_audit_7_plus/parser.py | 4 +-
dojo/user/validators.py | 2 +-
dojo/utils.py | 8 ++--
ruff.toml | 1 +
tests/notes_test.py | 4 +-
unittests/test_apiv2_endpoint.py | 2 +-
unittests/test_finding_model.py | 34 +++++++-------
unittests/test_user_validators.py | 2 +-
unittests/tools/test_anchore_grype_parser.py | 44 +++++++++----------
..._security_center_recommendations_parser.py | 12 ++---
unittests/tools/test_gitleaks_parser.py | 8 ++--
unittests/tools/test_jfrogxray_parser.py | 8 ++--
unittests/tools/test_kics_parser.py | 8 ++--
.../tools/test_mozilla_observatory_parser.py | 4 +-
unittests/tools/test_nexpose_parser.py | 2 +-
unittests/tools/test_nikto_parser.py | 2 +-
unittests/tools/test_qualys_parser.py | 2 +-
unittests/tools/test_semgrep_parser.py | 10 ++---
unittests/tools/test_sslyze_parser.py | 12 ++---
unittests/tools/test_trivy_parser.py | 28 ++++++------
unittests/tools/test_trufflehog3_parser.py | 8 ++--
37 files changed, 153 insertions(+), 152 deletions(-)
diff --git a/dojo/api_v2/permissions.py b/dojo/api_v2/permissions.py
index bf999ac635..10991bb3e5 100644
--- a/dojo/api_v2/permissions.py
+++ b/dojo/api_v2/permissions.py
@@ -437,7 +437,7 @@ def has_permission(self, request, view):
)
elif engagement_id := converted_dict.get("engagement_id"):
# engagement_id doesn't exist
- msg = f"Engagement \"{engagement_id}\" does not exist"
+ msg = f'Engagement "{engagement_id}" does not exist'
raise serializers.ValidationError(msg)
if not converted_dict.get("auto_create_context"):
@@ -492,7 +492,7 @@ def has_permission(self, request, view):
)
elif product_id := converted_dict.get("product_id"):
# product_id doesn't exist
- msg = f"Product \"{product_id}\" does not exist"
+ msg = f'Product "{product_id}" does not exist'
raise serializers.ValidationError(msg)
else:
msg = "Need product_id or product_name to perform import"
@@ -633,7 +633,7 @@ def has_permission(self, request, view):
)
elif test_id := converted_dict.get("test_id"):
# test_id doesn't exist
- msg = f"Test \"{test_id}\" does not exist"
+ msg = f'Test "{test_id}" does not exist'
raise serializers.ValidationError(msg)
if not converted_dict.get("auto_create_context"):
@@ -927,28 +927,28 @@ def raise_no_auto_create_import_validation_error(
raise ValidationError(msg)
if product_type_name and not product_type:
- msg = f"Product Type \"{product_type_name}\" does not exist"
+ msg = f'Product Type "{product_type_name}" does not exist'
raise serializers.ValidationError(msg)
if product_name and not product:
if product_type_name:
- msg = f"Product \"{product_name}\" does not exist in Product_Type \"{product_type_name}\""
+ msg = f'Product "{product_name}" does not exist in Product_Type "{product_type_name}"'
raise serializers.ValidationError(msg)
else:
- msg = f"Product \"{product_name}\" does not exist"
+ msg = f'Product "{product_name}" does not exist'
raise serializers.ValidationError(msg)
if engagement_name and not engagement:
- msg = f"Engagement \"{engagement_name}\" does not exist in Product \"{product_name}\""
+ msg = f'Engagement "{engagement_name}" does not exist in Product "{product_name}"'
raise serializers.ValidationError(msg)
# these are only set for reimport
if test_title:
- msg = f"Test \"{test_title}\" with scan_type \"{scan_type}\" does not exist in Engagement \"{engagement_name}\""
+ msg = f'Test "{test_title}" with scan_type "{scan_type}" does not exist in Engagement "{engagement_name}"'
raise serializers.ValidationError(msg)
if scan_type:
- msg = f"Test with scan_type \"{scan_type}\" does not exist in Engagement \"{engagement_name}\""
+ msg = f'Test with scan_type "{scan_type}" does not exist in Engagement "{engagement_name}"'
raise serializers.ValidationError(msg)
raise ValidationError(error_message)
@@ -995,13 +995,13 @@ def check_auto_create_permission(
if product and product_name and engagement_name:
if not user_has_permission(user, product, Permissions.Engagement_Add):
- msg = f"No permission to create engagements in product \"{product_name}\""
+ msg = f'No permission to create engagements in product "{product_name}"'
raise PermissionDenied(msg)
if not user_has_permission(
user, product, Permissions.Import_Scan_Result
):
- msg = f"No permission to import scans into product \"{product_name}\""
+ msg = f'No permission to import scans into product "{product_name}"'
raise PermissionDenied(msg)
# all good
@@ -1009,14 +1009,14 @@ def check_auto_create_permission(
if not product and product_name:
if not product_type_name:
- msg = f"Product \"{product_name}\" does not exist and no product_type_name provided to create the new product in"
+ msg = f'Product "{product_name}" does not exist and no product_type_name provided to create the new product in'
raise serializers.ValidationError(msg)
if not product_type:
if not user_has_global_permission(
user, Permissions.Product_Type_Add
):
- msg = f"No permission to create product_type \"{product_type_name}\""
+ msg = f'No permission to create product_type "{product_type_name}"'
raise PermissionDenied(msg)
# new product type can be created with current user as owner, so
# all objects in it can be created as well
@@ -1025,7 +1025,7 @@ def check_auto_create_permission(
if not user_has_permission(
user, product_type, Permissions.Product_Type_Add_Product
):
- msg = f"No permission to create products in product_type \"{product_type}\""
+ msg = f'No permission to create products in product_type "{product_type}"'
raise PermissionDenied(msg)
# product can be created, so objects in it can be created as well
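
The permissions.py hunks above show Ruff's Q003/Q004 fixes: when a string needs literal double quotes, switching the outer quotes to single quotes removes the backslash escapes without changing the resulting value. A minimal runnable check:

    product_name = "Internal API"

    # Before: double-quoted f-string forces the inner quotes to be escaped
    before = f"Product \"{product_name}\" does not exist"

    # After: single-quoted f-string needs no escapes (Q003)
    after = f'Product "{product_name}" does not exist'

    assert before == after
    print(after)
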
diff --git a/dojo/filters.py b/dojo/filters.py
index 5eeced2f11..76c65b92a6 100644
--- a/dojo/filters.py
+++ b/dojo/filters.py
@@ -1615,7 +1615,7 @@ class FindingFilterHelper(FilterSet):
"The range of EPSS score percentages to filter on; the left input is a lower bound, "
"the right is an upper bound. Leaving one empty will skip that bound (e.g., leaving "
"the lower bound input empty will filter only on the upper bound -- filtering on "
- "\"less than or equal\")."
+ '"less than or equal").'
))
epss_percentile = PercentageFilter(field_name="epss_percentile", label="EPSS percentile")
epss_percentile_range = PercentageRangeFilter(
@@ -1624,7 +1624,7 @@ class FindingFilterHelper(FilterSet):
help_text=(
"The range of EPSS percentiles to filter on; the left input is a lower bound, the right "
"is an upper bound. Leaving one empty will skip that bound (e.g., leaving the lower bound "
- "input empty will filter only on the upper bound -- filtering on \"less than or equal\")."
+ 'input empty will filter only on the upper bound -- filtering on "less than or equal").'
))
o = OrderingFilter(
diff --git a/dojo/finding/views.py b/dojo/finding/views.py
index f7624c996c..c84154804a 100644
--- a/dojo/finding/views.py
+++ b/dojo/finding/views.py
@@ -1709,7 +1709,7 @@ def request_finding_review(request, fid):
finding=finding,
reviewers=reviewers,
recipients=reviewers_usernames,
- description=f"User {user.get_full_name()} has requested that user(s) {reviewers_string} review the finding \"{finding.title}\" for accuracy:\n\n{new_note}",
+ description=f'User {user.get_full_name()} has requested that user(s) {reviewers_string} review the finding "{finding.title}" for accuracy:\n\n{new_note}',
icon="check",
url=reverse("view_finding", args=(finding.id,)),
)
diff --git a/dojo/forms.py b/dojo/forms.py
index 9d91955847..734d97586a 100644
--- a/dojo/forms.py
+++ b/dojo/forms.py
@@ -3155,8 +3155,8 @@ def __init__(self, *args, **kwargs):
# Show in admin a multichoice list of validator names
# pass this to form using field_name='validator_name' ?
class QuestionForm(forms.Form):
- ''' Base class for a Question
- '''
+ """ Base class for a Question
+ """
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py
index 9eee547b9c..449a9074b8 100644
--- a/dojo/importers/base_importer.py
+++ b/dojo/importers/base_importer.py
@@ -567,7 +567,7 @@ def sanitize_severity(
# Ensure the final severity is one of the supported options
if finding.severity not in SEVERITIES:
msg = (
- f"Finding severity \"{finding.severity}\" is not supported. "
+ f'Finding severity "{finding.severity}" is not supported. '
f"Any of the following are supported: {SEVERITIES}."
)
raise ValidationError(msg)
diff --git a/dojo/importers/default_reimporter.py b/dojo/importers/default_reimporter.py
index f2e8b86e51..0c930d9df7 100644
--- a/dojo/importers/default_reimporter.py
+++ b/dojo/importers/default_reimporter.py
@@ -399,7 +399,7 @@ def match_new_finding_to_existing_finding(
severity=unsaved_finding.severity,
numerical_severity=Finding.get_numerical_severity(unsaved_finding.severity)).order_by('id')
else:
- logger.error(f"Internal error: unexpected deduplication_algorithm: \"{self.deduplication_algorithm}\"")
+ logger.error(f'Internal error: unexpected deduplication_algorithm: "{self.deduplication_algorithm}"')
return None
def process_matched_finding(
diff --git a/dojo/jira_link/views.py b/dojo/jira_link/views.py
index 6ae03f8d4c..e0c43884c4 100644
--- a/dojo/jira_link/views.py
+++ b/dojo/jira_link/views.py
@@ -544,7 +544,7 @@ def post(self, request, tid=None):
create_notification(
event='jira_config_deleted',
title=_('Deletion of JIRA: %s') % jira_instance.configuration_name,
- description=f"JIRA \"{jira_instance.configuration_name}\" was deleted by {request.user}",
+ description=f'JIRA "{jira_instance.configuration_name}" was deleted by {request.user}',
url=request.build_absolute_uri(reverse('jira')))
return HttpResponseRedirect(reverse('jira'))
except Exception as e:
diff --git a/dojo/management/commands/dedupe.py b/dojo/management/commands/dedupe.py
index 928b4d3144..1e77c82c9a 100644
--- a/dojo/management/commands/dedupe.py
+++ b/dojo/management/commands/dedupe.py
@@ -38,7 +38,7 @@ def add_arguments(self, parser):
'--parser',
dest='parser',
action='append',
- help='''List of parsers for which hash_code needs recomputing (defaults to all parsers)'''
+ help="""List of parsers for which hash_code needs recomputing (defaults to all parsers)"""
)
parser.add_argument('--hash_code_only', action='store_true', help='Only compute hash codes')
diff --git a/dojo/models.py b/dojo/models.py
index 040ffbe3dd..364f714b4a 100644
--- a/dojo/models.py
+++ b/dojo/models.py
@@ -553,7 +553,7 @@ class System_Settings(models.Model):
default=True,
blank=False,
verbose_name=_("Password must contain one special character"),
- help_text=_("Requires user passwords to contain at least one special character (()[]{}|\\`~!@#$%^&*_-+=;:\'\",<>./?)."))
+ help_text=_("Requires user passwords to contain at least one special character (()[]{}|\\`~!@#$%^&*_-+=;:'\",<>./?)."))
lowercase_character_required = models.BooleanField(
default=True,
blank=False,
@@ -3908,7 +3908,7 @@ class JIRA_Project(models.Model):
engagement = models.OneToOneField(Engagement, on_delete=models.CASCADE, null=True, blank=True)
component = models.CharField(max_length=200, blank=True)
custom_fields = models.JSONField(max_length=200, blank=True, null=True,
- help_text=_("JIRA custom field JSON mapping of Id to value, e.g. {\"customfield_10122\": [{\"name\": \"8.0.1\"}]}"))
+ help_text=_('JIRA custom field JSON mapping of Id to value, e.g. {"customfield_10122": [{"name": "8.0.1"}]}'))
default_assignee = models.CharField(max_length=200, blank=True, null=True,
help_text=_("JIRA default assignee (name). If left blank then it defaults to whatever is configured in JIRA."))
jira_labels = models.CharField(max_length=200, blank=True, null=True,
@@ -4384,9 +4384,9 @@ def __str__(self):
# ==============================
with warnings.catch_warnings(action="ignore", category=ManagerInheritanceWarning):
class Question(PolymorphicModel, TimeStampedModel):
- '''
+ """
Represents a question.
- '''
+ """
class Meta:
ordering = ['order']
@@ -4407,23 +4407,23 @@ def __str__(self):
class TextQuestion(Question):
- '''
+ """
Question with a text answer
- '''
+ """
objects = PolymorphicManager()
def get_form(self):
- '''
+ """
Returns the form for this model
- '''
+ """
from .forms import TextQuestionForm
return TextQuestionForm
class Choice(TimeStampedModel):
- '''
+ """
Model to store the choices for multi choice questions
- '''
+ """
order = models.PositiveIntegerField(default=1)
@@ -4437,10 +4437,10 @@ def __str__(self):
class ChoiceQuestion(Question):
- '''
+ """
Question with answers that are chosen from a list of choices defined
by the user.
- '''
+ """
multichoice = models.BooleanField(default=False,
help_text=_("Select one or more"))
@@ -4448,9 +4448,9 @@ class ChoiceQuestion(Question):
objects = PolymorphicManager()
def get_form(self):
- '''
+ """
Returns the form for this model
- '''
+ """
from .forms import ChoiceQuestionForm
return ChoiceQuestionForm
@@ -4516,8 +4516,8 @@ def __str__(self):
with warnings.catch_warnings(action="ignore", category=ManagerInheritanceWarning):
class Answer(PolymorphicModel, TimeStampedModel):
- ''' Base Answer model
- '''
+ """ Base Answer model
+ """
question = models.ForeignKey(Question, on_delete=models.CASCADE)
answered_survey = models.ForeignKey(Answered_Survey,
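
The models.py hunks convert single-quoted ''' docstrings and multiline strings to the double-quoted form preferred by Ruff's Q001/Q002 defaults; the text of each string is unchanged. A short sketch showing that only the quote style differs:

    def get_form():
        """
        Returns the form for this model (double-quoted docstring per Q002).
        """
        return "TextQuestionForm"

    # The docstring reads the same whether it was written with ''' or """.
    print(get_form.__doc__.strip())
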
diff --git a/dojo/product/views.py b/dojo/product/views.py
index 47d984c833..c3afce1524 100644
--- a/dojo/product/views.py
+++ b/dojo/product/views.py
@@ -580,11 +580,11 @@ def view_product_metrics(request, pid):
closed_findings = list(filters.get("closed", []).values('id', 'date', 'severity'))
accepted_findings = list(filters.get("accepted", []).values('id', 'date', 'severity'))
- '''
+ """
Optimization: Create dictionaries in the structure of { finding_id: True } for index based search
Previously the for-loop below used "if finding in open_findings" -- an average O(n^2) time complexity
This allows for "if open_findings.get(finding_id, None)" -- an average O(n) time complexity
- '''
+ """
open_findings_dict = {f.get('id'): True for f in open_findings}
closed_findings_dict = {f.get('id'): True for f in closed_findings}
accepted_findings_dict = {f.get('id'): True for f in accepted_findings}
diff --git a/dojo/search/views.py b/dojo/search/views.py
index 9867dfc62f..6ae591063c 100644
--- a/dojo/search/views.py
+++ b/dojo/search/views.py
@@ -364,7 +364,7 @@ def simple_search(request):
response.delete_cookie("highlight", path='/')
return response
- '''
+ """
query: some keywords
operators: {}
keywords: ['some', 'keywords']
@@ -400,7 +400,7 @@ def simple_search(request):
query: tags:anchore vulnerability_id:CVE-2020-1234 jquery
operators: {'tags': ['anchore'], 'vulnerability_id': ['CVE-2020-1234']}
keywords: ['jquery']
- '''
+ """
# it's not google grade parsing, but let's do some basic stuff right
diff --git a/dojo/sla_config/views.py b/dojo/sla_config/views.py
index a333946e39..da0c6b6a28 100644
--- a/dojo/sla_config/views.py
+++ b/dojo/sla_config/views.py
@@ -42,7 +42,7 @@ def edit_sla_config(request, slaid):
if request.method == 'POST' and request.POST.get('delete'):
if sla_config.id != 1:
if Product.objects.filter(sla_configuration=sla_config).count():
- msg = f"The \"{sla_config}\" SLA configuration could not be deleted, as it is currently in use by one or more products."
+ msg = f'The "{sla_config}" SLA configuration could not be deleted, as it is currently in use by one or more products.'
messages.add_message(request,
messages.ERROR,
msg,
diff --git a/dojo/survey/urls.py b/dojo/survey/urls.py
index 8880bc3bcc..2286b83226 100644
--- a/dojo/survey/urls.py
+++ b/dojo/survey/urls.py
@@ -1,8 +1,8 @@
-'''
+"""
Created on Feb 18, 2015
@author: jay7958
-'''
+"""
from django.apps import apps
from django.contrib import admin
from django.urls import re_path
diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py
index d7df4559bd..514cc685df 100644
--- a/dojo/templatetags/display_tags.py
+++ b/dojo/templatetags/display_tags.py
@@ -378,7 +378,7 @@ def notspecified(text):
if text:
return text
else:
- return mark_safe("Not Specified")
+ return mark_safe('Not Specified')
@register.tag
diff --git a/dojo/templatetags/survey_tags.py b/dojo/templatetags/survey_tags.py
index c60edc60e1..d08ce902bb 100644
--- a/dojo/templatetags/survey_tags.py
+++ b/dojo/templatetags/survey_tags.py
@@ -1,8 +1,8 @@
-'''
+"""
Created on Feb 18, 2015
@author: jay7958
-'''
+"""
from django import template
from dojo.models import Answered_Survey, Engagement_Survey
diff --git a/dojo/tools/api_sonarqube/api_client.py b/dojo/tools/api_sonarqube/api_client.py
index 1e26ebad9d..09a983d744 100644
--- a/dojo/tools/api_sonarqube/api_client.py
+++ b/dojo/tools/api_sonarqube/api_client.py
@@ -265,7 +265,7 @@ def get_issue(self, issue_key):
if issue["key"] == issue_key:
return issue
msg = (
- f"Expected Issue \"{issue_key}\", but it returned"
+ f'Expected Issue "{issue_key}", but it returned'
f"{[x.get('key') for x in response.json().get('issues')]}. "
"Full response: "
f"{response.json()}"
diff --git a/dojo/tools/npm_audit_7_plus/parser.py b/dojo/tools/npm_audit_7_plus/parser.py
index 89c4a8575f..77d3b77c0f 100644
--- a/dojo/tools/npm_audit_7_plus/parser.py
+++ b/dojo/tools/npm_audit_7_plus/parser.py
@@ -6,7 +6,7 @@
logger = logging.getLogger(__name__)
-'''
+"""
the npm audit json output depends on the params used. this parser
accepts the formats for any of:
@@ -18,7 +18,7 @@
as the report's meta block indicates, all top level keys
are considered a vulnerability and as much information as provided
is added to each
-'''
+"""
class NpmAudit7PlusParser:
diff --git a/dojo/user/validators.py b/dojo/user/validators.py
index b84f171797..17e35c781b 100644
--- a/dojo/user/validators.py
+++ b/dojo/user/validators.py
@@ -86,7 +86,7 @@ def validate(self, password, user=None):
def get_help_text(self):
return gettext('The password must contain at least 1 special character, '
- + '''()[]{}|`~!@#$%^&*_-+=;:'",<>./?.''')
+ + """()[]{}|`~!@#$%^&*_-+=;:'",<>./?.""")
class DojoCommonPasswordValidator(CommonPasswordValidator):
diff --git a/dojo/utils.py b/dojo/utils.py
index 09bea49ac3..d66c538529 100644
--- a/dojo/utils.py
+++ b/dojo/utils.py
@@ -2059,11 +2059,11 @@ def get_current_request():
def create_bleached_link(url, title):
- link = ''
+ link += '">'
link += title
link += ''
return bleach.clean(link, tags={'a'}, attributes={'a': ['href', 'target', 'title']})
@@ -2398,7 +2398,7 @@ def get_password_requirements_string():
if bool(get_system_setting('number_character_required')):
s += ', one number (0-9)'
if bool(get_system_setting('special_character_required')):
- s += ', one special character (()[]{}|\\`~!@#$%^&*_-+=;:\'\",<>./?)'
+ s += ', one special character (()[]{}|\\`~!@#$%^&*_-+=;:\'",<>./?)'
if s.count(', ') == 1:
password_requirements_string = s.rsplit(', ', 1)[0] + ' and ' + s.rsplit(', ', 1)[1]
diff --git a/ruff.toml b/ruff.toml
index 3174bbcb9a..ed814e15f9 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -51,6 +51,7 @@ select = [
"INP",
"SLOT",
"PIE",
+ "Q001", "Q002", "Q003", "Q004",
"RSE",
"TID",
"PD",
diff --git a/tests/notes_test.py b/tests/notes_test.py
index ff2619d6ef..77546d3349 100644
--- a/tests/notes_test.py
+++ b/tests/notes_test.py
@@ -6,10 +6,10 @@
from product_test import ProductTest
from selenium.webdriver.common.by import By
-'''
+"""
Tests Notes functionality on all levels (Engagement, Test, and Finding)
Private and public notes are tested
-'''
+"""
class NoteTest(BaseTestCase):
diff --git a/unittests/test_apiv2_endpoint.py b/unittests/test_apiv2_endpoint.py
index 26342298d4..e197fb6eec 100644
--- a/unittests/test_apiv2_endpoint.py
+++ b/unittests/test_apiv2_endpoint.py
@@ -19,7 +19,7 @@ def test_endpoint_missing_host_product(self):
"host": "FOO.BAR"
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
- self.assertIn("Attribute \'product\' is required", r.content.decode("utf-8"))
+ self.assertIn("Attribute 'product' is required", r.content.decode("utf-8"))
r = self.client.post(reverse('endpoint-list'), {
"product": 1
diff --git a/unittests/test_finding_model.py b/unittests/test_finding_model.py
index 1a2fb4e2a2..7d93832921 100644
--- a/unittests/test_finding_model.py
+++ b/unittests/test_finding_model.py
@@ -30,7 +30,7 @@ def test_get_sast_source_file_path_with_link_and_source_code_management_uri(self
finding.test = test
finding.sast_source_file_path = 'SastSourceFilePath'
engagement.source_code_management_uri = 'URL'
- self.assertEqual('SastSourceFilePath', finding.get_sast_source_file_path_with_link())
+ self.assertEqual('SastSourceFilePath', finding.get_sast_source_file_path_with_link())
def test_get_file_path_with_link_no_file_path(self):
finding = Finding()
@@ -53,7 +53,7 @@ def test_get_file_path_with_link_and_source_code_management_uri(self):
finding.test = test
finding.file_path = 'FilePath'
engagement.source_code_management_uri = 'URL'
- self.assertEqual('FilePath', finding.get_file_path_with_link())
+ self.assertEqual('FilePath', finding.get_file_path_with_link())
def test_get_file_path_with_link_and_source_code_management_uri_github_no_scm_type_with_details_and_line(self):
# checks that for github.com in uri dojo makes correct url to browse on github
@@ -68,7 +68,7 @@ def test_get_file_path_with_link_and_source_code_management_uri_github_no_scm_ty
finding.file_path = 'some-folder/some-file.ext'
finding.line = 5432
engagement.source_code_management_uri = 'https://github.com/some-test-account/some-test-repo'
- self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
+ self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
def test_get_file_path_with_link_and_source_code_management_uri_github_with_scm_type_with_details_and_line(self):
# checks that for github in custom field dojo makes correct url to browse on github
@@ -92,7 +92,7 @@ def test_get_file_path_with_link_and_source_code_management_uri_github_with_scm_
finding.line = 5432
engagement.source_code_management_uri = 'https://github.com/some-test-account/some-test-repo'
- self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
+ self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_public_project_with_no_details_and_line(self):
# checks that for public bitbucket (bitbucket.org) in custom field
@@ -115,7 +115,7 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_public
finding.line = 5432
engagement.source_code_management_uri = 'https://bb.example.com/some-test-user/some-test-repo.git'
- self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
+ self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_public_project_with_commithash_and_line(self):
# checks that for public bitbucket (bitbucket.org) in custom field and existing commit hash in finding
@@ -139,7 +139,7 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_public
finding.line = 5432
engagement.source_code_management_uri = 'https://bb.example.com/some-test-user/some-test-repo.git'
- self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
+ self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standalone_project_with_commithash_and_line(self):
# checks that for standalone bitbucket in custom field and existing commit hash in finding
@@ -163,7 +163,7 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standa
finding.line = 5432
engagement.source_code_management_uri = 'https://bb.example.com/scm/some-test-project/some-test-repo.git'
- self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
+ self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standalone_project_with_branchtag_and_line(self):
# checks that for standalone bitbucket in custom field and existing branch/tag in finding
@@ -187,7 +187,7 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standa
finding.line = 5432
engagement.source_code_management_uri = 'https://bb.example.com/scm/some-test-project/some-test-repo.git'
- self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
+ self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standalone_user_with_branchtag_and_line(self):
# checks that for standalone bitbucket in custom field and existing branch/tag in finding
@@ -212,7 +212,7 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standa
engagement.source_code_management_uri = 'https://bb.example.com/scm/~some-user/some-test-repo.git'
- self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
+ self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
def test_get_file_path_with_link_and_source_code_management_uri_gitea_or_codeberg_project_with_no_details_and_line(self):
# checks that for gitea and codeberg in custom field
@@ -235,7 +235,7 @@ def test_get_file_path_with_link_and_source_code_management_uri_gitea_or_codeber
finding.line = 5432
engagement.source_code_management_uri = 'https://bb.example.com/some-test-user/some-test-repo.git'
- self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
+ self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
def test_get_file_path_with_link_and_source_code_management_uri_gitea_or_codeberg_project_with_commithash_and_line(self):
# checks that for gitea and codeberg in custom field and existing commit hash in finding
@@ -259,7 +259,7 @@ def test_get_file_path_with_link_and_source_code_management_uri_gitea_or_codeber
finding.line = 5432
engagement.source_code_management_uri = 'https://bb.example.com/some-test-user/some-test-repo.git'
- self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
+ self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link())
def test_get_file_path_with_xss_attack(self):
test = Test()
@@ -283,32 +283,32 @@ def test_get_references_with_links_no_links(self):
def test_get_references_with_links_simple_url(self):
finding = Finding()
finding.references = 'URL: https://www.example.com'
- self.assertEqual('URL: https://www.example.com', finding.get_references_with_links())
+ self.assertEqual('URL: https://www.example.com', finding.get_references_with_links())
def test_get_references_with_links_url_with_port(self):
finding = Finding()
finding.references = 'http://www.example.com:8080'
- self.assertEqual('http://www.example.com:8080', finding.get_references_with_links())
+ self.assertEqual('http://www.example.com:8080', finding.get_references_with_links())
def test_get_references_with_links_url_with_path(self):
finding = Finding()
finding.references = 'URL https://www.example.com/path/part2 behind URL'
- self.assertEqual('URL https://www.example.com/path/part2 behind URL', finding.get_references_with_links())
+ self.assertEqual('URL https://www.example.com/path/part2 behind URL', finding.get_references_with_links())
def test_get_references_with_links_complicated_url_with_parameter(self):
finding = Finding()
finding.references = 'URL:https://www.example.com/path?param1=abc&_param2=xyz'
- self.assertEqual('URL:https://www.example.com/path?param1=abc&_param2=xyz', finding.get_references_with_links())
+ self.assertEqual('URL:https://www.example.com/path?param1=abc&_param2=xyz', finding.get_references_with_links())
def test_get_references_with_links_two_urls(self):
finding = Finding()
finding.references = 'URL1: https://www.example.com URL2: https://info.example.com'
- self.assertEqual('URL1: https://www.example.com URL2: https://info.example.com', finding.get_references_with_links())
+ self.assertEqual('URL1: https://www.example.com URL2: https://info.example.com', finding.get_references_with_links())
def test_get_references_with_links_linebreak(self):
finding = Finding()
finding.references = 'https://www.example.com\nhttps://info.example.com'
- self.assertEqual('https://www.example.com\nhttps://info.example.com', finding.get_references_with_links())
+ self.assertEqual('https://www.example.com\nhttps://info.example.com', finding.get_references_with_links())
def test_get_references_with_links_markdown(self):
finding = Finding()
diff --git a/unittests/test_user_validators.py b/unittests/test_user_validators.py
index 4751cacc87..265a529a45 100644
--- a/unittests/test_user_validators.py
+++ b/unittests/test_user_validators.py
@@ -102,7 +102,7 @@ def test_validator_special_character_required(self):
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['new_password'][0],
- '''The password must contain at least 1 special character, ()[]{}|`~!@#$%^&*_-+=;:'",<>./?.''')
+ """The password must contain at least 1 special character, ()[]{}|`~!@#$%^&*_-+=;:'",<>./?.""")
def test_validator_lowercase_character_required(self):
with self.subTest(policy='lowercase_character_required=False'):
diff --git a/unittests/tools/test_anchore_grype_parser.py b/unittests/tools/test_anchore_grype_parser.py
index f20b950913..4112837dca 100644
--- a/unittests/tools/test_anchore_grype_parser.py
+++ b/unittests/tools/test_anchore_grype_parser.py
@@ -104,10 +104,10 @@ def test_check_all_fields(self):
finding = findings[0]
self.assertEqual('CVE-2004-0971 in libgssapi-krb5-2:1.17-3+deb10u3', finding.title)
- description = '''**Vulnerability Namespace:** debian:10
+ description = """**Vulnerability Namespace:** debian:10
**Related Vulnerability Description:** The krb5-send-pr script in the kerberos5 (krb5) package in Trustix Secure Linux 1.5 through 2.1, and possibly other operating systems, allows local users to overwrite files via a symlink attack on temporary files.
**Matcher:** dpkg-matcher
-**Package URL:** pkg:deb/debian/libgssapi-krb5-2@1.17-3+deb10u3?arch=amd64'''
+**Package URL:** pkg:deb/debian/libgssapi-krb5-2@1.17-3+deb10u3?arch=amd64"""
self.assertEqual(description, finding.description)
vulnerability_ids = finding.unsaved_vulnerability_ids
self.assertEqual(2, len(vulnerability_ids))
@@ -118,7 +118,7 @@ def test_check_all_fields(self):
self.assertIsNone(finding.cvssv3_score)
self.assertEqual('Info', finding.severity)
self.assertIsNone(finding.mitigation)
- references = '''**Vulnerability Datasource:** https://security-tracker.debian.org/tracker/CVE-2004-0971
+ references = """**Vulnerability Datasource:** https://security-tracker.debian.org/tracker/CVE-2004-0971
**Related Vulnerability Datasource:** https://nvd.nist.gov/vuln/detail/CVE-2004-0971
**Related Vulnerability URLs:**
- http://www.securityfocus.com/bid/11289
@@ -128,7 +128,7 @@ def test_check_all_fields(self):
- http://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=136304
- https://exchange.xforce.ibmcloud.com/vulnerabilities/17583
- https://oval.cisecurity.org/repository/search/definition/oval%3Aorg.mitre.oval%3Adef%3A10497
-- https://lists.apache.org/thread.html/rc713534b10f9daeee2e0990239fa407e2118e4aa9e88a7041177497c@%3Cissues.guacamole.apache.org%3E'''
+- https://lists.apache.org/thread.html/rc713534b10f9daeee2e0990239fa407e2118e4aa9e88a7041177497c@%3Cissues.guacamole.apache.org%3E"""
self.assertEqual(references, finding.references)
self.assertEqual('libgssapi-krb5-2', finding.component_name)
self.assertEqual('1.17-3+deb10u3', finding.component_version)
@@ -138,12 +138,12 @@ def test_check_all_fields(self):
finding = findings[1]
self.assertEqual('CVE-2021-32626 in redis:4.0.2', finding.title)
- description = '''**Vulnerability Namespace:** nvd
+ description = """**Vulnerability Namespace:** nvd
**Vulnerability Description:** Redis is an open source, in-memory database that persists on disk. In affected versions specially crafted Lua scripts executing in Redis can cause the heap-based Lua stack to be overflowed, due to incomplete checks for this condition. This can result with heap corruption and potentially remote code execution. This problem exists in all versions of Redis with Lua scripting support, starting from 2.6. The problem is fixed in versions 6.2.6, 6.0.16 and 5.0.14. For users unable to update an additional workaround to mitigate the problem without patching the redis-server executable is to prevent users from executing Lua scripts. This can be done using ACL to restrict EVAL and EVALSHA commands.
**Matchers:**
- python-matcher
- python2-matcher
-**Package URL:** pkg:pypi/redis@4.0.2'''
+**Package URL:** pkg:pypi/redis@4.0.2"""
self.assertEqual(description, finding.description)
vulnerability_ids = finding.unsaved_vulnerability_ids
self.assertEqual(1, len(vulnerability_ids))
@@ -151,11 +151,11 @@ def test_check_all_fields(self):
self.assertEqual(1352, finding.cwe)
self.assertEqual('CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H', finding.cvssv3)
self.assertEqual('High', finding.severity)
- mitigation = '''Upgrade to version:
+ mitigation = """Upgrade to version:
- fix_1
-- fix_2'''
+- fix_2"""
self.assertEqual(mitigation, finding.mitigation)
- references = '''**Vulnerability Datasource:** https://nvd.nist.gov/vuln/detail/CVE-2021-32626
+ references = """**Vulnerability Datasource:** https://nvd.nist.gov/vuln/detail/CVE-2021-32626
**Vulnerability URLs:**
- https://github.com/redis/redis/commit/666ed7facf4524bf6d19b11b20faa2cf93fdf591
- https://github.com/redis/redis/security/advisories/GHSA-p486-xggp-782c
@@ -164,7 +164,7 @@ def test_check_all_fields(self):
- https://lists.apache.org/thread.html/r75490c61c2cb7b6ae2c81238fd52ae13636c60435abcd732d41531a0@%3Ccommits.druid.apache.org%3E
- https://security.netapp.com/advisory/ntap-20211104-0003/
- https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/WR5WKJWXD4D6S3DJCZ56V74ESLTDQRAB/
-- https://www.debian.org/security/2021/dsa-5001'''
+- https://www.debian.org/security/2021/dsa-5001"""
self.assertEqual(references, finding.references)
self.assertEqual('redis', finding.component_name)
self.assertEqual('4.0.2', finding.component_version)
@@ -174,10 +174,10 @@ def test_check_all_fields(self):
finding = findings[2]
self.assertEqual('CVE-2021-33574 in libc-bin:2.28-10', finding.title)
- description = '''**Vulnerability Namespace:** debian:10
+ description = """**Vulnerability Namespace:** debian:10
**Related Vulnerability Description:** The mq_notify function in the GNU C Library (aka glibc) versions 2.32 and 2.33 has a use-after-free. It may use the notification thread attributes object (passed through its struct sigevent parameter) after it has been freed by the caller, leading to a denial of service (application crash) or possibly unspecified other impact.
**Matcher:** dpkg-matcher
-**Package URL:** pkg:deb/debian/libc-bin@2.28-10?arch=amd64'''
+**Package URL:** pkg:deb/debian/libc-bin@2.28-10?arch=amd64"""
self.assertEqual(description, finding.description)
vulnerability_ids = finding.unsaved_vulnerability_ids
self.assertEqual(2, len(vulnerability_ids))
@@ -187,7 +187,7 @@ def test_check_all_fields(self):
self.assertEqual('CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H', finding.cvssv3)
self.assertEqual('Critical', finding.severity)
self.assertIsNone(finding.mitigation)
- references = '''**Vulnerability Datasource:** https://security-tracker.debian.org/tracker/CVE-2021-33574
+ references = """**Vulnerability Datasource:** https://security-tracker.debian.org/tracker/CVE-2021-33574
**Related Vulnerability Datasource:** https://nvd.nist.gov/vuln/detail/CVE-2021-33574
**Related Vulnerability URLs:**
- https://sourceware.org/bugzilla/show_bug.cgi?id=27896
@@ -195,7 +195,7 @@ def test_check_all_fields(self):
- https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/RBUUWUGXVILQXVWEOU7N42ICHPJNAEUP/
- https://security.netapp.com/advisory/ntap-20210629-0005/
- https://security.gentoo.org/glsa/202107-07
-- https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/KJYYIMDDYOHTP2PORLABTOHYQYYREZDD/'''
+- https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/KJYYIMDDYOHTP2PORLABTOHYQYYREZDD/"""
self.assertEqual(references, finding.references)
self.assertEqual('libc-bin', finding.component_name)
self.assertEqual('2.28-10', finding.component_version)
@@ -205,10 +205,10 @@ def test_check_all_fields(self):
finding = findings[3]
self.assertEqual('CVE-2021-33574 in libc6:2.28-10', finding.title)
- description = '''**Vulnerability Namespace:** debian:10
+ description = """**Vulnerability Namespace:** debian:10
**Related Vulnerability Description:** The mq_notify function in the GNU C Library (aka glibc) versions 2.32 and 2.33 has a use-after-free. It may use the notification thread attributes object (passed through its struct sigevent parameter) after it has been freed by the caller, leading to a denial of service (application crash) or possibly unspecified other impact.
**Matcher:** dpkg-matcher
-**Package URL:** pkg:deb/debian/libc6@2.28-10?arch=amd64'''
+**Package URL:** pkg:deb/debian/libc6@2.28-10?arch=amd64"""
self.assertEqual(description, finding.description)
vulnerability_ids = finding.unsaved_vulnerability_ids
self.assertEqual(2, len(vulnerability_ids))
@@ -218,7 +218,7 @@ def test_check_all_fields(self):
self.assertEqual('CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H', finding.cvssv3)
self.assertEqual('Critical', finding.severity)
self.assertIsNone(finding.mitigation)
- references = '''**Vulnerability Datasource:** https://security-tracker.debian.org/tracker/CVE-2021-33574
+ references = """**Vulnerability Datasource:** https://security-tracker.debian.org/tracker/CVE-2021-33574
**Related Vulnerability Datasource:** https://nvd.nist.gov/vuln/detail/CVE-2021-33574
**Related Vulnerability URLs:**
- https://sourceware.org/bugzilla/show_bug.cgi?id=27896
@@ -226,7 +226,7 @@ def test_check_all_fields(self):
- https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/RBUUWUGXVILQXVWEOU7N42ICHPJNAEUP/
- https://security.netapp.com/advisory/ntap-20210629-0005/
- https://security.gentoo.org/glsa/202107-07
-- https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/KJYYIMDDYOHTP2PORLABTOHYQYYREZDD/'''
+- https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/KJYYIMDDYOHTP2PORLABTOHYQYYREZDD/"""
self.assertEqual(references, finding.references)
self.assertEqual('libc6', finding.component_name)
self.assertEqual('2.28-10', finding.component_version)
@@ -236,11 +236,11 @@ def test_check_all_fields(self):
finding = findings[4]
self.assertEqual('GHSA-v6rh-hp5x-86rv in Django:3.2.9', finding.title)
- description = '''**Vulnerability Namespace:** github:python
+ description = """**Vulnerability Namespace:** github:python
**Vulnerability Description:** Potential bypass of an upstream access control based on URL paths in Django
**Related Vulnerability Description:** In Django 2.2 before 2.2.25, 3.1 before 3.1.14, and 3.2 before 3.2.10, HTTP requests for URLs with trailing newlines could bypass upstream access control based on URL paths.
**Matcher:** python-matcher
-**Package URL:** pkg:pypi/Django@3.2.9'''
+**Package URL:** pkg:pypi/Django@3.2.9"""
self.assertEqual(description, finding.description)
vulnerability_ids = finding.unsaved_vulnerability_ids
self.assertEqual(2, len(vulnerability_ids))
@@ -251,13 +251,13 @@ def test_check_all_fields(self):
self.assertEqual('High', finding.severity)
mitigation = 'Upgrade to version: 3.2.10'
self.assertEqual(mitigation, finding.mitigation)
- references = '''**Vulnerability Datasource:** https://github.com/advisories/GHSA-v6rh-hp5x-86rv
+ references = """**Vulnerability Datasource:** https://github.com/advisories/GHSA-v6rh-hp5x-86rv
**Related Vulnerability Datasource:** https://nvd.nist.gov/vuln/detail/CVE-2021-44420
**Related Vulnerability URLs:**
- https://docs.djangoproject.com/en/3.2/releases/security/
- https://www.openwall.com/lists/oss-security/2021/12/07/1
- https://www.djangoproject.com/weblog/2021/dec/07/security-releases/
-- https://groups.google.com/forum/#!forum/django-announce'''
+- https://groups.google.com/forum/#!forum/django-announce"""
self.assertEqual(references, finding.references)
self.assertEqual('Django', finding.component_name)
self.assertEqual('3.2.9', finding.component_version)
diff --git a/unittests/tools/test_azure_security_center_recommendations_parser.py b/unittests/tools/test_azure_security_center_recommendations_parser.py
index e5c9823b14..5865ea02c9 100644
--- a/unittests/tools/test_azure_security_center_recommendations_parser.py
+++ b/unittests/tools/test_azure_security_center_recommendations_parser.py
@@ -25,14 +25,14 @@ def test_parse_file_with_multiple_findings(self):
self.assertEqual(date.fromisoformat('2021-09-28'), finding.date)
self.assertEqual(1032, finding.cwe)
self.assertEqual('Low', finding.severity)
- description = '''**Recommendation:** Virtual networks should be protected by Azure Firewall
+ description = """**Recommendation:** Virtual networks should be protected by Azure Firewall
**Resource Name:** my_virtual_network
**Resource Type:** virtualnetworks
**Resource Group:** my_resource_group
**Description:** Some of your virtual networks aren't protected with a firewall. Use Azure Firewall to restrict access to your virtual networks and prevent potential threats. To learn more about Azure Firewall, Click here
**Controls:** Restrict unauthorized network access
**Subscription:** My first subscription
-**Subscription Id:** 9cfbad7a-7369-42e4-bcce-7677c5b3a44b'''
+**Subscription Id:** 9cfbad7a-7369-42e4-bcce-7677c5b3a44b"""
self.assertEqual(description, finding.description)
mitigation = 'To protect your virtual networks with Azure Firewall: 1. From the list below, select a network. Or select Take action if you\'ve arrived here from a specific virtual network page. 2. Follow the Azure Firewall deployment instructions. Make sure to configure all default routes properly.Important: Azure Firewall is billed separately from Azure Security Center. Learn more about Azure Firewall pricing.'
self.assertEqual(mitigation, finding.mitigation)
@@ -49,14 +49,14 @@ def test_parse_file_with_multiple_findings(self):
self.assertEqual(date.fromisoformat('2021-09-28'), finding.date)
self.assertEqual(1032, finding.cwe)
self.assertEqual('High', finding.severity)
- description = '''**Recommendation:** Azure Defender for Resource Manager should be enabled
+ description = """**Recommendation:** Azure Defender for Resource Manager should be enabled
**Resource Name:** My first subscription
**Resource Type:** Subscription
**Description:** Azure Defender for Resource Manager automatically monitors the resource management operations in your organization. Azure Defender detects threats and alerts you about suspicious activity. Learn more about the capabilities of Azure Defender for Resource Manager at https://aka.ms/defender-for-resource-manager . Enabling this Azure Defender plan results in charges. Learn about the pricing details per region on Security Center's pricing page: https://aka.ms/pricing-security-center .
**Controls:** Enable Advanced Threat Protection
**Subscription:** My first subscription
**Subscription Id:** 9cfbad7a-7369-42e4-bcce-7677c5b3a44b
-**Native Cloud Account Id:** my_native_cloud_id'''
+**Native Cloud Account Id:** my_native_cloud_id"""
self.assertEqual(description, finding.description)
mitigation = 'To enable Azure Defender for Resource Manager on your subscription: 1. Open Security Center\'s Pricing & settings page. 2. Select the subscription on which you want to enable Azure Defender. 3. Under "Select Azure Defender plan by resource type", set "Resource Manager" to "On".'
self.assertEqual(mitigation, finding.mitigation)
@@ -73,14 +73,14 @@ def test_parse_file_with_multiple_findings(self):
self.assertEqual(date.fromisoformat('2021-09-28'), finding.date)
self.assertEqual(1032, finding.cwe)
self.assertEqual('Medium', finding.severity)
- description = '''**Recommendation:** Storage account should use a private link connection
+ description = """**Recommendation:** Storage account should use a private link connection
**Resource Name:** swe10032201245e263h
**Resource Type:** storageaccounts
**Resource Group:** storage-westeurope
**Description:** Private links enforce secure communication, by providing private connectivity to the storage account
**Controls:** Restrict unauthorized network access
**Subscription:** My first subscription
-**Subscription Id:** 9cfbad7a-7369-42e4-bcce-7677c5b3a44b'''
+**Subscription Id:** 9cfbad7a-7369-42e4-bcce-7677c5b3a44b"""
self.assertEqual(description, finding.description)
mitigation = 'To enforce secure communications for your storage accounts, add a private endpoint as described here: https://aka.ms/connectprivatelytostorageaccount.'
self.assertEqual(mitigation, finding.mitigation)
diff --git a/unittests/tools/test_gitleaks_parser.py b/unittests/tools/test_gitleaks_parser.py
index b712777696..43313e2e3f 100644
--- a/unittests/tools/test_gitleaks_parser.py
+++ b/unittests/tools/test_gitleaks_parser.py
@@ -100,16 +100,16 @@ def test_parse_file_from_version_8(self):
with self.subTest(i=1):
finding = findings[1]
self.assertEqual("Hard coded RSA private key found in conf/rsa.pk", finding.title)
- description = '''**Secret:** -----BEGIN RSA PRIVATE KEY-----
+ description = """**Secret:** -----BEGIN RSA PRIVATE KEY-----
**Match:** -----BEGIN RSA PRIVATE KEY-----
-**Rule Id:** RSA-PK'''
+**Rule Id:** RSA-PK"""
self.assertEqual(description, finding.description)
self.assertIn("tag1", finding.unsaved_tags)
self.assertIn("tag2", finding.unsaved_tags)
with self.subTest(i=2):
finding = findings[2]
self.assertEqual("Hard coded Generic API Key found in tests/api.py", finding.title)
- description = '''**Secret:** dfjksdjfs3294dfjlsdaf213
+ description = """**Secret:** dfjksdjfs3294dfjlsdaf213
**Match:** apikey = "dfjksdjfs3294dfjlsdaf213"
**Commit message:**
```
@@ -120,5 +120,5 @@ def test_parse_file_from_version_8(self):
```
**Commit hash:** 69235ea9ea4d59e18e2cc3c295526de46aa1365c1f0c7a95a22ff1537acdf517
**Commit date:** 2016-09-16T18:17:59Z
-**Rule Id:** generic-api-key'''
+**Rule Id:** generic-api-key"""
self.assertEqual(description, finding.description)
diff --git a/unittests/tools/test_jfrogxray_parser.py b/unittests/tools/test_jfrogxray_parser.py
index 267ebeed73..ba75e6ca8c 100644
--- a/unittests/tools/test_jfrogxray_parser.py
+++ b/unittests/tools/test_jfrogxray_parser.py
@@ -34,8 +34,8 @@ def test_parse_file_with_many_vulns2(self):
item = findings[0]
self.assertEqual("No CVE - pip:9.0.1", item.title)
- description = '''pip PyPI (Python Packaging Index) PipXmlrpcTransport._download_http_url() Function Content-Disposition Header Path Traversal Arbitrary File Write Weakness
-**Provider:** JFrog'''
+ description = """pip PyPI (Python Packaging Index) PipXmlrpcTransport._download_http_url() Function Content-Disposition Header Path Traversal Arbitrary File Write Weakness
+**Provider:** JFrog"""
self.assertEqual(description, item.description)
self.assertEqual("High", item.severity)
self.assertEqual("pip", item.component_name)
@@ -46,10 +46,10 @@ def test_parse_file_with_many_vulns2(self):
item = findings[1]
self.assertEqual("CVE-2020-14386 - ubuntu:bionic:linux:4.15.0-88.88", item.title)
- description = '''A flaw was found in the Linux kernel before 5.9-rc4. Memory corruption can be exploited to gain root privileges from unprivileged processes. The highest threat from this vulnerability is to data confidentiality and integrity.
+ description = """A flaw was found in the Linux kernel before 5.9-rc4. Memory corruption can be exploited to gain root privileges from unprivileged processes. The highest threat from this vulnerability is to data confidentiality and integrity.
**Versions that are vulnerable:**
< 4.15.0-117.118
-**Provider:** JFrog'''
+**Provider:** JFrog"""
self.assertEqual(description, item.description)
self.assertEqual("High", item.severity)
self.assertEqual("ubuntu:bionic:linux", item.component_name)
diff --git a/unittests/tools/test_kics_parser.py b/unittests/tools/test_kics_parser.py
index 9d3f064dcb..04078968b1 100644
--- a/unittests/tools/test_kics_parser.py
+++ b/unittests/tools/test_kics_parser.py
@@ -26,10 +26,10 @@ def test_parse_many_findings(self):
self.assertEqual("test/charts/example/terraform/main.tf", finding.file_path)
self.assertEqual(25, finding.line)
self.assertEqual("Common", finding.component_name)
- description = '''Query to find passwords and secrets in infrastructure code.
+ description = """Query to find passwords and secrets in infrastructure code.
**Platform:** Common
**Category:** Secret Management
-**Issue type:** RedundantAttribute'''
+**Issue type:** RedundantAttribute"""
self.assertEqual(description, finding.description)
self.assertEqual('https://kics.io/', finding.references)
self.assertEqual(1, finding.nb_occurences)
@@ -44,11 +44,11 @@ def test_parse_many_findings(self):
self.assertEqual("test/charts/example/terraform/s3.tf", finding.file_path)
self.assertEqual(36, finding.line)
self.assertEqual("Terraform", finding.component_name)
- description = '''S3 Buckets must not allow Actions From All Principals, as to prevent leaking private information to the entire internet or allow unauthorized data tampering / deletion. This means the 'Effect' must not be 'Allow' when there are All Principals
+ description = """S3 Buckets must not allow Actions From All Principals, as to prevent leaking private information to the entire internet or allow unauthorized data tampering / deletion. This means the 'Effect' must not be 'Allow' when there are All Principals
**Platform:** Terraform
**Category:** Access Control
**Issue type:** IncorrectValue
-**Actual value:** aws_s3_bucket_policy[this].policy.Principal is equal to or contains \'*\''''
+**Actual value:** aws_s3_bucket_policy[this].policy.Principal is equal to or contains \'*\'"""
self.assertEqual(description, finding.description)
self.assertEqual('https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_policy', finding.references)
self.assertEqual(1, finding.nb_occurences)
diff --git a/unittests/tools/test_mozilla_observatory_parser.py b/unittests/tools/test_mozilla_observatory_parser.py
index a84bc8c122..2dfd4b1f32 100644
--- a/unittests/tools/test_mozilla_observatory_parser.py
+++ b/unittests/tools/test_mozilla_observatory_parser.py
@@ -105,7 +105,7 @@ def test_parse_file_cli_juicy(self):
elif "subresource-integrity" == finding.vuln_id_from_tool:
with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool):
self.assertTrue(finding.active)
- self.assertEqual("Subresource Integrity (SRI) not implemented, and external scripts are loaded over HTTP or use protocol-relative URLs via src=\"//...\"", finding.title)
+ self.assertEqual('Subresource Integrity (SRI) not implemented, and external scripts are loaded over HTTP or use protocol-relative URLs via src="//..."', finding.title)
self.assertEqual("High", finding.severity)
self.assertIn("Subresource Integrity (SRI) not implemented", finding.description)
elif "redirection" == finding.vuln_id_from_tool:
@@ -158,7 +158,7 @@ def test_parse_file_cli_nmap_scanme(self):
elif "subresource-integrity" == finding.vuln_id_from_tool:
with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool):
self.assertTrue(finding.active)
- self.assertEqual("Subresource Integrity (SRI) not implemented, and external scripts are loaded over HTTP or use protocol-relative URLs via src=\"//...\"", finding.title)
+ self.assertEqual('Subresource Integrity (SRI) not implemented, and external scripts are loaded over HTTP or use protocol-relative URLs via src="//..."', finding.title)
self.assertEqual("High", finding.severity)
self.assertIn("Subresource Integrity (SRI) not implemented", finding.description)
elif "redirection" == finding.vuln_id_from_tool:
diff --git a/unittests/tools/test_nexpose_parser.py b/unittests/tools/test_nexpose_parser.py
index e53e90ce0e..d3a9af3884 100644
--- a/unittests/tools/test_nexpose_parser.py
+++ b/unittests/tools/test_nexpose_parser.py
@@ -64,7 +64,7 @@ def test_nexpose_parser_has_many_finding(self):
# vuln 5
finding = findings[5]
- self.assertEqual("Default SSH password: root password \"root\"", finding.title)
+ self.assertEqual('Default SSH password: root password "root"', finding.title)
self.assertEqual(1, len(finding.unsaved_endpoints))
# vuln 5 - endpoint
diff --git a/unittests/tools/test_nikto_parser.py b/unittests/tools/test_nikto_parser.py
index 290a3a8a37..4498095381 100644
--- a/unittests/tools/test_nikto_parser.py
+++ b/unittests/tools/test_nikto_parser.py
@@ -165,7 +165,7 @@ def test_parse_file_xml_another(self):
self.assertIsNone(endpoint.path)
with self.subTest(i=5):
finding = findings[5]
- self.assertEqual("The Content-Encoding header is set to \"deflate\" this may mean that the server is vulnerable to the BREACH attack.", finding.title)
+ self.assertEqual('The Content-Encoding header is set to "deflate" this may mean that the server is vulnerable to the BREACH attack.', finding.title)
self.assertEqual("999966", finding.vuln_id_from_tool)
self.assertEqual(1, finding.nb_occurences)
self.assertEqual("Info", finding.severity)
diff --git a/unittests/tools/test_qualys_parser.py b/unittests/tools/test_qualys_parser.py
index 075b9d4a3b..be96a3334a 100644
--- a/unittests/tools/test_qualys_parser.py
+++ b/unittests/tools/test_qualys_parser.py
@@ -153,7 +153,7 @@ def test_parse_file_with_cvss_values_and_scores(self):
finding_cvssv3_score = finding
if finding.unsaved_endpoints[0].host == "demo13.s02.sjc01.qualys.com" and finding.title == "QID-370876 | AMD Processors Multiple Security Vulnerabilities (RYZENFALL/MASTERKEY/CHIMERA-FW/FALLOUT)":
finding_no_cvssv3_at_detection = finding
- if finding.unsaved_endpoints[0].host == "demo14.s02.sjc01.qualys.com" and finding.title == "QID-121695 | NTP \"monlist\" Feature Denial of Service Vulnerability":
+ if finding.unsaved_endpoints[0].host == "demo14.s02.sjc01.qualys.com" and finding.title == 'QID-121695 | NTP "monlist" Feature Denial of Service Vulnerability':
finding_no_cvssv3 = finding
# The CVSS Vector is not used from the Knowledgebase
self.assertEqual(
diff --git a/unittests/tools/test_semgrep_parser.py b/unittests/tools/test_semgrep_parser.py
index f2f329a15a..4287fa7bdf 100644
--- a/unittests/tools/test_semgrep_parser.py
+++ b/unittests/tools/test_semgrep_parser.py
@@ -21,9 +21,9 @@ def test_parse_one_finding(self):
self.assertEqual("src/main/java/org/owasp/benchmark/testcode/BenchmarkTest02194.java", finding.file_path)
self.assertEqual(64, finding.line)
self.assertEqual(696, finding.cwe)
- self.assertEqual("javax crypto Cipher.getInstance(\"AES/GCM/NoPadding\");", finding.mitigation)
+ self.assertEqual('javax crypto Cipher.getInstance("AES/GCM/NoPadding");', finding.mitigation)
self.assertEqual("java.lang.security.audit.cbc-padding-oracle.cbc-padding-oracle", finding.vuln_id_from_tool)
- self.assertIn("javax.crypto.Cipher c = javax.crypto.Cipher.getInstance(\"DES/CBC/PKCS5Padding\");", finding.description)
+ self.assertIn('javax.crypto.Cipher c = javax.crypto.Cipher.getInstance("DES/CBC/PKCS5Padding");', finding.description)
self.assertIn("Using CBC with PKCS5Padding is susceptible to padding orcale attacks", finding.description)
def test_parse_many_finding(self):
@@ -36,14 +36,14 @@ def test_parse_many_finding(self):
self.assertEqual("src/main/java/org/owasp/benchmark/testcode/BenchmarkTest02194.java", finding.file_path)
self.assertEqual(64, finding.line)
self.assertEqual(696, finding.cwe)
- self.assertEqual("javax crypto Cipher.getInstance(\"AES/GCM/NoPadding\");", finding.mitigation)
+ self.assertEqual('javax crypto Cipher.getInstance("AES/GCM/NoPadding");', finding.mitigation)
self.assertEqual("java.lang.security.audit.cbc-padding-oracle.cbc-padding-oracle", finding.vuln_id_from_tool)
finding = findings[2]
self.assertEqual("Info", finding.severity)
self.assertEqual("src/main/java/org/owasp/benchmark/testcode/BenchmarkTest01150.java", finding.file_path)
self.assertEqual(66, finding.line)
self.assertEqual(696, finding.cwe)
- self.assertEqual("javax crypto Cipher.getInstance(\"AES/GCM/NoPadding\");", finding.mitigation)
+ self.assertEqual('javax crypto Cipher.getInstance("AES/GCM/NoPadding");', finding.mitigation)
self.assertEqual("java.lang.security.audit.cbc-padding-oracle.cbc-padding-oracle", finding.vuln_id_from_tool)
def test_parse_repeated_finding(self):
@@ -57,7 +57,7 @@ def test_parse_repeated_finding(self):
self.assertEqual(66, finding.line)
self.assertEqual("java.lang.security.audit.cbc-padding-oracle.cbc-padding-oracle", finding.vuln_id_from_tool)
self.assertEqual(696, finding.cwe)
- self.assertEqual("javax crypto Cipher.getInstance(\"AES/GCM/NoPadding\");", finding.mitigation)
+ self.assertEqual('javax crypto Cipher.getInstance("AES/GCM/NoPadding");', finding.mitigation)
self.assertEqual(2, finding.nb_occurences)
def test_parse_many_vulns(self):
diff --git a/unittests/tools/test_sslyze_parser.py b/unittests/tools/test_sslyze_parser.py
index 0e961f4531..03194a31d0 100644
--- a/unittests/tools/test_sslyze_parser.py
+++ b/unittests/tools/test_sslyze_parser.py
@@ -26,12 +26,12 @@ def test_parse_json_file_with_one_target_has_one_vuln_old(self):
self.assertEqual(1, len(findings))
finding = findings[0]
self.assertEqual('Problems in certificate deployments (www.example.com:443)', finding.title)
- description = '''www.example.com:443 has problems in certificate deployments:
+ description = """www.example.com:443 has problems in certificate deployments:
- certificate has expired for trust store Android, version 9.0.0_r9
- certificate has expired for trust store Apple, version iOS 13, iPadOS 13, macOS 10.15, watchOS 6, and tvOS 13
- certificate has expired for trust store Java, version jdk-13.0.2
- certificate has expired for trust store Mozilla, version 2019-11-28
- - certificate has expired for trust store Windows, version 2020-05-04'''
+ - certificate has expired for trust store Windows, version 2020-05-04"""
self.assertEqual(description, finding.description)
self.assertEqual('Medium', finding.severity)
@@ -74,7 +74,7 @@ def test_parse_json_file_with_one_target_has_one_vuln_new(self):
self.assertEqual(1, len(findings))
finding = findings[0]
self.assertEqual('Unrecommended cipher suites for TLS 1.2 (example.com:443)', finding.title)
- description = '''example.com:443 accepts unrecommended cipher suites for TLS 1.2:
+ description = """example.com:443 accepts unrecommended cipher suites for TLS 1.2:
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_256_CCM_8
- TLS_RSA_WITH_AES_256_CCM
@@ -90,7 +90,7 @@ def test_parse_json_file_with_one_target_has_one_vuln_new(self):
- TLS_DHE_RSA_WITH_AES_256_CCM_8
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA
- TLS_DHE_RSA_WITH_AES_128_CCM_8
- - TLS_DHE_RSA_WITH_AES_128_CBC_SHA'''
+ - TLS_DHE_RSA_WITH_AES_128_CBC_SHA"""
self.assertEqual(description, finding.description)
self.assertEqual('Medium', finding.severity)
self.assertEqual(
@@ -118,7 +118,7 @@ def test_parse_json_file_with_two_target_has_many_vuln_new(self):
# We look at 2 examplary findings, the others are similar and don't give more test coverage
finding = findings[0]
self.assertEqual('Unrecommended cipher suites for TLS 1.2 (example.com:443)', finding.title)
- description = '''example.com:443 accepts unrecommended cipher suites for TLS 1.2:
+ description = """example.com:443 accepts unrecommended cipher suites for TLS 1.2:
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_256_CBC_SHA256
- TLS_RSA_WITH_AES_256_CBC_SHA
@@ -129,7 +129,7 @@ def test_parse_json_file_with_two_target_has_many_vuln_new(self):
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA
- - TLS_DHE_RSA_WITH_AES_128_CBC_SHA'''
+ - TLS_DHE_RSA_WITH_AES_128_CBC_SHA"""
self.assertEqual(description, finding.description)
self.assertEqual('Medium', finding.severity)
self.assertEqual(
diff --git a/unittests/tools/test_trivy_parser.py b/unittests/tools/test_trivy_parser.py
index e61cd70cd8..025127b704 100644
--- a/unittests/tools/test_trivy_parser.py
+++ b/unittests/tools/test_trivy_parser.py
@@ -81,25 +81,25 @@ def test_misconfigurations_and_secrets(self):
finding = findings[2]
self.assertEqual('DS002 - Image user should not be \'root\'', finding.title)
self.assertEqual('High', finding.severity)
- description = '''**Target:** Dockerfile
+ description = """**Target:** Dockerfile
**Type:** Dockerfile Security Check
Running containers with 'root' user can lead to a container escape situation. It is a best practice to run containers as non-root users, which can be done by adding a 'USER' statement to the Dockerfile.
Specify at least 1 USER command in Dockerfile with non-root user as argument
-'''
+"""
self.assertEqual(description, finding.description)
self.assertEqual('Add \'USER \' line to the Dockerfile', finding.mitigation)
- references = '''https://avd.aquasec.com/misconfig/ds002
-https://docs.docker.com/develop/develop-images/dockerfile_best-practices/'''
+ references = """https://avd.aquasec.com/misconfig/ds002
+https://docs.docker.com/develop/develop-images/dockerfile_best-practices/"""
self.assertEqual(references, finding.references)
self.assertEqual(['config', 'dockerfile'], finding.tags)
finding = findings[3]
self.assertEqual('Secret detected in Dockerfile - GitHub Personal Access Token', finding.title)
self.assertEqual('Critical', finding.severity)
- description = '''GitHub Personal Access Token
+ description = """GitHub Personal Access Token
**Category:** GitHub
**Match:** ENV GITHUB_PAT=*****
-'''
+"""
self.assertEqual(description, finding.description)
self.assertEqual('Dockerfile', finding.file_path)
self.assertEqual(24, finding.line)
@@ -113,13 +113,13 @@ def test_kubernetes(self):
finding = findings[0]
self.assertEqual('CVE-2020-27350 apt 1.8.2.1', finding.title)
self.assertEqual('Medium', finding.severity)
- description = '''apt: integer overflows and underflows while parsing .deb packages
+ description = """apt: integer overflows and underflows while parsing .deb packages
**Target:** gcr.io/google_samples/gb-redis-follower:v2 (debian 10.4)
**Type:** debian
**Fixed version:** 1.8.2.2
APT had several integer overflows and underflows while parsing .deb packages, aka GHSL-2020-168 GHSL-2020-169, in files apt-pkg/contrib/extracttar.cc, apt-pkg/deb/debfile.cc, and apt-pkg/contrib/arfile.cc. This issue affects: apt 1.2.32ubuntu0 versions prior to 1.2.32ubuntu0.2; 1.6.12ubuntu0 versions prior to 1.6.12ubuntu0.2; 2.0.2ubuntu0 versions prior to 2.0.2ubuntu0.2; 2.1.10ubuntu0 versions prior to 2.1.10ubuntu0.1;
-'''
+"""
self.assertEqual(description, finding.description)
self.assertEqual('1.8.2.2', finding.mitigation)
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
@@ -132,13 +132,13 @@ def test_kubernetes(self):
finding = findings[5]
self.assertEqual('CVE-2020-27350 apt 1.8.2.1', finding.title)
self.assertEqual('Medium', finding.severity)
- description = '''apt: integer overflows and underflows while parsing .deb packages
+ description = """apt: integer overflows and underflows while parsing .deb packages
**Target:** docker.io/redis:6.0.5 (debian 10.4)
**Type:** debian
**Fixed version:** 1.8.2.2
APT had several integer overflows and underflows while parsing .deb packages, aka GHSL-2020-168 GHSL-2020-169, in files apt-pkg/contrib/extracttar.cc, apt-pkg/deb/debfile.cc, and apt-pkg/contrib/arfile.cc. This issue affects: apt 1.2.32ubuntu0 versions prior to 1.2.32ubuntu0.2; 1.6.12ubuntu0 versions prior to 1.6.12ubuntu0.2; 2.0.2ubuntu0 versions prior to 2.0.2ubuntu0.2; 2.1.10ubuntu0 versions prior to 2.1.10ubuntu0.1;
-'''
+"""
self.assertEqual(description, finding.description)
self.assertEqual('1.8.2.2', finding.mitigation)
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
@@ -150,7 +150,7 @@ def test_kubernetes(self):
finding = findings[10]
self.assertEqual('KSV001 - Process can elevate its own privileges', finding.title)
self.assertEqual('Medium', finding.severity)
- description = '''**Target:** Deployment/redis-follower
+ description = """**Target:** Deployment/redis-follower
**Type:** Kubernetes Security Check
A program inside the container can elevate its own privileges and run as root, which might give the program control over the container and node.
@@ -165,7 +165,7 @@ def test_kubernetes(self):
138 resources:
139 requests:
140 cpu: 100m
-141'''
+141"""
re_description = re.sub(r"\s+", " ", description)
re_finding_description = re.sub(r"\s+", " ", finding.description)
self.assertEqual(re_description.strip(), re_finding_description.strip())
@@ -186,10 +186,10 @@ def test_license_scheme(self):
self.assertEqual("", finding.file_path)
self.assertEqual(1, finding.scanner_confidence)
self.assertEqual("", finding.url)
- description = '''GPL-2.0
+ description = """GPL-2.0
**Category:** restricted
**Package:** alpine-baselayout
-'''
+"""
self.assertEqual(description, finding.description)
def test_issue_9092(self):
diff --git a/unittests/tools/test_trufflehog3_parser.py b/unittests/tools/test_trufflehog3_parser.py
index 1b24c35e2b..f80f9ae834 100644
--- a/unittests/tools/test_trufflehog3_parser.py
+++ b/unittests/tools/test_trufflehog3_parser.py
@@ -65,13 +65,13 @@ def test_many_vulns_current(self):
finding = findings[0]
self.assertEqual('High Entropy found in docker/Dockerfile', finding.title)
self.assertEqual(798, finding.cwe)
- description = '''**Secret:** 964a1afa20dd4a3723002560124dd96f2a9e853f7ef5b86f5c2354af336fca37
+ description = """**Secret:** 964a1afa20dd4a3723002560124dd96f2a9e853f7ef5b86f5c2354af336fca37
**Context:**
3: +FROM python:3.9.7-alpine@sha256:964a1afa20dd4a3723002560124dd96f2a9e853f7ef5b86f5c2354af336fca37
**Branch:** python-ab08dd9
**Commit message:** Bump python from 3.9.7-alpine to 3.10.0-alpine
**Commit hash:** 9c3f4d641d14eba2740febccd902cde300218a8d
-**Commit date:** 2021-10-08T20:14:27+02:00'''
+**Commit date:** 2021-10-08T20:14:27+02:00"""
self.assertEqual(description, finding.description)
self.assertEqual("High", finding.severity)
self.assertEqual('docker/Dockerfile', finding.file_path)
@@ -91,10 +91,10 @@ def test_many_vulns_current(self):
finding = findings[2]
self.assertEqual('High Entropy found in env-file.txt', finding.title)
self.assertEqual(798, finding.cwe)
- description = '''**Secret:** 44c45225cf94e58d0c86f0a31051eb7c52c8f78f
+ description = """**Secret:** 44c45225cf94e58d0c86f0a31051eb7c52c8f78f
**Context:**
10: DD_API_KEY=44c45225cf94e58d0c86f0a31051eb7c52c8f78f
- 11: second line of context'''
+ 11: second line of context"""
self.assertEqual(description, finding.description)
self.assertEqual("Low", finding.severity)
self.assertEqual('env-file.txt', finding.file_path)
From 3b141230e69cec44a13418d9e84e25a620050e8e Mon Sep 17 00:00:00 2001
From: kiblik <5609770+kiblik@users.noreply.github.com>
Date: Wed, 3 Jul 2024 16:47:28 +0200
Subject: [PATCH 018/111] Feat(psql): Use psycopg3 (#10348)
Co-authored-by: Matt Tesauro
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 15799fbde8..6b849277ad 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -35,7 +35,7 @@ Markdown==3.6
mysqlclient==2.1.1
openpyxl==3.1.5
Pillow==10.4.0 # required by django-imagekit
-psycopg2-binary==2.9.9
+psycopg[binary]==3.1.19
cryptography==42.0.8
python-dateutil==2.9.0.post0
pytz==2024.1
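The dependency swap above replaces the psycopg2 binary wheel with psycopg 3 (installed via its `binary` extra). A minimal sketch, assuming DefectDojo keeps Django's standard PostgreSQL backend (recent Django releases support psycopg 3 through the same `django.db.backends.postgresql` engine), for checking which driver is actually installed:

~~~
# Sketch only: verify the psycopg 3 driver after the requirements change.
# psycopg 3 is imported as `psycopg`; the old driver was imported as `psycopg2`.
import psycopg

print(psycopg.__version__)  # expected to match the pinned 3.1.x release
~~~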
From 8b9f9a4baeebdcdb8d924a851a9b8cc13e955fd4 Mon Sep 17 00:00:00 2001
From: Kay Agahd
Date: Wed, 3 Jul 2024 16:23:19 +0100
Subject: [PATCH 019/111] extend AWS prowler v3 parser (#10372)
* add prowler v4 parser
* remove line
* fix typo
* add settings.dist.py although it's written that one should not touch it but use env vars
* add modified .settings.dist.py.sha256sum
* extend prowler v3 parser to also parse prowler v4 reports in ocsf-json format
* update aws_prowler_v3.md
* revert settings
* add modified .settings.dist.py.sha256sum
* revert docker-compose.yml
* make ruff happy
* separate prowler v3 and v4 parsers
* renaming
* add prowler v4 parser
* remove line
* fix typo
* add settings.dist.py although it's written that one should not touch it but use env vars
* add modified .settings.dist.py.sha256sum
* extend prowler v3 parser to also parse prowler v4 reports in ocsf-json format
* update aws_prowler_v3.md
* revert settings
* add modified .settings.dist.py.sha256sum
* make ruff happy
* separate prowler v3 and v4 parsers
* renaming
* Update helm lock file
Signed-off-by: DefectDojo
* make ruff happy
---------
Signed-off-by: DefectDojo
Co-authored-by: DefectDojo
---
.../parsers/file/aws_prowler_v3.md | 72 -----
.../parsers/file/aws_prowler_v3plus.md | 163 ++++++++++++
.../__init__.py | 0
dojo/tools/aws_prowler_v3plus/parser.py | 24 ++
.../prowler_v3.py} | 25 --
dojo/tools/aws_prowler_v3plus/prowler_v4.py | 83 ++++++
.../many_vuln.json | 0
.../aws_prowler_v3plus/many_vuln.ocsf.json | 247 ++++++++++++++++++
.../no_vuln.json | 0
.../aws_prowler_v3plus/no_vuln.ocsf.json | 1 +
.../one_vuln.json | 0
.../aws_prowler_v3plus/one_vuln.ocsf.json | 80 ++++++
unittests/tools/test_aws_prowler_v3_parser.py | 39 ---
.../tools/test_aws_prowler_v3plus_parser.py | 67 +++++
14 files changed, 665 insertions(+), 136 deletions(-)
delete mode 100644 docs/content/en/integrations/parsers/file/aws_prowler_v3.md
create mode 100644 docs/content/en/integrations/parsers/file/aws_prowler_v3plus.md
rename dojo/tools/{aws_prowler_v3 => aws_prowler_v3plus}/__init__.py (100%)
create mode 100644 dojo/tools/aws_prowler_v3plus/parser.py
rename dojo/tools/{aws_prowler_v3/parser.py => aws_prowler_v3plus/prowler_v3.py} (84%)
create mode 100644 dojo/tools/aws_prowler_v3plus/prowler_v4.py
rename unittests/scans/{aws_prowler_v3 => aws_prowler_v3plus}/many_vuln.json (100%)
create mode 100644 unittests/scans/aws_prowler_v3plus/many_vuln.ocsf.json
rename unittests/scans/{aws_prowler_v3 => aws_prowler_v3plus}/no_vuln.json (100%)
create mode 100644 unittests/scans/aws_prowler_v3plus/no_vuln.ocsf.json
rename unittests/scans/{aws_prowler_v3 => aws_prowler_v3plus}/one_vuln.json (100%)
create mode 100644 unittests/scans/aws_prowler_v3plus/one_vuln.ocsf.json
delete mode 100644 unittests/tools/test_aws_prowler_v3_parser.py
create mode 100644 unittests/tools/test_aws_prowler_v3plus_parser.py
diff --git a/docs/content/en/integrations/parsers/file/aws_prowler_v3.md b/docs/content/en/integrations/parsers/file/aws_prowler_v3.md
deleted file mode 100644
index 17dcf9698a..0000000000
--- a/docs/content/en/integrations/parsers/file/aws_prowler_v3.md
+++ /dev/null
@@ -1,72 +0,0 @@
----
-title: "AWS Prowler V3"
-toc_hide: true
----
-
-### File Types
-DefectDojo parser accepts a .json file. Please note: earlier versions of AWS Prowler create output data in a different format. See our other documentation if you are using an earlier version of AWS Prowler: https://documentation.defectdojo.com/integrations/parsers/file/aws_prowler/
-
-JSON reports can be created from the [AWS Prowler V3 CLI](https://docs.prowler.cloud/en/latest/tutorials/reporting/#json) using the following command: `prowler -M json`
-
-### Acceptable JSON Format
-Parser expects an array of assessments. All properties are strings and are required by the parser.
-
-~~~
-
-[
- {
- "AssessmentStartTime": "example_timestamp",
- "FindingUniqueId": "example_uniqueIdFromTool",
- "Provider": "example_provider",
- "CheckID": "acm_certificates_expiration_check",
- "CheckTitle": "Check if ACM Certificates are about to expire in specific days or less",
- "CheckType": [
- "Example ASFF-Compliant Finding Type"
- ],
- "ServiceName": "example_awsServiceName",
- "SubServiceName": "",
- "Status": "FAIL",
- "StatusExtended": "Example status description",
- "Severity": "example_severity",
- "ResourceType": "AwsCertificateManagerCertificate",
- "ResourceDetails": "",
- "Description": "Example general test description.",
- "Risk": "Example test impact description.",
- "RelatedUrl": "https://docs.aws.amazon.com/config/latest/developerguide/acm-certificate-expiration-check.html",
- "Remediation": {
- "Code": {
- "NativeIaC": "",
- "Terraform": "",
- "CLI": "",
- "Other": ""
- },
- "Recommendation": {
- "Text": "Example recommendation.",
- "Url": "https://docs.aws.amazon.com/config/latest/developerguide/example_related_documentation.html"
- }
- },
- "Compliance": {
- "GDPR": [
- "article_32"
- ],
- ...
- },
- "Categories": [],
- "DependsOn": [],
- "RelatedTo": [],
- "Notes": "",
- "Profile": null,
- "AccountId": "example_accountId",
- "OrganizationsInfo": null,
- "Region": "example_region",
- "ResourceId": "example.resource.id.com",
- "ResourceArn": "arn:aws:acm:us-east-1:999999999999:certificate/ffffffff-0000-0000-0000-000000000000",
- "ResourceTags": {}
- }
- ...
-]
-
-~~~
-
-### Sample Scan Data
-Unit tests of AWS Prowler V3 JSON can be found at https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/aws_prowler_v3.
\ No newline at end of file
diff --git a/docs/content/en/integrations/parsers/file/aws_prowler_v3plus.md b/docs/content/en/integrations/parsers/file/aws_prowler_v3plus.md
new file mode 100644
index 0000000000..687d9faf58
--- /dev/null
+++ b/docs/content/en/integrations/parsers/file/aws_prowler_v3plus.md
@@ -0,0 +1,163 @@
+---
+title: "AWS Prowler V3"
+toc_hide: true
+---
+
+### File Types
+The DefectDojo parser accepts a native `json` file produced by prowler v3 with file extension `.json`, or an `ocsf-json` file produced by prowler v4 with file extension `.ocsf.json`.
+Please note: earlier versions of AWS Prowler create output data in a different format. See our other [prowler parser documentation](https://documentation.defectdojo.com/integrations/parsers/file/aws_prowler/) if you are using an earlier version of AWS Prowler.
+
+JSON reports can be created from the [AWS Prowler v3 CLI](https://docs.prowler.com/projects/prowler-open-source/en/v3/tutorials/reporting/#json) using the following command: `prowler -M json`
+
+JSON-OCSF reports can be created from the [AWS Prowler v4 CLI](https://docs.prowler.cloud/en/latest/tutorials/reporting/#json) using the following command: `prowler -M json-ocsf`
+
+
+### Acceptable Prowler v3 JSON format
+The parser expects an array of assessments. All properties shown in the example below are required by the parser.
+
+~~~
+
+[
+ {
+ "AssessmentStartTime": "example_timestamp",
+ "FindingUniqueId": "example_uniqueIdFromTool",
+ "Provider": "example_provider",
+ "CheckID": "acm_certificates_expiration_check",
+ "CheckTitle": "Check if ACM Certificates are about to expire in specific days or less",
+ "CheckType": [
+ "Example ASFF-Compliant Finding Type"
+ ],
+ "ServiceName": "example_awsServiceName",
+ "SubServiceName": "",
+ "Status": "FAIL",
+ "StatusExtended": "Example status description",
+ "Severity": "example_severity",
+ "ResourceType": "AwsCertificateManagerCertificate",
+ "ResourceDetails": "",
+ "Description": "Example general test description.",
+ "Risk": "Example test impact description.",
+ "RelatedUrl": "https://docs.aws.amazon.com/config/latest/developerguide/acm-certificate-expiration-check.html",
+ "Remediation": {
+ "Code": {
+ "NativeIaC": "",
+ "Terraform": "",
+ "CLI": "",
+ "Other": ""
+ },
+ "Recommendation": {
+ "Text": "Example recommendation.",
+ "Url": "https://docs.aws.amazon.com/config/latest/developerguide/example_related_documentation.html"
+ }
+ },
+ "Compliance": {
+ "GDPR": [
+ "article_32"
+ ],
+ ...
+ },
+ "Categories": [],
+ "DependsOn": [],
+ "RelatedTo": [],
+ "Notes": "",
+ "Profile": null,
+ "AccountId": "example_accountId",
+ "OrganizationsInfo": null,
+ "Region": "example_region",
+ "ResourceId": "example.resource.id.com",
+ "ResourceArn": "arn:aws:acm:us-east-1:999999999999:certificate/ffffffff-0000-0000-0000-000000000000",
+ "ResourceTags": {}
+ }
+ ...
+]
+
+~~~
+
+### Acceptable Prowler v4 JSON-OCSF format
+The parser expects an array of assessments. All properties shown in the example below are required by the parser.
+
+~~~
+[{
+ "metadata": {
+ "event_code": "iam_role_administratoraccess_policy_permissive_trust_relationship",
+ "product": {
+ "name": "Prowler",
+ "vendor_name": "Prowler",
+ "version": "4.2.1"
+ },
+ "version": "1.2.0"
+ },
+ "severity_id": 4,
+ "severity": "High",
+ "status": "Suppressed",
+ "status_code": "FAIL",
+ "status_detail": "IAM Role myAdministratorExecutionRole has AdministratorAccess policy attached that has too permissive trust relationship.",
+ "status_id": 3,
+ "unmapped": {
+ "check_type": "",
+ "related_url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_job-functions.html#jf_administrator",
+ "categories": "trustboundaries",
+ "depends_on": "",
+ "related_to": "",
+ "notes": "CAF Security Epic: IAM",
+ "compliance": {}
+ },
+ "activity_name": "Create",
+ "activity_id": 1,
+ "finding_info": {
+ "created_time": "2024-06-03T14:15:19.382075",
+ "desc": "Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship",
+ "product_uid": "prowler",
+ "title": "Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship",
+ "uid": "prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole"
+ },
+ "resources": [
+ {
+ "cloud_partition": "aws",
+ "region": "us-east-1",
+ "data": {
+ "details": ""
+ },
+ "group": {
+ "name": "iam"
+ },
+ "labels": [],
+ "name": "myAdministratorExecutionRole",
+ "type": "AwsIamRole",
+ "uid": "arn:aws:iam::123456789012:role/myAdministratorExecutionRole"
+ }
+ ],
+ "category_name": "Findings",
+ "category_uid": 2,
+ "class_name": "DetectionFinding",
+ "class_uid": 2004,
+ "cloud": {
+ "account": {
+ "name": "",
+ "type": "AWS_Account",
+ "type_id": 10,
+ "uid": "123456789012",
+ "labels": []
+ },
+ "org": {
+ "name": "",
+ "uid": ""
+ },
+ "provider": "aws",
+ "region": "us-east-1"
+ },
+ "event_time": "2024-06-03T14:15:19.382075",
+ "remediation": {
+ "desc": "Apply the principle of least privilege. Instead of AdministratorAccess, assign only the permissions necessary for specific roles and tasks. Create custom IAM policies with minimal permissions based on the principle of least privilege. If a role really needs AdministratorAccess, the trust relationship must be well defined to restrict it usage only to the Principal, Action, Audience and Subject intended for it.",
+ "references": [
+ "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege"
+ ]
+ },
+ "risk_details": "The AWS-managed AdministratorAccess policy grants all actions for all AWS services and for all resources in the account and as such exposes the customer to a significant data leakage threat. It is therefore particularly important that the trust relationship is well defined to restrict it usage only to the Principal, Action, Audience and Subject intended for it.",
+ "type_uid": 200401,
+ "type_name": "Create"
+}]
+
+~~~
+
+### Sample Scan Data
+Unit tests of AWS Prowler v3 JSON and Prowler v4 JSON-OCSF can be found at https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/aws_prowler_v3plus.
\ No newline at end of file
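The new documentation distinguishes the two accepted report flavours purely by file extension (`.json` for prowler v3, `.ocsf.json` for prowler v4). A minimal sketch, not part of the patch, that sanity-checks a v4 report for a few of the top-level fields the OCSF code path reads before uploading it; the file name is hypothetical:

~~~
# Sketch only: quick shape check of a prowler v4 *.ocsf.json report.
import json

EXPECTED_KEYS = ("status_code", "severity", "finding_info", "resources", "cloud", "event_time")

with open("prowler-output.ocsf.json") as fh:  # hypothetical report file
    findings = json.load(fh)                  # the format is an array of assessments

for item in findings:
    missing = [key for key in EXPECTED_KEYS if key not in item]
    if missing:
        uid = item.get("finding_info", {}).get("uid", "<no uid>")
        print(f"{uid}: missing {missing}")
~~~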
diff --git a/dojo/tools/aws_prowler_v3/__init__.py b/dojo/tools/aws_prowler_v3plus/__init__.py
similarity index 100%
rename from dojo/tools/aws_prowler_v3/__init__.py
rename to dojo/tools/aws_prowler_v3plus/__init__.py
diff --git a/dojo/tools/aws_prowler_v3plus/parser.py b/dojo/tools/aws_prowler_v3plus/parser.py
new file mode 100644
index 0000000000..8e62047ac8
--- /dev/null
+++ b/dojo/tools/aws_prowler_v3plus/parser.py
@@ -0,0 +1,24 @@
+from dojo.tools.aws_prowler_v3plus.prowler_v3 import AWSProwlerV3Parser
+from dojo.tools.aws_prowler_v3plus.prowler_v4 import AWSProwlerV4Parser
+
+
+class AWSProwlerV3plusParser:
+ SCAN_TYPE = ["AWS Prowler V3"]
+
+ def get_scan_types(self):
+ return AWSProwlerV3plusParser.SCAN_TYPE
+
+ def get_label_for_scan_types(self, scan_type):
+ return AWSProwlerV3plusParser.SCAN_TYPE[0]
+
+ def get_description_for_scan_types(self, scan_type):
+ return "Exports from AWS Prowler v3 in JSON format or from Prowler v4 in OCSF-JSON format."
+
+ def get_findings(self, file, test):
+ if file.name.lower().endswith('.ocsf.json'):
+ return AWSProwlerV4Parser().process_ocsf_json(file, test)
+ elif file.name.lower().endswith('.json'):
+ return AWSProwlerV3Parser().process_json(file, test)
+ else:
+ msg = 'Unknown file format'
+ raise ValueError(msg)
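`get_findings` routes on the file name: `.ocsf.json` is handled by the new v4 (OCSF) code path, any other `.json` by the existing v3 path, and anything else raises `ValueError`. A minimal usage sketch under that assumption, mirroring how the unit tests later in this patch drive the parser (it presupposes a configured DefectDojo test environment):

~~~
# Sketch only: exercising the extension-based dispatch of the combined parser.
from dojo.models import Test
from dojo.tools.aws_prowler_v3plus.parser import AWSProwlerV3plusParser

parser = AWSProwlerV3plusParser()
with open("unittests/scans/aws_prowler_v3plus/one_vuln.ocsf.json") as report:
    findings = parser.get_findings(report, Test())  # dispatched to the v4 branch
print(len(findings), findings[0].severity if findings else None)
~~~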
diff --git a/dojo/tools/aws_prowler_v3/parser.py b/dojo/tools/aws_prowler_v3plus/prowler_v3.py
similarity index 84%
rename from dojo/tools/aws_prowler_v3/parser.py
rename to dojo/tools/aws_prowler_v3plus/prowler_v3.py
index 60f7a5dc7e..cce0472b67 100644
--- a/dojo/tools/aws_prowler_v3/parser.py
+++ b/dojo/tools/aws_prowler_v3plus/prowler_v3.py
@@ -1,4 +1,3 @@
-
import hashlib
import json
import textwrap
@@ -8,24 +7,6 @@
class AWSProwlerV3Parser:
- SCAN_TYPE = ["AWS Prowler V3"]
-
- def get_scan_types(self):
- return AWSProwlerV3Parser.SCAN_TYPE
-
- def get_label_for_scan_types(self, scan_type):
- return AWSProwlerV3Parser.SCAN_TYPE[0]
-
- def get_description_for_scan_types(self, scan_type):
- return "Export of AWS Prowler JSON V3 format."
-
- def get_findings(self, file, test):
- if file.name.lower().endswith('.json'):
- return self.process_json(file, test)
- else:
- msg = 'Unknown file format'
- raise ValueError(msg)
-
def process_json(self, file, test):
dupes = {}
@@ -96,9 +77,3 @@ def process_json(self, file, test):
dupes[dupe_key] = find
return list(dupes.values())
-
- def formatview(self, depth):
- if depth > 1:
- return "* "
- else:
- return ""
diff --git a/dojo/tools/aws_prowler_v3plus/prowler_v4.py b/dojo/tools/aws_prowler_v3plus/prowler_v4.py
new file mode 100644
index 0000000000..ac3c4a99e6
--- /dev/null
+++ b/dojo/tools/aws_prowler_v3plus/prowler_v4.py
@@ -0,0 +1,83 @@
+import hashlib
+import json
+import textwrap
+from datetime import date
+
+from dojo.models import Finding
+
+
+class AWSProwlerV4Parser:
+ def process_ocsf_json(self, file, test):
+ dupes = {}
+
+ data = json.load(file)
+ # mapping of json fields between Prowler v3 and v4:
+ # https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/reporting/#json
+ for deserialized in data:
+
+ status = deserialized.get('status_code')
+ if status.upper() != 'FAIL':
+ continue
+
+ account_id = deserialized.get('cloud', {}).get('account', {}).get("uid", '')
+ region = deserialized.get('resources', [{}])[0].get('region', '')
+ provider = deserialized.get('cloud', {}).get('provider', '')
+ compliance = ''
+ compliance_field = deserialized.get('unmapped', {}).get("compliance", {})
+ if compliance_field:
+ compliance = ' | '.join([f"{key}:{','.join(value)}" for key, value in compliance_field.items()])
+ result_extended = deserialized.get('status_detail')
+ general_description = deserialized.get('finding_info', {}).get('desc', '')
+ asff_compliance_type = deserialized.get('unmapped', {}).get('check_type', '')
+ severity = deserialized.get('severity', 'Info').capitalize()
+ aws_service_name = deserialized.get('resources', [{}])[0].get('group', {}).get('name', '')
+ impact = deserialized.get('risk_details')
+ mitigation = deserialized.get('remediation', {}).get("desc", '')
+ documentation = deserialized.get('remediation', {}).get("references", '')
+ documentation = str(documentation) + "\n" + str(deserialized.get('unmapped', {}).get('related_url', ''))
+ security_domain = deserialized.get('resources', [{}])[0].get('type', '')
+ timestamp = deserialized.get("event_time")
+ resource_arn = deserialized.get('resources', [{}])[0].get('uid', '')
+ resource_id = deserialized.get('resources', [{}])[0].get('name', '')
+ unique_id_from_tool = deserialized.get('finding_info', {}).get('uid', '')
+ if not resource_arn or resource_arn == "":
+ component_name = str(provider) + "-" + str(account_id) + "-" + str(region) + "-" + str(resource_id)
+ else:
+ component_name = resource_arn
+
+ description = "**Issue:** " + str(result_extended) + \
+ "\n**Description:** " + str(general_description) + \
+ "\n**AWS Account:** " + str(account_id) + \
+ "\n**Region:** " + str(region) + \
+ "\n**AWS Service:** " + str(aws_service_name) + \
+ "\n**Security Domain:** " + str(security_domain) + \
+ "\n**Compliance:** " + str(compliance) + \
+ "\n**ASFF Compliance Type:** " + str(asff_compliance_type)
+
+ # improving key to get duplicates
+ dupe_key = hashlib.sha256(unique_id_from_tool.encode('utf-8')).hexdigest()
+ if dupe_key in dupes:
+ find = dupes[dupe_key]
+ if description is not None:
+ find.description += description + "\n\n"
+ find.nb_occurences += 1
+ else:
+ find = Finding(
+ title=textwrap.shorten(result_extended, 150),
+ cwe=1032, # Security Configuration Weaknesses, would like to fine tune
+ test=test,
+ description=description,
+ component_name=component_name,
+ unique_id_from_tool=unique_id_from_tool,
+ severity=severity,
+ references=documentation,
+ date=date.fromisoformat(timestamp[:10]),
+ static_finding=True,
+ dynamic_finding=False,
+ nb_occurences=1,
+ mitigation=mitigation,
+ impact=impact,
+ )
+ dupes[dupe_key] = find
+
+ return list(dupes.values())
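The v4 processor deduplicates on a SHA-256 of the OCSF `finding_info.uid`: a repeated uid appends its description to the existing finding and bumps `nb_occurences` instead of creating a second one. A minimal sketch of that keying strategy in isolation (illustration only, not code from the patch):

~~~
# Sketch only: the dedup keying used above, shown outside the parser.
import hashlib

dupes = {}
for uid in ("prowler-aws-check-a", "prowler-aws-check-b", "prowler-aws-check-a"):
    dupe_key = hashlib.sha256(uid.encode("utf-8")).hexdigest()
    if dupe_key in dupes:
        dupes[dupe_key] += 1   # stands in for find.nb_occurences += 1
    else:
        dupes[dupe_key] = 1    # stands in for creating the Finding
print(len(dupes))  # 2 distinct keys; the repeated uid collapsed into one entry
~~~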
diff --git a/unittests/scans/aws_prowler_v3/many_vuln.json b/unittests/scans/aws_prowler_v3plus/many_vuln.json
similarity index 100%
rename from unittests/scans/aws_prowler_v3/many_vuln.json
rename to unittests/scans/aws_prowler_v3plus/many_vuln.json
diff --git a/unittests/scans/aws_prowler_v3plus/many_vuln.ocsf.json b/unittests/scans/aws_prowler_v3plus/many_vuln.ocsf.json
new file mode 100644
index 0000000000..e0f563c2a0
--- /dev/null
+++ b/unittests/scans/aws_prowler_v3plus/many_vuln.ocsf.json
@@ -0,0 +1,247 @@
+[{
+ "metadata": {
+ "event_code": "iam_role_administratoraccess_policy_permissive_trust_relationship",
+ "product": {
+ "name": "Prowler",
+ "vendor_name": "Prowler",
+ "version": "4.2.1"
+ },
+ "version": "1.2.0"
+ },
+ "severity_id": 4,
+ "severity": "High",
+ "status": "Suppressed",
+ "status_code": "FAIL",
+ "status_detail": "IAM Role myAdministratorExecutionRole has AdministratorAccess policy attached that has too permissive trust relationship.",
+ "status_id": 3,
+ "unmapped": {
+ "check_type": "",
+ "related_url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_job-functions.html#jf_administrator",
+ "categories": "trustboundaries",
+ "depends_on": "",
+ "related_to": "",
+ "notes": "CAF Security Epic: IAM",
+ "compliance": {}
+ },
+ "activity_name": "Create",
+ "activity_id": 1,
+ "finding_info": {
+ "created_time": "2024-06-03T14:15:19.382075",
+ "desc": "Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship",
+ "product_uid": "prowler",
+ "title": "Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship",
+ "uid": "prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole"
+ },
+ "resources": [
+ {
+ "cloud_partition": "aws",
+ "region": "us-east-1",
+ "data": {
+ "details": ""
+ },
+ "group": {
+ "name": "iam"
+ },
+ "labels": [],
+ "name": "myAdministratorExecutionRole",
+ "type": "AwsIamRole",
+ "uid": "arn:aws:iam::123456789012:role/myAdministratorExecutionRole"
+ }
+ ],
+ "category_name": "Findings",
+ "category_uid": 2,
+ "class_name": "DetectionFinding",
+ "class_uid": 2004,
+ "cloud": {
+ "account": {
+ "name": "",
+ "type": "AWS_Account",
+ "type_id": 10,
+ "uid": "123456789012",
+ "labels": []
+ },
+ "org": {
+ "name": "",
+ "uid": ""
+ },
+ "provider": "aws",
+ "region": "us-east-1"
+ },
+ "event_time": "2024-06-03T14:15:19.382075",
+ "remediation": {
+ "desc": "Apply the principle of least privilege. Instead of AdministratorAccess, assign only the permissions necessary for specific roles and tasks. Create custom IAM policies with minimal permissions based on the principle of least privilege. If a role really needs AdministratorAccess, the trust relationship must be well defined to restrict it usage only to the Principal, Action, Audience and Subject intended for it.",
+ "references": [
+ "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege"
+ ]
+ },
+ "risk_details": "The AWS-managed AdministratorAccess policy grants all actions for all AWS services and for all resources in the account and as such exposes the customer to a significant data leakage threat. It is therefore particularly important that the trust relationship is well defined to restrict it usage only to the Principal, Action, Audience and Subject intended for it.",
+ "type_uid": 200401,
+ "type_name": "Create"
+},{
+ "metadata": {
+ "event_code": "iam_role_cross_account_readonlyaccess_policy",
+ "product": {
+ "name": "Prowler",
+ "vendor_name": "Prowler",
+ "version": "4.2.1"
+ },
+ "version": "1.2.0"
+ },
+ "severity_id": 4,
+ "severity": "High",
+ "status": "Suppressed",
+ "status_code": "FAIL",
+ "status_detail": "IAM Role AuditRole gives cross account read-only access.",
+ "status_id": 3,
+ "unmapped": {
+ "check_type": "",
+ "related_url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_job-functions.html#awsmp_readonlyaccess",
+ "categories": "trustboundaries",
+ "depends_on": "",
+ "related_to": "",
+ "notes": "CAF Security Epic: IAM",
+ "compliance": {
+ "MITRE-ATTACK": [
+ "T1078"
+ ],
+ "AWS-Foundational-Technical-Review": [
+ "IAM-0012"
+ ]
+ }
+ },
+ "activity_name": "Create",
+ "activity_id": 1,
+ "finding_info": {
+ "created_time": "2024-06-03T14:15:19.382075",
+ "desc": "Ensure IAM Roles do not have ReadOnlyAccess access for external AWS accounts",
+ "product_uid": "prowler",
+ "title": "Ensure IAM Roles do not have ReadOnlyAccess access for external AWS accounts",
+ "uid": "prowler-aws-iam_role_cross_account_readonlyaccess_policy-123456789012-us-east-1-AuditRole"
+ },
+ "resources": [
+ {
+ "cloud_partition": "aws",
+ "region": "us-east-1",
+ "data": {
+ "details": ""
+ },
+ "group": {
+ "name": "iam"
+ },
+ "labels": [
+ "some-label=some value"
+ ],
+ "name": "AuditRole",
+ "type": "AwsIamRole",
+ "uid": "arn:aws:iam::123456789012:role/AuditRole"
+ }
+ ],
+ "category_name": "Findings",
+ "category_uid": 2,
+ "class_name": "DetectionFinding",
+ "class_uid": 2004,
+ "cloud": {
+ "account": {
+ "name": "",
+ "type": "AWS_Account",
+ "type_id": 10,
+ "uid": "123456789012",
+ "labels": []
+ },
+ "org": {
+ "name": "",
+ "uid": ""
+ },
+ "provider": "aws",
+ "region": "us-east-1"
+ },
+ "event_time": "2024-06-03T14:15:19.382075",
+ "remediation": {
+ "desc": "Remove the AWS-managed ReadOnlyAccess policy from all roles that have a trust policy, including third-party cloud accounts, or remove third-party cloud accounts from the trust policy of all roles that need the ReadOnlyAccess policy.",
+ "references": [
+ "https://docs.securestate.vmware.com/rule-docs/aws-iam-role-cross-account-readonlyaccess-policy"
+ ]
+ },
+ "risk_details": "The AWS-managed ReadOnlyAccess policy is highly potent and exposes the customer to a significant data leakage threat. It should be granted very conservatively. For granting access to 3rd party vendors, consider using alternative managed policies, such as ViewOnlyAccess or SecurityAudit.",
+ "type_uid": 200401,
+ "type_name": "Create"
+},{
+ "metadata": {
+ "event_code": "iam_role_permissive_trust_relationship",
+ "product": {
+ "name": "Prowler",
+ "vendor_name": "Prowler",
+ "version": "4.2.1"
+ },
+ "version": "1.2.0"
+ },
+ "severity_id": 4,
+ "severity": "High",
+ "status": "Suppressed",
+ "status_code": "FAIL",
+ "status_detail": "IAM Role CrossAccountResourceAccessRole has permissive trust relationship to other accounts",
+ "status_id": 3,
+ "unmapped": {
+ "check_type": "",
+ "related_url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-accounts",
+ "categories": "trustboundaries",
+ "depends_on": "",
+ "related_to": "",
+ "notes": "CAF Security Epic: IAM",
+ "compliance": {}
+ },
+ "activity_name": "Create",
+ "activity_id": 1,
+ "finding_info": {
+ "created_time": "2024-06-03T14:15:19.382075",
+ "desc": "Ensure IAM Roles do not allow assume role from any role of a cross account",
+ "product_uid": "prowler",
+ "title": "Ensure IAM Roles do not allow assume role from any role of a cross account",
+ "uid": "prowler-aws-iam_role_permissive_trust_relationship-123456789012-us-east-1-CrossAccountResourceAccessRole"
+ },
+ "resources": [
+ {
+ "cloud_partition": "aws",
+ "region": "us-east-1",
+ "data": {
+ "details": ""
+ },
+ "group": {
+ "name": "iam"
+ },
+ "labels": [],
+ "name": "CrossAccountResourceAccessRole",
+ "type": "AwsIamRole",
+ "uid": "arn:aws:iam::123456789012:role/CrossAccountResourceAccessRole"
+ }
+ ],
+ "category_name": "Findings",
+ "category_uid": 2,
+ "class_name": "DetectionFinding",
+ "class_uid": 2004,
+ "cloud": {
+ "account": {
+ "name": "",
+ "type": "AWS_Account",
+ "type_id": 10,
+ "uid": "123456789012",
+ "labels": []
+ },
+ "org": {
+ "name": "",
+ "uid": ""
+ },
+ "provider": "aws",
+ "region": "us-east-1"
+ },
+ "event_time": "2024-06-03T14:15:19.382075",
+ "remediation": {
+ "desc": "Ensure IAM Roles do not allow assume role from any role of a cross account but only from specific roles of specific accounts.",
+ "references": [
+ "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-roles"
+ ]
+ },
+ "risk_details": "If an IAM role allows assume role from any role of a cross account, it can lead to privilege escalation.",
+ "type_uid": 200401,
+ "type_name": "Create"
+}]
\ No newline at end of file
diff --git a/unittests/scans/aws_prowler_v3/no_vuln.json b/unittests/scans/aws_prowler_v3plus/no_vuln.json
similarity index 100%
rename from unittests/scans/aws_prowler_v3/no_vuln.json
rename to unittests/scans/aws_prowler_v3plus/no_vuln.json
diff --git a/unittests/scans/aws_prowler_v3plus/no_vuln.ocsf.json b/unittests/scans/aws_prowler_v3plus/no_vuln.ocsf.json
new file mode 100644
index 0000000000..0637a088a0
--- /dev/null
+++ b/unittests/scans/aws_prowler_v3plus/no_vuln.ocsf.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/unittests/scans/aws_prowler_v3/one_vuln.json b/unittests/scans/aws_prowler_v3plus/one_vuln.json
similarity index 100%
rename from unittests/scans/aws_prowler_v3/one_vuln.json
rename to unittests/scans/aws_prowler_v3plus/one_vuln.json
diff --git a/unittests/scans/aws_prowler_v3plus/one_vuln.ocsf.json b/unittests/scans/aws_prowler_v3plus/one_vuln.ocsf.json
new file mode 100644
index 0000000000..5e45f2077e
--- /dev/null
+++ b/unittests/scans/aws_prowler_v3plus/one_vuln.ocsf.json
@@ -0,0 +1,80 @@
+[{
+ "metadata": {
+ "event_code": "iam_role_administratoraccess_policy_permissive_trust_relationship",
+ "product": {
+ "name": "Prowler",
+ "vendor_name": "Prowler",
+ "version": "4.2.1"
+ },
+ "version": "1.2.0"
+ },
+ "severity_id": 4,
+ "severity": "High",
+ "status": "Suppressed",
+ "status_code": "FAIL",
+ "status_detail": "IAM Role myAdministratorExecutionRole has AdministratorAccess policy attached that has too permissive trust relationship.",
+ "status_id": 3,
+ "unmapped": {
+ "check_type": "",
+ "related_url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_job-functions.html#jf_administrator",
+ "categories": "trustboundaries",
+ "depends_on": "",
+ "related_to": "",
+ "notes": "CAF Security Epic: IAM",
+ "compliance": {}
+ },
+ "activity_name": "Create",
+ "activity_id": 1,
+ "finding_info": {
+ "created_time": "2024-06-03T14:15:19.382075",
+ "desc": "Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship",
+ "product_uid": "prowler",
+ "title": "Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship",
+ "uid": "prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole"
+ },
+ "resources": [
+ {
+ "cloud_partition": "aws",
+ "region": "us-east-1",
+ "data": {
+ "details": ""
+ },
+ "group": {
+ "name": "iam"
+ },
+ "labels": [],
+ "name": "myAdministratorExecutionRole",
+ "type": "AwsIamRole",
+ "uid": "arn:aws:iam::123456789012:role/myAdministratorExecutionRole"
+ }
+ ],
+ "category_name": "Findings",
+ "category_uid": 2,
+ "class_name": "DetectionFinding",
+ "class_uid": 2004,
+ "cloud": {
+ "account": {
+ "name": "",
+ "type": "AWS_Account",
+ "type_id": 10,
+ "uid": "123456789012",
+ "labels": []
+ },
+ "org": {
+ "name": "",
+ "uid": ""
+ },
+ "provider": "aws",
+ "region": "us-east-1"
+ },
+ "event_time": "2024-06-03T14:15:19.382075",
+ "remediation": {
+ "desc": "Apply the principle of least privilege. Instead of AdministratorAccess, assign only the permissions necessary for specific roles and tasks. Create custom IAM policies with minimal permissions based on the principle of least privilege. If a role really needs AdministratorAccess, the trust relationship must be well defined to restrict it usage only to the Principal, Action, Audience and Subject intended for it.",
+ "references": [
+ "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege"
+ ]
+ },
+ "risk_details": "The AWS-managed AdministratorAccess policy grants all actions for all AWS services and for all resources in the account and as such exposes the customer to a significant data leakage threat. It is therefore particularly important that the trust relationship is well defined to restrict it usage only to the Principal, Action, Audience and Subject intended for it.",
+ "type_uid": 200401,
+ "type_name": "Create"
+}]
\ No newline at end of file
diff --git a/unittests/tools/test_aws_prowler_v3_parser.py b/unittests/tools/test_aws_prowler_v3_parser.py
deleted file mode 100644
index c92ff3f922..0000000000
--- a/unittests/tools/test_aws_prowler_v3_parser.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from dojo.models import Test
-from dojo.tools.aws_prowler_v3.parser import AWSProwlerV3Parser
-from unittests.dojo_test_case import DojoTestCase
-
-
-class TestAwsProwlerV3Parser(DojoTestCase):
- def setup(self, testfile):
- parser = AWSProwlerV3Parser()
- findings = parser.get_findings(testfile, Test())
- testfile.close()
- return findings
-
- def test_aws_prowler_parser_with_no_vuln_has_no_findings_json(self):
- findings = self.setup(
- open("unittests/scans/aws_prowler_v3/no_vuln.json"))
- self.assertEqual(0, len(findings))
-
- def test_aws_prowler_parser_with_critical_vuln_has_one_findings_json(self):
- findings = self.setup(
- open("unittests/scans/aws_prowler_v3/one_vuln.json"))
- self.assertEqual(1, len(findings))
- self.assertEqual("prowler-aws-acm_certificates_expiration_check-999999999999-us-east-1-api.sandbox.partner.teste.com", findings[0].unique_id_from_tool)
- self.assertIn('Check if ACM Certificates are about to expire in specific days or less', findings[0].description)
- self.assertEqual("arn:aws:acm:us-east-1:999999999999:certificate/ffffffff-0000-0000-0000-000000000000", findings[0].component_name)
- self.assertIn('https://docs.aws.amazon.com/config/latest/developerguide/acm-certificate-expiration-check.html', findings[0].references)
-
- def test_aws_prowler_parser_with_many_vuln_has_many_findings_json(self):
- findings = self.setup(
- open("unittests/scans/aws_prowler_v3/many_vuln.json"))
- self.assertEqual(3, len(findings))
- with self.subTest(i=0):
- self.assertEqual("prowler-aws-acm_certificates_expiration_check-999999999999-us-east-1-api.teste.teste.com", findings[0].unique_id_from_tool)
- self.assertIn('Check if ACM Certificates are about to expire in specific days or less', findings[0].description)
- with self.subTest(i=1):
- self.assertEqual("prowler-aws-accessanalyzer_enabled-999999999999-us-east-1-999999999999", findings[1].unique_id_from_tool)
- self.assertIn('Check if IAM Access Analyzer is enabled', findings[1].description)
- with self.subTest(i=3):
- self.assertEqual("prowler-aws-account_maintain_current_contact_details-999999999999-us-east-1-999999999999", findings[2].unique_id_from_tool)
- self.assertIn('Maintain current contact details.', findings[2].description)
diff --git a/unittests/tools/test_aws_prowler_v3plus_parser.py b/unittests/tools/test_aws_prowler_v3plus_parser.py
new file mode 100644
index 0000000000..5096c6275d
--- /dev/null
+++ b/unittests/tools/test_aws_prowler_v3plus_parser.py
@@ -0,0 +1,67 @@
+from dojo.models import Test
+from dojo.tools.aws_prowler_v3plus.parser import AWSProwlerV3plusParser
+from unittests.dojo_test_case import DojoTestCase
+
+
+class TestAwsProwlerV3plusParser(DojoTestCase):
+ def setup(self, testfile):
+ parser = AWSProwlerV3plusParser()
+ findings = parser.get_findings(testfile, Test())
+ testfile.close()
+ return findings
+
+ def test_aws_prowler_parser_with_no_vuln_has_no_findings_json(self):
+ findings = self.setup(
+ open("unittests/scans/aws_prowler_v3plus/no_vuln.json"))
+ self.assertEqual(0, len(findings))
+
+ def test_aws_prowler_parser_with_critical_vuln_has_one_findings_json(self):
+ findings = self.setup(
+ open("unittests/scans/aws_prowler_v3plus/one_vuln.json"))
+ self.assertEqual(1, len(findings))
+ self.assertEqual("prowler-aws-acm_certificates_expiration_check-999999999999-us-east-1-api.sandbox.partner.teste.com", findings[0].unique_id_from_tool)
+ self.assertIn('Check if ACM Certificates are about to expire in specific days or less', findings[0].description)
+ self.assertEqual("arn:aws:acm:us-east-1:999999999999:certificate/ffffffff-0000-0000-0000-000000000000", findings[0].component_name)
+ self.assertIn('https://docs.aws.amazon.com/config/latest/developerguide/acm-certificate-expiration-check.html', findings[0].references)
+
+ def test_aws_prowler_parser_with_many_vuln_has_many_findings_json(self):
+ findings = self.setup(
+ open("unittests/scans/aws_prowler_v3plus/many_vuln.json"))
+ self.assertEqual(3, len(findings))
+ with self.subTest(i=0):
+ self.assertEqual("prowler-aws-acm_certificates_expiration_check-999999999999-us-east-1-api.teste.teste.com", findings[0].unique_id_from_tool)
+ self.assertIn('Check if ACM Certificates are about to expire in specific days or less', findings[0].description)
+ with self.subTest(i=1):
+ self.assertEqual("prowler-aws-accessanalyzer_enabled-999999999999-us-east-1-999999999999", findings[1].unique_id_from_tool)
+ self.assertIn('Check if IAM Access Analyzer is enabled', findings[1].description)
+ with self.subTest(i=2):
+ self.assertEqual("prowler-aws-account_maintain_current_contact_details-999999999999-us-east-1-999999999999", findings[2].unique_id_from_tool)
+ self.assertIn('Maintain current contact details.', findings[2].description)
+
+ def test_aws_prowler_parser_with_no_vuln_has_no_findings_ocsf_json(self):
+ findings = self.setup(
+ open("unittests/scans/aws_prowler_v3plus/no_vuln.ocsf.json"))
+ self.assertEqual(0, len(findings))
+
+ def test_aws_prowler_parser_with_critical_vuln_has_one_findings_ocsf_json(self):
+ findings = self.setup(
+ open("unittests/scans/aws_prowler_v3plus/one_vuln.ocsf.json"))
+ self.assertEqual(1, len(findings))
+ self.assertEqual("prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole", findings[0].unique_id_from_tool)
+ self.assertIn('Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship', findings[0].description)
+ self.assertEqual("arn:aws:iam::123456789012:role/myAdministratorExecutionRole", findings[0].component_name)
+ self.assertIn('https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege', findings[0].references)
+
+ def test_aws_prowler_parser_with_many_vuln_has_many_findings_ocsf_json(self):
+ findings = self.setup(
+ open("unittests/scans/aws_prowler_v3plus/many_vuln.ocsf.json"))
+ self.assertEqual(3, len(findings))
+ with self.subTest(i=0):
+ self.assertEqual("prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole", findings[0].unique_id_from_tool)
+ self.assertIn('Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship', findings[0].description)
+ with self.subTest(i=1):
+ self.assertEqual("prowler-aws-iam_role_cross_account_readonlyaccess_policy-123456789012-us-east-1-AuditRole", findings[1].unique_id_from_tool)
+ self.assertIn('Ensure IAM Roles do not have ReadOnlyAccess access for external AWS accounts', findings[1].description)
+ with self.subTest(i=2):
+ self.assertEqual("prowler-aws-iam_role_permissive_trust_relationship-123456789012-us-east-1-CrossAccountResourceAccessRole", findings[2].unique_id_from_tool)
+ self.assertIn('Ensure IAM Roles do not allow assume role from any role of a cross account', findings[2].description)
From 590b7413251938ff53fa38535fc976ab79a3eaa3 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 3 Jul 2024 14:06:05 -0500
Subject: [PATCH 020/111] Bump boto3 from 1.34.137 to 1.34.138 (#10508)
Bumps [boto3](https://github.com/boto/boto3) from 1.34.137 to 1.34.138.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.34.137...1.34.138)
---
updated-dependencies:
- dependency-name: boto3
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
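As a quick, hypothetical post-upgrade check (not part of this patch), the installed boto3 version can be compared against the new pin in requirements.txt:

from importlib.metadata import version

# "1.34.138" is the pin introduced by this commit; adjust if the pin changes.
assert version("boto3") == "1.34.138", version("boto3")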
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 6b849277ad..3feab385ac 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -75,7 +75,7 @@ django-ratelimit==4.1.0
argon2-cffi==23.1.0
blackduck==1.1.3
pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
-boto3==1.34.137 # Required for Celery Broker AWS (SQS) support
+boto3==1.34.138 # Required for Celery Broker AWS (SQS) support
netaddr==1.3.0
vulners==2.1.7
fontawesomefree==6.5.1
From a309c716cc72776695052f982d63093458113cc5 Mon Sep 17 00:00:00 2001
From: kiblik <5609770+kiblik@users.noreply.github.com>
Date: Wed, 3 Jul 2024 22:16:13 +0200
Subject: [PATCH 021/111] Ruff: add and fix COM (#10086)
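The COM rules come from flake8-commas, which Ruff re-implements; the dominant fix in the hunks below is COM812 (missing trailing comma), adding a trailing comma after the last element of a multi-line call or literal, and ruff.toml gains one line, presumably enabling the rule group. A minimal before/after sketch based on the first hunk below (docker/install_chrome_dependencies.py); the file_path value is an illustrative placeholder, not taken from the patch:

import subprocess

file_path = "/bin/true"  # illustrative placeholder, not from the patch

# Before: COM812 flags the missing trailing comma on the last argument.
# result = subprocess.run(
#     ["ldd", file_path], capture_output=True, text=True
# )

# After, as applied by this commit: the trailing comma keeps later additions
# to a one-line diff and satisfies COM812.
result = subprocess.run(
    ["ldd", file_path], capture_output=True, text=True,
)
print(result.returncode)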
---
docker/install_chrome_dependencies.py | 2 +-
dojo/admin.py | 2 +-
dojo/announcement/signals.py | 8 +-
dojo/announcement/views.py | 6 +-
dojo/api_v2/exception_handler.py | 2 +-
dojo/api_v2/mixins.py | 2 +-
dojo/api_v2/permissions.py | 100 +++---
dojo/api_v2/prefetch/prefetcher.py | 4 +-
dojo/api_v2/prefetch/schema.py | 6 +-
dojo/api_v2/prefetch/utils.py | 2 +-
dojo/api_v2/serializers.py | 260 +++++++--------
dojo/api_v2/views.py | 254 +++++++-------
dojo/apps.py | 8 +-
dojo/authorization/authorization.py | 24 +-
.../authorization/authorization_decorators.py | 2 +-
dojo/banner/urls.py | 2 +-
dojo/banner/views.py | 4 +-
dojo/benchmark/views.py | 38 +--
dojo/checks.py | 2 +-
dojo/components/sql_group_concat.py | 6 +-
dojo/components/views.py | 20 +-
dojo/context_processors.py | 2 +-
dojo/cred/views.py | 28 +-
dojo/endpoint/signals.py | 2 +-
dojo/endpoint/utils.py | 10 +-
dojo/endpoint/views.py | 4 +-
dojo/engagement/signals.py | 2 +-
dojo/engagement/views.py | 40 +--
dojo/filters.py | 74 ++---
dojo/finding/queries.py | 6 +-
dojo/finding/urls.py | 44 +--
dojo/finding/views.py | 312 +++++++++---------
dojo/finding_group/signals.py | 2 +-
dojo/forms.py | 37 +--
dojo/github_issue_link/views.py | 4 +-
dojo/group/urls.py | 2 +-
dojo/group/views.py | 10 +-
dojo/home/views.py | 2 +-
dojo/importers/auto_create_context.py | 2 +-
dojo/importers/base_importer.py | 8 +-
dojo/importers/default_importer.py | 10 +-
dojo/importers/default_reimporter.py | 28 +-
dojo/importers/endpoint_manager.py | 6 +-
dojo/jira_link/helper.py | 10 +-
dojo/jira_link/views.py | 6 +-
dojo/management/commands/dedupe.py | 2 +-
.../commands/initialize_permissions.py | 2 +-
.../commands/jira_status_reconciliation.py | 2 +-
dojo/management/commands/migrate_cve.py | 4 +-
dojo/metrics/utils.py | 78 ++---
dojo/metrics/views.py | 18 +-
dojo/models.py | 40 +--
dojo/notes/urls.py | 2 +-
dojo/notifications/helper.py | 4 +-
dojo/notifications/urls.py | 2 +-
dojo/notifications/views.py | 2 +-
dojo/object/views.py | 6 +-
dojo/okta.py | 8 +-
dojo/product/queries.py | 2 +-
dojo/product/signals.py | 2 +-
dojo/product/views.py | 50 +--
dojo/product_type/signals.py | 2 +-
dojo/product_type/urls.py | 2 +-
dojo/product_type/views.py | 4 +-
dojo/regulations/urls.py | 2 +-
dojo/regulations/views.py | 6 +-
dojo/reports/views.py | 12 +-
dojo/risk_acceptance/helper.py | 6 +-
dojo/settings/.settings.dist.py.sha256sum | 2 +-
.../attribute-maps/django_saml_uri.py | 2 +-
dojo/settings/attribute-maps/saml_uri.py | 2 +-
dojo/settings/settings.dist.py | 62 ++--
dojo/settings/settings.py | 2 +-
dojo/settings/unittest.py | 2 +-
dojo/sla_config/views.py | 10 +-
dojo/survey/views.py | 16 +-
dojo/system_settings/urls.py | 4 +-
dojo/system_settings/views.py | 2 +-
dojo/templatetags/display_tags.py | 12 +-
dojo/test/signals.py | 2 +-
dojo/test/urls.py | 2 +-
dojo/test/views.py | 14 +-
dojo/tool_config/views.py | 4 +-
dojo/tool_product/views.py | 8 +-
dojo/tool_type/views.py | 4 +-
dojo/tools/acunetix/parse_acunetix360_json.py | 2 +-
dojo/tools/acunetix/parse_acunetix_xml.py | 26 +-
dojo/tools/anchore_engine/parser.py | 4 +-
dojo/tools/anchore_enterprise/parser.py | 6 +-
dojo/tools/anchore_grype/parser.py | 6 +-
dojo/tools/anchorectl_policies/parser.py | 2 +-
dojo/tools/anchorectl_vulns/parser.py | 4 +-
dojo/tools/api_blackduck/api_client.py | 2 +-
dojo/tools/api_blackduck/parser.py | 6 +-
dojo/tools/api_bugcrowd/api_client.py | 16 +-
dojo/tools/api_bugcrowd/importer.py | 2 +-
dojo/tools/api_bugcrowd/parser.py | 20 +-
dojo/tools/api_cobalt/api_client.py | 6 +-
dojo/tools/api_cobalt/parser.py | 2 +-
dojo/tools/api_edgescan/importer.py | 2 +-
dojo/tools/api_edgescan/parser.py | 4 +-
dojo/tools/api_sonarqube/api_client.py | 4 +-
dojo/tools/api_sonarqube/importer.py | 20 +-
dojo/tools/api_sonarqube/updater.py | 16 +-
.../api_sonarqube/updater_from_source.py | 4 +-
dojo/tools/api_vulners/importer.py | 2 +-
dojo/tools/api_vulners/parser.py | 2 +-
dojo/tools/aqua/parser.py | 4 +-
dojo/tools/arachni/parser.py | 2 +-
dojo/tools/auditjs/parser.py | 10 +-
dojo/tools/aws_prowler/parser.py | 4 +-
dojo/tools/awssecurityhub/parser.py | 2 +-
.../parser.py | 8 +-
dojo/tools/bandit/parser.py | 4 +-
dojo/tools/blackduck/importer.py | 8 +-
dojo/tools/blackduck/parser.py | 4 +-
.../blackduck_binary_analysis/importer.py | 6 +-
.../tools/blackduck_binary_analysis/parser.py | 4 +-
dojo/tools/blackduck_component_risk/parser.py | 22 +-
dojo/tools/bugcrowd/parser.py | 10 +-
dojo/tools/burp/parser.py | 14 +-
dojo/tools/burp_api/parser.py | 14 +-
dojo/tools/burp_enterprise/parser.py | 8 +-
dojo/tools/burp_graphql/parser.py | 32 +-
dojo/tools/cargo_audit/parser.py | 6 +-
dojo/tools/checkmarx/parser.py | 46 +--
dojo/tools/checkmarx_one/parser.py | 4 +-
dojo/tools/checkmarx_osa/parser.py | 8 +-
dojo/tools/chefinspect/parser.py | 2 +-
dojo/tools/clair/clairklar_parser.py | 4 +-
dojo/tools/cloudsploit/parser.py | 2 +-
dojo/tools/cobalt/parser.py | 4 +-
dojo/tools/contrast/parser.py | 8 +-
dojo/tools/coverity_api/parser.py | 6 +-
dojo/tools/crashtest_security/parser.py | 8 +-
dojo/tools/cred_scan/parser.py | 8 +-
dojo/tools/crunch42/parser.py | 2 +-
dojo/tools/cyclonedx/helpers.py | 2 +-
dojo/tools/cyclonedx/json_parser.py | 8 +-
dojo/tools/cyclonedx/xml_parser.py | 50 +--
dojo/tools/dawnscanner/parser.py | 2 +-
dojo/tools/dependency_check/parser.py | 52 +--
dojo/tools/detect_secrets/parser.py | 2 +-
dojo/tools/dockerbench/parser.py | 4 +-
dojo/tools/dockle/parser.py | 2 +-
dojo/tools/dsop/parser.py | 10 +-
dojo/tools/eslint/parser.py | 2 +-
dojo/tools/fortify/fpr_parser.py | 2 +-
dojo/tools/fortify/xml_parser.py | 6 +-
dojo/tools/gcloud_artifact_scan/parser.py | 2 +-
dojo/tools/generic/csv_parser.py | 14 +-
dojo/tools/generic/json_parser.py | 2 +-
dojo/tools/ggshield/parser.py | 2 +-
dojo/tools/github_vulnerability/parser.py | 8 +-
dojo/tools/gitlab_api_fuzzing/parser.py | 2 +-
dojo/tools/gitlab_container_scan/parser.py | 4 +-
dojo/tools/gitlab_dast/parser.py | 8 +-
dojo/tools/gitlab_sast/parser.py | 4 +-
dojo/tools/gitleaks/parser.py | 6 +-
dojo/tools/gosec/parser.py | 4 +-
dojo/tools/govulncheck/parser.py | 16 +-
dojo/tools/h1/parser.py | 14 +-
dojo/tools/hadolint/parser.py | 2 +-
dojo/tools/horusec/parser.py | 8 +-
dojo/tools/huskyci/parser.py | 4 +-
dojo/tools/hydra/parser.py | 8 +-
dojo/tools/ibm_app/parser.py | 22 +-
dojo/tools/immuniweb/parser.py | 4 +-
dojo/tools/intsights/csv_handler.py | 14 +-
dojo/tools/intsights/json_handler.py | 10 +-
dojo/tools/intsights/parser.py | 6 +-
.../jfrog_xray_api_summary_artifact/parser.py | 4 +-
dojo/tools/jfrog_xray_unified/parser.py | 2 +-
dojo/tools/jfrogxray/parser.py | 6 +-
dojo/tools/kics/parser.py | 2 +-
dojo/tools/kiuwan/parser.py | 4 +-
dojo/tools/kubehunter/parser.py | 2 +-
dojo/tools/kubescape/parser.py | 2 +-
dojo/tools/mend/parser.py | 10 +-
dojo/tools/meterian/parser.py | 4 +-
dojo/tools/microfocus_webinspect/parser.py | 6 +-
dojo/tools/mobsf/parser.py | 32 +-
dojo/tools/mobsfscan/parser.py | 8 +-
dojo/tools/mozilla_observatory/parser.py | 2 +-
dojo/tools/netsparker/parser.py | 8 +-
dojo/tools/neuvector/parser.py | 2 +-
dojo/tools/nexpose/parser.py | 16 +-
dojo/tools/nikto/json_parser.py | 4 +-
dojo/tools/nikto/xml_parser.py | 4 +-
dojo/tools/nmap/parser.py | 12 +-
dojo/tools/noseyparker/parser.py | 2 +-
dojo/tools/nuclei/parser.py | 8 +-
dojo/tools/openscap/parser.py | 12 +-
dojo/tools/openvas/csv_parser.py | 4 +-
dojo/tools/openvas/xml_parser.py | 2 +-
dojo/tools/ort/parser.py | 20 +-
dojo/tools/ossindex_devaudit/parser.py | 2 +-
dojo/tools/outpost24/parser.py | 6 +-
dojo/tools/php_security_audit_v2/parser.py | 2 +-
.../php_symfony_security_check/parser.py | 4 +-
dojo/tools/pmd/parser.py | 6 +-
dojo/tools/popeye/parser.py | 6 +-
dojo/tools/progpilot/parser.py | 2 +-
dojo/tools/pwn_sast/parser.py | 10 +-
dojo/tools/qualys/csv_parser.py | 18 +-
dojo/tools/qualys/parser.py | 6 +-
dojo/tools/qualys_infrascan_webgui/parser.py | 6 +-
dojo/tools/qualys_webapp/parser.py | 52 +--
dojo/tools/retirejs/parser.py | 4 +-
dojo/tools/risk_recon/parser.py | 2 +-
dojo/tools/rubocop/parser.py | 2 +-
dojo/tools/rusty_hog/parser.py | 32 +-
dojo/tools/sarif/parser.py | 18 +-
dojo/tools/scantist/parser.py | 2 +-
dojo/tools/scout_suite/parser.py | 6 +-
dojo/tools/semgrep/parser.py | 12 +-
dojo/tools/skf/parser.py | 10 +-
dojo/tools/snyk/parser.py | 10 +-
dojo/tools/snyk_code/parser.py | 10 +-
dojo/tools/solar_appscreener/parser.py | 2 +-
dojo/tools/sonarqube/parser.py | 2 +-
dojo/tools/sonarqube/soprasteria_helper.py | 2 +-
dojo/tools/sonarqube/soprasteria_html.py | 8 +-
dojo/tools/sonarqube/soprasteria_json.py | 4 +-
dojo/tools/sonatype/parser.py | 2 +-
dojo/tools/spotbugs/parser.py | 6 +-
dojo/tools/ssl_labs/parser.py | 8 +-
dojo/tools/sslscan/parser.py | 2 +-
dojo/tools/sslyze/parser_json.py | 24 +-
dojo/tools/sslyze/parser_xml.py | 8 +-
dojo/tools/stackhawk/parser.py | 12 +-
dojo/tools/sysdig_reports/sysdig_data.py | 2 +-
dojo/tools/talisman/parser.py | 2 +-
dojo/tools/tenable/csv_format.py | 6 +-
dojo/tools/tenable/parser.py | 2 +-
dojo/tools/tenable/xml_format.py | 32 +-
dojo/tools/terrascan/parser.py | 2 +-
dojo/tools/testssl/parser.py | 12 +-
dojo/tools/tfsec/parser.py | 4 +-
dojo/tools/threagile/parser.py | 4 +-
dojo/tools/trivy/parser.py | 8 +-
dojo/tools/trivy_operator/parser.py | 2 +-
.../trivy_operator/vulnerability_handler.py | 2 +-
dojo/tools/trufflehog/parser.py | 6 +-
dojo/tools/trufflehog3/parser.py | 2 +-
dojo/tools/trustwave/parser.py | 6 +-
dojo/tools/trustwave_fusion_api/parser.py | 4 +-
dojo/tools/twistlock/parser.py | 8 +-
dojo/tools/vcg/parser.py | 8 +-
dojo/tools/veracode/json_parser.py | 4 +-
dojo/tools/veracode/xml_parser.py | 42 +--
dojo/tools/veracode_sca/parser.py | 12 +-
dojo/tools/wapiti/parser.py | 6 +-
dojo/tools/wfuzz/parser.py | 6 +-
dojo/tools/whispers/parser.py | 4 +-
dojo/tools/whitehat_sentinel/parser.py | 24 +-
dojo/tools/wiz/parser.py | 2 +-
dojo/tools/wpscan/parser.py | 16 +-
dojo/tools/xanitizer/parser.py | 14 +-
dojo/tools/zap/parser.py | 10 +-
dojo/urls.py | 4 +-
dojo/user/urls.py | 4 +-
dojo/user/views.py | 4 +-
dojo/utils.py | 60 ++--
dojo/views.py | 2 +-
dojo/widgets.py | 2 +-
dojo/wsgi.py | 2 +-
ruff.toml | 1 +
tests/base_test_class.py | 26 +-
tests/false_positive_history_test.py | 8 +-
tests/notifications_test.py | 2 +-
tests/zap.py | 2 +-
unittests/dojo_test_case.py | 6 +-
unittests/test_api_sonarqube_updater.py | 28 +-
unittests/test_apiv2_endpoint.py | 18 +-
unittests/test_apiv2_methods_and_endpoints.py | 2 +-
unittests/test_apiv2_notifications.py | 4 +-
unittests/test_apiv2_user.py | 20 +-
unittests/test_apply_finding_template.py | 2 +-
unittests/test_dashboard.py | 4 +-
unittests/test_deduplication_logic.py | 2 +-
unittests/test_endpoint_model.py | 38 +--
.../test_false_positive_history_logic.py | 2 +-
unittests/test_finding_helper.py | 20 +-
unittests/test_import_reimport.py | 28 +-
unittests/test_jira_webhook.py | 24 +-
unittests/test_metrics_queries.py | 60 ++--
unittests/test_migrations.py | 12 +-
unittests/test_parsers.py | 20 +-
unittests/test_remote_user.py | 20 +-
unittests/test_rest_framework.py | 104 +++---
unittests/test_risk_acceptance.py | 18 +-
unittests/test_search_parser.py | 2 +-
unittests/test_utils.py | 6 +-
.../tools/test_anchore_enterprise_parser.py | 8 +-
unittests/tools/test_api_bugcrowd_importer.py | 8 +-
unittests/tools/test_api_bugcrowd_parser.py | 30 +-
unittests/tools/test_api_edgescan_parser.py | 2 +-
.../tools/test_api_sonarqube_importer.py | 44 +--
unittests/tools/test_api_sonarqube_parser.py | 4 +-
unittests/tools/test_auditjs_parser.py | 2 +-
unittests/tools/test_aws_prowler_parser.py | 2 +-
unittests/tools/test_awssecurityhub_parser.py | 2 +-
unittests/tools/test_bandit_parser.py | 2 +-
.../test_blackduck_binary_analysis_parser.py | 4 +-
.../test_blackduck_component_risk_parser.py | 2 +-
unittests/tools/test_blackduck_parser.py | 4 +-
unittests/tools/test_checkmarx_osa_parser.py | 18 +-
unittests/tools/test_checkmarx_parser.py | 48 +--
unittests/tools/test_checkov_parser.py | 6 +-
unittests/tools/test_codechecker_parser.py | 8 +-
.../tools/test_crashtest_security_parser.py | 2 +-
.../tools/test_dependency_check_parser.py | 16 +-
.../tools/test_dependency_track_parser.py | 18 +-
unittests/tools/test_dockerbench_parser.py | 6 +-
.../test_gitlab_container_scan_parser.py | 2 +-
unittests/tools/test_gitlab_dast_parser.py | 8 +-
unittests/tools/test_govulncheck_parser.py | 2 +-
unittests/tools/test_huskyci_parser.py | 6 +-
unittests/tools/test_hydra_parser.py | 14 +-
..._jfrog_xray_api_summary_artifact_parser.py | 4 +-
unittests/tools/test_kubebench_parser.py | 8 +-
unittests/tools/test_kubehunter_parser.py | 2 +-
unittests/tools/test_mend_parser.py | 2 +-
.../test_microfocus_webinspect_parser.py | 14 +-
unittests/tools/test_noseyparker_parser.py | 2 +-
unittests/tools/test_ort_parser.py | 2 +-
.../tools/test_ossindex_devaudit_parser.py | 30 +-
.../test_php_symfony_security_check_parser.py | 6 +-
.../test_qualys_infrascan_webgui_parser.py | 8 +-
unittests/tools/test_qualys_parser.py | 42 +--
unittests/tools/test_qualys_webapp_parser.py | 6 +-
unittests/tools/test_sarif_parser.py | 20 +-
unittests/tools/test_scantist_parser.py | 2 +-
unittests/tools/test_snyk_parser.py | 8 +-
.../tools/test_solar_appscreener_parser.py | 2 +-
unittests/tools/test_sonarqube_parser.py | 46 +--
unittests/tools/test_spotbugs_parser.py | 2 +-
unittests/tools/test_sslyze_parser.py | 6 +-
unittests/tools/test_stackhawk_parser.py | 26 +-
unittests/tools/test_sysdig_reports_parser.py | 4 +-
unittests/tools/test_talisman_parser.py | 2 +-
.../tools/test_trustwave_fusion_api_parser.py | 8 +-
unittests/tools/test_twistlock_parser.py | 8 +-
unittests/tools/test_veracode_parser.py | 2 +-
unittests/tools/test_yarn_audit_parser.py | 2 +-
346 files changed, 2184 insertions(+), 2184 deletions(-)
diff --git a/docker/install_chrome_dependencies.py b/docker/install_chrome_dependencies.py
index 2bf949c86c..c85372bf5d 100644
--- a/docker/install_chrome_dependencies.py
+++ b/docker/install_chrome_dependencies.py
@@ -25,7 +25,7 @@ def ldd(file_path):
# For simplicity, I'm assuming if we get an error, the code is non-zero.
try:
result = subprocess.run(
- ["ldd", file_path], capture_output=True, text=True
+ ["ldd", file_path], capture_output=True, text=True,
)
stdout = result.stdout
code = result.returncode
diff --git a/dojo/admin.py b/dojo/admin.py
index 68353f24ab..87823ff4d0 100644
--- a/dojo/admin.py
+++ b/dojo/admin.py
@@ -49,7 +49,7 @@ class QuestionParentAdmin(PolymorphicParentModelAdmin):
base_model = Question
child_models = (
TextQuestion,
- ChoiceQuestion
+ ChoiceQuestion,
)
diff --git a/dojo/announcement/signals.py b/dojo/announcement/signals.py
index 580da64a84..9a2682eddb 100644
--- a/dojo/announcement/signals.py
+++ b/dojo/announcement/signals.py
@@ -17,11 +17,11 @@ def add_announcement_to_new_user(sender, instance, **kwargs):
)
if not cloud_announcement or settings.CREATE_CLOUD_BANNER:
user_announcements = UserAnnouncement.objects.filter(
- user=dojo_user, announcement=announcement
+ user=dojo_user, announcement=announcement,
)
if user_announcements.count() == 0:
UserAnnouncement.objects.get_or_create(
- user=dojo_user, announcement=announcement
+ user=dojo_user, announcement=announcement,
)
@@ -31,8 +31,8 @@ def announcement_post_save(sender, instance, created, **kwargs):
UserAnnouncement.objects.bulk_create(
[
UserAnnouncement(
- user=user_id, announcement=instance
+ user=user_id, announcement=instance,
)
for user_id in Dojo_User.objects.all()
- ]
+ ],
)
diff --git a/dojo/announcement/views.py b/dojo/announcement/views.py
index 5c01ffaaf4..6b0cb16bc3 100644
--- a/dojo/announcement/views.py
+++ b/dojo/announcement/views.py
@@ -28,7 +28,7 @@ def configure_announcement(request):
"message": announcement.message,
"style": announcement.style,
"dismissable": announcement.dismissable,
- }
+ },
)
remove = True
except Announcement.DoesNotExist:
@@ -64,14 +64,14 @@ def configure_announcement(request):
request=request,
)
return render(
- request, "dojo/announcement.html", {"form": form, "remove": remove}
+ request, "dojo/announcement.html", {"form": form, "remove": remove},
)
def dismiss_announcement(request):
if request.method == "POST":
deleted_count, _objects_deleted = UserAnnouncement.objects.filter(
- user=request.user, announcement=1
+ user=request.user, announcement=1,
).delete()
if deleted_count > 0:
messages.add_message(
diff --git a/dojo/api_v2/exception_handler.py b/dojo/api_v2/exception_handler.py
index b4d8143366..513c98004b 100644
--- a/dojo/api_v2/exception_handler.py
+++ b/dojo/api_v2/exception_handler.py
@@ -54,7 +54,7 @@ def custom_exception_handler(exc, context):
# message, if it is different from the detail that is already
# in the response.
if isinstance(response.data, dict) and str(
- exc
+ exc,
) != response.data.get("detail", ""):
response.data["message"] = str(exc)
else:
diff --git a/dojo/api_v2/mixins.py b/dojo/api_v2/mixins.py
index 749f7ab8b5..e32683c374 100644
--- a/dojo/api_v2/mixins.py
+++ b/dojo/api_v2/mixins.py
@@ -15,7 +15,7 @@ class DeletePreviewModelMixin:
@extend_schema(
methods=["GET"],
responses={
- status.HTTP_200_OK: serializers.DeletePreviewSerializer(many=True)
+ status.HTTP_200_OK: serializers.DeletePreviewSerializer(many=True),
},
)
@action(detail=True, methods=["get"], filter_backends=[], suffix="List")
diff --git a/dojo/api_v2/permissions.py b/dojo/api_v2/permissions.py
index 10991bb3e5..f766982683 100644
--- a/dojo/api_v2/permissions.py
+++ b/dojo/api_v2/permissions.py
@@ -62,7 +62,7 @@ def check_object_permission(
class UserHasAppAnalysisPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Technology_Add
+ request, Product, "product", Permissions.Technology_Add,
)
def has_object_permission(self, request, view, obj):
@@ -79,22 +79,22 @@ class UserHasCredentialPermission(permissions.BasePermission):
def has_permission(self, request, view):
if request.data.get("product") is not None:
return check_post_permission(
- request, Cred_Mapping, "product", Permissions.Credential_Add
+ request, Cred_Mapping, "product", Permissions.Credential_Add,
)
if request.data.get("engagement") is not None:
return check_post_permission(
- request, Cred_Mapping, "engagement", Permissions.Credential_Add
+ request, Cred_Mapping, "engagement", Permissions.Credential_Add,
)
if request.data.get("test") is not None:
return check_post_permission(
- request, Cred_Mapping, "test", Permissions.Credential_Add
+ request, Cred_Mapping, "test", Permissions.Credential_Add,
)
if request.data.get("finding") is not None:
return check_post_permission(
- request, Cred_Mapping, "finding", Permissions.Credential_Add
+ request, Cred_Mapping, "finding", Permissions.Credential_Add,
)
return check_post_permission(
- request, Cred_Mapping, "product", Permissions.Credential_Add
+ request, Cred_Mapping, "product", Permissions.Credential_Add,
)
def has_object_permission(self, request, view, obj):
@@ -111,11 +111,11 @@ class UserHasDojoGroupPermission(permissions.BasePermission):
def has_permission(self, request, view):
if request.method == "GET":
return user_has_configuration_permission(
- request.user, "auth.view_group"
+ request.user, "auth.view_group",
)
elif request.method == "POST":
return user_has_configuration_permission(
- request.user, "auth.add_group"
+ request.user, "auth.add_group",
)
else:
return True
@@ -126,9 +126,9 @@ def has_object_permission(self, request, view, obj):
# because with the group they can see user information that might
# be considered as confidential
return user_has_configuration_permission(
- request.user, "auth.view_group"
+ request.user, "auth.view_group",
) and user_has_permission(
- request.user, obj, Permissions.Group_View
+ request.user, obj, Permissions.Group_View,
)
else:
return check_object_permission(
@@ -143,7 +143,7 @@ def has_object_permission(self, request, view, obj):
class UserHasDojoGroupMemberPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Dojo_Group, "group", Permissions.Group_Manage_Members
+ request, Dojo_Group, "group", Permissions.Group_Manage_Members,
)
def has_object_permission(self, request, view, obj):
@@ -166,7 +166,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Product_Edit
+ request.user, object, Permissions.Product_Edit,
)
)
finding_id = request.data.get("finding", None)
@@ -175,7 +175,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Finding_Edit
+ request.user, object, Permissions.Finding_Edit,
)
)
endpoint_id = request.data.get("endpoint", None)
@@ -184,7 +184,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Endpoint_Edit
+ request.user, object, Permissions.Endpoint_Edit,
)
)
return has_permission_result
@@ -235,7 +235,7 @@ def has_object_permission(self, request, view, obj):
class UserHasToolProductSettingsPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Product_Edit
+ request, Product, "product", Permissions.Product_Edit,
)
def has_object_permission(self, request, view, obj):
@@ -251,7 +251,7 @@ def has_object_permission(self, request, view, obj):
class UserHasEndpointPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Endpoint_Add
+ request, Product, "product", Permissions.Endpoint_Add,
)
def has_object_permission(self, request, view, obj):
@@ -267,7 +267,7 @@ def has_object_permission(self, request, view, obj):
class UserHasEndpointStatusPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Endpoint, "endpoint", Permissions.Endpoint_Edit
+ request, Endpoint, "endpoint", Permissions.Endpoint_Edit,
)
def has_object_permission(self, request, view, obj):
@@ -288,10 +288,10 @@ class UserHasEngagementPermission(permissions.BasePermission):
def has_permission(self, request, view):
if UserHasEngagementPermission.path_engagement_post.match(
- request.path
+ request.path,
) or UserHasEngagementPermission.path_engagement.match(request.path):
return check_post_permission(
- request, Product, "product", Permissions.Engagement_Add
+ request, Product, "product", Permissions.Engagement_Add,
)
else:
# related object only need object permission
@@ -299,7 +299,7 @@ def has_permission(self, request, view):
def has_object_permission(self, request, view, obj):
if UserHasEngagementPermission.path_engagement_post.match(
- request.path
+ request.path,
) or UserHasEngagementPermission.path_engagement.match(request.path):
return check_object_permission(
request,
@@ -327,12 +327,12 @@ class UserHasRiskAcceptancePermission(permissions.BasePermission):
def has_permission(self, request, view):
if UserHasRiskAcceptancePermission.path_risk_acceptance_post.match(
- request.path
+ request.path,
) or UserHasRiskAcceptancePermission.path_risk_acceptance.match(
- request.path
+ request.path,
):
return check_post_permission(
- request, Product, "product", Permissions.Risk_Acceptance
+ request, Product, "product", Permissions.Risk_Acceptance,
)
else:
# related object only need object permission
@@ -340,9 +340,9 @@ def has_permission(self, request, view):
def has_object_permission(self, request, view, obj):
if UserHasRiskAcceptancePermission.path_risk_acceptance_post.match(
- request.path
+ request.path,
) or UserHasRiskAcceptancePermission.path_risk_acceptance.match(
- request.path
+ request.path,
):
return check_object_permission(
request,
@@ -375,12 +375,12 @@ def has_permission(self, request, view):
UserHasFindingPermission.path_finding_post.match(request.path)
or UserHasFindingPermission.path_finding.match(request.path)
or UserHasFindingPermission.path_stub_finding_post.match(
- request.path
+ request.path,
)
or UserHasFindingPermission.path_stub_finding.match(request.path)
):
return check_post_permission(
- request, Test, "test", Permissions.Finding_Add
+ request, Test, "test", Permissions.Finding_Add,
)
else:
# related object only need object permission
@@ -391,7 +391,7 @@ def has_object_permission(self, request, view, obj):
UserHasFindingPermission.path_finding_post.match(request.path)
or UserHasFindingPermission.path_finding.match(request.path)
or UserHasFindingPermission.path_stub_finding_post.match(
- request.path
+ request.path,
)
or UserHasFindingPermission.path_stub_finding.match(request.path)
):
@@ -433,7 +433,7 @@ def has_permission(self, request, view):
if engagement := converted_dict.get("engagement"):
# existing engagement, nothing special to check
return user_has_permission(
- request.user, engagement, Permissions.Import_Scan_Result
+ request.user, engagement, Permissions.Import_Scan_Result,
)
elif engagement_id := converted_dict.get("engagement_id"):
# engagement_id doesn't exist
@@ -488,7 +488,7 @@ def has_permission(self, request, view):
if product:
# existing product, nothing special to check
return user_has_permission(
- request.user, product, Permissions.Import_Scan_Result
+ request.user, product, Permissions.Import_Scan_Result,
)
elif product_id := converted_dict.get("product_id"):
# product_id doesn't exist
@@ -521,7 +521,7 @@ def has_object_permission(self, request, view, obj):
class UserHasProductMemberPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Product_Manage_Members
+ request, Product, "product", Permissions.Product_Manage_Members,
)
def has_object_permission(self, request, view, obj):
@@ -537,7 +537,7 @@ def has_object_permission(self, request, view, obj):
class UserHasProductGroupPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Product_Group_Add
+ request, Product, "product", Permissions.Product_Group_Add,
)
def has_object_permission(self, request, view, obj):
@@ -554,7 +554,7 @@ class UserHasProductTypePermission(permissions.BasePermission):
def has_permission(self, request, view):
if request.method == "POST":
return user_has_global_permission(
- request.user, Permissions.Product_Type_Add
+ request.user, Permissions.Product_Type_Add,
)
else:
return True
@@ -629,7 +629,7 @@ def has_permission(self, request, view):
if test := converted_dict.get("test"):
# existing test, nothing special to check
return user_has_permission(
- request.user, test, Permissions.Import_Scan_Result
+ request.user, test, Permissions.Import_Scan_Result,
)
elif test_id := converted_dict.get("test_id"):
# test_id doesn't exist
@@ -671,10 +671,10 @@ class UserHasTestPermission(permissions.BasePermission):
def has_permission(self, request, view):
if UserHasTestPermission.path_tests_post.match(
- request.path
+ request.path,
) or UserHasTestPermission.path_tests.match(request.path):
return check_post_permission(
- request, Engagement, "engagement", Permissions.Test_Add
+ request, Engagement, "engagement", Permissions.Test_Add,
)
else:
# related object only need object permission
@@ -682,7 +682,7 @@ def has_permission(self, request, view):
def has_object_permission(self, request, view, obj):
if UserHasTestPermission.path_tests_post.match(
- request.path
+ request.path,
) or UserHasTestPermission.path_tests.match(request.path):
return check_object_permission(
request,
@@ -705,7 +705,7 @@ def has_object_permission(self, request, view, obj):
class UserHasTestImportPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Test, "test", Permissions.Test_Edit
+ request, Test, "test", Permissions.Test_Edit,
)
def has_object_permission(self, request, view, obj):
@@ -721,7 +721,7 @@ def has_object_permission(self, request, view, obj):
class UserHasLanguagePermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Language_Add
+ request, Product, "product", Permissions.Language_Add,
)
def has_object_permission(self, request, view, obj):
@@ -763,7 +763,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Engagement_Edit
+ request.user, object, Permissions.Engagement_Edit,
)
)
product_id = request.data.get("product", None)
@@ -772,7 +772,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Product_Edit
+ request.user, object, Permissions.Product_Edit,
)
)
return has_permission_result
@@ -818,7 +818,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Engagement_Edit
+ request.user, object, Permissions.Engagement_Edit,
)
)
finding_id = request.data.get("finding", None)
@@ -827,7 +827,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Finding_Edit
+ request.user, object, Permissions.Finding_Edit,
)
)
finding_group_id = request.data.get("finding_group", None)
@@ -836,7 +836,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Finding_Group_Edit
+ request.user, object, Permissions.Finding_Group_Edit,
)
)
return has_permission_result
@@ -892,7 +892,7 @@ def has_permission(self, request, view):
class UserHasEngagementPresetPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Product_Edit
+ request, Product, "product", Permissions.Product_Edit,
)
def has_object_permission(self, request, view, obj):
@@ -990,7 +990,7 @@ def check_auto_create_permission(
if engagement:
# existing engagement, nothing special to check
return user_has_permission(
- user, engagement, Permissions.Import_Scan_Result
+ user, engagement, Permissions.Import_Scan_Result,
)
if product and product_name and engagement_name:
@@ -999,7 +999,7 @@ def check_auto_create_permission(
raise PermissionDenied(msg)
if not user_has_permission(
- user, product, Permissions.Import_Scan_Result
+ user, product, Permissions.Import_Scan_Result,
):
msg = f'No permission to import scans into product "{product_name}"'
raise PermissionDenied(msg)
@@ -1014,7 +1014,7 @@ def check_auto_create_permission(
if not product_type:
if not user_has_global_permission(
- user, Permissions.Product_Type_Add
+ user, Permissions.Product_Type_Add,
):
msg = f'No permission to create product_type "{product_type_name}"'
raise PermissionDenied(msg)
@@ -1023,7 +1023,7 @@ def check_auto_create_permission(
return True
else:
if not user_has_permission(
- user, product_type, Permissions.Product_Type_Add_Product
+ user, product_type, Permissions.Product_Type_Add_Product,
):
msg = f'No permission to create products in product_type "{product_type}"'
raise PermissionDenied(msg)
@@ -1051,7 +1051,7 @@ def has_permission(self, request, view):
class UserHasConfigurationPermissionSuperuser(
- permissions.DjangoModelPermissions
+ permissions.DjangoModelPermissions,
):
# Override map to also provide 'view' permissions
perms_map = {
diff --git a/dojo/api_v2/prefetch/prefetcher.py b/dojo/api_v2/prefetch/prefetcher.py
index 1b207c394d..79a4b0e731 100644
--- a/dojo/api_v2/prefetch/prefetcher.py
+++ b/dojo/api_v2/prefetch/prefetcher.py
@@ -26,7 +26,7 @@ def _is_model_serializer(obj):
# We process all the serializers found in the module SERIALIZER_DEFS_MODULE. We restrict the scope to avoid
# processing all the classes in the symbol table
available_serializers = inspect.getmembers(
- sys.modules[SERIALIZER_DEFS_MODULE], _is_model_serializer
+ sys.modules[SERIALIZER_DEFS_MODULE], _is_model_serializer,
)
for _, serializer in available_serializers:
@@ -86,7 +86,7 @@ def _prefetch(self, entry, fields_to_fetch):
# the serializer accordingly
many = utils._is_many_to_many_relation(field_meta)
field_data = extra_serializer(many=many).to_representation(
- field_value
+ field_value,
)
# For convenience in processing we store the field data in a list
field_data_list = (
diff --git a/dojo/api_v2/prefetch/schema.py b/dojo/api_v2/prefetch/schema.py
index 48892c4381..030a572a15 100644
--- a/dojo/api_v2/prefetch/schema.py
+++ b/dojo/api_v2/prefetch/schema.py
@@ -56,7 +56,7 @@ def prefetch_postprocessing_hook(result, generator, request, public):
prefetcher = _Prefetcher()
fields = _get_prefetchable_fields(
- serializer_classes[path]()
+ serializer_classes[path](),
)
field_names = [
@@ -87,8 +87,8 @@ def prefetch_postprocessing_hook(result, generator, request, public):
"type": "object",
"readOnly": True,
"additionalProperties": {
- "$ref": f"#/components/schemas/{fields_to_refname[name]}"
- }
+ "$ref": f"#/components/schemas/{fields_to_refname[name]}",
+ },
}
for name in field_names
}
diff --git a/dojo/api_v2/prefetch/utils.py b/dojo/api_v2/prefetch/utils.py
index 833fe9ae6e..de7ea2b383 100644
--- a/dojo/api_v2/prefetch/utils.py
+++ b/dojo/api_v2/prefetch/utils.py
@@ -39,7 +39,7 @@ def _get_prefetchable_fields(serializer):
def _is_field_prefetchable(field):
return _is_one_to_one_relation(field) or _is_many_to_many_relation(
- field
+ field,
)
meta = getattr(serializer, "Meta", None)
diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py
index 4de5d536d0..006edc63f8 100644
--- a/dojo/api_v2/serializers.py
+++ b/dojo/api_v2/serializers.py
@@ -167,22 +167,22 @@ class ImportStatisticsSerializer(serializers.Serializer):
help_text="Finding statistics of modifications made by the reimport. Only available when TRACK_IMPORT_HISTORY hass not disabled.",
)
after = SeverityStatusStatisticsSerializer(
- help_text="Finding statistics as stored in Defect Dojo after the import"
+ help_text="Finding statistics as stored in Defect Dojo after the import",
)
@extend_schema_field(
- serializers.ListField(child=serializers.CharField())
+ serializers.ListField(child=serializers.CharField()),
) # also takes basic python types
class TagListSerializerField(serializers.ListField):
child = serializers.CharField()
default_error_messages = {
"not_a_list": _(
- 'Expected a list of items but got type "{input_type}".'
+ 'Expected a list of items but got type "{input_type}".',
),
"invalid_json": _(
"Invalid json list. A tag list submitted in string"
- " form must be valid json."
+ " form must be valid json.",
),
"not_a_str": _("All list items must be of string type."),
}
@@ -256,7 +256,7 @@ def update(self, instance, validated_data):
to_be_tagged, validated_data = self._pop_tags(validated_data)
tag_object = super().update(
- instance, validated_data
+ instance, validated_data,
)
return self._save_tags(tag_object, to_be_tagged)
@@ -301,7 +301,7 @@ def __getitem__(self, item):
def __str__(self):
if self.pretty_print:
return json.dumps(
- self, sort_keys=True, indent=4, separators=(",", ": ")
+ self, sort_keys=True, indent=4, separators=(",", ": "),
)
else:
return json.dumps(self)
@@ -311,14 +311,14 @@ class RequestResponseSerializerField(serializers.ListSerializer):
child = DictField(child=serializers.CharField())
default_error_messages = {
"not_a_list": _(
- 'Expected a list of items but got type "{input_type}".'
+ 'Expected a list of items but got type "{input_type}".',
),
"invalid_json": _(
"Invalid json list. A tag list submitted in string"
- " form must be valid json."
+ " form must be valid json.",
),
"not_a_dict": _(
- "All list items must be of dict type with keys 'request' and 'response'"
+ "All list items must be of dict type with keys 'request' and 'response'",
),
"not_a_str": _("All values in the dict must be of string type."),
}
@@ -437,7 +437,7 @@ class UserSerializer(serializers.ModelSerializer):
configuration_permissions = serializers.PrimaryKeyRelatedField(
allow_null=True,
queryset=Permission.objects.filter(
- codename__in=get_configuration_permissions_codenames()
+ codename__in=get_configuration_permissions_codenames(),
),
many=True,
required=False,
@@ -469,10 +469,10 @@ def to_representation(self, instance):
allowed_configuration_permissions = set(
self.fields[
"configuration_permissions"
- ].child_relation.queryset.values_list("id", flat=True)
+ ].child_relation.queryset.values_list("id", flat=True),
)
ret["configuration_permissions"] = list(
- all_permissions.intersection(allowed_configuration_permissions)
+ all_permissions.intersection(allowed_configuration_permissions),
)
return ret
@@ -483,7 +483,7 @@ def update(self, instance, validated_data):
"user_permissions" in validated_data
): # This field was renamed from "configuration_permissions" in the meantime
new_configuration_permissions = set(
- validated_data.pop("user_permissions")
+ validated_data.pop("user_permissions"),
)
instance = super().update(instance, validated_data)
@@ -494,14 +494,14 @@ def update(self, instance, validated_data):
allowed_configuration_permissions = set(
self.fields[
"configuration_permissions"
- ].child_relation.queryset.all()
+ ].child_relation.queryset.all(),
)
non_configuration_permissions = (
set(instance.user_permissions.all())
- allowed_configuration_permissions
)
new_permissions = non_configuration_permissions.union(
- new_configuration_permissions
+ new_configuration_permissions,
)
instance.user_permissions.set(new_permissions)
@@ -518,7 +518,7 @@ def create(self, validated_data):
"user_permissions" in validated_data
): # This field was renamed from "configuration_permissions" in the meantime
new_configuration_permissions = set(
- validated_data.pop("user_permissions")
+ validated_data.pop("user_permissions"),
)
user = Dojo_User.objects.create(**validated_data)
@@ -582,7 +582,7 @@ class DojoGroupSerializer(serializers.ModelSerializer):
configuration_permissions = serializers.PrimaryKeyRelatedField(
allow_null=True,
queryset=Permission.objects.filter(
- codename__in=get_configuration_permissions_codenames()
+ codename__in=get_configuration_permissions_codenames(),
),
many=True,
required=False,
@@ -609,10 +609,10 @@ def to_representation(self, instance):
allowed_configuration_permissions = set(
self.fields[
"configuration_permissions"
- ].child_relation.queryset.values_list("id", flat=True)
+ ].child_relation.queryset.values_list("id", flat=True),
)
ret["configuration_permissions"] = list(
- all_permissions.intersection(allowed_configuration_permissions)
+ all_permissions.intersection(allowed_configuration_permissions),
)
return ret
@@ -624,7 +624,7 @@ def create(self, validated_data):
and "permissions" in validated_data["auth_group"]
): # This field was renamed from "configuration_permissions" in the meantime
new_configuration_permissions = set(
- validated_data.pop("auth_group")["permissions"]
+ validated_data.pop("auth_group")["permissions"],
)
instance = super().create(validated_data)
@@ -643,7 +643,7 @@ def update(self, instance, validated_data):
and "permissions" in validated_data["auth_group"]
): # This field was renamed from "configuration_permissions" in the meantime
new_configuration_permissions = set(
- validated_data.pop("auth_group")["permissions"]
+ validated_data.pop("auth_group")["permissions"],
)
instance = super().update(instance, validated_data)
@@ -654,14 +654,14 @@ def update(self, instance, validated_data):
allowed_configuration_permissions = set(
self.fields[
"configuration_permissions"
- ].child_relation.queryset.all()
+ ].child_relation.queryset.all(),
)
non_configuration_permissions = (
set(instance.auth_group.permissions.all())
- allowed_configuration_permissions
)
new_permissions = non_configuration_permissions.union(
- new_configuration_permissions
+ new_configuration_permissions,
)
instance.auth_group.permissions.set(new_permissions)
@@ -692,7 +692,7 @@ def validate(self, data):
or data.get("user") != self.instance.user
):
members = Dojo_Group_Member.objects.filter(
- group=data.get("group"), user=data.get("user")
+ group=data.get("group"), user=data.get("user"),
)
if members.count() > 0:
msg = "Dojo_Group_Member already exists"
@@ -701,7 +701,7 @@ def validate(self, data):
if self.instance is not None and not data.get("role").is_owner:
owners = (
Dojo_Group_Member.objects.filter(
- group=data.get("group"), role__is_owner=True
+ group=data.get("group"), role__is_owner=True,
)
.exclude(id=self.instance.id)
.count()
@@ -862,7 +862,7 @@ def validate(self, data):
or data.get("user") != self.instance.user
):
members = Product_Member.objects.filter(
- product=data.get("product"), user=data.get("user")
+ product=data.get("product"), user=data.get("user"),
)
if members.count() > 0:
msg = "Product_Member already exists"
@@ -903,7 +903,7 @@ def validate(self, data):
or data.get("group") != self.instance.group
):
members = Product_Group.objects.filter(
- product=data.get("product"), group=data.get("group")
+ product=data.get("product"), group=data.get("group"),
)
if members.count() > 0:
msg = "Product_Group already exists"
@@ -944,7 +944,7 @@ def validate(self, data):
or data.get("user") != self.instance.user
):
members = Product_Type_Member.objects.filter(
- product_type=data.get("product_type"), user=data.get("user")
+ product_type=data.get("product_type"), user=data.get("user"),
)
if members.count() > 0:
msg = "Product_Type_Member already exists"
@@ -953,7 +953,7 @@ def validate(self, data):
if self.instance is not None and not data.get("role").is_owner:
owners = (
Product_Type_Member.objects.filter(
- product_type=data.get("product_type"), role__is_owner=True
+ product_type=data.get("product_type"), role__is_owner=True,
)
.exclude(id=self.instance.id)
.count()
@@ -997,7 +997,7 @@ def validate(self, data):
or data.get("group") != self.instance.group
):
members = Product_Type_Group.objects.filter(
- product_type=data.get("product_type"), group=data.get("group")
+ product_type=data.get("product_type"), group=data.get("group"),
)
if members.count() > 0:
msg = "Product_Type_Group already exists"
@@ -1044,14 +1044,14 @@ def build_relational_field(self, field_name, relation_info):
class EngagementToNotesSerializer(serializers.Serializer):
engagement_id = serializers.PrimaryKeyRelatedField(
- queryset=Engagement.objects.all(), many=False, allow_null=True
+ queryset=Engagement.objects.all(), many=False, allow_null=True,
)
notes = NoteSerializer(many=True)
class EngagementToFilesSerializer(serializers.Serializer):
engagement_id = serializers.PrimaryKeyRelatedField(
- queryset=Engagement.objects.all(), many=False, allow_null=True
+ queryset=Engagement.objects.all(), many=False, allow_null=True,
)
files = FileSerializer(many=True)
@@ -1066,11 +1066,11 @@ def to_representation(self, data):
"file": "{site_url}/{file_access_url}".format(
site_url=settings.SITE_URL,
file_access_url=file.get_accessible_url(
- engagement, engagement.id
+ engagement, engagement.id,
),
),
"title": file.title,
- }
+ },
)
new_data = {"engagement_id": engagement.id, "files": new_files}
return new_data
@@ -1125,7 +1125,7 @@ class Meta:
class ToolProductSettingsSerializer(serializers.ModelSerializer):
setting_url = serializers.CharField(source="url")
product = serializers.PrimaryKeyRelatedField(
- queryset=Product.objects.all(), required=True
+ queryset=Product.objects.all(), required=True,
)
class Meta:
@@ -1143,7 +1143,7 @@ def create(self, validated_data):
finding = validated_data.get("finding")
try:
status = Endpoint_Status.objects.create(
- finding=finding, endpoint=endpoint
+ finding=finding, endpoint=endpoint,
)
except IntegrityError as ie:
if "endpoint-finding relation" in str(ie):
@@ -1279,7 +1279,7 @@ def validate(self, data):
engagement = data.get("engagement", self.instance.engagement)
finding = data.get("finding", self.instance.finding)
finding_group = data.get(
- "finding_group", self.instance.finding_group
+ "finding_group", self.instance.finding_group,
)
else:
engagement = data.get("engagement", None)
@@ -1364,7 +1364,7 @@ class TestSerializer(TaggitSerializer, serializers.ModelSerializer):
tags = TagListSerializerField(required=False)
test_type_name = serializers.ReadOnlyField()
finding_groups = FindingGroupSerializer(
- source="finding_group_set", many=True, read_only=True
+ source="finding_group_set", many=True, read_only=True,
)
class Meta:
@@ -1381,7 +1381,7 @@ def build_relational_field(self, field_name, relation_info):
class TestCreateSerializer(TaggitSerializer, serializers.ModelSerializer):
engagement = serializers.PrimaryKeyRelatedField(
- queryset=Engagement.objects.all()
+ queryset=Engagement.objects.all(),
)
notes = serializers.PrimaryKeyRelatedField(
allow_null=True,
@@ -1406,14 +1406,14 @@ class Meta:
class TestToNotesSerializer(serializers.Serializer):
test_id = serializers.PrimaryKeyRelatedField(
- queryset=Test.objects.all(), many=False, allow_null=True
+ queryset=Test.objects.all(), many=False, allow_null=True,
)
notes = NoteSerializer(many=True)
class TestToFilesSerializer(serializers.Serializer):
test_id = serializers.PrimaryKeyRelatedField(
- queryset=Test.objects.all(), many=False, allow_null=True
+ queryset=Test.objects.all(), many=False, allow_null=True,
)
files = FileSerializer(many=True)
@@ -1427,7 +1427,7 @@ def to_representation(self, data):
"id": file.id,
"file": f"{settings.SITE_URL}/{file.get_accessible_url(test, test.id)}",
"title": file.title,
- }
+ },
)
new_data = {"test_id": test.id, "files": new_files}
return new_data
@@ -1442,7 +1442,7 @@ class Meta:
class TestImportSerializer(serializers.ModelSerializer):
# findings = TestImportFindingActionSerializer(source='test_import_finding_action', many=True, read_only=True)
test_import_finding_action_set = TestImportFindingActionSerializer(
- many=True, read_only=True
+ many=True, read_only=True,
)
class Meta:
@@ -1489,12 +1489,12 @@ def get_decision(self, obj):
@extend_schema_field(serializers.CharField())
def get_path(self, obj):
engagement = Engagement.objects.filter(
- risk_acceptance__id__in=[obj.id]
+ risk_acceptance__id__in=[obj.id],
).first()
path = "No proof has been supplied"
if engagement and obj.filename() is not None:
path = reverse(
- "download_risk_acceptance", args=(engagement.id, obj.id)
+ "download_risk_acceptance", args=(engagement.id, obj.id),
)
request = self.context.get("request")
if request:
@@ -1504,10 +1504,10 @@ def get_path(self, obj):
@extend_schema_field(serializers.IntegerField())
def get_engagement(self, obj):
engagement = Engagement.objects.filter(
- risk_acceptance__id__in=[obj.id]
+ risk_acceptance__id__in=[obj.id],
).first()
return EngagementSerializer(read_only=True).to_representation(
- engagement
+ engagement,
)
def validate(self, data):
@@ -1618,7 +1618,7 @@ class FindingRelatedFieldsSerializer(serializers.Serializer):
@extend_schema_field(FindingTestSerializer)
def get_test(self, obj):
return FindingTestSerializer(read_only=True).to_representation(
- obj.test
+ obj.test,
)
@extend_schema_field(JIRAIssueSerializer)
@@ -1639,7 +1639,7 @@ class FindingSerializer(TaggitSerializer, serializers.ModelSerializer):
tags = TagListSerializerField(required=False)
request_response = serializers.SerializerMethodField()
accepted_risks = RiskAcceptanceSerializer(
- many=True, read_only=True, source="risk_acceptance_set"
+ many=True, read_only=True, source="risk_acceptance_set",
)
push_to_jira = serializers.BooleanField(default=False)
age = serializers.IntegerField(read_only=True)
@@ -1651,13 +1651,13 @@ class FindingSerializer(TaggitSerializer, serializers.ModelSerializer):
jira_change = serializers.SerializerMethodField(read_only=True)
display_status = serializers.SerializerMethodField()
finding_groups = FindingGroupSerializer(
- source="finding_group_set", many=True, read_only=True
+ source="finding_group_set", many=True, read_only=True,
)
vulnerability_ids = VulnerabilityIdSerializer(
- source="vulnerability_id_set", many=True, required=False
+ source="vulnerability_id_set", many=True, required=False,
)
reporter = serializers.PrimaryKeyRelatedField(
- required=False, queryset=User.objects.all()
+ required=False, queryset=User.objects.all(),
)
class Meta:
@@ -1684,7 +1684,7 @@ def get_related_fields(self, obj):
query_params = request.query_params
if query_params.get("related_fields", "false") == "true":
return FindingRelatedFieldsSerializer(
- required=False
+ required=False,
).to_representation(obj)
else:
return None
@@ -1701,7 +1701,7 @@ def update(self, instance, validated_data):
# TODO: JIRA can we remove this is_push_all_issues, already checked in
# apiv2 viewset?
push_to_jira = validated_data.pop(
- "push_to_jira"
+ "push_to_jira",
) or jira_helper.is_push_all_issues(instance)
# Save vulnerability ids and pop them
@@ -1711,12 +1711,12 @@ def update(self, instance, validated_data):
if vulnerability_id_set:
for vulnerability_id in vulnerability_id_set:
vulnerability_ids.append(
- vulnerability_id["vulnerability_id"]
+ vulnerability_id["vulnerability_id"],
)
save_vulnerability_ids(instance, vulnerability_ids)
instance = super(TaggitSerializer, self).update(
- instance, validated_data
+ instance, validated_data,
)
# Save the reporter on the finding
if reporter_id := validated_data.get("reporter"):
@@ -1741,7 +1741,7 @@ def validate(self, data):
is_duplicate = data.get("duplicate", self.instance.duplicate)
is_false_p = data.get("false_p", self.instance.false_p)
is_risk_accepted = data.get(
- "risk_accepted", self.instance.risk_accepted
+ "risk_accepted", self.instance.risk_accepted,
)
else:
is_active = data.get("active", True)
@@ -1794,28 +1794,28 @@ def get_request_response(self, obj):
response = burp.get_response()
burp_list.append({"request": request, "response": response})
serialized_burps = BurpRawRequestResponseSerializer(
- {"req_resp": burp_list}
+ {"req_resp": burp_list},
)
return serialized_burps.data
class FindingCreateSerializer(TaggitSerializer, serializers.ModelSerializer):
notes = serializers.PrimaryKeyRelatedField(
- read_only=True, allow_null=True, required=False, many=True
+ read_only=True, allow_null=True, required=False, many=True,
)
test = serializers.PrimaryKeyRelatedField(queryset=Test.objects.all())
thread_id = serializers.IntegerField(default=0)
found_by = serializers.PrimaryKeyRelatedField(
- queryset=Test_Type.objects.all(), many=True
+ queryset=Test_Type.objects.all(), many=True,
)
url = serializers.CharField(allow_null=True, default=None)
tags = TagListSerializerField(required=False)
push_to_jira = serializers.BooleanField(default=False)
vulnerability_ids = VulnerabilityIdSerializer(
- source="vulnerability_id_set", many=True, required=False
+ source="vulnerability_id_set", many=True, required=False,
)
reporter = serializers.PrimaryKeyRelatedField(
- required=False, queryset=User.objects.all()
+ required=False, queryset=User.objects.all(),
)
class Meta:
@@ -1857,7 +1857,7 @@ def create(self, validated_data):
# TODO: JIRA can we remove this is_push_all_issues, already checked in
# apiv2 viewset?
push_to_jira = push_to_jira or jira_helper.is_push_all_issues(
- new_finding
+ new_finding,
)
# If we need to push to JIRA, an extra save call is needed.
@@ -1877,7 +1877,7 @@ def validate(self, data):
data["reporter"] = request.user
if (data.get("active") or data.get("verified")) and data.get(
- "duplicate"
+ "duplicate",
):
msg = "Duplicate findings cannot be verified or active"
raise serializers.ValidationError(msg)
@@ -1918,7 +1918,7 @@ class Meta:
class FindingTemplateSerializer(TaggitSerializer, serializers.ModelSerializer):
tags = TagListSerializerField(required=False)
vulnerability_ids = VulnerabilityIdTemplateSerializer(
- source="vulnerability_id_template_set", many=True, required=False
+ source="vulnerability_id_template_set", many=True, required=False,
)
class Meta:
@@ -1929,13 +1929,13 @@ def create(self, validated_data):
# Save vulnerability ids and pop them
if "vulnerability_id_template_set" in validated_data:
vulnerability_id_set = validated_data.pop(
- "vulnerability_id_template_set"
+ "vulnerability_id_template_set",
)
else:
vulnerability_id_set = None
new_finding_template = super(TaggitSerializer, self).create(
- validated_data
+ validated_data,
)
if vulnerability_id_set:
@@ -1944,7 +1944,7 @@ def create(self, validated_data):
vulnerability_ids.append(vulnerability_id["vulnerability_id"])
validated_data["cve"] = vulnerability_ids[0]
save_vulnerability_ids_template(
- new_finding_template, vulnerability_ids
+ new_finding_template, vulnerability_ids,
)
new_finding_template.save()
@@ -1954,13 +1954,13 @@ def update(self, instance, validated_data):
# Save vulnerability ids and pop them
if "vulnerability_id_template_set" in validated_data:
vulnerability_id_set = validated_data.pop(
- "vulnerability_id_template_set"
+ "vulnerability_id_template_set",
)
vulnerability_ids = []
if vulnerability_id_set:
for vulnerability_id in vulnerability_id_set:
vulnerability_ids.append(
- vulnerability_id["vulnerability_id"]
+ vulnerability_id["vulnerability_id"],
)
save_vulnerability_ids_template(instance, vulnerability_ids)
@@ -2020,7 +2020,7 @@ class Meta:
exclude = (
"tid",
"updated",
- "async_updating"
+ "async_updating",
)
def validate(self, data):
@@ -2053,10 +2053,10 @@ class ImportScanSerializer(serializers.Serializer):
help_text="Minimum severity level to be imported",
)
active = serializers.BooleanField(
- help_text="Override the active setting from the tool."
+ help_text="Override the active setting from the tool.",
)
verified = serializers.BooleanField(
- help_text="Override the verified setting from the tool."
+ help_text="Override the verified setting from the tool.",
)
scan_type = serializers.ChoiceField(choices=get_choices_sorted())
# TODO why do we allow only existing endpoints?
@@ -2080,16 +2080,16 @@ class ImportScanSerializer(serializers.Serializer):
help_text="Resource link to source code",
)
engagement = serializers.PrimaryKeyRelatedField(
- queryset=Engagement.objects.all(), required=False
+ queryset=Engagement.objects.all(), required=False,
)
test_title = serializers.CharField(required=False)
auto_create_context = serializers.BooleanField(required=False)
deduplication_on_engagement = serializers.BooleanField(required=False)
lead = serializers.PrimaryKeyRelatedField(
- allow_null=True, default=None, queryset=User.objects.all()
+ allow_null=True, default=None, queryset=User.objects.all(),
)
tags = TagListSerializerField(
- required=False, allow_empty=True, help_text="Add tags that help describe this scan."
+ required=False, allow_empty=True, help_text="Add tags that help describe this scan.",
)
close_old_findings = serializers.BooleanField(
required=False,
@@ -2106,16 +2106,16 @@ class ImportScanSerializer(serializers.Serializer):
push_to_jira = serializers.BooleanField(default=False)
environment = serializers.CharField(required=False)
version = serializers.CharField(
- required=False, help_text="Version that was scanned."
+ required=False, help_text="Version that was scanned.",
)
build_id = serializers.CharField(
- required=False, help_text="ID of the build that was scanned."
+ required=False, help_text="ID of the build that was scanned.",
)
branch_tag = serializers.CharField(
- required=False, help_text="Branch or Tag that was scanned."
+ required=False, help_text="Branch or Tag that was scanned.",
)
commit_hash = serializers.CharField(
- required=False, help_text="Commit that was scanned."
+ required=False, help_text="Commit that was scanned.",
)
api_scan_configuration = serializers.PrimaryKeyRelatedField(
allow_null=True,
@@ -2142,7 +2142,7 @@ class ImportScanSerializer(serializers.Serializer):
# need to use the _id suffix as without the serializer framework gets
# confused
test = serializers.IntegerField(
- read_only=True
+ read_only=True,
) # left for backwards compatibility
test_id = serializers.IntegerField(read_only=True)
engagement_id = serializers.IntegerField(read_only=True)
@@ -2170,7 +2170,7 @@ def set_context(
# update some vars
context["scan"] = data.pop("file", None)
context["environment"] = Development_Environment.objects.get(
- name=data.get("environment", "Development")
+ name=data.get("environment", "Development"),
)
# Set the active/verified status based upon the overrides
if "active" in self.initial_data:
@@ -2197,7 +2197,7 @@ def set_context(
# the API would fail (but unit tests for api upload would pass...)
context["scan_date"] = (
timezone.make_aware(
- datetime.combine(context.get("scan_date"), datetime.min.time())
+ datetime.combine(context.get("scan_date"), datetime.min.time()),
)
if context.get("scan_date")
else None
@@ -2241,7 +2241,7 @@ def get_importer(
def process_scan(
self,
data: dict,
- context: dict
+ context: dict,
) -> None:
"""
Process the scan with all of the supplied data fully massaged
@@ -2252,7 +2252,7 @@ def process_scan(
try:
importer = self.get_importer(**context)
context["test"], _, _, _, _, _, _ = importer.process_scan(
- context.pop("scan", None)
+ context.pop("scan", None),
)
# Update the response body with some new data
if test := context.get("test"):
@@ -2318,20 +2318,20 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
help_text="Minimum severity level to be imported",
)
active = serializers.BooleanField(
- help_text="Override the active setting from the tool."
+ help_text="Override the active setting from the tool.",
)
verified = serializers.BooleanField(
- help_text="Override the verified setting from the tool."
+ help_text="Override the verified setting from the tool.",
)
help_do_not_reactivate = "Select if the import should ignore active findings from the report, useful for triage-less scanners. Will keep existing findings closed, without reactivating them. For more information check the docs."
do_not_reactivate = serializers.BooleanField(
- default=False, required=False, help_text=help_do_not_reactivate
+ default=False, required=False, help_text=help_do_not_reactivate,
)
scan_type = serializers.ChoiceField(
- choices=get_choices_sorted(), required=True
+ choices=get_choices_sorted(), required=True,
)
endpoint_to_add = serializers.PrimaryKeyRelatedField(
- queryset=Endpoint.objects.all(), default=None, required=False
+ queryset=Endpoint.objects.all(), default=None, required=False,
)
file = serializers.FileField(allow_empty_file=True, required=False)
product_type_name = serializers.CharField(required=False)
@@ -2347,7 +2347,7 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
help_text="Resource link to source code",
)
test = serializers.PrimaryKeyRelatedField(
- required=False, queryset=Test.objects.all()
+ required=False, queryset=Test.objects.all(),
)
test_title = serializers.CharField(required=False)
auto_create_context = serializers.BooleanField(required=False)
@@ -2374,13 +2374,13 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
help_text="Version that will be set on existing Test object. Leave empty to leave existing value in place.",
)
build_id = serializers.CharField(
- required=False, help_text="ID of the build that was scanned."
+ required=False, help_text="ID of the build that was scanned.",
)
branch_tag = serializers.CharField(
- required=False, help_text="Branch or Tag that was scanned."
+ required=False, help_text="Branch or Tag that was scanned.",
)
commit_hash = serializers.CharField(
- required=False, help_text="Commit that was scanned."
+ required=False, help_text="Commit that was scanned.",
)
api_scan_configuration = serializers.PrimaryKeyRelatedField(
allow_null=True,
@@ -2395,7 +2395,7 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
)
environment = serializers.CharField(required=False)
lead = serializers.PrimaryKeyRelatedField(
- allow_null=True, default=None, queryset=User.objects.all()
+ allow_null=True, default=None, queryset=User.objects.all(),
)
tags = TagListSerializerField(
required=False,
@@ -2419,7 +2419,7 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
# confused
test_id = serializers.IntegerField(read_only=True)
engagement_id = serializers.IntegerField(
- read_only=True
+ read_only=True,
) # need to use the _id suffix as without the serializer framework gets confused
product_id = serializers.IntegerField(read_only=True)
product_type_id = serializers.IntegerField(read_only=True)
@@ -2427,7 +2427,7 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
statistics = ImportStatisticsSerializer(read_only=True, required=False)
apply_tags_to_findings = serializers.BooleanField(
help_text="If set to True, the tags will be applied to the findings",
- required=False
+ required=False,
)
apply_tags_to_endpoints = serializers.BooleanField(
help_text="If set to True, the tags will be applied to the endpoints",
@@ -2446,7 +2446,7 @@ def set_context(
# update some vars
context["scan"] = data.get("file", None)
context["environment"] = Development_Environment.objects.get(
- name=data.get("environment", "Development")
+ name=data.get("environment", "Development"),
)
# Set the active/verified status based upon the overrides
if "active" in self.initial_data:
@@ -2473,7 +2473,7 @@ def set_context(
# the API would fail (but unit tests for api upload would pass...)
context["scan_date"] = (
timezone.make_aware(
- datetime.combine(context.get("scan_date"), datetime.min.time())
+ datetime.combine(context.get("scan_date"), datetime.min.time()),
)
if context.get("scan_date")
else None
@@ -2540,9 +2540,9 @@ def process_scan(
if test := context.get("test"):
statistics_before = test.statistics
context["test"], _, _, _, _, _, test_import = self.get_reimporter(
- **context
+ **context,
).process_scan(
- context.pop("scan", None)
+ context.pop("scan", None),
)
if test_import:
statistics_delta = test_import.statistics
@@ -2551,9 +2551,9 @@ def process_scan(
logger.debug("reimport for non-existing test, using import to create new test")
context["engagement"] = auto_create_manager.get_or_create_engagement(**context)
context["test"], _, _, _, _, _, _ = self.get_importer(
- **context
+ **context,
).process_scan(
- context.pop("scan", None)
+ context.pop("scan", None),
)
else:
msg = "A test could not be found!"
@@ -2626,7 +2626,7 @@ class EndpointMetaImporterSerializer(serializers.Serializer):
create_dojo_meta = serializers.BooleanField(default=False, required=False)
product_name = serializers.CharField(required=False)
product = serializers.PrimaryKeyRelatedField(
- queryset=Product.objects.all(), required=False
+ queryset=Product.objects.all(), required=False,
)
# extra fields populated in response
# need to use the _id suffix as without the serializer framework gets
@@ -2689,7 +2689,7 @@ class Meta:
class ImportLanguagesSerializer(serializers.Serializer):
product = serializers.PrimaryKeyRelatedField(
- queryset=Product.objects.all(), required=True
+ queryset=Product.objects.all(), required=True,
)
file = serializers.FileField(required=True)
@@ -2721,7 +2721,7 @@ def save(self):
) = Language_Type.objects.get_or_create(language=name)
except Language_Type.MultipleObjectsReturned:
language_type = Language_Type.objects.filter(
- language=name
+ language=name,
).first()
language = Languages()
@@ -2754,14 +2754,14 @@ class Meta:
class FindingToNotesSerializer(serializers.Serializer):
finding_id = serializers.PrimaryKeyRelatedField(
- queryset=Finding.objects.all(), many=False, allow_null=True
+ queryset=Finding.objects.all(), many=False, allow_null=True,
)
notes = NoteSerializer(many=True)
class FindingToFilesSerializer(serializers.Serializer):
finding_id = serializers.PrimaryKeyRelatedField(
- queryset=Finding.objects.all(), many=False, allow_null=True
+ queryset=Finding.objects.all(), many=False, allow_null=True,
)
files = FileSerializer(many=True)
@@ -2776,11 +2776,11 @@ def to_representation(self, data):
"file": "{site_url}/{file_access_url}".format(
site_url=settings.SITE_URL,
file_access_url=file.get_accessible_url(
- finding, finding.id
+ finding, finding.id,
),
),
"title": file.title,
- }
+ },
)
new_data = {"finding_id": finding.id, "files": new_files}
return new_data
@@ -2820,7 +2820,7 @@ class ExecutiveSummarySerializer(serializers.Serializer):
test_target_end = serializers.DateTimeField()
test_environment_name = serializers.CharField(max_length=200)
test_strategy_ref = serializers.URLField(
- max_length=200, min_length=None, allow_blank=True
+ max_length=200, min_length=None, allow_blank=True,
)
total_findings = serializers.IntegerField()
@@ -2842,7 +2842,7 @@ class ReportGenerateSerializer(serializers.Serializer):
user_id = serializers.IntegerField()
host = serializers.CharField(max_length=200)
finding_notes = FindingToNotesSerializer(
- many=True, allow_null=True, required=False
+ many=True, allow_null=True, required=False,
)
@@ -2892,55 +2892,55 @@ class NotificationsSerializer(serializers.ModelSerializer):
allow_null=True,
)
product_type_added = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
product_added = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
engagement_added = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
test_added = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
scan_added = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
jira_update = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
upcoming_engagement = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
stale_engagement = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
auto_close_engagement = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
close_engagement = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
user_mentioned = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
code_review = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
review_requested = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
other = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
sla_breach = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
sla_breach_combined = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
risk_acceptance_expiration = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
template = serializers.BooleanField(default=False)
@@ -2967,7 +2967,7 @@ def validate(self, data):
or product != self.instance.product
):
notifications = Notifications.objects.filter(
- user=user, product=product, template=False
+ user=user, product=product, template=False,
).count()
if notifications > 0:
msg = "Notification for user and product already exists"
diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py
index d0fe775b07..c0a6f14229 100644
--- a/dojo/api_v2/views.py
+++ b/dojo/api_v2/views.py
@@ -288,7 +288,7 @@ def get_queryset(self):
return get_authorized_group_members(Permissions.Group_View).distinct()
@extend_schema(
- exclude=True
+ exclude=True,
)
def partial_update(self, request, pk=None):
# Object authorization won't work if not all data is provided
@@ -329,7 +329,7 @@ def get_queryset(self):
responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer},
)
@action(
- detail=True, methods=["post"], permission_classes=[IsAuthenticated]
+ detail=True, methods=["post"], permission_classes=[IsAuthenticated],
)
def generate_report(self, request, pk=None):
endpoint = self.get_object()
@@ -337,7 +337,7 @@ def generate_report(self, request, pk=None):
options = {}
# prepare post data
report_options = serializers.ReportGenerateOptionSerializer(
- data=request.data
+ data=request.data,
)
if report_options.is_valid():
options["include_finding_notes"] = report_options.validated_data[
@@ -354,7 +354,7 @@ def generate_report(self, request, pk=None):
] = report_options.validated_data["include_table_of_contents"]
else:
return Response(
- report_options.errors, status=status.HTTP_400_BAD_REQUEST
+ report_options.errors, status=status.HTTP_400_BAD_REQUEST,
)
data = report_generate(request, endpoint, options)
@@ -386,7 +386,7 @@ class EndpointStatusViewSet(
def get_queryset(self):
return get_authorized_endpoint_status(
- Permissions.Endpoint_View
+ Permissions.Endpoint_View,
).distinct()
@@ -426,7 +426,7 @@ def get_queryset(self):
)
@extend_schema(
- request=OpenApiTypes.NONE, responses={status.HTTP_200_OK: ""}
+ request=OpenApiTypes.NONE, responses={status.HTTP_200_OK: ""},
)
@action(detail=True, methods=["post"])
def close(self, request, pk=None):
@@ -435,7 +435,7 @@ def close(self, request, pk=None):
return HttpResponse()
@extend_schema(
- request=OpenApiTypes.NONE, responses={status.HTTP_200_OK: ""}
+ request=OpenApiTypes.NONE, responses={status.HTTP_200_OK: ""},
)
@action(detail=True, methods=["post"])
def reopen(self, request, pk=None):
@@ -448,7 +448,7 @@ def reopen(self, request, pk=None):
responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer},
)
@action(
- detail=True, methods=["post"], permission_classes=[IsAuthenticated]
+ detail=True, methods=["post"], permission_classes=[IsAuthenticated],
)
def generate_report(self, request, pk=None):
engagement = self.get_object()
@@ -456,7 +456,7 @@ def generate_report(self, request, pk=None):
options = {}
# prepare post data
report_options = serializers.ReportGenerateOptionSerializer(
- data=request.data
+ data=request.data,
)
if report_options.is_valid():
options["include_finding_notes"] = report_options.validated_data[
@@ -473,7 +473,7 @@ def generate_report(self, request, pk=None):
] = report_options.validated_data["include_table_of_contents"]
else:
return Response(
- report_options.errors, status=status.HTTP_400_BAD_REQUEST
+ report_options.errors, status=status.HTTP_400_BAD_REQUEST,
)
data = report_generate(request, engagement, options)
@@ -483,7 +483,7 @@ def generate_report(self, request, pk=None):
@extend_schema(
methods=["GET"],
responses={
- status.HTTP_200_OK: serializers.EngagementToNotesSerializer
+ status.HTTP_200_OK: serializers.EngagementToNotesSerializer,
},
)
@extend_schema(
@@ -496,7 +496,7 @@ def notes(self, request, pk=None):
engagement = self.get_object()
if request.method == "POST":
new_note = serializers.AddNewNoteOptionSerializer(
- data=request.data
+ data=request.data,
)
if new_note.is_valid():
entry = new_note.validated_data["entry"]
@@ -504,7 +504,7 @@ def notes(self, request, pk=None):
note_type = new_note.validated_data.get("note_type", None)
else:
return Response(
- new_note.errors, status=status.HTTP_400_BAD_REQUEST
+ new_note.errors, status=status.HTTP_400_BAD_REQUEST,
)
author = request.user
@@ -518,22 +518,22 @@ def notes(self, request, pk=None):
engagement.notes.add(note)
serialized_note = serializers.NoteSerializer(
- {"author": author, "entry": entry, "private": private}
+ {"author": author, "entry": entry, "private": private},
)
return Response(
- serialized_note.data, status=status.HTTP_201_CREATED
+ serialized_note.data, status=status.HTTP_201_CREATED,
)
notes = engagement.notes.all()
serialized_notes = serializers.EngagementToNotesSerializer(
- {"engagement_id": engagement, "notes": notes}
+ {"engagement_id": engagement, "notes": notes},
)
return Response(serialized_notes.data, status=status.HTTP_200_OK)
@extend_schema(
methods=["GET"],
responses={
- status.HTTP_200_OK: serializers.EngagementToFilesSerializer
+ status.HTTP_200_OK: serializers.EngagementToFilesSerializer,
},
)
@extend_schema(
@@ -542,7 +542,7 @@ def notes(self, request, pk=None):
responses={status.HTTP_201_CREATED: serializers.FileSerializer},
)
@action(
- detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,)
+ detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,),
)
def files(self, request, pk=None):
engagement = self.get_object()
@@ -553,7 +553,7 @@ def files(self, request, pk=None):
file = new_file.validated_data["file"]
else:
return Response(
- new_file.errors, status=status.HTTP_400_BAD_REQUEST
+ new_file.errors, status=status.HTTP_400_BAD_REQUEST,
)
file = FileUpload(title=title, file=file)
@@ -562,12 +562,12 @@ def files(self, request, pk=None):
serialized_file = serializers.FileSerializer(file)
return Response(
- serialized_file.data, status=status.HTTP_201_CREATED
+ serialized_file.data, status=status.HTTP_201_CREATED,
)
files = engagement.files.all()
serialized_files = serializers.EngagementToFilesSerializer(
- {"engagement_id": engagement, "files": files}
+ {"engagement_id": engagement, "files": files},
)
return Response(serialized_files.data, status=status.HTTP_200_OK)
@@ -575,7 +575,7 @@ def files(self, request, pk=None):
methods=["POST"],
request=serializers.EngagementCheckListSerializer,
responses={
- status.HTTP_201_CREATED: serializers.EngagementCheckListSerializer
+ status.HTTP_201_CREATED: serializers.EngagementCheckListSerializer,
},
)
@action(detail=True, methods=["get", "post"])
@@ -588,25 +588,25 @@ def complete_checklist(self, request, pk=None):
if check_lists.count() > 0:
return Response(
{
- "message": "A completed checklist for this engagement already exists."
+ "message": "A completed checklist for this engagement already exists.",
},
status=status.HTTP_400_BAD_REQUEST,
)
check_list = serializers.EngagementCheckListSerializer(
- data=request.data
+ data=request.data,
)
if not check_list.is_valid():
return Response(
- check_list.errors, status=status.HTTP_400_BAD_REQUEST
+ check_list.errors, status=status.HTTP_400_BAD_REQUEST,
)
check_list = Check_List(**check_list.data)
check_list.engagement = engagement
check_list.save()
serialized_check_list = serializers.EngagementCheckListSerializer(
- check_list
+ check_list,
)
return Response(
- serialized_check_list.data, status=status.HTTP_201_CREATED
+ serialized_check_list.data, status=status.HTTP_201_CREATED,
)
prefetch_params = request.GET.get("prefetch", "").split(",")
prefetcher = _Prefetcher()
@@ -658,7 +658,7 @@ def download_file(self, request, file_id, pk=None):
class RiskAcceptanceViewSet(
- PrefetchDojoModelViewSet
+ PrefetchDojoModelViewSet,
):
serializer_class = serializers.RiskAcceptanceSerializer
queryset = Risk_Acceptance.objects.none()
@@ -682,7 +682,7 @@ def get_queryset(self):
return (
get_authorized_risk_acceptances(Permissions.Risk_Acceptance)
.prefetch_related(
- "notes", "engagement_set", "owner", "accepted_findings"
+ "notes", "engagement_set", "owner", "accepted_findings",
)
.distinct()
)
@@ -852,7 +852,7 @@ def perform_update(self, serializer):
def get_queryset(self):
findings = get_authorized_findings(
- Permissions.Finding_View
+ Permissions.Finding_View,
).prefetch_related(
"endpoints",
"reviewers",
@@ -893,7 +893,7 @@ def close(self, request, pk=None):
if request.method == "POST":
finding_close = serializers.FindingCloseSerializer(
- data=request.data
+ data=request.data,
)
if finding_close.is_valid():
finding.is_mitigated = finding_close.validated_data[
@@ -909,13 +909,13 @@ def close(self, request, pk=None):
finding.mitigated_by = request.user
finding.active = False
finding.false_p = finding_close.validated_data.get(
- "false_p", False
+ "false_p", False,
)
finding.duplicate = finding_close.validated_data.get(
- "duplicate", False
+ "duplicate", False,
)
finding.out_of_scope = finding_close.validated_data.get(
- "out_of_scope", False
+ "out_of_scope", False,
)
endpoints_status = finding.status_finding.all()
@@ -934,7 +934,7 @@ def close(self, request, pk=None):
finding.save()
else:
return Response(
- finding_close.errors, status=status.HTTP_400_BAD_REQUEST
+ finding_close.errors, status=status.HTTP_400_BAD_REQUEST,
)
serialized_finding = serializers.FindingCloseSerializer(finding)
return Response(serialized_finding.data)
@@ -961,7 +961,7 @@ def tags(self, request, pk=None):
]
for tag in tagulous.utils.parse_tags(
- new_tags.validated_data["tags"]
+ new_tags.validated_data["tags"],
):
if tag not in all_tags:
all_tags.append(tag)
@@ -970,7 +970,7 @@ def tags(self, request, pk=None):
finding.save()
else:
return Response(
- new_tags.errors, status=status.HTTP_400_BAD_REQUEST
+ new_tags.errors, status=status.HTTP_400_BAD_REQUEST,
)
tags = finding.tags
serialized_tags = serializers.TagSerializer({"tags": tags})
@@ -979,14 +979,14 @@ def tags(self, request, pk=None):
@extend_schema(
methods=["GET"],
responses={
- status.HTTP_200_OK: serializers.BurpRawRequestResponseSerializer
+ status.HTTP_200_OK: serializers.BurpRawRequestResponseSerializer,
},
)
@extend_schema(
methods=["POST"],
request=serializers.BurpRawRequestResponseSerializer,
responses={
- status.HTTP_201_CREATED: serializers.BurpRawRequestResponseSerializer
+ status.HTTP_201_CREATED: serializers.BurpRawRequestResponseSerializer,
},
)
@action(detail=True, methods=["get", "post"])
@@ -995,24 +995,24 @@ def request_response(self, request, pk=None):
if request.method == "POST":
burps = serializers.BurpRawRequestResponseSerializer(
- data=request.data, many=isinstance(request.data, list)
+ data=request.data, many=isinstance(request.data, list),
)
if burps.is_valid():
for pair in burps.validated_data["req_resp"]:
burp_rr = BurpRawRequestResponse(
finding=finding,
burpRequestBase64=base64.b64encode(
- pair["request"].encode("utf-8")
+ pair["request"].encode("utf-8"),
),
burpResponseBase64=base64.b64encode(
- pair["response"].encode("utf-8")
+ pair["response"].encode("utf-8"),
),
)
burp_rr.clean()
burp_rr.save()
else:
return Response(
- burps.errors, status=status.HTTP_400_BAD_REQUEST
+ burps.errors, status=status.HTTP_400_BAD_REQUEST,
)
# Not necessarily Burp scan specific - these are just any request/response pairs
burp_req_resp = BurpRawRequestResponse.objects.filter(finding=finding)
@@ -1026,7 +1026,7 @@ def request_response(self, request, pk=None):
response = burp.get_response()
burp_list.append({"request": request, "response": response})
serialized_burps = serializers.BurpRawRequestResponseSerializer(
- {"req_resp": burp_list}
+ {"req_resp": burp_list},
)
return Response(serialized_burps.data)
@@ -1044,7 +1044,7 @@ def notes(self, request, pk=None):
finding = self.get_object()
if request.method == "POST":
new_note = serializers.AddNewNoteOptionSerializer(
- data=request.data
+ data=request.data,
)
if new_note.is_valid():
entry = new_note.validated_data["entry"]
@@ -1052,7 +1052,7 @@ def notes(self, request, pk=None):
note_type = new_note.validated_data.get("note_type", None)
else:
return Response(
- new_note.errors, status=status.HTTP_400_BAD_REQUEST
+ new_note.errors, status=status.HTTP_400_BAD_REQUEST,
)
author = request.user
@@ -1071,15 +1071,15 @@ def notes(self, request, pk=None):
jira_helper.add_comment(finding.finding_group, note)
serialized_note = serializers.NoteSerializer(
- {"author": author, "entry": entry, "private": private}
+ {"author": author, "entry": entry, "private": private},
)
return Response(
- serialized_note.data, status=status.HTTP_201_CREATED
+ serialized_note.data, status=status.HTTP_201_CREATED,
)
notes = finding.notes.all()
serialized_notes = serializers.FindingToNotesSerializer(
- {"finding_id": finding, "notes": notes}
+ {"finding_id": finding, "notes": notes},
)
return Response(serialized_notes.data, status=status.HTTP_200_OK)
@@ -1093,7 +1093,7 @@ def notes(self, request, pk=None):
responses={status.HTTP_201_CREATED: serializers.FileSerializer},
)
@action(
- detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,)
+ detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,),
)
def files(self, request, pk=None):
finding = self.get_object()
@@ -1104,7 +1104,7 @@ def files(self, request, pk=None):
file = new_file.validated_data["file"]
else:
return Response(
- new_file.errors, status=status.HTTP_400_BAD_REQUEST
+ new_file.errors, status=status.HTTP_400_BAD_REQUEST,
)
file = FileUpload(title=title, file=file)
@@ -1113,12 +1113,12 @@ def files(self, request, pk=None):
serialized_file = serializers.FileSerializer(file)
return Response(
- serialized_file.data, status=status.HTTP_201_CREATED
+ serialized_file.data, status=status.HTTP_201_CREATED,
)
files = finding.files.all()
serialized_files = serializers.FindingToFilesSerializer(
- {"finding_id": finding, "files": files}
+ {"finding_id": finding, "files": files},
)
return Response(serialized_files.data, status=status.HTTP_200_OK)
@@ -1217,7 +1217,7 @@ def remove_tags(self, request, pk=None):
# serializer turns it into a string, but we need a list
del_tags = tagulous.utils.parse_tags(
- delete_tags.validated_data["tags"]
+ delete_tags.validated_data["tags"],
)
if len(del_tags) < 1:
return Response(
@@ -1228,7 +1228,7 @@ def remove_tags(self, request, pk=None):
if tag not in all_tags:
return Response(
{
- "error": f"'{tag}' is not a valid tag in list"
+ "error": f"'{tag}' is not a valid tag in list",
},
status=status.HTTP_400_BAD_REQUEST,
)
@@ -1242,13 +1242,13 @@ def remove_tags(self, request, pk=None):
)
else:
return Response(
- delete_tags.errors, status=status.HTTP_400_BAD_REQUEST
+ delete_tags.errors, status=status.HTTP_400_BAD_REQUEST,
)
@extend_schema(
responses={
- status.HTTP_200_OK: serializers.FindingSerializer(many=True)
- }
+ status.HTTP_200_OK: serializers.FindingSerializer(many=True),
+ },
)
@action(
detail=True,
@@ -1261,7 +1261,7 @@ def get_duplicate_cluster(self, request, pk):
finding = self.get_object()
result = duplicate_cluster(request, finding)
serializer = serializers.FindingSerializer(
- instance=result, many=True, context={"request": request}
+ instance=result, many=True, context={"request": request},
)
return Response(serializer.data, status=status.HTTP_200_OK)
@@ -1272,7 +1272,7 @@ def get_duplicate_cluster(self, request, pk):
@action(detail=True, methods=["post"], url_path=r"duplicate/reset")
def reset_finding_duplicate_status(self, request, pk):
checked_duplicate_id = reset_finding_duplicate_status_internal(
- request.user, pk
+ request.user, pk,
)
if checked_duplicate_id is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
@@ -1282,13 +1282,13 @@ def reset_finding_duplicate_status(self, request, pk):
request=OpenApiTypes.NONE,
parameters=[
OpenApiParameter(
- "new_fid", OpenApiTypes.INT, OpenApiParameter.PATH
- )
+ "new_fid", OpenApiTypes.INT, OpenApiParameter.PATH,
+ ),
],
responses={status.HTTP_204_NO_CONTENT: ""},
)
@action(
        - detail=True, methods=["post"], url_path=r"original/(?P<new_fid>\d+)"
        + detail=True, methods=["post"], url_path=r"original/(?P<new_fid>\d+)",
)
def set_finding_as_original(self, request, pk, new_fid):
success = set_finding_as_original_internal(request.user, pk, new_fid)
@@ -1301,14 +1301,14 @@ def set_finding_as_original(self, request, pk, new_fid):
responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer},
)
@action(
- detail=False, methods=["post"], permission_classes=[IsAuthenticated]
+ detail=False, methods=["post"], permission_classes=[IsAuthenticated],
)
def generate_report(self, request):
findings = self.get_queryset()
options = {}
# prepare post data
report_options = serializers.ReportGenerateOptionSerializer(
- data=request.data
+ data=request.data,
)
if report_options.is_valid():
options["include_finding_notes"] = report_options.validated_data[
@@ -1325,7 +1325,7 @@ def generate_report(self, request):
] = report_options.validated_data["include_table_of_contents"]
else:
return Response(
- report_options.errors, status=status.HTTP_400_BAD_REQUEST
+ report_options.errors, status=status.HTTP_400_BAD_REQUEST,
)
data = report_generate(request, findings, options)
@@ -1335,7 +1335,7 @@ def generate_report(self, request):
def _get_metadata(self, request, finding):
metadata = DojoMeta.objects.filter(finding=finding)
serializer = serializers.FindingMetaSerializer(
- instance=metadata, many=True
+ instance=metadata, many=True,
)
return Response(serializer.data, status=status.HTTP_200_OK)
@@ -1343,7 +1343,7 @@ def _edit_metadata(self, request, finding):
metadata_name = request.query_params.get("name", None)
if metadata_name is None:
return Response(
- "Metadata name is required", status=status.HTTP_400_BAD_REQUEST
+ "Metadata name is required", status=status.HTTP_400_BAD_REQUEST,
)
try:
@@ -1383,7 +1383,7 @@ def _add_metadata(self, request, finding):
return Response(data=metadata_data.data, status=status.HTTP_200_OK)
else:
return Response(
- metadata_data.errors, status=status.HTTP_400_BAD_REQUEST
+ metadata_data.errors, status=status.HTTP_400_BAD_REQUEST,
)
def _remove_metadata(self, request, finding):
@@ -1395,7 +1395,7 @@ def _remove_metadata(self, request, finding):
)
metadata = get_object_or_404(
- DojoMeta.objects, finding=finding, name=name
+ DojoMeta.objects, finding=finding, name=name,
)
metadata.delete()
@@ -1406,7 +1406,7 @@ def _remove_metadata(self, request, finding):
responses={
status.HTTP_200_OK: serializers.FindingMetaSerializer(many=True),
status.HTTP_404_NOT_FOUND: OpenApiResponse(
- description="Returned if finding does not exist"
+ description="Returned if finding does not exist",
),
},
)
@@ -1420,17 +1420,17 @@ def _remove_metadata(self, request, finding):
required=True,
description="name of the metadata to retrieve. If name is empty, return all the \
metadata associated with the finding",
- )
+ ),
],
responses={
status.HTTP_200_OK: OpenApiResponse(
- description="Returned if the metadata was correctly deleted"
+ description="Returned if the metadata was correctly deleted",
),
status.HTTP_404_NOT_FOUND: OpenApiResponse(
- description="Returned if finding does not exist"
+ description="Returned if finding does not exist",
),
status.HTTP_400_BAD_REQUEST: OpenApiResponse(
- description="Returned if there was a problem with the metadata information"
+ description="Returned if there was a problem with the metadata information",
),
},
)
@@ -1440,10 +1440,10 @@ def _remove_metadata(self, request, finding):
responses={
status.HTTP_200_OK: serializers.FindingMetaSerializer,
status.HTTP_404_NOT_FOUND: OpenApiResponse(
- description="Returned if finding does not exist"
+ description="Returned if finding does not exist",
),
status.HTTP_400_BAD_REQUEST: OpenApiResponse(
- description="Returned if there was a problem with the metadata information"
+ description="Returned if there was a problem with the metadata information",
),
},
)
@@ -1453,10 +1453,10 @@ def _remove_metadata(self, request, finding):
responses={
status.HTTP_200_OK: serializers.FindingMetaSerializer,
status.HTTP_404_NOT_FOUND: OpenApiResponse(
- description="Returned if finding does not exist"
+ description="Returned if finding does not exist",
),
status.HTTP_400_BAD_REQUEST: OpenApiResponse(
- description="Returned if there was a problem with the metadata information"
+ description="Returned if there was a problem with the metadata information",
),
},
)
@@ -1481,7 +1481,7 @@ def metadata(self, request, pk=None):
return self._remove_metadata(request, finding)
return Response(
- {"error", "unsupported method"}, status=status.HTTP_400_BAD_REQUEST
+ {"error", "unsupported method"}, status=status.HTTP_400_BAD_REQUEST,
)
@@ -1599,7 +1599,7 @@ class ProductAPIScanConfigurationViewSet(
def get_queryset(self):
return get_authorized_product_api_scan_configurations(
- Permissions.Product_API_Scan_Configuration_View
+ Permissions.Product_API_Scan_Configuration_View,
)
@@ -1722,7 +1722,7 @@ def destroy(self, request, *args, **kwargs):
responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer},
)
@action(
- detail=True, methods=["post"], permission_classes=[IsAuthenticated]
+ detail=True, methods=["post"], permission_classes=[IsAuthenticated],
)
def generate_report(self, request, pk=None):
product = self.get_object()
@@ -1730,7 +1730,7 @@ def generate_report(self, request, pk=None):
options = {}
# prepare post data
report_options = serializers.ReportGenerateOptionSerializer(
- data=request.data
+ data=request.data,
)
if report_options.is_valid():
options["include_finding_notes"] = report_options.validated_data[
@@ -1747,7 +1747,7 @@ def generate_report(self, request, pk=None):
] = report_options.validated_data["include_table_of_contents"]
else:
return Response(
- report_options.errors, status=status.HTTP_400_BAD_REQUEST
+ report_options.errors, status=status.HTTP_400_BAD_REQUEST,
)
data = report_generate(request, product, options)
@@ -1794,11 +1794,11 @@ class ProductMemberViewSet(
def get_queryset(self):
return get_authorized_product_members(
- Permissions.Product_View
+ Permissions.Product_View,
).distinct()
@extend_schema(
- exclude=True
+ exclude=True,
)
def partial_update(self, request, pk=None):
# Object authorization won't work if not all data is provided
@@ -1845,11 +1845,11 @@ class ProductGroupViewSet(
def get_queryset(self):
return get_authorized_product_groups(
- Permissions.Product_Group_View
+ Permissions.Product_Group_View,
).distinct()
@extend_schema(
- exclude=True
+ exclude=True,
)
def partial_update(self, request, pk=None):
# Object authorization won't work if not all data is provided
@@ -1903,7 +1903,7 @@ class ProductTypeViewSet(
def get_queryset(self):
return get_authorized_product_types(
- Permissions.Product_Type_View
+ Permissions.Product_Type_View,
).distinct()
# Overwrite perfom_create of CreateModelMixin to add current user as owner
@@ -1932,7 +1932,7 @@ def destroy(self, request, *args, **kwargs):
responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer},
)
@action(
- detail=True, methods=["post"], permission_classes=[IsAuthenticated]
+ detail=True, methods=["post"], permission_classes=[IsAuthenticated],
)
def generate_report(self, request, pk=None):
product_type = self.get_object()
@@ -1940,7 +1940,7 @@ def generate_report(self, request, pk=None):
options = {}
# prepare post data
report_options = serializers.ReportGenerateOptionSerializer(
- data=request.data
+ data=request.data,
)
if report_options.is_valid():
options["include_finding_notes"] = report_options.validated_data[
@@ -1957,7 +1957,7 @@ def generate_report(self, request, pk=None):
] = report_options.validated_data["include_table_of_contents"]
else:
return Response(
- report_options.errors, status=status.HTTP_400_BAD_REQUEST
+ report_options.errors, status=status.HTTP_400_BAD_REQUEST,
)
data = report_generate(request, product_type, options)
@@ -2004,14 +2004,14 @@ class ProductTypeMemberViewSet(
def get_queryset(self):
return get_authorized_product_type_members(
- Permissions.Product_Type_View
+ Permissions.Product_Type_View,
).distinct()
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if instance.role.is_owner:
owners = Product_Type_Member.objects.filter(
- product_type=instance.product_type, role__is_owner=True
+ product_type=instance.product_type, role__is_owner=True,
).count()
if owners <= 1:
return Response(
@@ -2022,7 +2022,7 @@ def destroy(self, request, *args, **kwargs):
return Response(status=status.HTTP_204_NO_CONTENT)
@extend_schema(
- exclude=True
+ exclude=True,
)
def partial_update(self, request, pk=None):
# Object authorization won't work if not all data is provided
@@ -2069,11 +2069,11 @@ class ProductTypeGroupViewSet(
def get_queryset(self):
return get_authorized_product_type_groups(
- Permissions.Product_Type_Group_View
+ Permissions.Product_Type_Group_View,
).distinct()
@extend_schema(
- exclude=True
+ exclude=True,
)
def partial_update(self, request, pk=None):
# Object authorization won't work if not all data is provided
@@ -2096,7 +2096,7 @@ class StubFindingsViewSet(
def get_queryset(self):
return get_authorized_stub_findings(
- Permissions.Finding_View
+ Permissions.Finding_View,
).distinct()
def get_serializer_class(self):
@@ -2160,7 +2160,7 @@ def get_serializer_class(self):
responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer},
)
@action(
- detail=True, methods=["post"], permission_classes=[IsAuthenticated]
+ detail=True, methods=["post"], permission_classes=[IsAuthenticated],
)
def generate_report(self, request, pk=None):
test = self.get_object()
@@ -2168,7 +2168,7 @@ def generate_report(self, request, pk=None):
options = {}
# prepare post data
report_options = serializers.ReportGenerateOptionSerializer(
- data=request.data
+ data=request.data,
)
if report_options.is_valid():
options["include_finding_notes"] = report_options.validated_data[
@@ -2185,7 +2185,7 @@ def generate_report(self, request, pk=None):
] = report_options.validated_data["include_table_of_contents"]
else:
return Response(
- report_options.errors, status=status.HTTP_400_BAD_REQUEST
+ report_options.errors, status=status.HTTP_400_BAD_REQUEST,
)
data = report_generate(request, test, options)
@@ -2206,7 +2206,7 @@ def notes(self, request, pk=None):
test = self.get_object()
if request.method == "POST":
new_note = serializers.AddNewNoteOptionSerializer(
- data=request.data
+ data=request.data,
)
if new_note.is_valid():
entry = new_note.validated_data["entry"]
@@ -2214,7 +2214,7 @@ def notes(self, request, pk=None):
note_type = new_note.validated_data.get("note_type", None)
else:
return Response(
- new_note.errors, status=status.HTTP_400_BAD_REQUEST
+ new_note.errors, status=status.HTTP_400_BAD_REQUEST,
)
author = request.user
@@ -2228,15 +2228,15 @@ def notes(self, request, pk=None):
test.notes.add(note)
serialized_note = serializers.NoteSerializer(
- {"author": author, "entry": entry, "private": private}
+ {"author": author, "entry": entry, "private": private},
)
return Response(
- serialized_note.data, status=status.HTTP_201_CREATED
+ serialized_note.data, status=status.HTTP_201_CREATED,
)
notes = test.notes.all()
serialized_notes = serializers.TestToNotesSerializer(
- {"test_id": test, "notes": notes}
+ {"test_id": test, "notes": notes},
)
return Response(serialized_notes.data, status=status.HTTP_200_OK)
@@ -2250,7 +2250,7 @@ def notes(self, request, pk=None):
responses={status.HTTP_201_CREATED: serializers.FileSerializer},
)
@action(
- detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,)
+ detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,),
)
def files(self, request, pk=None):
test = self.get_object()
@@ -2261,7 +2261,7 @@ def files(self, request, pk=None):
file = new_file.validated_data["file"]
else:
return Response(
- new_file.errors, status=status.HTTP_400_BAD_REQUEST
+ new_file.errors, status=status.HTTP_400_BAD_REQUEST,
)
file = FileUpload(title=title, file=file)
@@ -2270,12 +2270,12 @@ def files(self, request, pk=None):
serialized_file = serializers.FileSerializer(file)
return Response(
- serialized_file.data, status=status.HTTP_201_CREATED
+ serialized_file.data, status=status.HTTP_201_CREATED,
)
files = test.files.all()
serialized_files = serializers.TestToFilesSerializer(
- {"test_id": test, "files": files}
+ {"test_id": test, "files": files},
)
return Response(serialized_files.data, status=status.HTTP_200_OK)
@@ -2382,7 +2382,7 @@ class TestImportViewSet(
def get_queryset(self):
return get_authorized_test_imports(
- Permissions.Test_View
+ Permissions.Test_View,
).prefetch_related(
"test_import_finding_action_set",
"findings_affected",
@@ -2549,7 +2549,7 @@ class UserProfileView(GenericAPIView):
serializer_class = serializers.UserProfileSerializer
@action(
- detail=True, methods=["get"], filter_backends=[], pagination_class=None
+ detail=True, methods=["get"], filter_backends=[], pagination_class=None,
)
def get(self, request, format=None):
user = get_current_user()
@@ -2639,7 +2639,7 @@ def get_queryset(self):
# Authorization: authenticated users, DjangoModelPermissions
class EndpointMetaImporterView(
- mixins.CreateModelMixin, viewsets.GenericViewSet
+ mixins.CreateModelMixin, viewsets.GenericViewSet,
):
"""
Imports a CSV file into a product to propagate arbitrary meta and tags on endpoints.
@@ -2880,14 +2880,14 @@ def report_generate(request, obj, options):
prod_type=product_type,
queryset=prefetch_related_findings_for_report(
Finding.objects.filter(
- test__engagement__product__prod_type=product_type
- )
+ test__engagement__product__prod_type=product_type,
+ ),
),
)
if len(findings.qs) > 0:
start_date = timezone.make_aware(
- datetime.combine(findings.qs.last().date, datetime.min.time())
+ datetime.combine(findings.qs.last().date, datetime.min.time()),
)
else:
start_date = timezone.now()
@@ -2908,11 +2908,11 @@ def report_generate(request, obj, options):
request.GET,
product=product,
queryset=prefetch_related_findings_for_report(
- Finding.objects.filter(test__engagement__product=product)
+ Finding.objects.filter(test__engagement__product=product),
),
)
ids = get_endpoint_ids(
- Endpoint.objects.filter(product=product).distinct()
+ Endpoint.objects.filter(product=product).distinct(),
)
endpoints = Endpoint.objects.filter(id__in=ids)
@@ -2922,14 +2922,14 @@ def report_generate(request, obj, options):
request.GET,
engagement=engagement,
queryset=prefetch_related_findings_for_report(
- Finding.objects.filter(test__engagement=engagement)
+ Finding.objects.filter(test__engagement=engagement),
),
)
report_name = "Engagement Report: " + str(engagement)
ids = set(finding.id for finding in findings.qs) # noqa: C401
ids = get_endpoint_ids(
- Endpoint.objects.filter(product=engagement.product).distinct()
+ Endpoint.objects.filter(product=engagement.product).distinct(),
)
endpoints = Endpoint.objects.filter(id__in=ids)
@@ -2939,7 +2939,7 @@ def report_generate(request, obj, options):
request.GET,
engagement=test.engagement,
queryset=prefetch_related_findings_for_report(
- Finding.objects.filter(test=test)
+ Finding.objects.filter(test=test),
),
)
report_name = "Test Report: " + str(test)
@@ -2949,12 +2949,12 @@ def report_generate(request, obj, options):
host = endpoint.host
report_name = "Endpoint Report: " + host
endpoints = Endpoint.objects.filter(
- host=host, product=endpoint.product
+ host=host, product=endpoint.product,
).distinct()
findings = report_finding_filter_class(
request.GET,
queryset=prefetch_related_findings_for_report(
- Finding.objects.filter(endpoints__in=endpoints)
+ Finding.objects.filter(endpoints__in=endpoints),
),
)
@@ -3130,7 +3130,7 @@ def report_generate(request, obj, options):
# Authorization: superuser
class SystemSettingsViewSet(
- mixins.ListModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet
+ mixins.ListModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet,
):
"""Basic control over System Settings. Use 'id' 1 for PUT, PATCH operations"""
@@ -3221,7 +3221,7 @@ class ConfigurationPermissionViewSet(
):
serializer_class = serializers.ConfigurationPermissionSerializer
queryset = Permission.objects.filter(
- codename__in=get_configuration_permissions_codenames()
+ codename__in=get_configuration_permissions_codenames(),
)
filter_backends = (DjangoFilterBackend,)
filterset_fields = ["id", "name", "codename"]
@@ -3276,7 +3276,7 @@ class QuestionnaireGeneralSurveyViewSet(
class QuestionnaireEngagementSurveyViewSet(
- viewsets.ReadOnlyModelViewSet
+ viewsets.ReadOnlyModelViewSet,
):
serializer_class = serializers.QuestionnaireEngagementSurveySerializer
queryset = Engagement_Survey.objects.all()
@@ -3303,7 +3303,7 @@ class QuestionnaireAnsweredSurveyViewSet(
# Authorization: configuration
class AnnouncementViewSet(
- DojoModelViewSet
+ DojoModelViewSet,
):
serializer_class = serializers.AnnouncementSerializer
queryset = Announcement.objects.all()
diff --git a/dojo/apps.py b/dojo/apps.py
index e12ea7459b..9b3f786408 100644
--- a/dojo/apps.py
+++ b/dojo/apps.py
@@ -25,11 +25,11 @@ def ready(self):
# charfields/textfields are the fields that watson indexes by default (but we have to repeat here if we add extra fields)
# and watson likes to have tuples instead of lists
- watson.register(self.get_model('Product'), fields=get_model_fields_with_extra(self.get_model('Product'), ('id', 'prod_type__name', )), store=('prod_type__name', ))
+ watson.register(self.get_model('Product'), fields=get_model_fields_with_extra(self.get_model('Product'), ('id', 'prod_type__name')), store=('prod_type__name', ))
- watson.register(self.get_model('Test'), fields=get_model_fields_with_extra(self.get_model('Test'), ('id', 'engagement__product__name', )), store=('engagement__product__name', )) # test_type__name?
+ watson.register(self.get_model('Test'), fields=get_model_fields_with_extra(self.get_model('Test'), ('id', 'engagement__product__name')), store=('engagement__product__name', )) # test_type__name?
- watson.register(self.get_model('Finding'), fields=get_model_fields_with_extra(self.get_model('Finding'), ('id', 'url', 'unique_id_from_tool', 'test__engagement__product__name', 'jira_issue__jira_key', )),
+ watson.register(self.get_model('Finding'), fields=get_model_fields_with_extra(self.get_model('Finding'), ('id', 'url', 'unique_id_from_tool', 'test__engagement__product__name', 'jira_issue__jira_key')),
store=('status', 'jira_issue__jira_key', 'test__engagement__product__name', 'severity', 'severity_display', 'latest_note'))
# some thoughts on Finding fields that are not indexed yet:
@@ -60,7 +60,7 @@ def ready(self):
watson.register(self.get_model('Finding_Template'))
watson.register(self.get_model('Endpoint'), store=('product__name', )) # add product name also?
- watson.register(self.get_model('Engagement'), fields=get_model_fields_with_extra(self.get_model('Engagement'), ('id', 'product__name', )), store=('product__name', ))
+ watson.register(self.get_model('Engagement'), fields=get_model_fields_with_extra(self.get_model('Engagement'), ('id', 'product__name')), store=('product__name', ))
watson.register(self.get_model('App_Analysis'))
watson.register(self.get_model('Vulnerability_Id'), store=('finding__test__engagement__product__name', ))
diff --git a/dojo/authorization/authorization.py b/dojo/authorization/authorization.py
index 8538101cf5..a542d7c6e0 100644
--- a/dojo/authorization/authorization.py
+++ b/dojo/authorization/authorization.py
@@ -57,7 +57,7 @@ def user_has_permission(user, obj, permission):
# permissions
member = get_product_type_member(user, obj)
if member is not None and role_has_permission(
- member.role.id, permission
+ member.role.id, permission,
):
return True
# Check if the user is in a group with a role for the product type with
@@ -78,7 +78,7 @@ def user_has_permission(user, obj, permission):
# permissions
member = get_product_member(user, obj)
if member is not None and role_has_permission(
- member.role.id, permission
+ member.role.id, permission,
):
return True
# Check if the user is in a group with a role for the product with the
@@ -101,14 +101,14 @@ def user_has_permission(user, obj, permission):
isinstance(obj, Finding) or isinstance(obj, Stub_Finding)
) and permission in Permissions.get_finding_permissions():
return user_has_permission(
- user, obj.test.engagement.product, permission
+ user, obj.test.engagement.product, permission,
)
elif (
isinstance(obj, Finding_Group)
and permission in Permissions.get_finding_group_permissions()
):
return user_has_permission(
- user, obj.test.engagement.product, permission
+ user, obj.test.engagement.product, permission,
)
elif (
isinstance(obj, Endpoint)
@@ -138,7 +138,7 @@ def user_has_permission(user, obj, permission):
if permission == Permissions.Product_Type_Member_Delete:
# Every member is allowed to remove himself
return obj.user == user or user_has_permission(
- user, obj.product_type, permission
+ user, obj.product_type, permission,
)
else:
return user_has_permission(user, obj.product_type, permission)
@@ -149,7 +149,7 @@ def user_has_permission(user, obj, permission):
if permission == Permissions.Product_Member_Delete:
# Every member is allowed to remove himself
return obj.user == user or user_has_permission(
- user, obj.product, permission
+ user, obj.product, permission,
)
else:
return user_has_permission(user, obj.product, permission)
@@ -171,7 +171,7 @@ def user_has_permission(user, obj, permission):
# permissions
group_member = get_group_member(user, obj)
return group_member is not None and role_has_permission(
- group_member.role.id, permission
+ group_member.role.id, permission,
)
elif (
isinstance(obj, Dojo_Group_Member)
@@ -180,7 +180,7 @@ def user_has_permission(user, obj, permission):
if permission == Permissions.Group_Member_Delete:
# Every user is allowed to remove himself
return obj.user == user or user_has_permission(
- user, obj.group, permission
+ user, obj.group, permission,
)
else:
return user_has_permission(user, obj.group, permission)
@@ -192,15 +192,15 @@ def user_has_permission(user, obj, permission):
return user_has_permission(user, obj.product, permission)
if obj.engagement:
return user_has_permission(
- user, obj.engagement.product, permission
+ user, obj.engagement.product, permission,
)
if obj.test:
return user_has_permission(
- user, obj.test.engagement.product, permission
+ user, obj.test.engagement.product, permission,
)
if obj.finding:
return user_has_permission(
- user, obj.finding.test.engagement.product, permission
+ user, obj.finding.test.engagement.product, permission,
)
else:
msg = f"No authorization implemented for class {type(obj).__name__} and permission {permission}"
@@ -233,7 +233,7 @@ def user_has_global_permission(user, permission):
hasattr(group, "global_role")
and group.global_role.role is not None
and role_has_global_permission(
- group.global_role.role.id, permission
+ group.global_role.role.id, permission,
)
):
return True
diff --git a/dojo/authorization/authorization_decorators.py b/dojo/authorization/authorization_decorators.py
index 3fa1926ec8..3063d0821d 100644
--- a/dojo/authorization/authorization_decorators.py
+++ b/dojo/authorization/authorization_decorators.py
@@ -15,7 +15,7 @@ def user_is_authorized(model, permission, arg, lookup="pk", func=None):
if func is None:
return functools.partial(
- user_is_authorized, model, permission, arg, lookup
+ user_is_authorized, model, permission, arg, lookup,
)
@functools.wraps(func)
diff --git a/dojo/banner/urls.py b/dojo/banner/urls.py
index 309f735263..c0b75f1ff7 100644
--- a/dojo/banner/urls.py
+++ b/dojo/banner/urls.py
@@ -4,6 +4,6 @@
urlpatterns = [
re_path(
- r"^configure_banner$", views.configure_banner, name="configure_banner"
+ r"^configure_banner$", views.configure_banner, name="configure_banner",
),
]
diff --git a/dojo/banner/views.py b/dojo/banner/views.py
index 5d81a1cd6e..dcdccc77cc 100644
--- a/dojo/banner/views.py
+++ b/dojo/banner/views.py
@@ -37,11 +37,11 @@ def configure_banner(request):
initial={
"banner_enable": banner_config.banner_enable,
"banner_message": banner_config.banner_message,
- }
+ },
)
add_breadcrumb(
- title="Banner Configuration", top_level=True, request=request
+ title="Banner Configuration", top_level=True, request=request,
)
return render(
request,
diff --git a/dojo/benchmark/views.py b/dojo/benchmark/views.py
index 2169fd34d0..ffdbea82e6 100644
--- a/dojo/benchmark/views.py
+++ b/dojo/benchmark/views.py
@@ -78,15 +78,15 @@ def update_benchmark(request, pid, _type):
"date": n.date.ctime(),
}
for n in notes
- ]
- }
+ ],
+ },
)
bench.save()
return JsonResponse({field: value})
return redirect_to_return_url_or_else(
- request, reverse("view_product_benchmark", args=(pid, _type))
+ request, reverse("view_product_benchmark", args=(pid, _type)),
)
@@ -110,7 +110,7 @@ def update_benchmark_summary(request, pid, _type, summary):
return JsonResponse(data)
return redirect_to_return_url_or_else(
- request, reverse("view_product_benchmark", args=(pid, _type))
+ request, reverse("view_product_benchmark", args=(pid, _type)),
)
@@ -179,7 +179,7 @@ def score_asvs(product, benchmark_type):
)
asvs_level_3_benchmark, asvs_level_3_score = return_score(asvs_level_3)
benchmark_product_summary = Benchmark_Product_Summary.objects.get(
- product=product, benchmark_type=benchmark_type
+ product=product, benchmark_type=benchmark_type,
)
benchmark_product_summary.asvs_level_1_benchmark = asvs_level_1_benchmark
@@ -197,34 +197,34 @@ def benchmark_view(request, pid, type, cat=None):
product = get_object_or_404(Product, id=pid)
benchmark_type = get_object_or_404(Benchmark_Type, id=type)
benchmark_category = Benchmark_Category.objects.filter(
- type=type, enabled=True
+ type=type, enabled=True,
).order_by("name")
# Add requirements to the product
new_benchmarks = Benchmark_Requirement.objects.filter(
- category__type=type, category__type__enabled=True, enabled=True
+ category__type=type, category__type__enabled=True, enabled=True,
).exclude(
id__in=Benchmark_Product.objects.filter(product=product).values_list(
- "control_id", flat=True
- )
+ "control_id", flat=True,
+ ),
)
add_benchmark(new_benchmarks, product)
# Create the benchmark summary category
try:
benchmark_product_summary = Benchmark_Product_Summary.objects.get(
- product=product, benchmark_type=benchmark_type
+ product=product, benchmark_type=benchmark_type,
)
except Exception:
benchmark_product_summary = Benchmark_Product_Summary(
- product=product, benchmark_type=benchmark_type
+ product=product, benchmark_type=benchmark_type,
)
benchmark_product_summary.save()
if cat:
benchmarks = (
Benchmark_Product.objects.select_related(
- "control", "control__category"
+ "control", "control__category",
)
.filter(
product=product.id,
@@ -239,7 +239,7 @@ def benchmark_view(request, pid, type, cat=None):
else:
benchmarks = (
Benchmark_Product.objects.select_related(
- "control", "control__category"
+ "control", "control__category",
)
.filter(
product=product.id,
@@ -252,7 +252,7 @@ def benchmark_view(request, pid, type, cat=None):
)
benchmark_summary_form = Benchmark_Product_SummaryForm(
- instance=benchmark_product_summary
+ instance=benchmark_product_summary,
)
noted_benchmarks = (
@@ -268,7 +268,7 @@ def benchmark_view(request, pid, type, cat=None):
key=lambda x: [int(_) for _ in x.control.objective_number.split(".")],
)
benchmark_category = sorted(
- benchmark_category, key=lambda x: int(x.name[:3].strip("V: "))
+ benchmark_category, key=lambda x: int(x.name[:3].strip("V: ")),
)
product_tab = Product_Tab(product, title=_("Benchmarks"), tab="benchmarks")
@@ -295,7 +295,7 @@ def benchmark_view(request, pid, type, cat=None):
def delete(request, pid, type):
product = get_object_or_404(Product, id=pid)
benchmark_product_summary = Benchmark_Product_Summary.objects.filter(
- product=product, benchmark_type=type
+ product=product, benchmark_type=type,
).first()
form = DeleteBenchmarkForm(instance=benchmark_product_summary)
@@ -305,11 +305,11 @@ def delete(request, pid, type):
and str(benchmark_product_summary.id) == request.POST["id"]
):
form = DeleteBenchmarkForm(
- request.POST, instance=benchmark_product_summary
+ request.POST, instance=benchmark_product_summary,
)
if form.is_valid():
benchmark_product = Benchmark_Product.objects.filter(
- product=product, control__category__type=type
+ product=product, control__category__type=type,
)
benchmark_product.delete()
benchmark_product_summary.delete()
@@ -322,7 +322,7 @@ def delete(request, pid, type):
return HttpResponseRedirect(reverse("product"))
product_tab = Product_Tab(
- product, title=_("Delete Benchmarks"), tab="benchmarks"
+ product, title=_("Delete Benchmarks"), tab="benchmarks",
)
return render(
request,
diff --git a/dojo/checks.py b/dojo/checks.py
index 1299f4d765..39762b76fc 100644
--- a/dojo/checks.py
+++ b/dojo/checks.py
@@ -13,6 +13,6 @@ def check_configuration_deduplication(app_configs, **kwargs):
hint=f'Check configuration ["HASHCODE_FIELDS_PER_SCANNER"]["{scanner}"] value',
obj=settings.HASHCODE_FIELDS_PER_SCANNER[scanner],
id="dojo.E001",
- )
+ ),
)
return errors
diff --git a/dojo/components/sql_group_concat.py b/dojo/components/sql_group_concat.py
index b7abd6c9ef..3b08bb4cc5 100644
--- a/dojo/components/sql_group_concat.py
+++ b/dojo/components/sql_group_concat.py
@@ -6,7 +6,7 @@ class Sql_GroupConcat(Aggregate):
allow_distinct = True
def __init__(
- self, expression, separator, distinct=False, ordering=None, **extra
+ self, expression, separator, distinct=False, ordering=None, **extra,
):
self.separator = separator
super().__init__(
@@ -15,7 +15,7 @@ def __init__(
ordering=f" ORDER BY {ordering}" if ordering is not None else "",
separator=f' SEPARATOR "{separator}"',
output_field=CharField(),
- **extra
+ **extra,
)
def as_mysql(self, compiler, connection):
@@ -31,5 +31,5 @@ def as_sql(self, compiler, connection, **extra):
compiler,
connection,
template="%(function)s(%(distinct)s%(expressions)s%(ordering)s)",
- **extra
+ **extra,
)
diff --git a/dojo/components/views.py b/dojo/components/views.py
index d4a7490fbb..45cf09727f 100644
--- a/dojo/components/views.py
+++ b/dojo/components/views.py
@@ -25,32 +25,32 @@ def components(request):
.order_by("component_name")
.annotate(
component_version=StringAgg(
- "component_version", delimiter=separator, distinct=True, default=Value('')
- )
+ "component_version", delimiter=separator, distinct=True, default=Value(''),
+ ),
)
)
else:
component_query = component_query.values("component_name").order_by(
- "component_name"
+ "component_name",
)
component_query = component_query.annotate(
component_version=Sql_GroupConcat(
- "component_version", separator=separator, distinct=True
- )
+ "component_version", separator=separator, distinct=True,
+ ),
)
# Append counts
component_query = component_query.annotate(total=Count("id")).order_by(
- "component_name"
+ "component_name",
)
component_query = component_query.annotate(
- active=Count("id", filter=Q(active=True))
+ active=Count("id", filter=Q(active=True)),
)
component_query = component_query.annotate(
- duplicate=(Count("id", filter=Q(duplicate=True)))
+ duplicate=(Count("id", filter=Q(duplicate=True))),
)
component_query = component_query.order_by(
- "-total"
+ "-total",
) # Default sort by total descending
filter_string_matching = get_system_setting("filter_string_matching", False)
@@ -60,7 +60,7 @@ def components(request):
# Filter out None values for auto-complete
component_words = component_query.exclude(
- component_name__isnull=True
+ component_name__isnull=True,
).values_list("component_name", flat=True)
return render(
diff --git a/dojo/context_processors.py b/dojo/context_processors.py
index c0bbb25046..12168d9ea6 100644
--- a/dojo/context_processors.py
+++ b/dojo/context_processors.py
@@ -49,7 +49,7 @@ def bind_announcement(request):
try:
if request.user.is_authenticated:
user_announcement = UserAnnouncement.objects.select_related(
- "announcement"
+ "announcement",
).get(user=request.user)
return {"announcement": user_announcement.announcement}
return {}
diff --git a/dojo/cred/views.py b/dojo/cred/views.py
index 26d5d62f87..53136b4994 100644
--- a/dojo/cred/views.py
+++ b/dojo/cred/views.py
@@ -30,7 +30,7 @@ def new_cred(request):
messages.SUCCESS,
'Credential Successfully Created.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('cred', ))
+ return HttpResponseRedirect(reverse('cred'))
else:
tform = CredUserForm()
add_breadcrumb(
@@ -64,7 +64,7 @@ def edit_cred(request, ttid):
messages.SUCCESS,
'Credential Successfully Updated.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('cred', ))
+ return HttpResponseRedirect(reverse('cred'))
else:
tool_config.password = prepare_for_view(tool_config.password)
@@ -112,7 +112,7 @@ def view_cred_details(request, ttid):
'cred': cred,
'form': form,
'notes': notes,
- 'cred_products': cred_products
+ 'cred_products': cred_products,
})
@@ -177,7 +177,7 @@ def view_cred_product(request, pid, ttid):
'cred_type': cred_type,
'edit_link': edit_link,
'delete_link': delete_link,
- 'view_link': view_link
+ 'view_link': view_link,
})
@@ -226,7 +226,7 @@ def view_cred_product_engagement(request, eid, ttid):
'cred_type': cred_type,
'edit_link': edit_link,
'delete_link': delete_link,
- 'cred_product': cred_product
+ 'cred_product': cred_product,
})
@@ -277,7 +277,7 @@ def view_cred_engagement_test(request, tid, ttid):
'cred_type': cred_type,
'edit_link': edit_link,
'delete_link': delete_link,
- 'cred_product': cred_product
+ 'cred_product': cred_product,
})
@@ -328,7 +328,7 @@ def view_cred_finding(request, fid, ttid):
'cred_type': cred_type,
'edit_link': edit_link,
'delete_link': delete_link,
- 'cred_product': cred_product
+ 'cred_product': cred_product,
})
@@ -356,7 +356,7 @@ def edit_cred_product(request, pid, ttid):
return render(request, 'dojo/edit_cred_all.html', {
'tform': tform,
'product_tab': product_tab,
- 'cred_type': "Product"
+ 'cred_type': "Product",
})
@@ -390,7 +390,7 @@ def edit_cred_product_engagement(request, eid, ttid):
return render(request, 'dojo/edit_cred_all.html', {
'tform': tform,
- 'cred_type': "Engagement"
+ 'cred_type': "Engagement",
})
@@ -425,7 +425,7 @@ def new_cred_product(request, pid):
return render(request, 'dojo/new_cred_product.html', {
'tform': tform,
'pid': pid,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
@@ -476,7 +476,7 @@ def new_cred_product_engagement(request, eid):
request, 'dojo/new_cred_mapping.html', {
'tform': tform,
'eid': eid,
- 'formlink': reverse('new_cred_product_engagement', args=(eid, ))
+ 'formlink': reverse('new_cred_product_engagement', args=(eid, )),
})
@@ -526,7 +526,7 @@ def new_cred_engagement_test(request, tid):
request, 'dojo/new_cred_mapping.html', {
'tform': tform,
'eid': tid,
- 'formlink': reverse('new_cred_engagement_test', args=(tid, ))
+ 'formlink': reverse('new_cred_engagement_test', args=(tid, )),
})
@@ -577,7 +577,7 @@ def new_cred_finding(request, fid):
request, 'dojo/new_cred_mapping.html', {
'tform': tform,
'eid': fid,
- 'formlink': reverse('new_cred_finding', args=(fid, ))
+ 'formlink': reverse('new_cred_finding', args=(fid, )),
})
@@ -663,7 +663,7 @@ def delete_cred_controller(request, destination_url, id, ttid):
product_tab = Product_Tab(product, title="Delete Credential Mapping", tab="settings")
return render(request, 'dojo/delete_cred_all.html', {
'tform': tform,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
diff --git a/dojo/endpoint/signals.py b/dojo/endpoint/signals.py
index 23ae7c4d07..4c18d03d91 100644
--- a/dojo/endpoint/signals.py
+++ b/dojo/endpoint/signals.py
@@ -17,7 +17,7 @@ def endpoint_post_delete(sender, instance, using, origin, **kwargs):
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
content_type=ContentType.objects.get(app_label='dojo', model='endpoint'),
- object_id=instance.id
+ object_id=instance.id,
)
description = _('The endpoint "%(name)s" was deleted by %(user)s') % {
'name': str(instance), 'user': le.actor}
diff --git a/dojo/endpoint/utils.py b/dojo/endpoint/utils.py
index e40de5c5e1..9b7733c553 100644
--- a/dojo/endpoint/utils.py
+++ b/dojo/endpoint/utils.py
@@ -82,7 +82,7 @@ def endpoint_get_or_create(**kwargs):
else:
logger.warning(
f"Endpoints in your database are broken. "
- f"Please access {reverse('endpoint_migrate')} and migrate them to new format or remove them."
+ f"Please access {reverse('endpoint_migrate')} and migrate them to new format or remove them.",
)
# Get the oldest endpoint first, and return that instead
# a datetime is not captured on the endpoint model, so ID
@@ -217,7 +217,7 @@ def err_log(message, html_log, endpoint_html_log, endpoint):
path=endpoint.path,
query=endpoint.query,
fragment=endpoint.fragment,
- product_id=product.pk if product else None
+ product_id=product.pk if product else None,
).order_by('id')
if ep.count() > 1:
@@ -280,12 +280,12 @@ def validate_endpoints_to_add(endpoints_to_add):
endpoint_ins.port,
endpoint_ins.path,
endpoint_ins.query,
- endpoint_ins.fragment
+ endpoint_ins.fragment,
])
except ValidationError as ves:
for ve in ves:
errors.append(
- ValidationError(f"Invalid endpoint {endpoint}: {ve}")
+ ValidationError(f"Invalid endpoint {endpoint}: {ve}"),
)
return endpoint_list, errors
@@ -301,7 +301,7 @@ def save_endpoints_to_add(endpoint_list, product):
path=e[4],
query=e[5],
fragment=e[6],
- product=product
+ product=product,
)
processed_endpoints.append(endpoint)
return processed_endpoints
diff --git a/dojo/endpoint/views.py b/dojo/endpoint/views.py
index 46a2098006..b6f838d793 100644
--- a/dojo/endpoint/views.py
+++ b/dojo/endpoint/views.py
@@ -400,7 +400,7 @@ def endpoint_bulk_update_all(request, pid=None):
mitigated=True,
mitigated_by=request.user,
mitigated_time=timezone.now(),
- last_modified=timezone.now()
+ last_modified=timezone.now(),
)
if updated_endpoint_count > 0:
@@ -472,7 +472,7 @@ def migrate_endpoints_view(request):
request, 'dojo/migrate_endpoints.html', {
'product_tab': None,
"name": view_name,
- "html_log": html_log
+ "html_log": html_log,
})
diff --git a/dojo/engagement/signals.py b/dojo/engagement/signals.py
index f8863ee862..7a8e3352ba 100644
--- a/dojo/engagement/signals.py
+++ b/dojo/engagement/signals.py
@@ -42,7 +42,7 @@ def engagement_post_delete(sender, instance, using, origin, **kwargs):
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
content_type=ContentType.objects.get(app_label='dojo', model='engagement'),
- object_id=instance.id
+ object_id=instance.id,
)
description = _('The engagement "%(name)s" was deleted by %(user)s') % {
'name': instance.name, 'user': le.actor}
diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py
index 2fdc7e34a4..f28a0863fb 100644
--- a/dojo/engagement/views.py
+++ b/dojo/engagement/views.py
@@ -137,7 +137,7 @@ def engagement_calendar(request):
'caltype': 'engagements',
'leads': request.GET.getlist('lead', ''),
'engagements': engagements,
- 'users': get_authorized_users(Permissions.Engagement_View)
+ 'users': get_authorized_users(Permissions.Engagement_View),
})
@@ -158,7 +158,7 @@ def get_filtered_engagements(request, view):
if System_Settings.objects.get().enable_jira:
engagements = engagements.prefetch_related(
'jira_project__jira_instance',
- 'product__jira_project_set__jira_instance'
+ 'product__jira_project_set__jira_instance',
)
filter_string_matching = get_system_setting("filter_string_matching", False)
@@ -174,11 +174,11 @@ def get_test_counts(engagements):
engagement_test_counts = {
test['engagement']: test['test_count']
for test in Test.objects.filter(
- engagement__in=engagements
+ engagement__in=engagements,
).values(
- 'engagement'
+ 'engagement',
).annotate(
- test_count=Count('engagement')
+ test_count=Count('engagement'),
)
}
return engagement_test_counts
@@ -221,7 +221,7 @@ def engagements_all(request):
products_filter_class = ProductEngagementsFilterWithoutObjectLookups if filter_string_matching else ProductEngagementsFilter
engagement_query = Engagement.objects.annotate(test_count=Count('test__id'))
filter_qs = products_with_engagements.prefetch_related(
- Prefetch('engagement_set', queryset=products_filter_class(request.GET, engagement_query).qs)
+ Prefetch('engagement_set', queryset=products_filter_class(request.GET, engagement_query).qs),
)
filter_qs = filter_qs.prefetch_related(
@@ -233,12 +233,12 @@ def engagements_all(request):
if System_Settings.objects.get().enable_jira:
filter_qs = filter_qs.prefetch_related(
'engagement_set__jira_project__jira_instance',
- 'jira_project_set__jira_instance'
+ 'jira_project_set__jira_instance',
)
filter_class = EngagementFilterWithoutObjectLookups if filter_string_matching else EngagementFilter
filtered = filter_class(
request.GET,
- queryset=filter_qs
+ queryset=filter_qs,
)
prods = get_page_items(request, filtered.qs, 25)
@@ -500,7 +500,7 @@ def get(self, request, eid, *args, **kwargs):
'creds': creds,
'cred_eng': cred_eng,
'network': network,
- 'preset_test_type': preset_test_type
+ 'preset_test_type': preset_test_type,
})
def post(self, request, eid, *args, **kwargs):
@@ -589,7 +589,7 @@ def post(self, request, eid, *args, **kwargs):
'creds': creds,
'cred_eng': cred_eng,
'network': network,
- 'preset_test_type': preset_test_type
+ 'preset_test_type': preset_test_type,
})
@@ -686,7 +686,7 @@ def add_tests(request, eid):
'form': form,
'cred_form': cred_form,
'eid': eid,
- 'eng': eng
+ 'eng': eng,
})
@@ -767,9 +767,9 @@ def get_credential_form(
return CredMappingForm(
initial={
"cred_user_queryset": Cred_Mapping.objects.filter(
- engagement=engagement
+ engagement=engagement,
).order_by('cred_id'),
- }
+ },
)
def get_jira_form(
@@ -790,12 +790,12 @@ def get_jira_form(
jira_form = JIRAImportScanForm(
request.POST,
push_all=push_all_jira_issues,
- prefix='jiraform'
+ prefix='jiraform',
)
else:
jira_form = JIRAImportScanForm(
push_all=push_all_jira_issues,
- prefix='jiraform'
+ prefix='jiraform',
)
return jira_form, push_all_jira_issues
@@ -926,7 +926,7 @@ def import_findings(
try:
importer_client = DefaultImporter(**context)
context["test"], _, finding_count, closed_finding_count, _, _, _ = importer_client.process_scan(
- context.pop("scan", None)
+ context.pop("scan", None),
)
# Add a message to the view for the user to see the results
add_success_message_to_response(importer_client.construct_imported_message(
@@ -1023,7 +1023,7 @@ def process_credentials_form(
# Select the credential mapping object from the selected list and only allow if the credential is associated with the product
cred_user = Cred_Mapping.objects.filter(
pk=cred_user.id,
- engagement=context.get("engagement")
+ engagement=context.get("engagement"),
).first()
# Create the new credential mapping object
new_cred_mapping = form.save(commit=False)
@@ -1258,7 +1258,7 @@ def add_risk_acceptance(request, eid, fid=None):
return render(request, 'dojo/add_risk_acceptance.html', {
'eng': eng,
'product_tab': product_tab,
- 'form': form
+ 'form': form,
})
@@ -1551,7 +1551,7 @@ def engagement_ics(request, eid):
f"Set aside for engagement {eng.name}, on product {eng.product.name}. "
f"Additional detail can be found at {request.build_absolute_uri(reverse('view_engagement', args=(eng.id, )))}"
),
- uid
+ uid,
)
output = cal.serialize()
response = HttpResponse(content=output)
@@ -1687,7 +1687,7 @@ def excel_export(request):
response = HttpResponse(
content=stream,
- content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
+ content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
)
response['Content-Disposition'] = 'attachment; filename=engagements.xlsx'
return response
diff --git a/dojo/filters.py b/dojo/filters.py
index 76c65b92a6..6d124d67f0 100644
--- a/dojo/filters.py
+++ b/dojo/filters.py
@@ -96,7 +96,7 @@
local_tz = pytz.timezone(get_system_setting('time_zone'))
-BOOLEAN_CHOICES = (('false', 'No'), ('true', 'Yes'),)
+BOOLEAN_CHOICES = (('false', 'No'), ('true', 'Yes'))
EARLIEST_FINDING = None
@@ -183,7 +183,7 @@ def filter(self, qs, value):
earliest_finding = get_earliest_finding(qs)
if earliest_finding is not None:
start_date = local_tz.localize(datetime.combine(
- earliest_finding.date, datetime.min.time())
+ earliest_finding.date, datetime.min.time()),
)
self.start_date = _truncate(start_date - timedelta(days=1))
self.end_date = _truncate(now() + timedelta(days=1))
@@ -213,7 +213,7 @@ def sla_violated(self, qs, name):
risk_accepted=False,
is_mitigated=False,
mitigated=None,
- ) & Q(sla_expiration_date__lt=timezone.now().date())
+ ) & Q(sla_expiration_date__lt=timezone.now().date()),
)
options = {
@@ -341,7 +341,7 @@ def get_finding_filterset_fields(metrics=False, similar=False, filter_string_mat
if similar:
fields.extend([
'id',
- 'hash_code'
+ 'hash_code',
])
fields.extend(['title', 'component_name', 'component_version'])
@@ -609,7 +609,7 @@ class DateRangeFilter(ChoiceFilter):
1: (_('Today'), lambda qs, name: qs.filter(**{
f'{name}__year': now().year,
f'{name}__month': now().month,
- f'{name}__day': now().day
+ f'{name}__day': now().day,
})),
2: (_('Past 7 days'), lambda qs, name: qs.filter(**{
f'{name}__gte': _truncate(now() - timedelta(days=7)),
@@ -625,7 +625,7 @@ class DateRangeFilter(ChoiceFilter):
})),
5: (_('Current month'), lambda qs, name: qs.filter(**{
f'{name}__year': now().year,
- f'{name}__month': now().month
+ f'{name}__month': now().month,
})),
6: (_('Current year'), lambda qs, name: qs.filter(**{
f'{name}__year': now().year,
@@ -655,7 +655,7 @@ class DateRangeOmniFilter(ChoiceFilter):
1: (_('Today'), lambda qs, name: qs.filter(**{
f'{name}__year': now().year,
f'{name}__month': now().month,
- f'{name}__day': now().day
+ f'{name}__day': now().day,
})),
2: (_('Next 7 days'), lambda qs, name: qs.filter(**{
f'{name}__gte': _truncate(now() + timedelta(days=1)),
@@ -683,7 +683,7 @@ class DateRangeOmniFilter(ChoiceFilter):
})),
8: (_('Current month'), lambda qs, name: qs.filter(**{
f'{name}__year': now().year,
- f'{name}__month': now().month
+ f'{name}__month': now().month,
})),
9: (_('Past year'), lambda qs, name: qs.filter(**{
f'{name}__gte': _truncate(now() - timedelta(days=365)),
@@ -715,10 +715,10 @@ class ReportBooleanFilter(ChoiceFilter):
options = {
None: (_('Either'), lambda qs, name: qs.all()),
1: (_('Yes'), lambda qs, name: qs.filter(**{
- f'{name}': True
+ f'{name}': True,
})),
2: (_('No'), lambda qs, name: qs.filter(**{
- f'{name}': False
+ f'{name}': False,
})),
}
@@ -775,7 +775,7 @@ def any(self, qs, name):
earliest_finding = get_earliest_finding(qs)
if earliest_finding is not None:
start_date = local_tz.localize(datetime.combine(
- earliest_finding.date, datetime.min.time())
+ earliest_finding.date, datetime.min.time()),
)
self.start_date = _truncate(start_date - timedelta(days=1))
self.end_date = _truncate(now() + timedelta(days=1))
@@ -787,7 +787,7 @@ def current_month(self, qs, name):
self.end_date = now()
return qs.filter(**{
f'{name}__year': self.start_date.year,
- f'{name}__month': self.start_date.month
+ f'{name}__month': self.start_date.month,
})
def current_year(self, qs, name):
@@ -843,7 +843,7 @@ def filter(self, qs, value):
earliest_finding = get_earliest_finding(qs)
if earliest_finding is not None:
start_date = local_tz.localize(datetime.combine(
- earliest_finding.date, datetime.min.time())
+ earliest_finding.date, datetime.min.time()),
)
self.start_date = _truncate(start_date - timedelta(days=1))
self.end_date = _truncate(now() + timedelta(days=1))
@@ -872,7 +872,7 @@ class ProductComponentFilter(DojoFilter):
'active': 'Active',
'duplicate': 'Duplicate',
'total': 'Total',
- }
+ },
)
@@ -945,7 +945,7 @@ class EngagementDirectFilterHelper(FilterSet):
"product__name": "Product Name",
"product__prod_type__name": "Product Type",
"lead__first_name": "Lead",
- }
+ },
)
@@ -1026,7 +1026,7 @@ class EngagementFilterHelper(FilterSet):
field_labels={
"name": "Product Name",
"prod_type__name": "Product Type",
- }
+ },
)
@@ -1132,7 +1132,7 @@ class ProductEngagementFilterHelper(FilterSet):
),
field_labels={
'name': 'Engagement Name',
- }
+ },
)
class Meta:
@@ -1203,7 +1203,7 @@ class ApiEngagementFilter(DojoFilter):
),
field_labels={
'name': 'Engagement Name',
- }
+ },
)
@@ -1240,7 +1240,7 @@ class ProductFilterHelper(FilterSet):
('origin', 'origin'),
('external_audience', 'external_audience'),
('internet_accessible', 'internet_accessible'),
- ('findings_count', 'findings_count')
+ ('findings_count', 'findings_count'),
),
field_labels={
'name': 'Product Name',
@@ -1253,7 +1253,7 @@ class ProductFilterHelper(FilterSet):
'external_audience': 'External Audience ',
'internet_accessible': 'Internet Accessible ',
'findings_count': 'Findings Count ',
- }
+ },
)
@@ -1283,7 +1283,7 @@ class Meta:
fields = [
"name", "name_exact", "prod_type", "business_criticality",
"platform", "lifecycle", "origin", "external_audience",
- "internet_accessible", "tags"
+ "internet_accessible", "tags",
]
@@ -1377,8 +1377,8 @@ class ApiProductFilter(DojoFilter):
('prod_type', 'prod_type'),
('prod_type__name', 'prod_type__name'),
('updated', 'updated'),
- ('user_records', 'user_records')
- )
+ ('user_records', 'user_records'),
+ ),
)
@@ -1528,7 +1528,7 @@ def filter_percentage(self, queryset, name, value):
max_val = value + decimal.Decimal(f"1E{exponent}")
lookup_kwargs = {
f"{name}__gte": value,
- f"{name}__lt": max_val, }
+ f"{name}__lt": max_val}
return queryset.filter(**lookup_kwargs)
@@ -1652,7 +1652,7 @@ class FindingFilterHelper(FilterSet):
'test__engagement__product__name': 'Product Name',
'epss_score': 'EPSS Score',
'epss_percentile': 'EPSS Percentile',
- }
+ },
)
def __init__(self, *args, **kwargs):
@@ -1752,7 +1752,7 @@ class Meta:
'numerical_severity', 'line', 'duplicate_finding',
'hash_code', 'reviewers', 'created', 'files',
'sla_start_date', 'sla_expiration_date', 'cvssv3',
- 'severity_justification', 'steps_to_reproduce',]
+ 'severity_justification', 'steps_to_reproduce']
def __init__(self, *args, **kwargs):
self.user = None
@@ -1810,7 +1810,7 @@ class Meta:
'numerical_severity', 'line', 'duplicate_finding',
'hash_code', 'reviewers', 'created', 'files',
'sla_start_date', 'sla_expiration_date', 'cvssv3',
- 'severity_justification', 'steps_to_reproduce',]
+ 'severity_justification', 'steps_to_reproduce']
def __init__(self, *args, **kwargs):
self.user = None
@@ -1832,7 +1832,7 @@ def set_related_object_fields(self, *args: list, **kwargs: dict):
del self.form.fields['test__engagement__product__prod_type']
# TODO add authorized check to be sure
self.form.fields['test__engagement'].queryset = Engagement.objects.filter(
- product_id=self.pid
+ product_id=self.pid,
).all()
self.form.fields['test'].queryset = get_authorized_tests(Permissions.Test_View, product=self.pid).prefetch_related('test_type')
else:
@@ -1991,7 +1991,7 @@ class TemplateFindingFilter(DojoFilter):
),
field_labels={
'numerical_severity': 'Severity',
- }
+ },
)
class Meta:
@@ -2204,7 +2204,7 @@ def __init__(self, *args, **kwargs):
if self.pid:
del self.form.fields["finding__test__engagement__product__prod_type"]
self.form.fields["finding__test__engagement"].queryset = Engagement.objects.filter(
- product_id=self.pid
+ product_id=self.pid,
).all()
else:
self.form.fields["finding__test__engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View).order_by("name")
@@ -2669,7 +2669,7 @@ class EngagementTestFilterHelper(FilterSet):
),
field_labels={
'name': 'Test Name',
- }
+ },
)
@@ -2804,7 +2804,7 @@ class ApiTestFilter(DojoFilter):
),
field_labels={
'name': 'Test Name',
- }
+ },
)
class Meta:
@@ -3141,7 +3141,7 @@ class UserFilter(DojoFilter):
'username': 'User Name',
'is_active': 'Active',
'is_superuser': 'Superuser',
- }
+ },
)
class Meta:
@@ -3177,7 +3177,7 @@ class TestImportFilter(DojoFilter):
('build_id', 'build_id'),
('commit_hash', 'commit_hash'),
- )
+ ),
)
class Meta:
@@ -3191,7 +3191,7 @@ class TestImportFindingActionFilter(DojoFilter):
# tuple-mapping retains order
fields=(
('action', 'action'),
- )
+ ),
)
class Meta:
@@ -3219,8 +3219,8 @@ class Meta:
'filter_class': CharFilter,
'extra': lambda f: {
'lookup_expr': 'icontains',
- }
- }
+ },
+ },
}
diff --git a/dojo/finding/queries.py b/dojo/finding/queries.py
index e10cfca3dd..cfc7e9ace9 100644
--- a/dojo/finding/queries.py
+++ b/dojo/finding/queries.py
@@ -36,7 +36,7 @@ def get_authorized_groups(permission, user=None):
authorized_product_type_roles,
authorized_product_roles,
authorized_product_type_groups,
- authorized_product_groups
+ authorized_product_groups,
)
@@ -60,7 +60,7 @@ def get_authorized_findings(permission, queryset=None, user=None):
authorized_product_type_roles,
authorized_product_roles,
authorized_product_type_groups,
- authorized_product_groups
+ authorized_product_groups,
) = get_authorized_groups(permission, user=user)
findings = findings.annotate(
@@ -93,7 +93,7 @@ def get_authorized_stub_findings(permission):
authorized_product_type_roles,
authorized_product_roles,
authorized_product_type_groups,
- authorized_product_groups
+ authorized_product_groups,
) = get_authorized_groups(permission, user=user)
findings = Stub_Finding.objects.annotate(
diff --git a/dojo/finding/urls.py b/dojo/finding/urls.py
index 27549aeca1..5e20fd1b6f 100644
--- a/dojo/finding/urls.py
+++ b/dojo/finding/urls.py
@@ -7,113 +7,113 @@
re_path(
r'^finding/(?P<finding_id>\d+)$',
views.ViewFinding.as_view(),
- name='view_finding'
+ name='view_finding',
),
re_path(
r'^finding/(?P<finding_id>\d+)/edit$',
views.EditFinding.as_view(),
- name='edit_finding'
+ name='edit_finding',
),
re_path(
r'^finding/(?P<finding_id>\d+)/delete$',
views.DeleteFinding.as_view(),
- name='delete_finding'
+ name='delete_finding',
),
# Listing operations
re_path(
r'^finding$',
views.ListFindings.as_view(),
- name='all_findings'
+ name='all_findings',
),
re_path(
r'^finding/open$',
views.ListOpenFindings.as_view(),
- name='open_findings'
+ name='open_findings',
),
re_path(
r'^finding/verified$',
views.ListVerifiedFindings.as_view(),
- name='verified_findings'
+ name='verified_findings',
),
re_path(
r'^finding/closed$',
views.ListClosedFindings.as_view(),
- name='closed_findings'
+ name='closed_findings',
),
re_path(
r'^finding/accepted$',
views.ListAcceptedFindings.as_view(),
- name='accepted_findings'
+ name='accepted_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/open$',
views.ListOpenFindings.as_view(),
- name='product_open_findings'
+ name='product_open_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/findings$',
views.ListOpenFindings.as_view(),
- name='view_product_findings_old'
+ name='view_product_findings_old',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/verified$',
views.ListVerifiedFindings.as_view(),
- name='product_verified_findings'
+ name='product_verified_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/out_of_scope$',
views.ListOutOfScopeFindings.as_view(),
- name='product_out_of_scope_findings'
+ name='product_out_of_scope_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/inactive$',
views.ListInactiveFindings.as_view(),
- name='product_inactive_findings'
+ name='product_inactive_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/all$',
views.ListFindings.as_view(),
- name='product_all_findings'
+ name='product_all_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/closed$',
views.ListClosedFindings.as_view(),
- name='product_closed_findings'
+ name='product_closed_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/false_positive$',
views.ListFalsePositiveFindings.as_view(),
- name='product_false_positive_findings'
+ name='product_false_positive_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/accepted$',
views.ListAcceptedFindings.as_view(),
- name='product_accepted_findings'
+ name='product_accepted_findings',
),
re_path(
r'^engagement/(?P<engagement_id>\d+)/finding/open$',
views.ListOpenFindings.as_view(),
- name='engagement_open_findings'
+ name='engagement_open_findings',
),
re_path(
r'^engagement/(?P<engagement_id>\d+)/finding/closed$',
views.ListClosedFindings.as_view(),
- name='engagement_closed_findings'
+ name='engagement_closed_findings',
),
re_path(
r'^engagement/(?P<engagement_id>\d+)/finding/verified$',
views.ListVerifiedFindings.as_view(),
- name='engagement_verified_findings'
+ name='engagement_verified_findings',
),
re_path(
r'^engagement/(?P<engagement_id>\d+)/finding/accepted$',
views.ListAcceptedFindings.as_view(),
- name='engagement_accepted_findings'
+ name='engagement_accepted_findings',
),
re_path(
r'^engagement/(?P<engagement_id>\d+)/finding/all$',
views.ListFindings.as_view(),
- name='engagement_all_findings'
+ name='engagement_all_findings',
),
# findings
re_path(r'^finding/bulk$', views.finding_bulk_update_all,
diff --git a/dojo/finding/views.py b/dojo/finding/views.py
index c84154804a..8373022d72 100644
--- a/dojo/finding/views.py
+++ b/dojo/finding/views.py
@@ -133,34 +133,34 @@
def prefetch_for_findings(findings, prefetch_type="all", exclude_untouched=True):
prefetched_findings = findings
if isinstance(
- findings, QuerySet
+ findings, QuerySet,
): # old code can arrive here with prods being a list because the query was already executed
prefetched_findings = prefetched_findings.prefetch_related("reporter")
prefetched_findings = prefetched_findings.prefetch_related(
- "jira_issue__jira_project__jira_instance"
+ "jira_issue__jira_project__jira_instance",
)
prefetched_findings = prefetched_findings.prefetch_related("test__test_type")
prefetched_findings = prefetched_findings.prefetch_related(
- "test__engagement__jira_project__jira_instance"
+ "test__engagement__jira_project__jira_instance",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "test__engagement__product__jira_project_set__jira_instance"
+ "test__engagement__product__jira_project_set__jira_instance",
)
prefetched_findings = prefetched_findings.prefetch_related("found_by")
# for open/active findings the following 4 prefetches are not needed
if prefetch_type != "open":
prefetched_findings = prefetched_findings.prefetch_related(
- "risk_acceptance_set"
+ "risk_acceptance_set",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "risk_acceptance_set__accepted_findings"
+ "risk_acceptance_set__accepted_findings",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "original_finding"
+ "original_finding",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "duplicate_finding"
+ "duplicate_finding",
)
if exclude_untouched:
@@ -169,13 +169,13 @@ def prefetch_for_findings(findings, prefetch_type="all", exclude_untouched=True)
Prefetch(
"test_import_finding_action_set",
queryset=Test_Import_Finding_Action.objects.exclude(
- action=IMPORT_UNTOUCHED_FINDING
+ action=IMPORT_UNTOUCHED_FINDING,
),
- )
+ ),
)
else:
prefetched_findings = prefetched_findings.prefetch_related(
- "test_import_finding_action_set"
+ "test_import_finding_action_set",
)
"""
we could try to prefetch only the latest note with SubQuery and OuterRef,
@@ -187,23 +187,23 @@ def prefetch_for_findings(findings, prefetch_type="all", exclude_untouched=True)
prefetched_findings = prefetched_findings.prefetch_related("status_finding")
prefetched_findings = prefetched_findings.annotate(
active_endpoint_count=Count(
- "status_finding__id", filter=Q(status_finding__mitigated=False)
- )
+ "status_finding__id", filter=Q(status_finding__mitigated=False),
+ ),
)
prefetched_findings = prefetched_findings.annotate(
mitigated_endpoint_count=Count(
- "status_finding__id", filter=Q(status_finding__mitigated=True)
- )
+ "status_finding__id", filter=Q(status_finding__mitigated=True),
+ ),
)
prefetched_findings = prefetched_findings.prefetch_related("finding_group_set")
prefetched_findings = prefetched_findings.prefetch_related(
- "test__engagement__product__members"
+ "test__engagement__product__members",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "test__engagement__product__prod_type__members"
+ "test__engagement__product__prod_type__members",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "vulnerability_id_set"
+ "vulnerability_id_set",
)
else:
logger.debug("unable to prefetch because query was already executed")
@@ -214,25 +214,25 @@ def prefetch_for_findings(findings, prefetch_type="all", exclude_untouched=True)
def prefetch_for_similar_findings(findings):
prefetched_findings = findings
if isinstance(
- findings, QuerySet
+ findings, QuerySet,
): # old code can arrive here with prods being a list because the query was already executed
prefetched_findings = prefetched_findings.prefetch_related("reporter")
prefetched_findings = prefetched_findings.prefetch_related(
- "jira_issue__jira_project__jira_instance"
+ "jira_issue__jira_project__jira_instance",
)
prefetched_findings = prefetched_findings.prefetch_related("test__test_type")
prefetched_findings = prefetched_findings.prefetch_related(
- "test__engagement__jira_project__jira_instance"
+ "test__engagement__jira_project__jira_instance",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "test__engagement__product__jira_project_set__jira_instance"
+ "test__engagement__product__jira_project_set__jira_instance",
)
prefetched_findings = prefetched_findings.prefetch_related("found_by")
prefetched_findings = prefetched_findings.prefetch_related(
- "risk_acceptance_set"
+ "risk_acceptance_set",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "risk_acceptance_set__accepted_findings"
+ "risk_acceptance_set__accepted_findings",
)
prefetched_findings = prefetched_findings.prefetch_related("original_finding")
prefetched_findings = prefetched_findings.prefetch_related("duplicate_finding")
@@ -241,9 +241,9 @@ def prefetch_for_similar_findings(findings):
Prefetch(
"test_import_finding_action_set",
queryset=Test_Import_Finding_Action.objects.exclude(
- action=IMPORT_UNTOUCHED_FINDING
+ action=IMPORT_UNTOUCHED_FINDING,
),
- )
+ ),
)
"""
we could try to prefetch only the latest note with SubQuery and OuterRef,
@@ -252,7 +252,7 @@ def prefetch_for_similar_findings(findings):
prefetched_findings = prefetched_findings.prefetch_related("notes")
prefetched_findings = prefetched_findings.prefetch_related("tags")
prefetched_findings = prefetched_findings.prefetch_related(
- "vulnerability_id_set"
+ "vulnerability_id_set",
)
else:
logger.debug("unable to prefetch because query was already executed")
@@ -414,7 +414,7 @@ def add_breadcrumbs(self, request: HttpRequest, context: dict):
[
("Endpoints", reverse("vulnerable_endpoints")),
(endpoint, reverse("view_endpoint", args=(endpoint.id,))),
- ]
+ ],
)
# Show the "All findings" breadcrumb if nothing is coming from the product or engagement
elif not self.get_engagement_id() and not self.get_product_id():
@@ -556,7 +556,7 @@ def get_cwe_template(self, finding: Finding):
cwe_template = Finding_Template.objects.filter(cwe=finding.cwe).first()
return {
- "cwe_template": cwe_template
+ "cwe_template": cwe_template,
}
def get_request_response(self, finding: Finding):
@@ -609,12 +609,12 @@ def get_similar_findings(self, request: HttpRequest, finding: Finding):
}
# add related actions for non-similar and non-duplicate cluster members
finding.related_actions = calculate_possible_related_actions_for_similar_finding(
- request, finding, finding
+ request, finding, finding,
)
if finding.duplicate_finding:
finding.duplicate_finding.related_actions = (
calculate_possible_related_actions_for_similar_finding(
- request, finding, finding.duplicate_finding
+ request, finding, finding.duplicate_finding,
)
)
filter_string_matching = get_system_setting("filter_string_matching", False)
@@ -633,12 +633,12 @@ def get_similar_findings(self, request: HttpRequest, finding: Finding):
prefix="similar",
)
similar_findings.object_list = prefetch_for_similar_findings(
- similar_findings.object_list
+ similar_findings.object_list,
)
for similar_finding in similar_findings:
similar_finding.related_actions = (
calculate_possible_related_actions_for_similar_finding(
- request, finding, similar_finding
+ request, finding, similar_finding,
)
)
@@ -677,7 +677,7 @@ def get_typed_note_form(self, request: HttpRequest, context: dict):
args = [request.POST] if request.method == "POST" else []
# Set the initial form args
kwargs = {
- "available_note_types": context.get("available_note_types")
+ "available_note_types": context.get("available_note_types"),
}
return TypedNoteForm(*args, **kwargs)
@@ -698,7 +698,7 @@ def process_form(self, request: HttpRequest, finding: Finding, context: dict):
new_note.save()
# Add an entry to the note history
history = NoteHistory(
- data=new_note.entry, time=new_note.date, current_editor=new_note.author
+ data=new_note.entry, time=new_note.date, current_editor=new_note.author,
)
history.save()
new_note.history.add(history)
@@ -714,13 +714,13 @@ def process_form(self, request: HttpRequest, finding: Finding, context: dict):
jira_helper.add_comment(finding.finding_group, new_note)
# Send the notification of the note being added
url = request.build_absolute_uri(
- reverse("view_finding", args=(finding.id,))
+ reverse("view_finding", args=(finding.id,)),
)
title = f"Finding: {finding.title}"
process_tag_notifications(request, new_note, url, title)
# Add a message to the request
messages.add_message(
- request, messages.SUCCESS, "Note saved.", extra_tags="alert-success"
+ request, messages.SUCCESS, "Note saved.", extra_tags="alert-success",
)
return request, True
@@ -743,8 +743,8 @@ def get_initial_context(self, request: HttpRequest, finding: Finding, user: Dojo
"note_type_activation": note_type_activation,
"available_note_types": available_note_types,
"product_tab": Product_Tab(
- finding.test.engagement.product, title="View Finding", tab="findings"
- )
+ finding.test.engagement.product, title="View Finding", tab="findings",
+ ),
}
# Set the form using the context, and then update the context
form = self.get_form(request, context)
@@ -855,7 +855,7 @@ def get_github_form(self, request: HttpRequest, finding: Finding):
# Set the initial form args
kwargs = {
"enabled": finding.has_github_issue(),
- "prefix": "githubform"
+ "prefix": "githubform",
}
return GITHUBFindingForm(*args, **kwargs)
@@ -871,8 +871,8 @@ def get_initial_context(self, request: HttpRequest, finding: Finding):
"gform": self.get_github_form(request, finding),
"return_url": get_return_url(request),
"product_tab": Product_Tab(
- finding.test.engagement.product, title="Edit Finding", tab="findings"
- )
+ finding.test.engagement.product, title="Edit Finding", tab="findings",
+ ),
}
def validate_status_change(self, request: HttpRequest, finding: Finding, context: dict):
@@ -945,7 +945,7 @@ def process_false_positive_history(self, finding: Finding):
logger.debug('FALSE_POSITIVE_HISTORY: Reactivating existing findings based on: %s', finding)
existing_fp_findings = match_finding_to_existing_findings(
- finding, product=finding.test.engagement.product
+ finding, product=finding.test.engagement.product,
).filter(false_p=True)
for fp in existing_fp_findings:
@@ -964,10 +964,10 @@ def process_burp_request_response(self, finding: Finding, context: dict):
except BurpRawRequestResponse.MultipleObjectsReturned:
burp_rr = BurpRawRequestResponse.objects.filter(finding=finding).first()
burp_rr.burpRequestBase64 = base64.b64encode(
- context["form"].cleaned_data["request"].encode()
+ context["form"].cleaned_data["request"].encode(),
)
burp_rr.burpResponseBase64 = base64.b64encode(
- context["form"].cleaned_data["response"].encode()
+ context["form"].cleaned_data["response"].encode(),
)
burp_rr.clean()
burp_rr.save()
@@ -1067,7 +1067,7 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic
# Determine if a message should be added
if jira_message:
messages.add_message(
- request, messages.SUCCESS, jira_message, extra_tags="alert-success"
+ request, messages.SUCCESS, jira_message, extra_tags="alert-success",
)
return request, True, push_to_jira
@@ -1240,7 +1240,7 @@ def close_finding(request, fid):
finding.notes.add(new_note)
messages.add_message(
- request, messages.SUCCESS, "Note Saved.", extra_tags="alert-success"
+ request, messages.SUCCESS, "Note Saved.", extra_tags="alert-success",
)
if len(missing_note_types) <= 1:
@@ -1307,15 +1307,15 @@ def close_finding(request, fid):
url=reverse("view_finding", args=(finding.id,)),
)
return HttpResponseRedirect(
- reverse("view_test", args=(finding.test.id,))
+ reverse("view_test", args=(finding.test.id,)),
)
else:
return HttpResponseRedirect(
- reverse("close_finding", args=(finding.id,))
+ reverse("close_finding", args=(finding.id,)),
)
product_tab = Product_Tab(
- finding.test.engagement.product, title="Close", tab="findings"
+ finding.test.engagement.product, title="Close", tab="findings",
)
return render(
@@ -1394,7 +1394,7 @@ def defect_finding_review(request, fid):
jira_helper.push_to_jira(finding.finding_group)
messages.add_message(
- request, messages.SUCCESS, "Defect Reviewed", extra_tags="alert-success"
+ request, messages.SUCCESS, "Defect Reviewed", extra_tags="alert-success",
)
return HttpResponseRedirect(reverse("view_test", args=(finding.test.id,)))
@@ -1402,7 +1402,7 @@ def defect_finding_review(request, fid):
form = DefectFindingForm()
product_tab = Product_Tab(
- finding.test.engagement.product, title="Jira Status Review", tab="findings"
+ finding.test.engagement.product, title="Jira Status Review", tab="findings",
)
return render(
@@ -1460,7 +1460,7 @@ def reopen_finding(request, fid):
reopen_external_issue(finding, "re-opened by defectdojo", "github")
messages.add_message(
- request, messages.SUCCESS, "Finding Reopened.", extra_tags="alert-success"
+ request, messages.SUCCESS, "Finding Reopened.", extra_tags="alert-success",
)
# Note: this notification has not be moved to "@receiver(pre_save, sender=Finding)" method as many other notifications
@@ -1508,7 +1508,7 @@ def copy_finding(request, fid):
finding = get_object_or_404(Finding, id=fid)
product = finding.test.engagement.product
tests = get_authorized_tests(Permissions.Test_Edit).filter(
- engagement=finding.test.engagement
+ engagement=finding.test.engagement,
)
form = CopyFindingForm(tests=tests)
@@ -1531,13 +1531,13 @@ def copy_finding(request, fid):
description=f'The finding "{finding.title}" was copied by {request.user} to {test.title}',
product=product,
url=request.build_absolute_uri(
- reverse("copy_finding", args=(finding_copy.id,))
+ reverse("copy_finding", args=(finding_copy.id,)),
),
recipients=[finding.test.engagement.lead],
icon="exclamation-triangle",
)
return redirect_to_return_url_or_else(
- request, reverse("view_test", args=(test.id,))
+ request, reverse("view_test", args=(test.id,)),
)
else:
messages.add_message(
@@ -1571,7 +1571,7 @@ def remediation_date(request, fid):
if form.is_valid():
finding.planned_remediation_date = request.POST.get(
- "planned_remediation_date", ""
+ "planned_remediation_date", "",
)
finding.save()
messages.add_message(
@@ -1605,7 +1605,7 @@ def touch_finding(request, fid):
finding.last_reviewed_by = request.user
finding.save()
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(finding.id,))
+ request, reverse("view_finding", args=(finding.id,)),
)
@@ -1619,11 +1619,11 @@ def simple_risk_accept(request, fid):
ra_helper.simple_risk_accept(finding)
messages.add_message(
- request, messages.WARNING, "Finding risk accepted.", extra_tags="alert-success"
+ request, messages.WARNING, "Finding risk accepted.", extra_tags="alert-success",
)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(finding.id,))
+ request, reverse("view_finding", args=(finding.id,)),
)
@@ -1640,7 +1640,7 @@ def risk_unaccept(request, fid):
)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(finding.id,))
+ request, reverse("view_finding", args=(finding.id,)),
)
@@ -1723,7 +1723,7 @@ def request_finding_review(request, fid):
return HttpResponseRedirect(reverse("view_finding", args=(finding.id,)))
product_tab = Product_Tab(
- finding.test.engagement.product, title="Review Finding", tab="findings"
+ finding.test.engagement.product, title="Review Finding", tab="findings",
)
return render(
@@ -1799,7 +1799,7 @@ def clear_finding_review(request, fid):
form = ClearFindingReviewForm(instance=finding)
product_tab = Product_Tab(
- finding.test.engagement.product, title="Clear Finding Review", tab="findings"
+ finding.test.engagement.product, title="Clear Finding Review", tab="findings",
)
return render(
@@ -1838,14 +1838,14 @@ def mktemplate(request, fid):
for vulnerability_id in finding.vulnerability_ids:
Vulnerability_Id_Template(
- finding_template=template, vulnerability_id=vulnerability_id
+ finding_template=template, vulnerability_id=vulnerability_id,
).save()
messages.add_message(
request,
messages.SUCCESS,
mark_safe(
- 'Finding template added successfully. You may edit it <a href="{}">here</a>.'.format(reverse("edit_template", args=(template.id,)))
+ 'Finding template added successfully. You may edit it <a href="{}">here</a>.'.format(reverse("edit_template", args=(template.id,))),
),
extra_tags="alert-success",
)
@@ -1858,7 +1858,7 @@ def find_template_to_apply(request, fid):
test = get_object_or_404(Test, id=finding.test.id)
templates_by_cve = (
Finding_Template.objects.annotate(
- cve_len=Length("cve"), order=models.Value(1, models.IntegerField())
+ cve_len=Length("cve"), order=models.Value(1, models.IntegerField()),
)
.filter(cve=finding.cve, cve_len__gt=0)
.order_by("-last_used")
@@ -1868,7 +1868,7 @@ def find_template_to_apply(request, fid):
Finding_Template.objects.all()
.order_by("-last_used")
.annotate(
- cve_len=Length("cve"), order=models.Value(2, models.IntegerField())
+ cve_len=Length("cve"), order=models.Value(2, models.IntegerField()),
)
)
templates = templates_by_last_used
@@ -1878,11 +1878,11 @@ def find_template_to_apply(request, fid):
.exclude(cve=finding.cve)
.order_by("-last_used")
.annotate(
- cve_len=Length("cve"), order=models.Value(2, models.IntegerField())
+ cve_len=Length("cve"), order=models.Value(2, models.IntegerField()),
)
)
templates = templates_by_last_used.union(templates_by_cve).order_by(
- "order", "-last_used"
+ "order", "-last_used",
)
templates = TemplateFindingFilter(request.GET, queryset=templates)
@@ -1891,7 +1891,7 @@ def find_template_to_apply(request, fid):
# just query all templates as this weird ordering above otherwise breaks Django ORM
title_words = get_words_for_field(Finding_Template, "title")
product_tab = Product_Tab(
- test.engagement.product, title="Apply Template to Finding", tab="findings"
+ test.engagement.product, title="Apply Template to Finding", tab="findings",
)
return render(
request,
@@ -1961,7 +1961,7 @@ def apply_template_to_finding(request, fid, tid):
finding.cve = None
finding_helper.save_vulnerability_ids(
- finding, form.cleaned_data["vulnerability_ids"].split()
+ finding, form.cleaned_data["vulnerability_ids"].split(),
)
finding.save()
@@ -2070,7 +2070,7 @@ def promote_to_finding(request, fid):
jform = None
use_jira = jira_helper.get_jira_project(finding) is not None
product_tab = Product_Tab(
- finding.test.engagement.product, title="Promote Finding", tab="findings"
+ finding.test.engagement.product, title="Promote Finding", tab="findings",
)
if request.method == "POST":
@@ -2087,17 +2087,17 @@ def promote_to_finding(request, fid):
if form.is_valid() and (jform is None or jform.is_valid()):
if jform:
logger.debug(
- "jform.jira_issue: %s", jform.cleaned_data.get("jira_issue")
+ "jform.jira_issue: %s", jform.cleaned_data.get("jira_issue"),
)
logger.debug(
- JFORM_PUSH_TO_JIRA_MESSAGE, jform.cleaned_data.get("push_to_jira")
+ JFORM_PUSH_TO_JIRA_MESSAGE, jform.cleaned_data.get("push_to_jira"),
)
new_finding = form.save(commit=False)
new_finding.test = test
new_finding.reporter = request.user
new_finding.numerical_severity = Finding.get_numerical_severity(
- new_finding.severity
+ new_finding.severity,
)
new_finding.active = True
@@ -2116,7 +2116,7 @@ def promote_to_finding(request, fid):
# Push to Jira?
logger.debug("jira form valid")
push_to_jira = push_all_jira_issues or jform.cleaned_data.get(
- "push_to_jira"
+ "push_to_jira",
)
# if the jira issue key was changed, update database
@@ -2138,7 +2138,7 @@ def promote_to_finding(request, fid):
elif new_jira_issue_key != new_finding.jira_issue.jira_key:
jira_helper.finding_unlink_jira(request, new_finding)
jira_helper.finding_link_jira(
- request, new_finding, new_jira_issue_key
+ request, new_finding, new_jira_issue_key,
)
else:
logger.debug("finding has no jira issue yet")
@@ -2146,11 +2146,11 @@ def promote_to_finding(request, fid):
logger.debug(
"finding has no jira issue yet, but jira issue specified in request. trying to link.")
jira_helper.finding_link_jira(
- request, new_finding, new_jira_issue_key
+ request, new_finding, new_jira_issue_key,
)
finding_helper.save_vulnerability_ids(
- new_finding, form.cleaned_data["vulnerability_ids"].split()
+ new_finding, form.cleaned_data["vulnerability_ids"].split(),
)
new_finding.save(push_to_jira=push_to_jira)
@@ -2161,7 +2161,7 @@ def promote_to_finding(request, fid):
request.POST,
prefix="githubform",
enabled=GITHUB_PKey.objects.get(
- product=test.engagement.product
+ product=test.engagement.product,
).push_all_issues,
)
if gform.is_valid():
@@ -2178,7 +2178,7 @@ def promote_to_finding(request, fid):
else:
form_error = True
add_error_message_to_response(
- "The form has errors, please correct them below."
+ "The form has errors, please correct them below.",
)
add_field_errors_to_response(jform)
add_field_errors_to_response(form)
@@ -2261,7 +2261,7 @@ def apply_cwe_mitigation(apply_to_findings, template, update=True):
)
else:
finding_templates = Finding_Template.objects.filter(
- cwe=template.cwe, template_match=True, template_match_title=True
+ cwe=template.cwe, template_match=True, template_match_title=True,
)
finding_ids = None
@@ -2282,11 +2282,11 @@ def apply_cwe_mitigation(apply_to_findings, template, update=True):
# If result_list is None the filter exclude won't work
if result_list:
count = Finding.objects.filter(
- active=True, verified=True, cwe=template.cwe
+ active=True, verified=True, cwe=template.cwe,
).exclude(id__in=result_list)
else:
count = Finding.objects.filter(
- active=True, verified=True, cwe=template.cwe
+ active=True, verified=True, cwe=template.cwe,
)
if update:
@@ -2302,7 +2302,7 @@ def apply_cwe_mitigation(apply_to_findings, template, update=True):
f"CWE remediation text applied to finding for CWE: {template.cwe} using template: {template.title}."
)
new_note.author, _created = User.objects.get_or_create(
- username="System"
+ username="System",
)
new_note.save()
finding.notes.add(new_note)
@@ -2321,15 +2321,15 @@ def add_template(request):
apply_message = ""
template = form.save(commit=False)
template.numerical_severity = Finding.get_numerical_severity(
- template.severity
+ template.severity,
)
template.save()
finding_helper.save_vulnerability_ids_template(
- template, form.cleaned_data["vulnerability_ids"].split()
+ template, form.cleaned_data["vulnerability_ids"].split(),
)
form.save_m2m()
count = apply_cwe_mitigation(
- form.cleaned_data["apply_to_findings"], template
+ form.cleaned_data["apply_to_findings"], template,
)
if count > 0:
apply_message = (
@@ -2352,7 +2352,7 @@ def add_template(request):
)
add_breadcrumb(title="Add Template", top_level=False, request=request)
return render(
- request, "dojo/add_template.html", {"form": form, "name": "Add Template"}
+ request, "dojo/add_template.html", {"form": form, "name": "Add Template"},
)
@@ -2369,16 +2369,16 @@ def edit_template(request, tid):
if form.is_valid():
template = form.save(commit=False)
template.numerical_severity = Finding.get_numerical_severity(
- template.severity
+ template.severity,
)
finding_helper.save_vulnerability_ids_template(
- template, form.cleaned_data["vulnerability_ids"].split()
+ template, form.cleaned_data["vulnerability_ids"].split(),
)
template.save()
form.save_m2m()
count = apply_cwe_mitigation(
- form.cleaned_data["apply_to_findings"], template
+ form.cleaned_data["apply_to_findings"], template,
)
if count > 0:
apply_message = (
@@ -2516,10 +2516,10 @@ def merge_finding_product(request, pid):
request.GET.get("merge_findings") or request.method == "POST"
) and finding_to_update:
finding = Finding.objects.get(
- id=finding_to_update[0], test__engagement__product=product
+ id=finding_to_update[0], test__engagement__product=product,
)
findings = Finding.objects.filter(
- id__in=finding_to_update, test__engagement__product=product
+ id__in=finding_to_update, test__engagement__product=product,
)
form = MergeFindings(
finding=finding,
@@ -2540,7 +2540,7 @@ def merge_finding_product(request, pid):
if finding_to_merge_into not in findings_to_merge:
for finding in findings_to_merge.exclude(
- pk=finding_to_merge_into.pk
+ pk=finding_to_merge_into.pk,
):
notes_entry = f"{notes_entry}\n- {finding.title} ({finding.id}),"
if finding.static_finding:
@@ -2565,7 +2565,7 @@ def merge_finding_product(request, pid):
# if checked merge the endpoints
if form.cleaned_data["add_endpoints"]:
finding_to_merge_into.endpoints.add(
- *finding.endpoints.all()
+ *finding.endpoints.all(),
)
# if checked merge the tags
@@ -2576,7 +2576,7 @@ def merge_finding_product(request, pid):
# if checked re-assign the burp requests to the merged finding
if form.cleaned_data["dynamic_raw"]:
BurpRawRequestResponse.objects.filter(
- finding=finding
+ finding=finding,
).update(finding=finding_to_merge_into)
# Add merge finding information to the note if set to inactive
@@ -2584,7 +2584,7 @@ def merge_finding_product(request, pid):
single_finding_notes_entry = ("Finding has been set to inactive "
f"and merged with the finding: {finding_to_merge_into.title}.")
note = Notes(
- entry=single_finding_notes_entry, author=request.user
+ entry=single_finding_notes_entry, author=request.user,
)
note.save()
finding.notes.add(note)
@@ -2646,7 +2646,7 @@ def merge_finding_product(request, pid):
extra_tags="alert-success",
)
return HttpResponseRedirect(
- reverse("edit_finding", args=(finding_to_merge_into.id,))
+ reverse("edit_finding", args=(finding_to_merge_into.id,)),
)
else:
messages.add_message(
@@ -2664,14 +2664,14 @@ def merge_finding_product(request, pid):
)
product_tab = Product_Tab(
- finding.test.engagement.product, title="Merge Findings", tab="findings"
+ finding.test.engagement.product, title="Merge Findings", tab="findings",
)
custom_breadcrumb = {
"Open Findings": reverse(
- "product_open_findings", args=(finding.test.engagement.product.id,)
+ "product_open_findings", args=(finding.test.engagement.product.id,),
)
+ "?test__engagement__product="
- + str(finding.test.engagement.product.id)
+ + str(finding.test.engagement.product.id),
}
return render(
@@ -2709,11 +2709,11 @@ def finding_bulk_update_all(request, pid=None):
if pid is not None:
product = get_object_or_404(Product, id=pid)
user_has_permission_or_403(
- request.user, product, Permissions.Finding_Delete
+ request.user, product, Permissions.Finding_Delete,
)
finds = get_authorized_findings(
- Permissions.Finding_Delete, finds
+ Permissions.Finding_Delete, finds,
).distinct()
skipped_find_count = total_find_count - finds.count()
@@ -2724,7 +2724,7 @@ def finding_bulk_update_all(request, pid=None):
if skipped_find_count > 0:
add_error_message_to_response(
- f"Skipped deletion of {skipped_find_count} findings because you are not authorized."
+ f"Skipped deletion of {skipped_find_count} findings because you are not authorized.",
)
if deleted_find_count > 0:
@@ -2739,12 +2739,12 @@ def finding_bulk_update_all(request, pid=None):
if pid is not None:
product = get_object_or_404(Product, id=pid)
user_has_permission_or_403(
- request.user, product, Permissions.Finding_Edit
+ request.user, product, Permissions.Finding_Edit,
)
# make sure users are not editing stuff they are not authorized for
finds = get_authorized_findings(
- Permissions.Finding_Edit, finds
+ Permissions.Finding_Edit, finds,
).distinct()
skipped_find_count = total_find_count - finds.count()
@@ -2752,7 +2752,7 @@ def finding_bulk_update_all(request, pid=None):
if skipped_find_count > 0:
add_error_message_to_response(
- f"Skipped update of {skipped_find_count} findings because you are not authorized."
+ f"Skipped update of {skipped_find_count} findings because you are not authorized.",
)
finds = prefetch_for_findings(finds)
@@ -2763,7 +2763,7 @@ def finding_bulk_update_all(request, pid=None):
if form.cleaned_data["severity"]:
find.severity = form.cleaned_data["severity"]
find.numerical_severity = Finding.get_numerical_severity(
- form.cleaned_data["severity"]
+ form.cleaned_data["severity"],
)
find.last_reviewed = now
find.last_reviewed_by = request.user
@@ -2793,7 +2793,7 @@ def finding_bulk_update_all(request, pid=None):
logger.debug('FALSE_POSITIVE_HISTORY: Reactivating existing findings based on: %s', find)
existing_fp_findings = match_finding_to_existing_findings(
- find, product=find.test.engagement.product
+ find, product=find.test.engagement.product,
).filter(false_p=True)
for fp in existing_fp_findings:
@@ -2859,20 +2859,20 @@ def finding_bulk_update_all(request, pid=None):
finding_group_name = form.cleaned_data["finding_group_create_name"]
logger.debug("finding_group_create_name: %s", finding_group_name)
finding_group, added, skipped = finding_helper.create_finding_group(
- finds, finding_group_name
+ finds, finding_group_name,
)
if added:
add_success_message_to_response(
- f"Created finding group with {added} findings"
+ f"Created finding group with {added} findings",
)
return_url = reverse(
- "view_finding_group", args=(finding_group.id,)
+ "view_finding_group", args=(finding_group.id,),
)
if skipped:
add_success_message_to_response(
- f"Skipped {skipped} findings in group creation, findings already part of another group"
+ f"Skipped {skipped} findings in group creation, findings already part of another group",
)
# refresh findings from db
@@ -2883,21 +2883,21 @@ def finding_bulk_update_all(request, pid=None):
fgid = form.cleaned_data["add_to_finding_group_id"]
finding_group = Finding_Group.objects.get(id=fgid)
finding_group, added, skipped = finding_helper.add_to_finding_group(
- finding_group, finds
+ finding_group, finds,
)
if added:
add_success_message_to_response(
- f"Added {added} findings to finding group {finding_group.name}"
+ f"Added {added} findings to finding group {finding_group.name}",
)
return_url = reverse(
- "view_finding_group", args=(finding_group.id,)
+ "view_finding_group", args=(finding_group.id,),
)
if skipped:
add_success_message_to_response(
f"Skipped {skipped} findings when adding to finding group {finding_group.name}, "
- "findings already part of another group"
+ "findings already part of another group",
)
# refresh findings from db
@@ -2919,14 +2919,14 @@ def finding_bulk_update_all(request, pid=None):
[
finding_group.name
for finding_group in finding_groups
- ]
+ ],
),
- )
+ ),
)
if skipped:
add_success_message_to_response(
- f"Skipped {skipped} findings when removing from any finding group, findings not part of any group"
+ f"Skipped {skipped} findings when removing from any finding group, findings not part of any group",
)
# refresh findings from db
@@ -2950,13 +2950,13 @@ def finding_bulk_update_all(request, pid=None):
if grouped:
add_success_message_to_response(
"Grouped %d findings into %d (%d newly created) finding groups"
- % (grouped, len(finding_groups), groups_created)
+ % (grouped, len(finding_groups), groups_created),
)
if skipped:
add_success_message_to_response(
f"Skipped {skipped} findings when grouping by {finding_group_by_option} as these findings "
- "were already in an existing group"
+ "were already in an existing group",
)
# refresh findings from db
@@ -2982,7 +2982,7 @@ def finding_bulk_update_all(request, pid=None):
)
note.save()
history = NoteHistory(
- data=note.entry, time=note.date, current_editor=note.author
+ data=note.entry, time=note.date, current_editor=note.author,
)
history.save()
note.history.add(history)
@@ -3022,7 +3022,7 @@ def finding_bulk_update_all(request, pid=None):
jira_helper.log_jira_alert(error_message, group)
else:
logger.debug(
- "pushing to jira from finding.finding_bulk_update_all()"
+ "pushing to jira from finding.finding_bulk_update_all()",
)
jira_helper.push_to_jira(group)
success_count += 1
@@ -3030,12 +3030,12 @@ def finding_bulk_update_all(request, pid=None):
for error_message, error_count in error_counts.items():
add_error_message_to_response(
"%i finding groups could not be pushed to JIRA: %s"
- % (error_count, error_message)
+ % (error_count, error_message),
)
if success_count > 0:
add_success_message_to_response(
- "%i finding groups pushed to JIRA successfully" % success_count
+ "%i finding groups pushed to JIRA successfully" % success_count,
)
groups_pushed_to_jira = True
@@ -3080,7 +3080,7 @@ def finding_bulk_update_all(request, pid=None):
jira_helper.log_jira_alert(error_message, finding)
else:
logger.debug(
- "pushing to jira from finding.finding_bulk_update_all()"
+ "pushing to jira from finding.finding_bulk_update_all()",
)
jira_helper.push_to_jira(finding)
success_count += 1
@@ -3088,12 +3088,12 @@ def finding_bulk_update_all(request, pid=None):
for error_message, error_count in error_counts.items():
add_error_message_to_response(
"%i findings could not be pushed to JIRA: %s"
- % (error_count, error_message)
+ % (error_count, error_message),
)
if success_count > 0:
add_success_message_to_response(
- "%i findings pushed to JIRA successfully" % success_count
+ "%i findings pushed to JIRA successfully" % success_count,
)
if updated_find_count > 0:
@@ -3119,10 +3119,10 @@ def finding_bulk_update_all(request, pid=None):
def find_available_notetypes(notes):
single_note_types = Note_Type.objects.filter(
- is_single=True, is_active=True
+ is_single=True, is_active=True,
).values_list("id", flat=True)
multiple_note_types = Note_Type.objects.filter(
- is_single=False, is_active=True
+ is_single=False, is_active=True,
).values_list("id", flat=True)
available_note_types = []
for note_type_id in multiple_note_types:
@@ -3140,7 +3140,7 @@ def find_available_notetypes(notes):
def get_missing_mandatory_notetypes(finding):
notes = finding.notes.all()
mandatory_note_types = Note_Type.objects.filter(
- is_mandatory=True, is_active=True
+ is_mandatory=True, is_active=True,
).values_list("id", flat=True)
notes_to_be_added = []
for note_type_id in mandatory_note_types:
@@ -3171,7 +3171,7 @@ def mark_finding_duplicate(request, original_id, duplicate_id):
extra_tags="alert-danger",
)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(duplicate.id,))
+ request, reverse("view_finding", args=(duplicate.id,)),
)
duplicate.duplicate = True
@@ -3196,7 +3196,7 @@ def mark_finding_duplicate(request, original_id, duplicate_id):
original.save(dedupe_option=False)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(duplicate.id,))
+ request, reverse("view_finding", args=(duplicate.id,)),
)
@@ -3223,7 +3223,7 @@ def reset_finding_duplicate_status_internal(user, duplicate_id):
@require_POST
def reset_finding_duplicate_status(request, duplicate_id):
checked_duplicate_id = reset_finding_duplicate_status_internal(
- request.user, duplicate_id
+ request.user, duplicate_id,
)
if checked_duplicate_id is None:
messages.add_message(
@@ -3233,11 +3233,11 @@ def reset_finding_duplicate_status(request, duplicate_id):
extra_tags="alert-danger",
)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(duplicate_id,))
+ request, reverse("view_finding", args=(duplicate_id,)),
)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(checked_duplicate_id,))
+ request, reverse("view_finding", args=(checked_duplicate_id,)),
)
@@ -3274,7 +3274,7 @@ def set_finding_as_original_internal(user, finding_id, new_original_id):
cluster_member.save(dedupe_option=False)
logger.debug(
- "setting new original for old root %i to %i", finding.id, new_original.id
+ "setting new original for old root %i to %i", finding.id, new_original.id,
)
finding.duplicate = True
finding.duplicate_finding = new_original
@@ -3302,7 +3302,7 @@ def set_finding_as_original_internal(user, finding_id, new_original_id):
@require_POST
def set_finding_as_original(request, finding_id, new_original_id):
success = set_finding_as_original_internal(
- request.user, finding_id, new_original_id
+ request.user, finding_id, new_original_id,
)
if not success:
messages.add_message(
@@ -3314,7 +3314,7 @@ def set_finding_as_original(request, finding_id, new_original_id):
)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(finding_id,))
+ request, reverse("view_finding", args=(finding_id,)),
)
@@ -3323,7 +3323,7 @@ def set_finding_as_original(request, finding_id, new_original_id):
def unlink_jira(request, fid):
finding = get_object_or_404(Finding, id=fid)
logger.info(
- "trying to unlink a linked jira issue from %d:%s", finding.id, finding.title
+ "trying to unlink a linked jira issue from %d:%s", finding.id, finding.title,
)
if finding.has_jira_issue:
try:
@@ -3349,7 +3349,7 @@ def unlink_jira(request, fid):
return HttpResponse(status=500)
else:
messages.add_message(
- request, messages.ERROR, "Link to JIRA not found", extra_tags="alert-danger"
+ request, messages.ERROR, "Link to JIRA not found", extra_tags="alert-danger",
)
return HttpResponse(status=400)
@@ -3390,7 +3390,7 @@ def push_to_jira(request, fid):
logger.exception(e)
logger.error("Error pushing to JIRA: ", exc_info=True)
messages.add_message(
- request, messages.ERROR, "Error pushing to JIRA", extra_tags="alert-danger"
+ request, messages.ERROR, "Error pushing to JIRA", extra_tags="alert-danger",
)
return HttpResponse(status=500)
@@ -3405,7 +3405,7 @@ def duplicate_cluster(request, finding):
for duplicate_member in duplicate_cluster:
duplicate_member.related_actions = (
calculate_possible_related_actions_for_similar_finding(
- request, finding, duplicate_member
+ request, finding, duplicate_member,
)
)
@@ -3418,7 +3418,7 @@ def duplicate_cluster(request, finding):
# these actions are always calculated in the context of the finding the user is viewing
# because this determines which actions are possible
def calculate_possible_related_actions_for_similar_finding(
- request, finding, similar_finding
+ request, finding, similar_finding,
):
actions = []
if similar_finding.test.engagement != finding.test.engagement and (
@@ -3430,7 +3430,7 @@ def calculate_possible_related_actions_for_similar_finding(
"action": "None",
"reason": ("This finding is in a different engagement and deduplication_inside_engagment "
"is enabled here or in that finding"),
- }
+ },
)
elif finding.duplicate_finding == similar_finding:
actions.append(
@@ -3438,7 +3438,7 @@ def calculate_possible_related_actions_for_similar_finding(
"action": "None",
"reason": ("This finding is the root of the cluster, use an action on another row, "
"or the finding on top of the page to change the root of the cluser"),
- }
+ },
)
elif similar_finding.original_finding.all():
actions.append(
@@ -3446,7 +3446,7 @@ def calculate_possible_related_actions_for_similar_finding(
"action": "None",
"reason": ("This finding is similar, but is already an original in a different cluster. "
"Remove it from that cluster before you connect it to this cluster."),
- }
+ },
)
else:
if similar_finding.duplicate_finding:
@@ -3457,7 +3457,7 @@ def calculate_possible_related_actions_for_similar_finding(
"reason": ("This will remove the finding from the cluster, "
"effectively marking it no longer as duplicate. "
"Will not trigger deduplication logic after saving."),
- }
+ },
)
if (
@@ -3471,7 +3471,7 @@ def calculate_possible_related_actions_for_similar_finding(
"reason": ("Sets this finding as the Original for the whole cluster. "
"The existing Original will be downgraded to become a member of the cluster and, "
"together with the other members, will be marked as duplicate of the new Original."),
- }
+ },
)
else:
# duplicate inside different cluster
@@ -3480,7 +3480,7 @@ def calculate_possible_related_actions_for_similar_finding(
"action": "mark_finding_duplicate",
"reason": ("Will mark this finding as duplicate of the root finding in this cluster, "
"effectively adding it to the cluster and removing it from the other cluster."),
- }
+ },
)
else:
# similar is not a duplicate yet
@@ -3489,7 +3489,7 @@ def calculate_possible_related_actions_for_similar_finding(
{
"action": "mark_finding_duplicate",
"reason": "Will mark this finding as duplicate of the root finding in this cluster",
- }
+ },
)
actions.append(
{
@@ -3497,7 +3497,7 @@ def calculate_possible_related_actions_for_similar_finding(
"reason": ("Sets this finding as the Original for the whole cluster. "
"The existing Original will be downgraded to become a member of the cluster and, "
"together with the other members, will be marked as duplicate of the new Original."),
- }
+ },
)
else:
# similar_finding is not an original/root of a cluster as per earlier if clause
@@ -3505,14 +3505,14 @@ def calculate_possible_related_actions_for_similar_finding(
{
"action": "mark_finding_duplicate",
"reason": "Will mark this finding as duplicate of the finding on this page.",
- }
+ },
)
actions.append(
{
"action": "set_finding_as_original",
"reason": ("Sets this finding as the Original marking the finding "
"on this page as duplicate of this original."),
- }
+ },
)
return actions
diff --git a/dojo/finding_group/signals.py b/dojo/finding_group/signals.py
index 1dc0e339e9..e73927f13a 100644
--- a/dojo/finding_group/signals.py
+++ b/dojo/finding_group/signals.py
@@ -17,7 +17,7 @@ def finding_group_post_delete(sender, instance, using, origin, **kwargs):
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
content_type=ContentType.objects.get(app_label='dojo', model='finding_group'),
- object_id=instance.id
+ object_id=instance.id,
)
description = _('The finding group "%(name)s" was deleted by %(user)s') % {
'name': instance.name, 'user': le.actor}
diff --git a/dojo/forms.py b/dojo/forms.py
index 734d97586a..91c16eb3d5 100644
--- a/dojo/forms.py
+++ b/dojo/forms.py
@@ -169,8 +169,7 @@ def render(self, name, value, attrs=None, renderer=None):
if isinstance(value, str):
match = RE_DATE.match(value)
if match:
- year_val,
- month_val,
+ year_val, month_val = match[1], match[2]
output = []
@@ -540,13 +539,13 @@ class ImportScanForm(forms.Form):
help_text="If set to True, the tags will be applied to the findings",
label="Apply Tags to Findings",
required=False,
- initial=False
+ initial=False,
)
apply_tags_to_endpoints = forms.BooleanField(
help_text="If set to True, the tags will be applied to the endpoints",
label="Apply Tags to Endpoints",
required=False,
- initial=False
+ initial=False,
)
if is_finding_groups_enabled():
@@ -652,13 +651,13 @@ class ReImportScanForm(forms.Form):
help_text="If set to True, the tags will be applied to the findings",
label="Apply Tags to Findings",
required=False,
- initial=False
+ initial=False,
)
apply_tags_to_endpoints = forms.BooleanField(
help_text="If set to True, the tags will be applied to the endpoints",
label="Apply Tags to Endpoints",
required=False,
- initial=False
+ initial=False,
)
if is_finding_groups_enabled():
@@ -724,17 +723,17 @@ class ImportEndpointMetaForm(forms.Form):
label="Create nonexisting Endpoint",
initial=True,
required=False,
- help_text="Create endpoints that do not already exist",)
+ help_text="Create endpoints that do not already exist")
create_tags = forms.BooleanField(
label="Add Tags",
initial=True,
required=False,
- help_text="Add meta from file as tags in the format key:value",)
+ help_text="Add meta from file as tags in the format key:value")
create_dojo_meta = forms.BooleanField(
label="Add Meta",
initial=False,
required=False,
- help_text="Add data from file as Metadata. Metadata is used for displaying custom fields",)
+ help_text="Add data from file as Metadata. Metadata is used for displaying custom fields")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@@ -885,7 +884,7 @@ class AddFindingsRiskAcceptanceForm(forms.ModelForm):
queryset=Finding.objects.none(),
required=True,
label="",
- widget=TableCheckboxWidget(attrs={'size': 25})
+ widget=TableCheckboxWidget(attrs={'size': 25}),
)
class Meta:
@@ -926,7 +925,7 @@ class Meta:
'configuration_management', 'config_issues', 'authentication', 'auth_issues',
'authorization_and_access_control', 'author_issues',
'data_input_sanitization_validation', 'data_issues',
- 'sensitive_data', 'sensitive_issues', 'other', 'other_issues', ]
+ 'sensitive_data', 'sensitive_issues', 'other', 'other_issues']
class EngForm(forms.ModelForm):
@@ -1607,7 +1606,7 @@ def clean(self):
path=path,
query=query,
fragment=fragment,
- product=self.product
+ product=self.product,
)
if endpoint.count() > 1 or (endpoint.count() == 1 and endpoint.first().pk != self.endpoint_instance.pk):
msg = 'It appears as though an endpoint with this data already exists for this product.'
@@ -1651,7 +1650,7 @@ def save(self):
path=e[4],
query=e[5],
fragment=e[6],
- product=self.product
+ product=self.product,
)
processed_endpoints.append(endpoint)
return processed_endpoints
@@ -3120,12 +3119,12 @@ class LoginBanner(forms.Form):
label="Enable login banner",
initial=False,
required=False,
- help_text='Tick this box to enable a text banner on the login page'
+ help_text='Tick this box to enable a text banner on the login page',
)
banner_message = forms.CharField(
required=False,
- label="Message to display on the login page"
+ label="Message to display on the login page",
)
def clean(self):
@@ -3196,7 +3195,7 @@ def __init__(self, *args, **kwargs):
initial_answer = TextAnswer.objects.filter(
answered_survey=self.answered_survey,
- question=self.question
+ question=self.question,
)
if initial_answer.exists():
@@ -3271,14 +3270,14 @@ def __init__(self, *args, **kwargs):
required=not self.question.optional,
choices=choices,
initial=initial_choices,
- widget=widget
+ widget=widget,
)
self.fields['answer'] = field
# Render choice buttons inline
self.helper.layout = Layout(
- inline_type('answer')
+ inline_type('answer'),
)
def clean_answer(self):
@@ -3318,7 +3317,7 @@ def save(self):
# create a ChoiceAnswer
choice_answer = ChoiceAnswer.objects.create(
answered_survey=self.answered_survey,
- question=self.question
+ question=self.question,
)
# re save out the choices
diff --git a/dojo/github_issue_link/views.py b/dojo/github_issue_link/views.py
index aa4e9269cb..f575cf2d4d 100644
--- a/dojo/github_issue_link/views.py
+++ b/dojo/github_issue_link/views.py
@@ -44,14 +44,14 @@ def new_github(request):
messages.SUCCESS,
'GitHub Configuration Successfully Created.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('github', ))
+ return HttpResponseRedirect(reverse('github'))
except Exception as info:
logger.error(info)
messages.add_message(request,
messages.ERROR,
'Unable to authenticate on GitHub.',
extra_tags='alert-danger')
- return HttpResponseRedirect(reverse('github', ))
+ return HttpResponseRedirect(reverse('github'))
else:
gform = GITHUBForm()
add_breadcrumb(title="New GitHub Configuration", top_level=False, request=request)
diff --git a/dojo/group/urls.py b/dojo/group/urls.py
index 5348f97c1d..ddf7f03bd9 100644
--- a/dojo/group/urls.py
+++ b/dojo/group/urls.py
@@ -13,5 +13,5 @@
re_path(r'^group/(?P<gid>\d+)/add_group_member$', views.add_group_member, name='add_group_member'),
re_path(r'group/member/(?P<mid>\d+)/edit_group_member$', views.edit_group_member, name='edit_group_member'),
re_path(r'group/member/(?P<mid>\d+)/delete_group_member$', views.delete_group_member, name='delete_group_member'),
- re_path(r'^group/(?P<gid>\d+)/edit_permissions$', views.edit_permissions, name='edit_group_permissions')
+ re_path(r'^group/(?P<gid>\d+)/edit_permissions$', views.edit_permissions, name='edit_group_permissions'),
]
diff --git a/dojo/group/views.py b/dojo/group/views.py
index 46d2dd3196..1aea50fe35 100644
--- a/dojo/group/views.py
+++ b/dojo/group/views.py
@@ -244,7 +244,7 @@ def get_initial_context(self, request: HttpRequest, group: Dojo_Group):
return {
"form": self.get_group_form(request, group),
"to_delete": group,
- "rels": collector.nested()
+ "rels": collector.nested(),
}
def process_forms(self, request: HttpRequest, group: Dojo_Group, context: dict):
@@ -418,7 +418,7 @@ def add_group_member(request, gid):
add_breadcrumb(title="Add Group Member", top_level=False, request=request)
return render(request, 'dojo/new_group_member.html', {
'group': group,
- 'form': groupform
+ 'form': groupform,
})
@@ -460,7 +460,7 @@ def edit_group_member(request, mid):
add_breadcrumb(title="Edit a Group Member", top_level=False, request=request)
return render(request, 'dojo/edit_group_member.html', {
'memberid': mid,
- 'form': memberform
+ 'form': memberform,
})
@@ -501,7 +501,7 @@ def delete_group_member(request, mid):
add_breadcrumb("Delete a group member", top_level=False, request=request)
return render(request, 'dojo/delete_group_member.html', {
'memberid': mid,
- 'form': memberform
+ 'form': memberform,
})
@@ -531,7 +531,7 @@ def add_product_group(request, gid):
add_breadcrumb(title="Add Product Group", top_level=False, request=request)
return render(request, 'dojo/new_product_group_group.html', {
'group': group,
- 'form': group_form
+ 'form': group_form,
})
diff --git a/dojo/home/views.py b/dojo/home/views.py
index 26039f4a28..b79a5bf843 100644
--- a/dojo/home/views.py
+++ b/dojo/home/views.py
@@ -47,7 +47,7 @@ def dashboard(request: HttpRequest) -> HttpResponse:
punchcard, ticks = get_punchcard_data(findings, today - relativedelta(weeks=26), 26)
if user_has_configuration_permission(request.user, 'dojo.view_engagement_survey'):
- unassigned_surveys = Answered_Survey.objects.filter(assignee_id__isnull=True, completed__gt=0, ) \
+ unassigned_surveys = Answered_Survey.objects.filter(assignee_id__isnull=True, completed__gt=0) \
.filter(Q(engagement__isnull=True) | Q(engagement__in=engagements))
else:
unassigned_surveys = None
diff --git a/dojo/importers/auto_create_context.py b/dojo/importers/auto_create_context.py
index 6122196d48..a0c24bffa8 100644
--- a/dojo/importers/auto_create_context.py
+++ b/dojo/importers/auto_create_context.py
@@ -296,7 +296,7 @@ def get_or_create_engagement(
engagement = self.get_target_engagement_if_exists(
engagement_id=engagement_id,
engagement_name=engagement_name,
- product=product
+ product=product,
)
# If we have an engagement, we can just return it
if engagement:
diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py
index 449a9074b8..a2f4bb6794 100644
--- a/dojo/importers/base_importer.py
+++ b/dojo/importers/base_importer.py
@@ -299,7 +299,7 @@ def update_timestamps(self):
# target end date on the engagement
if self.test.engagement.engagement_type == 'CI/CD':
self.test.engagement.target_end = max_safe(
- [self.scan_date.date(), self.test.engagement.target_end]
+ [self.scan_date.date(), self.test.engagement.target_end],
)
# Set the target end date on the test in a similar fashion
max_test_start_date = max_safe([self.scan_date, self.test.target_end])
@@ -338,7 +338,7 @@ def update_import_history(
f"new: {len(new_findings)} "
f"closed: {len(closed_findings)} "
f"reactivated: {len(reactivated_findings)} "
- f"untouched: {len(untouched_findings)} "
+ f"untouched: {len(untouched_findings)} ",
)
# Create a dictionary to stuff into the test import object
import_settings = {}
@@ -597,7 +597,7 @@ def process_finding_groups(
def process_request_response_pairs(
self,
- finding: Finding
+ finding: Finding,
) -> None:
"""
Search the unsaved finding for the following attributes to determine
@@ -648,7 +648,7 @@ def process_endpoints(
def process_vulnerability_ids(
self,
- finding: Finding
+ finding: Finding,
) -> Finding:
"""
Parse the `unsaved_vulnerability_ids` field from findings after they are parsed
diff --git a/dojo/importers/default_importer.py b/dojo/importers/default_importer.py
index 4591fe3633..78bb761feb 100644
--- a/dojo/importers/default_importer.py
+++ b/dojo/importers/default_importer.py
@@ -216,7 +216,7 @@ def process_findings(
findings,
self.group_by,
create_finding_groups_for_all_findings=self.create_finding_groups_for_all_findings,
- **kwargs
+ **kwargs,
)
if self.push_to_jira:
if findings[0].finding_group is not None:
@@ -226,7 +226,7 @@ def process_findings(
sync = kwargs.get('sync', True)
if not sync:
- return [serialize('json', [finding, ]) for finding in new_findings]
+ return [serialize('json', [finding]) for finding in new_findings]
return new_findings
def close_old_findings(
@@ -259,12 +259,12 @@ def close_old_findings(
# Get the initial filtered list of old findings to be closed without
# considering the scope of the product or engagement
old_findings = Finding.objects.exclude(
- test=self.test
+ test=self.test,
).exclude(
- hash_code__in=new_hash_codes
+ hash_code__in=new_hash_codes,
).filter(
test__test_type=self.test.test_type,
- active=True
+ active=True,
)
# Accommodate for product scope or engagement scope
if self.close_old_findings_product_scope:
diff --git a/dojo/importers/default_reimporter.py b/dojo/importers/default_reimporter.py
index 0c930d9df7..ad0260f714 100644
--- a/dojo/importers/default_reimporter.py
+++ b/dojo/importers/default_reimporter.py
@@ -210,12 +210,12 @@ def process_findings(
if finding.dynamic_finding:
logger.debug(
"Re-import found an existing dynamic finding for this new "
- "finding. Checking the status of endpoints"
+ "finding. Checking the status of endpoints",
)
self.endpoint_manager.update_endpoint_status(
existing_finding,
unsaved_finding,
- self.user
+ self.user,
)
else:
finding = self.process_finding_that_was_not_matched(unsaved_finding)
@@ -372,18 +372,18 @@ def match_new_finding_to_existing_finding(
if self.deduplication_algorithm == 'hash_code':
return Finding.objects.filter(
test=self.test,
- hash_code=unsaved_finding.hash_code
+ hash_code=unsaved_finding.hash_code,
).exclude(hash_code=None).order_by('id')
elif self.deduplication_algorithm == 'unique_id_from_tool':
return Finding.objects.filter(
test=self.test,
- unique_id_from_tool=unsaved_finding.unique_id_from_tool
+ unique_id_from_tool=unsaved_finding.unique_id_from_tool,
).exclude(unique_id_from_tool=None).order_by('id')
elif self.deduplication_algorithm == 'unique_id_from_tool_or_hash_code':
query = Finding.objects.filter(
Q(test=self.test),
(Q(hash_code__isnull=False) & Q(hash_code=unsaved_finding.hash_code))
- | (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=unsaved_finding.unique_id_from_tool))
+ | (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=unsaved_finding.unique_id_from_tool)),
).order_by('id')
deduplicationLogger.debug(query.query)
return query
@@ -440,7 +440,7 @@ def process_matched_special_status_finding(
f"Skipping existing finding (it is marked as false positive: {existing_finding.false_p} "
f"and/or out of scope: {existing_finding.out_of_scope} or is a risk accepted: "
f"{existing_finding.risk_accepted}) - {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})"
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
)
# If all statuses are the same between findings, we can safely move on to the next
# finding in the report. Return True here to force a continue in the loop
@@ -499,7 +499,7 @@ def process_matched_mitigated_finding(
logger.debug(
"Skipping reactivating by user's choice do_not_reactivate: "
f" - {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})"
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
)
# Search for an existing note that this finding has been skipped for reactivation
# before this current time
@@ -522,7 +522,7 @@ def process_matched_mitigated_finding(
else:
logger.debug(
f"Reactivating: - {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})"
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
)
existing_finding.mitigated = None
existing_finding.is_mitigated = False
@@ -543,7 +543,7 @@ def process_matched_mitigated_finding(
endpoint_statuses = existing_finding.status_finding.exclude(
Q(false_positive=True)
| Q(out_of_scope=True)
- | Q(risk_accepted=True)
+ | Q(risk_accepted=True),
)
self.endpoint_manager.chunk_endpoints_and_reactivate(endpoint_statuses)
existing_finding.notes.add(note)
@@ -566,7 +566,7 @@ def process_matched_active_finding(
# existing findings may be from before we had component_name/version fields
logger.debug(
f"Updating existing finding: {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})"
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
)
# First check that the existing finding is definitely not mitigated
if not (existing_finding.mitigated and existing_finding.is_mitigated):
@@ -577,7 +577,7 @@ def process_matched_active_finding(
# as they could be force closed by the scanner but a DD user forces it open ?
logger.debug(
f"Closing: {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})"
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
)
existing_finding.mitigated = unsaved_finding.mitigated
existing_finding.is_mitigated = True
@@ -589,7 +589,7 @@ def process_matched_active_finding(
logger.debug('Reimported mitigated item matches a finding that is currently open, closing.')
logger.debug(
f"Closing: {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})"
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
)
existing_finding.risk_accepted = unsaved_finding.risk_accepted
existing_finding.false_p = unsaved_finding.false_p
@@ -639,7 +639,7 @@ def process_finding_that_was_not_matched(
logger.debug(
"Reimport created new finding as no existing finding match: "
f"{finding.id}: {finding.title} "
- f"({finding.component_name} - {finding.component_version})"
+ f"({finding.component_name} - {finding.component_version})",
)
# Manage the finding grouping selection
self.process_finding_groups(
@@ -690,7 +690,7 @@ def process_groups_for_all_findings(
findings,
self.group_by,
create_finding_groups_for_all_findings=self.create_finding_groups_for_all_findings,
- **kwargs
+ **kwargs,
)
if self.push_to_jira:
if findings[0].finding_group is not None:
diff --git a/dojo/importers/endpoint_manager.py b/dojo/importers/endpoint_manager.py
index e7b21043e1..2e885168aa 100644
--- a/dojo/importers/endpoint_manager.py
+++ b/dojo/importers/endpoint_manager.py
@@ -147,7 +147,7 @@ def chunk_endpoints_and_disperse(
def clean_unsaved_endpoints(
self,
- endpoints: List[Endpoint]
+ endpoints: List[Endpoint],
) -> None:
"""
Clean endpoints that are supplied. For any endpoints that fail this validation
@@ -232,13 +232,13 @@ def update_endpoint_status(
endpoint_status_to_mitigate = list(
filter(
lambda existing_finding_endpoint_status: existing_finding_endpoint_status.endpoint not in new_finding_endpoints_list,
- existing_finding_endpoint_status_list)
+ existing_finding_endpoint_status_list),
)
# Re-activate any endpoints in the old finding that are in the new finding
endpoint_status_to_reactivate = list(
filter(
lambda existing_finding_endpoint_status: existing_finding_endpoint_status.endpoint in new_finding_endpoints_list,
- existing_finding_endpoint_status_list)
+ existing_finding_endpoint_status_list),
)
self.chunk_endpoints_and_reactivate(endpoint_status_to_reactivate)
self.chunk_endpoints_and_mitigate(endpoint_status_to_mitigate, user)
diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py
index 32329431d7..f2b869e55a 100644
--- a/dojo/jira_link/helper.py
+++ b/dojo/jira_link/helper.py
@@ -50,12 +50,12 @@
'Mitigated',
'False Positive',
'Out of Scope',
- 'Duplicate'
+ 'Duplicate',
]
OPEN_STATUS = [
'Active',
- 'Verified'
+ 'Verified',
]
@@ -1303,7 +1303,7 @@ def add_epic(engagement, **kwargs):
epic_name = engagement.name
issue_dict = {
'project': {
- 'key': jira_project.project_key
+ 'key': jira_project.project_key,
},
'summary': epic_name,
'description': epic_name,
@@ -1393,7 +1393,7 @@ def add_simple_jira_comment(jira_instance, jira_issue, comment):
jira = get_jira_connection(jira_instance)
jira.add_comment(
- jira_issue.jira_id, comment
+ jira_issue.jira_id, comment,
)
return True
except Exception as e:
@@ -1620,7 +1620,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign
finding.false_p = False
ra = Risk_Acceptance.objects.create(
accepted_by=assignee_name,
- owner=finding.reporter
+ owner=finding.reporter,
)
finding.test.engagement.risk_acceptance.add(ra)
ra_helper.add_findings_to_risk_acceptance(ra, [finding])
diff --git a/dojo/jira_link/views.py b/dojo/jira_link/views.py
index e0c43884c4..80065f78ad 100644
--- a/dojo/jira_link/views.py
+++ b/dojo/jira_link/views.py
@@ -385,7 +385,7 @@ def post(self, request):
description=f"JIRA \"{jform.cleaned_data.get('configuration_name')}\" was added by {request.user}",
url=request.build_absolute_uri(reverse('jira')))
- return HttpResponseRedirect(reverse('jira', ))
+ return HttpResponseRedirect(reverse('jira'))
return render(request, self.get_template(), {'jform': jform})
@@ -430,7 +430,7 @@ def post(self, request):
description=f"JIRA \"{jform.cleaned_data.get('configuration_name')}\" was added by {request.user}",
url=request.build_absolute_uri(reverse('jira')))
- return HttpResponseRedirect(reverse('jira', ))
+ return HttpResponseRedirect(reverse('jira'))
else:
logger.error('jform.errors: %s', jform.errors)
return render(request, self.get_template(), {'jform': jform})
@@ -485,7 +485,7 @@ def post(self, request, jid=None):
description=f"JIRA \"{jform.cleaned_data.get('configuration_name')}\" was edited by {request.user}",
url=request.build_absolute_uri(reverse('jira')))
- return HttpResponseRedirect(reverse('jira', ))
+ return HttpResponseRedirect(reverse('jira'))
return render(request, self.get_template(), {'jform': jform})
diff --git a/dojo/management/commands/dedupe.py b/dojo/management/commands/dedupe.py
index 1e77c82c9a..995d258f15 100644
--- a/dojo/management/commands/dedupe.py
+++ b/dojo/management/commands/dedupe.py
@@ -38,7 +38,7 @@ def add_arguments(self, parser):
'--parser',
dest='parser',
action='append',
- help="""List of parsers for which hash_code needs recomputing (defaults to all parsers)"""
+ help="""List of parsers for which hash_code needs recomputing (defaults to all parsers)""",
)
parser.add_argument('--hash_code_only', action='store_true', help='Only compute hash codes')
diff --git a/dojo/management/commands/initialize_permissions.py b/dojo/management/commands/initialize_permissions.py
index a5a204d686..9e14ecdb89 100644
--- a/dojo/management/commands/initialize_permissions.py
+++ b/dojo/management/commands/initialize_permissions.py
@@ -22,7 +22,7 @@ def handle(self, *args, **options):
Permission.objects.create(
name='Can change Google Sheet',
content_type=content_type_system_settings,
- codename='change_google_sheet'
+ codename='change_google_sheet',
)
logger.info('Non-standard permissions have been created')
diff --git a/dojo/management/commands/jira_status_reconciliation.py b/dojo/management/commands/jira_status_reconciliation.py
index 918b91a28f..db1337fda6 100644
--- a/dojo/management/commands/jira_status_reconciliation.py
+++ b/dojo/management/commands/jira_status_reconciliation.py
@@ -75,7 +75,7 @@ def jira_status_reconciliation(*args, **kwargs):
# convert from str to datetime
issue_from_jira.fields.updated = parse_datetime(issue_from_jira.fields.updated)
- find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated,
+ find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated
flag1, flag2, flag3 = None, None, None
diff --git a/dojo/management/commands/migrate_cve.py b/dojo/management/commands/migrate_cve.py
index 739f78f7d0..74a07325f2 100644
--- a/dojo/management/commands/migrate_cve.py
+++ b/dojo/management/commands/migrate_cve.py
@@ -15,13 +15,13 @@
def create_vulnerability_id(finding):
Vulnerability_Id.objects.get_or_create(
- finding=finding, vulnerability_id=finding.cve
+ finding=finding, vulnerability_id=finding.cve,
)
def create_vulnerability_id_template(finding_template):
Vulnerability_Id_Template.objects.get_or_create(
- finding_template=finding_template, vulnerability_id=finding_template.cve
+ finding_template=finding_template, vulnerability_id=finding_template.cve,
)
diff --git a/dojo/metrics/utils.py b/dojo/metrics/utils.py
index 1a9d3f07c4..6de04ee72b 100644
--- a/dojo/metrics/utils.py
+++ b/dojo/metrics/utils.py
@@ -36,7 +36,7 @@
def finding_queries(
prod_type: QuerySet[Product_Type],
- request: HttpRequest
+ request: HttpRequest,
) -> dict[str, Any]:
# Get the initial list of findings the user is authorized to see
findings_query = get_authorized_findings(
@@ -94,7 +94,7 @@ def finding_queries(
active_findings,
accepted_findings,
start_date,
- MetricsType.FINDING
+ MetricsType.FINDING,
)
monthly_counts = query_counts_for_period(MetricsPeriod.MONTH, months_between)
@@ -110,9 +110,9 @@ def finding_queries(
prod_type__in=prod_type)
top_ten = severity_count(
- top_ten, 'annotate', 'engagement__test__finding__severity'
+ top_ten, 'annotate', 'engagement__test__finding__severity',
).order_by(
- '-critical', '-high', '-medium', '-low'
+ '-critical', '-high', '-medium', '-low',
)[:10]
return {
@@ -132,17 +132,17 @@ def finding_queries(
def endpoint_queries(
prod_type: QuerySet[Product_Type],
- request: HttpRequest
+ request: HttpRequest,
) -> dict[str, Any]:
endpoints_query = Endpoint_Status.objects.filter(
mitigated=False,
- finding__severity__in=('Critical', 'High', 'Medium', 'Low', 'Info')
+ finding__severity__in=('Critical', 'High', 'Medium', 'Low', 'Info'),
).prefetch_related(
'finding__test__engagement__product',
'finding__test__engagement__product__prod_type',
'finding__test__engagement__risk_acceptance',
'finding__risk_acceptance_set',
- 'finding__reporter'
+ 'finding__reporter',
)
endpoints_query = get_authorized_endpoint_status(Permissions.Endpoint_View, endpoints_query, request.user)
@@ -166,30 +166,30 @@ def endpoint_queries(
if len(prod_type) > 0:
endpoints_closed = Endpoint_Status.objects.filter(
mitigated_time__range=[start_date, end_date],
- finding__test__engagement__product__prod_type__in=prod_type
+ finding__test__engagement__product__prod_type__in=prod_type,
).prefetch_related(
- 'finding__test__engagement__product'
+ 'finding__test__engagement__product',
)
# capture the accepted findings in period
accepted_endpoints = Endpoint_Status.objects.filter(
date__range=[start_date, end_date],
risk_accepted=True,
- finding__test__engagement__product__prod_type__in=prod_type
+ finding__test__engagement__product__prod_type__in=prod_type,
).prefetch_related(
- 'finding__test__engagement__product'
+ 'finding__test__engagement__product',
)
else:
endpoints_closed = Endpoint_Status.objects.filter(
- mitigated_time__range=[start_date, end_date]
+ mitigated_time__range=[start_date, end_date],
).prefetch_related(
- 'finding__test__engagement__product'
+ 'finding__test__engagement__product',
)
# capture the accepted findings in period
accepted_endpoints = Endpoint_Status.objects.filter(
date__range=[start_date, end_date],
- risk_accepted=True
+ risk_accepted=True,
).prefetch_related(
- 'finding__test__engagement__product'
+ 'finding__test__engagement__product',
)
endpoints_closed = get_authorized_endpoint_status(Permissions.Endpoint_View, endpoints_closed, request.user)
@@ -203,7 +203,7 @@ def endpoint_queries(
endpoints_qs.filter(finding__active=True),
accepted_endpoints,
start_date,
- MetricsType.ENDPOINT
+ MetricsType.ENDPOINT,
)
monthly_counts = query_counts_for_period(MetricsPeriod.MONTH, months_between)
@@ -218,9 +218,9 @@ def endpoint_queries(
prod_type__in=prod_type)
top_ten = severity_count(
- top_ten, 'annotate', 'engagement__test__finding__severity'
+ top_ten, 'annotate', 'engagement__test__finding__severity',
).order_by(
- '-critical', '-high', '-medium', '-low'
+ '-critical', '-high', '-medium', '-low',
)[:10]
return {
@@ -281,7 +281,7 @@ def query_counts(
active_qs: MetricsQuerySet,
accepted_qs: MetricsQuerySet,
start_date: date,
- metrics_type: MetricsType
+ metrics_type: MetricsType,
) -> Callable[[MetricsPeriod, int], dict[str, list[dict]]]:
"""
Given three QuerySets, a start date, and a MetricsType, returns a method that can be used to generate statistics for
@@ -302,13 +302,13 @@ def _aggregate_data(qs: MetricsQuerySet, include_closed: bool = False) -> list[d
return {
'opened_per_period': _aggregate_data(open_qs, True),
'active_per_period': _aggregate_data(active_qs),
- 'accepted_per_period': _aggregate_data(accepted_qs)
+ 'accepted_per_period': _aggregate_data(accepted_qs),
}
return _aggregates_for_period
def get_date_range(
- qs: QuerySet
+ qs: QuerySet,
) -> tuple[datetime, datetime]:
"""
Given a queryset of objects, returns a tuple of (earliest date, latest date) from among those objects, based on the
@@ -334,7 +334,7 @@ def get_date_range(
def severity_count(
queryset: MetricsQuerySet,
method: str,
- expression: str
+ expression: str,
) -> Union[MetricsQuerySet, dict[str, int]]:
"""
Aggregates counts by severity for the given queryset.
@@ -351,12 +351,12 @@ def severity_count(
high=Count('id', filter=Q(**{expression: 'High'})),
medium=Count('id', filter=Q(**{expression: 'Medium'})),
low=Count('id', filter=Q(**{expression: 'Low'})),
- info=Count('id', filter=Q(**{expression: 'Info'}))
+ info=Count('id', filter=Q(**{expression: 'Info'})),
)
def identify_view(
- request: HttpRequest
+ request: HttpRequest,
) -> str:
"""
Identifies the requested metrics view.
@@ -382,7 +382,7 @@ def identify_view(
def js_epoch(
- d: Union[date, datetime]
+ d: Union[date, datetime],
) -> int:
"""
Converts a date/datetime object to a JavaScript epoch time (for use in FE charts)
@@ -400,7 +400,7 @@ def get_charting_data(
start_date: date,
period: MetricsPeriod,
period_count: int,
- include_closed: bool
+ include_closed: bool,
) -> list[dict]:
"""
Given a queryset of severities data for charting, adds epoch timestamp information and fills in missing data points
@@ -479,20 +479,20 @@ def aggregate_counts_by_period(
:return: A queryset with aggregate severity counts grouped by period
"""
- desired_values = ('grouped_date', 'critical', 'high', 'medium', 'low', 'info', 'total',)
+ desired_values = ('grouped_date', 'critical', 'high', 'medium', 'low', 'info', 'total')
severities_by_period = severity_count(
# Group by desired period
qs.annotate(grouped_date=period.db_method('date')).values('grouped_date'),
'annotate',
- metrics_type.severity_lookup
+ metrics_type.severity_lookup,
)
if include_closed:
severities_by_period = severities_by_period.annotate(
# Include 'closed' counts
closed=Sum(Case(
When(Q(**{metrics_type.closed_lookup: True}), then=Value(1)),
- output_field=IntegerField(), default=0)
+ output_field=IntegerField(), default=0),
),
)
desired_values += ('closed',)
@@ -501,7 +501,7 @@ def aggregate_counts_by_period(
def findings_by_product(
- findings: QuerySet[Finding]
+ findings: QuerySet[Finding],
) -> QuerySet[Finding]:
"""
Groups the given Findings queryset around related product (name/ID)
@@ -514,7 +514,7 @@ def findings_by_product(
def get_in_period_details(
- findings: QuerySet[Finding]
+ findings: QuerySet[Finding],
) -> tuple[QuerySet[Finding], QuerySet[Finding], dict[str, int]]:
"""
Gathers details for the given queryset, corresponding to metrics information for 'in period' Findings
@@ -525,7 +525,7 @@ def get_in_period_details(
"""
in_period_counts = severity_count(findings, 'aggregate', 'severity')
in_period_details = severity_count(
- findings_by_product(findings), 'annotate', 'severity'
+ findings_by_product(findings), 'annotate', 'severity',
).order_by('product_name')
# Approach to age determination is db-engine dependent
@@ -536,7 +536,7 @@ def get_in_period_details(
# so datediff() it is.
finding_table = Finding.objects.model._meta.db_table
age_detail = findings.annotate(
- age=RawSQL(f'DATEDIFF(COALESCE({finding_table}.mitigated, CURRENT_TIMESTAMP), {finding_table}.date)', [])
+ age=RawSQL(f'DATEDIFF(COALESCE({finding_table}.mitigated, CURRENT_TIMESTAMP), {finding_table}.date)', []),
)
else:
raise ValueError
@@ -552,7 +552,7 @@ def get_in_period_details(
def get_accepted_in_period_details(
- findings: QuerySet[Finding]
+ findings: QuerySet[Finding],
) -> QuerySet[Finding]:
"""
Gathers details for the given queryset, corresponding to metrics information for 'accepted' Findings
@@ -561,12 +561,12 @@ def get_accepted_in_period_details(
:return: A queryset of severity aggregates for Findings grouped by product (name/ID)
"""
return severity_count(
- findings_by_product(findings), 'annotate', 'severity'
+ findings_by_product(findings), 'annotate', 'severity',
).order_by('product_name')
def get_closed_in_period_details(
- findings: QuerySet[Finding]
+ findings: QuerySet[Finding],
) -> tuple[QuerySet[Finding], QuerySet[Finding]]:
"""
Gathers details for the given queryset, corresponding to metrics information for 'closed' Findings
@@ -578,13 +578,13 @@ def get_closed_in_period_details(
return (
severity_count(findings, 'aggregate', 'severity'),
severity_count(
- findings_by_product(findings), 'annotate', 'severity'
- ).order_by('product_name')
+ findings_by_product(findings), 'annotate', 'severity',
+ ).order_by('product_name'),
)
def findings_queryset(
- qs: MetricsQuerySet
+ qs: MetricsQuerySet,
) -> QuerySet[Finding]:
"""
Given a MetricsQuerySet, returns a QuerySet representing all its findings.
diff --git a/dojo/metrics/views.py b/dojo/metrics/views.py
index 718b21cd01..a15d9979fc 100644
--- a/dojo/metrics/views.py
+++ b/dojo/metrics/views.py
@@ -68,7 +68,7 @@ def critical_product_metrics(request, mtype):
return render(request, template, {
'name': page_name,
'critical_prods': critical_products,
- 'url_prefix': get_system_setting('url_prefix')
+ 'url_prefix': get_system_setting('url_prefix'),
})
@@ -108,11 +108,11 @@ def metrics(request, mtype):
in_period_counts, in_period_details, age_detail = get_in_period_details(all_findings)
accepted_in_period_details = get_accepted_in_period_details(
- findings_queryset(filters['accepted'])
+ findings_queryset(filters['accepted']),
)
closed_in_period_counts, closed_in_period_details = get_closed_in_period_details(
- findings_queryset(filters['closed'])
+ findings_queryset(filters['closed']),
)
punchcard = []
@@ -387,7 +387,7 @@ def product_type_counts(request):
'overall_in_pt': aip,
'all_current_in_pt': all_current_in_pt,
'top_ten': top_ten,
- 'pt': pt}
+ 'pt': pt},
)
@@ -545,7 +545,7 @@ def product_tag_counts(request):
'overall_in_pt': aip,
'all_current_in_pt': all_current_in_pt,
'top_ten': top_ten,
- 'pt': pt}
+ 'pt': pt},
)
@@ -722,22 +722,22 @@ def view_engineer(request, eid):
z_count += findings.filter(
test=test,
mitigated__isnull=True,
- severity='Critical'
+ severity='Critical',
).count()
o_count += findings.filter(
test=test,
mitigated__isnull=True,
- severity='High'
+ severity='High',
).count()
t_count += findings.filter(
test=test,
mitigated__isnull=True,
- severity='Medium'
+ severity='Medium',
).count()
h_count += findings.filter(
test=test,
mitigated__isnull=True,
- severity='Low'
+ severity='Low',
).count()
prod = Product.objects.get(id=product)
all_findings_link = "{}".format(
diff --git a/dojo/models.py b/dojo/models.py
index 364f714b4a..e29c0641db 100644
--- a/dojo/models.py
+++ b/dojo/models.py
@@ -360,16 +360,16 @@ class System_Settings(models.Model):
"has been previously marked as a false positive on the same product. "
"ATTENTION: Although the deduplication algorithm is used to determine "
"if a finding should be marked as a false positive, this feature will "
- "not work if deduplication is enabled since it doesn't make sense to use both."
- )
+ "not work if deduplication is enabled since it doesn't make sense to use both.",
+ ),
)
retroactive_false_positive_history = models.BooleanField(
default=False, help_text=_(
"(EXPERIMENTAL) FP History will also retroactively mark/unmark all "
"existing equal findings in the same product as a false positives. "
- "Only works if the False Positive History feature is also enabled."
- )
+ "Only works if the False Positive History feature is also enabled.",
+ ),
)
url_prefix = models.CharField(max_length=300, default='', blank=True, help_text=_("URL prefix if DefectDojo is installed in it's own virtual subdirectory."))
@@ -470,7 +470,7 @@ class System_Settings(models.Model):
default=False,
blank=False,
verbose_name=_('Allow Anonymous Survey Responses'),
- help_text=_("Enable anyone with a link to the survey to answer a survey")
+ help_text=_("Enable anyone with a link to the survey to answer a survey"),
)
credentials = models.TextField(max_length=3000, blank=True)
disclaimer = models.TextField(max_length=3000, default='', blank=True,
@@ -580,7 +580,7 @@ class System_Settings(models.Model):
verbose_name=_("Filter String Matching Optimization"),
help_text=_(
"When turned on, all filter operations in the UI will require string matches rather than ID. "
- "This is a performance enhancement to avoid fetching objects unnecessarily."
+ "This is a performance enhancement to avoid fetching objects unnecessarily.",
))
from dojo.middleware import System_Settings_Manager
@@ -1590,7 +1590,7 @@ class Meta:
models.Index(fields=['endpoint', 'mitigated']),
]
constraints = [
- models.UniqueConstraint(fields=['finding', 'endpoint'], name='endpoint-finding relation')
+ models.UniqueConstraint(fields=['finding', 'endpoint'], name='endpoint-finding relation'),
]
def __str__(self):
@@ -1672,7 +1672,7 @@ def __str__(self):
)
for qe in self.query.split("&")
) if self.query else (), # inspired by https://github.com/python-hyper/hyperlink/blob/b8c9152cd826bbe8e6cc125648f3738235019705/src/hyperlink/_url.py#L1427
- fragment=self.fragment or ''
+ fragment=self.fragment or '',
)
# Return a normalized version of the URL to avoid differences where there shouldn't be any difference.
# Example: https://google.com and https://google.com:443
@@ -1828,7 +1828,7 @@ def vulnerable(self):
mitigated=False,
false_positive=False,
out_of_scope=False,
- risk_accepted=False
+ risk_accepted=False,
).count() > 0
@property
@@ -1844,7 +1844,7 @@ def active_findings(self):
duplicate=False,
status_finding__false_positive=False,
status_finding__out_of_scope=False,
- status_finding__risk_accepted=False
+ status_finding__risk_accepted=False,
).order_by('numerical_severity')
return findings
@@ -1858,7 +1858,7 @@ def active_verified_findings(self):
duplicate=False,
status_finding__false_positive=False,
status_finding__out_of_scope=False,
- status_finding__risk_accepted=False
+ status_finding__risk_accepted=False,
).order_by('numerical_severity')
return findings
@@ -1913,7 +1913,7 @@ def host_active_findings(self):
status_finding__false_positive=False,
status_finding__out_of_scope=False,
status_finding__risk_accepted=False,
- endpoints__in=self.host_endpoints()
+ endpoints__in=self.host_endpoints(),
).order_by('numerical_severity')
return findings
@@ -1928,7 +1928,7 @@ def host_active_verified_findings(self):
status_finding__false_positive=False,
status_finding__out_of_scope=False,
status_finding__risk_accepted=False,
- endpoints__in=self.host_endpoints()
+ endpoints__in=self.host_endpoints(),
).order_by('numerical_severity')
return findings
@@ -2030,7 +2030,7 @@ class Test(models.Model):
target_start = models.DateTimeField()
target_end = models.DateTimeField()
estimated_time = models.TimeField(null=True, blank=True, editable=False)
- actual_time = models.TimeField(null=True, blank=True, editable=False, )
+ actual_time = models.TimeField(null=True, blank=True, editable=False)
percent_complete = models.IntegerField(null=True, blank=True,
editable=True)
notes = models.ManyToManyField(Notes, blank=True,
@@ -2852,7 +2852,7 @@ def get_endpoints(self):
# sort endpoints strings
endpoint_str = ''.join(
sorted(
- endpoint_str_list
+ endpoint_str_list,
))
return endpoint_str
@@ -3674,7 +3674,7 @@ def get_breadcrumbs(self):
bc = self.engagement_set.first().get_breadcrumbs()
bc += [{'title': str(self),
'url': reverse('view_risk_acceptance', args=(
- self.engagement_set.first().product.id, self.id,))}]
+ self.engagement_set.first().product.id, self.id))}]
return bc
@property
@@ -3736,7 +3736,7 @@ def save(self, *args, **kwargs):
('info', 'Info'),
('success', 'Success'),
('warning', 'Warning'),
- ('danger', 'Danger')
+ ('danger', 'Danger'),
)
@@ -3820,7 +3820,7 @@ class JIRA_Instance(models.Model):
('Epic', 'Epic'),
('Spike', 'Spike'),
('Bug', 'Bug'),
- ('Security', 'Security')
+ ('Security', 'Security'),
)
default_issue_type = models.CharField(max_length=255,
choices=default_issue_type_choices,
@@ -4044,7 +4044,7 @@ class Notifications(models.Model):
class Meta:
constraints = [
- models.UniqueConstraint(fields=['user', 'product'], name="notifications_user_product")
+ models.UniqueConstraint(fields=['user', 'product'], name="notifications_user_product"),
]
indexes = [
models.Index(fields=['user', 'product']),
@@ -4467,7 +4467,7 @@ class Engagement_Survey(models.Model):
class Meta:
verbose_name = _("Engagement Survey")
verbose_name_plural = "Engagement Surveys"
- ordering = ('-active', 'name',)
+ ordering = ('-active', 'name')
def __str__(self):
return self.name
diff --git a/dojo/notes/urls.py b/dojo/notes/urls.py
index 0f5ce2b0b1..ee8861ce2b 100644
--- a/dojo/notes/urls.py
+++ b/dojo/notes/urls.py
@@ -5,5 +5,5 @@
urlpatterns = [
re_path(r'^notes/(?P<id>\d+)/delete/(?P<page>[\w-]+)/(?P<objid>\d+)$', views.delete_note, name='delete_note'),
re_path(r'^notes/(?P<id>\d+)/edit/(?P<page>[\w-]+)/(?P<objid>\d+)$', views.edit_note, name='edit_note'),
- re_path(r'^notes/(?P<id>\d+)/history/(?P<page>[\w-]+)/(?P<objid>\d+)$', views.note_history, name='note_history')
+ re_path(r'^notes/(?P<id>\d+)/history/(?P<page>[\w-]+)/(?P<objid>\d+)$', views.note_history, name='note_history'),
]
diff --git a/dojo/notifications/helper.py b/dojo/notifications/helper.py
index 0afb0d6b36..b09bf1bea6 100644
--- a/dojo/notifications/helper.py
+++ b/dojo/notifications/helper.py
@@ -92,7 +92,7 @@ def create_notification(event=None, **kwargs):
users = Dojo_User.objects.filter(is_active=True).prefetch_related(Prefetch(
"notifications_set",
queryset=Notifications.objects.filter(Q(product_id=product) | Q(product__isnull=True)),
- to_attr="applicable_notifications"
+ to_attr="applicable_notifications",
)).annotate(applicable_notifications_count=Count('notifications__id', filter=Q(notifications__product_id=product) | Q(notifications__product__isnull=True)))\
.filter(Q(applicable_notifications_count__gt=0) | Q(is_superuser=True))
@@ -201,7 +201,7 @@ def _post_slack_message(channel):
'token': get_system_setting('slack_token'),
'channel': channel,
'username': get_system_setting('slack_username'),
- 'text': create_notification_message(event, user, 'slack', *args, **kwargs)
+ 'text': create_notification_message(event, user, 'slack', *args, **kwargs),
})
if 'error' in res.text:
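
For readers skimming the helper.py hunk above: the trailing comma lands inside a fairly dense queryset. Restated on its own as a sketch (same models and filters as the hunk, only reformatted for readability), the intent of Prefetch(..., to_attr=...) is to attach the applicable Notifications rows to each user as a plain list attribute:

from django.db.models import Count, Prefetch, Q

from dojo.models import Dojo_User, Notifications


def users_to_notify(product):
    # Attach the notification settings that apply to this product (or the
    # user's global settings) to each user as `user.applicable_notifications`,
    # then keep only users with at least one applicable row, plus superusers.
    return (
        Dojo_User.objects.filter(is_active=True)
        .prefetch_related(
            Prefetch(
                "notifications_set",
                queryset=Notifications.objects.filter(
                    Q(product_id=product) | Q(product__isnull=True),
                ),
                to_attr="applicable_notifications",
            ),
        )
        .annotate(
            applicable_notifications_count=Count(
                "notifications__id",
                filter=Q(notifications__product_id=product)
                | Q(notifications__product__isnull=True),
            ),
        )
        .filter(Q(applicable_notifications_count__gt=0) | Q(is_superuser=True))
    )
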
diff --git a/dojo/notifications/urls.py b/dojo/notifications/urls.py
index 8ac8cf2171..b7171e3779 100644
--- a/dojo/notifications/urls.py
+++ b/dojo/notifications/urls.py
@@ -6,5 +6,5 @@
re_path(r'^notifications$', views.PersonalNotificationsView.as_view(), name='notifications'),
re_path(r'^notifications/system$', views.SystemNotificationsView.as_view(), name='system_notifications'),
re_path(r'^notifications/personal$', views.PersonalNotificationsView.as_view(), name='personal_notifications'),
- re_path(r'^notifications/template$', views.TemplateNotificationsView.as_view(), name='template_notifications')
+ re_path(r'^notifications/template$', views.TemplateNotificationsView.as_view(), name='template_notifications'),
]
diff --git a/dojo/notifications/views.py b/dojo/notifications/views.py
index f20e45224f..2c102f59ef 100644
--- a/dojo/notifications/views.py
+++ b/dojo/notifications/views.py
@@ -45,7 +45,7 @@ def get_initial_context(self, request: HttpRequest, notifications: Notifications
'form': self.get_form(request, notifications),
'scope': scope,
'enabled_notifications': self.get_enabled_notifications(),
- 'admin': request.user.is_superuser
+ 'admin': request.user.is_superuser,
}
def set_breadcrumbs(self, request: HttpRequest):
diff --git a/dojo/object/views.py b/dojo/object/views.py
index 86d45e067f..cdaa60b05a 100644
--- a/dojo/object/views.py
+++ b/dojo/object/views.py
@@ -51,7 +51,7 @@ def view_objects(request, pid):
{
'object_queryset': object_queryset,
'product_tab': product_tab,
- 'product': product
+ 'product': product,
})
@@ -81,7 +81,7 @@ def edit_object(request, pid, ttid):
'dojo/edit_object.html',
{
'tform': tform,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
@@ -109,5 +109,5 @@ def delete_object(request, pid, ttid):
'dojo/delete_object.html',
{
'tform': tform,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
diff --git a/dojo/okta.py b/dojo/okta.py
index 68934e1d5e..cad1bc081c 100644
--- a/dojo/okta.py
+++ b/dojo/okta.py
@@ -37,12 +37,12 @@ class OktaOAuth2(OktaMixin, BaseOAuth2):
SCOPE_SEPARATOR = ' '
DEFAULT_SCOPE = [
- 'openid', 'profile'
+ 'openid', 'profile',
]
EXTRA_DATA = [
('refresh_token', 'refresh_token', True),
('expires_in', 'expires'),
- ('token_type', 'token_type', True)
+ ('token_type', 'token_type', True),
]
def get_user_details(self, response):
@@ -58,7 +58,7 @@ def user_data(self, access_token, *args, **kwargs):
self._url('v1/userinfo'),
headers={
'Authorization': f'Bearer {access_token}',
- }
+ },
)
@@ -94,7 +94,7 @@ def validate_and_return_id_token(self, id_token, access_token):
k,
audience=client_id,
issuer=self.id_token_issuer(),
- access_token=access_token
+ access_token=access_token,
)
self.validate_claims(claims)
diff --git a/dojo/product/queries.py b/dojo/product/queries.py
index 96f1b626cb..90307238e3 100644
--- a/dojo/product/queries.py
+++ b/dojo/product/queries.py
@@ -244,7 +244,7 @@ def get_authorized_dojo_meta(permission):
finding__test__engagement__product__prod_type__member=Exists(finding_authorized_product_type_roles),
finding__test__engagement__product__member=Exists(finding_authorized_product_roles),
finding__test__engagement__product__prod_type__authorized_group=Exists(finding_authorized_product_type_groups),
- finding__test__engagement__product__authorized_group=Exists(finding_authorized_product_groups)
+ finding__test__engagement__product__authorized_group=Exists(finding_authorized_product_groups),
).order_by('name')
dojo_meta = dojo_meta.filter(
Q(product__prod_type__member=True)
diff --git a/dojo/product/signals.py b/dojo/product/signals.py
index 4ae3053b5f..02f93cd582 100644
--- a/dojo/product/signals.py
+++ b/dojo/product/signals.py
@@ -25,7 +25,7 @@ def product_post_delete(sender, instance, **kwargs):
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
content_type=ContentType.objects.get(app_label='dojo', model='product'),
- object_id=instance.id
+ object_id=instance.id,
)
description = _('The product "%(name)s" was deleted by %(user)s') % {
'name': instance.name, 'user': le.actor}
diff --git a/dojo/product/views.py b/dojo/product/views.py
index c3afce1524..95a133bc13 100644
--- a/dojo/product/views.py
+++ b/dojo/product/views.py
@@ -134,7 +134,7 @@ def product(request):
# see https://code.djangoproject.com/ticket/23771 and https://code.djangoproject.com/ticket/25375
name_words = prods.values_list('name', flat=True)
prods = prods.annotate(
- findings_count=Count('engagement__test__finding', filter=Q(engagement__test__finding__active=True))
+ findings_count=Count('engagement__test__finding', filter=Q(engagement__test__finding__active=True)),
)
filter_string_matching = get_system_setting("filter_string_matching", False)
filter_class = ProductFilterWithoutObjectLookups if filter_string_matching else ProductFilter
@@ -241,7 +241,7 @@ def view_product(request, pid):
'waiting': {'count': total_wait, 'percent': waiting_percent},
'fail': {'count': total_fail, 'percent': fail_percent},
'pass': total_pass + total_fail,
- 'total': total
+ 'total': total,
})
system_settings = System_Settings.objects.get()
@@ -336,7 +336,7 @@ def view_product_components(request, pid):
'filter': comp_filter,
'product_tab': product_tab,
'result': result,
- 'component_words': sorted(set(component_words))
+ 'component_words': sorted(set(component_words)),
})
@@ -410,18 +410,18 @@ def finding_querys(request, prod):
filters['open_vulns'] = findings_qs.filter(finding_helper.OPEN_FINDINGS_QUERY).filter(
cwe__isnull=False,
).order_by('cwe').values(
- 'cwe'
+ 'cwe',
).annotate(
- count=Count('cwe')
+ count=Count('cwe'),
)
filters['all_vulns'] = findings_qs.filter(
duplicate=False,
cwe__isnull=False,
).order_by('cwe').values(
- 'cwe'
+ 'cwe',
).annotate(
- count=Count('cwe')
+ count=Count('cwe'),
)
filters['start_date'] = start_date
@@ -496,21 +496,21 @@ def endpoint_querys(request, prod):
mitigated=True,
finding__cwe__isnull=False,
).order_by('finding__cwe').values(
- 'finding__cwe'
+ 'finding__cwe',
).annotate(
- count=Count('finding__cwe')
+ count=Count('finding__cwe'),
).annotate(
- cwe=F('finding__cwe')
+ cwe=F('finding__cwe'),
)
filters['all_vulns'] = endpoints_qs.filter(
finding__cwe__isnull=False,
).order_by('finding__cwe').values(
- 'finding__cwe'
+ 'finding__cwe',
).annotate(
- count=Count('finding__cwe')
+ count=Count('finding__cwe'),
).annotate(
- cwe=F('finding__cwe')
+ cwe=F('finding__cwe'),
)
filters['start_date'] = start_date
@@ -743,7 +743,7 @@ def async_burndown_metrics(request, pid):
'low': open_findings_burndown.get('Low', []),
'info': open_findings_burndown.get('Info', []),
'max': open_findings_burndown.get('y_max', 0),
- 'min': open_findings_burndown.get('y_min', 0)
+ 'min': open_findings_burndown.get('y_min', 0),
})
@@ -800,15 +800,15 @@ def view_engagements(request, pid):
def prefetch_for_view_engagements(engagements, recent_test_day_count):
engagements = engagements.select_related(
- 'lead'
+ 'lead',
).prefetch_related(
Prefetch('test_set', queryset=Test.objects.filter(
id__in=Subquery(
Test.objects.filter(
engagement_id=OuterRef('engagement_id'),
- updated__gte=timezone.now() - timedelta(days=recent_test_day_count)
- ).values_list('id', flat=True)
- ))
+ updated__gte=timezone.now() - timedelta(days=recent_test_day_count),
+ ).values_list('id', flat=True),
+ )),
),
'test_set__test_type',
).annotate(
@@ -1002,7 +1002,7 @@ def edit_product(request, pid):
'product_tab': product_tab,
'jform': jform,
'gform': gform,
- 'product': product
+ 'product': product,
})
@@ -1358,7 +1358,7 @@ def get_github_form(self, request: HttpRequest, test: Test):
# Set the initial form args
kwargs = {
"enabled": jira_helper.is_push_all_issues(test),
- "prefix": "githubform"
+ "prefix": "githubform",
}
return GITHUBFindingForm(*args, **kwargs)
@@ -1373,11 +1373,11 @@ def validate_status_change(self, request: HttpRequest, context: dict):
if closing_disabled != 0:
error_inactive = ValidationError(
_('Can not set a finding as inactive without adding all mandatory notes'),
- code='inactive_without_mandatory_notes'
+ code='inactive_without_mandatory_notes',
)
error_false_p = ValidationError(
_('Can not set a finding as false positive without adding all mandatory notes'),
- code='false_p_without_mandatory_notes'
+ code='false_p_without_mandatory_notes',
)
if context["form"]['active'].value() is False:
context["form"].add_error('active', error_inactive)
@@ -1447,7 +1447,7 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic
# Determine if a message should be added
if jira_message:
messages.add_message(
- request, messages.SUCCESS, jira_message, extra_tags="alert-success"
+ request, messages.SUCCESS, jira_message, extra_tags="alert-success",
)
return request, True, push_to_jira
@@ -1811,7 +1811,7 @@ def view_api_scan_configurations(request, pid):
{
'product_api_scan_configurations': product_api_scan_configurations,
'product_tab': product_tab,
- 'pid': pid
+ 'pid': pid,
})
@@ -1885,7 +1885,7 @@ def delete_api_scan_configuration(request, pid, pascid):
'dojo/delete_product_api_scan_configuration.html',
{
'form': form,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
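
The prefetch_for_view_engagements hunk is the hardest of these comma changes to read, because the commas sit inside nested Subquery/OuterRef calls. A compact sketch of the same pattern with the DefectDojo models named in the hunk (the helper name and default window are illustrative):

from datetime import timedelta

from django.db.models import OuterRef, Prefetch, Subquery
from django.utils import timezone

from dojo.models import Engagement, Test


def engagements_with_recent_tests(recent_test_day_count=7):
    # OuterRef correlates the inner queryset with the Test rows being
    # prefetched; Subquery turns the correlated id list into a filter, so only
    # tests updated within the window are attached to each engagement.
    recent_ids = Test.objects.filter(
        engagement_id=OuterRef('engagement_id'),
        updated__gte=timezone.now() - timedelta(days=recent_test_day_count),
    ).values_list('id', flat=True)
    return Engagement.objects.select_related('lead').prefetch_related(
        Prefetch('test_set', queryset=Test.objects.filter(id__in=Subquery(recent_ids))),
        'test_set__test_type',
    )
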
diff --git a/dojo/product_type/signals.py b/dojo/product_type/signals.py
index 65a06c1284..15f06b03e6 100644
--- a/dojo/product_type/signals.py
+++ b/dojo/product_type/signals.py
@@ -25,7 +25,7 @@ def product_type_post_delete(sender, instance, **kwargs):
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
content_type=ContentType.objects.get(app_label='dojo', model='product_type'),
- object_id=instance.id
+ object_id=instance.id,
)
description = _('The product type "%(name)s" was deleted by %(user)s') % {
'name': instance.name, 'user': le.actor}
diff --git a/dojo/product_type/urls.py b/dojo/product_type/urls.py
index 41f9b840c7..98c6b1cf81 100644
--- a/dojo/product_type/urls.py
+++ b/dojo/product_type/urls.py
@@ -28,5 +28,5 @@
re_path(r'^product/type/group/(?P<groupid>\d+)/edit$', views.edit_product_type_group,
name='edit_product_type_group'),
re_path(r'^product/type/group/(?P<groupid>\d+)/delete$', views.delete_product_type_group,
- name='delete_product_type_group')
+ name='delete_product_type_group'),
]
diff --git a/dojo/product_type/views.py b/dojo/product_type/views.py
index efa46f73a8..08c91823c0 100644
--- a/dojo/product_type/views.py
+++ b/dojo/product_type/views.py
@@ -372,7 +372,7 @@ def edit_product_type_group(request, groupid):
return render(request, 'dojo/edit_product_type_group.html', {
'name': page_name,
'groupid': groupid,
- 'form': groupform
+ 'form': groupform,
})
@@ -401,5 +401,5 @@ def delete_product_type_group(request, groupid):
return render(request, 'dojo/delete_product_type_group.html', {
'name': page_name,
'groupid': groupid,
- 'form': groupform
+ 'form': groupform,
})
diff --git a/dojo/regulations/urls.py b/dojo/regulations/urls.py
index a16d3c9cca..e977103192 100644
--- a/dojo/regulations/urls.py
+++ b/dojo/regulations/urls.py
@@ -6,4 +6,4 @@
re_path(r'^regulations/add', views.new_regulation, name='new_regulation'),
re_path(r'^regulations/(?P<ttid>\d+)/edit$', views.edit_regulations,
name='edit_regulations'),
- re_path(r'^regulations$', views.regulations, name='regulations'), ]
+ re_path(r'^regulations$', views.regulations, name='regulations')]
diff --git a/dojo/regulations/views.py b/dojo/regulations/views.py
index 0bcd19bf7b..16fb582e0d 100644
--- a/dojo/regulations/views.py
+++ b/dojo/regulations/views.py
@@ -26,7 +26,7 @@ def new_regulation(request):
messages.SUCCESS,
'Regulation Successfully Created.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('regulations', ))
+ return HttpResponseRedirect(reverse('regulations'))
else:
tform = RegulationForm()
add_breadcrumb(title="New regulation", top_level=False, request=request)
@@ -44,7 +44,7 @@ def edit_regulations(request, ttid):
messages.SUCCESS,
'Regulation Deleted.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('regulations', ))
+ return HttpResponseRedirect(reverse('regulations'))
elif request.method == 'POST':
tform = RegulationForm(request.POST, instance=regulation)
if tform.is_valid():
@@ -53,7 +53,7 @@ def edit_regulations(request, ttid):
messages.SUCCESS,
'Regulation Successfully Updated.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('regulations', ))
+ return HttpResponseRedirect(reverse('regulations'))
else:
tform = RegulationForm(instance=regulation)
add_breadcrumb(title="Edit Regulation", top_level=False, request=request)
diff --git a/dojo/reports/views.py b/dojo/reports/views.py
index b815c81eca..f67b2f40c5 100644
--- a/dojo/reports/views.py
+++ b/dojo/reports/views.py
@@ -112,7 +112,7 @@ def get_template(self):
def get_context(self, request: HttpRequest) -> dict:
return {
"available_widgets": self.get_available_widgets(request),
- "in_use_widgets": self.get_in_use_widgets(request), }
+ "in_use_widgets": self.get_in_use_widgets(request)}
class CustomReport(View):
@@ -153,7 +153,7 @@ def get_form(self, request):
def get_template(self):
if self.report_format == 'AsciiDoc':
- return 'dojo/custom_asciidoc_report.html',
+ return 'dojo/custom_asciidoc_report.html'
elif self.report_format == 'HTML':
return 'dojo/custom_html_report.html'
else:
@@ -165,7 +165,7 @@ def get_context(self):
"host": self.host,
"finding_notes": self.finding_notes,
"finding_images": self.finding_images,
- "user_id": self.request.user.id, }
+ "user_id": self.request.user.id}
def report_findings(request):
@@ -710,14 +710,14 @@ def prefetch_related_findings_for_report(findings):
'notes',
'files',
'reporter',
- 'mitigated_by'
+ 'mitigated_by',
)
def prefetch_related_endpoints_for_report(endpoints):
return endpoints.prefetch_related(
'product',
- 'tags'
+ 'tags',
)
@@ -1147,7 +1147,7 @@ def get(self, request):
response = HttpResponse(
content=stream,
- content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
+ content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
)
response['Content-Disposition'] = 'attachment; filename=findings.xlsx'
return response
diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py
index 9ceedfaab4..098bf52aaf 100644
--- a/dojo/risk_acceptance/helper.py
+++ b/dojo/risk_acceptance/helper.py
@@ -51,7 +51,7 @@ def expire_now(risk_acceptance):
create_notification(event='risk_acceptance_expiration', title=title, risk_acceptance=risk_acceptance, accepted_findings=accepted_findings,
reactivated_findings=reactivated_findings, engagement=risk_acceptance.engagement,
product=risk_acceptance.engagement.product,
- url=reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id, )))
+ url=reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id)))
def reinstate(risk_acceptance, old_expiration_date):
@@ -169,7 +169,7 @@ def expiration_handler(*args, **kwargs):
create_notification(event='risk_acceptance_expiration', title=notification_title, risk_acceptance=risk_acceptance,
accepted_findings=risk_acceptance.accepted_findings.all(), engagement=risk_acceptance.engagement,
product=risk_acceptance.engagement.product,
- url=reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id, )))
+ url=reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id)))
post_jira_comments(risk_acceptance, expiration_warning_message_creator, heads_up_days)
@@ -266,7 +266,7 @@ def prefetch_for_expiration(risk_acceptances):
return risk_acceptances.prefetch_related('accepted_findings', 'accepted_findings__jira_issue',
'engagement_set',
'engagement__jira_project',
- 'engagement__jira_project__jira_instance'
+ 'engagement__jira_project__jira_instance',
)
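
A short aside on why dropping the comma in args=(risk_acceptance.engagement.id, risk_acceptance.id) is safe while args=(test.id,) elsewhere in this patch keeps its comma: in Python the comma, not the parentheses, is what makes a tuple. A self-contained illustration:

# The trailing comma creates a one-element tuple; for two or more elements it
# is purely stylistic, which is why this patch can drop it there.
single = ("value",)       # a tuple with one element
not_a_tuple = ("value")   # just the string "value"
pair = ("a", "b")         # identical to ("a", "b",)

assert isinstance(single, tuple) and len(single) == 1
assert not_a_tuple == "value"
assert pair == ("a", "b",)
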
diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum
index 4885a81930..890d05663e 100644
--- a/dojo/settings/.settings.dist.py.sha256sum
+++ b/dojo/settings/.settings.dist.py.sha256sum
@@ -1 +1 @@
-7b3bb14160f3ffce537d75895ee18cb0a561232fa964bae88b4861f7d289b176
+cce215fa477d611d45cae69a29185e943eb209526fec2b38659666e5e9513fe3
diff --git a/dojo/settings/attribute-maps/django_saml_uri.py b/dojo/settings/attribute-maps/django_saml_uri.py
index b6f3f3a67c..83fd538420 100644
--- a/dojo/settings/attribute-maps/django_saml_uri.py
+++ b/dojo/settings/attribute-maps/django_saml_uri.py
@@ -15,5 +15,5 @@
'last_name': X500ATTR_OID + '4',
'email': PKCS_9 + '1',
'uid': UCL_DIR_PILOT + '1',
- }
+ },
}
diff --git a/dojo/settings/attribute-maps/saml_uri.py b/dojo/settings/attribute-maps/saml_uri.py
index 4922c50f89..c2e7694f89 100644
--- a/dojo/settings/attribute-maps/saml_uri.py
+++ b/dojo/settings/attribute-maps/saml_uri.py
@@ -239,5 +239,5 @@
'schacUserStatus': SCHAC + '19',
'schacProjectMembership': SCHAC + '20',
'schacProjectSpecificRole': SCHAC + '21',
- }
+ },
}
diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py
index e207309417..0c62f004bc 100644
--- a/dojo/settings/settings.dist.py
+++ b/dojo/settings/settings.dist.py
@@ -173,7 +173,7 @@
'Email': 'email',
'UserName': 'username',
'Firstname': 'first_name',
- 'Lastname': 'last_name'
+ 'Lastname': 'last_name',
}),
DD_SAML2_ALLOW_UNKNOWN_ATTRIBUTE=(bool, False),
# Authentication via HTTP Proxy which put username to HTTP Header REMOTE_USER
@@ -383,7 +383,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param
# Parse database connection url strings like psql://user:pass@127.0.0.1:8458/db
if os.getenv('DD_DATABASE_URL') is not None:
DATABASES = {
- 'default': env.db('DD_DATABASE_URL')
+ 'default': env.db('DD_DATABASE_URL'),
}
else:
DATABASES = {
@@ -397,7 +397,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param
'PASSWORD': env('DD_DATABASE_PASSWORD'),
'HOST': env('DD_DATABASE_HOST'),
'PORT': env('DD_DATABASE_PORT'),
- }
+ },
}
# Track migrations through source control rather than making migrations locally
@@ -637,23 +637,23 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param
'NAME': 'dojo.user.validators.DojoCommonPasswordValidator',
},
{
- 'NAME': 'dojo.user.validators.MinLengthValidator'
+ 'NAME': 'dojo.user.validators.MinLengthValidator',
},
{
- 'NAME': 'dojo.user.validators.MaxLengthValidator'
+ 'NAME': 'dojo.user.validators.MaxLengthValidator',
},
{
- 'NAME': 'dojo.user.validators.NumberValidator'
+ 'NAME': 'dojo.user.validators.NumberValidator',
},
{
- 'NAME': 'dojo.user.validators.UppercaseValidator'
+ 'NAME': 'dojo.user.validators.UppercaseValidator',
},
{
- 'NAME': 'dojo.user.validators.LowercaseValidator'
+ 'NAME': 'dojo.user.validators.LowercaseValidator',
},
{
- 'NAME': 'dojo.user.validators.SymbolValidator'
- }
+ 'NAME': 'dojo.user.validators.SymbolValidator',
+ },
]
# https://django-ratelimit.readthedocs.io/en/stable/index.html
@@ -764,7 +764,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 25,
- 'EXCEPTION_HANDLER': 'dojo.api_v2.exception_handler.custom_exception_handler'
+ 'EXCEPTION_HANDLER': 'dojo.api_v2.exception_handler.custom_exception_handler',
}
if API_TOKENS_ENABLED:
@@ -783,8 +783,8 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param
# show file selection dialogue, see https://github.com/tfranzel/drf-spectacular/issues/455
"COMPONENT_SPLIT_REQUEST": True,
"SWAGGER_UI_SETTINGS": {
- "docExpansion": "none"
- }
+ "docExpansion": "none",
+ },
}
if not env('DD_DEFAULT_SWAGGER_UI'):
@@ -1092,7 +1092,7 @@ def saml2_attrib_map_format(dict):
env('DD_CELERY_BROKER_HOST'),
env('DD_CELERY_BROKER_PORT'),
env('DD_CELERY_BROKER_PATH'),
- env('DD_CELERY_BROKER_PARAMS')
+ env('DD_CELERY_BROKER_PARAMS'),
)
CELERY_TASK_IGNORE_RESULT = env('DD_CELERY_TASK_IGNORE_RESULT')
CELERY_RESULT_BACKEND = env('DD_CELERY_RESULT_BACKEND')
@@ -1113,7 +1113,7 @@ def saml2_attrib_map_format(dict):
'add-alerts': {
'task': 'dojo.tasks.add_alerts',
'schedule': timedelta(hours=1),
- 'args': [timedelta(hours=1)]
+ 'args': [timedelta(hours=1)],
},
'cleanup-alerts': {
'task': 'dojo.tasks.cleanup_alerts',
@@ -1122,7 +1122,7 @@ def saml2_attrib_map_format(dict):
'dedupe-delete': {
'task': 'dojo.tasks.async_dupe_delete',
'schedule': timedelta(minutes=1),
- 'args': [timedelta(minutes=1)]
+ 'args': [timedelta(minutes=1)],
},
'flush_auditlog': {
'task': 'dojo.tasks.flush_auditlog',
@@ -1163,9 +1163,9 @@ def saml2_attrib_map_format(dict):
if env('DD_DJANGO_METRICS_ENABLED'):
DJANGO_METRICS_ENABLED = env('DD_DJANGO_METRICS_ENABLED')
INSTALLED_APPS = INSTALLED_APPS + ('django_prometheus',)
- MIDDLEWARE = ['django_prometheus.middleware.PrometheusBeforeMiddleware', ] + \
+ MIDDLEWARE = ['django_prometheus.middleware.PrometheusBeforeMiddleware'] + \
MIDDLEWARE + \
- ['django_prometheus.middleware.PrometheusAfterMiddleware', ]
+ ['django_prometheus.middleware.PrometheusAfterMiddleware']
database_engine = DATABASES.get('default').get('ENGINE')
DATABASES['default']['ENGINE'] = database_engine.replace('django.', 'django_prometheus.', 1)
# CELERY_RESULT_BACKEND.replace('django.core','django_prometheus.', 1)
@@ -1269,7 +1269,7 @@ def saml2_attrib_map_format(dict):
'Bearer CLI': ['title', 'severity'],
'Nancy Scan': ['title', 'vuln_id_from_tool'],
'Wiz Scan': ['title', 'description', 'severity'],
- 'Kubescape JSON Importer': ['title', 'component_name']
+ 'Kubescape JSON Importer': ['title', 'component_name'],
}
# Override the hardcoded settings here via the env var
@@ -1331,7 +1331,7 @@ def saml2_attrib_map_format(dict):
'Codechecker Report native': True,
'Wazuh': True,
'Nuclei Scan': True,
- 'Threagile risks report': True
+ 'Threagile risks report': True,
}
# List of fields that are known to be usable in hash_code computation)
@@ -1488,7 +1488,7 @@ def saml2_attrib_map_format(dict):
'Bearer CLI': DEDUPE_ALGO_HASH_CODE,
'Wiz Scan': DEDUPE_ALGO_HASH_CODE,
'Deepfence Threatmapper Report': DEDUPE_ALGO_HASH_CODE,
- 'Kubescape JSON Importer': DEDUPE_ALGO_HASH_CODE
+ 'Kubescape JSON Importer': DEDUPE_ALGO_HASH_CODE,
}
# Override the hardcoded settings here via the env var
@@ -1518,15 +1518,15 @@ def saml2_attrib_map_format(dict):
('Epic', 'Epic'),
('Spike', 'Spike'),
('Bug', 'Bug'),
- ('Security', 'Security')
+ ('Security', 'Security'),
)
if env('DD_JIRA_EXTRA_ISSUE_TYPES') != '':
if env('DD_JIRA_EXTRA_ISSUE_TYPES').count(',') > 0:
for extra_type in env('DD_JIRA_EXTRA_ISSUE_TYPES').split(','):
- JIRA_ISSUE_TYPE_CHOICES_CONFIG += (extra_type, extra_type),
+                JIRA_ISSUE_TYPE_CHOICES_CONFIG += ((extra_type, extra_type),)
else:
- JIRA_ISSUE_TYPE_CHOICES_CONFIG += (env('DD_JIRA_EXTRA_ISSUE_TYPES'), env('DD_JIRA_EXTRA_ISSUE_TYPES')),
+        JIRA_ISSUE_TYPE_CHOICES_CONFIG += ((env('DD_JIRA_EXTRA_ISSUE_TYPES'), env('DD_JIRA_EXTRA_ISSUE_TYPES')),)
JIRA_SSL_VERIFY = env('DD_JIRA_SSL_VERIFY')
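
This hunk is more than cosmetic. In the original lines the trailing comma wrapped the pair in an outer tuple, so `+=` appended one (value, label) choice; removing only the comma would append two bare strings and corrupt the choices structure, which is why the explicit double parentheses are used above. A minimal, standalone demonstration of the difference:

# Tuple concatenation: what actually gets appended to a Django-style
# choices tuple in each spelling.
choices = (('Task', 'Task'), ('Bug', 'Bug'))

with_wrapper = choices + (('Security', 'Security'),)    # appends one 2-tuple
without_wrapper = choices + ('Security', 'Security')    # appends two bare strings

assert with_wrapper[-1] == ('Security', 'Security')
assert without_wrapper[-2:] == ('Security', 'Security')  # choices structure is broken
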
@@ -1550,7 +1550,7 @@ def saml2_attrib_map_format(dict):
'datefmt': '%d/%b/%Y %H:%M:%S',
},
'simple': {
- 'format': '%(levelname)s %(funcName)s %(lineno)d %(message)s'
+ 'format': '%(levelname)s %(funcName)s %(lineno)d %(message)s',
},
'json': {
'()': 'json_log_formatter.JSONFormatter',
@@ -1558,25 +1558,25 @@ def saml2_attrib_map_format(dict):
},
'filters': {
'require_debug_false': {
- '()': 'django.utils.log.RequireDebugFalse'
+ '()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
- '()': 'django.utils.log.RequireDebugTrue'
+ '()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
- 'class': 'django.utils.log.AdminEmailHandler'
+ 'class': 'django.utils.log.AdminEmailHandler',
},
'console': {
'class': 'logging.StreamHandler',
- 'formatter': 'verbose'
+ 'formatter': 'verbose',
},
'json_console': {
'class': 'logging.StreamHandler',
- 'formatter': 'json'
+ 'formatter': 'json',
},
},
'loggers': {
@@ -1624,7 +1624,7 @@ def saml2_attrib_map_format(dict):
'level': str(LOG_LEVEL),
'propagate': False,
},
- }
+ },
}
# override filter to ensure sensitive variables are also hidden when DEBUG = True
diff --git a/dojo/settings/settings.py b/dojo/settings/settings.py
index 20f13285a7..2d378c742f 100644
--- a/dojo/settings/settings.py
+++ b/dojo/settings/settings.py
@@ -9,7 +9,7 @@
include(
'settings.dist.py',
- optional('local_settings.py')
+ optional('local_settings.py'),
)
if not (DEBUG or ('collectstatic' in sys.argv)):
diff --git a/dojo/settings/unittest.py b/dojo/settings/unittest.py
index c8831991e3..7132d3b928 100644
--- a/dojo/settings/unittest.py
+++ b/dojo/settings/unittest.py
@@ -10,5 +10,5 @@
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'unittest.sqlite',
- }
+ },
}
diff --git a/dojo/sla_config/views.py b/dojo/sla_config/views.py
index da0c6b6a28..28aefd0c3b 100644
--- a/dojo/sla_config/views.py
+++ b/dojo/sla_config/views.py
@@ -24,7 +24,7 @@ def new_sla_config(request):
messages.SUCCESS,
'SLA configuration Successfully Created.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('sla_config', ))
+ return HttpResponseRedirect(reverse('sla_config'))
else:
tform = SLAConfigForm()
add_breadcrumb(
@@ -55,13 +55,13 @@ def edit_sla_config(request, slaid):
messages.SUCCESS,
'SLA Configuration Deleted.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('sla_config', ))
+ return HttpResponseRedirect(reverse('sla_config'))
else:
messages.add_message(request,
messages.ERROR,
'The Default SLA Configuration cannot be deleted.',
extra_tags='alert-danger')
- return HttpResponseRedirect(reverse('sla_config', ))
+ return HttpResponseRedirect(reverse('sla_config'))
elif request.method == 'POST':
form = SLAConfigForm(request.POST, instance=sla_config)
@@ -71,7 +71,7 @@ def edit_sla_config(request, slaid):
messages.SUCCESS,
'SLA configuration successfully updated. All SLA expiration dates for findings within this SLA configuration will be recalculated asynchronously.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('sla_config', ))
+ return HttpResponseRedirect(reverse('sla_config'))
else:
form = SLAConfigForm(instance=sla_config)
@@ -100,5 +100,5 @@ def sla_config(request):
return render(request,
'dojo/sla_config.html',
{'confs': confs,
- 'settings': settings
+ 'settings': settings,
})
diff --git a/dojo/survey/views.py b/dojo/survey/views.py
index 091d68492e..5e036c6856 100644
--- a/dojo/survey/views.py
+++ b/dojo/survey/views.py
@@ -90,7 +90,7 @@ def delete_engagement_survey(request, eid, sid):
'survey': survey,
'form': form,
'engagement': engagement,
- 'questions': questions
+ 'questions': questions,
})
@@ -196,7 +196,7 @@ def view_questionnaire(request, eid, sid):
'user': request.user,
'engagement': engagement,
'questions': questions,
- 'name': survey.survey.name + " Questionnaire Responses"
+ 'name': survey.survey.name + " Questionnaire Responses",
})
@@ -254,7 +254,7 @@ def add_questionnaire(request, eid):
'surveys': surveys,
'user': user,
'form': form,
- 'engagement': engagement
+ 'engagement': engagement,
})
@@ -519,7 +519,7 @@ def create_question(request):
'name': 'Add Question',
'form': form,
'textForm': textQuestionForm,
- 'choiceForm': choiceQuestionFrom
+ 'choiceForm': choiceQuestionFrom,
})
@@ -582,7 +582,7 @@ def edit_question(request, qid):
return render(request, 'defectDojo-engagement-survey/edit_question.html', {
'name': 'Edit Question',
'question': question,
- 'form': form
+ 'form': form,
})
@@ -608,7 +608,7 @@ def add_choices(request):
add_breadcrumb(title="Add Choice", top_level=False, request=request)
return render(request, 'defectDojo-engagement-survey/add_choices.html', {
'name': 'Add Choice',
- 'form': form
+ 'form': form,
})
@@ -646,7 +646,7 @@ def add_empty_questionnaire(request):
'surveys': surveys,
'user': user,
'form': form,
- 'engagement': engagement
+ 'engagement': engagement,
})
@@ -664,7 +664,7 @@ def view_empty_survey(request, esid):
'user': request.user,
'engagement': engagement,
'questions': questions,
- 'name': survey.survey.name + " Questionnaire Responses"
+ 'name': survey.survey.name + " Questionnaire Responses",
})
diff --git a/dojo/system_settings/urls.py b/dojo/system_settings/urls.py
index 2e8d284e26..da5788bc81 100644
--- a/dojo/system_settings/urls.py
+++ b/dojo/system_settings/urls.py
@@ -6,6 +6,6 @@
re_path(
r'^system_settings$',
views.SystemSettingsView.as_view(),
- name='system_settings'
- )
+ name='system_settings',
+ ),
]
diff --git a/dojo/system_settings/views.py b/dojo/system_settings/views.py
index 365f06ca63..991fe46ca2 100644
--- a/dojo/system_settings/views.py
+++ b/dojo/system_settings/views.py
@@ -34,7 +34,7 @@ def get_context(
# Set the initial context
context = {
"system_settings_obj": system_settings_obj,
- "form": self.get_form(request, system_settings_obj)
+ "form": self.get_form(request, system_settings_obj),
}
# Check the status of celery
self.get_celery_status(context)
diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py
index 514cc685df..42b82dd085 100644
--- a/dojo/templatetags/display_tags.py
+++ b/dojo/templatetags/display_tags.py
@@ -53,25 +53,25 @@
}
markdown_styles = [
- "background-color"
+ "background-color",
]
finding_related_action_classes_dict = {
'reset_finding_duplicate_status': 'fa-solid fa-eraser',
'set_finding_as_original': 'fa-brands fa-superpowers',
- 'mark_finding_duplicate': 'fa-solid fa-copy'
+ 'mark_finding_duplicate': 'fa-solid fa-copy',
}
finding_related_action_title_dict = {
'reset_finding_duplicate_status': 'Reset duplicate status',
'set_finding_as_original': 'Set as original',
- 'mark_finding_duplicate': 'Mark as duplicate'
+ 'mark_finding_duplicate': 'Mark as duplicate',
}
supported_file_formats = [
'apng', 'avif', 'gif', 'jpg',
'jpeg', 'jfif', 'pjpeg', 'pjp',
- 'png', 'svg', 'webp', 'pdf'
+ 'png', 'svg', 'webp', 'pdf',
]
@@ -237,7 +237,7 @@ def asvs_level(benchmark_score):
return _("Checklist is %(level)s full (pass: %(total_viewed)s, total: %(total)s)") % {
'level': level,
'total_viewed': total_viewed,
- 'total': total
+ 'total': total,
}
@@ -734,7 +734,7 @@ def finding_display_status(finding):
if 'Risk Accepted' in display_status:
ra = finding.risk_acceptance
if ra:
- url = reverse('view_risk_acceptance', args=(finding.test.engagement.id, ra.id, ))
+ url = reverse('view_risk_acceptance', args=(finding.test.engagement.id, ra.id))
info = ra.name_and_expiration_info
link = 'Risk Accepted'
display_status = display_status.replace('Risk Accepted', link)
diff --git a/dojo/test/signals.py b/dojo/test/signals.py
index 47d4fdffb8..84b3de5571 100644
--- a/dojo/test/signals.py
+++ b/dojo/test/signals.py
@@ -19,7 +19,7 @@ def test_post_delete(sender, instance, using, origin, **kwargs):
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
content_type=ContentType.objects.get(app_label='dojo', model='test'),
- object_id=instance.id
+ object_id=instance.id,
)
description = _('The test "%(name)s" was deleted by %(user)s') % {
'name': str(instance), 'user': le.actor}
diff --git a/dojo/test/urls.py b/dojo/test/urls.py
index c77aca7690..63a96711c5 100644
--- a/dojo/test/urls.py
+++ b/dojo/test/urls.py
@@ -8,7 +8,7 @@
re_path(
r'^test/(?P<test_id>\d+)$',
views.ViewTest.as_view(),
- name='view_test'
+ name='view_test',
),
re_path(r'^test/(?P<tid>\d+)/ics$', views.test_ics,
name='test_ics'),
diff --git a/dojo/test/views.py b/dojo/test/views.py
index d15d518863..202247ad33 100644
--- a/dojo/test/views.py
+++ b/dojo/test/views.py
@@ -165,7 +165,7 @@ def get_typed_note_form(self, request: HttpRequest, context: dict):
args = [request.POST] if request.method == "POST" else []
# Set the initial form args
kwargs = {
- "available_note_types": context.get("available_note_types")
+ "available_note_types": context.get("available_note_types"),
}
return TypedNoteForm(*args, **kwargs)
@@ -437,9 +437,9 @@ def test_ics(request, tid):
_(f"Test: {test.test_type.name} ({test.engagement.product.name}"),
_(
f"Set aside for test {test.test_type.name}, on product {test.engagement.product.name}. "
- f"Additional detail can be found at {request.build_absolute_uri(reverse('view_test', args=(test.id,)))}"
+ f"Additional detail can be found at {request.build_absolute_uri(reverse('view_test', args=(test.id,)))}",
),
- uid
+ uid,
)
output = cal.serialize()
response = HttpResponse(content=output)
@@ -579,7 +579,7 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic
# Determine if a message should be added
if jira_message:
messages.add_message(
- request, messages.SUCCESS, jira_message, extra_tags="alert-success"
+ request, messages.SUCCESS, jira_message, extra_tags="alert-success",
)
return request, True, push_to_jira
@@ -845,12 +845,12 @@ def get_jira_form(
jira_form = JIRAImportScanForm(
request.POST,
push_all=push_all_jira_issues,
- prefix='jiraform'
+ prefix='jiraform',
)
else:
jira_form = JIRAImportScanForm(
push_all=push_all_jira_issues,
- prefix='jiraform'
+ prefix='jiraform',
)
return jira_form, push_all_jira_issues
@@ -997,7 +997,7 @@ def reimport_findings(
untouched_finding_count,
_,
) = importer_client.process_scan(
- context.pop("scan", None)
+ context.pop("scan", None),
)
# Add a message to the view for the user to see the results
add_success_message_to_response(importer_client.construct_imported_message(
diff --git a/dojo/tool_config/views.py b/dojo/tool_config/views.py
index de8976e063..4744a260c6 100644
--- a/dojo/tool_config/views.py
+++ b/dojo/tool_config/views.py
@@ -34,7 +34,7 @@ def new_tool_config(request):
messages.SUCCESS,
'Tool Configuration successfully updated.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('tool_config', ))
+ return HttpResponseRedirect(reverse('tool_config'))
except Exception as e:
logger.exception(e)
messages.add_message(request,
@@ -72,7 +72,7 @@ def edit_tool_config(request, ttid):
messages.SUCCESS,
'Tool Configuration successfully updated.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('tool_config', ))
+ return HttpResponseRedirect(reverse('tool_config'))
except Exception as e:
logger.info(e)
messages.add_message(request,
diff --git a/dojo/tool_product/views.py b/dojo/tool_product/views.py
index 1564cb0ad5..ff24442d5f 100644
--- a/dojo/tool_product/views.py
+++ b/dojo/tool_product/views.py
@@ -41,7 +41,7 @@ def new_tool_product(request, pid):
return render(request, 'dojo/new_tool_product.html', {
'tform': tform,
'product_tab': product_tab,
- 'pid': pid
+ 'pid': pid,
})
@@ -53,7 +53,7 @@ def all_tool_product(request, pid):
return render(request, 'dojo/view_tool_product_all.html', {
'prod': prod,
'tools': tools,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
@@ -81,7 +81,7 @@ def edit_tool_product(request, pid, ttid):
product_tab = Product_Tab(product, title=_("Edit Product Tool Configuration"), tab="settings")
return render(request, 'dojo/edit_tool_product.html', {
'tform': tform,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
@@ -109,5 +109,5 @@ def delete_tool_product(request, pid, ttid):
return render(request, 'dojo/delete_tool_product.html', {
'tform': tform,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
diff --git a/dojo/tool_type/views.py b/dojo/tool_type/views.py
index 975f174246..75683718c4 100644
--- a/dojo/tool_type/views.py
+++ b/dojo/tool_type/views.py
@@ -25,7 +25,7 @@ def new_tool_type(request):
messages.SUCCESS,
_('Tool Type Configuration Successfully Created.'),
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('tool_type', ))
+ return HttpResponseRedirect(reverse('tool_type'))
else:
tform = ToolTypeForm()
if 'name' in request.GET:
@@ -46,7 +46,7 @@ def edit_tool_type(request, ttid):
messages.SUCCESS,
_('Tool Type successfully updated.'),
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('tool_type', ))
+ return HttpResponseRedirect(reverse('tool_type'))
else:
tform = ToolTypeForm(instance=tool_type)
diff --git a/dojo/tools/acunetix/parse_acunetix360_json.py b/dojo/tools/acunetix/parse_acunetix360_json.py
index 4398870542..fcff232a55 100644
--- a/dojo/tools/acunetix/parse_acunetix360_json.py
+++ b/dojo/tools/acunetix/parse_acunetix360_json.py
@@ -78,7 +78,7 @@ def get_findings(self, filename, test):
and (item["Classification"]["Cvss"]["Vector"] is not None)
):
cvss_objects = cvss_parser.parse_cvss_from_text(
- item["Classification"]["Cvss"]["Vector"]
+ item["Classification"]["Cvss"]["Vector"],
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
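
Several of the parsers in this part of the patch (Acunetix 360, Acunetix XML, Grype, Edgescan) share the CVSS-vector handling the hunk above touches. A standalone sketch using the `cvss` package behind the imported cvss_parser; the vector string is just an example value:

from cvss import parser as cvss_parser

vector = "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H"
cvss_objects = cvss_parser.parse_cvss_from_text(vector)
if len(cvss_objects) > 0:
    # clean_vector() normalises the vector before it is stored on the finding
    print(cvss_objects[0].clean_vector())
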
diff --git a/dojo/tools/acunetix/parse_acunetix_xml.py b/dojo/tools/acunetix/parse_acunetix_xml.py
index ae6ca8d5ee..22171bf24b 100644
--- a/dojo/tools/acunetix/parse_acunetix_xml.py
+++ b/dojo/tools/acunetix/parse_acunetix_xml.py
@@ -24,7 +24,7 @@ def get_findings(self, filename, test):
# get report date
if scan.findtext("StartTime") and "" != scan.findtext("StartTime"):
report_date = dateutil.parser.parse(
- scan.findtext("StartTime")
+ scan.findtext("StartTime"),
).date()
for item in scan.findall("ReportItems/ReportItem"):
finding = Finding(
@@ -32,10 +32,10 @@ def get_findings(self, filename, test):
title=item.findtext("Name"),
severity=self.get_severity(item.findtext("Severity")),
description=html2text.html2text(
- item.findtext("Description")
+ item.findtext("Description"),
).strip(),
false_p=self.get_false_positive(
- item.findtext("IsFalsePositive")
+ item.findtext("IsFalsePositive"),
),
static_finding=True,
dynamic_finding=False,
@@ -44,14 +44,14 @@ def get_findings(self, filename, test):
if item.findtext("Impact") and "" != item.findtext("Impact"):
finding.impact = item.findtext("Impact")
if item.findtext("Recommendation") and "" != item.findtext(
- "Recommendation"
+ "Recommendation",
):
finding.mitigation = item.findtext("Recommendation")
if report_date:
finding.date = report_date
if item.findtext("CWEList/CWE"):
finding.cwe = self.get_cwe_number(
- item.findtext("CWEList/CWE")
+ item.findtext("CWEList/CWE"),
)
references = []
for reference in item.findall("References/Reference"):
@@ -62,7 +62,7 @@ def get_findings(self, filename, test):
finding.references = "\n".join(references)
if item.findtext("CVSS3/Descriptor"):
cvss_objects = cvss_parser.parse_cvss_from_text(
- item.findtext("CVSS3/Descriptor")
+ item.findtext("CVSS3/Descriptor"),
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
@@ -72,7 +72,7 @@ def get_findings(self, filename, test):
and len(item.findtext("Details").strip()) > 0
):
finding.description += "\n\n**Details:**\n{}".format(
- html2text.html2text(item.findtext("Details"))
+ html2text.html2text(item.findtext("Details")),
)
if (
item.findtext("TechnicalDetails")
@@ -80,7 +80,7 @@ def get_findings(self, filename, test):
):
finding.description += (
"\n\n**TechnicalDetails:**\n\n{}".format(
- item.findtext("TechnicalDetails")
+ item.findtext("TechnicalDetails"),
)
)
# add requests
@@ -94,7 +94,7 @@ def get_findings(self, filename, test):
)
for request in item.findall("TechnicalDetails/Request"):
finding.unsaved_req_resp.append(
- {"req": (request.text or ""), "resp": ""}
+ {"req": (request.text or ""), "resp": ""},
)
# manage the endpoint
url = hyperlink.parse(start_url)
@@ -112,8 +112,8 @@ def get_findings(self, filename, test):
finding.title,
str(finding.impact),
str(finding.mitigation),
- ]
- ).encode("utf-8")
+ ],
+ ).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
@@ -124,14 +124,14 @@ def get_findings(self, filename, test):
):
find.description += (
"\n-----\n\n**Details:**\n{}".format(
- html2text.html2text(item.findtext("Details"))
+ html2text.html2text(item.findtext("Details")),
)
)
find.unsaved_endpoints.extend(finding.unsaved_endpoints)
find.unsaved_req_resp.extend(finding.unsaved_req_resp)
find.nb_occurences += finding.nb_occurences
logger.debug(
- f"Duplicate finding : {finding.title}"
+ f"Duplicate finding : {finding.title}",
)
else:
dupes[dupe_key] = finding
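
The dupe_key lines reformatted in this parser follow a hashing idiom many DefectDojo parsers use: join a handful of identifying fields and hash them so repeated report items collapse onto one finding. A standalone sketch (hash function and field names chosen for illustration):

import hashlib


def dedupe_key(*fields):
    # Join the identifying fields and hash them; items sharing the same
    # title/impact/mitigation then map to a single dictionary key.
    return hashlib.sha256(
        "|".join(str(f) for f in fields).encode("utf-8"),
    ).hexdigest()


dupes = {}
for title, impact, mitigation in [
    ("SQL Injection", "High", "Use parameterised queries"),
    ("SQL Injection", "High", "Use parameterised queries"),  # duplicate report item
]:
    key = dedupe_key(title, impact, mitigation)
    entry = dupes.setdefault(key, {"title": title, "occurrences": 0})
    entry["occurrences"] += 1

assert len(dupes) == 1
assert next(iter(dupes.values()))["occurrences"] == 2
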
diff --git a/dojo/tools/anchore_engine/parser.py b/dojo/tools/anchore_engine/parser.py
index aeb2aab875..0e3ed39fdd 100644
--- a/dojo/tools/anchore_engine/parser.py
+++ b/dojo/tools/anchore_engine/parser.py
@@ -101,7 +101,7 @@ def get_findings(self, filename, test):
dupe_key = "|".join(
[
item.get(
- "image_digest", item.get("imageDigest", "None")
+ "image_digest", item.get("imageDigest", "None"),
), # depending on version image_digest/imageDigest
item["feed"],
item["feed_group"],
@@ -109,7 +109,7 @@ def get_findings(self, filename, test):
item["package_version"],
item["package_path"],
item["vuln"],
- ]
+ ],
)
if dupe_key in dupes:
diff --git a/dojo/tools/anchore_enterprise/parser.py b/dojo/tools/anchore_enterprise/parser.py
index 03e7cc1ee8..e58ee166d3 100644
--- a/dojo/tools/anchore_enterprise/parser.py
+++ b/dojo/tools/anchore_enterprise/parser.py
@@ -43,7 +43,7 @@ def get_findings(self, filename, test):
repo, tag = row[1].split(":", 2)
description = row[5]
severity = map_gate_action_to_severity(
- row[6]
+ row[6],
)
policyid = row[8]
policyname = policy_name(
@@ -79,7 +79,7 @@ def get_findings(self, filename, test):
)
if vulnerability_id:
find.unsaved_vulnerability_ids = [
- vulnerability_id
+ vulnerability_id,
]
items.append(find)
except (KeyError, IndexError) as err:
@@ -89,7 +89,7 @@ def get_findings(self, filename, test):
# import empty policies without error (e.g. policies or images
# objects are not a dictionary)
logger.warning(
- "Exception at %s", "parsing anchore policy", exc_info=err
+ "Exception at %s", "parsing anchore policy", exc_info=err,
)
return items
diff --git a/dojo/tools/anchore_grype/parser.py b/dojo/tools/anchore_grype/parser.py
index 395955b1eb..c457f63e65 100644
--- a/dojo/tools/anchore_grype/parser.py
+++ b/dojo/tools/anchore_grype/parser.py
@@ -53,7 +53,7 @@ def get_findings(self, file, test):
rel_description = related_vulnerability.get("description")
rel_cvss = related_vulnerability.get("cvss")
vulnerability_ids = self.get_vulnerability_ids(
- vuln_id, related_vulnerabilities
+ vuln_id, related_vulnerabilities,
)
matches = item["matchDetails"]
@@ -96,7 +96,7 @@ def get_findings(self, file, test):
f"\n**Matcher:** {matches[0]['matcher']}"
)
finding_tags = [
- matches[0]["matcher"].replace("-matcher", "")
+ matches[0]["matcher"].replace("-matcher", ""),
]
else:
finding_description += "\n**Matchers:**"
@@ -198,7 +198,7 @@ def get_cvss(self, cvss):
vector = cvss_item["vector"]
cvss_objects = cvss_parser.parse_cvss_from_text(vector)
if len(cvss_objects) > 0 and isinstance(
- cvss_objects[0], CVSS3
+ cvss_objects[0], CVSS3,
):
return vector
return None
diff --git a/dojo/tools/anchorectl_policies/parser.py b/dojo/tools/anchorectl_policies/parser.py
index 1e31b08e68..30dd42e32b 100644
--- a/dojo/tools/anchorectl_policies/parser.py
+++ b/dojo/tools/anchorectl_policies/parser.py
@@ -72,7 +72,7 @@ def get_findings(self, filename, test):
# import empty policies without error (e.g. policies or images
# objects are not a dictionary)
logger.warning(
- "Exception at %s", "parsing anchore policy", exc_info=err
+ "Exception at %s", "parsing anchore policy", exc_info=err,
)
return items
diff --git a/dojo/tools/anchorectl_vulns/parser.py b/dojo/tools/anchorectl_vulns/parser.py
index 70371a955b..13632e84b8 100644
--- a/dojo/tools/anchorectl_vulns/parser.py
+++ b/dojo/tools/anchorectl_vulns/parser.py
@@ -92,7 +92,7 @@ def get_findings(self, filename, test):
dupe_key = "|".join(
[
item.get(
- "imageDigest", "None"
+ "imageDigest", "None",
), # depending on version image_digest/imageDigest
item["feed"],
item["feedGroup"],
@@ -100,7 +100,7 @@ def get_findings(self, filename, test):
item["packageVersion"],
item["packagePath"],
item["vuln"],
- ]
+ ],
)
if dupe_key in dupes:
diff --git a/dojo/tools/api_blackduck/api_client.py b/dojo/tools/api_blackduck/api_client.py
index 6d5342d580..98c0aeb533 100644
--- a/dojo/tools/api_blackduck/api_client.py
+++ b/dojo/tools/api_blackduck/api_client.py
@@ -47,5 +47,5 @@ def get_vulnerable_bom_components(self, version):
def get_vulnerabilities(self, component):
return self.client.get_json(
- f'/api/vulnerabilities/{component["vulnerabilityWithRemediation"]["vulnerabilityName"]}'
+ f'/api/vulnerabilities/{component["vulnerabilityWithRemediation"]["vulnerabilityName"]}',
)
diff --git a/dojo/tools/api_blackduck/parser.py b/dojo/tools/api_blackduck/parser.py
index 0be6680787..ccd228c89c 100644
--- a/dojo/tools/api_blackduck/parser.py
+++ b/dojo/tools/api_blackduck/parser.py
@@ -52,7 +52,7 @@ def get_findings(self, file, test):
test=test,
title=f"{vulnerability_id} in {component_name}:{component_version}",
description=entry["vulnerabilityWithRemediation"].get(
- "description"
+ "description",
),
severity=entry["vulnerabilityWithRemediation"][
"severity"
@@ -62,13 +62,13 @@ def get_findings(self, file, test):
static_finding=True,
dynamic_finding=False,
unique_id_from_tool=entry["vulnerabilityWithRemediation"].get(
- "vulnerabilityName"
+ "vulnerabilityName",
),
)
# get CWE
if entry["vulnerabilityWithRemediation"].get("cweId"):
cwe_raw = entry["vulnerabilityWithRemediation"]["cweId"].split(
- "-"
+ "-",
)
if len(cwe_raw) == 2 and cwe_raw[1].isdigit():
finding.cwe = int(cwe_raw[1])
diff --git a/dojo/tools/api_bugcrowd/api_client.py b/dojo/tools/api_bugcrowd/api_client.py
index 6bed971e31..bf76608380 100644
--- a/dojo/tools/api_bugcrowd/api_client.py
+++ b/dojo/tools/api_bugcrowd/api_client.py
@@ -20,7 +20,7 @@ def __init__(self, tool_config):
if tool_config.authentication_type == "API":
self.api_token = tool_config.api_key
self.session.headers.update(
- {"Authorization": f"Token {self.api_token}"}
+ {"Authorization": f"Token {self.api_token}"},
)
self.session.headers.update(self.default_headers)
else:
@@ -67,7 +67,7 @@ def get_findings(self, program, target):
# Otherwise, keep updating next link
next = "{}{}".format(
- self.bugcrowd_api_url, data["links"]["next"]
+ self.bugcrowd_api_url, data["links"]["next"],
)
else:
next = "over"
@@ -75,13 +75,13 @@ def get_findings(self, program, target):
def test_connection(self):
# Request programs
response_programs = self.session.get(
- url=f"{self.bugcrowd_api_url}/programs"
+ url=f"{self.bugcrowd_api_url}/programs",
)
response_programs.raise_for_status()
# Request submissions to validate the org token
response_subs = self.session.get(
- url=f"{self.bugcrowd_api_url}/submissions"
+ url=f"{self.bugcrowd_api_url}/submissions",
)
response_subs.raise_for_status()
if response_programs.ok and response_subs.ok:
@@ -91,20 +91,20 @@ def test_connection(self):
progs = list(filter(lambda prog: prog["type"] == "program", data))
program_names = ", ".join(
- [p["attributes"]["code"] for p in progs]
+ [p["attributes"]["code"] for p in progs],
)
# Request targets to validate the org token
response_targets = self.session.get(
- url=f"{self.bugcrowd_api_url}/targets"
+ url=f"{self.bugcrowd_api_url}/targets",
)
response_targets.raise_for_status()
if response_targets.ok:
data_targets = response_targets.json().get("data")
targets = list(
- filter(lambda prog: prog["type"] == "target", data_targets)
+ filter(lambda prog: prog["type"] == "target", data_targets),
)
target_names = ", ".join(
- [p["attributes"]["name"] for p in targets]
+ [p["attributes"]["name"] for p in targets],
)
return (
f'With {total_subs} submissions, you have access to the "{program_names}" '
diff --git a/dojo/tools/api_bugcrowd/importer.py b/dojo/tools/api_bugcrowd/importer.py
index 677174cac0..4fb1219cb3 100644
--- a/dojo/tools/api_bugcrowd/importer.py
+++ b/dojo/tools/api_bugcrowd/importer.py
@@ -17,7 +17,7 @@ class BugcrowdApiImporter:
def get_findings(self, test):
client, config = self.prepare_client(test)
logger.debug(
- f"Fetching submissions program {str(config.service_key_1)} and target {str(config.service_key_2)}"
+ f"Fetching submissions program {str(config.service_key_1)} and target {str(config.service_key_2)}",
)
submissions_paged = client.get_findings(
diff --git a/dojo/tools/api_bugcrowd/parser.py b/dojo/tools/api_bugcrowd/parser.py
index 6ad71f295c..df9dbbf131 100644
--- a/dojo/tools/api_bugcrowd/parser.py
+++ b/dojo/tools/api_bugcrowd/parser.py
@@ -62,11 +62,11 @@ def get_findings(self, file, test):
if test.api_scan_configuration:
config = test.api_scan_configuration
links = "https://tracker.bugcrowd.com/{}{}".format(
- str(config.service_key_1), entry["links"]["self"]
+ str(config.service_key_1), entry["links"]["self"],
)
if api_scan_config is not None:
links = "https://tracker.bugcrowd.com/{}{}".format(
- str(api_scan_config.service_key_1), entry["links"]["self"]
+ str(api_scan_config.service_key_1), entry["links"]["self"],
)
else:
links = None
@@ -94,12 +94,12 @@ def get_findings(self, file, test):
"://" in entry["attributes"]["bug_url"]
): # is the host full uri?
bug_endpoint = Endpoint.from_uri(
- entry["attributes"]["bug_url"].strip()
+ entry["attributes"]["bug_url"].strip(),
)
# can raise exception if the host is not valid URL
else:
bug_endpoint = Endpoint.from_uri(
- "//" + entry["attributes"]["bug_url"].strip()
+ "//" + entry["attributes"]["bug_url"].strip(),
)
# can raise exception if there is no way to parse the
# host
@@ -108,8 +108,8 @@ def get_findings(self, file, test):
): # We don't want to fail the whole import just for 1 error in the bug_url
logger.error(
"Error parsing bugcrowd bug_url : {}".format(
- entry["attributes"]["bug_url"].strip()
- )
+ entry["attributes"]["bug_url"].strip(),
+ ),
)
bug_url = entry["attributes"]["bug_url"]
@@ -122,7 +122,7 @@ def get_findings(self, file, test):
f"- Bug Url: [{bug_url}]({bug_url})",
"",
f"Bugcrowd link: [{links}]({links})",
- ]
+ ],
)
mitigation = entry["attributes"]["remediation_advice"]
steps_to_reproduce = entry["attributes"]["description"]
@@ -160,11 +160,11 @@ def get_findings(self, file, test):
finding.unsaved_endpoints = [bug_endpoint]
except Exception as e:
logger.error(
- f"{str(bug_endpoint)} bug url from bugcrowd failed to parse to endpoint, error= {e}"
+ f"{str(bug_endpoint)} bug url from bugcrowd failed to parse to endpoint, error= {e}",
)
except ValidationError:
logger.error(
- f"Broken Bugcrowd endpoint {bug_endpoint.host} was skipped."
+ f"Broken Bugcrowd endpoint {bug_endpoint.host} was skipped.",
)
findings.append(finding)
@@ -202,7 +202,7 @@ def include_finding(self, entry):
else:
msg = (
"{} not in allowed bugcrowd submission states".format(
- entry["attributes"]["state"]
+ entry["attributes"]["state"],
)
)
raise ValueError(msg)
diff --git a/dojo/tools/api_cobalt/api_client.py b/dojo/tools/api_cobalt/api_client.py
index 36f37d734d..c18234ae73 100644
--- a/dojo/tools/api_cobalt/api_client.py
+++ b/dojo/tools/api_cobalt/api_client.py
@@ -44,7 +44,7 @@ def get_assets(self):
else:
msg = (
"Unable to get assets due to {} - {}".format(
- response.status_code, response.content.decode("utf-8")
+ response.status_code, response.content.decode("utf-8"),
)
)
raise Exception(msg)
@@ -65,7 +65,7 @@ def get_findings(self, asset_id):
else:
msg = (
"Unable to get asset findings due to {} - {}".format(
- response.status_code, response.content.decode("utf-8")
+ response.status_code, response.content.decode("utf-8"),
)
)
raise Exception(msg)
@@ -86,7 +86,7 @@ def test_connection(self):
if response_orgs.ok and response_assets.ok:
data = response_orgs.json().get("data")
orgs = filter(
- lambda org: org["resource"]["token"] == self.org_token, data
+ lambda org: org["resource"]["token"] == self.org_token, data,
)
org = list(orgs)[0]
org_name = org["resource"]["name"]
diff --git a/dojo/tools/api_cobalt/parser.py b/dojo/tools/api_cobalt/parser.py
index df0425d92b..fa82acabf5 100644
--- a/dojo/tools/api_cobalt/parser.py
+++ b/dojo/tools/api_cobalt/parser.py
@@ -67,7 +67,7 @@ def get_findings(self, file, test):
"",
"Cobalt.io link:",
links["ui"]["url"],
- ]
+ ],
)
mitigation = resource["suggested_fix"]
steps_to_reproduce = resource["proof_of_concept"]
diff --git a/dojo/tools/api_edgescan/importer.py b/dojo/tools/api_edgescan/importer.py
index e740051afa..e4e9bf0c98 100644
--- a/dojo/tools/api_edgescan/importer.py
+++ b/dojo/tools/api_edgescan/importer.py
@@ -27,7 +27,7 @@ def prepare_client(self, test):
raise ValidationError(msg)
else:
configs = Product_API_Scan_Configuration.objects.filter(
- product=product
+ product=product,
)
if configs.count() == 1:
config = configs.first()
diff --git a/dojo/tools/api_edgescan/parser.py b/dojo/tools/api_edgescan/parser.py
index b9becbfc5d..66b00f9246 100644
--- a/dojo/tools/api_edgescan/parser.py
+++ b/dojo/tools/api_edgescan/parser.py
@@ -60,7 +60,7 @@ def make_finding(self, test, vulnerability):
if vulnerability["cvss_version"] == 3:
if vulnerability["cvss_vector"]:
cvss_objects = cvss_parser.parse_cvss_from_text(
- vulnerability["cvss_vector"]
+ vulnerability["cvss_vector"],
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
@@ -76,7 +76,7 @@ def make_finding(self, test, vulnerability):
finding.unsaved_endpoints = [
Endpoint.from_uri(vulnerability["location"])
if "://" in vulnerability["location"]
- else Endpoint.from_uri("//" + vulnerability["location"])
+ else Endpoint.from_uri("//" + vulnerability["location"]),
]
return finding
diff --git a/dojo/tools/api_sonarqube/api_client.py b/dojo/tools/api_sonarqube/api_client.py
index 09a983d744..e42150f641 100644
--- a/dojo/tools/api_sonarqube/api_client.py
+++ b/dojo/tools/api_sonarqube/api_client.py
@@ -281,7 +281,7 @@ def get_rule(self, rule_id, organization=None):
rule = self.rules_cache.get(rule_id)
if not rule:
request_filter = {
- "key": rule_id
+ "key": rule_id,
}
if organization:
request_filter["organization"] = organization
@@ -424,7 +424,7 @@ def test_connection(self):
def test_product_connection(self, api_scan_configuration):
organization = api_scan_configuration.service_key_2 or None
project = self.get_project(
- api_scan_configuration.service_key_1, organization=organization
+ api_scan_configuration.service_key_1, organization=organization,
)
project_name = project.get("name")
message_prefix = "You have access to project"
diff --git a/dojo/tools/api_sonarqube/importer.py b/dojo/tools/api_sonarqube/importer.py
index 79794e3a56..a8bd748108 100644
--- a/dojo/tools/api_sonarqube/importer.py
+++ b/dojo/tools/api_sonarqube/importer.py
@@ -128,7 +128,7 @@ def import_issues(self, test):
branch=test.branch_tag,
)
logging.info(
- f'Found {len(issues)} issues for component {component["key"]}'
+ f'Found {len(issues)} issues for component {component["key"]}',
)
sonarUrl = client.sonar_api_url[:-3] # [:-3] removes the /api part of the sonarqube/cloud URL
@@ -158,7 +158,7 @@ def import_issues(self, test):
# custom (user defined) SQ rules may not have 'htmlDesc'
if "htmlDesc" in rule:
description = self.clean_rule_description_html(
- rule["htmlDesc"]
+ rule["htmlDesc"],
)
cwe = self.clean_cwe(rule["htmlDesc"])
references = sonarqube_permalink + self.get_references(rule["htmlDesc"])
@@ -178,7 +178,7 @@ def import_issues(self, test):
# Only assign the SonarQube_issue to the first finding related
# to the issue
if Finding.objects.filter(
- sonarqube_issue=sonarqube_issue
+ sonarqube_issue=sonarqube_issue,
).exists():
sonarqube_issue = None
@@ -247,7 +247,7 @@ def import_hotspots(self, test):
branch=test.branch_tag,
)
logging.info(
- f'Found {len(hotspots)} hotspots for project {component["key"]}'
+ f'Found {len(hotspots)} hotspots for project {component["key"]}',
)
sonarUrl = client.sonar_api_url[:-3] # [:-3] removes the /api part of the sonarqube/cloud URL
@@ -269,19 +269,19 @@ def import_hotspots(self, test):
else:
severity = "Info"
title = textwrap.shorten(
- text=hotspot.get("message", ""), width=500
+ text=hotspot.get("message", ""), width=500,
)
component_key = hotspot.get("component")
line = hotspot.get("line")
rule_id = hotspot.get("key", "")
rule = client.get_hotspot_rule(rule_id)
scanner_confidence = self.convert_scanner_confidence(
- hotspot.get("vulnerabilityProbability", "")
+ hotspot.get("vulnerabilityProbability", ""),
)
description = self.clean_rule_description_html(
rule.get(
- "vulnerabilityDescription", "No description provided."
- )
+ "vulnerabilityDescription", "No description provided.",
+ ),
)
cwe = self.clean_cwe(rule.get("fixRecommendations", ""))
try:
@@ -289,7 +289,7 @@ def import_hotspots(self, test):
except KeyError:
sonarqube_permalink = "No permalink \n"
references = sonarqube_permalink + self.get_references(
- rule.get("riskDescription", "")
+ rule.get("riskDescription", ""),
) + self.get_references(rule.get("fixRecommendations", ""))
sonarqube_issue, _ = Sonarqube_Issue.objects.update_or_create(
@@ -300,7 +300,7 @@ def import_hotspots(self, test):
# Only assign the SonarQube_issue to the first finding related
# to the issue
if Finding.objects.filter(
- sonarqube_issue=sonarqube_issue
+ sonarqube_issue=sonarqube_issue,
).exists():
sonarqube_issue = None
diff --git a/dojo/tools/api_sonarqube/updater.py b/dojo/tools/api_sonarqube/updater.py
index 67c724660d..980079f894 100644
--- a/dojo/tools/api_sonarqube/updater.py
+++ b/dojo/tools/api_sonarqube/updater.py
@@ -68,7 +68,7 @@ def get_sonarqube_status_for(finding):
return target_status
def get_sonarqube_required_transitions_for(
- self, current_status, target_status
+ self, current_status, target_status,
):
# If current and target is the same... do nothing
if current_status == target_status:
@@ -107,7 +107,7 @@ def get_sonarqube_required_transitions_for(
for t_from in transition.get("from"):
possible_transition = (
self.get_sonarqube_required_transitions_for(
- current_status, t_from
+ current_status, t_from,
)
)
if possible_transition:
@@ -120,7 +120,7 @@ def update_sonarqube_finding(self, finding):
return
logger.debug(
- f"Checking if finding '{finding}' needs to be updated in SonarQube"
+ f"Checking if finding '{finding}' needs to be updated in SonarQube",
)
client, _ = SonarQubeApiImporter.prepare_client(finding.test)
@@ -135,21 +135,21 @@ def update_sonarqube_finding(self, finding):
): # Issue could have disappeared in SQ because a previous scan has resolved the issue as fixed
if issue.get("resolution"):
current_status = "{} / {}".format(
- issue.get("status"), issue.get("resolution")
+ issue.get("status"), issue.get("resolution"),
)
else:
current_status = issue.get("status")
logger.debug(
- f"--> SQ Current status: {current_status}. Current target status: {target_status}"
+ f"--> SQ Current status: {current_status}. Current target status: {target_status}",
)
transitions = self.get_sonarqube_required_transitions_for(
- current_status, target_status
+ current_status, target_status,
)
if transitions:
logger.info(
- f"Updating finding '{finding}' in SonarQube"
+ f"Updating finding '{finding}' in SonarQube",
)
for transition in transitions:
@@ -162,7 +162,7 @@ def update_sonarqube_finding(self, finding):
# to sonarqube we changed Accepted into Risk Accepted, but we change it back to be sure we don't
# break the integration
finding_status=finding.status().replace(
- "Risk Accepted", "Accepted"
+ "Risk Accepted", "Accepted",
)
if finding.status()
else finding.status(),
diff --git a/dojo/tools/api_sonarqube/updater_from_source.py b/dojo/tools/api_sonarqube/updater_from_source.py
index 1c97f8fe40..93afa04c4a 100644
--- a/dojo/tools/api_sonarqube/updater_from_source.py
+++ b/dojo/tools/api_sonarqube/updater_from_source.py
@@ -41,7 +41,7 @@ def update(self, finding):
current_status = issue.get("resolution") or issue.get("status")
current_finding_status = self.get_sonarqube_status_for(finding)
logger.debug(
- f"--> SQ Current status: {current_status}. Finding status: {current_finding_status}"
+ f"--> SQ Current status: {current_status}. Finding status: {current_finding_status}",
)
if (
@@ -49,7 +49,7 @@ def update(self, finding):
and current_finding_status != current_status
):
logger.info(
- f"Original SonarQube issue '{sonarqube_issue}' has changed. Updating DefectDojo finding '{finding}'..."
+ f"Original SonarQube issue '{sonarqube_issue}' has changed. Updating DefectDojo finding '{finding}'...",
)
self.update_finding_status(finding, current_status)
diff --git a/dojo/tools/api_vulners/importer.py b/dojo/tools/api_vulners/importer.py
index 89950ae97d..8ebbbe83f6 100644
--- a/dojo/tools/api_vulners/importer.py
+++ b/dojo/tools/api_vulners/importer.py
@@ -37,7 +37,7 @@ def prepare_client(self, test):
raise ValidationError(msg)
else:
configs = Product_API_Scan_Configuration.objects.filter(
- product=product, tool_configuration__tool_type__name="Vulners"
+ product=product, tool_configuration__tool_type__name="Vulners",
)
if configs.count() == 1:
config = configs.first()
diff --git a/dojo/tools/api_vulners/parser.py b/dojo/tools/api_vulners/parser.py
index a6203ec559..5d6382caaf 100644
--- a/dojo/tools/api_vulners/parser.py
+++ b/dojo/tools/api_vulners/parser.py
@@ -91,7 +91,7 @@ def get_findings(self, file, test):
finding.cvssv3 = CVSS3(
vuln.get("cvss3", {})
.get("cvssV3", {})
- .get("vectorString", "")
+ .get("vectorString", ""),
).clean_vector()
# References
diff --git a/dojo/tools/aqua/parser.py b/dojo/tools/aqua/parser.py
index d6ea61edc9..ab8c6e1bdb 100644
--- a/dojo/tools/aqua/parser.py
+++ b/dojo/tools/aqua/parser.py
@@ -83,7 +83,7 @@ def get_item(resource, vuln, test):
f"NVD score v3 ({score}) used for classification.\n"
)
severity_justification += "\nNVD v3 vectors: {}".format(
- vuln.get("nvd_vectors_v3")
+ vuln.get("nvd_vectors_v3"),
)
# Add the CVSS3 to Finding
cvssv3 = vuln.get("nvd_vectors_v3")
@@ -93,7 +93,7 @@ def get_item(resource, vuln, test):
f"NVD score v2 ({score}) used for classification.\n"
)
severity_justification += "\nNVD v2 vectors: {}".format(
- vuln.get("nvd_vectors")
+ vuln.get("nvd_vectors"),
)
severity = severity_of(score)
severity_justification += f"\n{used_for_classification}"
diff --git a/dojo/tools/arachni/parser.py b/dojo/tools/arachni/parser.py
index 7ca6528de0..7b28d7e9f0 100644
--- a/dojo/tools/arachni/parser.py
+++ b/dojo/tools/arachni/parser.py
@@ -32,7 +32,7 @@ def get_items(self, tree, test):
report_date = None
if "finish_datetime" in tree:
report_date = datetime.strptime(
- tree.get("finish_datetime"), "%Y-%m-%d %H:%M:%S %z"
+ tree.get("finish_datetime"), "%Y-%m-%d %H:%M:%S %z",
)
for node in tree["issues"]:
item = self.get_item(node, report_date)
diff --git a/dojo/tools/auditjs/parser.py b/dojo/tools/auditjs/parser.py
index 678e11e8e6..8135fe1fc5 100644
--- a/dojo/tools/auditjs/parser.py
+++ b/dojo/tools/auditjs/parser.py
@@ -55,7 +55,7 @@ def get_findings(self, filename, test):
)
component_name, component_version = pacakge_full_name.split(
- "@"
+ "@",
)
# Check if there are any vulnerabilities
@@ -94,16 +94,16 @@ def get_findings(self, filename, test):
cvss_score = vulnerability["cvssScore"]
if "cvssVector" in vulnerability:
cvss_vectors = cvss.parser.parse_cvss_from_text(
- vulnerability["cvssVector"]
+ vulnerability["cvssVector"],
)
if len(cvss_vectors) > 0 and isinstance(
- cvss_vectors[0], CVSS3
+ cvss_vectors[0], CVSS3,
):
# Only set finding vector if it's version 3
cvss_vector = cvss_vectors[0].clean_vector()
severity = cvss_vectors[0].severities()[0]
elif len(cvss_vectors) > 0 and isinstance(
- cvss_vectors[0], CVSS2
+ cvss_vectors[0], CVSS2,
):
# Otherwise add it to description
description = (
@@ -148,7 +148,7 @@ def get_findings(self, filename, test):
if finding.description:
find.description += "\n" + finding.description
find.unsaved_endpoints.extend(
- finding.unsaved_endpoints
+ finding.unsaved_endpoints,
)
dupes[dupe_key] = find
else:
diff --git a/dojo/tools/aws_prowler/parser.py b/dojo/tools/aws_prowler/parser.py
index 4a1ed7af91..8a084ff6f3 100644
--- a/dojo/tools/aws_prowler/parser.py
+++ b/dojo/tools/aws_prowler/parser.py
@@ -104,7 +104,7 @@ def process_csv(self, file, test):
# improving key to get duplicates
dupe_key = hashlib.sha256(
- (sev + "|" + region + "|" + result_extended).encode("utf-8")
+ (sev + "|" + region + "|" + result_extended).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
@@ -189,7 +189,7 @@ def process_json(self, file, test):
# improving key to get duplicates
dupe_key = hashlib.sha256(
- (sev + "|" + region + "|" + result_extended).encode("utf-8")
+ (sev + "|" + region + "|" + result_extended).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
diff --git a/dojo/tools/awssecurityhub/parser.py b/dojo/tools/awssecurityhub/parser.py
index b761bdd214..2bc71c2e91 100644
--- a/dojo/tools/awssecurityhub/parser.py
+++ b/dojo/tools/awssecurityhub/parser.py
@@ -31,7 +31,7 @@ def get_tests(self, scan_type, scan):
aws_acc.append(finding.get("AwsAccountId"))
report_date = data.get("createdAt")
test = ParserTest(
- name=self.ID, type=self.ID, version=""
+ name=self.ID, type=self.ID, version="",
)
test.description = "**AWS Accounts:** " + ', '.join(set(aws_acc)) + "\n"
test.description += "**Finding Origins:** " + ', '.join(set(prod)) + "\n"
diff --git a/dojo/tools/azure_security_center_recommendations/parser.py b/dojo/tools/azure_security_center_recommendations/parser.py
index e4f02cf3b8..7fbfac83c9 100644
--- a/dojo/tools/azure_security_center_recommendations/parser.py
+++ b/dojo/tools/azure_security_center_recommendations/parser.py
@@ -47,7 +47,7 @@ def process_csv(self, file, test):
recommendation_id = row.get("recommendationId")
recommendation_name = row.get("recommendationName")
recommendation_display_name = row.get(
- "recommendationDisplayName"
+ "recommendationDisplayName",
)
azure_description = row.get("description")
remediation_steps = row.get("remediationSteps")
@@ -57,7 +57,7 @@ def process_csv(self, file, test):
status_change_date = row.get("statusChangeDate")
controls = row.get("controls")
azure_portal_recommendation_link = row.get(
- "azurePortalRecommendationLink"
+ "azurePortalRecommendationLink",
)
native_cloud_account_id = row.get("nativeCloudAccountId")
@@ -107,13 +107,13 @@ def process_csv(self, file, test):
references=azure_portal_recommendation_link,
mitigation=remediation_steps,
date=datetime.strptime(
- status_change_date[0:10], "%Y-%m-%d"
+ status_change_date[0:10], "%Y-%m-%d",
).date(),
vuln_id_from_tool=recommendation_name,
unique_id_from_tool=recommendation_id,
static_finding=True,
dynamic_finding=False,
- )
+ ),
)
return findings
diff --git a/dojo/tools/bandit/parser.py b/dojo/tools/bandit/parser.py
index b209648847..1ad385114a 100644
--- a/dojo/tools/bandit/parser.py
+++ b/dojo/tools/bandit/parser.py
@@ -34,7 +34,7 @@ def get_findings(self, filename, test):
"```",
str(item.get("code")).replace("```", "\\`\\`\\`"),
"```",
- ]
+ ],
)
finding = Finding(
@@ -48,7 +48,7 @@ def get_findings(self, filename, test):
static_finding=True,
dynamic_finding=False,
vuln_id_from_tool=":".join(
- [item["test_name"], item["test_id"]]
+ [item["test_name"], item["test_id"]],
),
nb_occurences=1,
)
diff --git a/dojo/tools/blackduck/importer.py b/dojo/tools/blackduck/importer.py
index 7c1e098a37..0afd9d4771 100644
--- a/dojo/tools/blackduck/importer.py
+++ b/dojo/tools/blackduck/importer.py
@@ -38,7 +38,7 @@ def _process_csvfile(self, report):
project_ids = set(security_issues.keys())
return self._process_project_findings(
- project_ids, security_issues, None
+ project_ids, security_issues, None,
)
def _process_zipfile(self, report):
@@ -63,11 +63,11 @@ def _process_zipfile(self, report):
project_ids = set(files.keys()) & set(security_issues.keys())
return self._process_project_findings(
- project_ids, security_issues, files
+ project_ids, security_issues, files,
)
def _process_project_findings(
- self, project_ids, security_issues, files=None
+ self, project_ids, security_issues, files=None,
):
"""
Process findings per projects and return a BlackduckFinding object per the model
@@ -97,7 +97,7 @@ def _process_project_findings(
for issue in security_issues[project_id]:
security_issue_dict = dict(issue)
cve = self.get_cve(
- security_issue_dict.get("Vulnerability id")
+ security_issue_dict.get("Vulnerability id"),
).upper()
location = ", ".join(locations)
diff --git a/dojo/tools/blackduck/parser.py b/dojo/tools/blackduck/parser.py
index 4b21c28619..a79e9db967 100644
--- a/dojo/tools/blackduck/parser.py
+++ b/dojo/tools/blackduck/parser.py
@@ -29,7 +29,7 @@ def normalize_findings(self, filename):
importer = BlackduckImporter()
findings = sorted(
- importer.parse_findings(filename), key=lambda f: f.vuln_id
+ importer.parse_findings(filename), key=lambda f: f.vuln_id,
)
return findings
@@ -46,7 +46,7 @@ def ingest_findings(self, normalized_findings, test):
references = self.format_reference(i)
dupe_key = hashlib.md5(
- f"{title} | {i.vuln_source}".encode()
+ f"{title} | {i.vuln_source}".encode(),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/blackduck_binary_analysis/importer.py b/dojo/tools/blackduck_binary_analysis/importer.py
index 2c7528ae2d..5b54321129 100644
--- a/dojo/tools/blackduck_binary_analysis/importer.py
+++ b/dojo/tools/blackduck_binary_analysis/importer.py
@@ -32,11 +32,11 @@ def _process_csvfile(self, report, orig_report_name):
sha1_hash_keys = set(vulnerabilities.keys())
return self._process_vuln_results(
- sha1_hash_keys, report, orig_report_name, vulnerabilities
+ sha1_hash_keys, report, orig_report_name, vulnerabilities,
)
def _process_vuln_results(
- self, sha1_hash_keys, report, orig_report_name, vulnerabilities
+ self, sha1_hash_keys, report, orig_report_name, vulnerabilities,
):
"""
Process findings for each project.
@@ -72,7 +72,7 @@ def _process_vuln_results(
vuln_dict.get("Vulnerability URL"),
vuln_dict.get("Missing exploit mitigations"),
vuln_dict.get("BDSA"),
- vuln_dict.get("Version override type")
+ vuln_dict.get("Version override type"),
)
def __partition_by_key(self, csv_file):
diff --git a/dojo/tools/blackduck_binary_analysis/parser.py b/dojo/tools/blackduck_binary_analysis/parser.py
index 2f0523223e..b69fad5d91 100644
--- a/dojo/tools/blackduck_binary_analysis/parser.py
+++ b/dojo/tools/blackduck_binary_analysis/parser.py
@@ -47,7 +47,7 @@ def ingest_findings(self, sorted_findings, test):
if str(i.cvss_vector_v3) != "":
cvss_vectors = "{}{}".format(
"CVSS:3.1/",
- i.cvss_vector_v3
+ i.cvss_vector_v3,
)
cvss_obj = CVSS3(cvss_vectors)
elif str(i.cvss_vector_v2) != "":
@@ -68,7 +68,7 @@ def ingest_findings(self, sorted_findings, test):
references = self.format_references(i)
unique_finding_key = hashlib.sha256(
- f"{file_path + object_sha1 + title}".encode()
+ f"{file_path + object_sha1 + title}".encode(),
).hexdigest()
if unique_finding_key in findings:
diff --git a/dojo/tools/blackduck_component_risk/parser.py b/dojo/tools/blackduck_component_risk/parser.py
index 7e683364f4..274ff74e02 100644
--- a/dojo/tools/blackduck_component_risk/parser.py
+++ b/dojo/tools/blackduck_component_risk/parser.py
@@ -149,23 +149,23 @@ def license_description(self, component, source):
:return:
"""
desc = "**License Name:** {} \n".format(
- component.get("License names")
+ component.get("License names"),
)
desc += "**License Families:** {} \n".format(
- component.get("License families")
+ component.get("License families"),
)
desc += "**License Usage:** {} \n".format(component.get("Usage"))
desc += "**License Origin name:** {} \n".format(
- component.get("Origin name")
+ component.get("Origin name"),
)
desc += "**License Origin id:** {} \n".format(
- component.get("Origin id")
+ component.get("Origin id"),
)
desc += "**Match type:** {}\n".format(component.get("Match type"))
try:
desc += "**Path:** {}\n".format(source.get("Path"))
desc += "**Archive context:** {}\n".format(
- source.get("Archive context")
+ source.get("Archive context"),
)
desc += "**Scan:** {}\n".format(source.get("Scan"))
except KeyError:
@@ -207,7 +207,7 @@ def security_title(self, vulns):
:return:
"""
title = "Security Risk: {}:{}".format(
- vulns[0]["Component name"], vulns[0]["Component version name"]
+ vulns[0]["Component name"], vulns[0]["Component version name"],
)
return title
@@ -225,12 +225,12 @@ def security_description(self, vulns):
for vuln in vulns:
desc += "###{} \n".format(vuln["Vulnerability id"])
desc += "**Base Score:** {} \n**Exploitability:** {} \n**Impact:** {}\n".format(
- vuln["Base score"], vuln["Exploitability"], vuln["Impact"]
+ vuln["Base score"], vuln["Exploitability"], vuln["Impact"],
)
# Not all have a URL
if vuln["URL"] != "":
desc += "**URL:** [{}]({})\n".format(
- vuln["Vulnerability id"], vuln["URL"]
+ vuln["Vulnerability id"], vuln["URL"],
)
desc += "**Description:** {}\n".format(vuln["Description"])
return desc
@@ -290,7 +290,7 @@ def security_mitigation(self, vulns):
:return:
"""
mit = "Update component {}:{} to a secure version".format(
- vulns[0]["Component name"], vulns[0]["Component version name"]
+ vulns[0]["Component name"], vulns[0]["Component version name"],
)
return mit
@@ -318,7 +318,7 @@ def security_references(self, vulns):
for vuln in vulns:
if vuln["URL"] != "":
references += "{}: [{}]({})\n".format(
- vuln["Vulnerability id"], vuln["URL"], vuln["URL"]
+ vuln["Vulnerability id"], vuln["URL"], vuln["URL"],
)
return references
@@ -334,7 +334,7 @@ def security_filepath(self, vulns):
"""
if vulns[0]["Component origin id"] == "":
component_key = "{}/{}".format(
- vulns[0]["Component name"], vulns[0]["Component version name"]
+ vulns[0]["Component name"], vulns[0]["Component version name"],
)
else:
component_key = vulns[0]["Component origin id"]
diff --git a/dojo/tools/bugcrowd/parser.py b/dojo/tools/bugcrowd/parser.py
index d3672255bf..a643499976 100644
--- a/dojo/tools/bugcrowd/parser.py
+++ b/dojo/tools/bugcrowd/parser.py
@@ -25,7 +25,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
csvarray = []
@@ -38,7 +38,7 @@ def get_findings(self, filename, test):
url = row.get("bug_url", None)
pre_description = self.split_description(
- row.get("description", None)
+ row.get("description", None),
)
Description = (
pre_description.get("description", "")
@@ -120,7 +120,7 @@ def get_findings(self, filename, test):
+ row.get("vrt_lineage", "")
)
finding.steps_to_reproduce = pre_description.get(
- "steps_to_reproduce", None
+ "steps_to_reproduce", None,
)
finding.references = References
finding.severity = self.convert_severity(row.get("priority", 0))
@@ -139,7 +139,7 @@ def get_findings(self, filename, test):
finding.description = ""
key = hashlib.md5(
- (finding.title + "|" + finding.description).encode("utf-8")
+ (finding.title + "|" + finding.description).encode("utf-8"),
).hexdigest()
if key not in dupes:
@@ -173,7 +173,7 @@ def description_parse(self, ret):
ret[
"steps_to_reproduce"
] = "### Steps To Reproduce\n" + ret.get(
- "imsteps_to_reproducepact", ""
+ "imsteps_to_reproducepact", "",
)
steps = skip = 1
poc = impact = 0
diff --git a/dojo/tools/burp/parser.py b/dojo/tools/burp/parser.py
index f260e598b3..3fcc728a19 100644
--- a/dojo/tools/burp/parser.py
+++ b/dojo/tools/burp/parser.py
@@ -74,7 +74,7 @@ def get_attrib_from_subnode(xml_node, subnode_xpath_expr, attrib_name):
if ETREE_VERSION[0] <= 1 and ETREE_VERSION[1] < 3:
match_obj = re.search(
- r"([^\@]+?)\[\@([^=]*?)=\'([^\']*?)\'", subnode_xpath_expr
+ r"([^\@]+?)\[\@([^=]*?)=\'([^\']*?)\'", subnode_xpath_expr,
)
if match_obj is not None:
node_to_find = match_obj.group(1)
@@ -111,7 +111,7 @@ def get_clean_base64(value):
return ""
try:
return base64.b64decode(value).decode(
- "utf-8", "replace"
+ "utf-8", "replace",
) # wouldn't this be cleaner than below?
except UnicodeDecodeError:
# decoding of UTF-8 fail when you have a binary payload in the HTTP response
@@ -120,7 +120,7 @@ def get_clean_base64(value):
[
base64.b64decode(value).split(b"\r\n\r\n")[0].decode(),
"",
- ]
+ ],
)
@@ -152,7 +152,7 @@ def get_item(item_node, test):
request = get_clean_base64(request_response.findall("request")[0].text)
if request_response.findall("response"):
response = get_clean_base64(
- request_response.findall("response")[0].text
+ request_response.findall("response")[0].text,
)
else:
response = ""
@@ -186,10 +186,10 @@ def get_item(item_node, test):
for request_response in event.findall("./requestresponse"):
request = get_clean_base64(
- request_response.findall("request")[0].text
+ request_response.findall("request")[0].text,
)
response = get_clean_base64(
- request_response.findall("response")[0].text
+ request_response.findall("response")[0].text,
)
unsaved_req_resp.append({"req": request, "resp": response})
if collab_details[0] == "HTTP":
@@ -275,7 +275,7 @@ def get_item(item_node, test):
if len(cwes) > 1:
# FIXME support more than one CWE
logger.debug(
- f"more than one CWE for a finding {cwes}. NOT supported by parser API"
+ f"more than one CWE for a finding {cwes}. NOT supported by parser API",
)
if len(cwes) > 0:
finding.cwe = cwes[0]
diff --git a/dojo/tools/burp_api/parser.py b/dojo/tools/burp_api/parser.py
index 75e4e87507..1ec4c6b62d 100644
--- a/dojo/tools/burp_api/parser.py
+++ b/dojo/tools/burp_api/parser.py
@@ -65,10 +65,10 @@ def get_findings(self, file, test):
static_finding=False, # by definition
dynamic_finding=True, # by definition
unique_id_from_tool=str(
- issue.get("serial_number", "")
+ issue.get("serial_number", ""),
), # the serial number is a good candidate for this attribute
vuln_id_from_tool=str(
- issue.get("type_index", "")
+ issue.get("type_index", ""),
), # the type index is a good candidate for this attribute
)
# manage confidence
@@ -78,8 +78,8 @@ def get_findings(self, file, test):
if "origin" in issue and "path" in issue:
finding.unsaved_endpoints = [
Endpoint.from_uri(
- issue.get("origin") + issue.get("path")
- )
+ issue.get("origin") + issue.get("path"),
+ ),
]
finding.unsaved_req_resp = []
for evidence in issue.get("evidence", []):
@@ -89,13 +89,13 @@ def get_findings(self, file, test):
]:
continue
request = self.get_clean_base64(
- evidence.get("request_response").get("request")
+ evidence.get("request_response").get("request"),
)
response = self.get_clean_base64(
- evidence.get("request_response").get("response")
+ evidence.get("request_response").get("response"),
)
finding.unsaved_req_resp.append(
- {"req": request, "resp": response}
+ {"req": request, "resp": response},
)
items.append(finding)
diff --git a/dojo/tools/burp_enterprise/parser.py b/dojo/tools/burp_enterprise/parser.py
index 1984cc65d7..b652dda32c 100644
--- a/dojo/tools/burp_enterprise/parser.py
+++ b/dojo/tools/burp_enterprise/parser.py
@@ -72,10 +72,10 @@ def get_content(self, container):
def pre_allocate_items(self, tree):
items = []
endpoint_text = tree.xpath(
- "/html/body/div/div[contains(@class, 'section')]/h1"
+ "/html/body/div/div[contains(@class, 'section')]/h1",
)
severities = tree.xpath(
- "/html/body/div/div[contains(@class, 'section')]/table[contains(@class, 'issue-table')]/tbody"
+ "/html/body/div/div[contains(@class, 'section')]/table[contains(@class, 'issue-table')]/tbody",
)
endpoint_text = [
endpoint
@@ -116,7 +116,7 @@ def get_items(self, tree, test):
# Check that there is at least one vulnerability (the vulnerabilities
# table is absent when no vuln are found)
vulns = tree.xpath(
- "/html/body/div/div[contains(@class, 'section details')]/div[contains(@class, 'issue-container')]"
+ "/html/body/div/div[contains(@class, 'section details')]/div[contains(@class, 'issue-container')]",
)
if len(vulns) == 0:
return []
@@ -237,7 +237,7 @@ def create_findings(self, items, test):
unsaved_req_resp = []
for index in range(len(requests)):
unsaved_req_resp.append(
- {"req": requests[index], "resp": responses[index]}
+ {"req": requests[index], "resp": responses[index]},
)
find.unsaved_req_resp = unsaved_req_resp
diff --git a/dojo/tools/burp_graphql/parser.py b/dojo/tools/burp_graphql/parser.py
index f6d032bc28..c026694122 100644
--- a/dojo/tools/burp_graphql/parser.py
+++ b/dojo/tools/burp_graphql/parser.py
@@ -63,7 +63,7 @@ def parse_findings(self, scan_data):
for issue in scan_data:
if not issue.get("issue_type") or not issue["issue_type"].get(
- "name"
+ "name",
):
msg = "Issue does not have a name"
raise ValueError(msg)
@@ -89,11 +89,11 @@ def combine_findings(self, finding, issue):
if issue.get("evidence"):
finding["Evidence"] = finding["Evidence"] + self.parse_evidence(
- issue.get("evidence")
+ issue.get("evidence"),
)
finding["Endpoints"].append(
- Endpoint.from_uri(issue["origin"] + issue["path"])
+ Endpoint.from_uri(issue["origin"] + issue["path"]),
)
def create_finding(self, issue):
@@ -107,18 +107,18 @@ def create_finding(self, issue):
if issue.get("description_html"):
finding["Description"] += "**Issue Detail**\n"
finding["Description"] += html2text.html2text(
- issue.get("description_html")
+ issue.get("description_html"),
)
if issue["issue_type"].get("description_html"):
finding["Impact"] += "**Issue Background**\n"
finding["Impact"] += html2text.html2text(
- issue["issue_type"].get("description_html")
+ issue["issue_type"].get("description_html"),
)
elif issue["issue_type"].get("description_html"):
finding["Description"] += "**Issue Background**\n"
finding["Description"] += html2text.html2text(
- issue["issue_type"].get("description_html")
+ issue["issue_type"].get("description_html"),
)
if issue.get("remediation_html"):
@@ -128,12 +128,12 @@ def create_finding(self, issue):
if issue["issue_type"].get("remediation_html"):
finding["Mitigation"] += "**Remediation Background**\n"
finding["Mitigation"] += html2text.html2text(
- issue["issue_type"].get("remediation_html")
+ issue["issue_type"].get("remediation_html"),
)
elif issue["issue_type"].get("remediation_html"):
finding["Impact"] += "**Remediation Background**\n"
finding["Impact"] += html2text.html2text(
- issue["issue_type"].get("remediation_html")
+ issue["issue_type"].get("remediation_html"),
)
if issue.get("severity"):
@@ -142,7 +142,7 @@ def create_finding(self, issue):
finding["Severity"] = "Info"
finding["Endpoints"] = [
- Endpoint.from_uri(issue["origin"] + issue["path"])
+ Endpoint.from_uri(issue["origin"] + issue["path"]),
]
if issue.get("evidence"):
@@ -153,16 +153,16 @@ def create_finding(self, issue):
if issue["issue_type"].get("references_html"):
finding["References"] += "**References**\n"
finding["References"] += html2text.html2text(
- issue["issue_type"].get("references_html")
+ issue["issue_type"].get("references_html"),
)
if issue["issue_type"].get("vulnerability_classifications_html"):
finding["References"] += "**CWE Information**\n"
finding["References"] += html2text.html2text(
- issue["issue_type"].get("vulnerability_classifications_html")
+ issue["issue_type"].get("vulnerability_classifications_html"),
)
finding["CWE"] = self.get_cwe(
- issue["issue_type"].get("vulnerability_classifications_html")
+ issue["issue_type"].get("vulnerability_classifications_html"),
)
else:
finding["CWE"] = 0
@@ -182,11 +182,11 @@ def parse_evidence(self, evidence):
for data in request_dict.get("request_segments"):
if data.get("data_html"):
request += html2text.html2text(
- data.get("data_html")
+ data.get("data_html"),
).strip()
elif data.get("highlight_html"):
request += html2text.html2text(
- data.get("highlight_html")
+ data.get("highlight_html"),
).strip()
if (
@@ -201,11 +201,11 @@ def parse_evidence(self, evidence):
for data in response_dict.get("response_segments"):
if data.get("data_html"):
response += html2text.html2text(
- data.get("data_html")
+ data.get("data_html"),
).strip()
elif data.get("highlight_html"):
response += html2text.html2text(
- data.get("highlight_html")
+ data.get("highlight_html"),
).strip()
i += 2
diff --git a/dojo/tools/cargo_audit/parser.py b/dojo/tools/cargo_audit/parser.py
index e992e93651..1447bf5908 100644
--- a/dojo/tools/cargo_audit/parser.py
+++ b/dojo/tools/cargo_audit/parser.py
@@ -52,7 +52,7 @@ def get_findings(self, filename, test):
)
references = f"{advisory.get('url')}\n" + "\n".join(
- advisory["references"]
+ advisory["references"],
)
date = advisory.get("date")
@@ -73,8 +73,8 @@ def get_findings(self, filename, test):
mitigation = "No information about patched version"
dupe_key = hashlib.sha256(
(vuln_id + date + package_name + package_version).encode(
- "utf-8"
- )
+ "utf-8",
+ ),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/checkmarx/parser.py b/dojo/tools/checkmarx/parser.py
index 1cfdfdac25..02e242c0d1 100644
--- a/dojo/tools/checkmarx/parser.py
+++ b/dojo/tools/checkmarx/parser.py
@@ -82,17 +82,17 @@ def _get_findings_xml(self, filename, test):
if result.get("Status") is not None:
findingdetail = "{}**Status:** {}\n".format(
- findingdetail, result.get("Status")
+ findingdetail, result.get("Status"),
)
deeplink = "[{}]({})".format(
- result.get("DeepLink"), result.get("DeepLink")
+ result.get("DeepLink"), result.get("DeepLink"),
)
findingdetail = f"{findingdetail}**Finding Link:** {deeplink}\n"
if self.mode == "detailed":
self._process_result_detailed(
- test, dupes, findingdetail, query, result, find_date
+ test, dupes, findingdetail, query, result, find_date,
)
else:
self._process_result_file_name_aggregated(
@@ -111,11 +111,11 @@ def _get_findings_xml(self, filename, test):
for key in list(dupes):
vuln_ids_from_tool[key].sort
dupes[key].vuln_id_from_tool = ",".join(
- vuln_ids_from_tool[key]
+ vuln_ids_from_tool[key],
)[:500]
for lang in language_list:
add_language(
- test.engagement.product, lang, files=language_list[lang]
+ test.engagement.product, lang, files=language_list[lang],
)
return list(dupes.values())
@@ -137,7 +137,7 @@ def _process_result_file_name_aggregated(
_name, cwe, _categories, queryId = self.getQueryElements(query)
titleStart = query.get("name").replace("_", " ")
description, lastPathnode = self.get_description_file_name_aggregated(
- query, result
+ query, result,
)
sinkFilename = lastPathnode.find("FileName").text
if sinkFilename:
@@ -218,14 +218,14 @@ def get_description_file_name_aggregated(self, query, result):
# At this point we have iterated over all path nodes (function calls)
# and pathnode is at the sink of the vulnerability
sinkFilename, sinkLineNumber, sinkObject = self.get_pathnode_elements(
- pathnode
+ pathnode,
)
description = f"Source file: {sourceFilename} (line {sourceLineNumber})\nSource object: {sourceObject}"
description = f"{description}\nSink file: {sinkFilename} (line {sinkLineNumber})\nSink object: {sinkObject}"
return description, pathnode
def _process_result_detailed(
- self, test, dupes, findingdetail, query, result, find_date
+ self, test, dupes, findingdetail, query, result, find_date,
):
"""Process one result = one pathId for scanner "Checkmarx Scan detailed"
Create the finding and add it into the dupes list
@@ -240,7 +240,7 @@ def _process_result_detailed(
logger.warning(
"Checkmarx scan: more than one path found: "
+ str(len(paths))
- + ". Only the last one will be used"
+ + ". Only the last one will be used",
)
for path in paths:
@@ -257,7 +257,7 @@ def _process_result_detailed(
# Loop over function calls / assignments in the data flow graph
for pathnode in path.findall("PathNode"):
findingdetail = self.get_description_detailed(
- pathnode, findingdetail
+ pathnode, findingdetail,
)
nodeId = pathnode.find("NodeId").text
if nodeId == "1":
@@ -313,17 +313,17 @@ def get_pathnode_elements(self, pathnode):
def get_description_detailed(self, pathnode, findingdetail):
if pathnode.find("Line").text is not None:
findingdetail = "{}**Line Number:** {}\n".format(
- findingdetail, pathnode.find("Line").text
+ findingdetail, pathnode.find("Line").text,
)
if pathnode.find("Column").text is not None:
findingdetail = "{}**Column:** {}\n".format(
- findingdetail, pathnode.find("Column").text
+ findingdetail, pathnode.find("Column").text,
)
if pathnode.find("Name").text is not None:
findingdetail = "{}**Source Object:** {}\n".format(
- findingdetail, pathnode.find("Name").text
+ findingdetail, pathnode.find("Name").text,
)
for codefragment in pathnode.findall("Snippet/Line"):
@@ -392,7 +392,7 @@ def _get_findings_json(self, file, test):
description=descriptiondetails,
title=title,
date=self._parse_date(
- vulnerability.get("firstFoundDate")
+ vulnerability.get("firstFoundDate"),
),
severity=vulnerability.get("severity").title(),
active=(
@@ -412,7 +412,7 @@ def _get_findings_json(self, file, test):
)
else:
finding.unique_id_from_tool = str(
- vulnerability.get("similarityId")
+ vulnerability.get("similarityId"),
)
# get the last node and set some values
if vulnerability.get("nodes"):
@@ -431,7 +431,7 @@ def _get_findings_json(self, file, test):
title=f"{component_name}:{component_version} | {cve}",
description=vulnerability.get("description"),
date=self._parse_date(
- vulnerability.get("firstFoundDate")
+ vulnerability.get("firstFoundDate"),
),
severity=vulnerability.get("severity").title(),
active=(
@@ -449,15 +449,15 @@ def _get_findings_json(self, file, test):
)
if vulnerability.get("cveId"):
finding.unsaved_vulnerability_ids = [
- vulnerability.get("cveId")
+ vulnerability.get("cveId"),
]
if vulnerability.get("id"):
finding.unique_id_from_tool = vulnerability.get(
- "id"
+ "id",
)
else:
finding.unique_id_from_tool = str(
- vulnerability.get("similarityId")
+ vulnerability.get("similarityId"),
)
finding.unsaved_tags = [result_type]
findings.append(finding)
@@ -469,7 +469,7 @@ def _get_findings_json(self, file, test):
title=f'{name} | {vulnerability.get("issueType")}',
description=vulnerability.get("description"),
date=self._parse_date(
- vulnerability.get("firstFoundDate")
+ vulnerability.get("firstFoundDate"),
),
severity=vulnerability.get("severity").title(),
active=(
@@ -482,18 +482,18 @@ def _get_findings_json(self, file, test):
file_path=vulnerability.get("fileName"),
line=vulnerability.get("line", 0),
severity_justification=vulnerability.get(
- "actualValue"
+ "actualValue",
),
test=test,
static_finding=True,
)
if vulnerability.get("id"):
finding.unique_id_from_tool = vulnerability.get(
- "id"
+ "id",
)
else:
finding.unique_id_from_tool = str(
- vulnerability.get("similarityId")
+ vulnerability.get("similarityId"),
)
finding.unsaved_tags = [result_type, name]
findings.append(finding)
diff --git a/dojo/tools/checkmarx_one/parser.py b/dojo/tools/checkmarx_one/parser.py
index 381f705f82..aa6e1b4a11 100644
--- a/dojo/tools/checkmarx_one/parser.py
+++ b/dojo/tools/checkmarx_one/parser.py
@@ -135,7 +135,7 @@ def get_node_snippet(nodes: list) -> str:
f"**File Name**: {node.get('fileName')}\n"
f"**Method**: {node.get('method')}\n"
f"**Line**: {node.get('line')}\n"
- f"**Code Snippet**: {node.get('code')}\n"
+ f"**Code Snippet**: {node.get('code')}\n",
)
return "\n---\n".join(formatted_nodes)
@@ -148,7 +148,7 @@ def get_node_snippet(nodes: list) -> str:
# instance of the vulnerability
base_finding_details = {
"title": result.get(
- "queryPath", result.get("queryName", "SAST Finding")
+ "queryPath", result.get("queryName", "SAST Finding"),
).replace("_", " "),
"description": (
f"{result.get('description')}\n\n"
diff --git a/dojo/tools/checkmarx_osa/parser.py b/dojo/tools/checkmarx_osa/parser.py
index f61f5de656..43cb255698 100644
--- a/dojo/tools/checkmarx_osa/parser.py
+++ b/dojo/tools/checkmarx_osa/parser.py
@@ -52,7 +52,7 @@ def get_findings(self, filehandle, test):
vulnerability_id = item.get("cveName", "NC")
finding_item = Finding(
title="{} {} | {}".format(
- library["name"], library["version"], vulnerability_id
+ library["name"], library["version"], vulnerability_id,
),
severity=item["severity"]["name"],
description=item.get("description", "NC"),
@@ -69,14 +69,14 @@ def get_findings(self, filehandle, test):
cwe=1035,
cvssv3_score=item.get("score", None),
publish_date=datetime.strptime(
- item["publishDate"], "%Y-%m-%dT%H:%M:%S"
+ item["publishDate"], "%Y-%m-%dT%H:%M:%S",
)
if "publishDate" in item
else None,
static_finding=True,
dynamic_finding=False,
scanner_confidence=self.checkmarx_confidence_to_defectdojo_confidence(
- library["confidenceLevel"]
+ library["confidenceLevel"],
)
if "confidenceLevel" in library
else None,
@@ -115,7 +115,7 @@ def get_vunlerabilities(self, tree):
# 100% = Certain
# 70% = Firm
def checkmarx_confidence_to_defectdojo_confidence(
- self, checkmarx_confidence
+ self, checkmarx_confidence,
):
return round((100 - checkmarx_confidence) / 10) + 1
diff --git a/dojo/tools/chefinspect/parser.py b/dojo/tools/chefinspect/parser.py
index 1dd413df62..22b7cdfd0a 100644
--- a/dojo/tools/chefinspect/parser.py
+++ b/dojo/tools/chefinspect/parser.py
@@ -54,6 +54,6 @@ def get_findings(self, file, test):
description=description,
severity=self.convert_score(json_object.get("impact")),
active=True,
- )
+ ),
)
return result
diff --git a/dojo/tools/clair/clairklar_parser.py b/dojo/tools/clair/clairklar_parser.py
index c42ba78b32..efef6483d5 100644
--- a/dojo/tools/clair/clairklar_parser.py
+++ b/dojo/tools/clair/clairklar_parser.py
@@ -19,7 +19,7 @@ def get_items_klar(self, tree, test):
]
for clair_severity in clair_severities:
items.extend(
- self.set_items_for_severity(tree, test, clair_severity)
+ self.set_items_for_severity(tree, test, clair_severity),
)
return items
@@ -60,7 +60,7 @@ def get_item_clairklar(self, item_node, test):
)
if "FeatureVersion" in item_node:
description += " Vulnerable Versions: " + str(
- item_node["FeatureVersion"]
+ item_node["FeatureVersion"],
)
mitigation = ""
diff --git a/dojo/tools/cloudsploit/parser.py b/dojo/tools/cloudsploit/parser.py
index 22e8de1a2b..7ad446bcf7 100644
--- a/dojo/tools/cloudsploit/parser.py
+++ b/dojo/tools/cloudsploit/parser.py
@@ -56,7 +56,7 @@ def get_findings(self, file, test):
# internal de-duplication
dupe_key = hashlib.sha256(
- str(description + title).encode("utf-8")
+ str(description + title).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/cobalt/parser.py b/dojo/tools/cobalt/parser.py
index 2e4a74f41f..4ac5c43b73 100644
--- a/dojo/tools/cobalt/parser.py
+++ b/dojo/tools/cobalt/parser.py
@@ -25,7 +25,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
dupes = {}
for row in reader:
@@ -76,7 +76,7 @@ def get_findings(self, filename, test):
finding.description = ""
key = hashlib.md5(
- (finding.title + "|" + finding.description).encode("utf-8")
+ (finding.title + "|" + finding.description).encode("utf-8"),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/contrast/parser.py b/dojo/tools/contrast/parser.py
index 73e3b94c7b..fb31316e5f 100644
--- a/dojo/tools/contrast/parser.py
+++ b/dojo/tools/contrast/parser.py
@@ -41,7 +41,7 @@ def get_findings(self, filename, test):
if severity == "Note":
severity = "Info"
date_raw = datetime.datetime.utcfromtimestamp(
- int(row.get("First Seen")) / 1000
+ int(row.get("First Seen")) / 1000,
)
finding = Finding(
title=title.split(" from")[0],
@@ -76,11 +76,11 @@ def get_findings(self, filename, test):
+ "\n"
+ row.get("Request Body"),
"resp": "",
- }
+ },
)
dupe_key = hashlib.sha256(
- f"{finding.vuln_id_from_tool}".encode()
+ f"{finding.vuln_id_from_tool}".encode(),
).digest()
if dupe_key in dupes:
@@ -90,7 +90,7 @@ def get_findings(self, filename, test):
+ finding.description
)
dupes[dupe_key].unsaved_endpoints.extend(
- finding.unsaved_endpoints
+ finding.unsaved_endpoints,
)
dupes[dupe_key].nb_occurences += finding.nb_occurences
dupes[
diff --git a/dojo/tools/coverity_api/parser.py b/dojo/tools/coverity_api/parser.py
index c3b15f573f..194939de98 100644
--- a/dojo/tools/coverity_api/parser.py
+++ b/dojo/tools/coverity_api/parser.py
@@ -35,14 +35,14 @@ def get_findings(self, file, test):
f"**Type:** `{issue.get('displayType')}`",
f"**Status:** `{issue.get('status')}`",
f"**Classification:** `{issue.get('classification')}`",
- ]
+ ],
)
finding = Finding()
finding.test = test
finding.title = issue["displayType"]
finding.severity = self.convert_displayImpact(
- issue.get("displayImpact")
+ issue.get("displayImpact"),
)
finding.description = description_formated
finding.static_finding = True
@@ -51,7 +51,7 @@ def get_findings(self, file, test):
if "firstDetected" in issue:
finding.date = datetime.strptime(
- issue["firstDetected"], "%m/%d/%y"
+ issue["firstDetected"], "%m/%d/%y",
).date()
if "cwe" in issue and isinstance(issue["cwe"], int):
diff --git a/dojo/tools/crashtest_security/parser.py b/dojo/tools/crashtest_security/parser.py
index 8770013b79..2c118d8466 100644
--- a/dojo/tools/crashtest_security/parser.py
+++ b/dojo/tools/crashtest_security/parser.py
@@ -30,7 +30,7 @@ def get_findings(self, file, test):
crashtest_scan = crashtest_scan["data"]
descriptions = self.create_descriptions_dict(
- crashtest_scan["descriptions"]
+ crashtest_scan["descriptions"],
)
# Iterate scanner which contain the items
@@ -39,14 +39,14 @@ def get_findings(self, file, test):
# Iterate all findings of the scanner
for finding in scanner:
items.append(
- self.generate_finding(finding, test, descriptions)
+ self.generate_finding(finding, test, descriptions),
)
# Iterate all connected CVE findings if any
if "cve_findings" in finding:
for cve_finding in finding["cve_findings"]:
items.append(
- self.generate_cve_finding(cve_finding, test)
+ self.generate_cve_finding(cve_finding, test),
)
return items
@@ -103,7 +103,7 @@ def generate_cve_finding(self, cve_finding, test):
"""
severity = self.get_severity(cve_finding["cvss"])
references = "https://nvd.nist.gov/vuln/detail/{}".format(
- cve_finding["cve_id"]
+ cve_finding["cve_id"],
)
finding = Finding(
title=cve_finding["cve_id"],
diff --git a/dojo/tools/cred_scan/parser.py b/dojo/tools/cred_scan/parser.py
index 6b67305caa..e796284346 100644
--- a/dojo/tools/cred_scan/parser.py
+++ b/dojo/tools/cred_scan/parser.py
@@ -29,7 +29,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8-sig")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
dupes = {}
@@ -41,11 +41,11 @@ def get_findings(self, filename, test):
description += "\n Is Supressed: " + str(row["IsSuppressed"])
if "SuppressJustification" in row:
description += "\n Supress Justifcation: " + str(
- row["SuppressJustification"]
+ row["SuppressJustification"],
)
if "MatchingScore" in row:
description += "\n Matching Score: " + str(
- row["MatchingScore"]
+ row["MatchingScore"],
)
finding = Finding(
@@ -59,7 +59,7 @@ def get_findings(self, filename, test):
# Update the finding date if it specified
if "TimeofDiscovery" in row:
finding.date = parser.parse(
- row["TimeofDiscovery"].replace("Z", "")
+ row["TimeofDiscovery"].replace("Z", ""),
)
# internal de-duplication
diff --git a/dojo/tools/crunch42/parser.py b/dojo/tools/crunch42/parser.py
index ff09dddcd6..10917ca84b 100644
--- a/dojo/tools/crunch42/parser.py
+++ b/dojo/tools/crunch42/parser.py
@@ -49,7 +49,7 @@ def get_items(self, tree, test):
for key, node in results.items():
for issue in node["issues"]:
item = self.get_item(
- issue, key, test
+ issue, key, test,
)
items[iterator] = item
iterator += 1
diff --git a/dojo/tools/cyclonedx/helpers.py b/dojo/tools/cyclonedx/helpers.py
index fb658dfdc1..8e2bd29d24 100644
--- a/dojo/tools/cyclonedx/helpers.py
+++ b/dojo/tools/cyclonedx/helpers.py
@@ -15,7 +15,7 @@ def _get_cvssv3(self, raw_vector):
return CVSS3(raw_vector)
except BaseException:
LOGGER.exception(
- f"error while parsing vector CVSS v3 {raw_vector}"
+ f"error while parsing vector CVSS v3 {raw_vector}",
)
return None
diff --git a/dojo/tools/cyclonedx/json_parser.py b/dojo/tools/cyclonedx/json_parser.py
index a57b6debaf..772e948f86 100644
--- a/dojo/tools/cyclonedx/json_parser.py
+++ b/dojo/tools/cyclonedx/json_parser.py
@@ -17,7 +17,7 @@ def _get_findings_json(self, file, test):
report_date = None
if data.get("metadata") and data.get("metadata").get("timestamp"):
report_date = dateutil.parser.parse(
- data.get("metadata").get("timestamp")
+ data.get("metadata").get("timestamp"),
)
# for each component we keep data
components = {}
@@ -55,7 +55,7 @@ def _get_findings_json(self, file, test):
for affect in vulnerability.get("affects", []):
reference = affect["ref"] # required by the specification
component_name, component_version = Cyclonedxhelper()._get_component(
- components, reference
+ components, reference,
)
if not description:
description = "Description was not provided."
@@ -105,7 +105,7 @@ def _get_findings_json(self, file, test):
if cwes and len(cwes) > 1:
# FIXME support more than one CWE
LOGGER.debug(
- f"more than one CWE for a finding {cwes}. NOT supported by parser API"
+ f"more than one CWE for a finding {cwes}. NOT supported by parser API",
)
if cwes and len(cwes) > 0:
finding.cwe = cwes[0]
@@ -138,7 +138,7 @@ def _flatten_components(self, components, flatted_components):
for component in components:
if "components" in component:
self._flatten_components(
- component.get("components", []), flatted_components
+ component.get("components", []), flatted_components,
)
# according to specification 1.4, 'bom-ref' is mandatory but some
# tools don't provide it
diff --git a/dojo/tools/cyclonedx/xml_parser.py b/dojo/tools/cyclonedx/xml_parser.py
index 91ba3ab0a9..dc0cfe4ff3 100644
--- a/dojo/tools/cyclonedx/xml_parser.py
+++ b/dojo/tools/cyclonedx/xml_parser.py
@@ -20,21 +20,21 @@ def _get_findings_xml(self, file, test):
raise ValueError(msg)
ns = {
"b": namespace.replace("{", "").replace(
- "}", ""
+ "}", "",
), # we accept whatever the version
"v": "http://cyclonedx.org/schema/ext/vulnerability/1.0",
}
# get report date
report_date = None
report_date_raw = root.findtext(
- "b:metadata/b:timestamp", namespaces=ns
+ "b:metadata/b:timestamp", namespaces=ns,
)
if report_date_raw:
report_date = dateutil.parser.parse(report_date_raw)
bom_refs = {}
findings = []
for component in root.findall(
- "b:components/b:component", namespaces=ns
+ "b:components/b:component", namespaces=ns,
):
component_name = component.findtext(f"{namespace}name")
component_version = component.findtext(f"{namespace}version")
@@ -46,7 +46,7 @@ def _get_findings_xml(self, file, test):
}
# for each vulnerabilities add a finding
for vulnerability in component.findall(
- "v:vulnerabilities/v:vulnerability", namespaces=ns
+ "v:vulnerabilities/v:vulnerability", namespaces=ns,
):
finding_vuln = self.manage_vulnerability_legacy(
vulnerability,
@@ -59,20 +59,20 @@ def _get_findings_xml(self, file, test):
findings.append(finding_vuln)
# manage adhoc vulnerabilities
for vulnerability in root.findall(
- "v:vulnerabilities/v:vulnerability", namespaces=ns
+ "v:vulnerabilities/v:vulnerability", namespaces=ns,
):
finding_vuln = self.manage_vulnerability_legacy(
- vulnerability, ns, bom_refs, report_date
+ vulnerability, ns, bom_refs, report_date,
)
findings.append(finding_vuln)
# manage adhoc vulnerabilities (compatible with 1.4 of the spec)
for vulnerability in root.findall(
- "b:vulnerabilities/b:vulnerability", namespaces=ns
+ "b:vulnerabilities/b:vulnerability", namespaces=ns,
):
findings.extend(
self._manage_vulnerability_xml(
- vulnerability, ns, bom_refs, report_date
- )
+ vulnerability, ns, bom_refs, report_date,
+ ),
)
return findings
@@ -94,7 +94,7 @@ def manage_vulnerability_legacy(
vuln_id = vulnerability.findtext("v:id", namespaces=ns)
severity = vulnerability.findtext(
- "v:ratings/v:rating/v:severity", namespaces=ns
+ "v:ratings/v:rating/v:severity", namespaces=ns,
)
description = vulnerability.findtext("v:description", namespaces=ns)
# by the schema, only id and ref are mandatory, even the severity is
@@ -105,7 +105,7 @@ def manage_vulnerability_legacy(
f"**Ref:** {ref}",
f"**Id:** {vuln_id}",
f"**Severity:** {str(severity)}",
- ]
+ ],
)
if component_name is None:
bom = bom_refs[ref]
@@ -115,7 +115,7 @@ def manage_vulnerability_legacy(
severity = Cyclonedxhelper().fix_severity(severity)
references = ""
for adv in vulnerability.findall(
- "v:advisories/v:advisory", namespaces=ns
+ "v:advisories/v:advisory", namespaces=ns,
):
references += f"{adv.text}\n"
finding = Finding(
@@ -132,14 +132,14 @@ def manage_vulnerability_legacy(
finding.date = report_date
mitigation = ""
for recommend in vulnerability.findall(
- "v:recommendations/v:recommendation", namespaces=ns
+ "v:recommendations/v:recommendation", namespaces=ns,
):
mitigation += f"{recommend.text}\n"
if mitigation != "":
finding.mitigation = mitigation
# manage CVSS
for rating in vulnerability.findall(
- "v:ratings/v:rating", namespaces=ns
+ "v:ratings/v:rating", namespaces=ns,
):
if "CVSSv3" == rating.findtext("v:method", namespaces=ns):
raw_vector = rating.findtext("v:vector", namespaces=ns)
@@ -156,7 +156,7 @@ def manage_vulnerability_legacy(
if len(cwes) > 1:
# FIXME support more than one CWE
LOGGER.debug(
- f"more than one CWE for a finding {cwes}. NOT supported by parser API"
+ f"more than one CWE for a finding {cwes}. NOT supported by parser API",
)
if len(cwes) > 0:
finding.cwe = cwes[0]
@@ -171,7 +171,7 @@ def manage_vulnerability_legacy(
def get_cwes(self, node, prefix, namespaces):
cwes = []
for cwe in node.findall(
- prefix + ":cwes/" + prefix + ":cwe", namespaces
+ prefix + ":cwes/" + prefix + ":cwe", namespaces,
):
if cwe.text.isdigit():
cwes.append(int(cwe.text))
@@ -195,12 +195,12 @@ def _manage_vulnerability_xml(
else:
description = f"\n{detail}"
severity = vulnerability.findtext(
- "b:ratings/b:rating/b:severity", namespaces=ns
+ "b:ratings/b:rating/b:severity", namespaces=ns,
)
severity = Cyclonedxhelper().fix_severity(severity)
references = ""
for advisory in vulnerability.findall(
- "b:advisories/b:advisory", namespaces=ns
+ "b:advisories/b:advisory", namespaces=ns,
):
title = advisory.findtext("b:title", namespaces=ns)
if title:
@@ -215,7 +215,7 @@ def _manage_vulnerability_xml(
vulnerability_ids.append(vuln_id)
# check references to see if we have other vulnerability ids
for reference in vulnerability.findall(
- "b:references/b:reference", namespaces=ns
+ "b:references/b:reference", namespaces=ns,
):
vulnerability_id = reference.findtext("b:id", namespaces=ns)
if vulnerability_id:
@@ -223,18 +223,18 @@ def _manage_vulnerability_xml(
# for all component affected
findings = []
for target in vulnerability.findall(
- "b:affects/b:target", namespaces=ns
+ "b:affects/b:target", namespaces=ns,
):
ref = target.find("b:ref", namespaces=ns)
component_name, component_version = Cyclonedxhelper()._get_component(
- bom_refs, ref.text
+ bom_refs, ref.text,
)
finding = Finding(
title=f"{component_name}:{component_version} | {vuln_id}",
description=description,
severity=severity,
mitigation=vulnerability.findtext(
- "b:recommendation", namespaces=ns
+ "b:recommendation", namespaces=ns,
),
references=references,
component_name=component_name,
@@ -250,7 +250,7 @@ def _manage_vulnerability_xml(
finding.date = report_date
# manage CVSS
for rating in vulnerability.findall(
- "b:ratings/b:rating", namespaces=ns
+ "b:ratings/b:rating", namespaces=ns,
):
method = rating.findtext("b:method", namespaces=ns)
if "CVSSv3" == method or "CVSSv31" == method:
@@ -270,7 +270,7 @@ def _manage_vulnerability_xml(
if len(cwes) > 1:
# FIXME support more than one CWE
LOGGER.debug(
- f"more than one CWE for a finding {cwes}. NOT supported by parser API"
+ f"more than one CWE for a finding {cwes}. NOT supported by parser API",
)
if len(cwes) > 0:
finding.cwe = cwes[0]
@@ -291,7 +291,7 @@ def _manage_vulnerability_xml(
finding.active = False
if not finding.active:
detail = analysis[0].findtext(
- "b:detail", namespaces=ns
+ "b:detail", namespaces=ns,
)
if detail:
finding.mitigation = (
diff --git a/dojo/tools/dawnscanner/parser.py b/dojo/tools/dawnscanner/parser.py
index 9fb2085a1f..c2b9ab930a 100644
--- a/dojo/tools/dawnscanner/parser.py
+++ b/dojo/tools/dawnscanner/parser.py
@@ -45,7 +45,7 @@ def get_findings(self, filename, test):
if self.CVE_REGEX.match(item["name"]):
finding.unsaved_vulnerability_ids = [
- self.CVE_REGEX.findall(item["name"])[0]
+ self.CVE_REGEX.findall(item["name"])[0],
]
items.append(finding)
diff --git a/dojo/tools/dependency_check/parser.py b/dojo/tools/dependency_check/parser.py
index 870aba11fb..11b9bb10e7 100644
--- a/dojo/tools/dependency_check/parser.py
+++ b/dojo/tools/dependency_check/parser.py
@@ -29,22 +29,22 @@ def add_finding(self, finding, dupes):
str(finding.title),
str(finding.cwe),
str(finding.file_path).lower(),
- ]
+ ],
)
key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
if key not in dupes:
dupes[key] = finding
def get_filename_and_path_from_dependency(
- self, dependency, related_dependency, namespace
+ self, dependency, related_dependency, namespace,
):
if not related_dependency:
return dependency.findtext(
- f"{namespace}fileName"
+ f"{namespace}fileName",
), dependency.findtext(f"{namespace}filePath")
if related_dependency.findtext(f"{namespace}fileName"):
return related_dependency.findtext(
- f"{namespace}fileName"
+ f"{namespace}fileName",
), related_dependency.findtext(f"{namespace}filePath")
else:
# without filename, it would be just a duplicate finding so we have to skip it. filename
@@ -54,7 +54,7 @@ def get_filename_and_path_from_dependency(
return None, None
def get_component_name_and_version_from_dependency(
- self, dependency, related_dependency, namespace
+ self, dependency, related_dependency, namespace,
):
identifiers_node = dependency.find(namespace + "identifiers")
if identifiers_node:
@@ -94,7 +94,7 @@ def get_component_name_and_version_from_dependency(
# return component_name, component_version
cpe_node = identifiers_node.find(
- ".//" + namespace + 'identifier[@type="cpe"]'
+ ".//" + namespace + 'identifier[@type="cpe"]',
)
if cpe_node:
id = cpe_node.findtext(f"{namespace}name")
@@ -116,11 +116,11 @@ def get_component_name_and_version_from_dependency(
return component_name, component_version
maven_node = identifiers_node.find(
- ".//" + namespace + 'identifier[@type="maven"]'
+ ".//" + namespace + 'identifier[@type="maven"]',
)
if maven_node:
maven_parts = maven_node.findtext(f"{namespace}name").split(
- ":"
+ ":",
)
# logger.debug('maven_parts:' + str(maven_parts))
if len(maven_parts) == 3:
@@ -131,7 +131,7 @@ def get_component_name_and_version_from_dependency(
# TODO what happens when there multiple evidencecollectednodes with
# product or version as type?
evidence_collected_node = dependency.find(
- namespace + "evidenceCollected"
+ namespace + "evidenceCollected",
)
if evidence_collected_node:
#
@@ -149,16 +149,16 @@ def get_component_name_and_version_from_dependency(
# since 6.0.0 howoever it seems like there's always a packageurl above so not sure if we need the effort to
# implement more logic here
product_node = evidence_collected_node.find(
- ".//" + namespace + 'evidence[@type="product"]'
+ ".//" + namespace + 'evidence[@type="product"]',
)
if product_node:
component_name = product_node.findtext(f"{namespace}value")
version_node = evidence_collected_node.find(
- ".//" + namespace + 'evidence[@type="version"]'
+ ".//" + namespace + 'evidence[@type="version"]',
)
if version_node:
component_version = version_node.findtext(
- f"{namespace}value"
+ f"{namespace}value",
)
return component_name, component_version
@@ -166,13 +166,13 @@ def get_component_name_and_version_from_dependency(
return None, None
def get_finding_from_vulnerability(
- self, dependency, related_dependency, vulnerability, test, namespace
+ self, dependency, related_dependency, vulnerability, test, namespace,
):
(
dependency_filename,
dependency_filepath,
) = self.get_filename_and_path_from_dependency(
- dependency, related_dependency, namespace
+ dependency, related_dependency, namespace,
)
# logger.debug('dependency_filename: %s', dependency_filename)
@@ -185,7 +185,7 @@ def get_finding_from_vulnerability(
name = vulnerability.findtext(f"{namespace}name")
if vulnerability.find(f"{namespace}cwes"):
cwe_field = vulnerability.find(f"{namespace}cwes").findtext(
- f"{namespace}cwe"
+ f"{namespace}cwe",
)
else:
cwe_field = vulnerability.findtext(f"{namespace}cwe")
@@ -217,13 +217,13 @@ def get_finding_from_vulnerability(
component_name,
component_version,
) = self.get_component_name_and_version_from_dependency(
- dependency, related_dependency, namespace
+ dependency, related_dependency, namespace,
)
stripped_name = name
# startswith CVE-XXX-YYY
stripped_name = re.sub(
- r"^CVE-\d{4}-\d{4,7}", "", stripped_name
+ r"^CVE-\d{4}-\d{4,7}", "", stripped_name,
).strip()
# startswith CWE-XXX:
stripped_name = re.sub(r"^CWE-\d+\:", "", stripped_name).strip()
@@ -232,7 +232,7 @@ def get_finding_from_vulnerability(
if component_name is None:
logger.warning(
- f"component_name was None for File: {dependency_filename}, using dependency file name instead."
+ f"component_name was None for File: {dependency_filename}, using dependency file name instead.",
)
component_name = dependency_filename
@@ -261,7 +261,7 @@ def get_finding_from_vulnerability(
if severity:
if severity.strip().lower() not in self.SEVERITY_MAPPING:
logger.warning(
- f"Warning: Unknow severity value detected '{severity}'. Bypass to 'Medium' value"
+ f"Warning: Unknow severity value detected '{severity}'. Bypass to 'Medium' value",
)
severity = "Medium"
else:
@@ -275,7 +275,7 @@ def get_finding_from_vulnerability(
if references_node is not None:
reference_detail = ""
for reference_node in references_node.findall(
- namespace + "reference"
+ namespace + "reference",
):
ref_source = reference_node.findtext(f"{namespace}source")
ref_url = reference_node.findtext(f"{namespace}url")
@@ -365,17 +365,17 @@ def get_findings(self, filename, test):
projectInfo_node = scan.find(f"{namespace}projectInfo")
if projectInfo_node.findtext(f"{namespace}reportDate"):
scan_date = dateutil.parser.parse(
- projectInfo_node.findtext(f"{namespace}reportDate")
+ projectInfo_node.findtext(f"{namespace}reportDate"),
)
if dependencies:
for dependency in dependencies.findall(namespace + "dependency"):
vulnerabilities = dependency.find(
- namespace + "vulnerabilities"
+ namespace + "vulnerabilities",
)
if vulnerabilities is not None:
for vulnerability in vulnerabilities.findall(
- namespace + "vulnerability"
+ namespace + "vulnerability",
):
if vulnerability:
finding = self.get_finding_from_vulnerability(
@@ -390,13 +390,13 @@ def get_findings(self, filename, test):
self.add_finding(finding, dupes)
relatedDependencies = dependency.find(
- namespace + "relatedDependencies"
+ namespace + "relatedDependencies",
)
if relatedDependencies:
for (
relatedDependency
) in relatedDependencies.findall(
- namespace + "relatedDependency"
+ namespace + "relatedDependency",
):
finding = (
self.get_finding_from_vulnerability(
@@ -413,7 +413,7 @@ def get_findings(self, filename, test):
self.add_finding(finding, dupes)
for suppressedVulnerability in vulnerabilities.findall(
- namespace + "suppressedVulnerability"
+ namespace + "suppressedVulnerability",
):
if suppressedVulnerability:
finding = self.get_finding_from_vulnerability(
diff --git a/dojo/tools/detect_secrets/parser.py b/dojo/tools/detect_secrets/parser.py
index 7f139b8230..b3ff15af67 100644
--- a/dojo/tools/detect_secrets/parser.py
+++ b/dojo/tools/detect_secrets/parser.py
@@ -38,7 +38,7 @@ def get_findings(self, filename, test):
description += "**Type:** " + type + "\n"
dupe_key = hashlib.sha256(
- (type + file + str(line) + hashed_secret).encode("utf-8")
+ (type + file + str(line) + hashed_secret).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/dockerbench/parser.py b/dojo/tools/dockerbench/parser.py
index 4c08b850e0..120da8eb6f 100644
--- a/dojo/tools/dockerbench/parser.py
+++ b/dojo/tools/dockerbench/parser.py
@@ -34,7 +34,7 @@ def get_tests(tree, test):
for node in tree["tests"]:
items_from_results = get_results(
- node, test, test_start, test_end, description
+ node, test, test_start, test_end, description,
)
items_from_tests += items_from_results
@@ -108,7 +108,7 @@ def get_item(vuln, test, test_start, test_end, description):
if vuln.get("remediation-impact"):
mitigation += "\n"
mitigation += "mitigation impact: {}\n".format(
- vuln["remediation-impact"]
+ vuln["remediation-impact"],
)
finding = Finding(
diff --git a/dojo/tools/dockle/parser.py b/dojo/tools/dockle/parser.py
index b650694078..6bb70769dd 100644
--- a/dojo/tools/dockle/parser.py
+++ b/dojo/tools/dockle/parser.py
@@ -41,7 +41,7 @@ def get_findings(self, filename, test):
description = sorted(item.get("alerts", []))
description = "\n".join(description)
dupe_key = hashlib.sha256(
- (code + title).encode("utf-8")
+ (code + title).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/dsop/parser.py b/dojo/tools/dsop/parser.py
index 0e4834f367..0d01e5b8d8 100644
--- a/dojo/tools/dsop/parser.py
+++ b/dojo/tools/dsop/parser.py
@@ -21,11 +21,11 @@ def get_findings(self, file, test):
self.__parse_disa(test, items, book["OpenSCAP - DISA Compliance"])
self.__parse_oval(test, items, book["OpenSCAP - OVAL Results"])
self.__parse_twistlock(
- test, items, book["Twistlock Vulnerability Results"]
+ test, items, book["Twistlock Vulnerability Results"],
)
self.__parse_anchore(test, items, book["Anchore CVE Results"])
self.__parse_anchore_compliance(
- test, items, book["Anchore Compliance Results"]
+ test, items, book["Anchore Compliance Results"],
)
return items
@@ -68,7 +68,7 @@ def __parse_disa(self, test, items, sheet):
if row[headers["identifiers"]]:
finding.unsaved_vulnerability_ids = [
- row[headers["identifiers"]]
+ row[headers["identifiers"]],
]
finding.unsaved_tags = tags
@@ -140,7 +140,7 @@ def __parse_twistlock(self, test, items, sheet):
component_name = row[headers["packageName"]]
component_version = row[headers["packageVersion"]]
title = "{}: {} - {}".format(
- row[headers["cve"]], component_name, component_version
+ row[headers["cve"]], component_name, component_version,
)
if row[headers["severity"]] == "important":
severity = "High"
@@ -235,7 +235,7 @@ def __parse_anchore_compliance(self, test, items, sheet):
row[headers["check_output"]],
)
title = "{}: {}".format(
- row[headers["policy_id"]], row[headers["trigger_id"]]
+ row[headers["policy_id"]], row[headers["trigger_id"]],
)
tags = "anchore_compliance"
diff --git a/dojo/tools/eslint/parser.py b/dojo/tools/eslint/parser.py
index 2b698e7b17..9e282cca41 100644
--- a/dojo/tools/eslint/parser.py
+++ b/dojo/tools/eslint/parser.py
@@ -48,7 +48,7 @@ def get_findings(self, filename, test):
findingdetail += "Line number: " + str(message["line"]) + "\n"
sev = self._convert_eslint_severity_to_dojo_severity(
- message["severity"]
+ message["severity"],
)
find = Finding(
diff --git a/dojo/tools/fortify/fpr_parser.py b/dojo/tools/fortify/fpr_parser.py
index ca4d268668..b2fb90474e 100644
--- a/dojo/tools/fortify/fpr_parser.py
+++ b/dojo/tools/fortify/fpr_parser.py
@@ -63,7 +63,7 @@ def parse_fpr(self, filename, test):
unique_id_from_tool=ClassID,
file_path=SourceLocationpath,
line=SourceLocationline,
- )
+ ),
)
return items
diff --git a/dojo/tools/fortify/xml_parser.py b/dojo/tools/fortify/xml_parser.py
index 92469da88c..5a09e8e1e7 100644
--- a/dojo/tools/fortify/xml_parser.py
+++ b/dojo/tools/fortify/xml_parser.py
@@ -75,7 +75,7 @@ def parse_xml(self, filename, test):
dupes = set()
for issue_key, issue in issue_map.items():
title = self.format_title(
- issue["Category"], issue["FileName"], issue["LineStart"]
+ issue["Category"], issue["FileName"], issue["LineStart"],
)
if title not in dupes:
items.append(
@@ -89,7 +89,7 @@ def parse_xml(self, filename, test):
description=self.format_description(issue, cat_meta),
mitigation=self.format_mitigation(issue, cat_meta),
unique_id_from_tool=issue_key,
- )
+ ),
)
dupes.add(title)
return items
@@ -117,7 +117,7 @@ def format_description(self, issue, meta_info) -> str:
"##Source:\nThis snippet provides more context on the execution path that "
"leads to this finding. \n"
"####Snippet:\n**File: {}: {}**\n```\n{}\n```\n".format(
- source["FileName"], source["LineStart"], source["Snippet"]
+ source["FileName"], source["LineStart"], source["Snippet"],
)
)
if explanation:
diff --git a/dojo/tools/gcloud_artifact_scan/parser.py b/dojo/tools/gcloud_artifact_scan/parser.py
index d531f9b6f6..e53da28967 100644
--- a/dojo/tools/gcloud_artifact_scan/parser.py
+++ b/dojo/tools/gcloud_artifact_scan/parser.py
@@ -51,7 +51,7 @@ def get_findings(self, json_output, test):
component_version=vuln["vulnerability"]["packageIssue"][0]["affectedVersion"]["fullName"],
static_finding=True,
dynamic_finding=False,
- cvssv3_score=vuln["vulnerability"]["cvssScore"]
+ cvssv3_score=vuln["vulnerability"]["cvssScore"],
)
findings.append(finding)
return findings
diff --git a/dojo/tools/generic/csv_parser.py b/dojo/tools/generic/csv_parser.py
index 2bf500da1b..b41b3789f2 100644
--- a/dojo/tools/generic/csv_parser.py
+++ b/dojo/tools/generic/csv_parser.py
@@ -16,7 +16,7 @@ def _get_findings_csv(self, filename):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
dupes = {}
@@ -27,7 +27,7 @@ def _get_findings_csv(self, filename):
date=parse(row["Date"]).date(),
severity=self.get_severity(row["Severity"]),
duplicate=self._convert_bool(
- row.get("Duplicate", "FALSE")
+ row.get("Duplicate", "FALSE"),
), # bool False by default
nb_occurences=1,
)
@@ -56,11 +56,11 @@ def _get_findings_csv(self, filename):
if "Vulnerability Id" in row and row["Vulnerability Id"]:
if finding.unsaved_vulnerability_ids:
finding.unsaved_vulnerability_ids.append(
- row["Vulnerability Id"]
+ row["Vulnerability Id"],
)
else:
finding.unsaved_vulnerability_ids = [
- row["Vulnerability Id"]
+ row["Vulnerability Id"],
]
# manage CWE
if "CweId" in row:
@@ -76,19 +76,19 @@ def _get_findings_csv(self, filename):
finding.unsaved_endpoints = [
Endpoint.from_uri(row["Url"])
if "://" in row["Url"]
- else Endpoint.from_uri("//" + row["Url"])
+ else Endpoint.from_uri("//" + row["Url"]),
]
# manage internal de-duplication
key = hashlib.sha256(
- f"{finding.severity}|{finding.title}|{finding.description}".encode()
+ f"{finding.severity}|{finding.title}|{finding.description}".encode(),
).hexdigest()
if key in dupes:
find = dupes[key]
find.unsaved_endpoints.extend(finding.unsaved_endpoints)
if find.unsaved_vulnerability_ids:
find.unsaved_vulnerability_ids.extend(
- finding.unsaved_vulnerability_ids
+ finding.unsaved_vulnerability_ids,
)
else:
find.unsaved_vulnerability_ids = (
diff --git a/dojo/tools/generic/json_parser.py b/dojo/tools/generic/json_parser.py
index ecf605e835..296209f3d2 100644
--- a/dojo/tools/generic/json_parser.py
+++ b/dojo/tools/generic/json_parser.py
@@ -109,7 +109,7 @@ def _get_test_json(self, data):
if unsaved_vulnerability_ids:
if finding.unsaved_vulnerability_ids:
finding.unsaved_vulnerability_ids.append(
- unsaved_vulnerability_ids
+ unsaved_vulnerability_ids,
)
else:
finding.unsaved_vulnerability_ids = (
diff --git a/dojo/tools/ggshield/parser.py b/dojo/tools/ggshield/parser.py
index 383c334390..54a5bd23e7 100644
--- a/dojo/tools/ggshield/parser.py
+++ b/dojo/tools/ggshield/parser.py
@@ -110,7 +110,7 @@ def get_items(self, item, findings, dupes, test):
+ findings["match"]
+ str(findings["line_start"])
+ str(findings["line_end"])
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/github_vulnerability/parser.py b/dojo/tools/github_vulnerability/parser.py
index 3739fc6f20..b03dbc01e5 100644
--- a/dojo/tools/github_vulnerability/parser.py
+++ b/dojo/tools/github_vulnerability/parser.py
@@ -27,7 +27,7 @@ def get_findings(self, filename, test):
if "repository" in data["data"]:
if "nameWithOwner" in data["data"]["repository"]:
repository_url = "https://github.com/{}".format(
- data["data"]["repository"]["nameWithOwner"]
+ data["data"]["repository"]["nameWithOwner"],
)
if "url" in data["data"]["repository"]:
repository_url = data["data"]["repository"]["url"]
@@ -50,7 +50,7 @@ def get_findings(self, filename, test):
test=test,
description=description,
severity=self._convert_security(
- alert["securityVulnerability"].get("severity", "MODERATE")
+ alert["securityVulnerability"].get("severity", "MODERATE"),
),
static_finding=True,
dynamic_finding=False,
@@ -85,7 +85,7 @@ def get_findings(self, filename, test):
]:
if identifier.get("value"):
unsaved_vulnerability_ids.append(
- identifier.get("value")
+ identifier.get("value"),
)
if unsaved_vulnerability_ids:
finding.unsaved_vulnerability_ids = (
@@ -110,7 +110,7 @@ def get_findings(self, filename, test):
]["cvss"]["vectorString"]
if cvss_vector_string is not None:
cvss_objects = cvss_parser.parse_cvss_from_text(
- cvss_vector_string
+ cvss_vector_string,
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
diff --git a/dojo/tools/gitlab_api_fuzzing/parser.py b/dojo/tools/gitlab_api_fuzzing/parser.py
index a0992b0e51..f7fae3718e 100644
--- a/dojo/tools/gitlab_api_fuzzing/parser.py
+++ b/dojo/tools/gitlab_api_fuzzing/parser.py
@@ -42,7 +42,7 @@ def get_findings(self, file, test):
static_finding=False,
dynamic_finding=True,
unique_id_from_tool=vulnerability["id"],
- )
+ ),
)
return findings
diff --git a/dojo/tools/gitlab_container_scan/parser.py b/dojo/tools/gitlab_container_scan/parser.py
index 4aa245c399..7dd65305e8 100644
--- a/dojo/tools/gitlab_container_scan/parser.py
+++ b/dojo/tools/gitlab_container_scan/parser.py
@@ -119,13 +119,13 @@ def get_findings(self, file, test):
dependency_name = self._get_dependency_name(dependency)
if dependency_name:
finding.component_name = textwrap.shorten(
- dependency_name, width=190, placeholder="..."
+ dependency_name, width=190, placeholder="...",
)
dependency_version = self._get_dependency_version(dependency)
if dependency_version:
finding.component_version = textwrap.shorten(
- dependency_version, width=90, placeholder="..."
+ dependency_version, width=90, placeholder="...",
)
if "solution" in vulnerability:
diff --git a/dojo/tools/gitlab_dast/parser.py b/dojo/tools/gitlab_dast/parser.py
index 83a7829af6..7728dd00ef 100644
--- a/dojo/tools/gitlab_dast/parser.py
+++ b/dojo/tools/gitlab_dast/parser.py
@@ -35,12 +35,12 @@ def get_items(self, tree, test):
item = self.get_item(node, test, scanner)
item_key = hashlib.sha256(
- f"{item.severity}|{item.title}|{item.description}".encode()
+ f"{item.severity}|{item.title}|{item.description}".encode(),
).hexdigest()
if item_key in items:
items[item_key].unsaved_endpoints.extend(
- item.unsaved_endpoints
+ item.unsaved_endpoints,
)
items[item_key].nb_occurences += 1
else:
@@ -64,7 +64,7 @@ def get_confidence_numeric(self, confidence):
def get_item(self, vuln, test, scanner):
# scanner_confidence
scanner_confidence = self.get_confidence_numeric(
- vuln.get("confidence", "Could not be determined")
+ vuln.get("confidence", "Could not be determined"),
)
# description
@@ -88,7 +88,7 @@ def get_item(self, vuln, test, scanner):
# date
if "discovered_at" in vuln:
finding.date = datetime.strptime(
- vuln["discovered_at"], "%Y-%m-%dT%H:%M:%S.%f"
+ vuln["discovered_at"], "%Y-%m-%dT%H:%M:%S.%f",
)
# id
diff --git a/dojo/tools/gitlab_sast/parser.py b/dojo/tools/gitlab_sast/parser.py
index b00a04a5e6..68dcfc9fd6 100644
--- a/dojo/tools/gitlab_sast/parser.py
+++ b/dojo/tools/gitlab_sast/parser.py
@@ -37,7 +37,7 @@ def get_tests(self, scan_type, handle):
test = ParserTest(
name=scanner_name,
type=scanner_type,
- version=scanner_version
+ version=scanner_version,
)
test.findings = self.get_items(tree)
return [test]
@@ -67,7 +67,7 @@ def get_confidence_numeric(self, argument):
'High': 3, # Firm
'Medium': 4, # Firm
'Low': 6, # Tentative
- 'Experimental': 7 # Tentative
+ 'Experimental': 7, # Tentative
}
return switcher.get(argument, None)
diff --git a/dojo/tools/gitleaks/parser.py b/dojo/tools/gitleaks/parser.py
index 40ec9b9a81..83c4b3beb3 100644
--- a/dojo/tools/gitleaks/parser.py
+++ b/dojo/tools/gitleaks/parser.py
@@ -98,7 +98,7 @@ def get_finding_legacy(self, issue, test, dupes):
finding.unsaved_tags = issue.get("tags", "").split(", ")
dupe_key = hashlib.sha256(
- (issue["offender"] + file_path + str(line)).encode("utf-8")
+ (issue["offender"] + file_path + str(line)).encode("utf-8"),
).hexdigest()
if dupe_key not in dupes:
@@ -152,7 +152,7 @@ def get_finding_current(self, issue, test, dupes):
severity = "High"
dupe_key = hashlib.md5(
- (title + secret + str(line)).encode("utf-8")
+ (title + secret + str(line)).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
@@ -173,7 +173,7 @@ def get_finding_current(self, issue, test, dupes):
line=line,
dynamic_finding=False,
static_finding=True,
- nb_occurences=1
+ nb_occurences=1,
)
if tags:
finding.unsaved_tags = tags
diff --git a/dojo/tools/gosec/parser.py b/dojo/tools/gosec/parser.py
index 69056d9281..cbcf3b4507 100644
--- a/dojo/tools/gosec/parser.py
+++ b/dojo/tools/gosec/parser.py
@@ -43,7 +43,7 @@ def get_findings(self, filename, test):
# Best attempt at ongoing documentation provided by gosec, based on
# rule id
references = "https://securego.io/docs/rules/{}.html".format(
- item["rule_id"]
+ item["rule_id"],
).lower()
if scanner_confidence:
@@ -80,7 +80,7 @@ def get_findings(self, filename, test):
file_path=filename,
line=line,
scanner_confidence=scanner_confidence,
- static_finding=True
+ static_finding=True,
)
dupes[dupe_key] = find
diff --git a/dojo/tools/govulncheck/parser.py b/dojo/tools/govulncheck/parser.py
index f348a33a06..6e76330e68 100644
--- a/dojo/tools/govulncheck/parser.py
+++ b/dojo/tools/govulncheck/parser.py
@@ -81,7 +81,7 @@ def get_findings(self, scan_file, test):
# Parsing for old govulncheck output format
list_vulns = data["Vulns"]
for cve, elems in groupby(
- list_vulns, key=lambda vuln: vuln["OSV"]["aliases"][0]
+ list_vulns, key=lambda vuln: vuln["OSV"]["aliases"][0],
):
first_elem = list(islice(elems, 1))
d = {
@@ -92,7 +92,7 @@ def get_findings(self, scan_file, test):
"package"
]["name"],
"component_version": self.get_version(
- data, first_elem[0]["RequireSink"]
+ data, first_elem[0]["RequireSink"],
),
}
d["references"] = first_elem[0]["OSV"]["references"][0][
@@ -105,19 +105,19 @@ def get_findings(self, scan_file, test):
vuln_methods = set(
first_elem[0]["OSV"]["affected"][0][
"ecosystem_specific"
- ]["imports"][0]["symbols"]
+ ]["imports"][0]["symbols"],
)
impact = set(
- self.get_location(data, first_elem[0]["CallSink"])
+ self.get_location(data, first_elem[0]["CallSink"]),
)
for elem in elems:
impact.update(
- self.get_location(data, elem["CallSink"])
+ self.get_location(data, elem["CallSink"]),
)
vuln_methods.update(
elem["OSV"]["affected"][0]["ecosystem_specific"][
"imports"
- ][0]["symbols"]
+ ][0]["symbols"],
)
d["impact"] = "; ".join(impact) if impact else None
d[
@@ -151,7 +151,7 @@ def get_findings(self, scan_file, test):
range_info = "\n ".join(formatted_ranges)
vuln_functions = ", ".join(
- set(osv_data["affected"][0]["ecosystem_specific"]["imports"][0].get("symbols", []))
+ set(osv_data["affected"][0]["ecosystem_specific"]["imports"][0].get("symbols", [])),
)
description = (
@@ -195,7 +195,7 @@ def get_findings(self, scan_file, test):
"references": references,
"file_path": path,
"url": db_specific_url,
- "unique_id_from_tool": id
+ "unique_id_from_tool": id,
}
findings.append(Finding(**d))
diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py
index 9708bedfc0..457e01c06f 100644
--- a/dojo/tools/h1/parser.py
+++ b/dojo/tools/h1/parser.py
@@ -38,7 +38,7 @@ def get_findings(self, file, test):
# Get all relevant data
date = content["attributes"]["created_at"]
date = datetime.strftime(
- datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d"
+ datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d",
)
# Build the title of the Dojo finding
title = "#" + content["id"] + " " + content["attributes"]["title"]
@@ -68,7 +68,7 @@ def get_findings(self, file, test):
severity = "Info"
# Build the references of the Dojo finding
ref_link = "https://hackerone.com/reports/{}".format(
- content.get("id")
+ content.get("id"),
)
references += f"[{ref_link}]({ref_link})"
@@ -83,13 +83,13 @@ def get_findings(self, file, test):
cwe = int(
content["relationships"]["weakness"]["data"]["attributes"][
"external_id"
- ][4:]
+ ][4:],
)
except Exception:
cwe = 0
dupe_key = hashlib.md5(
- str(references + title).encode("utf-8")
+ str(references + title).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
finding = dupes[dupe_key]
@@ -112,7 +112,7 @@ def get_findings(self, file, test):
impact="No impact provided",
references=references,
cwe=cwe,
- dynamic_finding=False
+ dynamic_finding=False,
)
finding.unsaved_endpoints = []
dupes[dupe_key] = finding
@@ -121,7 +121,7 @@ def get_findings(self, file, test):
def build_description(self, content):
date = content["attributes"]["created_at"]
date = datetime.strftime(
- datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d"
+ datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d",
)
reporter = content["relationships"]["reporter"]["data"]["attributes"][
"username"
@@ -151,7 +151,7 @@ def build_description(self, content):
# Build rest of description meat
description += "##Report: \n{}\n".format(
- content["attributes"]["vulnerability_information"]
+ content["attributes"]["vulnerability_information"],
)
# Try to grab weakness if it's there
diff --git a/dojo/tools/hadolint/parser.py b/dojo/tools/hadolint/parser.py
index 4624dcbf99..d781e83b8a 100644
--- a/dojo/tools/hadolint/parser.py
+++ b/dojo/tools/hadolint/parser.py
@@ -55,7 +55,7 @@ def get_item(vulnerability, test):
file_path=vulnerability["file"],
line=vulnerability["line"],
description="Vulnerability ID: {}\nDetails: {}\n".format(
- vulnerability["code"], vulnerability["message"]
+ vulnerability["code"], vulnerability["message"],
),
static_finding=True,
dynamic_finding=False,
diff --git a/dojo/tools/horusec/parser.py b/dojo/tools/horusec/parser.py
index 8e9571820f..7dce06ad64 100644
--- a/dojo/tools/horusec/parser.py
+++ b/dojo/tools/horusec/parser.py
@@ -29,7 +29,7 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
data = json.load(filename)
report_date = datetime.strptime(
- data.get("createdAt")[0:10], "%Y-%m-%d"
+ data.get("createdAt")[0:10], "%Y-%m-%d",
)
return [
self._get_finding(node, report_date)
@@ -40,7 +40,7 @@ def get_tests(self, scan_type, scan):
data = json.load(scan)
report_date = parse(data.get("createdAt"))
test = ParserTest(
- name=self.ID, type=self.ID, version=data.get("version").lstrip("v")
+ name=self.ID, type=self.ID, version=data.get("version").lstrip("v"),
) # remove the v in vX.Y.Z
test.description = "\n".join(
[
@@ -49,7 +49,7 @@ def get_tests(self, scan_type, scan):
"```",
data.get("errors").replace("```", "``````"),
"```",
- ]
+ ],
)
test.findings = [
self._get_finding(node, report_date)
@@ -65,7 +65,7 @@ def _get_finding(self, data, date):
f"```{data['vulnerabilities']['language']}",
data["vulnerabilities"]["code"].replace("```", "``````").replace("\x00", ""),
"```",
- ]
+ ],
)
finding = Finding(
title=data["vulnerabilities"]["details"].split("\n")[0],
diff --git a/dojo/tools/huskyci/parser.py b/dojo/tools/huskyci/parser.py
index 472ffbbf6f..028f4e1845 100644
--- a/dojo/tools/huskyci/parser.py
+++ b/dojo/tools/huskyci/parser.py
@@ -53,7 +53,7 @@ def get_items(self, tree, test):
if vuln["severity"] not in ("High", "Medium", "Low"):
continue
unique_key = hashlib.md5(
- str(vuln).encode("utf-8")
+ str(vuln).encode("utf-8"),
).hexdigest()
item = get_item(vuln, test)
items[unique_key] = item
@@ -86,7 +86,7 @@ def get_item(item_node, test):
line=item_node.get("line"),
static_finding=True,
dynamic_finding=False,
- impact="No impact provided"
+ impact="No impact provided",
)
return finding
diff --git a/dojo/tools/hydra/parser.py b/dojo/tools/hydra/parser.py
index 677130bd47..c42e8637f2 100644
--- a/dojo/tools/hydra/parser.py
+++ b/dojo/tools/hydra/parser.py
@@ -44,7 +44,7 @@ def get_findings(self, json_output, test):
return findings
def __extract_findings(
- self, raw_findings, metadata: HydraScanMetadata, test
+ self, raw_findings, metadata: HydraScanMetadata, test,
):
findings = []
@@ -54,13 +54,13 @@ def __extract_findings(
findings.append(finding)
except ValueError:
logger.warning(
- "Error when digesting a finding from hydra! Please revise supplied report, vital information was missing (e.g. host)!"
+ "Error when digesting a finding from hydra! Please revise supplied report, vital information was missing (e.g. host)!",
)
return findings
def __extract_finding(
- self, raw_finding, metadata: HydraScanMetadata, test
+ self, raw_finding, metadata: HydraScanMetadata, test,
) -> Finding:
host = raw_finding.get("host")
port = raw_finding.get("port")
@@ -92,7 +92,7 @@ def __extract_finding(
+ password,
static_finding=False,
dynamic_finding=True,
- service=metadata.service_type
+ service=metadata.service_type,
)
finding.unsaved_endpoints = [Endpoint(host=host, port=port)]
diff --git a/dojo/tools/ibm_app/parser.py b/dojo/tools/ibm_app/parser.py
index 72700fd72c..908b632926 100644
--- a/dojo/tools/ibm_app/parser.py
+++ b/dojo/tools/ibm_app/parser.py
@@ -53,21 +53,21 @@ def get_findings(self, file, test):
if severity == "Informational":
severity = "Info"
issue_description = self.fetch_advisory_group(
- root, issue_data["advisory"]
+ root, issue_data["advisory"],
)
for fix_recommendation_group in root.iter(
- "fix-recommendation-group"
+ "fix-recommendation-group",
):
for recommendation in fix_recommendation_group.iter(
- "item"
+ "item",
):
if (
recommendation.attrib["id"]
== issue_data["fix-recommendation"]
):
data = recommendation.find(
- "general/fixRecommendation"
+ "general/fixRecommendation",
)
for data_text in data.iter("text"):
recommendation_data += (
@@ -82,8 +82,8 @@ def get_findings(self, file, test):
# endpoints
dupe_key = hashlib.md5(
str(issue_description + name + severity).encode(
- "utf-8"
- )
+ "utf-8",
+ ),
).hexdigest()
# check if finding is a duplicate
if dupe_key in dupes:
@@ -100,11 +100,11 @@ def get_findings(self, file, test):
severity=severity,
mitigation=recommendation_data,
references=ref_link,
- dynamic_finding=True
+ dynamic_finding=True,
)
if vulnerability_id:
finding.unsaved_vulnerability_ids = [
- vulnerability_id
+ vulnerability_id,
]
finding.unsaved_endpoints = []
dupes[dupe_key] = finding
@@ -115,7 +115,7 @@ def get_findings(self, file, test):
# urls
if url:
finding.unsaved_endpoints.append(
- Endpoint.from_uri(url)
+ Endpoint.from_uri(url),
)
return list(dupes.values())
@@ -129,7 +129,7 @@ def fetch_issue_types(self, root):
"name": item.find("name").text,
"advisory": item.find("advisory/ref").text,
"fix-recommendation": item.find(
- "fix-recommendation/ref"
+ "fix-recommendation/ref",
).text,
}
@@ -155,7 +155,7 @@ def fetch_advisory_group(self, root, advisory):
for item in advisory_group.iter("item"):
if item.attrib["id"] == advisory:
return item.find(
- "advisory/testTechnicalDescription/text"
+ "advisory/testTechnicalDescription/text",
).text
return "N/A"
diff --git a/dojo/tools/immuniweb/parser.py b/dojo/tools/immuniweb/parser.py
index 5076259f7f..53242fcd2f 100644
--- a/dojo/tools/immuniweb/parser.py
+++ b/dojo/tools/immuniweb/parser.py
@@ -58,7 +58,7 @@ def get_findings(self, file, test):
url = vulnerability.find("URL").text
dupe_key = hashlib.md5(
- str(description + title + severity).encode("utf-8")
+ str(description + title + severity).encode("utf-8"),
).hexdigest()
# check if finding is a duplicate
@@ -78,7 +78,7 @@ def get_findings(self, file, test):
mitigation=mitigation,
impact=impact,
references=reference,
- dynamic_finding=True
+ dynamic_finding=True,
)
if vulnerability_id:
finding.unsaved_vulnerability_ids = [vulnerability_id]
diff --git a/dojo/tools/intsights/csv_handler.py b/dojo/tools/intsights/csv_handler.py
index 828cfaf802..c9493d7db2 100644
--- a/dojo/tools/intsights/csv_handler.py
+++ b/dojo/tools/intsights/csv_handler.py
@@ -33,7 +33,7 @@ def _parse_csv(self, csv_file) -> [dict]:
"Closed Reason",
"Additional Info",
"Rating",
- "Alert Link"
+ "Alert Link",
]
# These keys require a value. If one or more of the values is null or empty, the entire Alert is ignored.
@@ -47,12 +47,12 @@ def _parse_csv(self, csv_file) -> [dict]:
if isinstance(content, bytes):
content = content.decode("utf-8")
csv_reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
# Don't bother parsing if the keys don't match exactly what's expected
if collections.Counter(default_keys) == collections.Counter(
- csv_reader.fieldnames
+ csv_reader.fieldnames,
):
default_valud = "None provided"
for alert in csv_reader:
@@ -64,13 +64,13 @@ def _parse_csv(self, csv_file) -> [dict]:
"Type",
)
alert["source_date"] = alert.pop(
- "Source Date (UTC)", default_valud
+ "Source Date (UTC)", default_valud,
)
alert["report_date"] = alert.pop(
- "Report Date (UTC)", default_valud
+ "Report Date (UTC)", default_valud,
)
alert["network_type"] = alert.pop(
- "Network Type", default_valud
+ "Network Type", default_valud,
)
alert["source_url"] = alert.pop("Source URL", default_valud)
alert["assets"] = alert.pop("Assets", default_valud)
@@ -89,7 +89,7 @@ def _parse_csv(self, csv_file) -> [dict]:
alerts.append(alert)
else:
self._LOGGER.error(
- "The CSV file has one or more missing or unexpected header values"
+ "The CSV file has one or more missing or unexpected header values",
)
return alerts
diff --git a/dojo/tools/intsights/json_handler.py b/dojo/tools/intsights/json_handler.py
index ec315ac101..db45e50364 100644
--- a/dojo/tools/intsights/json_handler.py
+++ b/dojo/tools/intsights/json_handler.py
@@ -21,19 +21,19 @@ def _parse_json(self, json_file) -> [dict]:
alert["severity"] = original_alert["Details"]["Severity"]
alert["type"] = original_alert["Details"]["Type"]
alert["source_date"] = original_alert["Details"]["Source"].get(
- "Date", "None provided"
+ "Date", "None provided",
)
alert["report_date"] = original_alert.get(
- "FoundDate", "None provided"
+ "FoundDate", "None provided",
)
alert["network_type"] = original_alert["Details"]["Source"].get(
- "NetworkType"
+ "NetworkType",
)
alert["source_url"] = original_alert["Details"]["Source"].get(
- "URL"
+ "URL",
)
alert["assets"] = ",".join(
- [item.get("Value") for item in original_alert["Assets"]]
+ [item.get("Value") for item in original_alert["Assets"]],
)
alert["tags"] = original_alert["Details"].get("Tags")
alert["status"] = (
diff --git a/dojo/tools/intsights/parser.py b/dojo/tools/intsights/parser.py
index cd6a61a57a..e49c61b852 100644
--- a/dojo/tools/intsights/parser.py
+++ b/dojo/tools/intsights/parser.py
@@ -38,8 +38,8 @@ def _build_finding_description(self, alert: dict) -> str:
f'**Source Date**: ` {alert.get("source_date", "None provided")} `',
f'**Source Network Type**: `{alert.get("network_type", "None provided")} `',
f'**Assets Affected**: `{alert.get("assets", "None provided")} `',
- f'**Alert Link**: {alert.get("alert_link", "None provided")}'
- ]
+ f'**Alert Link**: {alert.get("alert_link", "None provided")}',
+ ],
)
return description
@@ -66,7 +66,7 @@ def get_findings(self, file, test):
references=alert["alert_link"],
static_finding=False,
dynamic_finding=True,
- unique_id_from_tool=alert["alert_id"]
+ unique_id_from_tool=alert["alert_id"],
)
duplicates[dupe_key] = alert
if dupe_key not in duplicates:
diff --git a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py
index 7453669b47..5261b802f2 100644
--- a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py
+++ b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py
@@ -98,7 +98,7 @@ def get_item(
artifact_sha256
+ impact_path.name
+ impact_path.version
- + vulnerability["issue_id"]
+ + vulnerability["issue_id"],
)
vuln_id_from_tool = vulnerability["issue_id"]
elif cve:
@@ -108,7 +108,7 @@ def get_item(
artifact_sha256
+ impact_path.name
+ impact_path.version
- + vulnerability["summary"]
+ + vulnerability["summary"],
)
vuln_id_from_tool = ""
result.update(unique_id.encode())
diff --git a/dojo/tools/jfrog_xray_unified/parser.py b/dojo/tools/jfrog_xray_unified/parser.py
index e8b36d1b34..12efe1afff 100644
--- a/dojo/tools/jfrog_xray_unified/parser.py
+++ b/dojo/tools/jfrog_xray_unified/parser.py
@@ -106,7 +106,7 @@ def get_item(vulnerability, test):
references = "\n".join(vulnerability["references"])
scan_time = datetime.strptime(
- vulnerability["artifact_scan_time"], "%Y-%m-%dT%H:%M:%S%z"
+ vulnerability["artifact_scan_time"], "%Y-%m-%dT%H:%M:%S%z",
)
# component has several parts separated by colons. Last part is the
diff --git a/dojo/tools/jfrogxray/parser.py b/dojo/tools/jfrogxray/parser.py
index 36ffa900cf..a1351dc077 100644
--- a/dojo/tools/jfrogxray/parser.py
+++ b/dojo/tools/jfrogxray/parser.py
@@ -32,7 +32,7 @@ def get_items(self, tree, test):
title_cve = "No CVE"
more_details = node.get("component_versions").get(
- "more_details"
+ "more_details",
)
if "cves" in more_details:
if "cve" in more_details.get("cves")[0]:
@@ -97,13 +97,13 @@ def get_item(vulnerability, test):
if "fixed_versions" in vulnerability["component_versions"]:
mitigation = "**Versions containing a fix:**\n"
mitigation = mitigation + "\n".join(
- vulnerability["component_versions"]["fixed_versions"]
+ vulnerability["component_versions"]["fixed_versions"],
)
if "vulnerable_versions" in vulnerability["component_versions"]:
extra_desc = "\n**Versions that are vulnerable:**\n"
extra_desc += "\n".join(
- vulnerability["component_versions"]["vulnerable_versions"]
+ vulnerability["component_versions"]["vulnerable_versions"],
)
provider = (
diff --git a/dojo/tools/kics/parser.py b/dojo/tools/kics/parser.py
index f0b2c1defc..2708047399 100644
--- a/dojo/tools/kics/parser.py
+++ b/dojo/tools/kics/parser.py
@@ -65,7 +65,7 @@ def get_findings(self, filename, test):
+ file_name
+ expected_value
+ str(line_number)
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/kiuwan/parser.py b/dojo/tools/kiuwan/parser.py
index 4eeb8146af..5d91e5a315 100644
--- a/dojo/tools/kiuwan/parser.py
+++ b/dojo/tools/kiuwan/parser.py
@@ -40,7 +40,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
csvarray = []
@@ -124,7 +124,7 @@ def get_findings(self, filename, test):
+ finding.description
+ "|"
+ str(finding.cwe)
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/kubehunter/parser.py b/dojo/tools/kubehunter/parser.py
index 54e2bfa842..ef9abf25c8 100644
--- a/dojo/tools/kubehunter/parser.py
+++ b/dojo/tools/kubehunter/parser.py
@@ -74,7 +74,7 @@ def get_findings(self, file, test):
duplicate=False,
out_of_scope=False,
vuln_id_from_tool=vulnerability_id,
- steps_to_reproduce=steps_to_reproduce
+ steps_to_reproduce=steps_to_reproduce,
)
# internal de-duplication
diff --git a/dojo/tools/kubescape/parser.py b/dojo/tools/kubescape/parser.py
index be9cd6d741..d26031a6a7 100644
--- a/dojo/tools/kubescape/parser.py
+++ b/dojo/tools/kubescape/parser.py
@@ -116,7 +116,7 @@ def get_findings(self, filename, test):
severity=severity,
component_name=resourceid,
static_finding=True,
- dynamic_finding=False
+ dynamic_finding=False,
)
findings.append(find)
return findings
diff --git a/dojo/tools/mend/parser.py b/dojo/tools/mend/parser.py
index 5fc6464526..60ad893109 100644
--- a/dojo/tools/mend/parser.py
+++ b/dojo/tools/mend/parser.py
@@ -76,7 +76,7 @@ def _build_common_output(node, lib_name=None):
cvss3_score = node.get("cvss3_score", None)
cvss3_vector = node.get("scoreMetadataVector", None)
severity_justification = "CVSS v3 score: {} ({})".format(
- cvss3_score if cvss3_score is not None else "N/A", cvss3_vector if cvss3_vector is not None else "N/A"
+ cvss3_score if cvss3_score is not None else "N/A", cvss3_vector if cvss3_vector is not None else "N/A",
)
cwe = 1035 # default OWASP a9 until the report actually has them
@@ -99,7 +99,7 @@ def _build_common_output(node, lib_name=None):
filepaths.append(sfile.get("localPath"))
except Exception:
logger.exception(
- "Error handling local paths for vulnerability."
+ "Error handling local paths for vulnerability.",
)
new_finding = Finding(
@@ -115,7 +115,7 @@ def _build_common_output(node, lib_name=None):
severity_justification=severity_justification,
dynamic_finding=True,
cvssv3=cvss3_vector,
- cvssv3_score=float(cvss3_score) if cvss3_score is not None else None
+ cvssv3_score=float(cvss3_score) if cvss3_score is not None else None,
)
if cve:
new_finding.unsaved_vulnerability_ids = [cve]
@@ -136,7 +136,7 @@ def _build_common_output(node, lib_name=None):
):
for vuln in lib_node.get("vulnerabilities"):
findings.append(
- _build_common_output(vuln, lib_node.get("name"))
+ _build_common_output(vuln, lib_node.get("name")),
)
elif "vulnerabilities" in content:
@@ -152,7 +152,7 @@ def create_finding_key(f: Finding) -> str:
"""
return hashlib.md5(
f.description.encode("utf-8")
- + f.title.encode("utf-8")
+ + f.title.encode("utf-8"),
).hexdigest()
dupes = {}
diff --git a/dojo/tools/meterian/parser.py b/dojo/tools/meterian/parser.py
index ab9fa93392..cb5f0193be 100644
--- a/dojo/tools/meterian/parser.py
+++ b/dojo/tools/meterian/parser.py
@@ -20,11 +20,11 @@ def get_findings(self, report, test):
report_json = json.load(report)
security_reports = self.get_security_reports(report_json)
scan_date = str(
- datetime.fromisoformat(report_json["timestamp"]).date()
+ datetime.fromisoformat(report_json["timestamp"]).date(),
)
for single_security_report in security_reports:
findings += self.do_get_findings(
- single_security_report, scan_date, test
+ single_security_report, scan_date, test,
)
return findings
diff --git a/dojo/tools/microfocus_webinspect/parser.py b/dojo/tools/microfocus_webinspect/parser.py
index 9764a2e8db..6b1669ffaf 100644
--- a/dojo/tools/microfocus_webinspect/parser.py
+++ b/dojo/tools/microfocus_webinspect/parser.py
@@ -36,7 +36,7 @@ def get_findings(self, file, test):
mitigation = None
reference = None
severity = MicrofocusWebinspectParser.convert_severity(
- issue.find("Severity").text
+ issue.find("Severity").text,
)
for content in issue.findall("ReportSection"):
name = content.find("Name").text
@@ -49,7 +49,7 @@ def get_findings(self, file, test):
if "Reference" in name:
if name and content.find("SectionText").text:
reference = html2text.html2text(
- content.find("SectionText").text
+ content.find("SectionText").text,
)
cwe = 0
description = ""
@@ -81,7 +81,7 @@ def get_findings(self, file, test):
# make dupe hash key
dupe_key = hashlib.sha256(
- f"{finding.description}|{finding.title}|{finding.severity}".encode()
+ f"{finding.description}|{finding.title}|{finding.severity}".encode(),
).hexdigest()
# check if dupes are present.
if dupe_key in dupes:
diff --git a/dojo/tools/mobsf/parser.py b/dojo/tools/mobsf/parser.py
index 2cbdca7920..6567c69b82 100644
--- a/dojo/tools/mobsf/parser.py
+++ b/dojo/tools/mobsf/parser.py
@@ -95,7 +95,7 @@ def get_findings(self, filename, test):
"title": details.get("name", ""),
"severity": self.getSeverityForPermission(details.get("status")),
"description": "**Permission Type:** " + details.get("name", "") + " (" + details.get("status", "") + ")\n\n**Description:** " + details.get("description", "") + "\n\n**Reason:** " + details.get("reason", ""),
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
else:
@@ -105,7 +105,7 @@ def get_findings(self, filename, test):
"title": permission,
"severity": self.getSeverityForPermission(details.get("status", "")),
"description": "**Permission Type:** " + permission + "\n\n**Description:** " + details.get("description", ""),
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -121,7 +121,7 @@ def get_findings(self, filename, test):
"title": "Insecure Connections",
"severity": "Low",
"description": insecure_urls,
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -136,7 +136,7 @@ def get_findings(self, filename, test):
"title": details[2],
"severity": details[0].title(),
"description": details[1] + "\n\n**Certificate Info:** " + certificate_info,
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
elif len(details) == 2:
@@ -145,7 +145,7 @@ def get_findings(self, filename, test):
"title": details[1],
"severity": details[0].title(),
"description": details[1] + "\n\n**Certificate Info:** " + certificate_info,
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
else:
@@ -161,7 +161,7 @@ def get_findings(self, filename, test):
"title": details["title"],
"severity": details["severity"].title(),
"description": details["description"] + "\n\n " + details["name"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
else:
@@ -171,7 +171,7 @@ def get_findings(self, filename, test):
"title": details["title"],
"severity": details["stat"].title(),
"description": details["desc"] + "\n\n " + details["name"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -186,7 +186,7 @@ def get_findings(self, filename, test):
"title": details,
"severity": metadata["metadata"]["severity"].title(),
"description": metadata["metadata"]["description"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
else:
@@ -198,7 +198,7 @@ def get_findings(self, filename, test):
"title": details,
"severity": metadata["metadata"]["severity"].title(),
"description": metadata["metadata"]["description"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -213,7 +213,7 @@ def get_findings(self, filename, test):
"title": details[binary_analysis_type]["description"].split(".")[0],
"severity": details[binary_analysis_type]["severity"].title(),
"description": details[binary_analysis_type]["description"],
- "file_path": details["name"]
+ "file_path": details["name"],
}
mobsf_findings.append(mobsf_item)
elif data["binary_analysis"].get("findings"):
@@ -232,7 +232,7 @@ def get_findings(self, filename, test):
"title": details["detailed_desc"],
"severity": details["severity"].title(),
"description": details["detailed_desc"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
else:
@@ -250,7 +250,7 @@ def get_findings(self, filename, test):
"title": details["detailed_desc"],
"severity": details["severity"].title(),
"description": details["detailed_desc"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -282,7 +282,7 @@ def get_findings(self, filename, test):
"title": details["metadata"]["description"],
"severity": details["metadata"]["severity"].title(),
"description": "**API:** " + api + "\n\n**Description:** " + details["metadata"]["description"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -294,7 +294,7 @@ def get_findings(self, filename, test):
"title": details["title"],
"severity": details["stat"],
"description": details["desc"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -316,7 +316,7 @@ def get_findings(self, filename, test):
"title": title,
"severity": finding["level"],
"description": description,
- "file_path": file_path
+ "file_path": file_path,
}
mobsf_findings.append(mobsf_item)
@@ -327,7 +327,7 @@ def get_findings(self, filename, test):
"title": finding["name"],
"severity": finding["severity"],
"description": finding["description"] + "\n" + "**apk_exploit_dict:** " + str(finding["apk_exploit_dict"]) + "\n" + "**line_number:** " + str(finding["line_number"]),
- "file_path": finding["file_object"]
+ "file_path": finding["file_object"],
}
mobsf_findings.append(mobsf_item)
for mobsf_finding in mobsf_findings:
diff --git a/dojo/tools/mobsfscan/parser.py b/dojo/tools/mobsfscan/parser.py
index 67c30ffb1c..ae7eecc122 100644
--- a/dojo/tools/mobsfscan/parser.py
+++ b/dojo/tools/mobsfscan/parser.py
@@ -35,8 +35,8 @@ def get_findings(self, filename, test):
metadata = item.get("metadata")
cwe = int(
re.match(r"(cwe|CWE)-([0-9]+)", metadata.get("cwe")).group(
- 2
- )
+ 2,
+ ),
)
masvs = metadata.get("masvs")
owasp_mobile = metadata.get("owasp-mobile")
@@ -45,7 +45,7 @@ def get_findings(self, filename, test):
f"**Description:** `{metadata.get('description')}`",
f"**OWASP MASVS:** `{masvs}`",
f"**OWASP Mobile:** `{owasp_mobile}`",
- ]
+ ],
)
references = metadata.get("reference")
if metadata.get("severity") in self.SEVERITY:
@@ -70,7 +70,7 @@ def get_findings(self, filename, test):
finding.line = line
dupe_key = hashlib.sha256(
- (key + str(cwe) + masvs + owasp_mobile).encode("utf-8")
+ (key + str(cwe) + masvs + owasp_mobile).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/mozilla_observatory/parser.py b/dojo/tools/mozilla_observatory/parser.py
index 1d88b3cf11..783e0ada6f 100644
--- a/dojo/tools/mozilla_observatory/parser.py
+++ b/dojo/tools/mozilla_observatory/parser.py
@@ -41,7 +41,7 @@ def get_findings(self, file, test):
+ "`",
"**Result** : `" + node["result"] + "`"
"**expectation** : " + str(node.get("expectation")) + "`",
- ]
+ ],
)
finding = Finding(
diff --git a/dojo/tools/netsparker/parser.py b/dojo/tools/netsparker/parser.py
index e0cbce557c..35a0892054 100644
--- a/dojo/tools/netsparker/parser.py
+++ b/dojo/tools/netsparker/parser.py
@@ -26,11 +26,11 @@ def get_findings(self, filename, test):
dupes = {}
if "UTC" in data["Generated"]:
scan_date = datetime.datetime.strptime(
- data["Generated"].split(" ")[0], "%d/%m/%Y"
+ data["Generated"].split(" ")[0], "%d/%m/%Y",
).date()
else:
scan_date = datetime.datetime.strptime(
- data["Generated"], "%d/%m/%Y %H:%M %p"
+ data["Generated"], "%d/%m/%Y %H:%M %p",
).date()
for item in data["Vulnerabilities"]:
@@ -79,13 +79,13 @@ def get_findings(self, filename, test):
if item["Classification"] is not None:
if item["Classification"].get("Cvss") is not None and item["Classification"].get("Cvss").get("Vector") is not None:
cvss_objects = cvss_parser.parse_cvss_from_text(
- item["Classification"]["Cvss"]["Vector"]
+ item["Classification"]["Cvss"]["Vector"],
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
elif item["Classification"].get("Cvss31") is not None and item["Classification"].get("Cvss31").get("Vector") is not None:
cvss_objects = cvss_parser.parse_cvss_from_text(
- item["Classification"]["Cvss31"]["Vector"]
+ item["Classification"]["Cvss31"]["Vector"],
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
diff --git a/dojo/tools/neuvector/parser.py b/dojo/tools/neuvector/parser.py
index f2f20ebd53..7cf278ce7e 100644
--- a/dojo/tools/neuvector/parser.py
+++ b/dojo/tools/neuvector/parser.py
@@ -43,7 +43,7 @@ def get_items(self, tree, test):
unique_key = node.get("name") + str(
package_name
+ str(node.get("package_version"))
- + str(node.get("severity"))
+ + str(node.get("severity")),
)
items[unique_key] = item
return list(items.values())
diff --git a/dojo/tools/nexpose/parser.py b/dojo/tools/nexpose/parser.py
index 67908e03d6..c695b819f2 100644
--- a/dojo/tools/nexpose/parser.py
+++ b/dojo/tools/nexpose/parser.py
@@ -191,13 +191,13 @@ def get_vuln_definitions(self, tree):
url_index += 1
else:
vuln["refs"][ref.get("source")] = str(
- ref.text
+ ref.text,
).strip()
elif item.tag == "solution":
for htmlType in list(item):
vuln["resolution"] += self.parse_html_type(
- htmlType
+ htmlType,
)
# there is currently no method to register tags in vulns
@@ -224,7 +224,7 @@ def get_items(self, tree, vulns, test):
"name": "Host Up",
"desc": "Host is up because it replied on ICMP request or some TCP/UDP port is up",
"severity": "Info",
- }
+ },
)
for names in node.findall("names"):
@@ -242,11 +242,11 @@ def get_items(self, tree, vulns, test):
for service in services.findall("service"):
svc["name"] = service.get("name", "").lower()
svc["vulns"] = self.parse_tests_type(
- service, vulns
+ service, vulns,
)
for configs in service.findall(
- "configurations"
+ "configurations",
):
for config in configs.findall("config"):
if "banner" in config.get("name"):
@@ -269,11 +269,11 @@ def get_items(self, tree, vulns, test):
"[^A-Za-z0-9]+",
"-",
service.get("name").lower(),
- ).rstrip("-")
+ ).rstrip("-"),
]
if service.get("name") != ""
else [],
- }
+ },
)
host["services"].append(svc)
@@ -308,7 +308,7 @@ def get_items(self, tree, vulns, test):
else service["protocol"],
fragment=service["protocol"].lower()
if service["name"] == "dns"
- else None
+ else None,
# A little dirty hack but in case of DNS it is
# important to know if vulnerability is on TCP or UDP
)
diff --git a/dojo/tools/nikto/json_parser.py b/dojo/tools/nikto/json_parser.py
index bde6ef3e62..a51deafce6 100644
--- a/dojo/tools/nikto/json_parser.py
+++ b/dojo/tools/nikto/json_parser.py
@@ -27,12 +27,12 @@ def process_json(self, file, test):
description=description,
vuln_id_from_tool=vulnerability.get("id"),
nb_occurences=1,
- references=vulnerability.get("references")
+ references=vulnerability.get("references"),
)
# manage if we have an ID from OSVDB
if "OSVDB" in vulnerability and "0" != vulnerability.get("OSVDB"):
finding.unique_id_from_tool = "OSVDB-" + vulnerability.get(
- "OSVDB"
+ "OSVDB",
)
finding.description += "\n*This finding is marked as medium as there is a link to OSVDB*"
finding.severity = "Medium"
diff --git a/dojo/tools/nikto/xml_parser.py b/dojo/tools/nikto/xml_parser.py
index ab5dffe906..bb831b7c3c 100644
--- a/dojo/tools/nikto/xml_parser.py
+++ b/dojo/tools/nikto/xml_parser.py
@@ -33,7 +33,7 @@ def process_scandetail(self, scan, test, dupes):
description = item.findtext("description")
# Cut the title down to the first sentence
sentences = re.split(
- r"(? 0:
titleText = sentences[0][:900]
@@ -45,7 +45,7 @@ def process_scandetail(self, scan, test, dupes):
f"**Host:** `{item.findtext('iplink')}`",
f"**Description:** `{item.findtext('description')}`",
f"**HTTP Method:** `{item.attrib.get('method')}`",
- ]
+ ],
)
# Manage severity the same way with JSON
severity = "Info" # Nikto doesn't assign severity, default to Info
diff --git a/dojo/tools/nmap/parser.py b/dojo/tools/nmap/parser.py
index f0eb012895..35ea7c3464 100644
--- a/dojo/tools/nmap/parser.py
+++ b/dojo/tools/nmap/parser.py
@@ -27,7 +27,7 @@ def get_findings(self, file, test):
report_date = None
try:
report_date = datetime.datetime.fromtimestamp(
- int(root.attrib["start"])
+ int(root.attrib["start"]),
)
except ValueError:
pass
@@ -57,7 +57,7 @@ def get_findings(self, file, test):
)
if "accuracy" in os_match.attrib:
host_info += "**Accuracy:** {}%\n".format(
- os_match.attrib["accuracy"]
+ os_match.attrib["accuracy"],
)
host_info += "\n\n"
@@ -65,7 +65,7 @@ def get_findings(self, file, test):
for port_element in host.findall("ports/port"):
protocol = port_element.attrib["protocol"]
endpoint = Endpoint(
- host=fqdn if fqdn else ip, protocol=protocol
+ host=fqdn if fqdn else ip, protocol=protocol,
)
if (
"portid" in port_element.attrib
@@ -104,10 +104,10 @@ def get_findings(self, file, test):
# manage some script like
# https://github.com/vulnersCom/nmap-vulners
for script_element in port_element.findall(
- 'script[@id="vulners"]'
+ 'script[@id="vulners"]',
):
self.manage_vulner_script(
- test, dupes, script_element, endpoint, report_date
+ test, dupes, script_element, endpoint, report_date,
)
severity = "Info"
@@ -153,7 +153,7 @@ def convert_cvss_score(self, raw_value):
return "Critical"
def manage_vulner_script(
- self, test, dupes, script_element, endpoint, report_date=None
+ self, test, dupes, script_element, endpoint, report_date=None,
):
for component_element in script_element.findall("table"):
component_cpe = CPE(component_element.attrib["key"])
diff --git a/dojo/tools/noseyparker/parser.py b/dojo/tools/noseyparker/parser.py
index 965b9e28f5..787d696f46 100644
--- a/dojo/tools/noseyparker/parser.py
+++ b/dojo/tools/noseyparker/parser.py
@@ -91,7 +91,7 @@ def get_findings(self, file, test):
line=line_num,
static_finding=True,
nb_occurences=1,
- dynamic_finding=False
+ dynamic_finding=False,
)
dupes[key] = finding
diff --git a/dojo/tools/nuclei/parser.py b/dojo/tools/nuclei/parser.py
index 4e97c4f6b7..4ce0d22b74 100644
--- a/dojo/tools/nuclei/parser.py
+++ b/dojo/tools/nuclei/parser.py
@@ -78,7 +78,7 @@ def get_findings(self, filename, test):
finding.description = info.get("description")
if item.get("extracted-results"):
finding.description += "\n**Results:**\n" + "\n".join(
- item.get("extracted-results")
+ item.get("extracted-results"),
)
if info.get("tags"):
finding.unsaved_tags = info.get("tags")
@@ -108,7 +108,7 @@ def get_findings(self, filename, test):
and classification["cvss-metrics"]
):
cvss_objects = cvss_parser.parse_cvss_from_text(
- classification["cvss-metrics"]
+ classification["cvss-metrics"],
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
@@ -151,8 +151,8 @@ def get_findings(self, filename, test):
dupe_key = hashlib.sha256(
(template_id + item_type + matcher + endpoint.host).encode(
- "utf-8"
- )
+ "utf-8",
+ ),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/openscap/parser.py b/dojo/tools/openscap/parser.py
index a96a1cdcca..186243526b 100644
--- a/dojo/tools/openscap/parser.py
+++ b/dojo/tools/openscap/parser.py
@@ -36,7 +36,7 @@ def get_findings(self, file, test):
rules = {}
for rule in root.findall(f".//{namespace}Rule"):
rules[rule.attrib["id"]] = {
- "title": rule.findtext(f"./{namespace}title")
+ "title": rule.findtext(f"./{namespace}title"),
}
# go to test result
test_result = tree.find(f"./{namespace}TestResult")
@@ -51,7 +51,7 @@ def get_findings(self, file, test):
# run both rule, and rule-result in parallel so that we can get title
# for failed test from rule.
for rule_result in test_result.findall(
- f"./{namespace}rule-result"
+ f"./{namespace}rule-result",
):
result = rule_result.findtext(f"./{namespace}result")
# find only failed report.
@@ -63,11 +63,11 @@ def get_findings(self, file, test):
[
"**IdRef:** `" + rule_result.attrib["idref"] + "`",
"**Title:** `" + title + "`",
- ]
+ ],
)
vulnerability_ids = []
for vulnerability_id in rule_result.findall(
- f"./{namespace}ident[@system='http://cve.mitre.org']"
+ f"./{namespace}ident[@system='http://cve.mitre.org']",
):
vulnerability_ids.append(vulnerability_id.text)
# get severity.
@@ -82,7 +82,7 @@ def get_findings(self, file, test):
references = ""
# get references.
for check_content in rule_result.findall(
- f"./{namespace}check/{namespace}check-content-ref"
+ f"./{namespace}check/{namespace}check-content-ref",
):
references += (
"**name:** : " + check_content.attrib["name"] + "\n"
@@ -115,7 +115,7 @@ def get_findings(self, file, test):
finding.unsaved_endpoints.append(endpoint)
dupe_key = hashlib.sha256(
- references.encode("utf-8")
+ references.encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
diff --git a/dojo/tools/openvas/csv_parser.py b/dojo/tools/openvas/csv_parser.py
index ff9e8bf888..1a5cc9a056 100644
--- a/dojo/tools/openvas/csv_parser.py
+++ b/dojo/tools/openvas/csv_parser.py
@@ -289,7 +289,7 @@ def get_findings(self, filename, test):
column_number = 0
for column in row:
chain.process_column(
- column_names[column_number], column, finding
+ column_names[column_number], column, finding,
)
column_number += 1
if finding is not None and row_number > 0:
@@ -306,7 +306,7 @@ def get_findings(self, filename, test):
+ finding.title
+ "|"
+ finding.description
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
dupes[key] = finding
diff --git a/dojo/tools/openvas/xml_parser.py b/dojo/tools/openvas/xml_parser.py
index 3746d5c27b..bd9d365e0d 100644
--- a/dojo/tools/openvas/xml_parser.py
+++ b/dojo/tools/openvas/xml_parser.py
@@ -41,7 +41,7 @@ def get_findings(self, filename, test):
description="\n".join(description),
severity=severity,
dynamic_finding=True,
- static_finding=False
+ static_finding=False,
)
findings.append(finding)
return findings
diff --git a/dojo/tools/ort/parser.py b/dojo/tools/ort/parser.py
index b2c33b0c45..7be74278cb 100644
--- a/dojo/tools/ort/parser.py
+++ b/dojo/tools/ort/parser.py
@@ -47,16 +47,16 @@ def get_items(self, evaluatedModel, test):
rule_violations = evaluatedModel["rule_violations"]
licenses = evaluatedModel["licenses"]
rule_violations_unresolved = get_unresolved_rule_violations(
- rule_violations
+ rule_violations,
)
rule_violations_models = get_rule_violation_models(
- rule_violations_unresolved, packages, licenses, dependency_trees
+ rule_violations_unresolved, packages, licenses, dependency_trees,
)
for model in rule_violations_models:
item = get_item(model, test)
unique_key = hashlib.md5(
- (item.title + item.references).encode()
+ (item.title + item.references).encode(),
).hexdigest()
items[unique_key] = item
@@ -109,23 +109,23 @@ def get_name_id_for_package(packages, package__id):
def get_rule_violation_models(
- rule_violations_unresolved, packages, licenses, dependency_trees
+ rule_violations_unresolved, packages, licenses, dependency_trees,
):
models = []
for violation in rule_violations_unresolved:
models.append(
get_rule_violation_model(
- violation, packages, licenses, dependency_trees
- )
+ violation, packages, licenses, dependency_trees,
+ ),
)
return models
def get_rule_violation_model(
- rule_violation_unresolved, packages, licenses, dependency_trees
+ rule_violation_unresolved, packages, licenses, dependency_trees,
):
project_ids = get_project_ids_for_package(
- dependency_trees, rule_violation_unresolved["pkg"]
+ dependency_trees, rule_violation_unresolved["pkg"],
)
project_names = []
for id in project_ids:
@@ -140,7 +140,7 @@ def get_rule_violation_model(
license_id = find_license_id(licenses, license_tmp)
return RuleViolationModel(
- package, license_id, project_names, rule_violation_unresolved
+ package, license_id, project_names, rule_violation_unresolved,
)
@@ -193,7 +193,7 @@ def get_item(model, test):
# rule_violation: dict
RuleViolationModel = namedtuple(
- "RuleViolationModel", ["pkg", "license_id", "projects", "rule_violation"]
+ "RuleViolationModel", ["pkg", "license_id", "projects", "rule_violation"],
)
diff --git a/dojo/tools/ossindex_devaudit/parser.py b/dojo/tools/ossindex_devaudit/parser.py
index e9abb97770..ed89887e29 100644
--- a/dojo/tools/ossindex_devaudit/parser.py
+++ b/dojo/tools/ossindex_devaudit/parser.py
@@ -60,7 +60,7 @@ def get_items(self, tree, test):
def get_item(
- dependency_name, dependency_version, dependency_source, vulnerability, test
+ dependency_name, dependency_version, dependency_source, vulnerability, test,
):
cwe_data = vulnerability.get("cwe", "CWE-1035")
if cwe_data is None or cwe_data.startswith("CWE") is False:
diff --git a/dojo/tools/outpost24/parser.py b/dojo/tools/outpost24/parser.py
index 6d42ee855e..011e38b2d1 100644
--- a/dojo/tools/outpost24/parser.py
+++ b/dojo/tools/outpost24/parser.py
@@ -26,13 +26,13 @@ def get_findings(self, file, test):
# date = detail.findtext('date') # can be used for Finding.date?
vulnerability_id = detail.findtext("./cve/id")
url = detail.findtext(
- "./referencelist/reference/[type='solution']/../url"
+ "./referencelist/reference/[type='solution']/../url",
)
description = detail.findtext("description")
mitigation = detail.findtext("solution")
impact = detail.findtext("information")
cvss_score = detail.findtext("cvss_v3_score") or detail.findtext(
- "cvss_score"
+ "cvss_score",
)
if not cvss_score:
cvss_score = 0
@@ -80,7 +80,7 @@ def get_findings(self, file, test):
logger.debug("General port given. Assigning 0 as default.")
port = 0
finding.unsaved_endpoints.append(
- Endpoint(protocol=protocol, host=host, port=port)
+ Endpoint(protocol=protocol, host=host, port=port),
)
items.append(finding)
return items
diff --git a/dojo/tools/php_security_audit_v2/parser.py b/dojo/tools/php_security_audit_v2/parser.py
index 91b7e4c6c3..e677e25254 100644
--- a/dojo/tools/php_security_audit_v2/parser.py
+++ b/dojo/tools/php_security_audit_v2/parser.py
@@ -36,7 +36,7 @@ def get_findings(self, filename, test):
findingdetail += "Details: " + issue["message"] + "\n"
sev = PhpSecurityAuditV2Parser.get_severity_word(
- issue["severity"]
+ issue["severity"],
)
dupe_key = (
diff --git a/dojo/tools/php_symfony_security_check/parser.py b/dojo/tools/php_symfony_security_check/parser.py
index a124a4d419..f22bf45f4e 100644
--- a/dojo/tools/php_symfony_security_check/parser.py
+++ b/dojo/tools/php_symfony_security_check/parser.py
@@ -43,10 +43,10 @@ def get_items(self, tree, test):
for advisory in advisories:
item = get_item(
- dependency_name, dependency_version, advisory, test
+ dependency_name, dependency_version, advisory, test,
)
unique_key = str(dependency_name) + str(
- dependency_data["version"] + str(advisory["cve"])
+ dependency_data["version"] + str(advisory["cve"]),
)
items[unique_key] = item
diff --git a/dojo/tools/pmd/parser.py b/dojo/tools/pmd/parser.py
index 484d289b03..1047a92a95 100644
--- a/dojo/tools/pmd/parser.py
+++ b/dojo/tools/pmd/parser.py
@@ -22,7 +22,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = list(
- csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"')
+ csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"'),
)
for row in reader:
@@ -43,7 +43,7 @@ def get_findings(self, filename, test):
finding.severity = priority
description = "Description: {}\n".format(
- row["Description"].strip()
+ row["Description"].strip(),
)
description += "Rule set: {}\n".format(row["Rule set"].strip())
description += "Problem: {}\n".format(row["Problem"].strip())
@@ -55,7 +55,7 @@ def get_findings(self, filename, test):
finding.mitigation = "No mitigation provided"
key = hashlib.sha256(
- f"{finding.title}|{finding.description}|{finding.file_path}|{finding.line}".encode()
+ f"{finding.title}|{finding.description}|{finding.file_path}|{finding.line}".encode(),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/popeye/parser.py b/dojo/tools/popeye/parser.py
index 65ac0d8580..e3806c6f8d 100644
--- a/dojo/tools/popeye/parser.py
+++ b/dojo/tools/popeye/parser.py
@@ -37,7 +37,7 @@ def get_findings(self, file, test):
+ issue["message"]
)
severity = self.get_defect_dojo_severity(
- issue["level"]
+ issue["level"],
)
description = (
"**Sanitizer** : "
@@ -56,7 +56,7 @@ def get_findings(self, file, test):
+ issue["message"]
)
vuln_id_from_tool = re.search(
- r"\[(POP-\d+)\].+", issue["message"]
+ r"\[(POP-\d+)\].+", issue["message"],
).group(1)
finding = Finding(
title=title,
@@ -69,7 +69,7 @@ def get_findings(self, file, test):
)
# internal de-duplication
dupe_key = hashlib.sha256(
- str(description + title).encode("utf-8")
+ str(description + title).encode("utf-8"),
).hexdigest()
if dupe_key not in dupes:
dupes[dupe_key] = finding
diff --git a/dojo/tools/progpilot/parser.py b/dojo/tools/progpilot/parser.py
index 9947976e6e..6badb4c044 100644
--- a/dojo/tools/progpilot/parser.py
+++ b/dojo/tools/progpilot/parser.py
@@ -64,7 +64,7 @@ def get_findings(self, filename, test):
severity="Medium",
dynamic_finding=False,
static_finding=True,
- unique_id_from_tool=vuln_id
+ unique_id_from_tool=vuln_id,
)
if sink_line is not None:
find.line = sink_line
diff --git a/dojo/tools/pwn_sast/parser.py b/dojo/tools/pwn_sast/parser.py
index 0a4ba9652e..d66afa3512 100644
--- a/dojo/tools/pwn_sast/parser.py
+++ b/dojo/tools/pwn_sast/parser.py
@@ -59,7 +59,7 @@ def get_findings(self, filename, test):
"Install pwn_sast Driver via: https://github.com/0dayinc/pwn#installation",
"Execute the pwn_sast Driver via:",
f"```pwn_sast --dir-path . --uri-source-root {git_repo_root_uri} -s```",
- ]
+ ],
)
for line in line_no_and_contents:
@@ -76,7 +76,7 @@ def get_findings(self, filename, test):
f"Committed By: {author}",
"Line Contents:",
f"```{contents}```",
- ]
+ ],
)
impact = "\n".join(
@@ -84,17 +84,17 @@ def get_findings(self, filename, test):
f"Security Control Impacted: {section}",
f"NIST 800-53 Security Control Details: {nist_800_53_uri}",
f"CWE Details: {cwe_uri}",
- ]
+ ],
)
mitigation = "\n".join(
[
f"NIST 800-53 Security Control Details / Mitigation Strategy: {nist_800_53_uri}",
- ]
+ ],
)
unique_finding_key = hashlib.sha256(
- (offending_uri + contents).encode("utf-8")
+ (offending_uri + contents).encode("utf-8"),
).hexdigest()
if unique_finding_key in findings:
diff --git a/dojo/tools/qualys/csv_parser.py b/dojo/tools/qualys/csv_parser.py
index 20f5314305..98a8ec00af 100644
--- a/dojo/tools/qualys/csv_parser.py
+++ b/dojo/tools/qualys/csv_parser.py
@@ -23,7 +23,7 @@ def parse_csv(csv_file) -> [Finding]:
if isinstance(content, bytes):
content = content.decode("utf-8")
csv_reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
report_findings = get_report_findings(csv_reader)
@@ -78,17 +78,17 @@ def _extract_cvss_vectors(cvss_base, cvss_temporal):
if cvss_temporal:
try:
cvss_temporal_vector = re.search(
- vector_pattern, cvss_temporal
+ vector_pattern, cvss_temporal,
).group(1)
cvss_vector += "/"
cvss_vector += cvss_temporal_vector
except IndexError:
_logger.error(
- f"CVSS3 Temporal Vector not found in {cvss_base}"
+ f"CVSS3 Temporal Vector not found in {cvss_base}",
)
except AttributeError:
_logger.error(
- f"CVSS3 Temporal Vector not found in {cvss_base}"
+ f"CVSS3 Temporal Vector not found in {cvss_base}",
)
return cvss_vector
@@ -159,11 +159,11 @@ def build_findings_from_dict(report_findings: [dict]) -> [Finding]:
if "CVSS3 Base" in report_finding:
cvssv3 = _extract_cvss_vectors(
- report_finding["CVSS3 Base"], report_finding["CVSS3 Temporal"]
+ report_finding["CVSS3 Base"], report_finding["CVSS3 Temporal"],
)
elif "CVSS3.1 Base" in report_finding:
cvssv3 = _extract_cvss_vectors(
- report_finding["CVSS3.1 Base"], report_finding["CVSS3.1 Temporal"]
+ report_finding["CVSS3.1 Base"], report_finding["CVSS3.1 Temporal"],
)
# Get the date based on the first_seen setting
try:
@@ -189,13 +189,13 @@ def build_findings_from_dict(report_findings: [dict]) -> [Finding]:
impact=report_finding["Impact"],
date=date,
vuln_id_from_tool=report_finding["QID"],
- cvssv3=cvssv3
+ cvssv3=cvssv3,
)
# Qualys reports regression findings as active, but with a Date Last
# Fixed.
if report_finding["Date Last Fixed"]:
finding.mitigated = datetime.strptime(
- report_finding["Date Last Fixed"], "%m/%d/%Y %H:%M:%S"
+ report_finding["Date Last Fixed"], "%m/%d/%Y %H:%M:%S",
)
finding.is_mitigated = True
else:
@@ -229,7 +229,7 @@ def build_findings_from_dict(report_findings: [dict]) -> [Finding]:
severity=report_finding["SEVERITY"],
impact=report_finding["IMPACT"],
date=date,
- vuln_id_from_tool=report_finding["QID"]
+ vuln_id_from_tool=report_finding["QID"],
)
# Make sure we have something to append to
if isinstance(finding.unsaved_vulnerability_ids, list):
diff --git a/dojo/tools/qualys/parser.py b/dojo/tools/qualys/parser.py
index 2af9a528f1..ade88d2d32 100644
--- a/dojo/tools/qualys/parser.py
+++ b/dojo/tools/qualys/parser.py
@@ -111,7 +111,7 @@ def split_cvss(value, _temp):
# remove ")" at the end
if _temp.get("CVSS_vector") is None:
_temp["CVSS_vector"] = CVSS3(
- "CVSS:3.0/" + split[1][:-1]
+ "CVSS:3.0/" + split[1][:-1],
).clean_vector()
else:
if _temp.get("CVSS_value") is None:
@@ -174,7 +174,7 @@ def parse_finding(host, tree):
last_fixed = vuln_details.findtext("LAST_FIXED")
if last_fixed is not None:
_temp["mitigation_date"] = datetime.datetime.strptime(
- last_fixed, "%Y-%m-%dT%H:%M:%SZ"
+ last_fixed, "%Y-%m-%dT%H:%M:%SZ",
)
else:
_temp["mitigation_date"] = None
@@ -217,7 +217,7 @@ def parse_finding(host, tree):
htmltext("First Found: " + _first_found),
htmltext("Last Found: " + _last_found),
htmltext("Times Found: " + _times_found),
- ]
+ ],
)
# Impact description
_temp["IMPACT"] = htmltext(vuln_item.findtext("IMPACT"))
diff --git a/dojo/tools/qualys_infrascan_webgui/parser.py b/dojo/tools/qualys_infrascan_webgui/parser.py
index 1ac6909eea..f252e7d541 100644
--- a/dojo/tools/qualys_infrascan_webgui/parser.py
+++ b/dojo/tools/qualys_infrascan_webgui/parser.py
@@ -59,7 +59,7 @@ def issue_r(raw_row, vuln, scan_date):
_description = str(vuln_details.findtext("DIAGNOSIS"))
# Solution Strips Heading Workaround(s)
_temp["solution"] = htmltext(
- str(vuln_details.findtext("SOLUTION"))
+ str(vuln_details.findtext("SOLUTION")),
)
# Vuln_description
@@ -70,11 +70,11 @@ def issue_r(raw_row, vuln, scan_date):
htmltext("**QID:** " + str(_gid)),
htmltext("**Port:** " + str(_port)),
htmltext("**Result Evidence:** " + _result),
- ]
+ ],
)
# Impact description
_temp["IMPACT"] = htmltext(
- str(vuln_details.findtext("CONSEQUENCE"))
+ str(vuln_details.findtext("CONSEQUENCE")),
)
# CVE and LINKS
diff --git a/dojo/tools/qualys_webapp/parser.py b/dojo/tools/qualys_webapp/parser.py
index 4c8c595cf1..47be5bb948 100644
--- a/dojo/tools/qualys_webapp/parser.py
+++ b/dojo/tools/qualys_webapp/parser.py
@@ -90,13 +90,13 @@ def attach_unique_extras(
protocol=protocol,
query=truncate_str(query, 1000),
fragment=truncate_str(fragment, 500),
- )
+ ),
)
for i in range(len(requests)):
if requests[i] != "" or responses[i] != "":
finding.unsaved_req_resp.append(
- {"req": requests[i], "resp": responses[i]}
+ {"req": requests[i], "resp": responses[i]},
)
if active_text is not None:
@@ -133,7 +133,7 @@ def attach_extras(endpoints, requests, responses, finding, date, qid, test):
for i in range(len(requests)):
if requests[i] != "" or responses[i] != "":
finding.unsaved_req_resp.append(
- {"req": requests[i], "resp": responses[i]}
+ {"req": requests[i], "resp": responses[i]},
)
return finding
@@ -186,7 +186,7 @@ def get_request_response(payloads):
def get_unique_vulnerabilities(
- vulnerabilities, test, is_info=False, is_app_report=False
+ vulnerabilities, test, is_info=False, is_app_report=False,
):
findings = {}
# Iterate through all vulnerabilities to pull necessary info
@@ -216,11 +216,11 @@ def get_unique_vulnerabilities(
if raw_finding_date is not None:
if raw_finding_date.endswith("GMT"):
finding_date = datetime.strptime(
- raw_finding_date, "%d %b %Y %I:%M%p GMT"
+ raw_finding_date, "%d %b %Y %I:%M%p GMT",
)
else:
finding_date = datetime.strptime(
- raw_finding_date, "%d %b %Y %I:%M%p GMT%z"
+ raw_finding_date, "%d %b %Y %I:%M%p GMT%z",
)
else:
finding_date = None
@@ -253,7 +253,7 @@ def get_unique_vulnerabilities(
# Traverse and retrieve any information in the VULNERABILITY_LIST
# section of the report. This includes all endpoints and request/response pairs
def get_vulnerabilities(
- vulnerabilities, test, is_info=False, is_app_report=False
+ vulnerabilities, test, is_info=False, is_app_report=False,
):
findings = {}
# Iterate through all vulnerabilities to pull necessary info
@@ -283,18 +283,18 @@ def get_vulnerabilities(
if raw_finding_date is not None:
if raw_finding_date.endswith("GMT"):
finding_date = datetime.strptime(
- raw_finding_date, "%d %b %Y %I:%M%p GMT"
+ raw_finding_date, "%d %b %Y %I:%M%p GMT",
)
else:
finding_date = datetime.strptime(
- raw_finding_date, "%d %b %Y %I:%M%p GMT%z"
+ raw_finding_date, "%d %b %Y %I:%M%p GMT%z",
)
else:
finding_date = None
finding = findings.get(qid, None)
findings[qid] = attach_extras(
- urls, req_resps[0], req_resps[1], finding, finding_date, qid, test
+ urls, req_resps[0], req_resps[1], finding, finding_date, qid, test,
)
return findings
@@ -351,22 +351,22 @@ def get_unique_items(
findings = {}
for unique_id, finding in get_unique_vulnerabilities(
- vulnerabilities, test, False, is_app_report
+ vulnerabilities, test, False, is_app_report,
).items():
qid = int(finding.vuln_id_from_tool)
if qid in g_qid_list:
index = g_qid_list.index(qid)
findings[unique_id] = get_glossary_item(
- glossary[index], finding, enable_weakness=enable_weakness
+ glossary[index], finding, enable_weakness=enable_weakness,
)
for unique_id, finding in get_unique_vulnerabilities(
- info_gathered, test, True, is_app_report
+ info_gathered, test, True, is_app_report,
).items():
qid = int(finding.vuln_id_from_tool)
if qid in g_qid_list:
index = g_qid_list.index(qid)
finding = get_glossary_item(
- glossary[index], finding, True, enable_weakness=enable_weakness
+ glossary[index], finding, True, enable_weakness=enable_weakness,
)
if qid in ig_qid_list:
index = ig_qid_list.index(qid)
@@ -390,20 +390,20 @@ def get_items(
findings = {}
for qid, finding in get_vulnerabilities(
- vulnerabilities, test, False, is_app_report
+ vulnerabilities, test, False, is_app_report,
).items():
if qid in g_qid_list:
index = g_qid_list.index(qid)
findings[qid] = get_glossary_item(
- glossary[index], finding, enable_weakness=enable_weakness
+ glossary[index], finding, enable_weakness=enable_weakness,
)
for qid, finding in get_vulnerabilities(
- info_gathered, test, True, is_app_report
+ info_gathered, test, True, is_app_report,
).items():
if qid in g_qid_list:
index = g_qid_list.index(qid)
finding = get_glossary_item(
- glossary[index], finding, True, enable_weakness=enable_weakness
+ glossary[index], finding, True, enable_weakness=enable_weakness,
)
if qid in ig_qid_list:
index = ig_qid_list.index(qid)
@@ -423,17 +423,17 @@ def qualys_webapp_parser(qualys_xml_file, test, unique, enable_weakness=False):
if is_app_report:
vulnerabilities = tree.findall(
- "./RESULTS/WEB_APPLICATION/VULNERABILITY_LIST/VULNERABILITY"
+ "./RESULTS/WEB_APPLICATION/VULNERABILITY_LIST/VULNERABILITY",
)
info_gathered = tree.findall(
- "./RESULTS/WEB_APPLICATION/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED"
+ "./RESULTS/WEB_APPLICATION/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED",
)
else:
vulnerabilities = tree.findall(
- "./RESULTS/VULNERABILITY_LIST/VULNERABILITY"
+ "./RESULTS/VULNERABILITY_LIST/VULNERABILITY",
)
info_gathered = tree.findall(
- "./RESULTS/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED"
+ "./RESULTS/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED",
)
glossary = tree.findall("./GLOSSARY/QID_LIST/QID")
@@ -446,7 +446,7 @@ def qualys_webapp_parser(qualys_xml_file, test, unique, enable_weakness=False):
is_app_report,
test,
enable_weakness,
- ).values()
+ ).values(),
)
else:
items = list(
@@ -457,7 +457,7 @@ def qualys_webapp_parser(qualys_xml_file, test, unique, enable_weakness=False):
is_app_report,
test,
enable_weakness,
- ).values()
+ ).values(),
)
return items
@@ -474,8 +474,8 @@ def get_description_for_scan_types(self, scan_type):
return "Qualys WebScan output files can be imported in XML format."
def get_findings(
- self, file, test, enable_weakness=QUALYS_WAS_WEAKNESS_IS_VULN
+ self, file, test, enable_weakness=QUALYS_WAS_WEAKNESS_IS_VULN,
):
return qualys_webapp_parser(
- file, test, QUALYS_WAS_UNIQUE_ID, enable_weakness
+ file, test, QUALYS_WAS_UNIQUE_ID, enable_weakness,
)
diff --git a/dojo/tools/retirejs/parser.py b/dojo/tools/retirejs/parser.py
index aaf038f898..4cb162e8a4 100644
--- a/dojo/tools/retirejs/parser.py
+++ b/dojo/tools/retirejs/parser.py
@@ -35,7 +35,7 @@ def get_items(self, tree, test):
+ ")"
)
item.description += "\n\n Raw Result: " + str(
- json.dumps(vulnerability, indent=4, sort_keys=True)
+ json.dumps(vulnerability, indent=4, sort_keys=True),
)
item.references = item.references
@@ -47,7 +47,7 @@ def get_items(self, tree, test):
unique_key = hashlib.md5(
(
item.title + item.references + encrypted_file
- ).encode()
+ ).encode(),
).hexdigest()
items[unique_key] = item
return list(items.values())
diff --git a/dojo/tools/risk_recon/parser.py b/dojo/tools/risk_recon/parser.py
index 7d14b6ebce..30c08e5161 100644
--- a/dojo/tools/risk_recon/parser.py
+++ b/dojo/tools/risk_recon/parser.py
@@ -104,7 +104,7 @@ def _get_findings_internal(self, findings, test):
finding.unsaved_tags = tags
dupe_key = item.get(
- "finding_id", title + "|" + tags + "|" + findingdetail
+ "finding_id", title + "|" + tags + "|" + findingdetail,
)
if dupe_key in dupes:
diff --git a/dojo/tools/rubocop/parser.py b/dojo/tools/rubocop/parser.py
index f0454a7652..4d6459dd1c 100644
--- a/dojo/tools/rubocop/parser.py
+++ b/dojo/tools/rubocop/parser.py
@@ -49,7 +49,7 @@ def get_findings(self, scan_file, test):
f"**Message**: {offense.get('message')}",
f"**Is correctable?**: `{offense.get('correctable')}`",
f"**Location**: `{'-'.join(offense['location'])}`",
- ]
+ ],
)
finding = Finding(
test=test,
diff --git a/dojo/tools/rusty_hog/parser.py b/dojo/tools/rusty_hog/parser.py
index 4ffd6c9ade..fa2a4f6ebc 100644
--- a/dojo/tools/rusty_hog/parser.py
+++ b/dojo/tools/rusty_hog/parser.py
@@ -25,7 +25,7 @@ def parse_json(self, json_output):
def get_items(self, json_output, scanner, test):
items = {}
findings = self.__getitem(
- vulnerabilities=self.parse_json(json_output), scanner=scanner
+ vulnerabilities=self.parse_json(json_output), scanner=scanner,
)
for finding in findings:
unique_key = f"Finding {finding}"
@@ -67,7 +67,7 @@ def get_tests(self, scan_type, handle):
else:
test.description = parsername
test.findings = self.__getitem(
- vulnerabilities=tree, scanner=parsername
+ vulnerabilities=tree, scanner=parsername,
)
tests.append(test)
return tests
@@ -85,15 +85,15 @@ def __getitem(self, vulnerabilities, scanner):
description = f"**This string was found:** {found_secret_string}"
if vulnerability.get("commit") is not None:
description += "\n**Commit message:** {}".format(
- vulnerability.get("commit")
+ vulnerability.get("commit"),
)
if vulnerability.get("commitHash") is not None:
description += "\n**Commit hash:** {}".format(
- vulnerability.get("commitHash")
+ vulnerability.get("commitHash"),
)
if vulnerability.get("parent_commit_hash") is not None:
description += "\n**Parent commit hash:** {}".format(
- vulnerability.get("parent_commit_hash")
+ vulnerability.get("parent_commit_hash"),
)
if (
vulnerability.get("old_file_id") is not None
@@ -121,15 +121,15 @@ def __getitem(self, vulnerabilities, scanner):
description = f"**This string was found:** {found_secret_string}"
if vulnerability.get("path") is not None:
description += "\n**Path of Issue:** {}".format(
- vulnerability.get("path")
+ vulnerability.get("path"),
)
if vulnerability.get("linenum") is not None:
description += "\n**Linenum of Issue:** {}".format(
- vulnerability.get("linenum")
+ vulnerability.get("linenum"),
)
if vulnerability.get("diff") is not None:
description += "\n**Diff:** {}".format(
- vulnerability.get("diff")
+ vulnerability.get("diff"),
)
elif scanner == "Gottingen Hog":
"""Gottingen Hog"""
@@ -137,31 +137,31 @@ def __getitem(self, vulnerabilities, scanner):
description = f"**This string was found:** {found_secret_string}"
if vulnerability.get("issue_id") is not None:
description += "\n**JIRA Issue ID:** {}".format(
- vulnerability.get("issue_id")
+ vulnerability.get("issue_id"),
)
if vulnerability.get("location") is not None:
description += "\n**JIRA location:** {}".format(
- vulnerability.get("location")
+ vulnerability.get("location"),
)
if vulnerability.get("url") is not None:
description += "\n**JIRA url:** [{}]({})".format(
- vulnerability.get("url"), vulnerability.get("url")
+ vulnerability.get("url"), vulnerability.get("url"),
)
elif scanner == "Essex Hog":
found_secret_string = vulnerability.get("stringsFound")
description = f"**This string was found:** {found_secret_string}"
if vulnerability.get("page_id") is not None:
description += "\n**Confluence URL:** [{}]({})".format(
- vulnerability.get("url"), vulnerability.get("url")
+ vulnerability.get("url"), vulnerability.get("url"),
)
description += "\n**Confluence Page ID:** {}".format(
- vulnerability.get("page_id")
+ vulnerability.get("page_id"),
)
"""General - for all Rusty Hogs"""
file_path = vulnerability.get("path")
if vulnerability.get("date") is not None:
description += "\n**Date:** {}".format(
- vulnerability.get("date")
+ vulnerability.get("date"),
)
"""Finding Title"""
if scanner == "Choctaw Hog":
@@ -172,7 +172,7 @@ def __getitem(self, vulnerabilities, scanner):
)
elif scanner == "Duroc Hog":
title = "{} found in path {}".format(
- vulnerability.get("reason"), vulnerability.get("path")
+ vulnerability.get("reason"), vulnerability.get("path"),
)
elif scanner == "Gottingen Hog":
title = "{} found in Jira ID {} ({})".format(
@@ -182,7 +182,7 @@ def __getitem(self, vulnerabilities, scanner):
)
elif scanner == "Essex Hog":
title = "{} found in Confluence Page ID {}".format(
- vulnerability.get("reason"), vulnerability.get("page_id")
+ vulnerability.get("reason"), vulnerability.get("page_id"),
)
# create the finding object
finding = Finding(
diff --git a/dojo/tools/sarif/parser.py b/dojo/tools/sarif/parser.py
index 2fe52197b1..f311d03463 100644
--- a/dojo/tools/sarif/parser.py
+++ b/dojo/tools/sarif/parser.py
@@ -164,16 +164,16 @@ def get_title(result, rule):
title = None
if "message" in result:
title = get_message_from_multiformatMessageString(
- result["message"], rule
+ result["message"], rule,
)
if title is None and rule is not None:
if "shortDescription" in rule:
title = get_message_from_multiformatMessageString(
- rule["shortDescription"], rule
+ rule["shortDescription"], rule,
)
elif "fullDescription" in rule:
title = get_message_from_multiformatMessageString(
- rule["fullDescription"], rule
+ rule["fullDescription"], rule,
)
elif "name" in rule:
title = rule["name"]
@@ -267,7 +267,7 @@ def get_description(result, rule):
message = ""
if "message" in result:
message = get_message_from_multiformatMessageString(
- result["message"], rule
+ result["message"], rule,
)
description += f"**Result message:** {message}\n"
if get_snippet(result) is not None:
@@ -278,13 +278,13 @@ def get_description(result, rule):
shortDescription = ""
if "shortDescription" in rule:
shortDescription = get_message_from_multiformatMessageString(
- rule["shortDescription"], rule
+ rule["shortDescription"], rule,
)
if shortDescription != message:
description += f"**{_('Rule short description')}:** {shortDescription}\n"
if "fullDescription" in rule:
fullDescription = get_message_from_multiformatMessageString(
- rule["fullDescription"], rule
+ rule["fullDescription"], rule,
)
if (
fullDescription != message
@@ -308,7 +308,7 @@ def get_references(rule):
reference = rule["helpUri"]
elif "help" in rule:
helpText = get_message_from_multiformatMessageString(
- rule["help"], rule
+ rule["help"], rule,
)
if helpText.startswith("http"):
reference = helpText
@@ -435,7 +435,7 @@ def get_item(result, rules, artifacts, run_date):
# manage fixes provided in the report
if "fixes" in result:
finding.mitigation = "\n".join(
- [fix.get("description", {}).get("text") for fix in result["fixes"]]
+ [fix.get("description", {}).get("text") for fix in result["fixes"]],
)
if run_date:
@@ -460,7 +460,7 @@ def get_item(result, rules, artifacts, run_date):
hashes = get_fingerprints_hashes(result["partialFingerprints"])
sorted_hashes = sorted(hashes.keys())
finding.unique_id_from_tool = "|".join(
- [f'{key}:{hashes[key]["value"]}' for key in sorted_hashes]
+ [f'{key}:{hashes[key]["value"]}' for key in sorted_hashes],
)
return finding
diff --git a/dojo/tools/scantist/parser.py b/dojo/tools/scantist/parser.py
index b2b3b5f302..c67de9a51c 100644
--- a/dojo/tools/scantist/parser.py
+++ b/dojo/tools/scantist/parser.py
@@ -84,7 +84,7 @@ def get_findings(vuln, test):
if item:
hash_key = hashlib.md5(
node.get("Public ID").encode("utf-8")
- + node.get("Library").encode("utf-8")
+ + node.get("Library").encode("utf-8"),
).hexdigest()
items[hash_key] = get_findings(node, test)
diff --git a/dojo/tools/scout_suite/parser.py b/dojo/tools/scout_suite/parser.py
index e6344fa67a..45dd1dbdf0 100644
--- a/dojo/tools/scout_suite/parser.py
+++ b/dojo/tools/scout_suite/parser.py
@@ -63,7 +63,7 @@ def get_tests(self, scan_type, handle):
str(items["max_level"]),
str(items["resources_count"]),
str(items["rules_count"]),
- ]
+ ],
)
tests = []
@@ -92,7 +92,7 @@ def __get_items(self, data):
last_run_date = None
if "time" in data.get("last_run", {}):
last_run_date = datetime.strptime(
- data["last_run"]["time"][0:10], "%Y-%m-%d"
+ data["last_run"]["time"][0:10], "%Y-%m-%d",
).date()
# Configured Services
@@ -138,7 +138,7 @@ def __get_items(self, data):
dynamic_finding=False,
static_finding=True,
vuln_id_from_tool=":".join(
- [data["provider_code"], finding_name]
+ [data["provider_code"], finding_name],
),
)
if finding.get("references"):
diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py
index 97e711bf23..aa4f730750 100644
--- a/dojo/tools/semgrep/parser.py
+++ b/dojo/tools/semgrep/parser.py
@@ -45,20 +45,20 @@ def get_findings(self, filename, test):
item["extra"]["metadata"]
.get("cwe")[0]
.partition(":")[0]
- .partition("-")[2]
+ .partition("-")[2],
)
else:
finding.cwe = int(
item["extra"]["metadata"]
.get("cwe")
.partition(":")[0]
- .partition("-")[2]
+ .partition("-")[2],
)
# manage references from metadata
if "references" in item["extra"]["metadata"]:
finding.references = "\n".join(
- item["extra"]["metadata"]["references"]
+ item["extra"]["metadata"]["references"],
)
# manage mitigation from metadata
@@ -71,7 +71,7 @@ def get_findings(self, filename, test):
"\n```\n",
json.dumps(item["extra"]["fix_regex"]),
"\n```\n",
- ]
+ ],
)
dupe_key = finding.title + finding.file_path + str(finding.line)
@@ -109,14 +109,14 @@ def get_findings(self, filename, test):
item["advisory"]["references"]
.get("cweIds")[0]
.partition(":")[0]
- .partition("-")[2]
+ .partition("-")[2],
)
else:
finding.cwe = int(
item["advisory"]["references"]
.get("cweIds")
.partition(":")[0]
- .partition("-")[2]
+ .partition("-")[2],
)
dupe_key = finding.title + finding.file_path + str(finding.line)
diff --git a/dojo/tools/skf/parser.py b/dojo/tools/skf/parser.py
index 74ec86bba4..887716c509 100644
--- a/dojo/tools/skf/parser.py
+++ b/dojo/tools/skf/parser.py
@@ -32,7 +32,7 @@ def __init__(self):
def map_column_value(self, finding, column_value):
finding.date = datetime.strptime(
- column_value, "%Y-%m-%d %H:%M:%S"
+ column_value, "%Y-%m-%d %H:%M:%S",
).date()
@@ -101,7 +101,7 @@ def get_findings(self, filename, test):
row_number = 0
reader = csv.reader(
- io.StringIO(content), delimiter=",", quotechar='"', escapechar="\\"
+ io.StringIO(content), delimiter=",", quotechar='"', escapechar="\\",
)
dupes = {}
for row in reader:
@@ -116,7 +116,7 @@ def get_findings(self, filename, test):
column_number = 0
for column in row:
chain.process_column(
- column_names[column_number], column, finding
+ column_names[column_number], column, finding,
)
column_number += 1
@@ -127,8 +127,8 @@ def get_findings(self, filename, test):
+ "|"
+ finding.title
+ "|"
- + finding.description
- ).encode("utf-8")
+ + finding.description,
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/snyk/parser.py b/dojo/tools/snyk/parser.py
index 055d278d29..4d1a0e8943 100644
--- a/dojo/tools/snyk/parser.py
+++ b/dojo/tools/snyk/parser.py
@@ -51,7 +51,7 @@ def get_items(self, tree, test):
vulnerabilityTree = tree["vulnerabilities"]
for node in vulnerabilityTree:
item = self.get_item(
- node, test, target_file=target_file, upgrades=upgrades
+ node, test, target_file=target_file, upgrades=upgrades,
)
items[iterator] = item
iterator += 1
@@ -59,7 +59,7 @@ def get_items(self, tree, test):
results = tree["runs"][0]["results"]
for node in results:
item = self.get_code_item(
- node, test
+ node, test,
)
items[iterator] = item
iterator += 1
@@ -70,7 +70,7 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
# or an array for multiple versions depending on the language.
if isinstance(vulnerability["semver"]["vulnerable"], list):
vulnerable_versions = ", ".join(
- vulnerability["semver"]["vulnerable"]
+ vulnerability["semver"]["vulnerable"],
)
else:
vulnerable_versions = vulnerability["semver"]["vulnerable"]
@@ -172,7 +172,7 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
references = ""
if "id" in vulnerability:
references = "**SNYK ID**: https://app.snyk.io/vuln/{}\n\n".format(
- vulnerability["id"]
+ vulnerability["id"],
)
if cwe_references:
@@ -211,7 +211,7 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
for lib in tertiary_upgrade_list
):
finding.unsaved_tags.append(
- f"upgrade_to:{upgraded_pack}"
+ f"upgrade_to:{upgraded_pack}",
)
finding.mitigation += f"\nUpgrade from {current_pack_version} to {upgraded_pack} to fix this issue, as well as updating the following:\n - "
finding.mitigation += "\n - ".join(tertiary_upgrade_list)
diff --git a/dojo/tools/snyk_code/parser.py b/dojo/tools/snyk_code/parser.py
index cd7d74a4a4..a35b37251c 100644
--- a/dojo/tools/snyk_code/parser.py
+++ b/dojo/tools/snyk_code/parser.py
@@ -51,7 +51,7 @@ def get_items(self, tree, test):
vulnerabilityTree = tree["vulnerabilities"]
for node in vulnerabilityTree:
item = self.get_item(
- node, test, target_file=target_file, upgrades=upgrades
+ node, test, target_file=target_file, upgrades=upgrades,
)
items[iterator] = item
iterator += 1
@@ -59,7 +59,7 @@ def get_items(self, tree, test):
results = tree["runs"][0]["results"]
for node in results:
item = self.get_code_item(
- node, test
+ node, test,
)
items[iterator] = item
iterator += 1
@@ -70,7 +70,7 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
# or an array for multiple versions depending on the language.
if isinstance(vulnerability["semver"]["vulnerable"], list):
vulnerable_versions = ", ".join(
- vulnerability["semver"]["vulnerable"]
+ vulnerability["semver"]["vulnerable"],
)
else:
vulnerable_versions = vulnerability["semver"]["vulnerable"]
@@ -168,7 +168,7 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
references = ""
if "id" in vulnerability:
references = "**SNYK ID**: https://app.snyk.io/vuln/{}\n\n".format(
- vulnerability["id"]
+ vulnerability["id"],
)
if cwe_references:
@@ -207,7 +207,7 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
for lib in tertiary_upgrade_list
):
finding.unsaved_tags.append(
- f"upgrade_to:{upgraded_pack}"
+ f"upgrade_to:{upgraded_pack}",
)
finding.mitigation += f"\nUpgrade from {current_pack_version} to {upgraded_pack} to fix this issue, as well as updating the following:\n - "
finding.mitigation += "\n - ".join(tertiary_upgrade_list)
diff --git a/dojo/tools/solar_appscreener/parser.py b/dojo/tools/solar_appscreener/parser.py
index b5655a4292..fc6110ebcd 100644
--- a/dojo/tools/solar_appscreener/parser.py
+++ b/dojo/tools/solar_appscreener/parser.py
@@ -26,7 +26,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
csvarray = []
diff --git a/dojo/tools/sonarqube/parser.py b/dojo/tools/sonarqube/parser.py
index 1f268001d8..9d92c6e3e9 100644
--- a/dojo/tools/sonarqube/parser.py
+++ b/dojo/tools/sonarqube/parser.py
@@ -53,6 +53,6 @@ def get_findings(self, file, test):
raise ValueError(
"Internal error: Invalid mode "
+ self.mode
- + ". Expected: one of None, 'detailed'"
+ + ". Expected: one of None, 'detailed'",
)
return SonarQubeSoprasteriaHTML().get_items(tree, test, self.mode)
diff --git a/dojo/tools/sonarqube/soprasteria_helper.py b/dojo/tools/sonarqube/soprasteria_helper.py
index 99d2c3125f..47ddc3ddf7 100644
--- a/dojo/tools/sonarqube/soprasteria_helper.py
+++ b/dojo/tools/sonarqube/soprasteria_helper.py
@@ -27,7 +27,7 @@ def convert_sonar_severity(self, sonar_severity):
def get_description(self, vuln_details):
rule_description = etree.tostring(
- vuln_details, pretty_print=True
+ vuln_details, pretty_print=True,
).decode("utf-8", errors="replace")
rule_description = rule_description.split("<h2>See", 1)[0]
rule_description = (str(rule_description)).replace("<h2>", "**")
diff --git a/dojo/tools/sonarqube/soprasteria_html.py b/dojo/tools/sonarqube/soprasteria_html.py
index 8865ac618e..c4fb4e688c 100644
--- a/dojo/tools/sonarqube/soprasteria_html.py
+++ b/dojo/tools/sonarqube/soprasteria_html.py
@@ -10,7 +10,7 @@ def get_items(self, tree, test, mode):
# Check that there is at least one vulnerability (the vulnerabilities
# table is absent when no vuln are found)
detailTbody = tree.xpath(
- "/html/body/div[contains(@class,'detail')]/table/tbody"
+ "/html/body/div[contains(@class,'detail')]/table/tbody",
)
dupes = {}
if len(detailTbody) == 2:
@@ -32,7 +32,7 @@ def get_items(self, tree, test, mode):
rule_key = list(vuln_properties[0].iter("a"))[0].text
vuln_rule_name = rule_key and rule_key.strip()
vuln_severity = SonarQubeSoprasteriaHelper().convert_sonar_severity(
- vuln_properties[1].text and vuln_properties[1].text.strip()
+ vuln_properties[1].text and vuln_properties[1].text.strip(),
)
vuln_file_path = vuln_properties[2].text and vuln_properties[2].text.strip()
vuln_line = vuln_properties[3].text and vuln_properties[3].text.strip()
@@ -42,13 +42,13 @@ def get_items(self, tree, test, mode):
if vuln_title is None or vuln_mitigation is None:
raise ValueError(
"Parser ValueError: can't find a title or a mitigation for vulnerability of name "
- + vuln_rule_name
+ + vuln_rule_name,
)
try:
vuln_details = rulesDic[vuln_rule_name]
vuln_description = SonarQubeSoprasteriaHelper().get_description(vuln_details)
vuln_references = SonarQubeSoprasteriaHelper().get_references(
- vuln_rule_name, vuln_details
+ vuln_rule_name, vuln_details,
)
vuln_cwe = SonarQubeSoprasteriaHelper().get_cwe(vuln_references)
except KeyError:
diff --git a/dojo/tools/sonarqube/soprasteria_json.py b/dojo/tools/sonarqube/soprasteria_json.py
index 5feb49343c..aabc637740 100644
--- a/dojo/tools/sonarqube/soprasteria_json.py
+++ b/dojo/tools/sonarqube/soprasteria_json.py
@@ -24,7 +24,7 @@ def get_json_items(self, json_content, test, mode):
if title is None or mitigation is None:
raise ValueError(
"Parser ValueError: can't find a title or a mitigation for vulnerability of name "
- + rule_id
+ + rule_id,
)
try:
@@ -34,7 +34,7 @@ def get_json_items(self, json_content, test, mode):
issue_description = SonarQubeSoprasteriaHelper().get_description(html_desc_as_e_tree)
logger.debug(issue_description)
issue_references = SonarQubeSoprasteriaHelper().get_references(
- rule_id, html_desc_as_e_tree
+ rule_id, html_desc_as_e_tree,
)
issue_cwe = SonarQubeSoprasteriaHelper().get_cwe(issue_references)
except KeyError:
diff --git a/dojo/tools/sonatype/parser.py b/dojo/tools/sonatype/parser.py
index ef2f0df367..e1b7bac167 100644
--- a/dojo/tools/sonatype/parser.py
+++ b/dojo/tools/sonatype/parser.py
@@ -57,7 +57,7 @@ def get_finding(security_issue, component, test):
mitigation=status,
references=reference,
impact=threat_category,
- static_finding=True
+ static_finding=True,
)
if "cwe" in security_issue:
finding.cwe = security_issue["cwe"]
diff --git a/dojo/tools/spotbugs/parser.py b/dojo/tools/spotbugs/parser.py
index 367fd54d49..65ecac2153 100644
--- a/dojo/tools/spotbugs/parser.py
+++ b/dojo/tools/spotbugs/parser.py
@@ -36,8 +36,8 @@ def get_findings(self, filename, test):
# Parse ... html content
html_text = html_parser.handle(
ET.tostring(pattern.find("Details"), method="text").decode(
- "utf-8"
- )
+ "utf-8",
+ ),
)
# Parse mitigation from html
@@ -109,7 +109,7 @@ def get_findings(self, filename, test):
finding.file_path = source_extract.get("sourcepath")
finding.sast_source_object = source_extract.get("classname")
finding.sast_source_file_path = source_extract.get(
- "sourcepath"
+ "sourcepath",
)
if (
"start" in source_extract.attrib
diff --git a/dojo/tools/ssl_labs/parser.py b/dojo/tools/ssl_labs/parser.py
index f70992674a..6a1ff7a7d9 100644
--- a/dojo/tools/ssl_labs/parser.py
+++ b/dojo/tools/ssl_labs/parser.py
@@ -113,7 +113,7 @@ def get_findings(self, filename, test):
for item in endpoints["details"]["suites"]:
for suites in item["list"]:
suite_info = suite_info + self.suite_data(
- suites
+ suites,
)
except Exception:
suite_info = "Not provided." + "\n\n"
@@ -336,16 +336,16 @@ def get_findings(self, filename, test):
find.unsaved_endpoints = []
find.unsaved_endpoints.append(
- Endpoint(host=hostName, port=port, protocol=protocol)
+ Endpoint(host=hostName, port=port, protocol=protocol),
)
if ipAddress:
find.unsaved_endpoints.append(
- Endpoint(host=ipAddress, port=port, protocol=protocol)
+ Endpoint(host=ipAddress, port=port, protocol=protocol),
)
if endpoints["details"]["httpTransactions"]:
for url in endpoints["details"]["httpTransactions"]:
find.unsaved_endpoints.append(
- Endpoint.from_uri(url["requestUrl"])
+ Endpoint.from_uri(url["requestUrl"]),
)
return list(dupes.values())
diff --git a/dojo/tools/sslscan/parser.py b/dojo/tools/sslscan/parser.py
index 421e197442..621ded3daf 100644
--- a/dojo/tools/sslscan/parser.py
+++ b/dojo/tools/sslscan/parser.py
@@ -67,7 +67,7 @@ def get_findings(self, file, test):
if title and description is not None:
dupe_key = hashlib.sha256(
- str(description + title).encode("utf-8")
+ str(description + title).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
finding = dupes[dupe_key]
diff --git a/dojo/tools/sslyze/parser_json.py b/dojo/tools/sslyze/parser_json.py
index 48dc625c04..e364de8faf 100644
--- a/dojo/tools/sslyze/parser_json.py
+++ b/dojo/tools/sslyze/parser_json.py
@@ -256,7 +256,7 @@ def get_heartbleed(node, test, endpoint):
)
vulnerability_id = "CVE-2014-0160"
return get_finding(
- title, description, vulnerability_id, None, test, endpoint
+ title, description, vulnerability_id, None, test, endpoint,
)
elif "result" in heartbleed:
hb_result = heartbleed["result"]
@@ -296,7 +296,7 @@ def get_ccs(node, test, endpoint):
)
vulnerability_id = "CVE-2014-0224"
return get_finding(
- title, description, vulnerability_id, None, test, endpoint
+ title, description, vulnerability_id, None, test, endpoint,
)
elif "result" in ccs_injection:
@@ -354,7 +354,7 @@ def get_renegotiation(node, test, endpoint):
)
if vulnerable:
return get_finding(
- title, description, None, None, test, endpoint
+ title, description, None, None, test, endpoint,
)
elif "result" in renegotiation:
@@ -370,7 +370,7 @@ def get_renegotiation(node, test, endpoint):
+ " has problems with session renegotiation:"
)
return get_finding(
- title, description, None, None, test, endpoint
+ title, description, None, None, test, endpoint,
)
if "supports_secure_renegotiation" in reneg_result:
reneg_secure = reneg_result["supports_secure_renegotiation"]
@@ -381,7 +381,7 @@ def get_renegotiation(node, test, endpoint):
+ " has problems with session renegotiation:"
)
return get_finding(
- title, description, None, None, test, endpoint
+ title, description, None, None, test, endpoint,
)
return None
return None
@@ -401,7 +401,7 @@ def get_weak_protocol(cipher, text, node, test, endpoint):
get_url(endpoint) + " accepts " + text + " connections"
)
return get_finding(
- title, description, None, REFERENCES, test, endpoint
+ title, description, None, REFERENCES, test, endpoint,
)
elif "result" in weak_node:
weak_node_result = weak_node["result"]
@@ -414,7 +414,7 @@ def get_weak_protocol(cipher, text, node, test, endpoint):
get_url(endpoint) + " accepts " + text + " connections"
)
return get_finding(
- title, description, None, REFERENCES, test, endpoint
+ title, description, None, REFERENCES, test, endpoint,
)
return None
return None
@@ -446,7 +446,7 @@ def get_strong_protocol(cipher, text, suites, node, test, endpoint):
description += "\n - " + cs_node["name"]
if unrecommended_cipher_found:
return get_finding(
- title, description, None, REFERENCES, test, endpoint
+ title, description, None, REFERENCES, test, endpoint,
)
elif "result" in strong_node:
@@ -473,7 +473,7 @@ def get_strong_protocol(cipher, text, suites, node, test, endpoint):
description += "\n - " + cs_node["name"]
if unrecommended_cipher_found:
return get_finding(
- title, description, None, REFERENCES, test, endpoint
+ title, description, None, REFERENCES, test, endpoint,
)
return None
return None
@@ -523,7 +523,7 @@ def get_certificate_information(node, test, endpoint):
description += ", version " + version
if vulnerable:
return get_finding(
- title, description, None, None, test, endpoint
+ title, description, None, None, test, endpoint,
)
elif "result" in ci_node:
@@ -565,14 +565,14 @@ def get_certificate_information(node, test, endpoint):
description += ", version " + version
if vulnerable:
return get_finding(
- title, description, None, None, test, endpoint
+ title, description, None, None, test, endpoint,
)
return None
return None
def get_finding(
- title, description, vulnerability_id, references, test, endpoint
+ title, description, vulnerability_id, references, test, endpoint,
):
title += " (" + get_url(endpoint) + ")"
severity = "Medium"
diff --git a/dojo/tools/sslyze/parser_xml.py b/dojo/tools/sslyze/parser_xml.py
index 07c2adcaad..bddda3ac49 100644
--- a/dojo/tools/sslyze/parser_xml.py
+++ b/dojo/tools/sslyze/parser_xml.py
@@ -120,7 +120,7 @@ def get_findings(self, file, test):
if cipher.attrib["name"] in WEAK_CIPHER_LIST:
if cipher.attrib["name"] not in weak_cipher[element.tag]:
weak_cipher[element.tag].append(
- cipher.attrib["name"]
+ cipher.attrib["name"],
)
if len(weak_cipher[element.tag]) > 0:
title = (
@@ -135,7 +135,7 @@ def get_findings(self, file, test):
)
if title and description is not None:
dupe_key = hashlib.md5(
- str(description + title).encode("utf-8")
+ str(description + title).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
finding = dupes[dupe_key]
@@ -158,7 +158,7 @@ def get_findings(self, file, test):
if host is not None:
finding.unsaved_endpoints.append(
Endpoint(
- host=host, port=port, protocol=protocol
- )
+ host=host, port=port, protocol=protocol,
+ ),
)
return dupes.values()
diff --git a/dojo/tools/stackhawk/parser.py b/dojo/tools/stackhawk/parser.py
index 5aa85dfa49..99d708cdc8 100644
--- a/dojo/tools/stackhawk/parser.py
+++ b/dojo/tools/stackhawk/parser.py
@@ -38,7 +38,7 @@ def get_findings(self, json_output, test):
return findings
def __extract_findings(
- self, completed_scan, metadata: StackHawkScanMetadata, test
+ self, completed_scan, metadata: StackHawkScanMetadata, test,
):
findings = {}
@@ -49,19 +49,19 @@ def __extract_findings(
key = raw_finding["pluginId"]
if key not in findings:
finding = self.__extract_finding(
- raw_finding, metadata, test
+ raw_finding, metadata, test,
)
findings[key] = finding
# Update the test description these scan results are linked to.
test.description = "View scan details here: " + self.__hyperlink(
- completed_scan["scan"]["scanURL"]
+ completed_scan["scan"]["scanURL"],
)
return list(findings.values())
def __extract_finding(
- self, raw_finding, metadata: StackHawkScanMetadata, test
+ self, raw_finding, metadata: StackHawkScanMetadata, test,
) -> Finding:
steps_to_reproduce = "Use a specific message link and click 'Validate' to see the cURL!\n\n"
@@ -83,10 +83,10 @@ def __extract_finding(
endpoints.append(endpoint)
are_all_endpoints_risk_accepted = self.__are_all_endpoints_in_status(
- paths, "RISK_ACCEPTED"
+ paths, "RISK_ACCEPTED",
)
are_all_endpoints_false_positive = self.__are_all_endpoints_in_status(
- paths, "FALSE_POSITIVE"
+ paths, "FALSE_POSITIVE",
)
finding = Finding(
diff --git a/dojo/tools/sysdig_reports/sysdig_data.py b/dojo/tools/sysdig_reports/sysdig_data.py
index 24f3019fbf..930c07c411 100644
--- a/dojo/tools/sysdig_reports/sysdig_data.py
+++ b/dojo/tools/sysdig_reports/sysdig_data.py
@@ -9,7 +9,7 @@ def _map_severity(self, severity):
"HIGH": "High",
"MEDIUM": "Medium",
"LOW": "Low",
- "NEGLIGIBLE": "Informational"
+ "NEGLIGIBLE": "Informational",
}
return severity_mapping.get(severity, "Informational")
diff --git a/dojo/tools/talisman/parser.py b/dojo/tools/talisman/parser.py
index 20d2874c40..f3d0413887 100644
--- a/dojo/tools/talisman/parser.py
+++ b/dojo/tools/talisman/parser.py
@@ -76,7 +76,7 @@ def get_findings(self, filename, test):
+ file_path
+ description
+ severity
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/tenable/csv_format.py b/dojo/tools/tenable/csv_format.py
index c88287cf6e..e4a3cd9cd8 100644
--- a/dojo/tools/tenable/csv_format.py
+++ b/dojo/tools/tenable/csv_format.py
@@ -51,7 +51,7 @@ def _format_cve(self, val):
if val is None or val == "":
return None
cve_match = re.findall(
- r"CVE-[0-9]+-[0-9]+", val.upper(), re.IGNORECASE
+ r"CVE-[0-9]+-[0-9]+", val.upper(), re.IGNORECASE,
)
if cve_match:
return cve_match
@@ -130,7 +130,7 @@ def get_findings(self, filename: str, test: Test):
cvss_vector = row.get("CVSS V3 Vector", "")
if cvss_vector != "":
find.cvssv3 = CVSS3(
- "CVSS:3.0/" + str(cvss_vector)
+ "CVSS:3.0/" + str(cvss_vector),
).clean_vector(output_prefix=True)
# Add CVSS score if present
@@ -143,7 +143,7 @@ def get_findings(self, filename: str, test: Test):
# FIXME support more than one CPE in Nessus CSV parser
if len(detected_cpe) > 1:
LOGGER.debug(
- "more than one CPE for a finding. NOT supported by Nessus CSV parser"
+ "more than one CPE for a finding. NOT supported by Nessus CSV parser",
)
cpe_decoded = CPE(detected_cpe[0])
find.component_name = (
diff --git a/dojo/tools/tenable/parser.py b/dojo/tools/tenable/parser.py
index b24b072a68..2c8e00c468 100644
--- a/dojo/tools/tenable/parser.py
+++ b/dojo/tools/tenable/parser.py
@@ -16,7 +16,7 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
if filename.name.lower().endswith(
- ".xml"
+ ".xml",
) or filename.name.lower().endswith(".nessus"):
return TenableXMLParser().get_findings(filename, test)
elif filename.name.lower().endswith(".csv"):
diff --git a/dojo/tools/tenable/xml_format.py b/dojo/tools/tenable/xml_format.py
index d0c231b67d..11f842d2f7 100644
--- a/dojo/tools/tenable/xml_format.py
+++ b/dojo/tools/tenable/xml_format.py
@@ -74,7 +74,7 @@ def get_findings(self, filename: str, test: Test) -> list:
ip = host.attrib.get("name")
fqdn = None
fqdn_element_text = self.safely_get_element_text(
- host.find('.//HostProperties/tag[@name="host-fqdn"]')
+ host.find('.//HostProperties/tag[@name="host-fqdn"]'),
)
if fqdn_element_text is not None:
fqdn = fqdn_element_text
@@ -104,12 +104,12 @@ def get_findings(self, filename: str, test: Test) -> list:
description = ""
plugin_output = None
synopsis_element_text = self.safely_get_element_text(
- item.find("synopsis")
+ item.find("synopsis"),
)
if synopsis_element_text is not None:
description = f"{synopsis_element_text}\n\n"
plugin_output_element_text = self.safely_get_element_text(
- item.find("plugin_output")
+ item.find("plugin_output"),
)
if plugin_output_element_text is not None:
plugin_output = f"Plugin Output: {ip}{str(f':{port}' if port is not None else '')}"
@@ -123,27 +123,27 @@ def get_findings(self, filename: str, test: Test) -> list:
# Build up the impact
impact = ""
description_element_text = self.safely_get_element_text(
- item.find("description")
+ item.find("description"),
)
if description_element_text is not None:
impact = description_element_text + "\n\n"
cvss_element_text = self.safely_get_element_text(
- item.find("cvss")
+ item.find("cvss"),
)
if cvss_element_text is not None:
impact += f"CVSS Score: {cvss_element_text}\n"
cvssv3_element_text = self.safely_get_element_text(
- item.find("cvssv3")
+ item.find("cvssv3"),
)
if cvssv3_element_text is not None:
impact += f"CVSSv3 Score: {cvssv3_element_text}\n"
cvss_vector_element_text = self.safely_get_element_text(
- item.find("cvss_vector")
+ item.find("cvss_vector"),
)
if cvss_vector_element_text is not None:
impact += f"CVSS Vector: {cvss_vector_element_text}\n"
cvssv3_vector_element_text = self.safely_get_element_text(
- item.find("cvss3_vector")
+ item.find("cvss3_vector"),
)
if cvssv3_vector_element_text is not None:
impact += (
@@ -151,14 +151,14 @@ def get_findings(self, filename: str, test: Test) -> list:
)
cvss_base_score_element_text = (
self.safely_get_element_text(
- item.find("cvss_base_score")
+ item.find("cvss_base_score"),
)
)
if cvss_base_score_element_text is not None:
impact += f"CVSS Base Score: {cvss_base_score_element_text}\n"
cvss_temporal_score_element_text = (
self.safely_get_element_text(
- item.find("cvss_temporal_score")
+ item.find("cvss_temporal_score"),
)
)
if cvss_temporal_score_element_text is not None:
@@ -167,7 +167,7 @@ def get_findings(self, filename: str, test: Test) -> list:
# Set the mitigation
mitigation = "N/A"
mitigation_element_text = self.safely_get_element_text(
- item.find("solution")
+ item.find("solution"),
)
if mitigation_element_text is not None:
mitigation = mitigation_element_text
@@ -187,21 +187,21 @@ def get_findings(self, filename: str, test: Test) -> list:
vulnerability_id = None
cve_element_text = self.safely_get_element_text(
- item.find("cve")
+ item.find("cve"),
)
if cve_element_text is not None:
vulnerability_id = cve_element_text
cwe = None
cwe_element_text = self.safely_get_element_text(
- item.find("cwe")
+ item.find("cwe"),
)
if cwe_element_text is not None:
cwe = cwe_element_text
cvssv3 = None
cvssv3_element_text = self.safely_get_element_text(
- item.find("cvss3_vector")
+ item.find("cvss3_vector"),
)
if cvssv3_element_text is not None:
if "CVSS:3.0/" not in cvssv3_element_text:
@@ -209,12 +209,12 @@ def get_findings(self, filename: str, test: Test) -> list:
f"CVSS:3.0/{cvssv3_element_text}"
)
cvssv3 = CVSS3(cvssv3_element_text).clean_vector(
- output_prefix=True
+ output_prefix=True,
)
cvssv3_score = None
cvssv3_score_element_text = self.safely_get_element_text(
- item.find("cvssv3")
+ item.find("cvssv3"),
)
if cvssv3_score_element_text is not None:
cvssv3_score = cvssv3_score_element_text
diff --git a/dojo/tools/terrascan/parser.py b/dojo/tools/terrascan/parser.py
index ebc761f93b..c8b07f4e14 100644
--- a/dojo/tools/terrascan/parser.py
+++ b/dojo/tools/terrascan/parser.py
@@ -55,7 +55,7 @@ def get_findings(self, filename, test):
+ resource_type
+ file
+ str(line)
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/testssl/parser.py b/dojo/tools/testssl/parser.py
index 01369ea439..8eb41184df 100644
--- a/dojo/tools/testssl/parser.py
+++ b/dojo/tools/testssl/parser.py
@@ -20,7 +20,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
dupes = {}
@@ -68,7 +68,7 @@ def get_findings(self, filename, test):
finding.cwe = int(row["cwe"].split("-")[1].strip())
# manage endpoint
finding.unsaved_endpoints = [
- Endpoint(host=row["fqdn/ip"].split("/")[0])
+ Endpoint(host=row["fqdn/ip"].split("/")[0]),
]
if row.get("port") and row["port"].isdigit():
finding.unsaved_endpoints[0].port = int(row["port"])
@@ -80,16 +80,16 @@ def get_findings(self, filename, test):
finding.description,
finding.title,
str(vulnerability),
- ]
- ).encode("utf-8")
+ ],
+ ).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
dupes[dupe_key].unsaved_endpoints.extend(
- finding.unsaved_endpoints
+ finding.unsaved_endpoints,
)
if dupes[dupe_key].unsaved_vulnerability_ids:
dupes[dupe_key].unsaved_vulnerability_ids.extend(
- finding.unsaved_vulnerability_ids
+ finding.unsaved_vulnerability_ids,
)
else:
dupes[
diff --git a/dojo/tools/tfsec/parser.py b/dojo/tools/tfsec/parser.py
index 8e145a92d9..d0bc390f3a 100644
--- a/dojo/tools/tfsec/parser.py
+++ b/dojo/tools/tfsec/parser.py
@@ -47,7 +47,7 @@ def get_findings(self, filename, test):
start_line = item.get("location").get("start_line")
end_line = item.get("location").get("end_line")
description = "\n".join(
- ["Rule ID: " + rule_id, item.get("description")]
+ ["Rule ID: " + rule_id, item.get("description")],
)
impact = item.get("impact")
resolution = item.get("resolution")
@@ -67,7 +67,7 @@ def get_findings(self, filename, test):
+ file
+ str(start_line)
+ str(end_line)
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/threagile/parser.py b/dojo/tools/threagile/parser.py
index 418fabcf31..796d260e6d 100644
--- a/dojo/tools/threagile/parser.py
+++ b/dojo/tools/threagile/parser.py
@@ -44,7 +44,7 @@
"untrusted-deserialization": 502,
"wrong-communication-link": 1008,
"wrong-trust-boudnary-content": 1008,
- "xml-external-entity": 611
+ "xml-external-entity": 611,
}
@@ -92,7 +92,7 @@ def get_items(self, tree, test):
impact=item.get("exploitation_impact"),
severity=severity,
test=test,
- unique_id_from_tool=item.get("synthetic_id")
+ unique_id_from_tool=item.get("synthetic_id"),
)
self.determine_mitigated(finding, item)
self.determine_accepted(finding, item)
diff --git a/dojo/tools/trivy/parser.py b/dojo/tools/trivy/parser.py
index 400f71c36d..e50ce0963b 100644
--- a/dojo/tools/trivy/parser.py
+++ b/dojo/tools/trivy/parser.py
@@ -108,7 +108,7 @@ def get_findings(self, scan_file, test):
if len(service_name) >= 3:
service_name = service_name[:-3]
findings += self.get_result_items(
- test, service.get("Results", []), service_name
+ test, service.get("Results", []), service_name,
)
misconfigurations = data.get("Misconfigurations", [])
for service in misconfigurations:
@@ -125,7 +125,7 @@ def get_findings(self, scan_file, test):
if len(service_name) >= 3:
service_name = service_name[:-3]
findings += self.get_result_items(
- test, service.get("Results", []), service_name
+ test, service.get("Results", []), service_name,
)
resources = data.get("Resources", [])
for resource in resources:
@@ -141,7 +141,7 @@ def get_findings(self, scan_file, test):
if len(resource_name) >= 3:
resource_name = resource_name[:-3]
findings += self.get_result_items(
- test, resource.get("Results", []), resource_name
+ test, resource.get("Results", []), resource_name,
)
return findings
else:
@@ -259,7 +259,7 @@ def get_result_items(self, test, results, service_name=None, artifact_name=""):
target=target_target,
type=misc_type,
description=misc_description,
- message=misc_message
+ message=misc_message,
)
severity = TRIVY_SEVERITIES[misc_severity]
references = None
diff --git a/dojo/tools/trivy_operator/parser.py b/dojo/tools/trivy_operator/parser.py
index 7bd3a3d1da..3e83cfccf0 100644
--- a/dojo/tools/trivy_operator/parser.py
+++ b/dojo/tools/trivy_operator/parser.py
@@ -43,7 +43,7 @@ def get_findings(self, scan_file, test):
findings = []
if report is not None:
resource_namespace = labels.get(
- "trivy-operator.resource.namespace", ""
+ "trivy-operator.resource.namespace", "",
)
resource_kind = labels.get("trivy-operator.resource.kind", "")
resource_name = labels.get("trivy-operator.resource.name", "")
diff --git a/dojo/tools/trivy_operator/vulnerability_handler.py b/dojo/tools/trivy_operator/vulnerability_handler.py
index bdd282648e..13be3e55a4 100644
--- a/dojo/tools/trivy_operator/vulnerability_handler.py
+++ b/dojo/tools/trivy_operator/vulnerability_handler.py
@@ -55,7 +55,7 @@ def handle_vulns(self, service, vulnerabilities, test):
file_path = None
description = DESCRIPTION_TEMPLATE.format(
- title=vulnerability.get("title"), fixed_version=mitigation
+ title=vulnerability.get("title"), fixed_version=mitigation,
)
title = f"{vuln_id} {package_name} {package_version}"
diff --git a/dojo/tools/trufflehog/parser.py b/dojo/tools/trufflehog/parser.py
index 7c6dc905f0..9dd8234d09 100644
--- a/dojo/tools/trufflehog/parser.py
+++ b/dojo/tools/trufflehog/parser.py
@@ -168,7 +168,7 @@ def get_findings_v3(self, data, test):
severity = "Medium"
dupe_key = hashlib.md5(
- (file + detector_name + str(line_number) + commit + (raw + rawV2)).encode("utf-8")
+ (file + detector_name + str(line_number) + commit + (raw + rawV2)).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
@@ -193,7 +193,7 @@ def get_findings_v3(self, data, test):
url="N/A",
dynamic_finding=False,
static_finding=True,
- nb_occurences=1
+ nb_occurences=1,
)
dupes[dupe_key] = finding
@@ -207,7 +207,7 @@ def walk_dict(self, obj, tab_count=1):
for key, value in obj.items():
if isinstance(value, dict):
return_string += self.walk_dict(
- value, tab_count=(tab_count + 1)
+ value, tab_count=(tab_count + 1),
)
continue
else:
diff --git a/dojo/tools/trufflehog3/parser.py b/dojo/tools/trufflehog3/parser.py
index 11cbe68072..c4879bc4cc 100644
--- a/dojo/tools/trufflehog3/parser.py
+++ b/dojo/tools/trufflehog3/parser.py
@@ -142,7 +142,7 @@ def get_finding_current(self, json_data, test, dupes):
description = description[:-1]
dupe_key = hashlib.md5(
- (title + secret + severity + str(line)).encode("utf-8")
+ (title + secret + severity + str(line)).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/trustwave/parser.py b/dojo/tools/trustwave/parser.py
index 229d658802..4e0d1562cc 100644
--- a/dojo/tools/trustwave/parser.py
+++ b/dojo/tools/trustwave/parser.py
@@ -20,7 +20,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
severity_mapping = {
@@ -44,7 +44,7 @@ def get_findings(self, filename, test):
if row.get("Port") is not None and not "" == row.get("Port"):
finding.unsaved_endpoints[0].port = int(row["Port"])
if row.get("Protocol") is not None and not "" == row.get(
- "Protocol"
+ "Protocol",
):
finding.unsaved_endpoints[0].protocol = row["Protocol"]
finding.title = row["Vulnerability Name"]
@@ -60,7 +60,7 @@ def get_findings(self, filename, test):
finding.unsaved_vulnerability_ids = [row.get("CVE")]
dupes_key = hashlib.sha256(
- f"{finding.severity}|{finding.title}|{finding.description}".encode()
+ f"{finding.severity}|{finding.title}|{finding.description}".encode(),
).hexdigest()
if dupes_key in dupes:
diff --git a/dojo/tools/trustwave_fusion_api/parser.py b/dojo/tools/trustwave_fusion_api/parser.py
index 6b6bf2a27a..1e3f14b92e 100644
--- a/dojo/tools/trustwave_fusion_api/parser.py
+++ b/dojo/tools/trustwave_fusion_api/parser.py
@@ -32,12 +32,12 @@ def get_findings(self, file, test):
item = get_item(node, test)
item_key = hashlib.sha256(
- f"{item.severity}|{item.title}|{item.description}".encode()
+ f"{item.severity}|{item.title}|{item.description}".encode(),
).hexdigest()
if item_key in items:
items[item_key].unsaved_endpoints.extend(
- item.unsaved_endpoints
+ item.unsaved_endpoints,
)
items[item_key].nb_occurences += 1
else:
diff --git a/dojo/tools/twistlock/parser.py b/dojo/tools/twistlock/parser.py
index d561555042..53a7f21fd1 100644
--- a/dojo/tools/twistlock/parser.py
+++ b/dojo/tools/twistlock/parser.py
@@ -49,7 +49,7 @@ def parse_issue(self, row, test):
+ "
",
mitigation=data_fix_status,
component_name=textwrap.shorten(
- data_package_name, width=200, placeholder="..."
+ data_package_name, width=200, placeholder="...",
),
component_version=data_package_version,
false_p=False,
@@ -73,7 +73,7 @@ def parse(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
for row in reader:
finding = self.parse_issue(row, test)
@@ -85,7 +85,7 @@ def parse(self, filename, test):
+ finding.title
+ "|"
+ finding.description
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
dupes[key] = finding
@@ -122,7 +122,7 @@ def get_items(self, tree, test):
unique_key = node["id"] + str(
node["packageName"]
+ str(node["packageVersion"])
- + str(node["severity"])
+ + str(node["severity"]),
)
items[unique_key] = item
return list(items.values())
diff --git a/dojo/tools/vcg/parser.py b/dojo/tools/vcg/parser.py
index 9c2bc3a540..0d29448a2c 100644
--- a/dojo/tools/vcg/parser.py
+++ b/dojo/tools/vcg/parser.py
@@ -81,7 +81,7 @@ def parse_issue(self, issue, test):
data.priority = 6
else:
data.priority = int(
- float(self.get_field_from_xml(issue, "Priority"))
+ float(self.get_field_from_xml(issue, "Priority")),
)
data.title = (
@@ -119,7 +119,7 @@ def parse(self, content, test):
+ finding.title
+ "|"
+ finding.description
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
@@ -159,7 +159,7 @@ def parse_issue(self, row, test):
data.priority = 6
else:
data.priority = int(
- float(self.get_field_from_row(row, priority_column))
+ float(self.get_field_from_row(row, priority_column)),
)
data.severity = self.get_field_from_row(row, severity_column)
@@ -187,7 +187,7 @@ def parse(self, content, test):
+ finding.title
+ "|"
+ finding.description
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/veracode/json_parser.py b/dojo/tools/veracode/json_parser.py
index 9e6818effc..fe707b964c 100644
--- a/dojo/tools/veracode/json_parser.py
+++ b/dojo/tools/veracode/json_parser.py
@@ -197,7 +197,7 @@ def add_dynamic_details(self, finding, finding_details, backup_title=None) -> Fi
if url := finding_details.get("url"):
# Create the Endpoint object from the url
finding.unsaved_endpoints.append(
- Endpoint.from_uri(url)
+ Endpoint.from_uri(url),
)
else:
# build it from the other attributes
@@ -210,7 +210,7 @@ def add_dynamic_details(self, finding, finding_details, backup_title=None) -> Fi
host=host,
port=port,
path=path,
- )
+ ),
)
# Add the plugin if available
if plugin := finding_details.get("plugin"):
diff --git a/dojo/tools/veracode/xml_parser.py b/dojo/tools/veracode/xml_parser.py
index ce08e14f45..ecf620aa6c 100644
--- a/dojo/tools/veracode/xml_parser.py
+++ b/dojo/tools/veracode/xml_parser.py
@@ -30,7 +30,7 @@ def get_findings(self, filename, test):
app_id = root.attrib["app_id"]
report_date = datetime.strptime(
- root.attrib["last_update_time"], "%Y-%m-%d %H:%M:%S %Z"
+ root.attrib["last_update_time"], "%Y-%m-%d %H:%M:%S %Z",
)
dupes = {}
@@ -39,13 +39,13 @@ def get_findings(self, filename, test):
# This assumes `<category/>` only exists within the `<severity/>`
# nodes.
for category_node in root.findall(
- "x:severity/x:category", namespaces=XML_NAMESPACE
+ "x:severity/x:category", namespaces=XML_NAMESPACE,
):
# Mitigation text.
mitigation_text = ""
mitigation_text += (
category_node.find(
- "x:recommendations/x:para", namespaces=XML_NAMESPACE
+ "x:recommendations/x:para", namespaces=XML_NAMESPACE,
).get("text")
+ "\n\n"
)
@@ -54,11 +54,11 @@ def get_findings(self, filename, test):
[" * " + x.get("text") + "\n" for x in category_node.findall(
"x:recommendations/x:para/x:bulletitem",
namespaces=XML_NAMESPACE,
- )]
+ )],
)
for flaw_node in category_node.findall(
- "x:cwe/x:staticflaws/x:flaw", namespaces=XML_NAMESPACE
+ "x:cwe/x:staticflaws/x:flaw", namespaces=XML_NAMESPACE,
):
dupe_key = flaw_node.attrib["issueid"]
@@ -66,17 +66,17 @@ def get_findings(self, filename, test):
if dupe_key not in dupes:
# Add to list.
dupes[dupe_key] = self.__xml_static_flaw_to_finding(
- app_id, flaw_node, mitigation_text, test
+ app_id, flaw_node, mitigation_text, test,
)
for flaw_node in category_node.findall(
- "x:cwe/x:dynamicflaws/x:flaw", namespaces=XML_NAMESPACE
+ "x:cwe/x:dynamicflaws/x:flaw", namespaces=XML_NAMESPACE,
):
dupe_key = flaw_node.attrib["issueid"]
if dupe_key not in dupes:
dupes[dupe_key] = self.__xml_dynamic_flaw_to_finding(
- app_id, flaw_node, mitigation_text, test
+ app_id, flaw_node, mitigation_text, test,
)
# Get SCA findings
@@ -98,7 +98,7 @@ def get_findings(self, filename, test):
_version = component.attrib["version"]
for vulnerability in component.findall(
- "x:vulnerabilities/x:vulnerability", namespaces=XML_NAMESPACE
+ "x:vulnerabilities/x:vulnerability", namespaces=XML_NAMESPACE,
):
# We don't have a Id for SCA findings so just generate a random
# one
@@ -121,7 +121,7 @@ def __xml_flaw_to_unique_id(cls, app_id, xml_node):
@classmethod
def __xml_flaw_to_severity(cls, xml_node):
return cls.vc_severity_mapping.get(
- int(xml_node.attrib["severity"]), "Info"
+ int(xml_node.attrib["severity"]), "Info",
)
@classmethod
@@ -133,7 +133,7 @@ def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test):
finding.static_finding = True
finding.dynamic_finding = False
finding.unique_id_from_tool = cls.__xml_flaw_to_unique_id(
- app_id, xml_node
+ app_id, xml_node,
)
# Report values
@@ -189,11 +189,11 @@ def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test):
# This happens if any mitigation (including 'Potential false positive')
# was accepted in VC.
for mitigation in xml_node.findall(
- "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE
+ "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE,
):
_is_mitigated = True
_mitigated_date = datetime.strptime(
- mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z"
+ mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z",
)
finding.is_mitigated = _is_mitigated
finding.mitigated = _mitigated_date
@@ -217,10 +217,10 @@ def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test):
@classmethod
def __xml_static_flaw_to_finding(
- cls, app_id, xml_node, mitigation_text, test
+ cls, app_id, xml_node, mitigation_text, test,
):
finding = cls.__xml_flaw_to_finding(
- app_id, xml_node, mitigation_text, test
+ app_id, xml_node, mitigation_text, test,
)
finding.static_finding = True
finding.dynamic_finding = False
@@ -253,10 +253,10 @@ def __xml_static_flaw_to_finding(
@classmethod
def __xml_dynamic_flaw_to_finding(
- cls, app_id, xml_node, mitigation_text, test
+ cls, app_id, xml_node, mitigation_text, test,
):
finding = cls.__xml_flaw_to_finding(
- app_id, xml_node, mitigation_text, test
+ app_id, xml_node, mitigation_text, test,
)
finding.static_finding = False
finding.dynamic_finding = True
@@ -279,7 +279,7 @@ def _get_cwe(val):
@classmethod
def __xml_sca_flaw_to_finding(
- cls, test, report_date, vendor, library, version, xml_node
+ cls, test, report_date, vendor, library, version, xml_node,
):
# Defaults
finding = Finding()
@@ -311,7 +311,7 @@ def __xml_sca_flaw_to_finding(
xml_node.attrib.get("first_found_date"),
xml_node.attrib["cvss_score"],
cls.vc_severity_mapping.get(
- int(xml_node.attrib["severity"]), "Info"
+ int(xml_node.attrib["severity"]), "Info",
),
xml_node.attrib["cve_summary"],
)
@@ -329,11 +329,11 @@ def __xml_sca_flaw_to_finding(
# This happens if any mitigation (including 'Potential false positive')
# was accepted in VC.
for mitigation in xml_node.findall(
- "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE
+ "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE,
):
_is_mitigated = True
_mitigated_date = datetime.strptime(
- mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z"
+ mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z",
)
finding.is_mitigated = _is_mitigated
finding.mitigated = _mitigated_date
diff --git a/dojo/tools/veracode_sca/parser.py b/dojo/tools/veracode_sca/parser.py
index a37a08cf7e..15de639330 100644
--- a/dojo/tools/veracode_sca/parser.py
+++ b/dojo/tools/veracode_sca/parser.py
@@ -81,7 +81,7 @@ def _get_findings_json(self, file, test):
"Project name: {}\n"
"Title: \n>{}"
"\n\n-----\n\n".format(
- issue.get("project_name"), vulnerability.get("title")
+ issue.get("project_name"), vulnerability.get("title"),
)
)
@@ -119,7 +119,7 @@ def _get_findings_json(self, file, test):
finding.cwe = int(cwe)
finding.references = "\n\n" + issue.get("_links").get("html").get(
- "href"
+ "href",
)
status = issue.get("issue_status")
if (
@@ -144,7 +144,7 @@ def get_findings_csv(self, file, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
csvarray = []
@@ -162,7 +162,7 @@ def get_findings_csv(self, file, test):
issueId = list(row.values())[0]
library = row.get("Library", None)
if row.get("Package manager") == "MAVEN" and row.get(
- "Coordinate 2"
+ "Coordinate 2",
):
library = row.get("Coordinate 2")
version = row.get("Version in use", None)
@@ -178,11 +178,11 @@ def get_findings_csv(self, file, test):
try:
if settings.USE_FIRST_SEEN:
date = datetime.strptime(
- row.get("Issue opened: Scan date"), "%d %b %Y %H:%M%p %Z"
+ row.get("Issue opened: Scan date"), "%d %b %Y %H:%M%p %Z",
)
else:
date = datetime.strptime(
- row.get("Issue opened: Scan date"), "%d %b %Y %H:%M%p %Z"
+ row.get("Issue opened: Scan date"), "%d %b %Y %H:%M%p %Z",
)
except Exception:
date = None
diff --git a/dojo/tools/wapiti/parser.py b/dojo/tools/wapiti/parser.py
index 4245e72f1a..deb6309d5a 100644
--- a/dojo/tools/wapiti/parser.py
+++ b/dojo/tools/wapiti/parser.py
@@ -56,7 +56,7 @@ def get_findings(self, file, test):
if reference_title.startswith("CWE"):
cwe = self.get_cwe(reference_title)
references.append(
- f"* [{reference_title}]({reference.findtext('url')})"
+ f"* [{reference_title}]({reference.findtext('url')})",
)
references = "\n".join(references)
@@ -84,12 +84,12 @@ def get_findings(self, file, test):
finding.unsaved_endpoints = [Endpoint.from_uri(url)]
finding.unsaved_req_resp = [
- {"req": entry.findtext("http_request"), "resp": ""}
+ {"req": entry.findtext("http_request"), "resp": ""},
]
# make dupe hash key
dupe_key = hashlib.sha256(
- str(description + title + severity).encode("utf-8")
+ str(description + title + severity).encode("utf-8"),
).hexdigest()
# check if dupes are present.
if dupe_key in dupes:
diff --git a/dojo/tools/wfuzz/parser.py b/dojo/tools/wfuzz/parser.py
index 2ac1dfbb27..41d4ebeee6 100644
--- a/dojo/tools/wfuzz/parser.py
+++ b/dojo/tools/wfuzz/parser.py
@@ -43,7 +43,7 @@ def get_findings(self, filename, test):
severity = self.severity_mapper(input=return_code)
description = f"The URL {url.to_text()} must not be exposed\n Please review your configuration\n"
dupe_key = hashlib.sha256(
- (url.to_text() + str(return_code)).encode("utf-8")
+ (url.to_text() + str(return_code)).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
@@ -67,10 +67,10 @@ def get_findings(self, filename, test):
host=url.host,
protocol=url.scheme,
port=url.port,
- )
+ ),
]
finding.unsaved_req_resp = [
- {"req": item["payload"], "resp": str(return_code)}
+ {"req": item["payload"], "resp": str(return_code)},
]
dupes[dupe_key] = finding
return list(dupes.values())
diff --git a/dojo/tools/whispers/parser.py b/dojo/tools/whispers/parser.py
index 5c819df6ac..5fa1401459 100644
--- a/dojo/tools/whispers/parser.py
+++ b/dojo/tools/whispers/parser.py
@@ -62,7 +62,7 @@ def get_findings(self, file, test):
references="https://cwe.mitre.org/data/definitions/798.html",
cwe=798,
severity=self.SEVERITY_MAP.get(
- vuln.get("severity"), "Info"
+ vuln.get("severity"), "Info",
),
file_path=vuln.get("file"),
line=int(vuln.get("line")),
@@ -70,7 +70,7 @@ def get_findings(self, file, test):
static_finding=True,
dynamic_finding=False,
test=test,
- )
+ ),
)
return findings
diff --git a/dojo/tools/whitehat_sentinel/parser.py b/dojo/tools/whitehat_sentinel/parser.py
index 77428939ec..c478786b59 100644
--- a/dojo/tools/whitehat_sentinel/parser.py
+++ b/dojo/tools/whitehat_sentinel/parser.py
@@ -43,7 +43,7 @@ def get_findings(self, file, test):
# Convert a WhiteHat Vuln with Attack Vectors to a list of DefectDojo
# findings
dojo_findings = self._convert_whitehat_sentinel_vulns_to_dojo_finding(
- findings_collection["collection"], test
+ findings_collection["collection"], test,
)
# # Loop through each vuln from WhiteHat
@@ -54,7 +54,7 @@ def get_findings(self, file, test):
return dojo_findings
def _convert_whitehat_severity_id_to_dojo_severity(
- self, whitehat_severity_id: int
+ self, whitehat_severity_id: int,
) -> Union[str, None]:
"""
Converts a WhiteHat Sentinel numerical severity to a DefectDojo severity.
@@ -109,12 +109,12 @@ def _parse_description(self, whitehat_sentinel_description: dict):
description = description_chunks[0]
description_ref["description"] = self.__remove_paragraph_tags(
- description
+ description,
)
if len(description_chunks) > 1:
description_ref["reference_link"] = self.__get_href_url(
- description_chunks[1]
+ description_chunks[1],
)
return description_ref
@@ -167,7 +167,7 @@ def __remove_paragraph_tags(self, html_string):
return re.sub(r"<p>|</p>", "", html_string)
def _convert_attack_vectors_to_endpoints(
- self, attack_vectors: List[dict]
+ self, attack_vectors: List[dict],
) -> List["Endpoint"]:
"""
Takes a list of Attack Vectors dictionaries from the WhiteHat vuln API and converts them to Defect Dojo
@@ -182,13 +182,13 @@ def _convert_attack_vectors_to_endpoints(
# This should be in the Endpoint class should it not?
for attack_vector in attack_vectors:
endpoints_list.append(
- Endpoint.from_uri(attack_vector["request"]["url"])
+ Endpoint.from_uri(attack_vector["request"]["url"]),
)
return endpoints_list
def _convert_whitehat_sentinel_vulns_to_dojo_finding(
- self, whitehat_sentinel_vulns: [dict], test: str
+ self, whitehat_sentinel_vulns: [dict], test: str,
):
"""
Converts a WhiteHat Sentinel vuln to a DefectDojo finding
@@ -206,10 +206,10 @@ def _convert_whitehat_sentinel_vulns_to_dojo_finding(
if mitigated_ts is not None:
mitigated_ts = datetime.strptime(mitigated_ts, "%Y-%m-%dT%H:%M:%SZ")
cwe = self._parse_cwe_from_tags(
- whitehat_vuln["attack_vectors"][0].get("scanner_tags", [])
+ whitehat_vuln["attack_vectors"][0].get("scanner_tags", []),
)
description_ref = self._parse_description(
- whitehat_vuln["description"]
+ whitehat_vuln["description"],
)
description = description_ref["description"]
references = (
@@ -225,7 +225,7 @@ def _convert_whitehat_sentinel_vulns_to_dojo_finding(
else whitehat_vuln.get("risk")
)
severity = self._convert_whitehat_severity_id_to_dojo_severity(
- risk_id
+ risk_id,
)
false_positive = whitehat_vuln.get("status") == "invalid"
@@ -233,7 +233,7 @@ def _convert_whitehat_sentinel_vulns_to_dojo_finding(
is_mitigated = not active
dupe_key = hashlib.md5(
- whitehat_vuln["id"].encode("utf-8")
+ whitehat_vuln["id"].encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
@@ -266,7 +266,7 @@ def _convert_whitehat_sentinel_vulns_to_dojo_finding(
# Get Endpoints from Attack Vectors
endpoints = self._convert_attack_vectors_to_endpoints(
- whitehat_vuln["attack_vectors"]
+ whitehat_vuln["attack_vectors"],
)
finding.unsaved_endpoints = endpoints
diff --git a/dojo/tools/wiz/parser.py b/dojo/tools/wiz/parser.py
index 722a7d2dec..a68ecae2bb 100644
--- a/dojo/tools/wiz/parser.py
+++ b/dojo/tools/wiz/parser.py
@@ -86,6 +86,6 @@ def get_findings(self, filename, test):
dynamic_finding=True,
mitigation=row.get("Remediation Recommendation"),
test=test,
- )
+ ),
)
return findings
diff --git a/dojo/tools/wpscan/parser.py b/dojo/tools/wpscan/parser.py
index 30f523265c..70081dc064 100644
--- a/dojo/tools/wpscan/parser.py
+++ b/dojo/tools/wpscan/parser.py
@@ -46,7 +46,7 @@ def get_vulnerabilities(
dynamic_finding=True,
static_finding=False,
scanner_confidence=self._get_scanner_confidence(
- detection_confidence
+ detection_confidence,
),
unique_id_from_tool=vul["references"]["wpvulndb"][0],
nb_occurences=1,
@@ -68,12 +68,12 @@ def get_vulnerabilities(
finding.unsaved_vulnerability_ids = []
for vulnerability_id in vul["references"]["cve"]:
finding.unsaved_vulnerability_ids.append(
- f"CVE-{vulnerability_id}"
+ f"CVE-{vulnerability_id}",
)
# internal de-duplication
dupe_key = hashlib.sha256(
- str(finding.unique_id_from_tool).encode("utf-8")
+ str(finding.unique_id_from_tool).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
@@ -121,13 +121,13 @@ def get_findings(self, file, test):
# manage interesting interesting_findings
for interesting_finding in tree.get("interesting_findings", []):
references = self.generate_references(
- interesting_finding["references"]
+ interesting_finding["references"],
)
description = "\n".join(
[
"**Type:** `" + interesting_finding.get("type") + "`\n",
"**Url:** `" + interesting_finding["url"] + "`\n",
- ]
+ ],
)
if interesting_finding["interesting_entries"]:
description += (
@@ -143,7 +143,7 @@ def get_findings(self, file, test):
dynamic_finding=True,
static_finding=False,
scanner_confidence=self._get_scanner_confidence(
- interesting_finding.get("confidence")
+ interesting_finding.get("confidence"),
),
)
# manage endpoint
@@ -159,8 +159,8 @@ def get_findings(self, file, test):
str(
"interesting_findings"
+ finding.title
- + interesting_finding["url"]
- ).encode("utf-8")
+ + interesting_finding["url"],
+ ).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
diff --git a/dojo/tools/xanitizer/parser.py b/dojo/tools/xanitizer/parser.py
index 0486967517..13a898b9f6 100644
--- a/dojo/tools/xanitizer/parser.py
+++ b/dojo/tools/xanitizer/parser.py
@@ -100,7 +100,7 @@ def generate_title(self, finding, line):
def generate_description(self, finding):
description = "**Description:**\n{}".format(
- finding.find("description").text
+ finding.find("description").text,
)
if finding.find("startNode") is not None:
@@ -108,11 +108,11 @@ def generate_description(self, finding):
endnode = finding.find("endNode")
description = f"{description}\n-----\n"
description = "{}\n**Starting at:** {} - **Line** {}".format(
- description, startnode.get("classFQN"), startnode.get("lineNo")
+ description, startnode.get("classFQN"), startnode.get("lineNo"),
)
description = self.add_code(startnode, False, description)
description = "{}\n\n**Ending at:** {} - **Line** {}".format(
- description, endnode.get("classFQN"), endnode.get("lineNo")
+ description, endnode.get("classFQN"), endnode.get("lineNo"),
)
description = self.add_code(endnode, True, description)
elif finding.find("node") is not None:
@@ -146,11 +146,11 @@ def add_code(self, node, showline, description):
for code in codelines:
if code.text:
description = "{}\n{}: {}".format(
- description, code.get("lineNo"), code.text
+ description, code.get("lineNo"), code.text,
)
else:
description = "{}\n{}: ".format(
- description, code.get("lineNo")
+ description, code.get("lineNo"),
)
return description
@@ -158,11 +158,11 @@ def add_code(self, node, showline, description):
def generate_file_path(self, finding):
if finding.find("endNode") is not None and finding.find("endNode").get(
- "relativePath"
+ "relativePath",
):
return finding.find("endNode").get("relativePath")
elif finding.find("node") is not None and finding.find("node").get(
- "relativePath"
+ "relativePath",
):
return finding.find("node").get("relativePath")
diff --git a/dojo/tools/zap/parser.py b/dojo/tools/zap/parser.py
index f8e983f152..c56ec6169f 100644
--- a/dojo/tools/zap/parser.py
+++ b/dojo/tools/zap/parser.py
@@ -35,10 +35,10 @@ def get_findings(self, file, test):
title=item.findtext("alert"),
description=html2text(item.findtext("desc")),
severity=self.MAPPING_SEVERITY.get(
- item.findtext("riskcode")
+ item.findtext("riskcode"),
),
scanner_confidence=self.MAPPING_CONFIDENCE.get(
- item.findtext("riskcode")
+ item.findtext("riskcode"),
),
mitigation=html2text(item.findtext("solution")),
references=html2text(item.findtext("reference")),
@@ -62,10 +62,10 @@ def get_findings(self, file, test):
if instance.findtext("requestheader") is not None:
# Assemble the request from header and body
request = instance.findtext(
- "requestheader"
+ "requestheader",
) + instance.findtext("requestbody")
response = instance.findtext(
- "responseheader"
+ "responseheader",
) + instance.findtext("responsebody")
else:
# The report is in the regular XML format, without requests and responses.
@@ -81,7 +81,7 @@ def get_findings(self, file, test):
endpoint.fragment = None
finding.unsaved_endpoints.append(endpoint)
finding.unsaved_req_resp.append(
- {"req": request, "resp": response}
+ {"req": request, "resp": response},
)
items.append(finding)
return items
diff --git a/dojo/urls.py b/dojo/urls.py
index b9d9493c66..dd438c8f72 100644
--- a/dojo/urls.py
+++ b/dojo/urls.py
@@ -222,7 +222,7 @@
f"^{get_system_setting('url_prefix')}api/v2/api-token-auth/",
tokenviews.obtain_auth_token,
name='api-token-auth',
- )
+ ),
]
urlpatterns = []
@@ -243,7 +243,7 @@
re_path(r'^robots.txt', lambda x: HttpResponse("User-Agent: *\nDisallow: /", content_type="text/plain"), name="robots_file"),
re_path(r'^manage_files/(?P<oid>\d+)/(?P<obj_type>\w+)$', views.manage_files, name='manage_files'),
re_path(r'^access_file/(?P<fid>\d+)/(?P<oid>\d+)/(?P<obj_type>\w+)$', views.access_file, name='access_file'),
- re_path(r'^{}/(?P<path>.*)$'.format(settings.MEDIA_URL.strip('/')), views.protected_serve, {'document_root': settings.MEDIA_ROOT})
+ re_path(r'^{}/(?P<path>.*)$'.format(settings.MEDIA_URL.strip('/')), views.protected_serve, {'document_root': settings.MEDIA_ROOT}),
]
urlpatterns += api_v2_urls
diff --git a/dojo/user/urls.py b/dojo/user/urls.py
index adf3dd80cb..8dbf0b0686 100644
--- a/dojo/user/urls.py
+++ b/dojo/user/urls.py
@@ -25,7 +25,7 @@
re_path(r'^user/(?P<uid>\d+)/add_product_type_member$', views.add_product_type_member, name='add_product_type_member_user'),
re_path(r'^user/(?P<uid>\d+)/add_product_member$', views.add_product_member, name='add_product_member_user'),
re_path(r'^user/(?P<uid>\d+)/add_group_member$', views.add_group_member, name='add_group_member_user'),
- re_path(r'^user/(?P<uid>\d+)/edit_permissions$', views.edit_permissions, name='edit_user_permissions')
+ re_path(r'^user/(?P<uid>\d+)/edit_permissions$', views.edit_permissions, name='edit_user_permissions'),
]
if settings.FORGOT_PASSWORD:
urlpatterns.extend([
@@ -50,7 +50,7 @@
), name="forgot_username_done"),
re_path(r'^forgot_username/$', views.DojoForgotUsernameView.as_view(
template_name='login/forgot_username.html',
- success_url=reverse_lazy("forgot_username_done")
+ success_url=reverse_lazy("forgot_username_done"),
), name="forgot_username"),
])
diff --git a/dojo/user/views.py b/dojo/user/views.py
index ea60c93fc1..25d4692ea9 100644
--- a/dojo/user/views.py
+++ b/dojo/user/views.py
@@ -126,7 +126,7 @@ def login_view(request):
settings.AUTH0_OAUTH2_ENABLED,
settings.KEYCLOAK_OAUTH2_ENABLED,
settings.GITHUB_ENTERPRISE_OAUTH2_ENABLED,
- settings.SAML2_ENABLED
+ settings.SAML2_ENABLED,
]) == 1 and 'force_login_form' not in request.GET:
if settings.GOOGLE_OAUTH_ENABLED:
social_auth = 'google-oauth2'
@@ -587,7 +587,7 @@ def add_group_member(request, uid):
add_breadcrumb(title=_("Add Group Member"), top_level=False, request=request)
return render(request, 'dojo/new_group_member_user.html', {
'user': user,
- 'form': memberform
+ 'form': memberform,
})
diff --git a/dojo/utils.py b/dojo/utils.py
index d66c538529..2bf51f60f1 100644
--- a/dojo/utils.py
+++ b/dojo/utils.py
@@ -89,14 +89,14 @@ def do_false_positive_history(finding, *args, **kwargs):
existing_findings = match_finding_to_existing_findings(finding, product=finding.test.engagement.product)
deduplicationLogger.debug(
"FALSE_POSITIVE_HISTORY: Found %i existing findings in the same product",
- len(existing_findings)
+ len(existing_findings),
)
existing_fp_findings = existing_findings.filter(false_p=True)
deduplicationLogger.debug(
"FALSE_POSITIVE_HISTORY: Found %i existing findings in the same product "
+ "that were previously marked as false positive",
- len(existing_fp_findings)
+ len(existing_fp_findings),
)
if existing_fp_findings:
@@ -119,7 +119,7 @@ def do_false_positive_history(finding, *args, **kwargs):
for find in to_mark_as_fp:
deduplicationLogger.debug(
"FALSE_POSITIVE_HISTORY: Marking Finding %i:%s from %s as false positive",
- find.id, find.title, find.test.engagement
+ find.id, find.title, find.test.engagement,
)
try:
find.false_p = True
@@ -164,14 +164,14 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t
deduplicationLogger.debug(
'Matching finding %i:%s to existing findings in %s %s using %s as deduplication algorithm.',
- finding.id, finding.title, custom_filter_type, list(custom_filter.values())[0], deduplication_algorithm
+ finding.id, finding.title, custom_filter_type, list(custom_filter.values())[0], deduplication_algorithm,
)
if deduplication_algorithm == 'hash_code':
return (
Finding.objects.filter(
**custom_filter,
- hash_code=finding.hash_code
+ hash_code=finding.hash_code,
).exclude(hash_code=None)
.exclude(id=finding.id)
.order_by('id')
@@ -181,7 +181,7 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t
return (
Finding.objects.filter(
**custom_filter,
- unique_id_from_tool=finding.unique_id_from_tool
+ unique_id_from_tool=finding.unique_id_from_tool,
).exclude(unique_id_from_tool=None)
.exclude(id=finding.id)
.order_by('id')
@@ -193,7 +193,7 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t
(
(Q(hash_code__isnull=False) & Q(hash_code=finding.hash_code))
| (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=finding.unique_id_from_tool))
- )
+ ),
).exclude(id=finding.id).order_by('id')
deduplicationLogger.debug(query.query)
return query
@@ -209,7 +209,7 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t
**custom_filter,
title=finding.title,
severity=finding.severity,
- numerical_severity=Finding.get_numerical_severity(finding.severity)
+ numerical_severity=Finding.get_numerical_severity(finding.severity),
).order_by('id')
)
@@ -625,7 +625,7 @@ def findings_this_period(findings, period_type, stuff, o_stuff, a_stuff):
'one': 0,
'two': 0,
'three': 0,
- 'total': 0
+ 'total': 0,
}
a_count = {
'closed': 0,
@@ -633,7 +633,7 @@ def findings_this_period(findings, period_type, stuff, o_stuff, a_stuff):
'one': 0,
'two': 0,
'three': 0,
- 'total': 0
+ 'total': 0,
}
for f in findings:
if f.mitigated is not None and end_of_period >= f.mitigated >= start_of_period:
@@ -710,7 +710,7 @@ def add_breadcrumb(parent=None,
crumbs = [
{
'title': _('Home'),
- 'url': reverse('home')
+ 'url': reverse('home'),
},
]
if parent is not None and getattr(parent, "get_breadcrumbs", None):
@@ -718,7 +718,7 @@ def add_breadcrumb(parent=None,
else:
crumbs += [{
'title': title,
- 'url': request.get_full_path() if url is None else url
+ 'url': request.get_full_path() if url is None else url,
}]
else:
resolver = get_resolver(None).resolve
@@ -727,12 +727,12 @@ def add_breadcrumb(parent=None,
if title is not None:
obj_crumbs += [{
'title': title,
- 'url': request.get_full_path() if url is None else url
+ 'url': request.get_full_path() if url is None else url,
}]
else:
obj_crumbs = [{
'title': title,
- 'url': request.get_full_path() if url is None else url
+ 'url': request.get_full_path() if url is None else url,
}]
for crumb in crumbs:
@@ -930,13 +930,13 @@ def get_period_counts_legacy(findings,
new_date.year,
new_date.month,
monthrange(new_date.year, new_date.month)[1],
- tzinfo=timezone.get_current_timezone())
+ tzinfo=timezone.get_current_timezone()),
])
else:
risks_a = None
crit_count, high_count, med_count, low_count, _ = [
- 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0,
]
for finding in findings:
if new_date <= datetime.combine(finding.date, datetime.min.time(
@@ -956,7 +956,7 @@ def get_period_counts_legacy(findings,
crit_count, high_count, med_count, low_count, total,
closed_in_range_count])
crit_count, high_count, med_count, low_count, _ = [
- 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0,
]
if risks_a is not None:
for finding in risks_a:
@@ -976,7 +976,7 @@ def get_period_counts_legacy(findings,
return {
'opened_per_period': opened_in_period,
- 'accepted_per_period': accepted_in_period
+ 'accepted_per_period': accepted_in_period,
}
@@ -1023,7 +1023,7 @@ def get_period_counts(findings,
if accepted_findings:
date_range = [
datetime(new_date.year, new_date.month, new_date.day, tzinfo=tz),
- datetime(end_date.year, end_date.month, end_date.day, tzinfo=tz)
+ datetime(end_date.year, end_date.month, end_date.day, tzinfo=tz),
]
try:
risks_a = accepted_findings.filter(risk_acceptance__created__date__range=date_range)
@@ -1033,13 +1033,13 @@ def get_period_counts(findings,
risks_a = None
f_crit_count, f_high_count, f_med_count, f_low_count, _ = [
- 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0,
]
ra_crit_count, ra_high_count, ra_med_count, ra_low_count, _ = [
- 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0,
]
active_crit_count, active_high_count, active_med_count, active_low_count, _ = [
- 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0,
]
for finding in findings:
@@ -1113,7 +1113,7 @@ def get_period_counts(findings,
return {
'opened_per_period': opened_in_period,
'accepted_per_period': accepted_in_period,
- 'active_per_period': active_in_period
+ 'active_per_period': active_in_period,
}
@@ -1191,7 +1191,7 @@ def opened_in_period(start_date, end_date, **kwargs):
out_of_scope=False,
mitigated__isnull=True,
**kwargs,
- severity__in=('Critical', 'High', 'Medium', 'Low')).count()
+ severity__in=('Critical', 'High', 'Medium', 'Low')).count(),
}
for o in opened_in_period:
@@ -2295,7 +2295,7 @@ def __init__(self, *args, **kwargs):
'Engagement': [
(Finding, 'test__engagement'),
(Test, 'engagement')],
- 'Test': [(Finding, 'test')]
+ 'Test': [(Finding, 'test')],
}
@dojo_async_task
@@ -2359,7 +2359,7 @@ def log_user_login(sender, request, user, **kwargs):
logger.info('login user: {user} via ip: {ip}'.format(
user=user.username,
- ip=request.META.get('REMOTE_ADDR')
+ ip=request.META.get('REMOTE_ADDR'),
))
@@ -2368,7 +2368,7 @@ def log_user_logout(sender, request, user, **kwargs):
logger.info('logout user: {user} via ip: {ip}'.format(
user=user.username,
- ip=request.META.get('REMOTE_ADDR')
+ ip=request.META.get('REMOTE_ADDR'),
))
@@ -2378,11 +2378,11 @@ def log_user_login_failed(sender, credentials, request, **kwargs):
if 'username' in credentials:
logger.warning('login failed for: {credentials} via ip: {ip}'.format(
credentials=credentials['username'],
- ip=request.META['REMOTE_ADDR']
+ ip=request.META['REMOTE_ADDR'],
))
else:
logger.error('login failed because of missing username via ip: {ip}'.format(
- ip=request.META['REMOTE_ADDR']
+ ip=request.META['REMOTE_ADDR'],
))
@@ -2514,7 +2514,7 @@ def get_open_findings_burndown(product):
'High': [],
'Medium': [],
'Low': [],
- 'Info': []
+ 'Info': [],
}
# count the number of open findings for the 90-day window
diff --git a/dojo/views.py b/dojo/views.py
index 09a0dcad73..cd22e6ac2d 100644
--- a/dojo/views.py
+++ b/dojo/views.py
@@ -118,7 +118,7 @@ def action_history(request, cid, oid):
"obj": obj,
"test": test,
"object_value": object_value,
- "finding": finding
+ "finding": finding,
})
diff --git a/dojo/widgets.py b/dojo/widgets.py
index 0d0b245e41..83d3267c31 100644
--- a/dojo/widgets.py
+++ b/dojo/widgets.py
@@ -27,6 +27,6 @@ def render(self, name, value, attrs=None, renderer=None):
'paginator': paginator,
'page_number': page_number,
'page': page,
- 'page_param': 'apage'
+ 'page_param': 'apage',
}
return render_to_string(self.template_name, context)
diff --git a/dojo/wsgi.py b/dojo/wsgi.py
index 1f79043d49..0e8b2c7f8c 100644
--- a/dojo/wsgi.py
+++ b/dojo/wsgi.py
@@ -45,7 +45,7 @@ def is_debugger_listening(port):
# Required, otherwise debugpy will try to use the uwsgi binary as the python interpreter - https://github.com/microsoft/debugpy/issues/262
debugpy.configure({
"python": "python",
- "subProcess": True
+ "subProcess": True,
})
debugpy.listen(("0.0.0.0", debugpy_port)) # noqa: T100
if os.environ.get("DD_DEBUG_WAIT_FOR_CLIENT") == "True":
diff --git a/ruff.toml b/ruff.toml
index ed814e15f9..30a62e2c0c 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -41,6 +41,7 @@ select = [
"ASYNC",
"TRIO",
"S2", "S5", "S7",
+ "COM",
"C4",
"T10",
"DJ003", "DJ012", "DJ013",
diff --git a/tests/base_test_class.py b/tests/base_test_class.py
index 8f27bed85b..e676e91916 100644
--- a/tests/base_test_class.py
+++ b/tests/base_test_class.py
@@ -70,7 +70,7 @@ def setUpClass(cls):
dd_driver_options.add_argument("--no-sandbox")
dd_driver_options.add_argument("--disable-dev-shm-usage")
dd_driver_options.add_argument(
- "--disable-gpu"
+ "--disable-gpu",
) # on windows sometimes chrome can't start with certain gpu driver versions, even in headless mode
# start maximized or at least with sufficient with because datatables will hide certain controls when the screen is too narrow
@@ -89,7 +89,7 @@ def setUpClass(cls):
# change path of chromedriver according to which directory you have chromedriver.
print(
- "starting chromedriver with options: ", vars(dd_driver_options), desired
+ "starting chromedriver with options: ", vars(dd_driver_options), desired,
)
# TODO - this filter needs to be removed
@@ -124,14 +124,14 @@ def login_page(self):
driver.find_element(By.ID, "id_username").send_keys(os.environ["DD_ADMIN_USER"])
driver.find_element(By.ID, "id_password").clear()
driver.find_element(By.ID, "id_password").send_keys(
- os.environ["DD_ADMIN_PASSWORD"]
+ os.environ["DD_ADMIN_PASSWORD"],
)
driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click()
self.assertFalse(
self.is_element_by_css_selector_present(
- ".alert-danger", "Please enter a correct username and password"
- )
+ ".alert-danger", "Please enter a correct username and password",
+ ),
)
return driver
@@ -146,8 +146,8 @@ def login_standard_page(self):
self.assertFalse(
self.is_element_by_css_selector_present(
- ".alert-danger", "Please enter a correct username and password"
- )
+ ".alert-danger", "Please enter a correct username and password",
+ ),
)
return driver
@@ -244,7 +244,7 @@ def wait_for_datatable_if_content(self, no_content_id, wrapper_id):
if no_content is None:
# wait for product_wrapper div as datatables javascript modifies the DOM on page load.
WebDriverWait(self.driver, 30).until(
- EC.presence_of_element_located((By.ID, wrapper_id))
+ EC.presence_of_element_located((By.ID, wrapper_id)),
)
def is_element_by_css_selector_present(self, selector, text=None):
@@ -353,7 +353,7 @@ def set_block_execution(self, block_execution=True):
# check if it's enabled after reload
self.assertTrue(
driver.find_element(By.ID, "id_block_execution").is_selected()
- == block_execution
+ == block_execution,
)
return driver
@@ -428,19 +428,19 @@ def assertNoConsoleErrors(self):
print(entry)
print(
- "There was a SEVERE javascript error in the console, please check all steps fromt the current test to see where it happens"
+ "There was a SEVERE javascript error in the console, please check all steps fromt the current test to see where it happens",
)
print(
"Currently there is no reliable way to find out at which url the error happened, but it could be: ."
- + self.driver.current_url
+ + self.driver.current_url,
)
if self.accept_javascript_errors:
print(
- "WARNING: skipping SEVERE javascript error because accept_javascript_errors is True!"
+ "WARNING: skipping SEVERE javascript error because accept_javascript_errors is True!",
)
elif re.search(accepted_javascript_messages, entry["message"]):
print(
- "WARNING: skipping javascript errors related to known issues images, see https://github.com/DefectDojo/django-DefectDojo/blob/master/tests/base_test_class.py#L324"
+ "WARNING: skipping javascript errors related to known issues images, see https://github.com/DefectDojo/django-DefectDojo/blob/master/tests/base_test_class.py#L324",
)
else:
self.assertNotEqual(entry["level"], "SEVERE")
diff --git a/tests/false_positive_history_test.py b/tests/false_positive_history_test.py
index 5d4c4c91f4..d330ffb194 100644
--- a/tests/false_positive_history_test.py
+++ b/tests/false_positive_history_test.py
@@ -102,13 +102,13 @@ def test_retroactive_edit_finding(self):
product_name='QA Test',
engagement_name='FP History Eng 1',
test_name='FP History Test',
- finding_name='Fake Vulnerability for Edit Test'
+ finding_name='Fake Vulnerability for Edit Test',
)
finding_2 = self.create_finding(
product_name='QA Test',
engagement_name='FP History Eng 2',
test_name='FP History Test',
- finding_name='Fake Vulnerability for Edit Test'
+ finding_name='Fake Vulnerability for Edit Test',
)
# Assert that both findings are active
self.assert_is_active(finding_1)
@@ -130,13 +130,13 @@ def test_retroactive_bulk_edit_finding(self):
product_name='QA Test',
engagement_name='FP History Eng 1',
test_name='FP History Test',
- finding_name='Fake Vulnerability for Bulk Edit Test'
+ finding_name='Fake Vulnerability for Bulk Edit Test',
)
finding_2 = self.create_finding(
product_name='QA Test',
engagement_name='FP History Eng 2',
test_name='FP History Test',
- finding_name='Fake Vulnerability for Bulk Edit Test'
+ finding_name='Fake Vulnerability for Bulk Edit Test',
)
# Assert that both findings are active
self.assert_is_active(finding_1)
diff --git a/tests/notifications_test.py b/tests/notifications_test.py
index d6f0b46382..2a5c832ab2 100644
--- a/tests/notifications_test.py
+++ b/tests/notifications_test.py
@@ -136,7 +136,7 @@ def test_user_mail_notifications_change(self):
originally_selected = {
'product_added': driver.find_element(By.XPATH,
"//input[@name='product_added' and @value='mail']").is_selected(),
- 'scan_added': driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").is_selected()
+ 'scan_added': driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").is_selected(),
}
driver.find_element(By.XPATH, "//input[@name='product_added' and @value='mail']").click()
diff --git a/tests/zap.py b/tests/zap.py
index 3516779342..db0f77bf3d 100755
--- a/tests/zap.py
+++ b/tests/zap.py
@@ -76,7 +76,7 @@ class Main:
for alert in zap.core.alerts():
sort_by_url[alert['url']].append({
'risk': alert['risk'],
- 'alert': alert['alert']
+ 'alert': alert['alert'],
})
summary = PrettyTable(["Risk", "Count"])
diff --git a/unittests/dojo_test_case.py b/unittests/dojo_test_case.py
index 1a4468c6f0..2c8cd2abfe 100644
--- a/unittests/dojo_test_case.py
+++ b/unittests/dojo_test_case.py
@@ -176,7 +176,7 @@ def get_new_product_with_jira_project_data(self):
'jira-project-form-push_notes': 'on',
'jira-project-form-product_jira_sla_notification': 'on',
'jira-project-form-custom_fields': 'null',
- 'sla_configuration': 1
+ 'sla_configuration': 1,
}
@@ -207,7 +207,7 @@ def get_product_with_jira_project_data(self, product):
'jira-project-form-push_notes': 'on',
'jira-project-form-product_jira_sla_notification': 'on',
'jira-project-form-custom_fields': 'null',
- 'sla_configuration': 1
+ 'sla_configuration': 1,
}
@@ -223,7 +223,7 @@ def get_product_with_jira_project_data2(self, product):
'jira-project-form-push_notes': 'on',
'jira-project-form-product_jira_sla_notification': 'on',
'jira-project-form-custom_fields': 'null',
- 'sla_configuration': 1
+ 'sla_configuration': 1,
}
diff --git a/unittests/test_api_sonarqube_updater.py b/unittests/test_api_sonarqube_updater.py
index 56d341093f..42f3f65731 100644
--- a/unittests/test_api_sonarqube_updater.py
+++ b/unittests/test_api_sonarqube_updater.py
@@ -15,83 +15,83 @@ def setUp(self):
def test_transitions_for_sonarqube_from_open_1(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('OPEN', 'CONFIRMED'),
- ['confirm']
+ ['confirm'],
)
def test_transitions_for_sonarqube_from_open_2(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('OPEN', 'RESOLVED / FIXED'),
- ['resolve']
+ ['resolve'],
)
def test_transitions_for_sonarqube_from_reopened_1(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('REOPENED', 'RESOLVED / FIXED'),
- ['resolve']
+ ['resolve'],
)
def test_transitions_for_sonarqube_from_reopened_2(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('REOPENED', 'CONFIRMED'),
- ['confirm']
+ ['confirm'],
)
def test_transitions_for_sonarqube_from_resolved_1(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('RESOLVED / FIXED', 'CONFIRMED'),
- ['reopen', 'confirm']
+ ['reopen', 'confirm'],
)
def test_transitions_for_sonarqube_from_resolved_2(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('RESOLVED / FIXED', 'RESOLVED / FALSE-POSITIVE'),
- ['reopen', 'falsepositive']
+ ['reopen', 'falsepositive'],
)
def test_transitions_for_sonarqube_from_resolved_3(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('RESOLVED / FIXED', 'RESOLVED / WONTFIX'),
- ['reopen', 'wontfix']
+ ['reopen', 'wontfix'],
)
def test_transitions_for_sonarqube_fake_target_origin(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('FAKE_STATUS', 'RESOLVED / FIXED'),
- None
+ None,
)
def test_transitions_for_sonarqube_fake_target_status(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('RESOLVED / FIXED', 'FAKE_STATUS'),
- None
+ None,
)
def test_transitions_for_sonarqube_from_confirmed_1(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('CONFIRMED', 'REOPENED'),
- ['unconfirm']
+ ['unconfirm'],
)
def test_transitions_for_sonarqube_from_confirmed_2(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('CONFIRMED', 'RESOLVED / FIXED'),
- ['resolve']
+ ['resolve'],
)
def test_transitions_for_open_reopen_status_1(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('OPEN', 'REOPENED'),
- None
+ None,
)
def test_transitions_for_open_reopen_status_2(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('REOPENED', 'OPEN'),
- None
+ None,
)
def test_transitions_for_open_reopen_status_3(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('REOPENED', 'REOPENED'),
- None
+ None,
)
diff --git a/unittests/test_apiv2_endpoint.py b/unittests/test_apiv2_endpoint.py
index e197fb6eec..b0900f9fe3 100644
--- a/unittests/test_apiv2_endpoint.py
+++ b/unittests/test_apiv2_endpoint.py
@@ -16,13 +16,13 @@ def setUp(self):
def test_endpoint_missing_host_product(self):
r = self.client.post(reverse('endpoint-list'), {
- "host": "FOO.BAR"
+ "host": "FOO.BAR",
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Attribute 'product' is required", r.content.decode("utf-8"))
r = self.client.post(reverse('endpoint-list'), {
- "product": 1
+ "product": 1,
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Host must not be empty", r.content.decode("utf-8"))
@@ -30,13 +30,13 @@ def test_endpoint_missing_host_product(self):
def test_endpoint_add_existing(self):
r = self.client.post(reverse('endpoint-list'), {
"product": 1,
- "host": "FOO.BAR"
+ "host": "FOO.BAR",
}, format='json')
self.assertEqual(r.status_code, 201, r.content[:1000])
r = self.client.post(reverse('endpoint-list'), {
"product": 1,
- "host": "FOO.BAR"
+ "host": "FOO.BAR",
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn('It appears as though an endpoint with this data already '
@@ -44,7 +44,7 @@ def test_endpoint_add_existing(self):
r = self.client.post(reverse('endpoint-list'), {
"product": 1,
- "host": "foo.bar"
+ "host": "foo.bar",
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn('It appears as though an endpoint with this data already '
@@ -53,13 +53,13 @@ def test_endpoint_add_existing(self):
def test_endpoint_change_product(self):
r = self.client.post(reverse('endpoint-list'), {
"product": 1,
- "host": "product1"
+ "host": "product1",
}, format='json')
eid = r.json()['id']
self.assertEqual(r.status_code, 201, r.content[:1000])
r = self.client.patch(reverse('endpoint-detail', args=(eid,)), {
- "product": 2
+ "product": 2,
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Change of product is not possible", r.content.decode("utf-8"))
@@ -67,13 +67,13 @@ def test_endpoint_change_product(self):
def test_endpoint_remove_host(self):
payload = {
"product": 1,
- "host": "host1"
+ "host": "host1",
}
r = self.client.post(reverse('endpoint-list'), payload, format='json')
eid = r.json()['id']
self.assertEqual(r.status_code, 201, r.content[:1000])
r = self.client.patch(reverse('endpoint-detail', args=(eid,)), {
- "host": None
+ "host": None,
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Host must not be empty", r.content.decode("utf-8"))
diff --git a/unittests/test_apiv2_methods_and_endpoints.py b/unittests/test_apiv2_methods_and_endpoints.py
index a3508f9880..6169f28f75 100644
--- a/unittests/test_apiv2_methods_and_endpoints.py
+++ b/unittests/test_apiv2_methods_and_endpoints.py
@@ -51,7 +51,7 @@ def test_is_defined(self):
'questionnaire_answers', 'questionnaire_answered_questionnaires',
'questionnaire_engagement_questionnaires', 'questionnaire_general_questionnaires',
'dojo_group_members', 'product_members', 'product_groups', 'product_type_groups',
- 'product_type_members'
+ 'product_type_members',
]
for reg, _, _ in sorted(self.registry):
if reg in exempt_list:
diff --git a/unittests/test_apiv2_notifications.py b/unittests/test_apiv2_notifications.py
index 06aa7413c6..f45d7433b9 100644
--- a/unittests/test_apiv2_notifications.py
+++ b/unittests/test_apiv2_notifications.py
@@ -16,7 +16,7 @@ def setUp(self):
r = self.create(
template=True,
- scan_added=['alert', 'slack']
+ scan_added=['alert', 'slack'],
)
self.assertEqual(r.status_code, 201)
@@ -27,7 +27,7 @@ def create_test_user(self):
password = 'testTEST1234!@#$'
r = self.client.post(reverse('user-list'), {
"username": "api-user-notification",
- "password": password
+ "password": password,
}, format='json')
return r.json()["id"]
diff --git a/unittests/test_apiv2_user.py b/unittests/test_apiv2_user.py
index 8bdac8b813..e93fb39fa1 100644
--- a/unittests/test_apiv2_user.py
+++ b/unittests/test_apiv2_user.py
@@ -28,7 +28,7 @@ def test_user_list(self):
def test_user_add(self):
# simple user without password
r = self.client.post(reverse('user-list'), {
- "username": "api-user-1"
+ "username": "api-user-1",
}, format='json')
self.assertEqual(r.status_code, 201, r.content[:1000])
@@ -36,21 +36,21 @@ def test_user_add(self):
password = 'testTEST1234!@#$'
r = self.client.post(reverse('user-list'), {
"username": "api-user-2",
- "password": password
+ "password": password,
}, format='json')
self.assertEqual(r.status_code, 201, r.content[:1000])
# test password by fetching API key
r = self.client.post(reverse('api-token-auth'), {
"username": "api-user-2",
- "password": password
+ "password": password,
}, format='json')
self.assertEqual(r.status_code, 200, r.content[:1000])
# user with weak password
r = self.client.post(reverse('user-list'), {
"username": "api-user-3",
- "password": "weakPassword"
+ "password": "weakPassword",
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn('Password must contain at least 1 digit, 0-9.', r.content.decode("utf-8"))
@@ -58,31 +58,31 @@ def test_user_add(self):
def test_user_change_password(self):
# some user
r = self.client.post(reverse('user-list'), {
- "username": "api-user-4"
+ "username": "api-user-4",
}, format='json')
self.assertEqual(r.status_code, 201, r.content[:1000])
user_id = r.json()['id']
r = self.client.put("{}{}/".format(reverse('user-list'), user_id), {
"username": "api-user-4",
- "first_name": "first"
- }, format='json',)
+ "first_name": "first",
+ }, format='json')
self.assertEqual(r.status_code, 200, r.content[:1000])
r = self.client.patch("{}{}/".format(reverse('user-list'), user_id), {
- "last_name": "last"
+ "last_name": "last",
}, format='json')
self.assertEqual(r.status_code, 200, r.content[:1000])
r = self.client.put("{}{}/".format(reverse('user-list'), user_id), {
"username": "api-user-4",
- "password": "testTEST1234!@#$"
+ "password": "testTEST1234!@#$",
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Update of password though API is not allowed", r.content.decode("utf-8"))
r = self.client.patch("{}{}/".format(reverse('user-list'), user_id), {
- "password": "testTEST1234!@#$"
+ "password": "testTEST1234!@#$",
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Update of password though API is not allowed", r.content.decode("utf-8"))
diff --git a/unittests/test_apply_finding_template.py b/unittests/test_apply_finding_template.py
index 3042098b41..5e58bdde62 100644
--- a/unittests/test_apply_finding_template.py
+++ b/unittests/test_apply_finding_template.py
@@ -179,7 +179,7 @@ def test_unauthorized_apply_template_to_finding_fails(self):
'severity': 'High',
'description': 'Finding for Testing Apply Template Functionality',
'mitigation': 'template mitigation',
- 'impact': 'template impact'}
+ 'impact': 'template impact'},
)
self.assertEqual(302, result.status_code)
self.assertIn('login', result.url)
diff --git a/unittests/test_dashboard.py b/unittests/test_dashboard.py
index 8d853c46a9..a5f73a14e8 100644
--- a/unittests/test_dashboard.py
+++ b/unittests/test_dashboard.py
@@ -21,7 +21,7 @@ def create(when: datetime, product_id: int, titles_and_severities: List[Tuple[st
test = Test.objects.create(engagement=engagement, test_type_id=120, target_start=when, target_end=when)
Finding.objects.bulk_create(
(Finding(title=title, test=test, severity=severity, verified=False)
- for title, severity in titles_and_severities)
+ for title, severity in titles_and_severities),
)
@@ -36,7 +36,7 @@ def create_with_duplicates(when: datetime, product_id: int, titles_and_severitie
Finding.objects.bulk_create(
(Finding(title=title, test=test, severity=severity, verified=False,
duplicate=(title in originals_map), duplicate_finding=originals_map.get(title))
- for title, severity in titles_and_severities)
+ for title, severity in titles_and_severities),
)
diff --git a/unittests/test_deduplication_logic.py b/unittests/test_deduplication_logic.py
index 6369d26d3a..46a99090b4 100644
--- a/unittests/test_deduplication_logic.py
+++ b/unittests/test_deduplication_logic.py
@@ -1166,7 +1166,7 @@ def log_findings(self, findings):
+ ': dup: ' + f'{str(finding.duplicate):5.5}' + ': dup_id: '
+ (f'{str(finding.duplicate_finding.id):4.4}' if finding.duplicate_finding else 'None') + ': hash_code: ' + str(finding.hash_code)
+ ': eps: ' + str(finding.endpoints.count()) + ": notes: " + str([n.id for n in finding.notes.all()])
- + ': uid: ' + f'{str(finding.unique_id_from_tool):5.5}' + (' fp' if finding.false_p else '')
+ + ': uid: ' + f'{str(finding.unique_id_from_tool):5.5}' + (' fp' if finding.false_p else ''),
)
logger.debug('\t\tendpoints')
diff --git a/unittests/test_endpoint_model.py b/unittests/test_endpoint_model.py
index 72b0e1843e..69694680df 100644
--- a/unittests/test_endpoint_model.py
+++ b/unittests/test_endpoint_model.py
@@ -112,47 +112,47 @@ def test_url_normalize(self):
def test_get_or_create(self):
_endpoint1, created1 = endpoint_get_or_create(
protocol='http',
- host='bar.foo'
+ host='bar.foo',
)
self.assertTrue(created1)
_endpoint2, created2 = endpoint_get_or_create(
protocol='http',
- host='bar.foo'
+ host='bar.foo',
)
self.assertFalse(created2)
_endpoint3, created3 = endpoint_get_or_create(
protocol='http',
host='bar.foo',
- port=80
+ port=80,
)
self.assertFalse(created3)
_endpoint4, created4 = endpoint_get_or_create(
protocol='http',
host='bar.foo',
- port=8080
+ port=8080,
)
self.assertTrue(created4)
_endpoint5, created5 = endpoint_get_or_create(
protocol='https',
host='bar.foo',
- port=443
+ port=443,
)
self.assertTrue(created5)
_endpoint6, created6 = endpoint_get_or_create(
protocol='https',
- host='bar.foo'
+ host='bar.foo',
)
self.assertFalse(created6)
_endpoint7, created7 = endpoint_get_or_create(
protocol='https',
host='bar.foo',
- port=8443
+ port=8443,
)
self.assertTrue(created7)
@@ -171,7 +171,7 @@ def test_equality_with_one_product_one_without(self):
p = Product.objects.get_or_create(
name="test product",
description="",
- prod_type=Product_Type.objects.get_or_create(name="test pt")[0]
+ prod_type=Product_Type.objects.get_or_create(name="test pt")[0],
)[0]
e1 = Endpoint(host="localhost")
e2 = Endpoint(host="localhost", product=p)
@@ -184,12 +184,12 @@ def test_equality_with_products(self):
p1 = Product.objects.get_or_create(
name="test product 1",
description="",
- prod_type=Product_Type.objects.get_or_create(name="test pt")[0]
+ prod_type=Product_Type.objects.get_or_create(name="test pt")[0],
)[0]
p2 = Product.objects.get_or_create(
name="test product 2",
description="",
- prod_type=Product_Type.objects.get_or_create(name="test pt")[0]
+ prod_type=Product_Type.objects.get_or_create(name="test pt")[0],
)[0]
# Define the endpoints
e1 = Endpoint(host="localhost", product=p1)
@@ -213,13 +213,13 @@ def test_endpoint_status_broken(self):
self.engagement = Engagement.objects.create(
product=self.product,
target_start=datetime.datetime(2020, 1, 1, tzinfo=timezone.utc),
- target_end=datetime.datetime(2022, 1, 1, tzinfo=timezone.utc)
+ target_end=datetime.datetime(2022, 1, 1, tzinfo=timezone.utc),
)
self.test = Test.objects.create(
engagement=self.engagement,
target_start=datetime.datetime(2020, 1, 1, tzinfo=timezone.utc),
target_end=datetime.datetime(2022, 1, 1, tzinfo=timezone.utc),
- test_type_id=1
+ test_type_id=1,
)
from django.contrib.auth import get_user_model
user = get_user_model().objects.create().pk
@@ -233,36 +233,36 @@ def test_endpoint_status_broken(self):
last_modified=datetime.datetime(2021, 4, 1, tzinfo=timezone.utc),
mitigated=False,
finding_id=self.finding,
- endpoint_id=self.endpoint
+ endpoint_id=self.endpoint,
).pk,
'removed_endpoint': Endpoint_Status.objects.create(
date=datetime.datetime(2021, 2, 1, tzinfo=timezone.utc),
last_modified=datetime.datetime(2021, 5, 1, tzinfo=timezone.utc),
mitigated=True,
finding_id=self.another_finding,
- endpoint_id=None
+ endpoint_id=None,
).pk,
'removed_finding': Endpoint_Status.objects.create(
date=datetime.datetime(2021, 2, 1, tzinfo=timezone.utc),
last_modified=datetime.datetime(2021, 5, 1, tzinfo=timezone.utc),
mitigated=True,
finding_id=None,
- endpoint_id=self.another_endpoint
+ endpoint_id=self.another_endpoint,
).pk,
}
Finding.objects.get(id=self.finding).endpoint_status.add(
- Endpoint_Status.objects.get(id=self.endpoint_status['standard'])
+ Endpoint_Status.objects.get(id=self.endpoint_status['standard']),
)
Finding.objects.get(id=self.another_finding).endpoint_status.add(
- Endpoint_Status.objects.get(id=self.endpoint_status['removed_endpoint'])
+ Endpoint_Status.objects.get(id=self.endpoint_status['removed_endpoint']),
)
Endpoint.objects.get(id=self.endpoint).endpoint_status.add(
- Endpoint_Status.objects.get(id=self.endpoint_status['standard'])
+ Endpoint_Status.objects.get(id=self.endpoint_status['standard']),
)
Endpoint.objects.get(id=self.another_endpoint).endpoint_status.add(
- Endpoint_Status.objects.get(id=self.endpoint_status['removed_finding'])
+ Endpoint_Status.objects.get(id=self.endpoint_status['removed_finding']),
)
remove_broken_endpoint_statuses(apps)
diff --git a/unittests/test_false_positive_history_logic.py b/unittests/test_false_positive_history_logic.py
index 29cb18f947..ac949bf5f7 100644
--- a/unittests/test_false_positive_history_logic.py
+++ b/unittests/test_false_positive_history_logic.py
@@ -1683,7 +1683,7 @@ def log_findings(self, findings):
+ ': dup: ' + f'{str(finding.duplicate):5.5}' + ': dup_id: '
+ (f'{str(finding.duplicate_finding.id):4.4}' if finding.duplicate_finding else 'None') + ': hash_code: ' + str(finding.hash_code)
+ ': eps: ' + str(finding.endpoints.count()) + ": notes: " + str([n.id for n in finding.notes.all()])
- + ': uid: ' + f'{str(finding.unique_id_from_tool):5.5}' + (' fp' if finding.false_p else '')
+ + ': uid: ' + f'{str(finding.unique_id_from_tool):5.5}' + (' fp' if finding.false_p else ''),
)
logger.debug('\t\tendpoints')
diff --git a/unittests/test_finding_helper.py b/unittests/test_finding_helper.py
index a3491a423c..1ef97136b5 100644
--- a/unittests/test_finding_helper.py
+++ b/unittests/test_finding_helper.py
@@ -40,7 +40,7 @@ def test_new_finding(self, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- (True, False, False, False, False, None, None, frozen_datetime)
+ (True, False, False, False, False, None, None, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -58,7 +58,7 @@ def test_no_status_change(self, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- status_fields
+ status_fields,
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -70,7 +70,7 @@ def test_mark_fresh_as_mitigated(self, mock_dt):
finding.save()
self.assertEqual(
self.get_status_fields(finding),
- (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime)
+ (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -88,7 +88,7 @@ def test_mark_old_active_as_mitigated(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime)
+ (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -110,7 +110,7 @@ def test_mark_old_active_as_mitigated_custom_edit(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- (False, False, False, False, True, custom_mitigated, self.user_2, frozen_datetime)
+ (False, False, False, False, True, custom_mitigated, self.user_2, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -132,7 +132,7 @@ def test_update_old_mitigated_with_custom_edit(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- (False, False, False, False, True, custom_mitigated, self.user_2, frozen_datetime)
+ (False, False, False, False, True, custom_mitigated, self.user_2, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -155,7 +155,7 @@ def test_update_old_mitigated_with_missing_data(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime)
+ (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -174,7 +174,7 @@ def test_set_old_mitigated_as_active(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- (True, False, False, False, False, None, None, frozen_datetime)
+ (True, False, False, False, False, None, None, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -192,7 +192,7 @@ def test_set_active_as_false_p(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
# TODO marking as false positive resets verified to False, possible bug / undesired behaviour?
- (False, False, True, False, True, frozen_datetime, self.user_1, frozen_datetime)
+ (False, False, True, False, True, frozen_datetime, self.user_1, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -210,7 +210,7 @@ def test_set_active_as_out_of_scope(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
# TODO marking as false positive resets verified to False, possible bug / undesired behaviour?
- (False, False, False, True, True, frozen_datetime, self.user_1, frozen_datetime)
+ (False, False, False, True, True, frozen_datetime, self.user_1, frozen_datetime),
)
diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py
index dfbd9c21ca..45c8ed63fa 100644
--- a/unittests/test_import_reimport.py
+++ b/unittests/test_import_reimport.py
@@ -1491,8 +1491,8 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1},
'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
- 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4}
- }
+ 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4},
+ },
})
test_id = import0['test']
@@ -1541,8 +1541,8 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0},
'low': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0},
'medium': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0},
- 'total': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}}
- }
+ 'total': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}},
+ },
})
with assertTestImportModelsCreated(self, reimports=1, affected_findings=2, closed=1, reactivated=1, untouched=3):
@@ -1591,8 +1591,8 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0},
'low': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0},
'medium': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0},
- 'total': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}}
- }
+ 'total': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}},
+ },
})
# without import history, there are no delta statistics
@@ -1609,8 +1609,8 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1},
'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
- 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4}
- }
+ 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4},
+ },
})
test_id = import0['test']
@@ -1624,7 +1624,7 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1},
'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
- 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4}
+ 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4},
},
'after': {
'info': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
@@ -1632,8 +1632,8 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1},
'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
- 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5}
- }
+ 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5},
+ },
})
with assertTestImportModelsCreated(self, reimports=0, affected_findings=0, closed=0, reactivated=0, untouched=0):
@@ -1646,7 +1646,7 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1},
'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
- 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5}
+ 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5},
},
'after': {
'info': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
@@ -1654,8 +1654,8 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1},
'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
- 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5}
- }
+ 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5},
+ },
})
# Reimport tests to test Scan_Date logic (usecase not supported on UI)
diff --git a/unittests/test_jira_webhook.py b/unittests/test_jira_webhook.py
index 5d7eccd2f5..d88161e46f 100644
--- a/unittests/test_jira_webhook.py
+++ b/unittests/test_jira_webhook.py
@@ -29,11 +29,11 @@ class JIRAWebhookTest(DojoTestCase):
"48x48": "http://www.testjira.com/secure/useravatar?ownerId=valentijn&avatarId=11101",
"24x24": "http://www.testjira.com/secure/useravatar?size=small&ownerId=valentijn&avatarId=11101",
"16x16": "http://www.testjira.com/secure/useravatar?size=x small&ownerId=valentijn&avatarId=11101",
- "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101"
+ "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101",
},
"displayName": "Valentijn Scholten",
"active": "true",
- "timeZone": "Europe/Amsterdam"
+ "timeZone": "Europe/Amsterdam",
},
"body": "test2",
"updateAuthor": {
@@ -43,15 +43,15 @@ class JIRAWebhookTest(DojoTestCase):
"48x48": "http://www.testjira.com/secure/useravatar?ownerId=valentijn&avatarId=11101",
"24x24": "http://www.testjira.com/secure/useravatar?size=small&ownerId=valentijn&avatarId=11101",
"16x16": "http://www.testjira.com/secure/useravatar?size=xsmall&ownerId=valentijn&avatarId=11101",
- "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101"
+ "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101",
},
"displayName": "Valentijn Scholten",
"active": "true",
- "timeZone": "Europe/Amsterdam"
+ "timeZone": "Europe/Amsterdam",
},
"created": "2020-11-11T18:55:21.425+0100",
- "updated": "2020-11-11T18:55:21.425+0100"
- }
+ "updated": "2020-11-11T18:55:21.425+0100",
+ },
}
jira_issue_comment_template_json_with_email = {
@@ -67,11 +67,11 @@ class JIRAWebhookTest(DojoTestCase):
"48x48": "http://www.testjira.com/secure/useravatar?ownerId=valentijn&avatarId=11101",
"24x24": "http://www.testjira.com/secure/useravatar?size=small&ownerId=valentijn&avatarId=11101",
"16x16": "http://www.testjira.com/secure/useravatar?size=x small&ownerId=valentijn&avatarId=11101",
- "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101"
+ "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101",
},
"displayName": "Valentijn Scholten",
"active": "true",
- "timeZone": "Europe/Amsterdam"
+ "timeZone": "Europe/Amsterdam",
},
"body": "test2",
"updateAuthor": {
@@ -81,15 +81,15 @@ class JIRAWebhookTest(DojoTestCase):
"48x48": "http://www.testjira.com/secure/useravatar?ownerId=valentijn&avatarId=11101",
"24x24": "http://www.testjira.com/secure/useravatar?size=small&ownerId=valentijn&avatarId=11101",
"16x16": "http://www.testjira.com/secure/useravatar?size=xsmall&ownerId=valentijn&avatarId=11101",
- "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101"
+ "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101",
},
"displayName": "Valentijn Scholten",
"active": "true",
- "timeZone": "Europe/Amsterdam"
+ "timeZone": "Europe/Amsterdam",
},
"created": "2020-11-11T18:55:21.425+0100",
- "updated": "2020-11-11T18:55:21.425+0100"
- }
+ "updated": "2020-11-11T18:55:21.425+0100",
+ },
}
jira_issue_update_template_string = """
diff --git a/unittests/test_metrics_queries.py b/unittests/test_metrics_queries.py
index 6378248e0e..c8fdc30007 100644
--- a/unittests/test_metrics_queries.py
+++ b/unittests/test_metrics_queries.py
@@ -36,7 +36,7 @@ def test_finding_queries_no_data(self):
product_types = []
finding_queries = utils.finding_queries(
product_types,
- self.request
+ self.request,
)
self.assertSequenceEqual(
@@ -54,7 +54,7 @@ def test_finding_queries(self, mock_timezone):
product_types = []
finding_queries = utils.finding_queries(
product_types,
- self.request
+ self.request,
)
self.assertSequenceEqual(
@@ -71,48 +71,48 @@ def test_finding_queries(self, mock_timezone):
'start_date',
'end_date',
'form',
- ]
+ ],
)
# Assert that we get expected querysets back. This is to be used to
# support refactoring, in attempt of lowering the query count.
self.assertSequenceEqual(
finding_queries['all'].values(),
- []
+ [],
# [{'id': 226, 'title': 'Test Endpoint Mitigation - Finding F1 Without Endpoints', 'date': date(2022, 10, 15), 'sla_start_date': None, 'cwe': None, 'cve': None, 'cvssv3': None, 'cvssv3_score': None, 'url': None, 'severity': 'Info', 'description': 'vulnerability', 'mitigation': '', 'impact': '', 'steps_to_reproduce': '', 'severity_justification': '', 'references': '', 'test_id': 89, 'active': True, 'verified': True, 'false_p': False, 'duplicate': False, 'duplicate_finding_id': None, 'out_of_scope': False, 'risk_accepted': False, 'under_review': False, 'last_status_update': None, 'review_requested_by_id': None, 'under_defect_review': False, 'defect_review_requested_by_id': None, 'is_mitigated': False, 'thread_id': 0, 'mitigated': None, 'mitigated_by_id': None, 'reporter_id': 1, 'numerical_severity': 'S4', 'last_reviewed': None, 'last_reviewed_by_id': None, 'param': None, 'payload': None, 'hash_code': 'a6dd6bd359ff0b504a21b8a7ae5e59f1b40dd0fa1715728bd58de8f688f01b19', 'line': None, 'file_path': '', 'component_name': None, 'component_version': None, 'static_finding': False, 'dynamic_finding': True, 'created': datetime(2022, 10, 15, 23, 12, 52, 966000, tzinfo=pytz.UTC), 'scanner_confidence': None, 'sonarqube_issue_id': None, 'unique_id_from_tool': None, 'vuln_id_from_tool': None, 'sast_source_object': None, 'sast_sink_object': None, 'sast_source_line': None, 'sast_source_file_path': None, 'nb_occurences': None, 'publish_date': None, 'service': None, 'planned_remediation_date': None, 'test__engagement__product__prod_type__member': True, 'test__engagement__product__member': True, 'test__engagement__product__prod_type__authorized_group': False, 'test__engagement__product__authorized_group': False}]
)
self.assertSequenceEqual(
finding_queries['closed'].values(),
- []
+ [],
)
self.assertSequenceEqual(
finding_queries['accepted'].values(),
- []
+ [],
)
self.assertSequenceEqual(
list(finding_queries['accepted_count'].values()),
- [0, 0, 0, 0, 0, 0]
+ [0, 0, 0, 0, 0, 0],
)
self.assertSequenceEqual(
finding_queries['top_ten'].values(),
- []
+ [],
)
self.assertEqual(
list(finding_queries['monthly_counts'].values()),
[
[
{'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
- {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0}
+ {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
],
[
{'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
- {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}
+ {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
],
[
{'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
- {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}
- ]
- ]
+ {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
+ ],
+ ],
)
self.assertEqual(
finding_queries['weekly_counts'],
@@ -120,19 +120,19 @@ def test_finding_queries(self, mock_timezone):
'opened_per_period': [
{'epoch': 1606694400000, 'grouped_date': date(2020, 11, 30), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0},
{'epoch': 1607299200000, 'grouped_date': date(2020, 12, 7), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0},
- {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0}
+ {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0},
],
'accepted_per_period': [
{'epoch': 1606694400000, 'grouped_date': date(2020, 11, 30), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
{'epoch': 1607299200000, 'grouped_date': date(2020, 12, 7), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
- {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}
+ {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
],
'active_per_period': [
{'epoch': 1606694400000, 'grouped_date': date(2020, 11, 30), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
{'epoch': 1607299200000, 'grouped_date': date(2020, 12, 7), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
- {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}
- ]
- }
+ {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
+ ],
+ },
)
self.assertEqual(finding_queries['weeks_between'], 2)
self.assertIsInstance(finding_queries['start_date'], datetime)
@@ -155,7 +155,7 @@ def test_endpoint_queries_no_data(self):
product_types = []
endpoint_queries = utils.endpoint_queries(
product_types,
- self.request
+ self.request,
)
self.assertSequenceEqual(
@@ -169,7 +169,7 @@ def test_endpoint_queries(self):
product_types = []
endpoint_queries = utils.endpoint_queries(
product_types,
- self.request
+ self.request,
)
self.assertSequenceEqual(
@@ -186,7 +186,7 @@ def test_endpoint_queries(self):
'start_date',
'end_date',
'form',
- ]
+ ],
)
# Assert that we get expected querysets back. This is to be used to
@@ -199,7 +199,7 @@ def test_endpoint_queries(self):
{'id': 4, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': True, 'risk_accepted': False, 'endpoint_id': 5, 'finding_id': 229, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False},
{'id': 5, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': True, 'endpoint_id': 5, 'finding_id': 230, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False},
{'id': 7, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': False, 'endpoint_id': 7, 'finding_id': 227, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False},
- {'id': 8, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': False, 'endpoint_id': 8, 'finding_id': 231, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False}
+ {'id': 8, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': False, 'endpoint_id': 8, 'finding_id': 231, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False},
],
)
self.assertSequenceEqual(
@@ -223,16 +223,16 @@ def test_endpoint_queries(self):
[
[
{'epoch': 1590969600000, 'grouped_date': date(2020, 6, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
- {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 5, 'total': 6, 'closed': 0}
+ {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 5, 'total': 6, 'closed': 0},
],
[
{'epoch': 1590969600000, 'grouped_date': date(2020, 6, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
- {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 4, 'total': 5}
+ {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 4, 'total': 5},
],
[
{'epoch': 1590969600000, 'grouped_date': date(2020, 6, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
- {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 1, 'total': 1}
- ]
+ {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 1, 'total': 1},
+ ],
],
)
self.assertEqual(
@@ -241,18 +241,18 @@ def test_endpoint_queries(self):
[
{'epoch': 1592784000000, 'grouped_date': date(2020, 6, 22), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
{'epoch': 1593388800000, 'grouped_date': date(2020, 6, 29), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 5, 'total': 6, 'closed': 0},
- {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0}
+ {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
],
[
{'epoch': 1592784000000, 'grouped_date': date(2020, 6, 22), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
{'epoch': 1593388800000, 'grouped_date': date(2020, 6, 29), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 4, 'total': 5},
- {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}
+ {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
],
[
{'epoch': 1592784000000, 'grouped_date': date(2020, 6, 22), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
{'epoch': 1593388800000, 'grouped_date': date(2020, 6, 29), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 1, 'total': 1},
- {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}
- ]
+ {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
+ ],
],
)
self.assertEqual(endpoint_queries['weeks_between'], 2)
diff --git a/unittests/test_migrations.py b/unittests/test_migrations.py
index 6800f3346e..16b7525c47 100644
--- a/unittests/test_migrations.py
+++ b/unittests/test_migrations.py
@@ -24,13 +24,13 @@ def prepare(self):
self.engagement = Engagement.objects.create(
product_id=self.product.pk,
target_start=datetime.datetime(2020, 1, 1, tzinfo=timezone.utc),
- target_end=datetime.datetime(2022, 1, 1, tzinfo=timezone.utc)
+ target_end=datetime.datetime(2022, 1, 1, tzinfo=timezone.utc),
)
self.test = Test.objects.create(
engagement_id=self.engagement.pk,
target_start=datetime.datetime(2020, 1, 1, tzinfo=timezone.utc),
target_end=datetime.datetime(2022, 1, 1, tzinfo=timezone.utc),
- test_type_id=1
+ test_type_id=1,
)
from django.contrib.auth import get_user_model
user = get_user_model().objects.create().pk
@@ -39,16 +39,16 @@ def prepare(self):
self.endpoint = Endpoint.objects.create(host='foo.bar', product_id=self.product.pk).pk
self.endpoint_status = Endpoint_Status.objects.create(
finding_id=self.finding,
- endpoint_id=self.endpoint
+ endpoint_id=self.endpoint,
).pk
Endpoint.objects.get(id=self.endpoint).endpoint_status.add(
- Endpoint_Status.objects.get(id=self.endpoint_status)
+ Endpoint_Status.objects.get(id=self.endpoint_status),
)
Finding.objects.get(id=self.finding).endpoint_status.add(
- Endpoint_Status.objects.get(id=self.endpoint_status)
+ Endpoint_Status.objects.get(id=self.endpoint_status),
)
Finding.objects.get(id=self.finding).endpoints.add(
- Endpoint.objects.get(id=self.endpoint).pk
+ Endpoint.objects.get(id=self.endpoint).pk,
)
self.presudotest_before_migration()
diff --git a/unittests/test_parsers.py b/unittests/test_parsers.py
index 43ee4e2419..53af54d17a 100644
--- a/unittests/test_parsers.py
+++ b/unittests/test_parsers.py
@@ -27,23 +27,23 @@ def test_file_existence(self):
doc_file = os.path.join(basedir, 'docs', 'content', 'en', 'integrations', 'parsers', category, f"{doc_name}.md")
self.assertTrue(
os.path.isfile(doc_file),
- f"Documentation file '{doc_file}' is missing or using different name"
+ f"Documentation file '{doc_file}' is missing or using different name",
)
with open(doc_file) as file:
content = file.read()
self.assertTrue(re.search("title:", content),
- f"Documentation file '{doc_file}' does not contain a title"
+ f"Documentation file '{doc_file}' does not contain a title",
)
self.assertTrue(re.search("toc_hide: true", content),
- f"Documentation file '{doc_file}' does not contain toc_hide: true"
+ f"Documentation file '{doc_file}' does not contain toc_hide: true",
)
if category == "file":
self.assertTrue(re.search("### Sample Scan Data", content),
- f"Documentation file '{doc_file}' does not contain ### Sample Scan Data"
+ f"Documentation file '{doc_file}' does not contain ### Sample Scan Data",
)
self.assertTrue(re.search("https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans", content),
- f"Documentation file '{doc_file}' does not contain https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans"
+ f"Documentation file '{doc_file}' does not contain https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans",
)
if parser_dir.name not in [
@@ -53,7 +53,7 @@ def test_file_existence(self):
parser_test_file = os.path.join(basedir, 'unittests', 'tools', f"test_{parser_dir.name}_parser.py")
self.assertTrue(
os.path.isfile(parser_test_file),
- f"Unittest of parser '{parser_test_file}' is missing or using different name"
+ f"Unittest of parser '{parser_test_file}' is missing or using different name",
)
if parser_dir.name not in [
@@ -63,7 +63,7 @@ def test_file_existence(self):
scan_dir = os.path.join(basedir, 'unittests', 'scans', parser_dir.name)
self.assertTrue(
os.path.isdir(scan_dir),
- f"Test files for unittest of parser '{scan_dir}' are missing or using different name"
+ f"Test files for unittest of parser '{scan_dir}' are missing or using different name",
)
if category == 'api':
@@ -75,7 +75,7 @@ def test_file_existence(self):
importer_test_file = os.path.join(basedir, 'unittests', 'tools', f"test_{parser_dir.name}_importer.py")
self.assertTrue(
os.path.isfile(importer_test_file),
- f"Unittest of importer '{importer_test_file}' is missing or using different name"
+ f"Unittest of importer '{importer_test_file}' is missing or using different name",
)
for file in os.scandir(os.path.join(basedir, 'dojo', 'tools', parser_dir.name)):
if file.is_file() and file.name != '__pycache__' and file.name != "__init__.py":
@@ -100,11 +100,11 @@ def test_file_existence(self):
def test_parser_existence(self):
for docs in os.scandir(os.path.join(basedir, 'docs', 'content', 'en', 'integrations', 'parsers', 'file')):
if docs.name not in [
- '_index.md', 'codeql.md', 'edgescan.md'
+ '_index.md', 'codeql.md', 'edgescan.md',
]:
with self.subTest(parser=docs.name.split('.md')[0], category='parser'):
parser = os.path.join(basedir, 'dojo', 'tools', f"{docs.name.split('.md')[0]}", "parser.py")
self.assertTrue(
os.path.isfile(parser),
- f"Parser '{parser}' is missing or using different name"
+ f"Parser '{parser}' is missing or using different name",
)
diff --git a/unittests/test_remote_user.py b/unittests/test_remote_user.py
index 28d9a139bd..02dd871169 100644
--- a/unittests/test_remote_user.py
+++ b/unittests/test_remote_user.py
@@ -34,8 +34,8 @@ def test_disabled(self):
def test_basic(self):
resp = self.client1.get('/profile',
headers={
- "Remote-User": self.user.username
- }
+ "Remote-User": self.user.username,
+ },
)
self.assertEqual(resp.status_code, 200)
@@ -53,7 +53,7 @@ def test_update_user(self):
"Remote-Firstname": "new_first",
"Remote-Lastname": "new_last",
"Remote-Email": "new@mail.com",
- }
+ },
)
self.assertEqual(resp.status_code, 200)
updated_user = User.objects.get(pk=self.user.pk)
@@ -72,7 +72,7 @@ def test_update_groups_cleanup(self):
headers={
"Remote-User": self.user.username,
"Remote-Groups": self.group1.name,
- }
+ },
)
self.assertEqual(resp.status_code, 200)
dgms = Dojo_Group_Member.objects.filter(user=self.user)
@@ -83,7 +83,7 @@ def test_update_groups_cleanup(self):
headers={
"Remote-User": self.user.username,
"Remote-Groups": self.group2.name,
- }
+ },
)
self.assertEqual(resp.status_code, 200)
dgms = Dojo_Group_Member.objects.all().filter(user=self.user)
@@ -101,7 +101,7 @@ def test_update_multiple_groups_cleanup(self):
headers={
"Remote-User": self.user.username,
"Remote-Groups": f"{self.group1.name},{self.group2.name}",
- }
+ },
)
self.assertEqual(resp.status_code, 200)
dgms = Dojo_Group_Member.objects.filter(user=self.user)
@@ -118,7 +118,7 @@ def test_update_groups_no_cleanup(self):
headers={
"Remote-User": self.user.username,
"Remote-Groups": self.group1.name,
- }
+ },
)
self.assertEqual(resp.status_code, 200)
@@ -126,7 +126,7 @@ def test_update_groups_no_cleanup(self):
headers={
"Remote-User": self.user.username,
"Remote-Groups": self.group2.name,
- }
+ },
)
self.assertEqual(resp.status_code, 200)
dgms = Dojo_Group_Member.objects.filter(user=self.user)
@@ -142,7 +142,7 @@ def test_trusted_proxy(self):
REMOTE_ADDR='192.168.0.42',
headers={
"Remote-User": self.user.username,
- }
+ },
)
self.assertEqual(resp.status_code, 200)
@@ -157,7 +157,7 @@ def test_untrusted_proxy(self):
REMOTE_ADDR='192.168.1.42',
headers={
"Remote-User": self.user.username,
- }
+ },
)
self.assertEqual(resp.status_code, 302)
self.assertIn('Requested came from untrusted proxy', cm.output[0])
diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py
index ce1ad77da1..242c95d223 100644
--- a/unittests/test_rest_framework.py
+++ b/unittests/test_rest_framework.py
@@ -406,7 +406,7 @@ def test_detail_prefetch(self):
current_objects = self.client.get(self.url, format='json').data
relative_url = self.url + '{}/'.format(current_objects['results'][0]['id'])
response = self.client.get(relative_url, data={
- "prefetch": ','.join(prefetchable_fields)
+ "prefetch": ','.join(prefetchable_fields),
})
self.assertEqual(200, response.status_code)
@@ -496,7 +496,7 @@ def test_list_prefetch(self):
prefetchable_fields = [x[0] for x in _get_prefetchable_fields(self.viewset.serializer_class)]
response = self.client.get(self.url, data={
- "prefetch": ','.join(prefetchable_fields)
+ "prefetch": ','.join(prefetchable_fields),
})
self.assertEqual(200, response.status_code)
@@ -830,7 +830,7 @@ def __init__(self, *args, **kwargs):
'icon': '',
'website': '',
'website_found': '',
- 'created': '2018-08-16T16:58:23.908Z'
+ 'created': '2018-08-16T16:58:23.908Z',
}
self.update_fields = {'version': '9.0'}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -902,7 +902,7 @@ def test_update_patch_unsuccessful(self):
unsucessful_payload = {
'endpoint': object2['endpoint'],
- 'finding': object2['finding']
+ 'finding': object2['finding'],
}
relative_url = self.url + '{}/'.format(object1['id'])
@@ -923,7 +923,7 @@ def test_update_put_unsuccessful(self):
unsucessful_payload = {
'endpoint': object2['endpoint'],
- 'finding': object2['finding']
+ 'finding': object2['finding'],
}
relative_url = self.url + '{}/'.format(object1['id'])
@@ -948,7 +948,7 @@ def __init__(self, *args, **kwargs):
'query': 'test=true',
'fragment': 'test-1',
'product': 1,
- "tags": ["mytag", "yourtag"]
+ "tags": ["mytag", "yourtag"],
}
self.update_fields = {'protocol': 'ftp', 'tags': ['one_new_tag']}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -979,7 +979,7 @@ def __init__(self, *args, **kwargs):
"reason": "",
"test_strategy": "",
"product": "1",
- "tags": ["mytag"]
+ "tags": ["mytag"],
}
self.update_fields = {'version': 'latest'}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -1017,9 +1017,9 @@ def __init__(self, *args, **kwargs):
"updated": "2023-09-15T17:17:39.462854Z",
"owner": 1,
"accepted_findings": [
- 226
+ 226,
],
- "notes": []
+ "notes": [],
}
self.update_fields = {'name': 'newName'}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -1054,9 +1054,9 @@ def test_update_forbidden_engagement(self):
"updated": "2023-09-15T17:17:39.462854Z",
"owner": 1,
"accepted_findings": [
- 4
+ 4,
],
- "notes": []
+ "notes": [],
}
current_objects = self.client.get(self.url, format='json').data
relative_url = self.url + '{}/'.format(current_objects['results'][0]['id'])
@@ -1076,7 +1076,7 @@ def setUp(self):
def test_request_response_post(self):
length = BurpRawRequestResponse.objects.count()
payload = {
- "req_resp": [{"request": "POST", "response": "200"}]
+ "req_resp": [{"request": "POST", "response": "200"}],
}
response = self.client.post('/api/v2/findings/7/request_response/', dumps(payload), content_type='application/json')
self.assertEqual(200, response.status_code, response.content[:1000])
@@ -1102,7 +1102,7 @@ def setUp(self):
self.url_levels = {
'findings/7': 0,
'tests/3': 0,
- 'engagements/1': 0
+ 'engagements/1': 0,
}
def test_request_response_post_and_download(self):
@@ -1112,7 +1112,7 @@ def test_request_response_post_and_download(self):
with open(f'{str(self.path)}/scans/acunetix/one_finding.xml') as testfile:
payload = {
"title": level,
- "file": testfile
+ "file": testfile,
}
response = self.client.post(f'/api/v2/{level}/files/', payload)
self.assertEqual(201, response.status_code, response.data)
@@ -1355,7 +1355,7 @@ def __init__(self, *args, **kwargs):
"high_mapping_severity": "LOW",
"critical_mapping_severity": "LOW",
"finding_text": "",
- "global_jira_sla_notification": False
+ "global_jira_sla_notification": False,
}
self.update_fields = {'epic_name_id': 1}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -1424,7 +1424,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"key": "AREwS5n5TxsFUNm31CxP",
"status": "OPEN",
- "type": "VULNERABILITY"
+ "type": "VULNERABILITY",
}
self.update_fields = {'key': 'AREwS5n5TxsFUNm31CxP'}
self.test_type = TestType.STANDARD
@@ -1444,7 +1444,7 @@ def __init__(self, *args, **kwargs):
"sonarqube_issue": 1,
"finding_status": "Active, Verified",
"sonarqube_status": "OPEN",
- "transitions": "confirm"
+ "transitions": "confirm",
}
self.update_fields = {'sonarqube_status': 'CLOSED'}
self.test_type = TestType.STANDARD
@@ -1462,7 +1462,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"product": 2,
"service_key_1": "dojo_sonar_key",
- "tool_configuration": 3
+ "tool_configuration": 3,
}
self.update_fields = {'tool_configuration': 2}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -1489,7 +1489,7 @@ def __init__(self, *args, **kwargs):
"prod_type": 1,
"name": "Test Product",
"description": "test product",
- "tags": ["mytag", "yourtag"]
+ "tags": ["mytag", "yourtag"],
}
self.update_fields = {'prod_type': 2}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -1629,7 +1629,7 @@ def __init__(self, *args, **kwargs):
self.viewset = ToolTypesViewSet
self.payload = {
"name": "Tool Type",
- "description": "test tool type"
+ "description": "test tool type",
}
self.update_fields = {'description': 'changed description'}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -1650,7 +1650,7 @@ def __init__(self, *args, **kwargs):
"description": "not that much",
"is_single": False,
"is_active": True,
- "is_mandatory": False
+ "is_mandatory": False,
}
self.update_fields = {'description': 'changed description'}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -1670,7 +1670,7 @@ def __init__(self, *args, **kwargs):
"id": 1,
"entry": "updated_entry",
"author": '{"username": "admin"}',
- "editor": '{"username": "user1"}'
+ "editor": '{"username": "user1"}',
}
self.update_fields = {'entry': 'changed entry'}
self.test_type = TestType.STANDARD
@@ -1691,7 +1691,7 @@ def __init__(self, *args, **kwargs):
"last_name": "user",
"email": "example@email.com",
"is_active": True,
- "configuration_permissions": [217, 218]
+ "configuration_permissions": [217, 218],
}
self.update_fields = {"first_name": "test changed", "configuration_permissions": [219, 220]}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -1841,7 +1841,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_engageme
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -1873,7 +1873,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product(
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -1905,7 +1905,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product_
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -1938,7 +1938,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -1949,7 +1949,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s
Permissions.Engagement_Add),
call(User.objects.get(username='admin'),
Product.objects.get(id=1),
- Permissions.Import_Scan_Result)
+ Permissions.Import_Scan_Result),
])
importer_mock.assert_called_once()
reimporter_mock.assert_not_called()
@@ -1974,7 +1974,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_product(self
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2006,7 +2006,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_product_type
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2136,7 +2136,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2147,7 +2147,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s
Permissions.Engagement_Add),
call(User.objects.get(username='admin'),
Product.objects.get(id=1),
- Permissions.Import_Scan_Result)
+ Permissions.Import_Scan_Result),
])
importer_mock.assert_called_once()
reimporter_mock.assert_not_called()
@@ -2173,7 +2173,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_product(self
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2205,7 +2205,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_product_type
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2231,7 +2231,7 @@ def test_create_not_authorized_test_id(self, mock, importer_mock, reimporter_moc
"scan_type": 'ZAP Scan',
"file": testfile,
"test": 3,
- "version": "1.0.1"
+ "version": "1.0.1",
}
response = self.client.post(self.url, payload)
self.assertEqual(403, response.status_code, response.content[:1000])
@@ -2263,7 +2263,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_engageme
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2295,7 +2295,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product(
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2327,7 +2327,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product_
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2407,7 +2407,7 @@ def __init__(self, *args, **kwargs):
"name": "Test Product Type",
"description": "Test",
"key_product": True,
- "critical_product": False
+ "critical_product": False,
}
self.update_fields = {'description': "changed"}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2514,7 +2514,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"group": 1,
"user": 3,
- "role": 4
+ "role": 4,
}
self.update_fields = {'role': 3}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2548,7 +2548,7 @@ def __init__(self, *args, **kwargs):
self.viewset = GlobalRoleViewSet
self.payload = {
"user": 2,
- "role": 2
+ "role": 2,
}
self.update_fields = {'role': 3}
self.test_type = TestType.STANDARD
@@ -2567,7 +2567,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"product_type": 1,
"user": 3,
- "role": 2
+ "role": 2,
}
self.update_fields = {'role': 3}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2590,7 +2590,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"product": 3,
"user": 2,
- "role": 2
+ "role": 2,
}
self.update_fields = {'role': 3}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2613,7 +2613,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"product_type": 1,
"group": 2,
- "role": 2
+ "role": 2,
}
self.update_fields = {'role': 3}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2636,7 +2636,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"product": 1,
"group": 2,
- "role": 2
+ "role": 2,
}
self.update_fields = {'role': 3}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2659,7 +2659,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
'language': 'Test',
'color': 'red',
- 'created': '2018-08-16T16:58:23.908Z'
+ 'created': '2018-08-16T16:58:23.908Z',
}
self.update_fields = {'color': 'blue'}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -2683,7 +2683,7 @@ def __init__(self, *args, **kwargs):
'blank': 3,
'comment': 4,
'code': 5,
- 'created': '2018-08-16T16:58:23.908Z'
+ 'created': '2018-08-16T16:58:23.908Z',
}
self.update_fields = {'code': 10}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2705,7 +2705,7 @@ def __init__(self, *args, **kwargs):
self.viewset = ImportLanguagesView
self.payload = {
'product': 1,
- 'file': open("unittests/files/defectdojo_cloc.json")
+ 'file': open("unittests/files/defectdojo_cloc.json"),
}
self.test_type = TestType.OBJECT_PERMISSIONS
self.permission_check_class = Languages
@@ -2748,7 +2748,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
'product': 1,
'user': 3,
- 'product_type_added': ["alert", "msteams"]
+ 'product_type_added': ["alert", "msteams"],
}
self.update_fields = {'product_added': ["alert", "msteams"]}
self.test_type = TestType.STANDARD
@@ -2794,7 +2794,7 @@ def __init__(self, *args, **kwargs):
self.viewname = 'development_environment'
self.viewset = DevelopmentEnvironmentViewSet
self.payload = {
- 'name': 'Test_1'
+ 'name': 'Test_1',
}
self.update_fields = {'name': 'Test_2'}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -2817,7 +2817,7 @@ def __init__(self, *args, **kwargs):
self.viewname = 'test_type'
self.viewset = TestTypesViewSet
self.payload = {
- 'name': 'Test_1'
+ 'name': 'Test_1',
}
self.update_fields = {'name': 'Test_2'}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -2848,7 +2848,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
'cred_id': 1,
'product': 1,
- 'url': 'https://google.com'
+ 'url': 'https://google.com',
}
self.update_fields = {'url': 'https://bing.com'}
self.test_type = TestType.OBJECT_PERMISSIONS
diff --git a/unittests/test_risk_acceptance.py b/unittests/test_risk_acceptance.py
index 43a0bd578a..6a7961affa 100644
--- a/unittests/test_risk_acceptance.py
+++ b/unittests/test_risk_acceptance.py
@@ -35,7 +35,7 @@ class RiskAcceptanceTestUI(DojoTestCase):
# 'path: (binary)
'owner': 1,
'expiration_date': '2021-07-15',
- 'reactivate_expired': True
+ 'reactivate_expired': True,
}
data_remove_finding_from_ra = {
@@ -53,7 +53,7 @@ def setUp(self):
def add_risk_acceptance(self, eid, data_risk_accceptance, fid=None):
if fid:
- args = (eid, fid, )
+ args = (eid, fid)
else:
args = (eid, )
@@ -103,7 +103,7 @@ def test_add_findings_to_risk_acceptance_findings_accepted(self):
data_add_findings_to_ra = {
'add_findings': 'Add Selected Findings',
- 'accepted_findings': [4, 5]
+ 'accepted_findings': [4, 5],
}
response = self.client.post(reverse('view_risk_acceptance', args=(1, ra.id)),
@@ -133,7 +133,7 @@ def test_remove_risk_acceptance_findings_active(self):
data = {'id': ra.id}
- self.client.post(reverse('delete_risk_acceptance', args=(1, ra.id, )), data)
+ self.client.post(reverse('delete_risk_acceptance', args=(1, ra.id)), data)
self.assert_all_active_not_risk_accepted(findings)
self.assert_all_active_not_risk_accepted(Finding.objects.filter(test__engagement=1))
@@ -148,7 +148,7 @@ def test_expire_risk_acceptance_findings_active(self):
data = {'id': ra.id}
- self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)
+ self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id)), data)
ra.refresh_from_db()
self.assert_all_active_not_risk_accepted(findings)
@@ -170,7 +170,7 @@ def test_expire_risk_acceptance_findings_not_active(self):
data = {'id': ra.id}
- self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)
+ self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id)), data)
ra.refresh_from_db()
# no reactivation on expiry
@@ -193,7 +193,7 @@ def test_expire_risk_acceptance_sla_not_reset(self):
data = {'id': ra.id}
- self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)
+ self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id)), data)
ra.refresh_from_db()
@@ -209,7 +209,7 @@ def test_expire_risk_acceptance_sla_reset(self):
data = {'id': ra.id}
- self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)
+ self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id)), data)
ra.refresh_from_db()
@@ -224,7 +224,7 @@ def test_reinstate_risk_acceptance_findings_accepted(self):
data = {'id': ra.id}
- self.client.post(reverse('reinstate_risk_acceptance', args=(1, ra.id, )), data)
+ self.client.post(reverse('reinstate_risk_acceptance', args=(1, ra.id)), data)
ra.refresh_from_db()
expiration_delta_days = get_system_setting('risk_acceptance_form_default_days', 90)
diff --git a/unittests/test_search_parser.py b/unittests/test_search_parser.py
index 06115c7e07..9e4d221fc5 100644
--- a/unittests/test_search_parser.py
+++ b/unittests/test_search_parser.py
@@ -58,7 +58,7 @@ def test_parse_query(self):
self.assertEqual(keywords[1], "space inside")
operators, keywords = parse_search_query(
- "tags:anchore cve:CVE-2020-1234 jquery tags:beer"
+ "tags:anchore cve:CVE-2020-1234 jquery tags:beer",
)
self.assertEqual(len(operators), 2)
self.assertEqual(len(operators["tags"]), 2)
diff --git a/unittests/test_utils.py b/unittests/test_utils.py
index 70c8a9c8c0..3bf031ba10 100644
--- a/unittests/test_utils.py
+++ b/unittests/test_utils.py
@@ -198,8 +198,8 @@ def __exit__(self, exc_type, exc_value, exc_traceback):
self.test_case.assertEqual(
created_count, self.num,
"%i %s objects created, %i expected. query: %s, first 100 objects: %s" % (
- created_count, self.queryset.model, self.num, self.queryset.query, self.queryset.all().order_by('-id')[:100]
- )
+ created_count, self.queryset.model, self.num, self.queryset.query, self.queryset.all().order_by('-id')[:100],
+ ),
)
@@ -222,7 +222,7 @@ def assertTestImportModelsCreated(test_case, imports=0, reimports=0, affected_fi
tifa_created_count,
tifa_closed_count,
tifa_reactivated_count,
- tifa_untouched_count
+ tifa_untouched_count,
)
diff --git a/unittests/tools/test_anchore_enterprise_parser.py b/unittests/tools/test_anchore_enterprise_parser.py
index 1d286d8a84..b7badd1571 100644
--- a/unittests/tools/test_anchore_enterprise_parser.py
+++ b/unittests/tools/test_anchore_enterprise_parser.py
@@ -45,22 +45,22 @@ def test_anchore_policy_check_extract_vulnerability_id(self):
def test_anchore_policy_check_parser_search_filepath(self):
file_path = search_filepath(
- "MEDIUM Vulnerability found in non-os package type (python) - /usr/lib64/python2.7/lib-dynload/Python (CVE-2014-4616 - https://nvd.nist.gov/vuln/detail/CVE-2014-4616)"
+ "MEDIUM Vulnerability found in non-os package type (python) - /usr/lib64/python2.7/lib-dynload/Python (CVE-2014-4616 - https://nvd.nist.gov/vuln/detail/CVE-2014-4616)",
)
self.assertEqual("/usr/lib64/python2.7/lib-dynload/Python", file_path)
file_path = search_filepath(
- "HIGH Vulnerability found in non-os package type (java) - /root/.m2/repository/org/apache/struts/struts-core/1.3.8/struts-core-1.3.8.jar (CVE-2015-0899 - https://nvd.nist.gov/vuln/detail/CVE-2015-0899)"
+ "HIGH Vulnerability found in non-os package type (java) - /root/.m2/repository/org/apache/struts/struts-core/1.3.8/struts-core-1.3.8.jar (CVE-2015-0899 - https://nvd.nist.gov/vuln/detail/CVE-2015-0899)",
)
self.assertEqual(
"/root/.m2/repository/org/apache/struts/struts-core/1.3.8/struts-core-1.3.8.jar",
file_path,
)
file_path = search_filepath(
- "test /usr/local/bin/ag package type (java) - /root/.m2/repository/org/apache/struts/struts-core/1.3.8/struts-core-1.3.8.jar (CVE-2015-0899 - https://nvd.nist.gov/vuln/detail/CVE-2015-0899)"
+ "test /usr/local/bin/ag package type (java) - /root/.m2/repository/org/apache/struts/struts-core/1.3.8/struts-core-1.3.8.jar (CVE-2015-0899 - https://nvd.nist.gov/vuln/detail/CVE-2015-0899)",
)
self.assertEqual("/usr/local/bin/ag", file_path)
file_path = search_filepath(
- "HIGH Vulnerability found in os package type (rpm) - kernel-headers (RHSA-2017:0372 - https://access.redhat.com/errata/RHSA-2017:0372)"
+ "HIGH Vulnerability found in os package type (rpm) - kernel-headers (RHSA-2017:0372 - https://access.redhat.com/errata/RHSA-2017:0372)",
)
self.assertEqual("", file_path)
file_path = search_filepath("test")
diff --git a/unittests/tools/test_api_bugcrowd_importer.py b/unittests/tools/test_api_bugcrowd_importer.py
index e8fb4f784e..9e8ca88ac7 100644
--- a/unittests/tools/test_api_bugcrowd_importer.py
+++ b/unittests/tools/test_api_bugcrowd_importer.py
@@ -91,7 +91,7 @@ def test_prepare_client_no_configuration(self, mock_foo):
mock_foo.count.return_value = 0
with self.assertRaisesRegex(
- ValidationError, r'There are no API Scan Configurations for this Product\. Please add at least one API Scan Configuration for bugcrowd to this Product\. Product: "Product" \(None\)'
+ ValidationError, r'There are no API Scan Configurations for this Product\. Please add at least one API Scan Configuration for bugcrowd to this Product\. Product: "Product" \(None\)',
):
bugrcrowd_api_importer = BugcrowdApiImporter()
bugrcrowd_api_importer.prepare_client(self.test)
@@ -106,11 +106,11 @@ def test_prepare_client_one_product_configuration(self, mock_foo):
bugrcrowd_api_importer = BugcrowdApiImporter()
bugcrowd_api, api_scan_configuration = bugrcrowd_api_importer.prepare_client(
- self.test
+ self.test,
)
mock_foo.filter.assert_called_with(
- product=self.product, tool_configuration__tool_type__name="Bugcrowd API"
+ product=self.product, tool_configuration__tool_type__name="Bugcrowd API",
)
self.assertEqual(api_scan_configuration, self.api_scan_configuration)
self.assertEqual(bugcrowd_api.api_token, "API_KEY")
@@ -118,7 +118,7 @@ def test_prepare_client_one_product_configuration(self, mock_foo):
def test_prepare_client_one_test_configuration(self):
bugrcrowd_api_importer = BugcrowdApiImporter()
bugcrowd_api, api_scan_configuration = bugrcrowd_api_importer.prepare_client(
- self.test_2
+ self.test_2,
)
self.assertEqual(api_scan_configuration, self.api_scan_configuration_2)
diff --git a/unittests/tools/test_api_bugcrowd_parser.py b/unittests/tools/test_api_bugcrowd_parser.py
index 2569fb16cc..4433ea61ee 100644
--- a/unittests/tools/test_api_bugcrowd_parser.py
+++ b/unittests/tools/test_api_bugcrowd_parser.py
@@ -33,7 +33,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
finding = findings[0]
self.assertEqual(finding.title, "JWT Alg none")
self.assertEqual(
- datetime.datetime.date(finding.date), datetime.date(2002, 4, 1)
+ datetime.datetime.date(finding.date), datetime.date(2002, 4, 1),
)
self.assertEqual(str(finding.unsaved_endpoints[0]), "https://example.com")
self.assertEqual(finding.severity, "Info")
@@ -41,11 +41,11 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
self.assertEqual(finding.mitigation, "Properly do JWT")
self.assertEqual(finding.active, True)
self.assertEqual(
- finding.unique_id_from_tool, "a4201d47-62e1-4287-9ff6-30807ae9d36a"
+ finding.unique_id_from_tool, "a4201d47-62e1-4287-9ff6-30807ae9d36a",
)
self.assertIn(
"/submissions/a4201d47-62e1-4287-9ff6-30807ae9d36a",
- finding.references
+ finding.references,
)
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
@@ -64,23 +64,23 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
self.assertEqual(finding_3.title, "you did something wrong (returned)")
self.assertEqual(
- datetime.datetime.date(finding_1.date), datetime.date(2000, 1, 1)
+ datetime.datetime.date(finding_1.date), datetime.date(2000, 1, 1),
)
self.assertEqual(
- datetime.datetime.date(finding_2.date), datetime.date(2000, 1, 2)
+ datetime.datetime.date(finding_2.date), datetime.date(2000, 1, 2),
)
self.assertEqual(
- datetime.datetime.date(finding_3.date), datetime.date(2000, 1, 3)
+ datetime.datetime.date(finding_3.date), datetime.date(2000, 1, 3),
)
self.assertEqual(
- str(finding_1.unsaved_endpoints[0]), "https://example.com/1"
+ str(finding_1.unsaved_endpoints[0]), "https://example.com/1",
)
self.assertEqual(
- str(finding_2.unsaved_endpoints[0]), "https://example.com/2"
+ str(finding_2.unsaved_endpoints[0]), "https://example.com/2",
)
self.assertEqual(
- str(finding_3.unsaved_endpoints[0]), "https://example.com/3"
+ str(finding_3.unsaved_endpoints[0]), "https://example.com/3",
)
for endpoint in finding_1.unsaved_endpoints:
endpoint.clean()
@@ -106,18 +106,18 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
self.assertEqual(finding_3.risk_accepted, False)
self.assertEqual(
- finding_1.unique_id_from_tool, "3b0e6b2a-c21e-493e-bd19-de40f525016e"
+ finding_1.unique_id_from_tool, "3b0e6b2a-c21e-493e-bd19-de40f525016e",
)
self.assertEqual(
- finding_2.unique_id_from_tool, "b2f1066a-6188-4479-bab8-39cc5434f06f"
+ finding_2.unique_id_from_tool, "b2f1066a-6188-4479-bab8-39cc5434f06f",
)
self.assertEqual(
- finding_3.unique_id_from_tool, "335a7ba5-57ba-485a-b40e-2f9aa4e19786"
+ finding_3.unique_id_from_tool, "335a7ba5-57ba-485a-b40e-2f9aa4e19786",
)
def test_parse_file_with_not_reproducible_finding(self):
with open(
- "unittests/scans/api_bugcrowd/bugcrowd_not_reproducible.json"
+ "unittests/scans/api_bugcrowd/bugcrowd_not_reproducible.json",
) as testfile:
# description = """
@@ -134,7 +134,7 @@ def test_parse_file_with_not_reproducible_finding(self):
finding = findings[0]
self.assertEqual(finding.title, "JWT Alg none")
self.assertEqual(
- datetime.datetime.date(finding.date), datetime.date(2002, 4, 1)
+ datetime.datetime.date(finding.date), datetime.date(2002, 4, 1),
)
self.assertEqual(str(finding.unsaved_endpoints[0]), "https://example.com")
self.assertEqual(finding.severity, "Info")
@@ -143,7 +143,7 @@ def test_parse_file_with_not_reproducible_finding(self):
self.assertEqual(finding.active, False)
self.assertEqual(finding.false_p, True)
self.assertEqual(
- finding.unique_id_from_tool, "a4201d47-62e1-4287-9ff6-30807ae9d36a"
+ finding.unique_id_from_tool, "a4201d47-62e1-4287-9ff6-30807ae9d36a",
)
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
diff --git a/unittests/tools/test_api_edgescan_parser.py b/unittests/tools/test_api_edgescan_parser.py
index 94d45fabf6..93399d952e 100644
--- a/unittests/tools/test_api_edgescan_parser.py
+++ b/unittests/tools/test_api_edgescan_parser.py
@@ -20,7 +20,7 @@ def get_description_for_scan_types(self):
parser = ApiEdgescanParser()
self.assertEqual(
parser.get_description_for_scan_types(scan_type),
- "Edgescan findings can be imported by API or JSON file."
+ "Edgescan findings can be imported by API or JSON file.",
)
def test_requires_file(self):
diff --git a/unittests/tools/test_api_sonarqube_importer.py b/unittests/tools/test_api_sonarqube_importer.py
index 0b30008c1c..f2a49cc20d 100644
--- a/unittests/tools/test_api_sonarqube_importer.py
+++ b/unittests/tools/test_api_sonarqube_importer.py
@@ -70,7 +70,7 @@ class TestSonarqubeImporterNoSQToolConfig(DojoTestCase):
# Testing case no 1. https://github.com/DefectDojo/django-DefectDojo/pull/4676
fixtures = [
'unit_sonarqube_toolType.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -88,7 +88,7 @@ class TestSonarqubeImporterOneSQToolConfig(DojoTestCase):
fixtures = [
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -107,7 +107,7 @@ class TestSonarqubeImporterMultipleSQToolConfig(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_toolConfig2.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -127,7 +127,7 @@ class TestSonarqubeImporterOneSQConfigNoKey(DojoTestCase):
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_toolConfig2.json',
'unit_sonarqube_product.json',
- 'unit_sonarqube_sqcNoKey.json'
+ 'unit_sonarqube_sqcNoKey.json',
]
def setUp(self):
@@ -153,7 +153,7 @@ class TestSonarqubeImporterOneSQConfigWithKey(DojoTestCase):
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_toolConfig2.json',
'unit_sonarqube_product.json',
- 'unit_sonarqube_sqcWithKey.json'
+ 'unit_sonarqube_sqcWithKey.json',
]
def setUp(self):
@@ -180,7 +180,7 @@ class TestSonarqubeImporterMultipleSQConfigs(DojoTestCase):
'unit_sonarqube_toolConfig2.json',
'unit_sonarqube_product.json',
'unit_sonarqube_sqcNoKey.json',
- 'unit_sonarqube_sqcWithKey.json'
+ 'unit_sonarqube_sqcWithKey.json',
]
def setUp(self):
@@ -201,7 +201,7 @@ class TestSonarqubeImporterSelectedSQConfigsNoKey(DojoTestCase):
'unit_sonarqube_toolConfig2.json',
'unit_sonarqube_product.json',
'unit_sonarqube_sqcNoKey.json',
- 'unit_sonarqube_sqcWithKey.json'
+ 'unit_sonarqube_sqcWithKey.json',
]
def setUp(self):
@@ -209,7 +209,7 @@ def setUp(self):
engagement = Engagement(product=product)
self.test = Test(
engagement=engagement,
- api_scan_configuration=Product_API_Scan_Configuration.objects.all().first()
+ api_scan_configuration=Product_API_Scan_Configuration.objects.all().first(),
)
@mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_project', dummy_product)
@@ -231,7 +231,7 @@ class TestSonarqubeImporterSelectedSQConfigsWithKey(DojoTestCase):
'unit_sonarqube_toolConfig2.json',
'unit_sonarqube_product.json',
'unit_sonarqube_sqcNoKey.json',
- 'unit_sonarqube_sqcWithKey.json'
+ 'unit_sonarqube_sqcWithKey.json',
]
def setUp(self):
@@ -239,13 +239,13 @@ def setUp(self):
engagement = Engagement(product=product)
self.test = Test(
engagement=engagement,
- api_scan_configuration=Product_API_Scan_Configuration.objects.all().last()
+ api_scan_configuration=Product_API_Scan_Configuration.objects.all().last(),
)
other_product = Product(name='other product')
other_engagement = Engagement(product=other_product)
self.other_test = Test(
engagement=other_engagement,
- api_scan_configuration=Product_API_Scan_Configuration.objects.all().last()
+ api_scan_configuration=Product_API_Scan_Configuration.objects.all().last(),
)
@mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product)
@@ -272,7 +272,7 @@ class TestSonarqubeImporterExternalRule(DojoTestCase):
'unit_sonarqube_toolConfig2.json',
'unit_sonarqube_product.json',
'unit_sonarqube_sqcNoKey.json',
- 'unit_sonarqube_sqcWithKey.json'
+ 'unit_sonarqube_sqcWithKey.json',
]
def setUp(self):
@@ -280,7 +280,7 @@ def setUp(self):
engagement = Engagement(product=product)
self.test = Test(
engagement=engagement,
- api_scan_configuration=Product_API_Scan_Configuration.objects.all().last()
+ api_scan_configuration=Product_API_Scan_Configuration.objects.all().last(),
)
@mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product)
@@ -308,7 +308,7 @@ class TestSonarqubeImporterTwoIssuesNoHotspots(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_sqcWithKey.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -333,7 +333,7 @@ class TestSonarqubeImporterNoIssuesOneHotspot(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_sqcWithKey.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -358,7 +358,7 @@ class TestSonarqubeImporterNoIssuesTwoHotspots(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_sqcWithKey.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -383,7 +383,7 @@ class TestSonarqubeImporterTwoIssuesTwoHotspots(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_sqcWithKey.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -408,7 +408,7 @@ class TestSonarqubeImporterValidateHotspotData(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_sqcWithKey.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -437,7 +437,7 @@ def test_parser(self):
'\n\n'
'There is a risk if you answered yes to any of those questions.'
'\n\n',
- findings[0].description
+ findings[0].description,
)
self.assertEqual(str(findings[0].severity), 'High')
self.assertMultiLineEqual(
@@ -459,7 +459,7 @@ def test_parser(self):
'\n'
'[Hard Coded Password](http://h3xstream.github.io/find-sec-bugs/bugs.htm#HARD_CODE_PASSWORD)'
'\n',
- findings[0].references
+ findings[0].references,
)
self.assertEqual(str(findings[0].file_path), 'internal.dummy.project:spec/support/user_fixture.rb')
self.assertEqual(findings[0].line, 9)
@@ -479,7 +479,7 @@ class TestSonarqubeImporterHotspotRule_WO_Risk_Description(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_sqcWithKey.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -508,7 +508,7 @@ def test_parser(self):
'\n\n'
'There is a risk if you answered yes to any of those questions.'
'\n\n',
- findings[0].description
+ findings[0].description,
)
self.assertEqual(str(findings[0].severity), 'High')
self.assertEqual(findings[0].references, '[Hotspot permalink](http://localhosecurity_hotspots?id=internal.dummy.project&hotspots=AXgm6Z-ophPPY0C1qhRq) \n')
diff --git a/unittests/tools/test_api_sonarqube_parser.py b/unittests/tools/test_api_sonarqube_parser.py
index ffb33b76ee..d9963d1fb7 100644
--- a/unittests/tools/test_api_sonarqube_parser.py
+++ b/unittests/tools/test_api_sonarqube_parser.py
@@ -50,10 +50,10 @@ def setUp(self):
# build Sonarqube conf (the parser need it)
tool_type, _ = Tool_Type.objects.get_or_create(name="SonarQube")
tool_conf, _ = Tool_Configuration.objects.get_or_create(
- name="SQ1_unittests", authentication_type="API", tool_type=tool_type, url='http://dummy.url.foo.bar/api'
+ name="SQ1_unittests", authentication_type="API", tool_type=tool_type, url='http://dummy.url.foo.bar/api',
)
pasc, _ = Product_API_Scan_Configuration.objects.get_or_create(
- product=product, tool_configuration=tool_conf, service_key_1='ABCD'
+ product=product, tool_configuration=tool_conf, service_key_1='ABCD',
)
self.test = Test(engagement=engagement, api_scan_configuration=pasc)
diff --git a/unittests/tools/test_auditjs_parser.py b/unittests/tools/test_auditjs_parser.py
index 789efc73d0..7e128183a8 100644
--- a/unittests/tools/test_auditjs_parser.py
+++ b/unittests/tools/test_auditjs_parser.py
@@ -64,7 +64,7 @@ def test_auditjs_parser_empty_with_error(self):
parser.get_findings(testfile, Test())
self.assertTrue(
- "Invalid JSON format. Are you sure you used --json option ?" in str(context.exception)
+ "Invalid JSON format. Are you sure you used --json option ?" in str(context.exception),
)
def test_auditjs_parser_with_package_name_has_namespace(self):
diff --git a/unittests/tools/test_aws_prowler_parser.py b/unittests/tools/test_aws_prowler_parser.py
index 2c33d706bd..db567d00f0 100644
--- a/unittests/tools/test_aws_prowler_parser.py
+++ b/unittests/tools/test_aws_prowler_parser.py
@@ -22,7 +22,7 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings(self):
open("unittests/scans/aws_prowler/one_vuln.csv"))
self.assertEqual(1, len(findings))
self.assertEqual(
- "Root user in the account wasn't accessed in the last 1 days", findings[0].title
+ "Root user in the account wasn't accessed in the last 1 days", findings[0].title,
)
def test_aws_prowler_parser_with_many_vuln_has_many_findings(self):
diff --git a/unittests/tools/test_awssecurityhub_parser.py b/unittests/tools/test_awssecurityhub_parser.py
index 7993b065a5..f287f8937b 100644
--- a/unittests/tools/test_awssecurityhub_parser.py
+++ b/unittests/tools/test_awssecurityhub_parser.py
@@ -53,7 +53,7 @@ def test_unique_id(self):
findings = parser.get_findings(test_file, Test())
self.assertEqual(
"arn:aws:securityhub:us-east-1:012345678912:subscription/aws-foundational-security-best-practices/v/1.0.0/IAM.5/finding/de861909-2d26-4e45-bd86-19d2ab6ceef1",
- findings[0].unique_id_from_tool
+ findings[0].unique_id_from_tool,
)
def test_inspector_ec2(self):
diff --git a/unittests/tools/test_bandit_parser.py b/unittests/tools/test_bandit_parser.py
index dc4a3f34c8..a27d629e63 100644
--- a/unittests/tools/test_bandit_parser.py
+++ b/unittests/tools/test_bandit_parser.py
@@ -77,7 +77,7 @@ def test_bandit_parser_has_many_findings_recent2(self):
with self.subTest(i=50):
item = findings[50]
self.assertEqual(
- "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed.", item.title
+ "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed.", item.title,
)
self.assertEqual(datetime.datetime(2021, 10, 3, 12, 53, 18, tzinfo=tzlocal()), item.date)
self.assertEqual("Medium", item.severity)
diff --git a/unittests/tools/test_blackduck_binary_analysis_parser.py b/unittests/tools/test_blackduck_binary_analysis_parser.py
index 29c4130294..92d92c111b 100644
--- a/unittests/tools/test_blackduck_binary_analysis_parser.py
+++ b/unittests/tools/test_blackduck_binary_analysis_parser.py
@@ -21,7 +21,7 @@ def test_parse_one_vuln(self):
self.assertIsNotNone(finding.title)
self.assertEqual(
"instrument.dll: zlib 1.2.13 Vulnerable to CVE-2023-45853",
- finding.title
+ finding.title,
)
self.assertIsNotNone(finding.description)
@@ -37,7 +37,7 @@ def test_parse_one_vuln(self):
self.assertIsNotNone(finding.file_path)
self.assertEqual(
"JRE.msi:JRE.msi-30276-90876123.cab:instrument.dll",
- finding.file_path
+ finding.file_path,
)
self.assertIsNotNone(finding.vuln_id_from_tool)
diff --git a/unittests/tools/test_blackduck_component_risk_parser.py b/unittests/tools/test_blackduck_component_risk_parser.py
index 2a520c33aa..ccb613ce9e 100644
--- a/unittests/tools/test_blackduck_component_risk_parser.py
+++ b/unittests/tools/test_blackduck_component_risk_parser.py
@@ -9,7 +9,7 @@ class TestBlackduckComponentRiskParser(DojoTestCase):
def test_blackduck_enhanced_zip_upload(self):
testfile = Path(
get_unit_tests_path() + "/scans/blackduck_component_risk/"
- "blackduck_hub_component_risk.zip"
+ "blackduck_hub_component_risk.zip",
)
parser = BlackduckComponentRiskParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_blackduck_parser.py b/unittests/tools/test_blackduck_parser.py
index 96c0f2eac3..d2d16c6942 100644
--- a/unittests/tools/test_blackduck_parser.py
+++ b/unittests/tools/test_blackduck_parser.py
@@ -44,7 +44,7 @@ def test_blackduck_csv_parser_new_format_has_many_findings(self):
def test_blackduck_enhanced_has_many_findings(self):
testfile = Path(
- get_unit_tests_path() + "/scans/blackduck/blackduck_enhanced_py3_unittest.zip"
+ get_unit_tests_path() + "/scans/blackduck/blackduck_enhanced_py3_unittest.zip",
)
parser = BlackduckParser()
findings = parser.get_findings(testfile, Test())
@@ -52,7 +52,7 @@ def test_blackduck_enhanced_has_many_findings(self):
def test_blackduck_enhanced_zip_upload(self):
testfile = Path(
- get_unit_tests_path() + "/scans/blackduck/blackduck_enhanced_py3_unittest_v2.zip"
+ get_unit_tests_path() + "/scans/blackduck/blackduck_enhanced_py3_unittest_v2.zip",
)
parser = BlackduckParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_checkmarx_osa_parser.py b/unittests/tools/test_checkmarx_osa_parser.py
index bfe1590c77..ba348b64e5 100644
--- a/unittests/tools/test_checkmarx_osa_parser.py
+++ b/unittests/tools/test_checkmarx_osa_parser.py
@@ -28,7 +28,7 @@ def test_checkmarx_osa_parse_file_with_no_vulnerabilities_has_no_findings(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/no_finding.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/no_finding.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -42,7 +42,7 @@ def test_checkmarx_osa_parse_file_with_single_vulnerability_has_single_finding(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/single_finding.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/single_finding.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -94,7 +94,7 @@ def test_checkmarx_osa_parse_file_with_false_positive_is_false_positive(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_false_positive.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_false_positive.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -115,7 +115,7 @@ def test_checkmarx_osa_parse_file_with_confirmed_is_verified(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_confirmed.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_confirmed.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -136,7 +136,7 @@ def test_checkmarx_osa_parse_file_with_multiple_findings(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/multiple_findings.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/multiple_findings.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -150,7 +150,7 @@ def test_checkmarx_osa_parse_file_with_no_score(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_score.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_score.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -166,7 +166,7 @@ def test_checkmarx_osa_parse_file_with_no_url(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_url.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_url.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -183,12 +183,12 @@ def test_checkmarx_osa_parse_file_with_no_libraryId_raises_ValueError(
):
with self.assertRaises(ValueError) as context:
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_libraryId.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_libraryId.json",
)
with my_file_handle:
parser = CheckmarxOsaParser()
parser.get_findings(my_file_handle, test)
self.assertEqual(
- "Invalid format: missing mandatory field libraryId", str(context.exception)
+ "Invalid format: missing mandatory field libraryId", str(context.exception),
)
diff --git a/unittests/tools/test_checkmarx_parser.py b/unittests/tools/test_checkmarx_parser.py
index 88e5cc965b..6bfbbc1304 100644
--- a/unittests/tools/test_checkmarx_parser.py
+++ b/unittests/tools/test_checkmarx_parser.py
@@ -29,7 +29,7 @@ def teardown(self, my_file_handle):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings(self, mock):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/no_finding.xml"
+ get_unit_tests_path() + "/scans/checkmarx/no_finding.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -40,7 +40,7 @@ def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings
def test_detailed_parse_file_with_no_vulnerabilities_has_no_findings(self, mock):
"""Checkmarx detailed scanner, with all vulnerabilities from checkmarx"""
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/no_finding.xml"
+ get_unit_tests_path() + "/scans/checkmarx/no_finding.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -51,7 +51,7 @@ def test_detailed_parse_file_with_no_vulnerabilities_has_no_findings(self, mock)
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_finding(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/single_finding.xml"
+ get_unit_tests_path() + "/scans/checkmarx/single_finding.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -79,7 +79,7 @@ def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_fi
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_single_vulnerability_has_single_finding(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/single_finding.xml"
+ get_unit_tests_path() + "/scans/checkmarx/single_finding.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -213,7 +213,7 @@ def check_parse_file_with_single_vulnerability_has_single_finding(self, findings
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_false_positive_is_false_positive(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/single_finding_false_positive.xml"
+ get_unit_tests_path() + "/scans/checkmarx/single_finding_false_positive.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -225,7 +225,7 @@ def test_file_name_aggregated_parse_file_with_false_positive_is_false_positive(s
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_false_positive_is_false_positive(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/single_finding_false_positive.xml"
+ get_unit_tests_path() + "/scans/checkmarx/single_finding_false_positive.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -254,7 +254,7 @@ def check_parse_file_with_false_positive_is_false_positive(self, findings):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_two_aggregated_findings_one_is_false_p(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/two_aggregated_findings_one_is_false_positive.xml"
+ get_unit_tests_path() + "/scans/checkmarx/two_aggregated_findings_one_is_false_positive.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -280,7 +280,7 @@ def test_file_name_aggregated_parse_file_with_two_aggregated_findings_one_is_fal
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -299,7 +299,7 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -322,7 +322,7 @@ def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_different_sourceFilename_same_sinkFilename_is_aggregated(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings_different_sourceFilename_same_sinkFilename.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings_different_sourceFilename_same_sinkFilename.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -336,7 +336,7 @@ def test_file_name_aggregated_parse_file_with_different_sourceFilename_same_sink
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_different_sourceFilename_same_sinkFilename_is_not_aggregated(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings_different_sourceFilename_same_sinkFilename.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings_different_sourceFilename_same_sinkFilename.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -353,7 +353,7 @@ def test_detailed_parse_file_with_different_sourceFilename_same_sinkFilename_is_
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_same_sourceFilename_different_sinkFilename_is_not_aggregated(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_sourceFilename_different_sinkFilename.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_sourceFilename_different_sinkFilename.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -365,7 +365,7 @@ def test_file_name_aggregated_parse_file_with_same_sourceFilename_different_sink
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_same_sourceFilename_different_sinkFilename_is_not_aggregated(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_sourceFilename_different_sinkFilename.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_sourceFilename_different_sinkFilename.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -380,7 +380,7 @@ def test_detailed_parse_file_with_same_sourceFilename_different_sinkFilename_is_
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_utf8_replacement_char(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/utf8_replacement_char.xml"
+ get_unit_tests_path() + "/scans/checkmarx/utf8_replacement_char.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -408,7 +408,7 @@ def test_file_name_aggregated_parse_file_with_utf8_replacement_char(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_utf8_replacement_char(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/utf8_replacement_char.xml"
+ get_unit_tests_path() + "/scans/checkmarx/utf8_replacement_char.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -526,7 +526,7 @@ def check_parse_file_with_utf8_replacement_char(self, findings):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_utf8_various_non_ascii_char(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/utf8_various_non_ascii_char.xml"
+ get_unit_tests_path() + "/scans/checkmarx/utf8_various_non_ascii_char.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -554,7 +554,7 @@ def test_file_name_aggregated_parse_file_with_utf8_various_non_ascii_char(self,
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_utf8_various_non_ascii_char(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/utf8_various_non_ascii_char.xml"
+ get_unit_tests_path() + "/scans/checkmarx/utf8_various_non_ascii_char.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -672,7 +672,7 @@ def check_parse_file_with_utf8_various_non_ascii_char(self, findings):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_with_multiple_findings_is_aggregated_with_query_id(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_query_id.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_query_id.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -692,7 +692,7 @@ def test_file_with_multiple_findings_is_aggregated_with_query_id(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_with_empty_filename(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/single_no_filename.xml"
+ get_unit_tests_path() + "/scans/checkmarx/single_no_filename.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -712,7 +712,7 @@ def test_file_with_empty_filename(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_with_many_aggregated_findings(self, mock):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/many_aggregated_findings.xml"
+ get_unit_tests_path() + "/scans/checkmarx/many_aggregated_findings.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -729,7 +729,7 @@ def test_file_with_many_aggregated_findings(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_with_many_findings_json(self, mock):
my_file_handle, _product, _engagement, _test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, Test())
@@ -763,7 +763,7 @@ def test_file_with_many_findings_json(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_issue6956(self, mock):
my_file_handle, _product, _engagement, _test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/sample_report.json"
+ get_unit_tests_path() + "/scans/checkmarx/sample_report.json",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, Test())
@@ -826,7 +826,7 @@ def test_file_issue6956(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_finding_date_should_be_date_xml(self, mock):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/single_finding.xml"
+ get_unit_tests_path() + "/scans/checkmarx/single_finding.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -837,7 +837,7 @@ def test_finding_date_should_be_date_xml(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_finding_date_should_be_date_json(self, mock):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
diff --git a/unittests/tools/test_checkov_parser.py b/unittests/tools/test_checkov_parser.py
index b626fc952e..49bc3d1a5f 100644
--- a/unittests/tools/test_checkov_parser.py
+++ b/unittests/tools/test_checkov_parser.py
@@ -53,7 +53,7 @@ def test_parse_file_with_multiple_check_type_has_multiple_check_type(self):
'Check Type: terraform\n'
'Check Id: CKV_AWS_161\n'
'Ensure RDS database has IAM authentication enabled\n',
- first_terraform_finding.description
+ first_terraform_finding.description,
)
self.assertEqual('/aws/db-app.tf', first_terraform_finding.file_path)
self.assertEqual(1, first_terraform_finding.line)
@@ -68,7 +68,7 @@ def test_parse_file_with_multiple_check_type_has_multiple_check_type(self):
'Check Type: dockerfile\n'
'Check Id: CKV_DOCKER_3\n'
'Ensure that a user for the container has been created\n',
- first_dockerfile_finding.description
+ first_dockerfile_finding.description,
)
self.assertEqual('/aws/resources/Dockerfile', first_dockerfile_finding.file_path)
self.assertEqual(0, first_dockerfile_finding.line)
@@ -76,7 +76,7 @@ def test_parse_file_with_multiple_check_type_has_multiple_check_type(self):
self.assertEqual('', first_dockerfile_finding.mitigation)
self.assertEqual(
'https://docs.bridgecrew.io/docs/ensure-that-a-user-for-the-container-has-been-created',
- first_dockerfile_finding.references
+ first_dockerfile_finding.references,
)
def test_parse_file_with_specified_severity(self):
diff --git a/unittests/tools/test_codechecker_parser.py b/unittests/tools/test_codechecker_parser.py
index 8c6d9e6cc6..756ba4c780 100644
--- a/unittests/tools/test_codechecker_parser.py
+++ b/unittests/tools/test_codechecker_parser.py
@@ -7,7 +7,7 @@ class TestCodeCheckerParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/codechecker/cc-report-0-vuln.json"
+ get_unit_tests_path() + "/scans/codechecker/cc-report-0-vuln.json",
) as testfile:
parser = CodeCheckerParser()
findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
def test_parse_file_with_one_vuln_has_one_finding(self):
with open(
- get_unit_tests_path() + "/scans/codechecker/cc-report-1-vuln.json"
+ get_unit_tests_path() + "/scans/codechecker/cc-report-1-vuln.json",
) as testfile:
parser = CodeCheckerParser()
findings = parser.get_findings(testfile, Test())
@@ -33,7 +33,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/codechecker/cc-report-many-vuln.json"
+ get_unit_tests_path() + "/scans/codechecker/cc-report-many-vuln.json",
) as testfile:
parser = CodeCheckerParser()
findings = parser.get_findings(testfile, Test())
@@ -60,7 +60,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
def test_parse_file_with_various_review_statuses(self):
with open(
- get_unit_tests_path() + "/scans/codechecker/cc-report-review-status.json"
+ get_unit_tests_path() + "/scans/codechecker/cc-report-review-status.json",
) as testfile:
parser = CodeCheckerParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_crashtest_security_parser.py b/unittests/tools/test_crashtest_security_parser.py
index 2eaa5211cd..a87248ca3d 100644
--- a/unittests/tools/test_crashtest_security_parser.py
+++ b/unittests/tools/test_crashtest_security_parser.py
@@ -25,7 +25,7 @@ def test_crashtest_security_json_parser_full_file_has_many_findings(self):
def test_crashtest_security_json_parser_extracted_data_file_has_many_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/crashtest_security/data_extracted.json"
+ get_unit_tests_path() + "/scans/crashtest_security/data_extracted.json",
)
parser = CrashtestSecurityParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_dependency_check_parser.py b/unittests/tools/test_dependency_check_parser.py
index 0bf78406f4..f72db048a4 100644
--- a/unittests/tools/test_dependency_check_parser.py
+++ b/unittests/tools/test_dependency_check_parser.py
@@ -77,7 +77,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
"Update org.dom4j:dom4j:2.1.1.redhat-00001 to at least the version recommended in the description",
)
self.assertEqual(
- items[0].date, datetime(2016, 11, 5, 14, 52, 15, 748000, tzinfo=tzoffset(None, -14400))
+ items[0].date, datetime(2016, 11, 5, 14, 52, 15, 748000, tzinfo=tzoffset(None, -14400)),
) # 2016-11-05T14:52:15.748-0400
self.assertEqual(1, len(items[0].unsaved_vulnerability_ids))
self.assertEqual('CVE-0000-0001', items[0].unsaved_vulnerability_ids[0])
@@ -128,7 +128,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
with self.subTest(i=3):
# identifier -> package url javascript, no vulnerabilitids, 3 vulnerabilities, relateddependencies without filename (pre v6.0.0)
self.assertEqual(
- items[3].title, "yargs-parser:5.0.0 | 1500"
+ items[3].title, "yargs-parser:5.0.0 | 1500",
)
self.assertEqual(items[3].component_name, "yargs-parser")
self.assertEqual(items[3].component_version, "5.0.0")
@@ -137,7 +137,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
self.assertEqual(items[3].severity, "Low")
self.assertEqual(items[3].file_path, "yargs-parser:5.0.0")
self.assertEqual(
- items[3].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description"
+ items[3].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description",
)
self.assertIn(
"**Source:** NPM",
@@ -163,7 +163,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
self.assertEqual(items[4].severity, "High")
self.assertEqual(items[4].file_path, "yargs-parser:5.0.0")
self.assertEqual(
- items[4].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description"
+ items[4].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description",
)
self.assertEqual(1, len(items[4].unsaved_vulnerability_ids))
self.assertEqual('CVE-2020-7608', items[4].unsaved_vulnerability_ids[0])
@@ -187,7 +187,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
self.assertEqual(items[5].severity, "High")
self.assertEqual(items[5].file_path, "yargs-parser:5.0.0")
self.assertEqual(
- items[5].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description"
+ items[5].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description",
)
self.assertIsNone(items[5].unsaved_vulnerability_ids)
@@ -212,7 +212,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
self.assertEqual(items[7].component_version, "2.1.1")
self.assertEqual(items[7].severity, "High")
self.assertEqual(
- items[7].mitigation, "Update dom4j:2.1.1 to at least the version recommended in the description"
+ items[7].mitigation, "Update dom4j:2.1.1 to at least the version recommended in the description",
)
with self.subTest(i=8):
@@ -225,7 +225,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
self.assertEqual(items[8].component_version, "3.1.1")
self.assertEqual(items[8].severity, "High")
self.assertEqual(
- items[8].mitigation, "Update jquery:3.1.1 to at least the version recommended in the description"
+ items[8].mitigation, "Update jquery:3.1.1 to at least the version recommended in the description",
)
with self.subTest(i=9):
@@ -299,7 +299,7 @@ def test_parse_file_pr6439(self):
items[0].mitigation,
)
self.assertEqual(
- items[0].date, datetime(2022, 12, 14, 1, 35, 43, 684166, tzinfo=tzlocal())
+ items[0].date, datetime(2022, 12, 14, 1, 35, 43, 684166, tzinfo=tzlocal()),
) # 2016-11-05T14:52:15.748-0400
self.assertEqual(1, len(items[0].unsaved_vulnerability_ids))
self.assertEqual('CVE-2015-3208', items[0].unsaved_vulnerability_ids[0])
diff --git a/unittests/tools/test_dependency_track_parser.py b/unittests/tools/test_dependency_track_parser.py
index 41fb2591fa..40fd0a8177 100644
--- a/unittests/tools/test_dependency_track_parser.py
+++ b/unittests/tools/test_dependency_track_parser.py
@@ -5,9 +5,9 @@
class TestDependencyTrackParser(DojoTestCase):
- def test_dependency_track_parser_with_empty_list_for_findings_key_has_no_findings(self,):
+ def test_dependency_track_parser_with_empty_list_for_findings_key_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_empty_list.json"
+ get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_empty_list.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_dependency_track_parser_with_empty_list_for_findings_key_has_no_finding
def test_dependency_track_parser_with_missing_findings_key_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_missing.json"
+ get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_missing.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -23,7 +23,7 @@ def test_dependency_track_parser_with_missing_findings_key_has_no_findings(self)
def test_dependency_track_parser_with_null_findings_key_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_null.json"
+ get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_null.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -31,7 +31,7 @@ def test_dependency_track_parser_with_null_findings_key_has_no_findings(self):
def test_dependency_track_parser_has_many_findings(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/many_findings.json"
+ get_unit_tests_path() + "/scans/dependency_track/many_findings.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -49,7 +49,7 @@ def test_dependency_track_parser_has_many_findings(self):
def test_dependency_track_parser_has_one_finding(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/one_finding.json"
+ get_unit_tests_path() + "/scans/dependency_track/one_finding.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -57,7 +57,7 @@ def test_dependency_track_parser_has_one_finding(self):
def test_dependency_track_parser_v3_8_0(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/dependency_track_3.8.0_2021-01-18.json"
+ get_unit_tests_path() + "/scans/dependency_track/dependency_track_3.8.0_2021-01-18.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -67,7 +67,7 @@ def test_dependency_track_parser_v3_8_0(self):
def test_dependency_track_parser_findings_with_alias(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/many_findings_with_alias.json"
+ get_unit_tests_path() + "/scans/dependency_track/many_findings_with_alias.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -79,7 +79,7 @@ def test_dependency_track_parser_findings_with_alias(self):
def test_dependency_track_parser_findings_with_empty_alias(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/many_findings_with_empty_alias.json"
+ get_unit_tests_path() + "/scans/dependency_track/many_findings_with_empty_alias.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_dockerbench_parser.py b/unittests/tools/test_dockerbench_parser.py
index 8a2ec6137d..b3d5f603f0 100644
--- a/unittests/tools/test_dockerbench_parser.py
+++ b/unittests/tools/test_dockerbench_parser.py
@@ -7,7 +7,7 @@ class TestDockerBenchParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-zero-vulns.json"
+ get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-zero-vulns.json",
) as testfile:
parser = DockerBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
def test_parse_file_with_one_vuln_has_one_finding(self):
with open(
- get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-single-vuln.json"
+ get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-single-vuln.json",
) as testfile:
parser = DockerBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -29,7 +29,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-many-vulns.json"
+ get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-many-vulns.json",
) as testfile:
parser = DockerBenchParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_gitlab_container_scan_parser.py b/unittests/tools/test_gitlab_container_scan_parser.py
index e4da366286..ab3e05a2b6 100644
--- a/unittests/tools/test_gitlab_container_scan_parser.py
+++ b/unittests/tools/test_gitlab_container_scan_parser.py
@@ -93,7 +93,7 @@ def test_gitlab_container_scan_parser_with_fless_data_v14(self):
finding = findings[50]
self.assertIsNone(finding.date)
self.assertEqual(
- "openssl: Infinite loop in BN_mod_sqrt() reachable when parsing certificates", finding.title
+ "openssl: Infinite loop in BN_mod_sqrt() reachable when parsing certificates", finding.title,
)
self.assertEqual("libretls", finding.component_name)
self.assertEqual("3.3.4-r2", finding.component_version)
diff --git a/unittests/tools/test_gitlab_dast_parser.py b/unittests/tools/test_gitlab_dast_parser.py
index 4e6cc5d41c..a2d5c2f762 100644
--- a/unittests/tools/test_gitlab_dast_parser.py
+++ b/unittests/tools/test_gitlab_dast_parser.py
@@ -22,7 +22,7 @@ def test_parse_file_with_one_vuln_has_one_finding_v14(self):
endpoint.clean()
self.assertEqual(
- "5ec00bbc-2e53-44cb-83e9-3d35365277e3", finding.unique_id_from_tool
+ "5ec00bbc-2e53-44cb-83e9-3d35365277e3", finding.unique_id_from_tool,
)
self.assertEqual(3, finding.scanner_confidence)
# vulnerability does not have a name: fallback to using id as a title
@@ -50,7 +50,7 @@ def test_parse_file_with_one_vuln_has_one_finding_v15(self):
endpoint.clean()
self.assertEqual(
- "5ec00bbc-2e53-44cb-83e9-3d35365277e3", finding.unique_id_from_tool
+ "5ec00bbc-2e53-44cb-83e9-3d35365277e3", finding.unique_id_from_tool,
)
self.assertEqual(None, finding.scanner_confidence)
# vulnerability does not have a name: fallback to using id as a title
@@ -90,7 +90,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
date = finding.date.strftime("%Y-%m-%dT%H:%M:%S.%f")
self.assertEqual("2021-04-23T15:46:40.644000", date)
self.assertEqual(
- "87e98ddf-7d75-444a-be6d-45400151a0fe", finding.unique_id_from_tool
+ "87e98ddf-7d75-444a-be6d-45400151a0fe", finding.unique_id_from_tool,
)
# vulnerability does not have a name: fallback to using id as a title
self.assertEqual(finding.unique_id_from_tool, finding.title)
@@ -128,7 +128,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_v15(self):
date = finding.date.strftime("%Y-%m-%dT%H:%M:%S.%f")
self.assertEqual("2021-04-23T15:46:40.644000", date)
self.assertEqual(
- "87e98ddf-7d75-444a-be6d-45400151a0fe", finding.unique_id_from_tool
+ "87e98ddf-7d75-444a-be6d-45400151a0fe", finding.unique_id_from_tool,
)
# vulnerability does not have a name: fallback to using id as a title
self.assertEqual(finding.unique_id_from_tool, finding.title)
diff --git a/unittests/tools/test_govulncheck_parser.py b/unittests/tools/test_govulncheck_parser.py
index f90a699fb1..78f706e47a 100644
--- a/unittests/tools/test_govulncheck_parser.py
+++ b/unittests/tools/test_govulncheck_parser.py
@@ -11,7 +11,7 @@ def test_parse_empty(self):
parser = GovulncheckParser()
parser.get_findings(testfile, Test())
self.assertIn(
- "Invalid JSON format", str(exp.exception)
+ "Invalid JSON format", str(exp.exception),
)
def test_parse_no_findings(self):
diff --git a/unittests/tools/test_huskyci_parser.py b/unittests/tools/test_huskyci_parser.py
index d0b76d7313..22199ed5bb 100644
--- a/unittests/tools/test_huskyci_parser.py
+++ b/unittests/tools/test_huskyci_parser.py
@@ -13,7 +13,7 @@ def test_parse_file_no_finding(self):
def test_parse_file_has_one_finding_one_tool(self):
with open(
- get_unit_tests_path() + "/scans/huskyci/huskyci_report_one_finding_one_tool.json"
+ get_unit_tests_path() + "/scans/huskyci/huskyci_report_one_finding_one_tool.json",
) as testfile:
parser = HuskyCIParser()
findings = parser.get_findings(testfile, Test())
@@ -21,7 +21,7 @@ def test_parse_file_has_one_finding_one_tool(self):
def test_parse_file_has_many_finding_one_tool(self):
with open(
- get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_one_tool.json"
+ get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_one_tool.json",
) as testfile:
parser = HuskyCIParser()
findings = parser.get_findings(testfile, Test())
@@ -29,7 +29,7 @@ def test_parse_file_has_many_finding_one_tool(self):
def test_parse_file_has_many_finding_two_tools(self):
with open(
- get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_two_tools.json"
+ get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_two_tools.json",
) as testfile:
parser = HuskyCIParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_hydra_parser.py b/unittests/tools/test_hydra_parser.py
index 22beeccebe..93077abb16 100644
--- a/unittests/tools/test_hydra_parser.py
+++ b/unittests/tools/test_hydra_parser.py
@@ -41,7 +41,7 @@ def test_hydra_parser_with_one_finding_has_one_finding(self):
"127.0.0.1",
"9999",
"bill@example.com",
- "bill"
+ "bill",
)
def test_hydra_parser_with_one_finding_and_missing_date_has_one_finding(self):
@@ -59,7 +59,7 @@ def test_hydra_parser_with_one_finding_and_missing_date_has_one_finding(self):
"127.0.0.1",
"9999",
"bill@example.com",
- "bill"
+ "bill",
)
def test_hydra_parser_with_two_findings_with_one_incomplete_has_one_finding(self):
@@ -77,7 +77,7 @@ def test_hydra_parser_with_two_findings_with_one_incomplete_has_one_finding(self
"127.0.0.1",
"9999",
"bill@example.com",
- "bill"
+ "bill",
)
def test_hydra_parser_with_many_findings_has_many_findings(self):
@@ -93,7 +93,7 @@ def test_hydra_parser_with_many_findings_has_many_findings(self):
"127.0.0.1",
"9999",
"bill@example.com",
- "bill"
+ "bill",
)
self.__assertFindingEquals(
findings[1],
@@ -101,7 +101,7 @@ def test_hydra_parser_with_many_findings_has_many_findings(self):
"192.168.0.1",
"1234",
"joe@example.com",
- "joe"
+ "joe",
)
self.__assertFindingEquals(
findings[2],
@@ -109,7 +109,7 @@ def test_hydra_parser_with_many_findings_has_many_findings(self):
"something.bad.com",
"4321",
"jimmy@bad.com",
- "somesimplepassword"
+ "somesimplepassword",
)
def __assertFindingEquals(
@@ -119,7 +119,7 @@ def __assertFindingEquals(
finding_url,
finding_port,
finding_username,
- finding_password
+ finding_password,
):
self.assertEqual("Weak username / password combination found for " + finding_url, actual_finding.title)
self.assertEqual(date, actual_finding.date)
diff --git a/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py b/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py
index 615cad2595..330b35431c 100644
--- a/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py
+++ b/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py
@@ -53,7 +53,7 @@ def test_parse_file_with_one_vuln(self):
def test_parse_file_with_many_vulns(self):
testfile = open(
- "unittests/scans/jfrog_xray_api_summary_artifact/many_vulns.json"
+ "unittests/scans/jfrog_xray_api_summary_artifact/many_vulns.json",
)
parser = JFrogXrayApiSummaryArtifactParser()
findings = parser.get_findings(testfile, Test())
@@ -65,7 +65,7 @@ def test_parse_file_with_many_vulns(self):
def test_parse_file_with_malformed_cvssv3_score(self):
testfile = open(
- "unittests/scans/jfrog_xray_api_summary_artifact/malformed_cvssv3.json"
+ "unittests/scans/jfrog_xray_api_summary_artifact/malformed_cvssv3.json",
)
parser = JFrogXrayApiSummaryArtifactParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_kubebench_parser.py b/unittests/tools/test_kubebench_parser.py
index 601db2707d..2e732fef40 100644
--- a/unittests/tools/test_kubebench_parser.py
+++ b/unittests/tools/test_kubebench_parser.py
@@ -7,7 +7,7 @@ class TestKubeBenchParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/kubebench/kube-bench-report-zero-vuln.json"
+ get_unit_tests_path() + "/scans/kubebench/kube-bench-report-zero-vuln.json",
) as testfile:
parser = KubeBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
def test_parse_file_with_one_vuln_has_one_finding(self):
with open(
- get_unit_tests_path() + "/scans/kubebench/kube-bench-report-one-vuln.json"
+ get_unit_tests_path() + "/scans/kubebench/kube-bench-report-one-vuln.json",
) as testfile:
parser = KubeBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -23,7 +23,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/kubebench/kube-bench-report-many-vuln.json"
+ get_unit_tests_path() + "/scans/kubebench/kube-bench-report-many-vuln.json",
) as testfile:
parser = KubeBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -33,7 +33,7 @@ def test_parse_file_with_controls_tag(self):
# The testfile has been derived from https://github.com/kubernetes-sigs/wg-policy-prototypes/blob/master/policy-report/kube-bench-adapter/samples/kube-bench-output.json
with open(
- get_unit_tests_path() + "/scans/kubebench/kube-bench-controls.json"
+ get_unit_tests_path() + "/scans/kubebench/kube-bench-controls.json",
) as testfile:
parser = KubeBenchParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_kubehunter_parser.py b/unittests/tools/test_kubehunter_parser.py
index 6912df0380..df5cf17334 100644
--- a/unittests/tools/test_kubehunter_parser.py
+++ b/unittests/tools/test_kubehunter_parser.py
@@ -41,7 +41,7 @@ def test_kubehunter_parser_empty_with_error(self):
parser.get_findings(testfile, Test())
self.assertEqual(
- "Expecting value: line 1 column 1 (char 0)", str(context.exception)
+ "Expecting value: line 1 column 1 (char 0)", str(context.exception),
)
def test_kubehunter_parser_dupe(self):
diff --git a/unittests/tools/test_mend_parser.py b/unittests/tools/test_mend_parser.py
index 8deaf6f2be..3a48c5c49e 100644
--- a/unittests/tools/test_mend_parser.py
+++ b/unittests/tools/test_mend_parser.py
@@ -30,7 +30,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
def test_parse_file_with_multiple_vuln_cli_output(self):
with open(
- get_unit_tests_path() + "/scans/mend/cli_generated_many_vulns.json"
+ get_unit_tests_path() + "/scans/mend/cli_generated_many_vulns.json",
) as testfile:
parser = MendParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_microfocus_webinspect_parser.py b/unittests/tools/test_microfocus_webinspect_parser.py
index 0d2dd131c1..76869be044 100644
--- a/unittests/tools/test_microfocus_webinspect_parser.py
+++ b/unittests/tools/test_microfocus_webinspect_parser.py
@@ -10,7 +10,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
test.engagement = Engagement()
test.engagement.product = Product()
with open(
- get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_no_vuln.xml"
+ get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_no_vuln.xml",
) as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, test)
@@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
test.engagement = Engagement()
test.engagement.product = Product()
with open(
- get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_one_vuln.xml"
+ get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_one_vuln.xml",
) as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, test)
@@ -42,7 +42,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
test.engagement = Engagement()
test.engagement.product = Product()
with open(
- get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_many_vuln.xml"
+ get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_many_vuln.xml",
)as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, test)
@@ -54,7 +54,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
self.assertEqual(525, item.cwe)
self.assertIsNotNone(item.references)
self.assertEqual(
- "1cfe38ee-89f7-4110-ad7c-8fca476b2f04", item.unique_id_from_tool
+ "1cfe38ee-89f7-4110-ad7c-8fca476b2f04", item.unique_id_from_tool,
)
self.assertEqual(1, len(item.unsaved_endpoints))
endpoint = item.unsaved_endpoints[0]
@@ -65,11 +65,11 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
def test_convert_severity(self):
with self.subTest("convert info", val="0"):
self.assertEqual(
- "Info", MicrofocusWebinspectParser.convert_severity("0")
+ "Info", MicrofocusWebinspectParser.convert_severity("0"),
)
with self.subTest("convert medium", val="2"):
self.assertEqual(
- "Medium", MicrofocusWebinspectParser.convert_severity("2")
+ "Medium", MicrofocusWebinspectParser.convert_severity("2"),
)
def test_parse_file_version_18_20(self):
@@ -121,7 +121,7 @@ def test_parse_file_issue7690(self):
test.engagement = Engagement()
test.engagement.product = Product()
with open(
- get_unit_tests_path() + "/scans/microfocus_webinspect/issue_7690.xml"
+ get_unit_tests_path() + "/scans/microfocus_webinspect/issue_7690.xml",
) as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, test)
diff --git a/unittests/tools/test_noseyparker_parser.py b/unittests/tools/test_noseyparker_parser.py
index 4e98bbc04f..e55087eb3e 100644
--- a/unittests/tools/test_noseyparker_parser.py
+++ b/unittests/tools/test_noseyparker_parser.py
@@ -40,6 +40,6 @@ def test_noseyparker_parser_error(self):
testfile.close()
self.assertEqual(0, len(findings))
self.assertTrue(
- "Invalid Nosey Parker data, make sure to use Nosey Parker v0.16.0" in str(context.exception)
+ "Invalid Nosey Parker data, make sure to use Nosey Parker v0.16.0" in str(context.exception),
)
self.assertTrue("ECONNREFUSED" in str(context.exception))
diff --git a/unittests/tools/test_ort_parser.py b/unittests/tools/test_ort_parser.py
index f523d35626..0d5c618cb6 100644
--- a/unittests/tools/test_ort_parser.py
+++ b/unittests/tools/test_ort_parser.py
@@ -11,7 +11,7 @@ def test_parse_without_file_has_no_finding(self):
def test_parse_file_has_many_finding_one_tool(self):
testfile = open(
- get_unit_tests_path() + "/scans/ort/evaluated-model-reporter-test-output.json"
+ get_unit_tests_path() + "/scans/ort/evaluated-model-reporter-test-output.json",
)
parser = OrtParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_ossindex_devaudit_parser.py b/unittests/tools/test_ossindex_devaudit_parser.py
index e617654a20..8f30f96466 100644
--- a/unittests/tools/test_ossindex_devaudit_parser.py
+++ b/unittests/tools/test_ossindex_devaudit_parser.py
@@ -7,7 +7,7 @@ class TestOssIndexDevauditParser(DojoTestCase):
def test_ossindex_devaudit_parser_with_no_vulns_has_no_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_no_vuln.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_no_vuln.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -16,7 +16,7 @@ def test_ossindex_devaudit_parser_with_no_vulns_has_no_findings(self):
def test_ossindex_devaudit_parser_with_one_critical_vuln_has_one_finding(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -25,7 +25,7 @@ def test_ossindex_devaudit_parser_with_one_critical_vuln_has_one_finding(self):
def test_ossindex_devaudit_parser_with_multiple_vulns_has_multiple_finding(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_multiple_vulns.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_multiple_vulns.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -34,7 +34,7 @@ def test_ossindex_devaudit_parser_with_multiple_vulns_has_multiple_finding(self)
def test_ossindex_devaudit_parser_with_no_cve_returns_info_severity(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_vuln_no_cvssscore.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_vuln_no_cvssscore.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -43,7 +43,7 @@ def test_ossindex_devaudit_parser_with_no_cve_returns_info_severity(self):
def test_ossindex_devaudit_parser_with_reference_shows_reference(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -55,7 +55,7 @@ def test_ossindex_devaudit_parser_with_reference_shows_reference(self):
def test_ossindex_devaudit_parser_with_empty_reference_shows_empty_reference(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_reference.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_reference.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -66,7 +66,7 @@ def test_ossindex_devaudit_parser_with_empty_reference_shows_empty_reference(sel
def test_ossindex_devaudit_parser_with_missing_reference_shows_empty(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_reference.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_reference.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -77,7 +77,7 @@ def test_ossindex_devaudit_parser_with_missing_reference_shows_empty(self):
def test_ossindex_devaudit_parser_with_missing_cwe_shows_1035(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_cwe.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_cwe.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -88,7 +88,7 @@ def test_ossindex_devaudit_parser_with_missing_cwe_shows_1035(self):
def test_ossindex_devaudit_parser_with_null_cwe_shows_1035(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_null_cwe.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_null_cwe.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -99,7 +99,7 @@ def test_ossindex_devaudit_parser_with_null_cwe_shows_1035(self):
def test_ossindex_devaudit_parser_with_empty_cwe_shows_1035(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_cwe.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_cwe.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -110,7 +110,7 @@ def test_ossindex_devaudit_parser_with_empty_cwe_shows_1035(self):
def test_ossindex_devaudit_parser_get_severity_shows_info(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_info.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_info.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -121,7 +121,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_info(self):
def test_ossindex_devaudit_parser_get_severity_shows_critical(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_critical.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_critical.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -132,7 +132,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_critical(self):
def test_ossindex_devaudit_parser_get_severity_shows_high(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_high.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_high.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -143,7 +143,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_high(self):
def test_ossindex_devaudit_parser_get_severity_shows_medium(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_medium.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_medium.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -154,7 +154,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_medium(self):
def test_ossindex_devaudit_parser_get_severity_shows_low(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_low.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_low.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_php_symfony_security_check_parser.py b/unittests/tools/test_php_symfony_security_check_parser.py
index 6566c02ebe..5e8c4bd51d 100644
--- a/unittests/tools/test_php_symfony_security_check_parser.py
+++ b/unittests/tools/test_php_symfony_security_check_parser.py
@@ -7,7 +7,7 @@ class TestPhpSymfonySecurityCheckerParser(DojoTestCase):
def test_php_symfony_security_check_parser_with_no_vuln_has_no_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_no_vuln.json"
+ get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_no_vuln.json",
)
parser = PhpSymfonySecurityCheckParser()
findings = parser.get_findings(testfile, Test())
@@ -19,7 +19,7 @@ def test_php_symfony_security_check_parser_with_one_criticle_vuln_has_one_findin
self,
):
testfile = open(
- get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_one_vuln.json"
+ get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_one_vuln.json",
)
parser = PhpSymfonySecurityCheckParser()
findings = parser.get_findings(testfile, Test())
@@ -28,7 +28,7 @@ def test_php_symfony_security_check_parser_with_one_criticle_vuln_has_one_findin
def test_php_symfony_security_check_parser_with_many_vuln_has_many_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_many_vuln.json"
+ get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_many_vuln.json",
)
parser = PhpSymfonySecurityCheckParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_qualys_infrascan_webgui_parser.py b/unittests/tools/test_qualys_infrascan_webgui_parser.py
index 78e57188a6..941aee124c 100644
--- a/unittests/tools/test_qualys_infrascan_webgui_parser.py
+++ b/unittests/tools/test_qualys_infrascan_webgui_parser.py
@@ -11,7 +11,7 @@ class TestQualysInfrascanWebguiParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_0.xml"
+ get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_0.xml",
) as testfile:
parser = QualysInfrascanWebguiParser()
findings = parser.get_findings(testfile, Test())
@@ -21,7 +21,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
# + also verify data with one test
def test_parse_file_with_one_vuln_has_one_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_1.xml"
+ get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_1.xml",
) as testfile:
parser = QualysInfrascanWebguiParser()
findings = parser.get_findings(testfile, Test())
@@ -38,7 +38,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
# Sample with Multiple Test
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_multiple.xml"
+ get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_multiple.xml",
) as testfile:
parser = QualysInfrascanWebguiParser()
findings = parser.get_findings(testfile, Test())
@@ -61,7 +61,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
# Sample with Multiple Test
def test_parse_file_with_finding_no_dns(self):
with open(
- get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_3.xml"
+ get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_3.xml",
) as testfile:
parser = QualysInfrascanWebguiParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_qualys_parser.py b/unittests/tools/test_qualys_parser.py
index be96a3334a..3580196116 100644
--- a/unittests/tools/test_qualys_parser.py
+++ b/unittests/tools/test_qualys_parser.py
@@ -18,7 +18,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
def parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys/empty.xml"
+ get_unit_tests_path() + "/scans/qualys/empty.xml",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -35,7 +35,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
def parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml"
+ get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -46,13 +46,13 @@ def parse_file_with_multiple_vuln_has_multiple_findings(self):
finding = findings[0]
self.assertEqual(
- finding.title, "QID-6 | DNS Host Name"
+ finding.title, "QID-6 | DNS Host Name",
)
self.assertEqual(
- finding.severity, "Informational"
+ finding.severity, "Informational",
)
self.assertEqual(
- finding.unsaved_endpoints[0].host, "demo13.s02.sjc01.qualys.com"
+ finding.unsaved_endpoints[0].host, "demo13.s02.sjc01.qualys.com",
)
for finding in findings:
if finding.unsaved_endpoints[0].host == "demo14.s02.sjc01.qualys.com" and finding.title == "QID-370876 | AMD Processors Multiple Security Vulnerabilities (RYZENFALL/MASTERKEY/CHIMERA-FW/FALLOUT)":
@@ -61,15 +61,15 @@ def parse_file_with_multiple_vuln_has_multiple_findings(self):
finding_cvssv3_vector = finding
self.assertEqual(
# CVSS_FINAL is defined without a cvssv3 vector
- finding_cvssv3_score.cvssv3, None
+ finding_cvssv3_score.cvssv3, None,
)
self.assertEqual(
- finding_cvssv3_score.severity, "High"
+ finding_cvssv3_score.severity, "High",
)
self.assertEqual(finding_cvssv3_vector.cvssv3,
"CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H")
self.assertEqual(
- finding_cvssv3_vector.severity, "High"
+ finding_cvssv3_vector.severity, "High",
)
return finding
@@ -82,7 +82,7 @@ def test_parse_file_with_no_vuln_has_no_findings_csv(self):
def parse_file_with_no_vuln_has_no_findings_csv(self):
with open(
- get_unit_tests_path() + "/scans/qualys/empty.csv"
+ get_unit_tests_path() + "/scans/qualys/empty.csv",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -99,7 +99,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_csv(self):
def parse_file_with_multiple_vuln_has_multiple_findings_csv(self):
with open(
- get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.csv"
+ get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.csv",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -113,30 +113,30 @@ def parse_file_with_multiple_vuln_has_multiple_findings_csv(self):
finding.title,
"QID-105971 | EOL/Obsolete Software: Microsoft ASP.NET 1.0 Detected")
self.assertEqual(
- finding.severity, "Critical"
+ finding.severity, "Critical",
)
self.assertEqual(
- finding.unsaved_endpoints[0].host, "ip-10-98-57-180.eu-west-1.compute.internal"
+ finding.unsaved_endpoints[0].host, "ip-10-98-57-180.eu-west-1.compute.internal",
)
for finding in findings:
if finding.unsaved_endpoints[0].host == "ip-10-98-57-180.eu-west-1.compute.internal" and finding.title == "QID-105971 | EOL/Obsolete Software: Microsoft ASP.NET 1.0 Detected":
self.assertEqual(
- finding.severity, "Critical"
+ finding.severity, "Critical",
)
self.assertEqual(
finding.cvssv3,
"CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:H/A:H/E:U/RL:U/RC:C")
self.assertEqual(
- finding.severity, "Critical"
+ finding.severity, "Critical",
)
return findings[0]
def test_parse_file_monthly_pci_issue6932(self):
with open(
- get_unit_tests_path() + "/scans/qualys/monthly_pci_issue6932.csv"
+ get_unit_tests_path() + "/scans/qualys/monthly_pci_issue6932.csv",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -144,7 +144,7 @@ def test_parse_file_monthly_pci_issue6932(self):
def test_parse_file_with_cvss_values_and_scores(self):
with open(
- get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml"
+ get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -158,24 +158,24 @@ def test_parse_file_with_cvss_values_and_scores(self):
# The CVSS Vector is not used from the Knowledgebase
self.assertEqual(
# CVSS_FINAL is defined without a cvssv3 vector
- finding_cvssv3_score.cvssv3, None
+ finding_cvssv3_score.cvssv3, None,
)
# Nevertheless the CVSSv3 Score should be set
self.assertEqual(
- finding_cvssv3_score.cvssv3_score, 8.2
+ finding_cvssv3_score.cvssv3_score, 8.2,
)
# If no cvss information is present in detection and not in knowledgebase values should be empty
self.assertEqual(
- finding_no_cvssv3.cvssv3, None
+ finding_no_cvssv3.cvssv3, None,
)
self.assertEqual(
- finding_no_cvssv3.cvssv3_score, None
+ finding_no_cvssv3.cvssv3_score, None,
)
# No CVSS Values available in detection and it uses the knowledgebase then
self.assertEqual(finding_no_cvssv3_at_detection.cvssv3,
"CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H")
self.assertEqual(
- finding_no_cvssv3_at_detection.cvssv3_score, 9.0
+ finding_no_cvssv3_at_detection.cvssv3_score, 9.0,
)
def test_get_severity_legacy(self):
diff --git a/unittests/tools/test_qualys_webapp_parser.py b/unittests/tools/test_qualys_webapp_parser.py
index 2df655e36b..078e8f7dd0 100644
--- a/unittests/tools/test_qualys_webapp_parser.py
+++ b/unittests/tools/test_qualys_webapp_parser.py
@@ -31,7 +31,7 @@ def test_qualys_webapp_parser_with_one_criticle_vuln_has_one_findings(self):
def test_qualys_webapp_parser_with_many_vuln_has_many_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml"
+ get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml",
)
parser = QualysWebAppParser()
findings = parser.get_findings(testfile, Test())
@@ -45,7 +45,7 @@ def test_qualys_webapp_parser_with_many_vuln_has_many_findings(self):
def test_qualys_webapp_parser_info_is_vuln(self):
testfile = open(
- get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml"
+ get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml",
)
parser = QualysWebAppParser()
findings = parser.get_findings(testfile, Test(), True)
@@ -59,7 +59,7 @@ def test_qualys_webapp_parser_info_is_vuln(self):
def test_discussion_10239(self):
testfile = open(
- get_unit_tests_path() + "/scans/qualys_webapp/discussion_10239.xml"
+ get_unit_tests_path() + "/scans/qualys_webapp/discussion_10239.xml",
)
parser = QualysWebAppParser()
findings = parser.get_findings(testfile, Test(), True)
diff --git a/unittests/tools/test_sarif_parser.py b/unittests/tools/test_sarif_parser.py
index eb3dd05332..a819846169 100644
--- a/unittests/tools/test_sarif_parser.py
+++ b/unittests/tools/test_sarif_parser.py
@@ -18,8 +18,8 @@ def common_checks(self, finding):
def test_example_report(self):
with open(
path.join(
- get_unit_tests_path() + "/scans/sarif/DefectDojo_django-DefectDojo__2020-12-11_13 42 10__export.sarif"
- )
+ get_unit_tests_path() + "/scans/sarif/DefectDojo_django-DefectDojo__2020-12-11_13 42 10__export.sarif",
+ ),
)as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
@@ -109,7 +109,7 @@ def test_example_k4_report_mitigation(self):
with self.subTest(i=0):
finding = findings[0]
self.assertEqual(
- 'Variable "ptr" was used without being initialized. It was declared [here](0).', finding.title
+ 'Variable "ptr" was used without being initialized. It was declared [here](0).', finding.title,
)
self.assertEqual("C2001", finding.vuln_id_from_tool)
self.assertEqual("collections/list.h", finding.file_path)
@@ -276,7 +276,7 @@ def test_dockle(self):
**Rule short description:** Do not store credential in ENVIRONMENT vars/files"""
self.assertEqual(description, finding.description)
self.assertEqual(
- "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0010", finding.references
+ "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0010", finding.references,
)
with self.subTest(i=1):
finding = findings[1]
@@ -286,7 +286,7 @@ def test_dockle(self):
**Rule short description:** Enable Content trust for Docker"""
self.assertEqual(description, finding.description)
self.assertEqual(
- "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0005", finding.references
+ "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0005", finding.references,
)
with self.subTest(i=2):
finding = findings[2]
@@ -296,7 +296,7 @@ def test_dockle(self):
**Rule short description:** Add HEALTHCHECK instruction to the container image"""
self.assertEqual(description, finding.description)
self.assertEqual(
- "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0006", finding.references
+ "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0006", finding.references,
)
with self.subTest(i=3):
finding = findings[3]
@@ -306,7 +306,7 @@ def test_dockle(self):
**Rule short description:** Confirm safety of setuid/setgid files"""
self.assertEqual(description, finding.description)
self.assertEqual(
- "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0008", finding.references
+ "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0008", finding.references,
)
def test_mobsfscan(self):
@@ -394,7 +394,7 @@ def test_flawfinder(self):
self.assertEqual(327, finding.cwe)
self.assertEqual("FF1048", finding.vuln_id_from_tool)
self.assertEqual(
- "e6c1ad2b1d96ffc4035ed8df070600566ad240b8ded025dac30620f3fd4aa9fd", finding.unique_id_from_tool
+ "e6c1ad2b1d96ffc4035ed8df070600566ad240b8ded025dac30620f3fd4aa9fd", finding.unique_id_from_tool,
)
self.assertEqual("https://cwe.mitre.org/data/definitions/327.html", finding.references)
with self.subTest(i=20):
@@ -417,7 +417,7 @@ def test_flawfinder(self):
self.assertEqual(120, finding.cwe)
self.assertEqual("FF1004", finding.vuln_id_from_tool)
self.assertEqual(
- "327fc54b75ab37bbbb31a1b71431aaefa8137ff755acc103685ad5adf88f5dda", finding.unique_id_from_tool
+ "327fc54b75ab37bbbb31a1b71431aaefa8137ff755acc103685ad5adf88f5dda", finding.unique_id_from_tool,
)
self.assertEqual("https://cwe.mitre.org/data/definitions/120.html", finding.references)
with self.subTest(i=52):
@@ -439,7 +439,7 @@ def test_flawfinder(self):
self.assertEqual(482, finding.line)
self.assertEqual("FF1021", finding.vuln_id_from_tool)
self.assertEqual(
- "ad8408027235170e870e7662751a01386beb2d2ed8beb75dd4ba8e4a70e91d65", finding.unique_id_from_tool
+ "ad8408027235170e870e7662751a01386beb2d2ed8beb75dd4ba8e4a70e91d65", finding.unique_id_from_tool,
)
self.assertEqual("https://cwe.mitre.org/data/definitions/120.html", finding.references)
diff --git a/unittests/tools/test_scantist_parser.py b/unittests/tools/test_scantist_parser.py
index 7b8e0b0d4c..a51223869a 100644
--- a/unittests/tools/test_scantist_parser.py
+++ b/unittests/tools/test_scantist_parser.py
@@ -26,7 +26,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
"attack against hashes associated with the maximum exponent.",
)
self.assertEqual(
- findings.severity, "Medium"
+ findings.severity, "Medium",
) # Negligible is translated to Informational
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
diff --git a/unittests/tools/test_snyk_parser.py b/unittests/tools/test_snyk_parser.py
index 59fde5a85f..ba6f27cb3c 100644
--- a/unittests/tools/test_snyk_parser.py
+++ b/unittests/tools/test_snyk_parser.py
@@ -63,7 +63,7 @@ def test_snykParser_finding_has_fields(self):
finding.severity_justification,
)
self.assertEqual(
- "SNYK-JAVA-ORGAPACHESANTUARIO-460281", finding.vuln_id_from_tool
+ "SNYK-JAVA-ORGAPACHESANTUARIO-460281", finding.vuln_id_from_tool,
)
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
self.assertEqual("CVE-2019-12400", finding.unsaved_vulnerability_ids[0])
@@ -85,7 +85,7 @@ def test_snykParser_finding_has_fields(self):
finding.references,
)
self.assertEqual(
- "com.test:myframework > org.apache.santuario:xmlsec", finding.file_path
+ "com.test:myframework > org.apache.santuario:xmlsec", finding.file_path,
)
def test_snykParser_file_path_with_ampersand_is_preserved(self):
@@ -97,7 +97,7 @@ def test_snykParser_file_path_with_ampersand_is_preserved(self):
finding = findings[0]
self.assertEqual(
"myproject > @angular/localize > @babel/core > lodash",
- finding.file_path
+ finding.file_path,
)
def test_snykParser_allprojects_issue4277(self):
@@ -146,7 +146,7 @@ def test_snykParser_cvssscore_none(self):
finding = findings[0]
self.assertEqual("Low", finding.severity)
self.assertEqual(
- "SNYK-SLES153-PERMISSIONS-2648113", finding.vuln_id_from_tool
+ "SNYK-SLES153-PERMISSIONS-2648113", finding.vuln_id_from_tool,
)
def test_snykParser_target_file(self):
diff --git a/unittests/tools/test_solar_appscreener_parser.py b/unittests/tools/test_solar_appscreener_parser.py
index 3e2284ee80..0fb8cf4ee4 100644
--- a/unittests/tools/test_solar_appscreener_parser.py
+++ b/unittests/tools/test_solar_appscreener_parser.py
@@ -59,6 +59,6 @@ def test_solar_appscreener_parser_with_many_vuln_has_many_findings(self):
self.assertEqual("Trust boundary violation", finding.title)
self.assertEqual("Medium", finding.severity)
self.assertEqual("index.php", finding.sast_source_file_path)
- self.assertEqual(51, finding.sast_source_line),
+ self.assertEqual(51, finding.sast_source_line)
self.assertEqual("index.php", finding.file_path)
self.assertEqual(51, finding.line)
diff --git a/unittests/tools/test_sonarqube_parser.py b/unittests/tools/test_sonarqube_parser.py
index cf72d020e5..57e110f59c 100644
--- a/unittests/tools/test_sonarqube_parser.py
+++ b/unittests/tools/test_sonarqube_parser.py
@@ -21,7 +21,7 @@ def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -31,7 +31,7 @@ def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings
# SonarQube Scan detailed - no finding
def test_detailed_parse_file_with_no_vulnerabilities_has_no_findings(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -44,7 +44,7 @@ def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_fi
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -93,7 +93,7 @@ def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_fi
def test_detailed_parse_file_with_single_vulnerability_has_single_finding(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -141,7 +141,7 @@ def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -154,7 +154,7 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -167,7 +167,7 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi
def test_detailed_parse_file_with_table_in_table(self):
"""Test parsing when the vulnerability details include a table, with tr and td that should be ignored when looking for list of rules"""
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -232,7 +232,7 @@ def test_detailed_parse_file_with_table_in_table(self):
)
self.assertEqual(str, type(item.file_path))
self.assertEqual(
- "java/org/apache/catalina/util/URLEncoder.java", item.file_path
+ "java/org/apache/catalina/util/URLEncoder.java", item.file_path,
)
self.assertEqual(str, type(item.line))
self.assertEqual("190", item.line)
@@ -247,7 +247,7 @@ def test_detailed_parse_file_with_table_in_table(self):
def test_detailed_parse_file_with_rule_undefined(self):
"""the vulnerability's rule is not in the list of rules"""
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-rule-undefined.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-rule-undefined.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -278,7 +278,7 @@ def test_detailed_parse_file_with_rule_undefined(self):
self.assertEqual("", item.references)
self.assertEqual(str, type(item.file_path))
self.assertEqual(
- "java/org/apache/catalina/util/URLEncoder.java", item.file_path
+ "java/org/apache/catalina/util/URLEncoder.java", item.file_path,
)
self.assertEqual(str, type(item.line))
self.assertEqual("190", item.line)
@@ -293,7 +293,7 @@ def test_detailed_parse_file_with_rule_undefined(self):
# SonarQube Scan - report with aggregations to be made
def test_file_name_aggregated_parse_file_with_vuln_on_same_filename(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-4-findings-3-to-aggregate.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-4-findings-3-to-aggregate.html",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -356,7 +356,7 @@ def test_file_name_aggregated_parse_file_with_vuln_on_same_filename(self):
# SonarQube Scan detailed - report with aggregations to be made
def test_detailed_parse_file_with_vuln_on_same_filename(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-4-findings-3-to-aggregate.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-4-findings-3-to-aggregate.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -384,7 +384,7 @@ def test_detailed_parse_file_table_has_whitespace(self):
Data table will have some whitespaces, parser should strip it before compare or use these properties.
"""
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table-with-whitespace.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table-with-whitespace.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -449,7 +449,7 @@ def test_detailed_parse_file_table_has_whitespace(self):
)
self.assertEqual(str, type(item.file_path))
self.assertEqual(
- "java/org/apache/catalina/util/URLEncoder.java", item.file_path
+ "java/org/apache/catalina/util/URLEncoder.java", item.file_path,
)
self.assertEqual(str, type(item.line))
self.assertEqual("190", item.line)
@@ -463,7 +463,7 @@ def test_detailed_parse_file_table_has_whitespace(self):
def test_detailed_parse_json_file_with_no_vulnerabilities_has_no_findings(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.json"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.json",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -473,7 +473,7 @@ def test_detailed_parse_json_file_with_no_vulnerabilities_has_no_findings(self):
def test_detailed_parse_json_file_with_single_vulnerability_has_single_finding(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.json"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.json",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -552,7 +552,7 @@ def test_detailed_parse_json_file_with_single_vulnerability_has_single_finding(s
def test_detailed_parse_json_file_with_multiple_vulnerabilities_has_multiple_findings(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.json"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.json",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -564,7 +564,7 @@ def test_detailed_parse_json_file_with_multiple_vulnerabilities_has_multiple_fin
def test_parse_json_file_from_api_with_multiple_findings_json(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/findings_over_api.json"
+ get_unit_tests_path() + "/scans/sonarqube/findings_over_api.json",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -596,7 +596,7 @@ def test_parse_json_file_from_api_with_multiple_findings_json(self):
def test_parse_json_file_from_api_with_multiple_findings_hotspots_json(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/findings_over_api_hotspots.json"
+ get_unit_tests_path() + "/scans/sonarqube/findings_over_api_hotspots.json",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -615,7 +615,7 @@ def test_parse_json_file_from_api_with_multiple_findings_hotspots_json(self):
def test_parse_json_file_from_api_with_empty_json(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/findings_over_api_empty.json"
+ get_unit_tests_path() + "/scans/sonarqube/findings_over_api_empty.json",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -624,7 +624,7 @@ def test_parse_json_file_from_api_with_empty_json(self):
def test_parse_json_file_from_api_with_emppty_zip(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/empty_zip.zip"
+ get_unit_tests_path() + "/scans/sonarqube/empty_zip.zip",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -633,7 +633,7 @@ def test_parse_json_file_from_api_with_emppty_zip(self):
def test_parse_json_file_from_api_with_multiple_findings_zip(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/findings_over_api.zip"
+ get_unit_tests_path() + "/scans/sonarqube/findings_over_api.zip",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -652,7 +652,7 @@ def test_parse_json_file_from_api_with_multiple_findings_zip(self):
def test_parse_json_file_issue_10150(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/issue_10150.json"
+ get_unit_tests_path() + "/scans/sonarqube/issue_10150.json",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
diff --git a/unittests/tools/test_spotbugs_parser.py b/unittests/tools/test_spotbugs_parser.py
index 879c971312..2587bc71b2 100644
--- a/unittests/tools/test_spotbugs_parser.py
+++ b/unittests/tools/test_spotbugs_parser.py
@@ -75,7 +75,7 @@ def test_description(self):
test_finding = findings[0]
# Test if line 13 is correct
self.assertEqual(
- "At IdentityFunctionCommandInjection.kt:[lines 20-170]", test_finding.description.splitlines()[12]
+ "At IdentityFunctionCommandInjection.kt:[lines 20-170]", test_finding.description.splitlines()[12],
)
def test_mitigation(self):
diff --git a/unittests/tools/test_sslyze_parser.py b/unittests/tools/test_sslyze_parser.py
index 03194a31d0..24c930b6f2 100644
--- a/unittests/tools/test_sslyze_parser.py
+++ b/unittests/tools/test_sslyze_parser.py
@@ -95,7 +95,7 @@ def test_parse_json_file_with_one_target_has_one_vuln_new(self):
self.assertEqual('Medium', finding.severity)
self.assertEqual(
'TLS recommendations of German BSI: https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10',
- finding.references
+ finding.references,
)
self.assertEqual(1, len(finding.unsaved_endpoints))
@@ -134,7 +134,7 @@ def test_parse_json_file_with_two_target_has_many_vuln_new(self):
self.assertEqual('Medium', finding.severity)
self.assertEqual(
'TLS recommendations of German BSI: https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10',
- finding.references
+ finding.references,
)
self.assertEqual(1, len(finding.unsaved_endpoints))
@@ -148,7 +148,7 @@ def test_parse_json_file_with_two_target_has_many_vuln_new(self):
self.assertEqual('Medium', finding.severity)
self.assertEqual(
'TLS recommendations of German BSI: https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10',
- finding.references
+ finding.references,
)
self.assertEqual(1, len(finding.unsaved_endpoints))
diff --git a/unittests/tools/test_stackhawk_parser.py b/unittests/tools/test_stackhawk_parser.py
index 64151495da..94cf2de470 100644
--- a/unittests/tools/test_stackhawk_parser.py
+++ b/unittests/tools/test_stackhawk_parser.py
@@ -46,7 +46,7 @@ def test_stackhawk_parser_with_one_high_vuln_has_one_findings(self):
"20012",
"10",
False,
- False
+ False,
)
def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicates(self):
@@ -67,7 +67,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
"90027",
"10",
False,
- False
+ False,
)
self.__assertFindingEquals(
@@ -81,7 +81,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
"40025",
"10",
False,
- False
+ False,
)
self.__assertFindingEquals(
@@ -95,7 +95,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
"20012",
"10",
False,
- False
+ False,
)
self.__assertFindingEquals(
@@ -109,7 +109,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
"40012",
"1",
False,
- False
+ False,
)
self.__assertFindingEquals(
@@ -123,7 +123,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
"10038",
"12",
False,
- False
+ False,
)
self.__assertFindingEquals(
@@ -137,7 +137,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
"10063",
"12",
False,
- False
+ False,
)
def test_that_a_scan_import_updates_the_test_description(self):
@@ -149,7 +149,7 @@ def test_that_a_scan_import_updates_the_test_description(self):
test.description,
'View scan details here: '
+ '[https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27]'
- + '(https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27)'
+ + '(https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27)',
)
def test_that_a_scan_with_all_false_positive_endpoints_on_a_finding_marks_as_false_positive(self):
@@ -169,7 +169,7 @@ def test_that_a_scan_with_all_false_positive_endpoints_on_a_finding_marks_as_fal
"90027",
"3",
True,
- False
+ False,
)
def test_that_a_scan_with_all_risk_accepted_endpoints_on_a_finding_marks_as_risk_accepted(self):
@@ -189,7 +189,7 @@ def test_that_a_scan_with_all_risk_accepted_endpoints_on_a_finding_marks_as_risk
"90027",
"3",
False,
- True
+ True,
)
def test_that_a_scan_with_endpoints_in_differing_statuses_does_not_mark_as_risk_accepted_or_false_positive(self):
@@ -209,7 +209,7 @@ def test_that_a_scan_with_endpoints_in_differing_statuses_does_not_mark_as_risk_
"90027",
"3",
False,
- False
+ False,
)
def __assertFindingEquals(
@@ -224,7 +224,7 @@ def __assertFindingEquals(
finding_id,
count,
false_positive,
- risk_accepted
+ risk_accepted,
):
self.assertEqual(title, actual_finding.title)
self.assertEqual(date, actual_finding.date)
@@ -235,7 +235,7 @@ def __assertFindingEquals(
actual_finding.description)
self.assertRegex(
actual_finding.steps_to_reproduce,
- "Use a specific message link and click 'Validate' to see the cURL!.*"
+ "Use a specific message link and click 'Validate' to see the cURL!.*",
)
self.assertFalse(actual_finding.static_finding)
self.assertTrue(actual_finding.dynamic_finding)
diff --git a/unittests/tools/test_sysdig_reports_parser.py b/unittests/tools/test_sysdig_reports_parser.py
index 00979f66e8..2e38af87e0 100644
--- a/unittests/tools/test_sysdig_reports_parser.py
+++ b/unittests/tools/test_sysdig_reports_parser.py
@@ -42,7 +42,7 @@ def test_sysdig_parser_missing_cve_field_id_from_csv_file(self):
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(
- "Number of fields in row (22) does not match number of headers (21)", str(context.exception)
+ "Number of fields in row (22) does not match number of headers (21)", str(context.exception),
)
def test_sysdig_parser_missing_cve_field_not_starting_with_cve(self):
@@ -54,7 +54,7 @@ def test_sysdig_parser_missing_cve_field_not_starting_with_cve(self):
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(
- "Number of fields in row (22) does not match number of headers (21)", str(context.exception)
+ "Number of fields in row (22) does not match number of headers (21)", str(context.exception),
)
def test_sysdig_parser_json_with_many_findings(self):
diff --git a/unittests/tools/test_talisman_parser.py b/unittests/tools/test_talisman_parser.py
index 0f05b83d71..9862f6088a 100644
--- a/unittests/tools/test_talisman_parser.py
+++ b/unittests/tools/test_talisman_parser.py
@@ -27,7 +27,7 @@ def test_parse_many_finding(self):
self.assertEqual(3, len(findings))
finding = findings[0]
self.assertEqual(
- "talisman_report/talisman_reports/data/report.json", finding.file_path
+ "talisman_report/talisman_reports/data/report.json", finding.file_path,
)
self.assertEqual(
"Secret pattern found in talisman_report/talisman_reports/data/report.json file",
diff --git a/unittests/tools/test_trustwave_fusion_api_parser.py b/unittests/tools/test_trustwave_fusion_api_parser.py
index 7773af5cb2..f09a31a0d0 100644
--- a/unittests/tools/test_trustwave_fusion_api_parser.py
+++ b/unittests/tools/test_trustwave_fusion_api_parser.py
@@ -6,7 +6,7 @@
class TestTrustwaveFusionAPIParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_zero_vul.json"
+ get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_zero_vul.json",
) as testfile:
parser = TrustwaveFusionAPIParser()
findings = parser.get_findings(testfile, Test())
@@ -26,7 +26,7 @@ def test_vuln_with_valid_cve(self):
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
self.assertEqual("CVE-2017-7529", finding.unsaved_vulnerability_ids[0])
self.assertEqual(
- "Vulnerability/Missing Patch", finding.description
+ "Vulnerability/Missing Patch", finding.description,
)
# second example
@@ -42,7 +42,7 @@ def test_vuln_with_valid_cve(self):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_many_vul.json"
+ get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_many_vul.json",
) as testfile:
parser = TrustwaveFusionAPIParser()
findings = parser.get_findings(testfile, Test())
@@ -58,7 +58,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
self.assertEqual("0123456:id", finding.unique_id_from_tool)
self.assertEqual("Website Detected", finding.title)
self.assertEqual(
- "Information/Service Discovery", finding.description
+ "Information/Service Discovery", finding.description,
)
self.assertIsNone(finding.unsaved_vulnerability_ids)
date = finding.date.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
diff --git a/unittests/tools/test_twistlock_parser.py b/unittests/tools/test_twistlock_parser.py
index 8d8121305b..ce91e7cd0d 100644
--- a/unittests/tools/test_twistlock_parser.py
+++ b/unittests/tools/test_twistlock_parser.py
@@ -47,7 +47,7 @@ def test_parse_file_which_contain_packages_info(self):
def test_parse_file_prisma_twistlock_images_no_vuln(self):
testfile = open(
- path.join(path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_no_vuln.csv")
+ path.join(path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_no_vuln.csv"),
)
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
@@ -56,7 +56,7 @@ def test_parse_file_prisma_twistlock_images_no_vuln(self):
def test_parse_file_prisma_twistlock_images_four_vulns(self):
testfile = open(
- path.join(path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_four_vulns.csv")
+ path.join(path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_four_vulns.csv"),
)
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
@@ -68,8 +68,8 @@ def test_parse_file_prisma_twistlock_images_four_vulns(self):
def test_parse_file_prisma_twistlock_images_long_package_name(self):
testfile = open(
path.join(
- path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_long_package_name.csv"
- )
+ path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_long_package_name.csv",
+ ),
)
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_veracode_parser.py b/unittests/tools/test_veracode_parser.py
index 9a00b0d646..3daed41862 100644
--- a/unittests/tools/test_veracode_parser.py
+++ b/unittests/tools/test_veracode_parser.py
@@ -390,7 +390,7 @@ def json_dynamic_findings_test(self, file_name):
host="application.insecure-company-alliance.com",
port=443,
path="api/*_*//new_user_sign_up",
- query="param=wild-things"
+ query="param=wild-things",
))
@override_settings(USE_FIRST_SEEN=True)
diff --git a/unittests/tools/test_yarn_audit_parser.py b/unittests/tools/test_yarn_audit_parser.py
index 1206f88b43..65206725ca 100644
--- a/unittests/tools/test_yarn_audit_parser.py
+++ b/unittests/tools/test_yarn_audit_parser.py
@@ -68,7 +68,7 @@ def test_yarn_audit_parser_empty_with_error(self):
parser = YarnAuditParser()
parser.get_findings(testfile, self.get_test())
self.assertTrue(
- "yarn audit report contains errors:" in str(context.exception)
+ "yarn audit report contains errors:" in str(context.exception),
)
self.assertTrue("ECONNREFUSED" in str(context.exception))
From 74b55e8972fd1ea13c15ba6ca3b6992613d0c0b8 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 5 Jul 2024 11:13:43 -0500
Subject: [PATCH 022/111] Bump packageurl-python from 0.15.1 to 0.15.2 (#10516)
Bumps [packageurl-python](https://github.com/package-url/packageurl-python) from 0.15.1 to 0.15.2.
- [Release notes](https://github.com/package-url/packageurl-python/releases)
- [Changelog](https://github.com/package-url/packageurl-python/blob/main/CHANGELOG.rst)
- [Commits](https://github.com/package-url/packageurl-python/compare/v0.15.1...v0.15.2)
---
updated-dependencies:
- dependency-name: packageurl-python
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 3feab385ac..8801a56f4a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -54,7 +54,7 @@ gitpython==3.1.43
debugpy==1.8.2
python-gitlab==4.7.0
cpe==1.2.1
-packageurl-python==0.15.1
+packageurl-python==0.15.2
django-crum==0.7.9
JSON-log-formatter==1.0
django-split-settings==1.3.1
From bf8e4e70bb18eeb8b0f1bbcad6aea3cf1ae09af2 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 5 Jul 2024 11:14:22 -0500
Subject: [PATCH 023/111] Bump psycopg[binary] from 3.1.19 to 3.2.1 (#10517)
Bumps [psycopg[binary]](https://github.com/psycopg/psycopg) from 3.1.19 to 3.2.1.
- [Changelog](https://github.com/psycopg/psycopg/blob/master/docs/news.rst)
- [Commits](https://github.com/psycopg/psycopg/compare/3.1.19...3.2.1)
---
updated-dependencies:
- dependency-name: psycopg[binary]
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 8801a56f4a..f9edb8c4e9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -35,7 +35,7 @@ Markdown==3.6
mysqlclient==2.1.1
openpyxl==3.1.5
Pillow==10.4.0 # required by django-imagekit
-psycopg[binary]==3.1.19
+psycopg[binary]==3.2.1
cryptography==42.0.8
python-dateutil==2.9.0.post0
pytz==2024.1
From a6262652cdc8dabed23506c772a30638e75b1de1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 5 Jul 2024 11:14:44 -0500
Subject: [PATCH 024/111] Bump boto3 from 1.34.138 to 1.34.139 (#10518)
Bumps [boto3](https://github.com/boto/boto3) from 1.34.138 to 1.34.139.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.34.138...1.34.139)
---
updated-dependencies:
- dependency-name: boto3
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index f9edb8c4e9..ebafe74957 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -75,7 +75,7 @@ django-ratelimit==4.1.0
argon2-cffi==23.1.0
blackduck==1.1.3
pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
-boto3==1.34.138 # Required for Celery Broker AWS (SQS) support
+boto3==1.34.139 # Required for Celery Broker AWS (SQS) support
netaddr==1.3.0
vulners==2.1.7
fontawesomefree==6.5.1
From fe620a970a2d73068a5bd4f8da8bffd6605b0e3c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 5 Jul 2024 11:15:30 -0500
Subject: [PATCH 025/111] Bump django-debug-toolbar from 4.4.2 to 4.4.4
(#10520)
Bumps [django-debug-toolbar](https://github.com/jazzband/django-debug-toolbar) from 4.4.2 to 4.4.4.
- [Release notes](https://github.com/jazzband/django-debug-toolbar/releases)
- [Changelog](https://github.com/jazzband/django-debug-toolbar/blob/main/docs/changes.rst)
- [Commits](https://github.com/jazzband/django-debug-toolbar/compare/4.4.2...4.4.4)
---
updated-dependencies:
- dependency-name: django-debug-toolbar
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index ebafe74957..29d9c30e23 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -58,7 +58,7 @@ packageurl-python==0.15.2
django-crum==0.7.9
JSON-log-formatter==1.0
django-split-settings==1.3.1
-django-debug-toolbar==4.4.2
+django-debug-toolbar==4.4.4
django-debug-toolbar-request-history==0.1.4
vcrpy==6.0.1
vcrpy-unittest==0.1.7
From 9e2f02b6a8103d1f297a6ee61c79338845b286f6 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 5 Jul 2024 11:56:47 -0500
Subject: [PATCH 026/111] Update dependency ruff from 0.5.0 to v0.5.1
(requirements-lint.txt) (#10521)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
requirements-lint.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements-lint.txt b/requirements-lint.txt
index 0eceab328e..437ebf6bdf 100644
--- a/requirements-lint.txt
+++ b/requirements-lint.txt
@@ -1 +1 @@
-ruff==0.5.0
\ No newline at end of file
+ruff==0.5.1
\ No newline at end of file
From 70c32fe817b96205f879bb5c281adddbac4ba84d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 8 Jul 2024 11:40:38 -0500
Subject: [PATCH 027/111] Bump humanize from 4.9.0 to 4.10.0 (#10525)
Bumps [humanize](https://github.com/python-humanize/humanize) from 4.9.0 to 4.10.0.
- [Release notes](https://github.com/python-humanize/humanize/releases)
- [Commits](https://github.com/python-humanize/humanize/compare/4.9.0...4.10.0)
---
updated-dependencies:
- dependency-name: humanize
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 29d9c30e23..154e9ca5ea 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -27,7 +27,7 @@ django-prometheus==2.3.1
Django==4.2.13
djangorestframework==3.14.0
html2text==2024.2.26
-humanize==4.9.0
+humanize==4.10.0
jira==3.8.0
PyGithub==1.58.2
lxml==5.2.2
From da3fb128f6a54ae80d2eb1ac5a04083bc5b81e8d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 8 Jul 2024 11:40:56 -0500
Subject: [PATCH 028/111] Bump django-split-settings from 1.3.1 to 1.3.2
(#10526)
Bumps [django-split-settings](https://github.com/sponsors/wemake-services) from 1.3.1 to 1.3.2.
- [Commits](https://github.com/sponsors/wemake-services/commits)
---
updated-dependencies:
- dependency-name: django-split-settings
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 154e9ca5ea..af8616baef 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -57,7 +57,7 @@ cpe==1.2.1
packageurl-python==0.15.2
django-crum==0.7.9
JSON-log-formatter==1.0
-django-split-settings==1.3.1
+django-split-settings==1.3.2
django-debug-toolbar==4.4.4
django-debug-toolbar-request-history==0.1.4
vcrpy==6.0.1
From d0f3d30a1546d7bdf3a042f4f329483823bdff2d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 8 Jul 2024 11:42:48 -0500
Subject: [PATCH 029/111] Bump boto3 from 1.34.139 to 1.34.140 (#10528)
Bumps [boto3](https://github.com/boto/boto3) from 1.34.139 to 1.34.140.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.34.139...1.34.140)
---
updated-dependencies:
- dependency-name: boto3
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index af8616baef..9fb73a3a62 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -75,7 +75,7 @@ django-ratelimit==4.1.0
argon2-cffi==23.1.0
blackduck==1.1.3
pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
-boto3==1.34.139 # Required for Celery Broker AWS (SQS) support
+boto3==1.34.140 # Required for Celery Broker AWS (SQS) support
netaddr==1.3.0
vulners==2.1.7
fontawesomefree==6.5.1
From 63cbaab77f9096aead16807d647914e51ea83c6c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 8 Jul 2024 11:43:16 -0500
Subject: [PATCH 030/111] Bump openapitools/openapi-generator-cli from v7.6.0
to v7.7.0 (#10531)
Bumps openapitools/openapi-generator-cli from v7.6.0 to v7.7.0.
---
updated-dependencies:
- dependency-name: openapitools/openapi-generator-cli
dependency-type: direct:production
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
Dockerfile.integration-tests-debian | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Dockerfile.integration-tests-debian b/Dockerfile.integration-tests-debian
index c7f38d4fd0..75fb8d39ca 100644
--- a/Dockerfile.integration-tests-debian
+++ b/Dockerfile.integration-tests-debian
@@ -1,7 +1,7 @@
# code: language=Dockerfile
-FROM openapitools/openapi-generator-cli:v7.6.0@sha256:f86ca824293602b71b9b66683cc0011f8ff963858bd853621c554ff5cc7dd1d5 as openapitools
+FROM openapitools/openapi-generator-cli:v7.7.0@sha256:99924315933d49e7b33a7d2074bb2b64fc8def8f74519939036e24eb48f00336 as openapitools
FROM python:3.11.9-slim-bookworm@sha256:8c1036ec919826052306dfb5286e4753ffd9d5f6c24fbc352a5399c3b405b57e as build
WORKDIR /app
RUN \
From 002a0af5ae077f1595ece4e7ebc421ebb66fe9b7 Mon Sep 17 00:00:00 2001
From: kiblik <5609770+kiblik@users.noreply.github.com>
Date: Mon, 8 Jul 2024 18:50:10 +0200
Subject: [PATCH 031/111] Ruff: add and fix G1 and G2 (#10088)
---
dojo/finding/views.py | 5 ++---
dojo/finding_group/views.py | 5 ++---
ruff.toml | 1 +
3 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/dojo/finding/views.py b/dojo/finding/views.py
index 8373022d72..14de369728 100644
--- a/dojo/finding/views.py
+++ b/dojo/finding/views.py
@@ -3386,9 +3386,8 @@ def push_to_jira(request, fid):
)
return JsonResponse({"result": "OK"})
- except Exception as e:
- logger.exception(e)
- logger.error("Error pushing to JIRA: ", exc_info=True)
+ except Exception:
+ logger.exception("Error pushing to JIRA")
messages.add_message(
request, messages.ERROR, "Error pushing to JIRA", extra_tags="alert-danger",
)
diff --git a/dojo/finding_group/views.py b/dojo/finding_group/views.py
index b22c75d0e7..ce4a59e3be 100644
--- a/dojo/finding_group/views.py
+++ b/dojo/finding_group/views.py
@@ -194,9 +194,8 @@ def push_to_jira(request, fgid):
extra_tags='alert-danger')
return JsonResponse({'result': 'OK'})
- except Exception as e:
- logger.exception(e)
- logger.error('Error pushing to JIRA: ', exc_info=True)
+ except Exception:
+ logger.exception('Error pushing to JIRA')
messages.add_message(
request,
messages.ERROR,
diff --git a/ruff.toml b/ruff.toml
index 30a62e2c0c..33976c5b20 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -49,6 +49,7 @@ select = [
"EXE",
"ICN",
"LOG",
+ "G1", "G2",
"INP",
"SLOT",
"PIE",
From fe6f9e0a00e26c86024a05200cb41c6537d648c7 Mon Sep 17 00:00:00 2001
From: DefectDojo release bot
Date: Mon, 8 Jul 2024 18:20:15 +0000
Subject: [PATCH 032/111] Update versions in application files
---
components/package.json | 2 +-
dojo/__init__.py | 2 +-
helm/defectdojo/Chart.yaml | 4 ++--
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/components/package.json b/components/package.json
index 05ce909836..ab3201e6a4 100644
--- a/components/package.json
+++ b/components/package.json
@@ -1,6 +1,6 @@
{
"name": "defectdojo",
- "version": "2.36.1",
+ "version": "2.37.0-dev",
"license" : "BSD-3-Clause",
"private": true,
"dependencies": {
diff --git a/dojo/__init__.py b/dojo/__init__.py
index 27272ee327..707177ee3e 100644
--- a/dojo/__init__.py
+++ b/dojo/__init__.py
@@ -4,6 +4,6 @@
# Django starts so that shared_task will use this app.
from .celery import app as celery_app # noqa: F401
-__version__ = '2.36.1'
+__version__ = '2.37.0-dev'
__url__ = 'https://github.com/DefectDojo/django-DefectDojo'
__docs__ = 'https://documentation.defectdojo.com'
diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml
index b982afd9d7..80417e6a8a 100644
--- a/helm/defectdojo/Chart.yaml
+++ b/helm/defectdojo/Chart.yaml
@@ -1,8 +1,8 @@
apiVersion: v2
-appVersion: "2.36.1"
+appVersion: "2.37.0-dev"
description: A Helm chart for Kubernetes to install DefectDojo
name: defectdojo
-version: 1.6.138
+version: 1.6.139-dev
icon: https://www.defectdojo.org/img/favicon.ico
maintainers:
- name: madchap
From 5ad97d7ef3497975ba8b7c4ee536a898a6da0b90 Mon Sep 17 00:00:00 2001
From: DefectDojo
Date: Mon, 8 Jul 2024 18:27:31 +0000
Subject: [PATCH 033/111] Update helm lock file
Signed-off-by: DefectDojo
---
helm/defectdojo/Chart.lock | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/helm/defectdojo/Chart.lock b/helm/defectdojo/Chart.lock
index 9bd08b08b0..fc443fb193 100644
--- a/helm/defectdojo/Chart.lock
+++ b/helm/defectdojo/Chart.lock
@@ -4,15 +4,15 @@ dependencies:
version: 9.19.1
- name: postgresql
repository: https://charts.bitnami.com/bitnami
- version: 15.5.11
+ version: 15.5.14
- name: postgresql-ha
repository: https://charts.bitnami.com/bitnami
version: 9.4.11
- name: rabbitmq
repository: https://charts.bitnami.com/bitnami
- version: 14.4.4
+ version: 14.4.6
- name: redis
repository: https://charts.bitnami.com/bitnami
- version: 19.6.0
-digest: sha256:d00f56b5b3cf6525a4e06c82789ec7dd68526959ce38ea50e5251151535dcd8b
-generated: "2024-07-01T16:26:01.747085461Z"
+ version: 19.6.1
+digest: sha256:c694c252fd27dfbe2d365c55f6109cf656110ef64e0b11e1734905b988e6aa44
+generated: "2024-07-08T18:27:21.758754594Z"
From 66302c50881cc1702061bc65f45d040728f34ab8 Mon Sep 17 00:00:00 2001
From: Blake Owens <76979297+blakeaowens@users.noreply.github.com>
Date: Mon, 8 Jul 2024 13:33:23 -0500
Subject: [PATCH 034/111] Ruff fix
---
dojo/product/queries.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/dojo/product/queries.py b/dojo/product/queries.py
index 413835d69b..0f47cc2aeb 100644
--- a/dojo/product/queries.py
+++ b/dojo/product/queries.py
@@ -244,7 +244,7 @@ def get_authorized_dojo_meta(permission):
finding__test__engagement__product__prod_type__member=Exists(finding_authorized_product_type_roles),
finding__test__engagement__product__member=Exists(finding_authorized_product_roles),
finding__test__engagement__product__prod_type__authorized_group=Exists(finding_authorized_product_type_groups),
- finding__test__engagement__product__authorized_group=Exists(finding_authorized_product_groups)
+ finding__test__engagement__product__authorized_group=Exists(finding_authorized_product_groups),
).order_by("id")
dojo_meta = dojo_meta.filter(
Q(product__prod_type__member=True)
From 42ef652de8c3fa7a12e2d718fb0e8770aaacd6b4 Mon Sep 17 00:00:00 2001
From: Blake Owens <76979297+blakeaowens@users.noreply.github.com>
Date: Mon, 8 Jul 2024 13:55:40 -0500
Subject: [PATCH 035/111] Fix ruff
---
dojo/api_v2/views.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py
index 2cfd9dbb42..234cc4236d 100644
--- a/dojo/api_v2/views.py
+++ b/dojo/api_v2/views.py
@@ -3266,7 +3266,7 @@ class ConfigurationPermissionViewSet(
def filter_queryset(self, queryset):
return Permission.objects.filter(
- codename__in=get_configuration_permissions_codenames()
+ codename__in=get_configuration_permissions_codenames(),
).order_by("id")
From d7c6d9cfcddc6c246aa172e441764f0a7e7c36bf Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 8 Jul 2024 14:05:16 -0500
Subject: [PATCH 036/111] Bump django-debug-toolbar from 4.4.4 to 4.4.5
(#10527)
Bumps [django-debug-toolbar](https://github.com/jazzband/django-debug-toolbar) from 4.4.4 to 4.4.5.
- [Release notes](https://github.com/jazzband/django-debug-toolbar/releases)
- [Changelog](https://github.com/jazzband/django-debug-toolbar/blob/main/docs/changes.rst)
- [Commits](https://github.com/jazzband/django-debug-toolbar/compare/4.4.4...4.4.5)
---
updated-dependencies:
- dependency-name: django-debug-toolbar
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 9fb73a3a62..4f467490c4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -58,7 +58,7 @@ packageurl-python==0.15.2
django-crum==0.7.9
JSON-log-formatter==1.0
django-split-settings==1.3.2
-django-debug-toolbar==4.4.4
+django-debug-toolbar==4.4.5
django-debug-toolbar-request-history==0.1.4
vcrpy==6.0.1
vcrpy-unittest==0.1.7
From dda00cb8aaedb7296eaae2ddbc01dd5b05bc4e67 Mon Sep 17 00:00:00 2001
From: DefectDojo release bot
Date: Tue, 9 Jul 2024 17:23:31 +0000
Subject: [PATCH 037/111] Update versions in application files
---
components/package.json | 2 +-
dojo/__init__.py | 2 +-
helm/defectdojo/Chart.yaml | 4 ++--
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/components/package.json b/components/package.json
index 386a26a368..ab3201e6a4 100644
--- a/components/package.json
+++ b/components/package.json
@@ -1,6 +1,6 @@
{
"name": "defectdojo",
- "version": "2.36.2",
+ "version": "2.37.0-dev",
"license" : "BSD-3-Clause",
"private": true,
"dependencies": {
diff --git a/dojo/__init__.py b/dojo/__init__.py
index 86cf9498d0..707177ee3e 100644
--- a/dojo/__init__.py
+++ b/dojo/__init__.py
@@ -4,6 +4,6 @@
# Django starts so that shared_task will use this app.
from .celery import app as celery_app # noqa: F401
-__version__ = '2.36.2'
+__version__ = '2.37.0-dev'
__url__ = 'https://github.com/DefectDojo/django-DefectDojo'
__docs__ = 'https://documentation.defectdojo.com'
diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml
index 502bc52246..b59fa2d35c 100644
--- a/helm/defectdojo/Chart.yaml
+++ b/helm/defectdojo/Chart.yaml
@@ -1,8 +1,8 @@
apiVersion: v2
-appVersion: "2.36.2"
+appVersion: "2.37.0-dev"
description: A Helm chart for Kubernetes to install DefectDojo
name: defectdojo
-version: 1.6.139
+version: 1.6.140-dev
icon: https://www.defectdojo.org/img/favicon.ico
maintainers:
- name: madchap
From bee37dee99c3fcdf0e2eeb8a0cdf6e0bc57f3a57 Mon Sep 17 00:00:00 2001
From: DefectDojo
Date: Tue, 9 Jul 2024 17:25:18 +0000
Subject: [PATCH 038/111] Update helm lock file
Signed-off-by: DefectDojo
---
helm/defectdojo/Chart.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/helm/defectdojo/Chart.lock b/helm/defectdojo/Chart.lock
index fc443fb193..c50b042bf8 100644
--- a/helm/defectdojo/Chart.lock
+++ b/helm/defectdojo/Chart.lock
@@ -4,7 +4,7 @@ dependencies:
version: 9.19.1
- name: postgresql
repository: https://charts.bitnami.com/bitnami
- version: 15.5.14
+ version: 15.5.15
- name: postgresql-ha
repository: https://charts.bitnami.com/bitnami
version: 9.4.11
@@ -14,5 +14,5 @@ dependencies:
- name: redis
repository: https://charts.bitnami.com/bitnami
version: 19.6.1
-digest: sha256:c694c252fd27dfbe2d365c55f6109cf656110ef64e0b11e1734905b988e6aa44
-generated: "2024-07-08T18:27:21.758754594Z"
+digest: sha256:933d2f3df74ce23fe4f3a73c3e581c6e0d847f3af8cd56130cdd740a06d4323f
+generated: "2024-07-09T17:25:08.104375211Z"
From 3c3a2751e5c58326ed2137bca24a7b4daddf4261 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 9 Jul 2024 13:41:26 -0500
Subject: [PATCH 039/111] Bump boto3 from 1.34.140 to 1.34.141 (#10542)
Bumps [boto3](https://github.com/boto/boto3) from 1.34.140 to 1.34.141.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.34.140...1.34.141)
---
updated-dependencies:
- dependency-name: boto3
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 4f467490c4..207f952f66 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -75,7 +75,7 @@ django-ratelimit==4.1.0
argon2-cffi==23.1.0
blackduck==1.1.3
pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
-boto3==1.34.140 # Required for Celery Broker AWS (SQS) support
+boto3==1.34.141 # Required for Celery Broker AWS (SQS) support
netaddr==1.3.0
vulners==2.1.7
fontawesomefree==6.5.1
From 17a3673082c2118926e6d9df563fe0cd2948c9bb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 9 Jul 2024 16:47:33 -0500
Subject: [PATCH 040/111] Bump packageurl-python from 0.15.2 to 0.15.3 (#10541)
Bumps [packageurl-python](https://github.com/package-url/packageurl-python) from 0.15.2 to 0.15.3.
- [Release notes](https://github.com/package-url/packageurl-python/releases)
- [Changelog](https://github.com/package-url/packageurl-python/blob/main/CHANGELOG.rst)
- [Commits](https://github.com/package-url/packageurl-python/compare/v0.15.2...v0.15.3)
---
updated-dependencies:
- dependency-name: packageurl-python
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 207f952f66..6045467925 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -54,7 +54,7 @@ gitpython==3.1.43
debugpy==1.8.2
python-gitlab==4.7.0
cpe==1.2.1
-packageurl-python==0.15.2
+packageurl-python==0.15.3
django-crum==0.7.9
JSON-log-formatter==1.0
django-split-settings==1.3.2
From b7de5eb6c5fa2793a7b87e6843b4f4b7a4332fcc Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 10 Jul 2024 13:09:31 -0500
Subject: [PATCH 041/111] Bump boto3 from 1.34.141 to 1.34.142 (#10551)
Bumps [boto3](https://github.com/boto/boto3) from 1.34.141 to 1.34.142.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.34.141...1.34.142)
---
updated-dependencies:
- dependency-name: boto3
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 6045467925..e39a4f7743 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -75,7 +75,7 @@ django-ratelimit==4.1.0
argon2-cffi==23.1.0
blackduck==1.1.3
pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
-boto3==1.34.141 # Required for Celery Broker AWS (SQS) support
+boto3==1.34.142 # Required for Celery Broker AWS (SQS) support
netaddr==1.3.0
vulners==2.1.7
fontawesomefree==6.5.1
From e5adc226d16524341d9f119c16084bc13c7484f9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 11 Jul 2024 11:24:32 -0500
Subject: [PATCH 042/111] Bump django-debug-toolbar from 4.4.5 to 4.4.6
(#10557)
Bumps [django-debug-toolbar](https://github.com/jazzband/django-debug-toolbar) from 4.4.5 to 4.4.6.
- [Release notes](https://github.com/jazzband/django-debug-toolbar/releases)
- [Changelog](https://github.com/jazzband/django-debug-toolbar/blob/main/docs/changes.rst)
- [Commits](https://github.com/jazzband/django-debug-toolbar/compare/4.4.5...4.4.6)
---
updated-dependencies:
- dependency-name: django-debug-toolbar
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index e39a4f7743..7fc4a0955c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -58,7 +58,7 @@ packageurl-python==0.15.3
django-crum==0.7.9
JSON-log-formatter==1.0
django-split-settings==1.3.2
-django-debug-toolbar==4.4.5
+django-debug-toolbar==4.4.6
django-debug-toolbar-request-history==0.1.4
vcrpy==6.0.1
vcrpy-unittest==0.1.7
From a6b8c86f5a19daca2a16e89717294092f3959567 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 11 Jul 2024 11:24:50 -0500
Subject: [PATCH 043/111] Bump boto3 from 1.34.142 to 1.34.143 (#10558)
Bumps [boto3](https://github.com/boto/boto3) from 1.34.142 to 1.34.143.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.34.142...1.34.143)
---
updated-dependencies:
- dependency-name: boto3
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 7fc4a0955c..8e2af13483 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -75,7 +75,7 @@ django-ratelimit==4.1.0
argon2-cffi==23.1.0
blackduck==1.1.3
pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
-boto3==1.34.142 # Required for Celery Broker AWS (SQS) support
+boto3==1.34.143 # Required for Celery Broker AWS (SQS) support
netaddr==1.3.0
vulners==2.1.7
fontawesomefree==6.5.1
From 68ff68daa3ee6b9f4a0d010afe2fa4bf24c5f0f1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 11 Jul 2024 12:12:28 -0500
Subject: [PATCH 044/111] Bump djangorestframework from 3.14.0 to 3.15.2
(#10431)
Bumps [djangorestframework](https://github.com/encode/django-rest-framework) from 3.14.0 to 3.15.2.
- [Release notes](https://github.com/encode/django-rest-framework/releases)
- [Commits](https://github.com/encode/django-rest-framework/compare/3.14.0...3.15.2)
---
updated-dependencies:
- dependency-name: djangorestframework
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 8e2af13483..ac70ee7233 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -25,7 +25,7 @@ git+https://github.com/DefectDojo/django-tagging@develop#egg=django-tagging
django-watson==1.6.3
django-prometheus==2.3.1
Django==4.2.13
-djangorestframework==3.14.0
+djangorestframework==3.15.2
html2text==2024.2.26
humanize==4.10.0
jira==3.8.0
From e2b47cdcf6bb35952ad7874cad795cff4d1e73c6 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 12 Jul 2024 16:42:44 -0500
Subject: [PATCH 045/111] Bump asteval from 1.0.0 to 1.0.1 (#10561)
Bumps [asteval](https://github.com/lmfit/asteval) from 1.0.0 to 1.0.1.
- [Release notes](https://github.com/lmfit/asteval/releases)
- [Commits](https://github.com/lmfit/asteval/compare/1.0.0...1.0.1)
---
updated-dependencies:
- dependency-name: asteval
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index ac70ee7233..7e6a0a71da 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
# requirements.txt for DefectDojo using Python 3.x
-asteval==1.0.0
+asteval==1.0.1
bleach==6.1.0
bleach[css]
celery==5.4.0
From 3462db01d174599b24dca3be59b298095d59358f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 12 Jul 2024 16:43:01 -0500
Subject: [PATCH 046/111] Bump coverage from 7.5.4 to 7.6.0 (#10560)
Bumps [coverage](https://github.com/nedbat/coveragepy) from 7.5.4 to 7.6.0.
- [Release notes](https://github.com/nedbat/coveragepy/releases)
- [Changelog](https://github.com/nedbat/coveragepy/blob/master/CHANGES.rst)
- [Commits](https://github.com/nedbat/coveragepy/compare/7.5.4...7.6.0)
---
updated-dependencies:
- dependency-name: coverage
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 7e6a0a71da..1a6cc872e9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@ asteval==1.0.1
bleach==6.1.0
bleach[css]
celery==5.4.0
-coverage==7.5.4
+coverage==7.6.0
defusedxml==0.7.1
django_celery_results==2.5.1
django-auditlog==2.3.0
From 5730df25d2bb31308ac04ce87a4ca08fb09ff6db Mon Sep 17 00:00:00 2001
From: Marius <49275246+gietschess@users.noreply.github.com>
Date: Fri, 12 Jul 2024 23:51:56 +0200
Subject: [PATCH 047/111] fix(deps): build the python psycopg3 dependency instead
of using the pre-built binary (#10491)
Signed-off-by: gietschess <49275246+gietschess@users.noreply.github.com>
---
Dockerfile.django-alpine | 6 ++++--
Dockerfile.nginx-alpine | 4 +++-
requirements.txt | 2 +-
3 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/Dockerfile.django-alpine b/Dockerfile.django-alpine
index 7d712ad626..5fb9dc8e77 100644
--- a/Dockerfile.django-alpine
+++ b/Dockerfile.django-alpine
@@ -16,13 +16,15 @@ RUN \
bind-tools \
mysql-client \
mariadb-dev \
- postgresql14-client \
+ postgresql16-client \
xmlsec \
git \
util-linux \
curl-dev \
openssl \
libffi-dev \
+ python3-dev \
+ libpq-dev \
&& \
rm -rf /var/cache/apk/* && \
true
@@ -49,7 +51,7 @@ RUN \
xmlsec \
git \
util-linux \
- postgresql14-client \
+ postgresql16-client \
curl-dev \
openssl \
# needed for integration-tests
diff --git a/Dockerfile.nginx-alpine b/Dockerfile.nginx-alpine
index b9a55ac415..bd4caaa206 100644
--- a/Dockerfile.nginx-alpine
+++ b/Dockerfile.nginx-alpine
@@ -16,13 +16,15 @@ RUN \
bind-tools \
mysql-client \
mariadb-dev \
- postgresql14-client \
+ postgresql16-client \
xmlsec \
git \
util-linux \
curl-dev \
openssl \
libffi-dev \
+ python3-dev \
+ libpq-dev \
&& \
rm -rf /var/cache/apk/* && \
true
diff --git a/requirements.txt b/requirements.txt
index 1a6cc872e9..dcb4e7e6cd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -35,7 +35,7 @@ Markdown==3.6
mysqlclient==2.1.1
openpyxl==3.1.5
Pillow==10.4.0 # required by django-imagekit
-psycopg[binary]==3.2.1
+psycopg[c]==3.2.1
cryptography==42.0.8
python-dateutil==2.9.0.post0
pytz==2024.1
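
Switching the requirements entry from psycopg[binary] to psycopg[c] makes pip compile psycopg 3's optimized C wrapper against the system libpq at image build time, which is why the Alpine stages above gain python3-dev, libpq-dev and the postgresql16 client. As a hedged sketch (not part of the patch), one way to confirm which implementation actually got installed:

    import psycopg

    # pq.__impl__ reports the loaded libpq wrapper: "c" for the compiled
    # extension built by psycopg[c], "binary" for the pre-built wheel, and
    # "python" for the pure-Python fallback.
    print(psycopg.pq.__impl__)
    print(psycopg.pq.version())  # libpq version the wrapper was built against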
From cdbb35a6a8f9b5d3593b4e40d6ae941e20eb4572 Mon Sep 17 00:00:00 2001
From: kiblik <5609770+kiblik@users.noreply.github.com>
Date: Fri, 12 Jul 2024 23:52:42 +0200
Subject: [PATCH 048/111] Fix(django): Upgrade Django from 4.2.13 to 4.2.14 (#10553)
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index dcb4e7e6cd..11df8529d2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -24,7 +24,7 @@ django-slack==5.19.0
git+https://github.com/DefectDojo/django-tagging@develop#egg=django-tagging
django-watson==1.6.3
django-prometheus==2.3.1
-Django==4.2.13
+Django==4.2.14
djangorestframework==3.15.2
html2text==2024.2.26
humanize==4.10.0
From df280a692a8d2317a812ca60be6c5658d9e88e41 Mon Sep 17 00:00:00 2001
From: kiblik <5609770+kiblik@users.noreply.github.com>
Date: Sat, 13 Jul 2024 00:18:04 +0200
Subject: [PATCH 049/111] Ruff: Add and fix Q000 (#10095)
* Ruff: add Q000
* Ruff: fix Q000
* Adopt update script as well
* Update .settings.dist.py.sha256sum
---
.github/workflows/release-1-create-pr.yml | 6 +-
.../workflows/release-3-master-into-dev.yml | 6 +-
dojo/__init__.py | 6 +-
dojo/admin.py | 4 +-
dojo/api_v2/prefetch/schema.py | 8 +-
dojo/api_v2/serializers.py | 24 +-
dojo/api_v2/views.py | 6 +-
dojo/apps.py | 22 +-
dojo/celery.py | 8 +-
dojo/components/views.py | 2 +-
dojo/cred/queries.py | 8 +-
dojo/cred/urls.py | 44 +-
dojo/cred/views.py | 368 ++--
dojo/decorators.py | 48 +-
dojo/development_environment/urls.py | 10 +-
dojo/development_environment/views.py | 72 +-
dojo/endpoint/queries.py | 16 +-
dojo/endpoint/signals.py | 12 +-
dojo/endpoint/urls.py | 68 +-
dojo/endpoint/utils.py | 148 +-
dojo/endpoint/views.py | 228 +--
dojo/engagement/queries.py | 8 +-
dojo/engagement/services.py | 4 +-
dojo/engagement/signals.py | 30 +-
dojo/engagement/urls.py | 100 +-
dojo/engagement/views.py | 638 +++---
dojo/filters.py | 1722 ++++++++--------
dojo/finding/helper.py | 104 +-
dojo/finding/queries.py | 16 +-
dojo/finding/urls.py | 216 +-
dojo/finding/views.py | 16 +-
dojo/finding_group/queries.py | 8 +-
dojo/finding_group/signals.py | 12 +-
dojo/finding_group/urls.py | 8 +-
dojo/finding_group/views.py | 112 +-
dojo/forms.py | 1744 ++++++++---------
dojo/github.py | 42 +-
dojo/github_issue_link/urls.py | 10 +-
dojo/github_issue_link/views.py | 54 +-
dojo/group/queries.py | 20 +-
dojo/group/urls.py | 22 +-
dojo/group/utils.py | 16 +-
dojo/group/views.py | 202 +-
dojo/home/urls.py | 6 +-
dojo/home/views.py | 56 +-
dojo/importers/auto_create_context.py | 4 +-
dojo/importers/base_importer.py | 34 +-
dojo/importers/default_importer.py | 30 +-
dojo/importers/default_reimporter.py | 28 +-
dojo/importers/endpoint_manager.py | 2 +-
dojo/jira_link/helper.py | 396 ++--
dojo/jira_link/queries.py | 40 +-
dojo/jira_link/urls.py | 18 +-
dojo/jira_link/views.py | 242 +--
dojo/management/commands/clear_alerts.py | 14 +-
.../commands/csv_findings_export.py | 12 +-
dojo/management/commands/dedupe.py | 40 +-
dojo/management/commands/dupecheck.py | 16 +-
.../management/commands/endpoint_migration.py | 10 +-
dojo/management/commands/fix_0120.py | 12 +-
.../commands/fix_broken_endpoint_status.py | 2 +-
.../commands/fix_loop_duplicates.py | 2 +-
.../commands/import_github_languages.py | 14 +-
dojo/management/commands/import_surveys.py | 10 +-
.../commands/initialize_permissions.py | 16 +-
.../commands/initialize_test_types.py | 4 +-
.../management/commands/jira_async_updates.py | 8 +-
.../commands/jira_refactor_data_migration.py | 34 +-
.../commands/jira_status_reconciliation.py | 110 +-
.../commands/migrate_authorization_v2.py | 2 +-
.../commands/migrate_staff_users.py | 70 +-
dojo/management/commands/migrate_surveys.py | 48 +-
.../commands/migrate_textquestions.py | 6 +-
dojo/management/commands/print_settings.py | 4 +-
.../commands/push_to_jira_update.py | 4 +-
.../commands/rename_mend_findings.py | 14 +-
.../risk_acceptance_handle_expiration.py | 4 +-
dojo/management/commands/sla_notifications.py | 2 +-
.../commands/stamp_finding_last_reviewed.py | 20 +-
dojo/management/commands/system_settings.py | 2 +-
.../commands/test_celery_decorator.py | 6 +-
dojo/metrics/urls.py | 40 +-
dojo/metrics/utils.py | 196 +-
dojo/metrics/views.py | 474 ++---
dojo/middleware.py | 30 +-
dojo/models.py | 1658 ++++++++--------
dojo/note_type/urls.py | 20 +-
dojo/note_type/views.py | 88 +-
dojo/notes/urls.py | 6 +-
dojo/notes/views.py | 46 +-
dojo/notifications/helper.py | 202 +-
dojo/notifications/urls.py | 8 +-
dojo/notifications/views.py | 12 +-
dojo/object/urls.py | 10 +-
dojo/object/views.py | 66 +-
dojo/okta.py | 44 +-
dojo/pipeline.py | 86 +-
dojo/product/queries.py | 98 +-
dojo/product/signals.py | 16 +-
dojo/product/urls.py | 128 +-
dojo/product/views.py | 1256 ++++++------
dojo/product_type/queries.py | 32 +-
dojo/product_type/signals.py | 16 +-
dojo/product_type/urls.py | 46 +-
dojo/product_type/views.py | 270 +--
dojo/regulations/urls.py | 8 +-
dojo/regulations/views.py | 44 +-
dojo/remote_user.py | 30 +-
dojo/reports/urls.py | 64 +-
dojo/reports/views.py | 726 +++----
dojo/reports/widgets.py | 164 +-
dojo/risk_acceptance/api.py | 18 +-
dojo/risk_acceptance/helper.py | 78 +-
dojo/risk_acceptance/queries.py | 8 +-
dojo/search/urls.py | 4 +-
dojo/search/views.py | 256 +--
dojo/settings/.settings.dist.py.sha256sum | 2 +-
.../attribute-maps/django_saml_uri.py | 28 +-
dojo/settings/attribute-maps/saml_uri.py | 454 ++---
dojo/settings/settings.dist.py | 1736 ++++++++--------
dojo/settings/settings.py | 10 +-
dojo/settings/unittest.py | 6 +-
dojo/sla_config/urls.py | 6 +-
dojo/sla_config/views.py | 56 +-
dojo/survey/urls.py | 80 +-
dojo/survey/views.py | 444 ++---
dojo/system_settings/urls.py | 4 +-
dojo/system_settings/views.py | 32 +-
dojo/tags_signals.py | 2 +-
dojo/tasks.py | 52 +-
dojo/templatetags/announcement_banner_tags.py | 4 +-
dojo/templatetags/dict_key.py | 2 +-
dojo/templatetags/display_tags.py | 294 +--
dojo/templatetags/event_tags.py | 10 +-
dojo/templatetags/get_attribute.py | 2 +-
dojo/templatetags/get_banner.py | 6 +-
dojo/templatetags/get_endpoint_status.py | 8 +-
dojo/templatetags/get_note_status.py | 2 +-
.../templatetags/get_notetype_availability.py | 2 +-
dojo/templatetags/navigation_tags.py | 58 +-
dojo/templatetags/survey_tags.py | 10 +-
dojo/test/queries.py | 16 +-
dojo/test/signals.py | 12 +-
dojo/test/urls.py | 36 +-
dojo/test/views.py | 370 ++--
dojo/test_type/urls.py | 10 +-
dojo/test_type/views.py | 60 +-
dojo/tool_config/factory.py | 12 +-
dojo/tool_config/urls.py | 8 +-
dojo/tool_config/views.py | 60 +-
dojo/tool_product/queries.py | 8 +-
dojo/tool_product/urls.py | 10 +-
dojo/tool_product/views.py | 66 +-
dojo/tool_type/urls.py | 8 +-
dojo/tool_type/views.py | 34 +-
dojo/tools/acunetix/parser.py | 4 +-
dojo/tools/anchorectl_policies/parser.py | 2 +-
dojo/tools/aws_prowler_v3plus/parser.py | 6 +-
dojo/tools/aws_prowler_v3plus/prowler_v3.py | 46 +-
dojo/tools/aws_prowler_v3plus/prowler_v4.py | 44 +-
dojo/tools/awssecurityhub/compliance.py | 2 +-
dojo/tools/awssecurityhub/guardduty.py | 4 +-
dojo/tools/awssecurityhub/parser.py | 4 +-
.../blackduck_binary_analysis/importer.py | 2 +-
.../tools/blackduck_binary_analysis/parser.py | 2 +-
dojo/tools/burp_api/parser.py | 2 +-
dojo/tools/checkmarx_one/parser.py | 2 +-
dojo/tools/chefinspect/parser.py | 26 +-
dojo/tools/clair/parser.py | 4 +-
dojo/tools/crunch42/parser.py | 2 +-
dojo/tools/dependency_track/parser.py | 78 +-
dojo/tools/factory.py | 14 +-
dojo/tools/fortify/fpr_parser.py | 6 +-
dojo/tools/fortify/parser.py | 4 +-
dojo/tools/gitlab_sast/parser.py | 98 +-
dojo/tools/govulncheck/parser.py | 22 +-
dojo/tools/harbor_vulnerability/parser.py | 2 +-
dojo/tools/hcl_appscan/parser.py | 38 +-
dojo/tools/humble/parser.py | 10 +-
.../parser.py | 4 +-
dojo/tools/kubeaudit/parser.py | 26 +-
dojo/tools/kubehunter/parser.py | 34 +-
dojo/tools/kubescape/parser.py | 6 +-
dojo/tools/microfocus_webinspect/parser.py | 4 +-
dojo/tools/mobsf/__init__.py | 2 +-
dojo/tools/mobsf/parser.py | 4 +-
dojo/tools/ms_defender/parser.py | 128 +-
dojo/tools/nancy/parser.py | 30 +-
dojo/tools/nikto/json_parser.py | 2 +-
dojo/tools/noseyparker/parser.py | 16 +-
dojo/tools/nuclei/parser.py | 2 +-
dojo/tools/openvas/csv_parser.py | 2 +-
dojo/tools/openvas/parser.py | 4 +-
dojo/tools/redhatsatellite/parser.py | 2 +-
dojo/tools/sarif/parser.py | 18 +-
dojo/tools/sonarqube/parser.py | 4 +-
.../tools/sonarqube/sonarqube_restapi_json.py | 14 +-
dojo/tools/sonarqube/sonarqube_restapi_zip.py | 2 +-
dojo/tools/ssh_audit/parser.py | 94 +-
dojo/tools/sysdig_reports/parser.py | 62 +-
.../tools/sysdig_reports/sysdig_csv_parser.py | 68 +-
dojo/tools/tenable/csv_format.py | 8 +-
dojo/tools/trivy/parser.py | 2 +-
dojo/tools/utils.py | 6 +-
dojo/tools/wazuh/parser.py | 2 +-
dojo/tools/yarn_audit/parser.py | 8 +-
dojo/urls.py | 162 +-
dojo/user/queries.py | 22 +-
dojo/user/urls.py | 68 +-
dojo/user/utils.py | 86 +-
dojo/user/validators.py | 42 +-
dojo/user/views.py | 380 ++--
dojo/utils.py | 824 ++++----
dojo/views.py | 50 +-
dojo/widgets.py | 20 +-
dojo/wsgi.py | 2 +-
ruff.toml | 2 +-
tests/Import_scanner_test.py | 162 +-
tests/announcement_banner_test.py | 122 +-
tests/check_various_pages.py | 6 +-
tests/close_old_findings_dedupe_test.py | 88 +-
tests/close_old_findings_test.py | 66 +-
tests/dedupe_test.py | 192 +-
tests/endpoint_test.py | 20 +-
tests/engagement_test.py | 50 +-
tests/environment_test.py | 20 +-
tests/false_positive_history_test.py | 66 +-
tests/file_test.py | 44 +-
tests/finding_test.py | 146 +-
tests/group_test.py | 46 +-
tests/ibm_appscan_test.py | 16 +-
tests/note_type_test.py | 20 +-
tests/notes_test.py | 36 +-
tests/notifications_test.py | 50 +-
tests/product_group_test.py | 48 +-
tests/product_member_test.py | 96 +-
tests/product_test.py | 112 +-
tests/product_type_group_test.py | 44 +-
tests/product_type_member_test.py | 92 +-
tests/product_type_test.py | 24 +-
tests/regulations_test.py | 20 +-
tests/report_builder_test.py | 64 +-
tests/search_test.py | 50 +-
tests/sla_configuration_test.py | 24 +-
tests/test_test.py | 50 +-
tests/tool_config.py | 24 +-
tests/user_test.py | 54 +-
tests/zap.py | 28 +-
unittests/authorization/test_authorization.py | 86 +-
.../test_authorization_decorators.py | 28 +-
.../authorization/test_authorization_tags.py | 42 +-
unittests/dojo_test_case.py | 320 +--
unittests/test_adminsite.py | 4 +-
unittests/test_api_sonarqube_updater.py | 50 +-
unittests/test_apiv2_endpoint.py | 52 +-
unittests/test_apiv2_limit_reqresp.py | 16 +-
unittests/test_apiv2_metadata.py | 70 +-
unittests/test_apiv2_methods_and_endpoints.py | 38 +-
unittests/test_apiv2_notifications.py | 44 +-
unittests/test_apiv2_scan_import_options.py | 26 +-
unittests/test_apiv2_user.py | 56 +-
unittests/test_apply_finding_template.py | 142 +-
unittests/test_bulk_risk_acceptance_api.py | 50 +-
unittests/test_cleanup_alerts.py | 10 +-
unittests/test_copy_model.py | 150 +-
unittests/test_dashboard.py | 108 +-
unittests/test_deduplication_logic.py | 142 +-
unittests/test_duplication_loops.py | 26 +-
unittests/test_endpoint_meta_import.py | 46 +-
unittests/test_endpoint_model.py | 148 +-
.../test_false_positive_history_logic.py | 52 +-
unittests/test_finding_helper.py | 68 +-
unittests/test_finding_model.py | 196 +-
unittests/test_flush_auditlog.py | 2 +-
unittests/test_import_reimport.py | 820 ++++----
unittests/test_importers_importer.py | 278 +--
unittests/test_jira_config_engagement.py | 184 +-
unittests/test_jira_config_engagement_epic.py | 36 +-
unittests/test_jira_config_product.py | 92 +-
unittests/test_jira_import_and_pushing_api.py | 206 +-
unittests/test_jira_template.py | 10 +-
unittests/test_jira_webhook.py | 70 +-
unittests/test_metrics_queries.py | 186 +-
unittests/test_migrations.py | 56 +-
unittests/test_notifications.py | 264 +--
unittests/test_parsers.py | 52 +-
unittests/test_remote_user.py | 40 +-
unittests/test_rest_framework.py | 1284 ++++++------
unittests/test_risk_acceptance.py | 88 +-
unittests/test_sample_data.py | 2 +-
unittests/test_tags.py | 144 +-
unittests/test_tool_config.py | 62 +-
unittests/test_user_queries.py | 46 +-
unittests/test_user_validators.py | 104 +-
unittests/test_utils.py | 38 +-
unittests/test_utils_deduplication_reopen.py | 24 +-
unittests/tools/test_acunetix_parser.py | 82 +-
unittests/tools/test_anchore_engine_parser.py | 4 +-
unittests/tools/test_anchore_grype_parser.py | 96 +-
.../tools/test_anchorectl_policies_parser.py | 6 +-
.../tools/test_anchorectl_vulns_parser.py | 6 +-
unittests/tools/test_api_bugcrowd_parser.py | 6 +-
unittests/tools/test_api_cobalt_importer.py | 34 +-
unittests/tools/test_api_cobalt_parser.py | 12 +-
unittests/tools/test_api_edgescan_importer.py | 26 +-
.../tools/test_api_sonarqube_importer.py | 392 ++--
unittests/tools/test_api_sonarqube_parser.py | 10 +-
unittests/tools/test_aqua_parser.py | 38 +-
unittests/tools/test_arachni_parser.py | 18 +-
unittests/tools/test_aws_prowler_parser.py | 34 +-
.../tools/test_aws_prowler_v3plus_parser.py | 20 +-
unittests/tools/test_awssecurityhub_parser.py | 8 +-
..._security_center_recommendations_parser.py | 40 +-
unittests/tools/test_burp_graphql_parser.py | 14 +-
unittests/tools/test_checkmarx_osa_parser.py | 2 +-
unittests/tools/test_checkmarx_parser.py | 102 +-
unittests/tools/test_checkov_parser.py | 36 +-
unittests/tools/test_codechecker_parser.py | 10 +-
unittests/tools/test_contrast_parser.py | 18 +-
.../tools/test_crashtest_security_parser.py | 2 +-
unittests/tools/test_cyclonedx_parser.py | 26 +-
.../tools/test_dependency_check_parser.py | 12 +-
.../tools/test_dependency_track_parser.py | 12 +-
unittests/tools/test_dockerbench_parser.py | 8 +-
unittests/tools/test_fortify_parser.py | 12 +-
unittests/tools/test_generic_parser.py | 30 +-
.../tools/test_harbor_vulnerability_parser.py | 22 +-
unittests/tools/test_horusec_parser.py | 4 +-
unittests/tools/test_ibm_app_parser.py | 6 +-
unittests/tools/test_intsights_parser.py | 6 +-
...jfrog_xray_on_demand_binary_scan_parser.py | 2 +-
.../tools/test_jfrog_xray_unified_parser.py | 2 +-
unittests/tools/test_kics_parser.py | 4 +-
unittests/tools/test_kubebench_parser.py | 4 +-
unittests/tools/test_kubehunter_parser.py | 2 +-
.../test_microfocus_webinspect_parser.py | 4 +-
unittests/tools/test_mobsf_parser.py | 50 +-
unittests/tools/test_nancy_parser.py | 2 +-
unittests/tools/test_nexpose_parser.py | 34 +-
unittests/tools/test_nmap_parser.py | 16 +-
unittests/tools/test_npm_audit_parser.py | 4 +-
unittests/tools/test_nuclei_parser.py | 4 +-
unittests/tools/test_pip_audit_parser.py | 50 +-
unittests/tools/test_popeye_parser.py | 2 +-
.../test_qualys_infrascan_webgui_parser.py | 2 +-
unittests/tools/test_qualys_webapp_parser.py | 2 +-
unittests/tools/test_scout_suite_parser.py | 16 +-
unittests/tools/test_semgrep_parser.py | 2 +-
unittests/tools/test_snyk_parser.py | 8 +-
unittests/tools/test_sonarqube_parser.py | 24 +-
unittests/tools/test_sslyze_parser.py | 36 +-
unittests/tools/test_stackhawk_parser.py | 8 +-
unittests/tools/test_tenable_parser.py | 42 +-
unittests/tools/test_trivy_parser.py | 64 +-
unittests/tools/test_trufflehog3_parser.py | 18 +-
unittests/tools/test_trufflehog_parser.py | 6 +-
unittests/tools/test_veracode_parser.py | 212 +-
357 files changed, 15088 insertions(+), 15088 deletions(-)
diff --git a/.github/workflows/release-1-create-pr.yml b/.github/workflows/release-1-create-pr.yml
index 0b7bf2c92c..c93b1d0ee6 100644
--- a/.github/workflows/release-1-create-pr.yml
+++ b/.github/workflows/release-1-create-pr.yml
@@ -51,9 +51,9 @@ jobs:
- name: Update version numbers in key files
run: |
- sed -ri "s/__version__ = '.*'/__version__ = '${{ github.event.inputs.release_number }}'/" dojo/__init__.py
- sed -ri "s/\"version\": \".*\"/\"version\": \"${{ github.event.inputs.release_number }}\"/" components/package.json
- sed -ri "s/appVersion: \".*\"/appVersion: \"${{ github.event.inputs.release_number }}\"/" helm/defectdojo/Chart.yaml
+ sed -ri 's/__version__ = ".*"/__version__ = "${{ github.event.inputs.release_number }}"/' dojo/__init__.py
+ sed -ri 's/"version": ".*"/"version": "${{ github.event.inputs.release_number }}"/' components/package.json
+ sed -ri 's/appVersion: ".*"/appVersion: "${{ github.event.inputs.release_number }}"/' helm/defectdojo/Chart.yaml
if grep "\-dev" helm/defectdojo/Chart.yaml; then
echo "x.y.z-dev found in Chart.yaml, probably releasing a new minor version"
diff --git a/.github/workflows/release-3-master-into-dev.yml b/.github/workflows/release-3-master-into-dev.yml
index 2da1dc0dd9..b5c8828ee1 100644
--- a/.github/workflows/release-3-master-into-dev.yml
+++ b/.github/workflows/release-3-master-into-dev.yml
@@ -44,9 +44,9 @@ jobs:
- name: Update version numbers in key files
run: |
- sed -ri "s/__version__ = '.*'/__version__ = '${{ github.event.inputs.release_number_dev }}'/" dojo/__init__.py
- sed -ri "s/appVersion: \".*\"/appVersion: \"${{ github.event.inputs.release_number_dev }}\"/" helm/defectdojo/Chart.yaml
- sed -ri "s/\"version\": \".*\"/\"version\": \"${{ github.event.inputs.release_number_dev }}\"/" components/package.json
+ sed -ri 's/__version__ = ".*"/__version__ = "${{ github.event.inputs.release_number_dev }}"/' dojo/__init__.py
+ sed -ri 's/"version": ".*"/"version": "${{ github.event.inputs.release_number_dev }}"/' components/package.json
+ sed -ri 's/appVersion: ".*"/appVersion: "${{ github.event.inputs.release_number_dev }}"/' helm/defectdojo/Chart.yaml
CURRENT_CHART_VERSION=$(grep -oP 'version: (\K\S*)?' helm/defectdojo/Chart.yaml | head -1)
sed -ri "0,/version/s/version: \S+/$(echo "version: $CURRENT_CHART_VERSION" | awk -F. -v OFS=. 'NF==1{print ++$NF}; NF>1{$NF=sprintf("%0*d", length($NF), ($NF+1)); print}')-dev/" helm/defectdojo/Chart.yaml
diff --git a/dojo/__init__.py b/dojo/__init__.py
index 707177ee3e..a31f5294e2 100644
--- a/dojo/__init__.py
+++ b/dojo/__init__.py
@@ -4,6 +4,6 @@
# Django starts so that shared_task will use this app.
from .celery import app as celery_app # noqa: F401
-__version__ = '2.37.0-dev'
-__url__ = 'https://github.com/DefectDojo/django-DefectDojo'
-__docs__ = 'https://documentation.defectdojo.com'
+__version__ = "2.37.0-dev"
+__url__ = "https://github.com/DefectDojo/django-DefectDojo"
+__docs__ = "https://documentation.defectdojo.com"
diff --git a/dojo/admin.py b/dojo/admin.py
index 87823ff4d0..a2452ce1e5 100644
--- a/dojo/admin.py
+++ b/dojo/admin.py
@@ -85,8 +85,8 @@ class AnswerParentAdmin(PolymorphicParentModelAdmin):
"""
list_display = (
- 'answered_survey',
- 'question',
+ "answered_survey",
+ "question",
)
base_model = Answer
diff --git a/dojo/api_v2/prefetch/schema.py b/dojo/api_v2/prefetch/schema.py
index 030a572a15..1a50f6ba7d 100644
--- a/dojo/api_v2/prefetch/schema.py
+++ b/dojo/api_v2/prefetch/schema.py
@@ -26,14 +26,14 @@ def get_serializer_ref_name(serializer):
:return: Serializer's ``ref_name`` or ``None`` for inline serializer
:rtype: str or None
"""
- serializer_meta = getattr(serializer, 'Meta', None)
+ serializer_meta = getattr(serializer, "Meta", None)
serializer_name = type(serializer).__name__
- if hasattr(serializer_meta, 'ref_name'):
+ if hasattr(serializer_meta, "ref_name"):
ref_name = serializer_meta.ref_name
else:
ref_name = serializer_name
- if ref_name.endswith('Serializer'):
- ref_name = ref_name[:-len('Serializer')]
+ if ref_name.endswith("Serializer"):
+ ref_name = ref_name[:-len("Serializer")]
return ref_name
diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py
index f6ece66da7..bafdd319bb 100644
--- a/dojo/api_v2/serializers.py
+++ b/dojo/api_v2/serializers.py
@@ -200,7 +200,7 @@ def __init__(self, **kwargs):
self.pretty_print = pretty_print
def to_internal_value(self, data):
- if isinstance(data, list) and data == [''] and self.allow_empty:
+ if isinstance(data, list) and data == [""] and self.allow_empty:
return []
if isinstance(data, six.string_types):
if not data:
@@ -1100,7 +1100,7 @@ def validate(self, data):
name = data.get("name")
# Make sure this will not create a duplicate test type
if Tool_Type.objects.filter(name=name).count() > 0:
- msg = 'A Tool Type with the name already exists'
+ msg = "A Tool Type with the name already exists"
raise serializers.ValidationError(msg)
return data
@@ -1512,12 +1512,12 @@ def get_engagement(self, obj):
def validate(self, data):
def validate_findings_have_same_engagement(finding_objects: List[Finding]):
- engagements = finding_objects.values_list('test__engagement__id', flat=True).distinct().count()
+ engagements = finding_objects.values_list("test__engagement__id", flat=True).distinct().count()
if engagements > 1:
msg = "You are not permitted to add findings from multiple engagements"
raise PermissionDenied(msg)
- findings = data.get('accepted_findings', [])
+ findings = data.get("accepted_findings", [])
findings_ids = [x.id for x in findings]
finding_objects = Finding.objects.filter(id__in=findings_ids)
authed_findings = get_authorized_findings(Permissions.Finding_Edit).filter(id__in=findings_ids)
@@ -1526,7 +1526,7 @@ def validate_findings_have_same_engagement(finding_objects: List[Finding]):
raise PermissionDenied(msg)
if self.context["request"].method == "POST":
validate_findings_have_same_engagement(finding_objects)
- elif self.context['request'].method in ['PATCH', 'PUT']:
+ elif self.context["request"].method in ["PATCH", "PUT"]:
existing_findings = Finding.objects.filter(risk_acceptance=self.instance.id)
existing_and_new_findings = existing_findings | finding_objects
validate_findings_have_same_engagement(existing_and_new_findings)
@@ -2024,12 +2024,12 @@ class Meta:
)
def validate(self, data):
- async_updating = getattr(self.instance, 'async_updating', None)
+ async_updating = getattr(self.instance, "async_updating", None)
if async_updating:
- new_sla_config = data.get('sla_configuration', None)
- old_sla_config = getattr(self.instance, 'sla_configuration', None)
+ new_sla_config = data.get("sla_configuration", None)
+ old_sla_config = getattr(self.instance, "sla_configuration", None)
if new_sla_config and old_sla_config and new_sla_config != old_sla_config:
- msg = 'Finding SLA expiration dates are currently being recalculated. The SLA configuration for this product cannot be changed until the calculation is complete.'
+ msg = "Finding SLA expiration dates are currently being recalculated. The SLA configuration for this product cannot be changed until the calculation is complete."
raise serializers.ValidationError(msg)
return data
@@ -3002,13 +3002,13 @@ class Meta:
)
def validate(self, data):
- async_updating = getattr(self.instance, 'async_updating', None)
+ async_updating = getattr(self.instance, "async_updating", None)
if async_updating:
- for field in ['critical', 'enforce_critical', 'high', 'enforce_high', 'medium', 'enforce_medium', 'low', 'enforce_low']:
+ for field in ["critical", "enforce_critical", "high", "enforce_high", "medium", "enforce_medium", "low", "enforce_low"]:
old_days = getattr(self.instance, field, None)
new_days = data.get(field, None)
if old_days is not None and new_days is not None and (old_days != new_days):
- msg = 'Finding SLA expiration dates are currently being calculated. The SLA days for this SLA configuration cannot be changed until the calculation is complete.'
+ msg = "Finding SLA expiration dates are currently being calculated. The SLA days for this SLA configuration cannot be changed until the calculation is complete."
raise serializers.ValidationError(msg)
return data
diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py
index ce0d4e2c24..0f78a071eb 100644
--- a/dojo/api_v2/views.py
+++ b/dojo/api_v2/views.py
@@ -173,9 +173,9 @@
class DojoOpenApiJsonRenderer(OpenApiJsonRenderer2):
def get_indent(self, accepted_media_type, renderer_context):
- if accepted_media_type and 'indent' in accepted_media_type:
+ if accepted_media_type and "indent" in accepted_media_type:
return super().get_indent(accepted_media_type, renderer_context)
- return renderer_context.get('indent', None)
+ return renderer_context.get("indent", None)
class DojoSpectacularAPIView(SpectacularAPIView):
@@ -206,7 +206,7 @@ class RoleViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = (IsAuthenticated,)
def get_queryset(self):
- return Role.objects.all().order_by('id')
+ return Role.objects.all().order_by("id")
# Authorization: object-based
diff --git a/dojo/apps.py b/dojo/apps.py
index 9b3f786408..2411e7e725 100644
--- a/dojo/apps.py
+++ b/dojo/apps.py
@@ -11,7 +11,7 @@
class DojoAppConfig(AppConfig):
- name = 'dojo'
+ name = "dojo"
verbose_name = "Defect Dojo"
def ready(self):
@@ -25,12 +25,12 @@ def ready(self):
# charfields/textfields are the fields that watson indexes by default (but we have to repeat here if we add extra fields)
# and watson likes to have tuples instead of lists
- watson.register(self.get_model('Product'), fields=get_model_fields_with_extra(self.get_model('Product'), ('id', 'prod_type__name')), store=('prod_type__name', ))
+ watson.register(self.get_model("Product"), fields=get_model_fields_with_extra(self.get_model("Product"), ("id", "prod_type__name")), store=("prod_type__name", ))
- watson.register(self.get_model('Test'), fields=get_model_fields_with_extra(self.get_model('Test'), ('id', 'engagement__product__name')), store=('engagement__product__name', )) # test_type__name?
+ watson.register(self.get_model("Test"), fields=get_model_fields_with_extra(self.get_model("Test"), ("id", "engagement__product__name")), store=("engagement__product__name", )) # test_type__name?
- watson.register(self.get_model('Finding'), fields=get_model_fields_with_extra(self.get_model('Finding'), ('id', 'url', 'unique_id_from_tool', 'test__engagement__product__name', 'jira_issue__jira_key')),
- store=('status', 'jira_issue__jira_key', 'test__engagement__product__name', 'severity', 'severity_display', 'latest_note'))
+ watson.register(self.get_model("Finding"), fields=get_model_fields_with_extra(self.get_model("Finding"), ("id", "url", "unique_id_from_tool", "test__engagement__product__name", "jira_issue__jira_key")),
+ store=("status", "jira_issue__jira_key", "test__engagement__product__name", "severity", "severity_display", "latest_note"))
# some thoughts on Finding fields that are not indexed yet:
# CWE can't be indexed as it is an integer
@@ -58,16 +58,16 @@ def ready(self):
# help_text="Source line number of the attack vector")
# sast_source_file_path = models.CharField(null=True, blank=True, max_length=4000, help_text="Source filepath of the attack vector")
- watson.register(self.get_model('Finding_Template'))
- watson.register(self.get_model('Endpoint'), store=('product__name', )) # add product name also?
- watson.register(self.get_model('Engagement'), fields=get_model_fields_with_extra(self.get_model('Engagement'), ('id', 'product__name')), store=('product__name', ))
- watson.register(self.get_model('App_Analysis'))
- watson.register(self.get_model('Vulnerability_Id'), store=('finding__test__engagement__product__name', ))
+ watson.register(self.get_model("Finding_Template"))
+ watson.register(self.get_model("Endpoint"), store=("product__name", )) # add product name also?
+ watson.register(self.get_model("Engagement"), fields=get_model_fields_with_extra(self.get_model("Engagement"), ("id", "product__name")), store=("product__name", ))
+ watson.register(self.get_model("App_Analysis"))
+ watson.register(self.get_model("Vulnerability_Id"), store=("finding__test__engagement__product__name", ))
# YourModel = self.get_model("YourModel")
# watson.register(YourModel)
- register_check(check_configuration_deduplication, 'dojo')
+ register_check(check_configuration_deduplication, "dojo")
# Load any signals here that will be ready for runtime
# Importing the signals file is good enough if using the reciever decorator
diff --git a/dojo/celery.py b/dojo/celery.py
index b2c742a792..e9ec0417d4 100644
--- a/dojo/celery.py
+++ b/dojo/celery.py
@@ -8,20 +8,20 @@
logger = logging.getLogger(__name__)
# set the default Django settings module for the 'celery' program.
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dojo.settings.settings')
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dojo.settings.settings")
-app = Celery('dojo')
+app = Celery("dojo")
# Using a string here means the worker will not have to
# pickle the object when using Windows.
-app.config_from_object('django.conf:settings', namespace='CELERY')
+app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
- print(f'Request: {self.request!r}')
+ print(f"Request: {self.request!r}")
@setup_logging.connect
diff --git a/dojo/components/views.py b/dojo/components/views.py
index 45cf09727f..93a78787e5 100644
--- a/dojo/components/views.py
+++ b/dojo/components/views.py
@@ -25,7 +25,7 @@ def components(request):
.order_by("component_name")
.annotate(
component_version=StringAgg(
- "component_version", delimiter=separator, distinct=True, default=Value(''),
+ "component_version", delimiter=separator, distinct=True, default=Value(""),
),
)
)
diff --git a/dojo/cred/queries.py b/dojo/cred/queries.py
index f0868d7db1..4dd14385a0 100644
--- a/dojo/cred/queries.py
+++ b/dojo/cred/queries.py
@@ -24,19 +24,19 @@ def get_authorized_cred_mappings(permission, queryset=None):
roles = get_roles_for_permission(permission)
authorized_product_type_roles = Product_Type_Member.objects.filter(
- product_type=OuterRef('product__prod_type_id'),
+ product_type=OuterRef("product__prod_type_id"),
user=user,
role__in=roles)
authorized_product_roles = Product_Member.objects.filter(
- product=OuterRef('product_id'),
+ product=OuterRef("product_id"),
user=user,
role__in=roles)
authorized_product_type_groups = Product_Type_Group.objects.filter(
- product_type=OuterRef('product__prod_type_id'),
+ product_type=OuterRef("product__prod_type_id"),
group__users=user,
role__in=roles)
authorized_product_groups = Product_Group.objects.filter(
- product=OuterRef('product_id'),
+ product=OuterRef("product_id"),
group__users=user,
role__in=roles)
cred_mappings = cred_mappings.annotate(
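These OuterRef subqueries feed the usual authorization-by-annotation pattern: each membership lookup is correlated to the outer row, wrapped in Exists, and the queryset is then filtered on the resulting flags. A condensed sketch of the pattern showing only the product-member leg, with names simplified:

from django.db.models import Exists, OuterRef

from dojo.models import Cred_Mapping, Product_Member

def creds_visible_to(user, roles):
    # Correlate the membership lookup to each Cred_Mapping's product.
    member = Product_Member.objects.filter(
        product=OuterRef("product_id"), user=user, role__in=roles,
    )
    return (
        Cred_Mapping.objects.annotate(has_product_role=Exists(member))
        .filter(has_product_role=True)
    )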
diff --git a/dojo/cred/urls.py b/dojo/cred/urls.py
index 65a3d4300f..05f2bfe132 100644
--- a/dojo/cred/urls.py
+++ b/dojo/cred/urls.py
@@ -3,26 +3,26 @@
from . import views
urlpatterns = [
- re_path(r'^cred/add', views.new_cred, name='add_cred'),
- re_path(r'^cred/(?P<ttid>\d+)/view$', views.view_cred_details, name='view_cred_details'),
- re_path(r'^cred/(?P<ttid>\d+)/edit$', views.edit_cred, name='edit_cred'),
- re_path(r'^cred/(?P<ttid>\d+)/delete$', views.delete_cred, name='delete_cred'),
- re_path(r'^cred$', views.cred, name='cred'),
- re_path(r'^product/(?P<pid>\d+)/cred/add$', views.new_cred_product, name='new_cred_product'),
- re_path(r'^product/(?P<pid>\d+)/cred/all$', views.all_cred_product, name='all_cred_product'),
- re_path(r'^product/(?P<pid>\d+)/cred/(?P<ttid>\d+)/edit$', views.edit_cred_product, name='edit_cred_product'),
- re_path(r'^product/(?P<pid>\d+)/cred/(?P<ttid>\d+)/view$', views.view_cred_product, name='view_cred_product'),
- re_path(r'^product/(?P<pid>\d+)/cred/(?P<ttid>\d+)/delete$', views.delete_cred_product, name='delete_cred_product'),
- re_path(r'^engagement/(?P<eid>\d+)/cred/add$', views.new_cred_product_engagement, name='new_cred_product_engagement'),
- re_path(r'^engagement/(?P<eid>\d+)/cred/(?P<ttid>\d+)/view$', views.view_cred_product_engagement,
- name='view_cred_product_engagement'),
- re_path(r'^engagement/(?P<eid>\d+)/cred/(?P<ttid>\d+)/delete$', views.delete_cred_engagement,
- name='delete_cred_engagement'),
- re_path(r'^test/(?P<tid>\d+)/cred/add$', views.new_cred_engagement_test, name='new_cred_engagement_test'),
- re_path(r'^test/(?P<tid>\d+)/cred/(?P<ttid>\d+)/view$', views.view_cred_engagement_test,
- name='view_cred_engagement_test'),
- re_path(r'^test/(?P<tid>\d+)/cred/(?P<ttid>\d+)/delete$', views.delete_cred_test, name='delete_cred_test'),
- re_path(r'^finding/(?P<fid>\d+)/cred/add$', views.new_cred_finding, name='new_cred_finding'),
- re_path(r'^finding/(?P<fid>\d+)/cred/(?P<ttid>\d+)/view$', views.view_cred_finding, name='view_cred_finding'),
- re_path(r'^finding/(?P<fid>\d+)/cred/(?P<ttid>\d+)/delete$', views.delete_cred_finding, name='delete_cred_finding'),
+ re_path(r"^cred/add", views.new_cred, name="add_cred"),
+ re_path(r"^cred/(?P\d+)/view$", views.view_cred_details, name="view_cred_details"),
+ re_path(r"^cred/(?P\d+)/edit$", views.edit_cred, name="edit_cred"),
+ re_path(r"^cred/(?P\d+)/delete$", views.delete_cred, name="delete_cred"),
+ re_path(r"^cred$", views.cred, name="cred"),
+ re_path(r"^product/(?P\d+)/cred/add$", views.new_cred_product, name="new_cred_product"),
+ re_path(r"^product/(?P\d+)/cred/all$", views.all_cred_product, name="all_cred_product"),
+ re_path(r"^product/(?P\d+)/cred/(?P\d+)/edit$", views.edit_cred_product, name="edit_cred_product"),
+ re_path(r"^product/(?P\d+)/cred/(?P\d+)/view$", views.view_cred_product, name="view_cred_product"),
+ re_path(r"^product/(?P\d+)/cred/(?P\d+)/delete$", views.delete_cred_product, name="delete_cred_product"),
+ re_path(r"^engagement/(?P\d+)/cred/add$", views.new_cred_product_engagement, name="new_cred_product_engagement"),
+ re_path(r"^engagement/(?P\d+)/cred/(?P\d+)/view$", views.view_cred_product_engagement,
+ name="view_cred_product_engagement"),
+ re_path(r"^engagement/(?P\d+)/cred/(?P\d+)/delete$", views.delete_cred_engagement,
+ name="delete_cred_engagement"),
+ re_path(r"^test/(?P\d+)/cred/add$", views.new_cred_engagement_test, name="new_cred_engagement_test"),
+ re_path(r"^test/(?P\d+)/cred/(?P\d+)/view$", views.view_cred_engagement_test,
+ name="view_cred_engagement_test"),
+ re_path(r"^test/(?P\d+)/cred/(?P\d+)/delete$", views.delete_cred_test, name="delete_cred_test"),
+ re_path(r"^finding/(?P\d+)/cred/add$", views.new_cred_finding, name="new_cred_finding"),
+ re_path(r"^finding/(?P\d+)/cred/(?P\d+)/view$", views.view_cred_finding, name="view_cred_finding"),
+ re_path(r"^finding/(?P\d+)/cred/(?P\d+)/delete$", views.delete_cred_finding, name="delete_cred_finding"),
]
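Every parametrised route above captures its id with a named regex group, and the group names (ttid, pid, eid, tid, fid) have to match the keyword arguments of the target views. A minimal standalone sketch of that mechanism:

from django.urls import re_path

def view_cred_details(request, ttid):
    ...  # receives ttid as a keyword argument captured from the URL

urlpatterns = [
    # "cred/42/view" resolves to view_cred_details(request, ttid="42")
    re_path(r"^cred/(?P<ttid>\d+)/view$", view_cred_details, name="view_cred_details"),
]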
diff --git a/dojo/cred/views.py b/dojo/cred/views.py
index 53136b4994..09e1cd34e4 100644
--- a/dojo/cred/views.py
+++ b/dojo/cred/views.py
@@ -18,53 +18,53 @@
@user_is_configuration_authorized(Permissions.Credential_Add)
def new_cred(request):
- if request.method == 'POST':
+ if request.method == "POST":
tform = CredUserForm(request.POST)
if tform.is_valid():
form_copy = tform.save(commit=False)
form_copy.password = dojo_crypto_encrypt(
- tform.cleaned_data['password'])
+ tform.cleaned_data["password"])
form_copy.save()
messages.add_message(
request,
messages.SUCCESS,
- 'Credential Successfully Created.',
- extra_tags='alert-success')
- return HttpResponseRedirect(reverse('cred'))
+ "Credential Successfully Created.",
+ extra_tags="alert-success")
+ return HttpResponseRedirect(reverse("cred"))
else:
tform = CredUserForm()
add_breadcrumb(
title="New Credential", top_level=False, request=request)
- return render(request, 'dojo/new_cred.html', {'tform': tform})
+ return render(request, "dojo/new_cred.html", {"tform": tform})
-@user_is_authorized(Product, Permissions.Product_View, 'pid')
+@user_is_authorized(Product, Permissions.Product_View, "pid")
def all_cred_product(request, pid):
prod = get_object_or_404(Product, id=pid)
- creds = Cred_Mapping.objects.filter(product=prod).order_by('cred_id__name')
+ creds = Cred_Mapping.objects.filter(product=prod).order_by("cred_id__name")
product_tab = Product_Tab(prod, title="Credentials", tab="settings")
- return render(request, 'dojo/view_cred_prod.html', {'product_tab': product_tab, 'creds': creds, 'prod': prod})
+ return render(request, "dojo/view_cred_prod.html", {"product_tab": product_tab, "creds": creds, "prod": prod})
-@user_is_authorized(Cred_User, Permissions.Credential_Edit, 'ttid')
+@user_is_authorized(Cred_User, Permissions.Credential_Edit, "ttid")
def edit_cred(request, ttid):
tool_config = Cred_User.objects.get(pk=ttid)
- if request.method == 'POST':
+ if request.method == "POST":
tform = CredUserForm(request.POST, request.FILES, instance=tool_config)
if tform.is_valid():
form_copy = tform.save(commit=False)
form_copy.password = dojo_crypto_encrypt(
- tform.cleaned_data['password'])
+ tform.cleaned_data["password"])
# handle_uploaded_selenium(request.FILES['selenium_script'], tool_config)
form_copy.save()
messages.add_message(
request,
messages.SUCCESS,
- 'Credential Successfully Updated.',
- extra_tags='alert-success')
- return HttpResponseRedirect(reverse('cred'))
+ "Credential Successfully Updated.",
+ extra_tags="alert-success")
+ return HttpResponseRedirect(reverse("cred"))
else:
tool_config.password = prepare_for_view(tool_config.password)
@@ -74,20 +74,20 @@ def edit_cred(request, ttid):
top_level=False,
request=request)
- return render(request, 'dojo/edit_cred.html', {
- 'tform': tform,
+ return render(request, "dojo/edit_cred.html", {
+ "tform": tform,
})
-@user_is_authorized(Cred_User, Permissions.Credential_View, 'ttid')
+@user_is_authorized(Cred_User, Permissions.Credential_View, "ttid")
def view_cred_details(request, ttid):
cred = Cred_User.objects.get(pk=ttid)
notes = cred.notes.all()
- cred_products = Cred_Mapping.objects.select_related('product').filter(
- product_id__isnull=False, cred_id=ttid).order_by('product__name')
+ cred_products = Cred_Mapping.objects.select_related("product").filter(
+ product_id__isnull=False, cred_id=ttid).order_by("product__name")
cred_products = get_authorized_cred_mappings(Permissions.Product_View, cred_products)
- if request.method == 'POST':
+ if request.method == "POST":
form = NoteForm(request.POST)
if form.is_valid():
@@ -101,38 +101,38 @@ def view_cred_details(request, ttid):
messages.add_message(
request,
messages.SUCCESS,
- 'Note added successfully.',
- extra_tags='alert-success')
+ "Note added successfully.",
+ extra_tags="alert-success")
else:
form = NoteForm()
add_breadcrumb(title="View", top_level=False, request=request)
- return render(request, 'dojo/view_cred_details.html', {
- 'cred': cred,
- 'form': form,
- 'notes': notes,
- 'cred_products': cred_products,
+ return render(request, "dojo/view_cred_details.html", {
+ "cred": cred,
+ "form": form,
+ "notes": notes,
+ "cred_products": cred_products,
})
@user_is_configuration_authorized(Permissions.Credential_View)
def cred(request):
- confs = Cred_User.objects.all().order_by('name', 'environment', 'username')
+ confs = Cred_User.objects.all().order_by("name", "environment", "username")
add_breadcrumb(title="Credential Manager", top_level=True, request=request)
- return render(request, 'dojo/view_cred.html', {
- 'confs': confs,
+ return render(request, "dojo/view_cred.html", {
+ "confs": confs,
})
-@user_is_authorized(Product, Permissions.Product_View, 'pid')
-@user_is_authorized(Cred_User, Permissions.Credential_View, 'ttid')
+@user_is_authorized(Product, Permissions.Product_View, "pid")
+@user_is_authorized(Cred_User, Permissions.Credential_View, "ttid")
def view_cred_product(request, pid, ttid):
cred = get_object_or_404(
- Cred_Mapping.objects.select_related('cred_id'), id=ttid)
+ Cred_Mapping.objects.select_related("cred_id"), id=ttid)
notes = cred.cred_id.notes.all()
- if request.method == 'POST':
+ if request.method == "POST":
form = NoteForm(request.POST)
if form.is_valid():
@@ -145,8 +145,8 @@ def view_cred_product(request, pid, ttid):
messages.add_message(
request,
messages.SUCCESS,
- 'Note added successfully.',
- extra_tags='alert-success')
+ "Note added successfully.",
+ extra_tags="alert-success")
else:
form = NoteForm()
@@ -154,43 +154,43 @@ def view_cred_product(request, pid, ttid):
title="Credential Manager", top_level=False, request=request)
cred_type = "Product"
view_link = reverse(
- 'view_cred_product', args=(
+ "view_cred_product", args=(
cred.product.id,
cred.id,
))
edit_link = reverse(
- 'edit_cred_product', args=(
+ "edit_cred_product", args=(
cred.product.id,
cred.id,
))
delete_link = reverse(
- 'delete_cred_product', args=(
+ "delete_cred_product", args=(
cred.product.id,
cred.id,
))
return render(
- request, 'dojo/view_cred_all_details.html', {
- 'cred': cred,
- 'form': form,
- 'notes': notes,
- 'cred_type': cred_type,
- 'edit_link': edit_link,
- 'delete_link': delete_link,
- 'view_link': view_link,
+ request, "dojo/view_cred_all_details.html", {
+ "cred": cred,
+ "form": form,
+ "notes": notes,
+ "cred_type": cred_type,
+ "edit_link": edit_link,
+ "delete_link": delete_link,
+ "view_link": view_link,
})
-@user_is_authorized(Product, Permissions.Engagement_View, 'eid')
-@user_is_authorized(Cred_User, Permissions.Credential_View, 'ttid')
+@user_is_authorized(Product, Permissions.Engagement_View, "eid")
+@user_is_authorized(Cred_User, Permissions.Credential_View, "ttid")
def view_cred_product_engagement(request, eid, ttid):
cred = get_object_or_404(
- Cred_Mapping.objects.select_related('cred_id'), id=ttid)
+ Cred_Mapping.objects.select_related("cred_id"), id=ttid)
cred_product = Cred_Mapping.objects.filter(
cred_id=cred.cred_id.id, product=cred.engagement.product.id).first()
notes = cred.cred_id.notes.all()
- if request.method == 'POST':
+ if request.method == "POST":
form = NoteForm(request.POST)
if form.is_valid():
@@ -203,8 +203,8 @@ def view_cred_product_engagement(request, eid, ttid):
messages.add_message(
request,
messages.SUCCESS,
- 'Note added successfully.',
- extra_tags='alert-success')
+ "Note added successfully.",
+ extra_tags="alert-success")
else:
form = NoteForm()
@@ -213,35 +213,35 @@ def view_cred_product_engagement(request, eid, ttid):
cred_type = "Engagement"
edit_link = ""
delete_link = reverse(
- 'delete_cred_engagement', args=(
+ "delete_cred_engagement", args=(
eid,
cred.id,
))
return render(
- request, 'dojo/view_cred_all_details.html', {
- 'cred': cred,
- 'form': form,
- 'notes': notes,
- 'cred_type': cred_type,
- 'edit_link': edit_link,
- 'delete_link': delete_link,
- 'cred_product': cred_product,
+ request, "dojo/view_cred_all_details.html", {
+ "cred": cred,
+ "form": form,
+ "notes": notes,
+ "cred_type": cred_type,
+ "edit_link": edit_link,
+ "delete_link": delete_link,
+ "cred_product": cred_product,
})
-@user_is_authorized(Product, Permissions.Test_View, 'tid')
-@user_is_authorized(Cred_User, Permissions.Credential_View, 'ttid')
+@user_is_authorized(Product, Permissions.Test_View, "tid")
+@user_is_authorized(Cred_User, Permissions.Credential_View, "ttid")
def view_cred_engagement_test(request, tid, ttid):
cred = get_object_or_404(
- Cred_Mapping.objects.select_related('cred_id'), id=ttid)
+ Cred_Mapping.objects.select_related("cred_id"), id=ttid)
cred_product = Cred_Mapping.objects.filter(
cred_id=cred.cred_id.id,
product=cred.test.engagement.product.id).first()
notes = cred.cred_id.notes.all()
- if request.method == 'POST':
+ if request.method == "POST":
form = NoteForm(request.POST)
if form.is_valid():
@@ -254,8 +254,8 @@ def view_cred_engagement_test(request, tid, ttid):
messages.add_message(
request,
messages.SUCCESS,
- 'Note added successfully.',
- extra_tags='alert-success')
+ "Note added successfully.",
+ extra_tags="alert-success")
else:
form = NoteForm()
@@ -264,35 +264,35 @@ def view_cred_engagement_test(request, tid, ttid):
cred_type = "Test"
edit_link = None
delete_link = reverse(
- 'delete_cred_test', args=(
+ "delete_cred_test", args=(
tid,
cred.id,
))
return render(
- request, 'dojo/view_cred_all_details.html', {
- 'cred': cred,
- 'form': form,
- 'notes': notes,
- 'cred_type': cred_type,
- 'edit_link': edit_link,
- 'delete_link': delete_link,
- 'cred_product': cred_product,
+ request, "dojo/view_cred_all_details.html", {
+ "cred": cred,
+ "form": form,
+ "notes": notes,
+ "cred_type": cred_type,
+ "edit_link": edit_link,
+ "delete_link": delete_link,
+ "cred_product": cred_product,
})
-@user_is_authorized(Product, Permissions.Finding_View, 'fid')
-@user_is_authorized(Cred_User, Permissions.Credential_View, 'ttid')
+@user_is_authorized(Product, Permissions.Finding_View, "fid")
+@user_is_authorized(Cred_User, Permissions.Credential_View, "ttid")
def view_cred_finding(request, fid, ttid):
cred = get_object_or_404(
- Cred_Mapping.objects.select_related('cred_id'), id=ttid)
+ Cred_Mapping.objects.select_related("cred_id"), id=ttid)
cred_product = Cred_Mapping.objects.filter(
cred_id=cred.cred_id.id,
product=cred.finding.test.engagement.product.id).first()
notes = cred.cred_id.notes.all()
- if request.method == 'POST':
+ if request.method == "POST":
form = NoteForm(request.POST)
if form.is_valid():
@@ -305,8 +305,8 @@ def view_cred_finding(request, fid, ttid):
messages.add_message(
request,
messages.SUCCESS,
- 'Note added successfully.',
- extra_tags='alert-success')
+ "Note added successfully.",
+ extra_tags="alert-success")
else:
form = NoteForm()
@@ -315,139 +315,139 @@ def view_cred_finding(request, fid, ttid):
cred_type = "Finding"
edit_link = None
delete_link = reverse(
- 'delete_cred_finding', args=(
+ "delete_cred_finding", args=(
fid,
cred.id,
))
return render(
- request, 'dojo/view_cred_all_details.html', {
- 'cred': cred,
- 'form': form,
- 'notes': notes,
- 'cred_type': cred_type,
- 'edit_link': edit_link,
- 'delete_link': delete_link,
- 'cred_product': cred_product,
+ request, "dojo/view_cred_all_details.html", {
+ "cred": cred,
+ "form": form,
+ "notes": notes,
+ "cred_type": cred_type,
+ "edit_link": edit_link,
+ "delete_link": delete_link,
+ "cred_product": cred_product,
})
-@user_is_authorized(Product, Permissions.Product_Edit, 'pid')
-@user_is_authorized(Cred_User, Permissions.Credential_Edit, 'ttid')
+@user_is_authorized(Product, Permissions.Product_Edit, "pid")
+@user_is_authorized(Cred_User, Permissions.Credential_Edit, "ttid")
def edit_cred_product(request, pid, ttid):
cred = get_object_or_404(
- Cred_Mapping.objects.select_related('cred_id'), id=ttid)
+ Cred_Mapping.objects.select_related("cred_id"), id=ttid)
prod = get_object_or_404(Product, pk=pid)
- if request.method == 'POST':
+ if request.method == "POST":
tform = CredMappingFormProd(request.POST, instance=cred)
if tform.is_valid():
tform.save()
messages.add_message(
request,
messages.SUCCESS,
- 'Credential Successfully Updated.',
- extra_tags='alert-success')
- return HttpResponseRedirect(reverse('all_cred_product', args=(pid, )))
+ "Credential Successfully Updated.",
+ extra_tags="alert-success")
+ return HttpResponseRedirect(reverse("all_cred_product", args=(pid, )))
else:
tform = CredMappingFormProd(instance=cred)
product_tab = Product_Tab(prod, title="Edit Product Credential", tab="settings")
- return render(request, 'dojo/edit_cred_all.html', {
- 'tform': tform,
- 'product_tab': product_tab,
- 'cred_type': "Product",
+ return render(request, "dojo/edit_cred_all.html", {
+ "tform": tform,
+ "product_tab": product_tab,
+ "cred_type": "Product",
})
-@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
-@user_is_authorized(Cred_User, Permissions.Credential_Edit, 'ttid')
+@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid")
+@user_is_authorized(Cred_User, Permissions.Credential_Edit, "ttid")
def edit_cred_product_engagement(request, eid, ttid):
cred = get_object_or_404(
- Cred_Mapping.objects.select_related('cred_id'), id=ttid)
+ Cred_Mapping.objects.select_related("cred_id"), id=ttid)
eng = get_object_or_404(Engagement, pk=eid)
- if request.method == 'POST':
+ if request.method == "POST":
tform = CredMappingForm(request.POST, instance=cred)
if tform.is_valid():
tform.save()
messages.add_message(
request,
messages.SUCCESS,
- 'Credential Successfully Updated.',
- extra_tags='alert-success')
+ "Credential Successfully Updated.",
+ extra_tags="alert-success")
return HttpResponseRedirect(
- reverse('view_engagement', args=(eid, )))
+ reverse("view_engagement", args=(eid, )))
else:
tform = CredMappingFormProd(instance=cred)
tform.fields["cred_id"].queryset = Cred_Mapping.objects.filter(
- product=eng.product).order_by('cred_id')
+ product=eng.product).order_by("cred_id")
add_breadcrumb(
title="Edit Credential Configuration",
top_level=False,
request=request)
- return render(request, 'dojo/edit_cred_all.html', {
- 'tform': tform,
- 'cred_type': "Engagement",
+ return render(request, "dojo/edit_cred_all.html", {
+ "tform": tform,
+ "cred_type": "Engagement",
})
-@user_is_authorized(Product, Permissions.Product_Edit, 'pid')
+@user_is_authorized(Product, Permissions.Product_Edit, "pid")
def new_cred_product(request, pid):
prod = get_object_or_404(Product, pk=pid)
- if request.method == 'POST':
+ if request.method == "POST":
tform = CredMappingFormProd(request.POST)
if tform.is_valid():
# Select the credential mapping object from the selected list and only allow if the credential is associated with the product
cred_user = Cred_Mapping.objects.filter(
- cred_id=tform.cleaned_data['cred_id'].id, product=pid).first()
+ cred_id=tform.cleaned_data["cred_id"].id, product=pid).first()
message = "Credential already associated."
- status_tag = 'alert-danger'
+ status_tag = "alert-danger"
if cred_user is None:
prod = Product.objects.get(id=pid)
new_f = tform.save(commit=False)
new_f.product = prod
new_f.save()
- message = 'Credential Successfully Updated.'
- status_tag = 'alert-success'
+ message = "Credential Successfully Updated."
+ status_tag = "alert-success"
messages.add_message(
request, messages.SUCCESS, message, extra_tags=status_tag)
- return HttpResponseRedirect(reverse('all_cred_product', args=(pid, )))
+ return HttpResponseRedirect(reverse("all_cred_product", args=(pid, )))
else:
tform = CredMappingFormProd()
product_tab = Product_Tab(prod, title="Add Credential Configuration", tab="settings")
- return render(request, 'dojo/new_cred_product.html', {
- 'tform': tform,
- 'pid': pid,
- 'product_tab': product_tab,
+ return render(request, "dojo/new_cred_product.html", {
+ "tform": tform,
+ "pid": pid,
+ "product_tab": product_tab,
})
-@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid")
def new_cred_product_engagement(request, eid):
eng = get_object_or_404(Engagement, pk=eid)
- if request.method == 'POST':
+ if request.method == "POST":
tform = CredMappingForm(request.POST)
tform.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
- product=eng.product).order_by('cred_id')
- if tform.is_valid() and tform.cleaned_data['cred_user']:
+ product=eng.product).order_by("cred_id")
+ if tform.is_valid() and tform.cleaned_data["cred_user"]:
# Select the credential mapping object from the selected list and only allow if the credential is associated with the product
cred_user = Cred_Mapping.objects.filter(
- pk=tform.cleaned_data['cred_user'].id,
- product=eng.product.id).order_by('cred_id').first()
+ pk=tform.cleaned_data["cred_user"].id,
+ product=eng.product.id).order_by("cred_id").first()
# search for cred_user and engagement id
cred_lookup = Cred_Mapping.objects.filter(
cred_id=cred_user.cred_id, engagement=eng.id)
message = "Credential already associated."
- status_tag = 'alert-danger'
+ status_tag = "alert-danger"
if not cred_user:
message = "Credential must first be associated with this product."
@@ -457,48 +457,48 @@ def new_cred_product_engagement(request, eid):
new_f.engagement = eng
new_f.cred_id = cred_user.cred_id
new_f.save()
- message = 'Credential Successfully Updated.'
- status_tag = 'alert-success'
+ message = "Credential Successfully Updated."
+ status_tag = "alert-success"
messages.add_message(
request, messages.SUCCESS, message, extra_tags=status_tag)
return HttpResponseRedirect(
- reverse('view_engagement', args=(eid, )))
+ reverse("view_engagement", args=(eid, )))
else:
tform = CredMappingForm()
tform.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
- product=eng.product).order_by('cred_id')
+ product=eng.product).order_by("cred_id")
add_breadcrumb(
title="Add Credential Configuration", top_level=False, request=request)
return render(
- request, 'dojo/new_cred_mapping.html', {
- 'tform': tform,
- 'eid': eid,
- 'formlink': reverse('new_cred_product_engagement', args=(eid, )),
+ request, "dojo/new_cred_mapping.html", {
+ "tform": tform,
+ "eid": eid,
+ "formlink": reverse("new_cred_product_engagement", args=(eid, )),
})
-@user_is_authorized(Test, Permissions.Test_Edit, 'tid')
+@user_is_authorized(Test, Permissions.Test_Edit, "tid")
def new_cred_engagement_test(request, tid):
test = get_object_or_404(Test, pk=tid)
- if request.method == 'POST':
+ if request.method == "POST":
tform = CredMappingForm(request.POST)
tform.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
- engagement=test.engagement).order_by('cred_id')
- if tform.is_valid() and tform.cleaned_data['cred_user']:
+ engagement=test.engagement).order_by("cred_id")
+ if tform.is_valid() and tform.cleaned_data["cred_user"]:
# Select the credential mapping object from the selected list and only allow if the credential is associated with the product
cred_user = Cred_Mapping.objects.filter(
- pk=tform.cleaned_data['cred_user'].id,
+ pk=tform.cleaned_data["cred_user"].id,
engagement=test.engagement.id).first()
# search for cred_user and test id
cred_lookup = Cred_Mapping.objects.filter(
cred_id=cred_user.cred_id, test=test.id)
message = "Credential already associated."
- status_tag = 'alert-danger'
+ status_tag = "alert-danger"
if not cred_user:
message = "Credential must first be associated with this product."
@@ -508,48 +508,48 @@ def new_cred_engagement_test(request, tid):
new_f.test = test
new_f.cred_id = cred_user.cred_id
new_f.save()
- message = 'Credential Successfully Updated.'
- status_tag = 'alert-success'
+ message = "Credential Successfully Updated."
+ status_tag = "alert-success"
messages.add_message(
request, messages.SUCCESS, message, extra_tags=status_tag)
- return HttpResponseRedirect(reverse('view_test', args=(tid, )))
+ return HttpResponseRedirect(reverse("view_test", args=(tid, )))
else:
tform = CredMappingForm()
tform.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
- engagement=test.engagement).order_by('cred_id')
+ engagement=test.engagement).order_by("cred_id")
add_breadcrumb(
title="Add Credential Configuration", top_level=False, request=request)
return render(
- request, 'dojo/new_cred_mapping.html', {
- 'tform': tform,
- 'eid': tid,
- 'formlink': reverse('new_cred_engagement_test', args=(tid, )),
+ request, "dojo/new_cred_mapping.html", {
+ "tform": tform,
+ "eid": tid,
+ "formlink": reverse("new_cred_engagement_test", args=(tid, )),
})
-@user_is_authorized(Finding, Permissions.Finding_Edit, 'fid')
+@user_is_authorized(Finding, Permissions.Finding_Edit, "fid")
def new_cred_finding(request, fid):
finding = get_object_or_404(Finding, pk=fid)
- if request.method == 'POST':
+ if request.method == "POST":
tform = CredMappingForm(request.POST)
tform.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
- engagement=finding.test.engagement).order_by('cred_id')
+ engagement=finding.test.engagement).order_by("cred_id")
- if tform.is_valid() and tform.cleaned_data['cred_user']:
+ if tform.is_valid() and tform.cleaned_data["cred_user"]:
# Select the credential mapping object from the selected list and only allow if the credential is associated with the product
cred_user = Cred_Mapping.objects.filter(
- pk=tform.cleaned_data['cred_user'].id,
+ pk=tform.cleaned_data["cred_user"].id,
engagement=finding.test.engagement.id).first()
# search for cred_user and test id
cred_lookup = Cred_Mapping.objects.filter(
cred_id=cred_user.cred_id, finding=finding.id)
message = "Credential already associated."
- status_tag = 'alert-danger'
+ status_tag = "alert-danger"
if not cred_user:
message = "Credential must first be associated with this product."
@@ -559,36 +559,36 @@ def new_cred_finding(request, fid):
new_f.finding = finding
new_f.cred_id = cred_user.cred_id
new_f.save()
- message = 'Credential Successfully Updated.'
- status_tag = 'alert-success'
+ message = "Credential Successfully Updated."
+ status_tag = "alert-success"
messages.add_message(
request, messages.SUCCESS, message, extra_tags=status_tag)
- return HttpResponseRedirect(reverse('view_finding', args=(fid, )))
+ return HttpResponseRedirect(reverse("view_finding", args=(fid, )))
else:
tform = CredMappingForm()
tform.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
- engagement=finding.test.engagement).order_by('cred_id')
+ engagement=finding.test.engagement).order_by("cred_id")
add_breadcrumb(
title="Add Credential Configuration", top_level=False, request=request)
return render(
- request, 'dojo/new_cred_mapping.html', {
- 'tform': tform,
- 'eid': fid,
- 'formlink': reverse('new_cred_finding', args=(fid, )),
+ request, "dojo/new_cred_mapping.html", {
+ "tform": tform,
+ "eid": fid,
+ "formlink": reverse("new_cred_finding", args=(fid, )),
})
-@user_is_authorized(Cred_User, Permissions.Credential_Delete, 'ttid')
+@user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid")
def delete_cred_controller(request, destination_url, id, ttid):
cred = None
try:
cred = Cred_Mapping.objects.get(pk=ttid)
except:
pass
- if request.method == 'POST':
+ if request.method == "POST":
tform = CredMappingForm(request.POST, instance=cred)
message = ""
status_tag = ""
@@ -633,7 +633,7 @@ def delete_cred_controller(request, destination_url, id, ttid):
else:
cred.delete()
else:
- status_tag = 'alert-danger'
+ status_tag = "alert-danger"
messages.add_message(
request, messages.SUCCESS, message, extra_tags=status_tag)
@@ -661,36 +661,36 @@ def delete_cred_controller(request, destination_url, id, ttid):
finding = get_object_or_404(Finding, id=id)
product = finding.test.engagement.product
product_tab = Product_Tab(product, title="Delete Credential Mapping", tab="settings")
- return render(request, 'dojo/delete_cred_all.html', {
- 'tform': tform,
- 'product_tab': product_tab,
+ return render(request, "dojo/delete_cred_all.html", {
+ "tform": tform,
+ "product_tab": product_tab,
})
-@user_is_authorized(Cred_User, Permissions.Credential_Delete, 'ttid')
+@user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid")
def delete_cred(request, ttid):
return delete_cred_controller(request, "cred", 0, ttid)
-@user_is_authorized(Product, Permissions.Product_Edit, 'pid')
-@user_is_authorized(Cred_User, Permissions.Credential_Delete, 'ttid')
+@user_is_authorized(Product, Permissions.Product_Edit, "pid")
+@user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid")
def delete_cred_product(request, pid, ttid):
return delete_cred_controller(request, "all_cred_product", pid, ttid)
-@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
-@user_is_authorized(Cred_User, Permissions.Credential_Delete, 'ttid')
+@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid")
+@user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid")
def delete_cred_engagement(request, eid, ttid):
return delete_cred_controller(request, "view_engagement", eid, ttid)
-@user_is_authorized(Test, Permissions.Test_Edit, 'tid')
-@user_is_authorized(Cred_User, Permissions.Credential_Delete, 'ttid')
+@user_is_authorized(Test, Permissions.Test_Edit, "tid")
+@user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid")
def delete_cred_test(request, tid, ttid):
return delete_cred_controller(request, "view_test", tid, ttid)
-@user_is_authorized(Finding, Permissions.Finding_Edit, 'fid')
-@user_is_authorized(Cred_User, Permissions.Credential_Delete, 'ttid')
+@user_is_authorized(Finding, Permissions.Finding_Edit, "fid")
+@user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid")
def delete_cred_finding(request, fid, ttid):
return delete_cred_controller(request, "view_finding", fid, ttid)
diff --git a/dojo/decorators.py b/dojo/decorators.py
index c919a2995b..e48ba31c56 100644
--- a/dojo/decorators.py
+++ b/dojo/decorators.py
@@ -16,19 +16,19 @@ def we_want_async(*args, func=None, **kwargs):
from dojo.models import Dojo_User
from dojo.utils import get_current_user
- sync = kwargs.get('sync', False)
+ sync = kwargs.get("sync", False)
if sync:
- logger.debug('dojo_async_task %s: running task in the foreground as sync=True has been found as kwarg', func)
+ logger.debug("dojo_async_task %s: running task in the foreground as sync=True has been found as kwarg", func)
return False
- user = kwargs.get('async_user', get_current_user())
- logger.debug('user: %s', user)
+ user = kwargs.get("async_user", get_current_user())
+ logger.debug("user: %s", user)
if Dojo_User.wants_block_execution(user):
- logger.debug('dojo_async_task %s: running task in the foreground as block_execution is set to True for %s', func, user)
+ logger.debug("dojo_async_task %s: running task in the foreground as block_execution is set to True for %s", func, user)
return False
- logger.debug('dojo_async_task %s: no current user, running task in the background', func)
+ logger.debug("dojo_async_task %s: no current user, running task in the background", func)
return True
@@ -39,7 +39,7 @@ def dojo_async_task(func):
def __wrapper__(*args, **kwargs):
from dojo.utils import get_current_user
user = get_current_user()
- kwargs['async_user'] = user
+ kwargs["async_user"] = user
countdown = kwargs.pop("countdown", 0)
if we_want_async(*args, func=func, **kwargs):
return func.apply_async(args=args, kwargs=kwargs, countdown=countdown)
@@ -66,7 +66,7 @@ def __wrapper__(*args, **kwargs):
if model_or_id:
if isinstance(model_or_id, models.Model) and we_want_async(*args, func=func, **kwargs):
- logger.debug('converting model_or_id to id: %s', model_or_id)
+ logger.debug("converting model_or_id to id: %s", model_or_id)
id = model_or_id.id
args = list(args)
args[parameter] = id
@@ -96,25 +96,25 @@ def __wrapper__(*args, **kwargs):
if not settings.CELERY_PASS_MODEL_BY_ID:
return func(*args, **kwargs)
- logger.debug('args:' + str(args))
- logger.debug('kwargs:' + str(kwargs))
+ logger.debug("args:" + str(args))
+ logger.debug("kwargs:" + str(kwargs))
- logger.debug('checking if we need to convert id to model: %s for parameter: %s', model.__name__, parameter)
+ logger.debug("checking if we need to convert id to model: %s for parameter: %s", model.__name__, parameter)
model_or_id = get_parameter_froms_args_kwargs(args, kwargs, parameter)
if model_or_id:
if not isinstance(model_or_id, models.Model) and we_want_async(*args, func=func, **kwargs):
- logger.debug('instantiating model_or_id: %s for model: %s', model_or_id, model)
+ logger.debug("instantiating model_or_id: %s for model: %s", model_or_id, model)
try:
instance = model.objects.get(id=model_or_id)
except model.DoesNotExist:
- logger.debug('error instantiating model_or_id: %s for model: %s: DoesNotExist', model_or_id, model)
+ logger.debug("error instantiating model_or_id: %s for model: %s: DoesNotExist", model_or_id, model)
instance = None
args = list(args)
args[parameter] = instance
else:
- logger.debug('model_or_id already a model instance %s for model: %s', model_or_id, model)
+ logger.debug("model_or_id already a model instance %s for model: %s", model_or_id, model)
return func(*args, **kwargs)
@@ -133,16 +133,16 @@ def get_parameter_froms_args_kwargs(args, kwargs, parameter):
# Lookup value came as a positional argument
args = list(args)
if parameter >= len(args):
- raise ValueError('parameter index invalid: ' + str(parameter))
+ raise ValueError("parameter index invalid: " + str(parameter))
model_or_id = args[parameter]
else:
# Lookup value was passed as keyword argument
model_or_id = kwargs.get(parameter, None)
- logger.debug('model_or_id: %s', model_or_id)
+ logger.debug("model_or_id: %s", model_or_id)
if not model_or_id:
- logger.error('unable to get parameter: ' + parameter)
+ logger.error("unable to get parameter: " + parameter)
return model_or_id
@@ -155,7 +155,7 @@ def wrapper(self, *args, **kwargs):
except Exception:
print("exception occured at url:", self.driver.current_url)
print("page source:", self.driver.page_source)
- f = open("/tmp/selenium_page_source.html", "w", encoding='utf-8')
+ f = open("/tmp/selenium_page_source.html", "w", encoding="utf-8")
f.writelines(self.driver.page_source)
# time.sleep(30)
raise
@@ -163,21 +163,21 @@ def wrapper(self, *args, **kwargs):
return wrapper
-def dojo_ratelimit(key='ip', rate=None, method=UNSAFE, block=False):
+def dojo_ratelimit(key="ip", rate=None, method=UNSAFE, block=False):
def decorator(fn):
@wraps(fn)
def _wrapped(request, *args, **kw):
- _block = getattr(settings, 'RATE_LIMITER_BLOCK', block)
- _rate = getattr(settings, 'RATE_LIMITER_RATE', rate)
- _lockout = getattr(settings, 'RATE_LIMITER_ACCOUNT_LOCKOUT', False)
- old_limited = getattr(request, 'limited', False)
+ _block = getattr(settings, "RATE_LIMITER_BLOCK", block)
+ _rate = getattr(settings, "RATE_LIMITER_RATE", rate)
+ _lockout = getattr(settings, "RATE_LIMITER_ACCOUNT_LOCKOUT", False)
+ old_limited = getattr(request, "limited", False)
ratelimited = is_ratelimited(request=request, fn=fn,
key=key, rate=_rate, method=method,
increment=True)
request.limited = ratelimited or old_limited
if ratelimited and _block:
if _lockout:
- username = request.POST.get('username', None)
+ username = request.POST.get("username", None)
if username:
dojo_user = Dojo_User.objects.filter(username=username).first()
if dojo_user:
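The dojo_async_task plumbing above only changes quoting, but the dispatch it implements is easy to lose in the diff: a sync kwarg (or a user preference) forces foreground execution, otherwise the task is handed to Celery with an optional countdown. A stripped-down sketch of that dispatch, independent of the Dojo models and not an exact stand-in for dojo_async_task:

from functools import wraps

def async_task(func):
    # func is assumed to be a Celery task (i.e. it exposes .apply_async).
    @wraps(func)
    def wrapper(*args, **kwargs):
        countdown = kwargs.pop("countdown", 0)
        if kwargs.pop("sync", False):
            return func(*args, **kwargs)  # run inline, in the current request
        return func.apply_async(args=args, kwargs=kwargs, countdown=countdown)
    return wrapper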
diff --git a/dojo/development_environment/urls.py b/dojo/development_environment/urls.py
index a61b507325..1c1c60393d 100644
--- a/dojo/development_environment/urls.py
+++ b/dojo/development_environment/urls.py
@@ -4,9 +4,9 @@
urlpatterns = [
# dev envs
- re_path(r'^dev_env$', views.dev_env, name='dev_env'),
- re_path(r'^dev_env/add$', views.add_dev_env,
- name='add_dev_env'),
- re_path(r'^dev_env/(?P<deid>\d+)/edit$',
- views.edit_dev_env, name='edit_dev_env'),
+ re_path(r"^dev_env$", views.dev_env, name="dev_env"),
+ re_path(r"^dev_env/add$", views.add_dev_env,
+ name="add_dev_env"),
+ re_path(r"^dev_env/(?P\d+)/edit$",
+ views.edit_dev_env, name="edit_dev_env"),
]
diff --git a/dojo/development_environment/views.py b/dojo/development_environment/views.py
index 1bf998fadf..4a0b3b20df 100644
--- a/dojo/development_environment/views.py
+++ b/dojo/development_environment/views.py
@@ -20,59 +20,59 @@
@login_required
def dev_env(request):
- initial_queryset = Development_Environment.objects.all().order_by('name')
+ initial_queryset = Development_Environment.objects.all().order_by("name")
name_words = [de.name for de in
initial_queryset]
devs = DevelopmentEnvironmentFilter(request.GET, queryset=initial_queryset)
dev_page = get_page_items(request, devs.qs, 25)
add_breadcrumb(title="Environment List", top_level=True, request=request)
- return render(request, 'dojo/dev_env.html', {
- 'name': 'Environment',
- 'metric': False,
- 'user': request.user,
- 'devs': dev_page,
- 'dts': devs,
- 'name_words': name_words})
+ return render(request, "dojo/dev_env.html", {
+ "name": "Environment",
+ "metric": False,
+ "user": request.user,
+ "devs": dev_page,
+ "dts": devs,
+ "name_words": name_words})
-@user_is_configuration_authorized('dojo.add_development_environment')
+@user_is_configuration_authorized("dojo.add_development_environment")
def add_dev_env(request):
form = Development_EnvironmentForm()
- if request.method == 'POST':
+ if request.method == "POST":
form = Development_EnvironmentForm(request.POST)
if form.is_valid():
form.save()
messages.add_message(request,
messages.SUCCESS,
- 'Environment added successfully.',
- extra_tags='alert-success')
- return HttpResponseRedirect(reverse('dev_env'))
+ "Environment added successfully.",
+ extra_tags="alert-success")
+ return HttpResponseRedirect(reverse("dev_env"))
add_breadcrumb(title="Add Environment", top_level=False, request=request)
- return render(request, 'dojo/new_dev_env.html', {
- 'name': 'Add Environment',
- 'metric': False,
- 'user': request.user,
- 'form': form,
+ return render(request, "dojo/new_dev_env.html", {
+ "name": "Add Environment",
+ "metric": False,
+ "user": request.user,
+ "form": form,
})
-@user_is_configuration_authorized('dojo.change_development_environment')
+@user_is_configuration_authorized("dojo.change_development_environment")
def edit_dev_env(request, deid):
de = get_object_or_404(Development_Environment, pk=deid)
form1 = Development_EnvironmentForm(instance=de)
form2 = Delete_Dev_EnvironmentForm(instance=de)
- if request.method == 'POST' and request.POST.get('edit_dev_env'):
+ if request.method == "POST" and request.POST.get("edit_dev_env"):
form1 = Development_EnvironmentForm(request.POST, instance=de)
if form1.is_valid():
de = form1.save()
messages.add_message(
request,
messages.SUCCESS,
- 'Environment updated successfully.',
- extra_tags='alert-success')
- return HttpResponseRedirect(reverse('dev_env'))
- if request.method == 'POST' and request.POST.get('delete_dev_env'):
- user_has_configuration_permission_or_403(request.user, 'dojo.delete_development_environment')
+ "Environment updated successfully.",
+ extra_tags="alert-success")
+ return HttpResponseRedirect(reverse("dev_env"))
+ if request.method == "POST" and request.POST.get("delete_dev_env"):
+ user_has_configuration_permission_or_403(request.user, "dojo.delete_development_environment")
form2 = Delete_Dev_EnvironmentForm(request.POST, instance=de)
if form2.is_valid():
try:
@@ -80,19 +80,19 @@ def edit_dev_env(request, deid):
messages.add_message(
request,
messages.SUCCESS,
- 'Environment deleted successfully.',
- extra_tags='alert-success')
+ "Environment deleted successfully.",
+ extra_tags="alert-success")
except RestrictedError as err:
messages.add_message(request,
messages.WARNING,
- f'Environment cannot be deleted: {err}',
- extra_tags='alert-warning')
- return HttpResponseRedirect(reverse('dev_env'))
+ f"Environment cannot be deleted: {err}",
+ extra_tags="alert-warning")
+ return HttpResponseRedirect(reverse("dev_env"))
add_breadcrumb(title="Edit Environment", top_level=False, request=request)
- return render(request, 'dojo/edit_dev_env.html', {
- 'name': 'Edit Environment',
- 'metric': False,
- 'user': request.user,
- 'form1': form1,
- 'de': de})
+ return render(request, "dojo/edit_dev_env.html", {
+ "name": "Edit Environment",
+ "metric": False,
+ "user": request.user,
+ "form1": form1,
+ "de": de})
diff --git a/dojo/endpoint/queries.py b/dojo/endpoint/queries.py
index c1a3267ba2..581feefc13 100644
--- a/dojo/endpoint/queries.py
+++ b/dojo/endpoint/queries.py
@@ -33,19 +33,19 @@ def get_authorized_endpoints(permission, queryset=None, user=None):
roles = get_roles_for_permission(permission)
authorized_product_type_roles = Product_Type_Member.objects.filter(
- product_type=OuterRef('product__prod_type_id'),
+ product_type=OuterRef("product__prod_type_id"),
user=user,
role__in=roles)
authorized_product_roles = Product_Member.objects.filter(
- product=OuterRef('product_id'),
+ product=OuterRef("product_id"),
user=user,
role__in=roles)
authorized_product_type_groups = Product_Type_Group.objects.filter(
- product_type=OuterRef('product__prod_type_id'),
+ product_type=OuterRef("product__prod_type_id"),
group__users=user,
role__in=roles)
authorized_product_groups = Product_Group.objects.filter(
- product=OuterRef('product_id'),
+ product=OuterRef("product_id"),
group__users=user,
role__in=roles)
endpoints = endpoints.annotate(
@@ -81,19 +81,19 @@ def get_authorized_endpoint_status(permission, queryset=None, user=None):
roles = get_roles_for_permission(permission)
authorized_product_type_roles = Product_Type_Member.objects.filter(
- product_type=OuterRef('endpoint__product__prod_type_id'),
+ product_type=OuterRef("endpoint__product__prod_type_id"),
user=user,
role__in=roles)
authorized_product_roles = Product_Member.objects.filter(
- product=OuterRef('endpoint__product_id'),
+ product=OuterRef("endpoint__product_id"),
user=user,
role__in=roles)
authorized_product_type_groups = Product_Type_Group.objects.filter(
- product_type=OuterRef('endpoint__product__prod_type_id'),
+ product_type=OuterRef("endpoint__product__prod_type_id"),
group__users=user,
role__in=roles)
authorized_product_groups = Product_Group.objects.filter(
- product=OuterRef('endpoint__product_id'),
+ product=OuterRef("endpoint__product_id"),
group__users=user,
role__in=roles)
endpoint_status = endpoint_status.annotate(
diff --git a/dojo/endpoint/signals.py b/dojo/endpoint/signals.py
index 4c18d03d91..f96510df78 100644
--- a/dojo/endpoint/signals.py
+++ b/dojo/endpoint/signals.py
@@ -16,15 +16,15 @@ def endpoint_post_delete(sender, instance, using, origin, **kwargs):
if settings.ENABLE_AUDITLOG:
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
- content_type=ContentType.objects.get(app_label='dojo', model='endpoint'),
+ content_type=ContentType.objects.get(app_label="dojo", model="endpoint"),
object_id=instance.id,
)
description = _('The endpoint "%(name)s" was deleted by %(user)s') % {
- 'name': str(instance), 'user': le.actor}
+ "name": str(instance), "user": le.actor}
else:
- description = _('The endpoint "%(name)s" was deleted') % {'name': str(instance)}
- create_notification(event='endpoint_deleted', # template does not exists, it will default to "other" but this event name needs to stay because of unit testing
- title=_('Deletion of %(name)s') % {'name': str(instance)},
+ description = _('The endpoint "%(name)s" was deleted') % {"name": str(instance)}
+ create_notification(event="endpoint_deleted", # template does not exists, it will default to "other" but this event name needs to stay because of unit testing
+ title=_("Deletion of %(name)s") % {"name": str(instance)},
description=description,
- url=reverse('endpoint'),
+ url=reverse("endpoint"),
icon="exclamation-triangle")
diff --git a/dojo/endpoint/urls.py b/dojo/endpoint/urls.py
index 56afa2411b..290f32961a 100644
--- a/dojo/endpoint/urls.py
+++ b/dojo/endpoint/urls.py
@@ -4,38 +4,38 @@
urlpatterns = [
# endpoints
- re_path(r'^endpoint$', views.all_endpoints,
- name='endpoint'),
- re_path(r'^endpoint/host$', views.all_endpoint_hosts,
- name='endpoint_host'),
- re_path(r'^endpoint/vulnerable$', views.vulnerable_endpoints,
- name='vulnerable_endpoints'),
- re_path(r'^endpoint/host/vulnerable$', views.vulnerable_endpoint_hosts,
- name='vulnerable_endpoint_hosts'),
- re_path(r'^endpoint/(?P\d+)$', views.view_endpoint,
- name='view_endpoint'),
- re_path(r'^endpoint/host/(?P\d+)$', views.view_endpoint_host,
- name='view_endpoint_host'),
- re_path(r'^endpoint/(?P\d+)/edit$', views.edit_endpoint,
- name='edit_endpoint'),
- re_path(r'^endpoints/(?P\d+)/add$', views.add_endpoint,
- name='add_endpoint'),
- re_path(r'^endpoint/(?P\d+)/delete$', views.delete_endpoint,
- name='delete_endpoint'),
- re_path(r'^endpoints/add$', views.add_product_endpoint,
- name='add_product_endpoint'),
- re_path(r'^endpoint/(?P\d+)/add_meta_data$', views.add_meta_data,
- name='add_endpoint_meta_data'),
- re_path(r'^endpoint/(?P\d+)/edit_meta_data$', views.edit_meta_data,
- name='edit_endpoint_meta_data'),
- re_path(r'^endpoint/bulk$', views.endpoint_bulk_update_all,
- name='endpoints_bulk_all'),
- re_path(r'^product/(?P\d+)/endpoint/bulk_product$', views.endpoint_bulk_update_all,
- name='endpoints_bulk_update_all_product'),
- re_path(r'^endpoint/(?P\d+)/bulk_status$', views.endpoint_status_bulk_update,
- name='endpoints_status_bulk'),
- re_path(r'^endpoint/migrate$', views.migrate_endpoints_view,
- name='endpoint_migrate'),
- re_path(r'^endpoint/(?P\d+)/import_endpoint_meta$', views.import_endpoint_meta,
- name='import_endpoint_meta'),
+ re_path(r"^endpoint$", views.all_endpoints,
+ name="endpoint"),
+ re_path(r"^endpoint/host$", views.all_endpoint_hosts,
+ name="endpoint_host"),
+ re_path(r"^endpoint/vulnerable$", views.vulnerable_endpoints,
+ name="vulnerable_endpoints"),
+ re_path(r"^endpoint/host/vulnerable$", views.vulnerable_endpoint_hosts,
+ name="vulnerable_endpoint_hosts"),
+ re_path(r"^endpoint/(?P\d+)$", views.view_endpoint,
+ name="view_endpoint"),
+ re_path(r"^endpoint/host/(?P\d+)$", views.view_endpoint_host,
+ name="view_endpoint_host"),
+ re_path(r"^endpoint/(?P\d+)/edit$", views.edit_endpoint,
+ name="edit_endpoint"),
+ re_path(r"^endpoints/(?P\d+)/add$", views.add_endpoint,
+ name="add_endpoint"),
+ re_path(r"^endpoint/(?P\d+)/delete$", views.delete_endpoint,
+ name="delete_endpoint"),
+ re_path(r"^endpoints/add$", views.add_product_endpoint,
+ name="add_product_endpoint"),
+ re_path(r"^endpoint/(?P\d+)/add_meta_data$", views.add_meta_data,
+ name="add_endpoint_meta_data"),
+ re_path(r"^endpoint/(?P\d+)/edit_meta_data$", views.edit_meta_data,
+ name="edit_endpoint_meta_data"),
+ re_path(r"^endpoint/bulk$", views.endpoint_bulk_update_all,
+ name="endpoints_bulk_all"),
+ re_path(r"^product/(?P\d+)/endpoint/bulk_product$", views.endpoint_bulk_update_all,
+ name="endpoints_bulk_update_all_product"),
+ re_path(r"^endpoint/(?P\d+)/bulk_status$", views.endpoint_status_bulk_update,
+ name="endpoints_status_bulk"),
+ re_path(r"^endpoint/migrate$", views.migrate_endpoints_view,
+ name="endpoint_migrate"),
+ re_path(r"^endpoint/(?P\d+)/import_endpoint_meta$", views.import_endpoint_meta,
+ name="import_endpoint_meta"),
]
diff --git a/dojo/endpoint/utils.py b/dojo/endpoint/utils.py
index 9b7733c553..0715e8b43d 100644
--- a/dojo/endpoint/utils.py
+++ b/dojo/endpoint/utils.py
@@ -20,53 +20,53 @@
def endpoint_filter(**kwargs):
qs = Endpoint.objects.all()
- if kwargs.get('protocol'):
- qs = qs.filter(protocol__iexact=kwargs['protocol'])
+ if kwargs.get("protocol"):
+ qs = qs.filter(protocol__iexact=kwargs["protocol"])
else:
qs = qs.filter(protocol__isnull=True)
- if kwargs.get('userinfo'):
- qs = qs.filter(userinfo__exact=kwargs['userinfo'])
+ if kwargs.get("userinfo"):
+ qs = qs.filter(userinfo__exact=kwargs["userinfo"])
else:
qs = qs.filter(userinfo__isnull=True)
- if kwargs.get('host'):
- qs = qs.filter(host__iexact=kwargs['host'])
+ if kwargs.get("host"):
+ qs = qs.filter(host__iexact=kwargs["host"])
else:
qs = qs.filter(host__isnull=True)
- if kwargs.get('port'):
- if (kwargs.get('protocol')) and \
- (kwargs['protocol'].lower() in SCHEME_PORT_MAP) and \
- (SCHEME_PORT_MAP[kwargs['protocol'].lower()] == kwargs['port']):
- qs = qs.filter(Q(port__isnull=True) | Q(port__exact=SCHEME_PORT_MAP[kwargs['protocol'].lower()]))
+ if kwargs.get("port"):
+ if (kwargs.get("protocol")) and \
+ (kwargs["protocol"].lower() in SCHEME_PORT_MAP) and \
+ (SCHEME_PORT_MAP[kwargs["protocol"].lower()] == kwargs["port"]):
+ qs = qs.filter(Q(port__isnull=True) | Q(port__exact=SCHEME_PORT_MAP[kwargs["protocol"].lower()]))
else:
- qs = qs.filter(port__exact=kwargs['port'])
+ qs = qs.filter(port__exact=kwargs["port"])
else:
- if (kwargs.get('protocol')) and (kwargs['protocol'].lower() in SCHEME_PORT_MAP):
- qs = qs.filter(Q(port__isnull=True) | Q(port__exact=SCHEME_PORT_MAP[kwargs['protocol'].lower()]))
+ if (kwargs.get("protocol")) and (kwargs["protocol"].lower() in SCHEME_PORT_MAP):
+ qs = qs.filter(Q(port__isnull=True) | Q(port__exact=SCHEME_PORT_MAP[kwargs["protocol"].lower()]))
else:
qs = qs.filter(port__isnull=True)
- if kwargs.get('path'):
- qs = qs.filter(path__exact=kwargs['path'])
+ if kwargs.get("path"):
+ qs = qs.filter(path__exact=kwargs["path"])
else:
qs = qs.filter(path__isnull=True)
- if kwargs.get('query'):
- qs = qs.filter(query__exact=kwargs['query'])
+ if kwargs.get("query"):
+ qs = qs.filter(query__exact=kwargs["query"])
else:
qs = qs.filter(query__isnull=True)
- if kwargs.get('fragment'):
- qs = qs.filter(fragment__exact=kwargs['fragment'])
+ if kwargs.get("fragment"):
+ qs = qs.filter(fragment__exact=kwargs["fragment"])
else:
qs = qs.filter(fragment__isnull=True)
- if kwargs.get('product'):
- qs = qs.filter(product__exact=kwargs['product'])
- elif kwargs.get('product_id'):
- qs = qs.filter(product_id__exact=kwargs['product_id'])
+ if kwargs.get("product"):
+ qs = qs.filter(product__exact=kwargs["product"])
+ elif kwargs.get("product_id"):
+ qs = qs.filter(product_id__exact=kwargs["product_id"])
else:
qs = qs.filter(product__isnull=True)
@@ -92,38 +92,38 @@ def endpoint_get_or_create(**kwargs):
def clean_hosts_run(apps, change):
def err_log(message, html_log, endpoint_html_log, endpoint):
- error_suffix = 'It is not possible to migrate it. Delete or edit this endpoint.'
- html_log.append({**endpoint_html_log, 'message': message})
- logger.error(f'Endpoint (id={endpoint.pk}) {message}. {error_suffix}')
+ error_suffix = "It is not possible to migrate it. Delete or edit this endpoint."
+ html_log.append({**endpoint_html_log, "message": message})
+ logger.error(f"Endpoint (id={endpoint.pk}) {message}. {error_suffix}")
broken_endpoints.add(endpoint.pk)
html_log = []
broken_endpoints = set()
- Endpoint_model = apps.get_model('dojo', 'Endpoint')
- Endpoint_Status_model = apps.get_model('dojo', 'Endpoint_Status')
- Product_model = apps.get_model('dojo', 'Product')
- for endpoint in Endpoint_model.objects.order_by('id'):
+ Endpoint_model = apps.get_model("dojo", "Endpoint")
+ Endpoint_Status_model = apps.get_model("dojo", "Endpoint_Status")
+ Product_model = apps.get_model("dojo", "Product")
+ for endpoint in Endpoint_model.objects.order_by("id"):
endpoint_html_log = {
- 'view': reverse('view_endpoint', args=[endpoint.pk]),
- 'edit': reverse('edit_endpoint', args=[endpoint.pk]),
- 'delete': reverse('delete_endpoint', args=[endpoint.pk]),
+ "view": reverse("view_endpoint", args=[endpoint.pk]),
+ "edit": reverse("edit_endpoint", args=[endpoint.pk]),
+ "delete": reverse("delete_endpoint", args=[endpoint.pk]),
}
if endpoint.host:
- if not re.match(r'^[A-Za-z][A-Za-z0-9\.\-\+]+$', endpoint.host): # is old host valid FQDN?
+ if not re.match(r"^[A-Za-z][A-Za-z0-9\.\-\+]+$", endpoint.host): # is old host valid FQDN?
try:
validate_ipv46_address(endpoint.host) # is old host valid IPv4/6?
except ValidationError:
try:
- if '://' in endpoint.host: # is the old host full uri?
+ if "://" in endpoint.host: # is the old host full uri?
parts = Endpoint.from_uri(endpoint.host)
# can raise exception if the old host is not valid URL
else:
- parts = Endpoint.from_uri('//' + endpoint.host)
+ parts = Endpoint.from_uri("//" + endpoint.host)
# can raise exception if there is no way to parse the old host
if parts.protocol:
if endpoint.protocol and (endpoint.protocol != parts.protocol):
- message = f'has defined protocol ({endpoint.protocol}) and it is not the same as protocol in host ' \
- f'({parts.protocol})'
+ message = f"has defined protocol ({endpoint.protocol}) and it is not the same as protocol in host " \
+ f"({parts.protocol})"
err_log(message, html_log, endpoint_html_log, endpoint)
else:
if change:
@@ -143,20 +143,20 @@ def err_log(message, html_log, endpoint_html_log, endpoint):
if parts.port:
try:
if (endpoint.port is not None) and (int(endpoint.port) != parts.port):
- message = f'has defined port number ({endpoint.port}) and it is not the same as port number in ' \
- f'host ({parts.port})'
+ message = f"has defined port number ({endpoint.port}) and it is not the same as port number in " \
+ f"host ({parts.port})"
err_log(message, html_log, endpoint_html_log, endpoint)
else:
if change:
endpoint.port = parts.port
except ValueError:
- message = f'uses non-numeric port: {endpoint.port}'
+ message = f"uses non-numeric port: {endpoint.port}"
err_log(message, html_log, endpoint_html_log, endpoint)
if parts.path:
if endpoint.path and (endpoint.path != parts.path):
- message = f'has defined path ({endpoint.path}) and it is not the same as path in host ' \
- f'({parts.path})'
+ message = f"has defined path ({endpoint.path}) and it is not the same as path in host " \
+ f"({parts.path})"
err_log(message, html_log, endpoint_html_log, endpoint)
else:
if change:
@@ -164,8 +164,8 @@ def err_log(message, html_log, endpoint_html_log, endpoint):
if parts.query:
if endpoint.query and (endpoint.query != parts.query):
- message = f'has defined query ({endpoint.query}) and it is not the same as query in host ' \
- f'({parts.query})'
+ message = f"has defined query ({endpoint.query}) and it is not the same as query in host " \
+ f"({parts.query})"
err_log(message, html_log, endpoint_html_log, endpoint)
else:
if change:
@@ -173,8 +173,8 @@ def err_log(message, html_log, endpoint_html_log, endpoint):
if parts.fragment:
if endpoint.fragment and (endpoint.fragment != parts.fragment):
- message = f'has defined fragment ({endpoint.fragment}) and it is not the same as fragment in host ' \
- f'({parts.fragment})'
+ message = f"has defined fragment ({endpoint.fragment}) and it is not the same as fragment in host " \
+ f"({parts.fragment})"
err_log(message, html_log, endpoint_html_log, endpoint)
else:
if change:
@@ -196,13 +196,13 @@ def err_log(message, html_log, endpoint_html_log, endpoint):
err_log(ve, html_log, endpoint_html_log, endpoint)
if not endpoint.product:
- err_log('Missing product', html_log, endpoint_html_log, endpoint)
+ err_log("Missing product", html_log, endpoint_html_log, endpoint)
if broken_endpoints:
- logger.error(f'It is not possible to migrate database because there is/are {len(broken_endpoints)} broken endpoint(s). '
- 'Please check logs.')
+ logger.error(f"It is not possible to migrate database because there is/are {len(broken_endpoints)} broken endpoint(s). "
+ "Please check logs.")
else:
- logger.info('There is not broken endpoint.')
+ logger.info("There is not broken endpoint.")
to_be_deleted = set()
for product in Product_model.objects.all().distinct():
@@ -218,7 +218,7 @@ def err_log(message, html_log, endpoint_html_log, endpoint):
query=endpoint.query,
fragment=endpoint.fragment,
product_id=product.pk if product else None,
- ).order_by('id')
+ ).order_by("id")
if ep.count() > 1:
ep_ids = [x.id for x in ep]
@@ -234,13 +234,13 @@ def err_log(message, html_log, endpoint_html_log, endpoint):
.update(endpoint=ep_ids[0])
epss = Endpoint_Status_model.objects\
.filter(endpoint=ep_ids[0])\
- .values('finding')\
- .annotate(total=Count('id'))\
+ .values("finding")\
+ .annotate(total=Count("id"))\
.filter(total__gt=1)
for eps in epss:
esm = Endpoint_Status_model.objects\
- .filter(finding=eps['finding'])\
- .order_by('-last_modified')
+ .filter(finding=eps["finding"])\
+ .order_by("-last_modified")
message = "Endpoint Statuses {} will be replaced by '{}'".format(
[f"last_modified: {x.last_modified} (id={x.pk})" for x in esm[1:]],
f"last_modified: {esm[0].last_modified} (id={esm[0].pk})")
@@ -266,12 +266,12 @@ def validate_endpoints_to_add(endpoints_to_add):
endpoints = endpoints_to_add.split()
for endpoint in endpoints:
try:
- if '://' in endpoint: # is it full uri?
+ if "://" in endpoint: # is it full uri?
endpoint_ins = Endpoint.from_uri(endpoint) # from_uri validate URI format + split to components
else:
# from_uri parse any '//localhost', '//127.0.0.1:80', '//foo.bar/path' correctly
# format doesn't follow RFC 3986 but users use it
- endpoint_ins = Endpoint.from_uri('//' + endpoint)
+ endpoint_ins = Endpoint.from_uri("//" + endpoint)
endpoint_ins.clean()
endpoint_list.append([
endpoint_ins.protocol,
@@ -307,32 +307,32 @@ def save_endpoints_to_add(endpoint_list, product):
return processed_endpoints
-def endpoint_meta_import(file, product, create_endpoints, create_tags, create_meta, origin='UI', request=None):
+def endpoint_meta_import(file, product, create_endpoints, create_tags, create_meta, origin="UI", request=None):
content = file.read()
- sig = content.decode('utf-8-sig')
+ sig = content.decode("utf-8-sig")
content = sig.encode("utf-8")
if isinstance(content, bytes):
- content = content.decode('utf-8')
+ content = content.decode("utf-8")
reader = csv.DictReader(io.StringIO(content))
- if 'hostname' not in reader.fieldnames:
- if origin == 'UI':
+ if "hostname" not in reader.fieldnames:
+ if origin == "UI":
messages.add_message(
request,
messages.ERROR,
'The column "hostname" must be present to map host to Endpoint.',
- extra_tags='alert-danger')
- return HttpResponseRedirect(reverse('import_endpoint_meta', args=(product.id, )))
- elif origin == 'API':
+ extra_tags="alert-danger")
+ return HttpResponseRedirect(reverse("import_endpoint_meta", args=(product.id, )))
+ elif origin == "API":
msg = 'The column "hostname" must be present to map host to Endpoint.'
raise ValidationError(msg)
- keys = [key for key in reader.fieldnames if key != 'hostname']
+ keys = [key for key in reader.fieldnames if key != "hostname"]
for row in reader:
meta = []
endpoint = None
- host = row.get('hostname', None)
+ host = row.get("hostname", None)
if not host:
continue
@@ -363,18 +363,18 @@ def endpoint_meta_import(file, product, create_endpoints, create_tags, create_me
# found existing. Update it
existing_tags.remove(tag)
break
- existing_tags += [item[0] + ':' + item[1]]
+ existing_tags += [item[0] + ":" + item[1]]
# if tags are not supposed to be added, this value remain unchanged
endpoint.tags = existing_tags
endpoint.save()
def remove_broken_endpoint_statuses(apps):
- Endpoint_Status = apps.get_model('dojo', 'endpoint_status')
+ Endpoint_Status = apps.get_model("dojo", "endpoint_status")
broken_eps = Endpoint_Status.objects.filter(Q(endpoint=None) | Q(finding=None))
if broken_eps.count() == 0:
- logger.info('There is no broken endpoint_status')
+ logger.info("There is no broken endpoint_status")
else:
- logger.warning('We identified %s broken endpoint_statuses', broken_eps.count())
+ logger.warning("We identified %s broken endpoint_statuses", broken_eps.count())
deleted = broken_eps.delete()
- logger.warning('We removed: %s', deleted)
+ logger.warning("We removed: %s", deleted)
diff --git a/dojo/endpoint/views.py b/dojo/endpoint/views.py
index b6f838d793..c392f37c19 100644
--- a/dojo/endpoint/views.py
+++ b/dojo/endpoint/views.py
@@ -47,7 +47,7 @@ def process_endpoints_view(request, host_view=False, vulnerable=False):
else:
endpoints = Endpoint.objects.all()
- endpoints = endpoints.prefetch_related('product', 'product__tags', 'tags').distinct()
+ endpoints = endpoints.prefetch_related("product", "product__tags", "tags").distinct()
endpoints = get_authorized_endpoints(Permissions.Endpoint_View, endpoints, request.user)
filter_string_matching = get_system_setting("filter_string_matching", False)
filter_class = EndpointFilterWithoutObjectLookups if filter_string_matching else EndpointFilter
@@ -72,16 +72,16 @@ def process_endpoints_view(request, host_view=False, vulnerable=False):
add_breadcrumb(title=view_name, top_level=not len(request.GET), request=request)
product_tab = None
- if 'product' in request.GET:
- p = request.GET.getlist('product', [])
+ if "product" in request.GET:
+ p = request.GET.getlist("product", [])
if len(p) == 1:
product = get_object_or_404(Product, id=p[0])
user_has_permission_or_403(request.user, product, Permissions.Product_View)
product_tab = Product_Tab(product, view_name, tab="endpoints")
return render(
- request, 'dojo/endpoints.html', {
- 'product_tab': product_tab,
+ request, "dojo/endpoints.html", {
+ "product_tab": product_tab,
"endpoints": paged_endpoints,
"filtered": endpoints,
"name": view_name,
@@ -128,7 +128,7 @@ def process_endpoint_view(request, eid, host_view=False):
active_findings = endpoint.host_active_findings()
else:
endpoints = None
- endpoint_metadata = dict(endpoint.endpoint_meta.values_list('name', 'value'))
+ endpoint_metadata = dict(endpoint.endpoint_meta.values_list("name", "value"))
all_findings = endpoint.findings.all()
active_findings = endpoint.active_findings()
@@ -147,7 +147,7 @@ def process_endpoint_view(request, eid, host_view=False):
closed_findings = Finding.objects.none()
monthly_counts = get_period_counts(all_findings, closed_findings, None, months_between, start_date,
- relative_delta='months')
+ relative_delta="months")
paged_findings = get_page_items(request, active_findings, 25)
vulnerable = active_findings.count() != 0
@@ -156,41 +156,41 @@ def process_endpoint_view(request, eid, host_view=False):
return render(request,
"dojo/view_endpoint.html",
{"endpoint": endpoint,
- 'product_tab': product_tab,
+ "product_tab": product_tab,
"endpoints": endpoints,
"findings": paged_findings,
- 'all_findings': all_findings,
- 'opened_per_month': monthly_counts['opened_per_period'],
- 'endpoint_metadata': endpoint_metadata,
- 'vulnerable': vulnerable,
- 'host_view': host_view,
+ "all_findings": all_findings,
+ "opened_per_month": monthly_counts["opened_per_period"],
+ "endpoint_metadata": endpoint_metadata,
+ "vulnerable": vulnerable,
+ "host_view": host_view,
})
-@user_is_authorized(Endpoint, Permissions.Endpoint_View, 'eid')
+@user_is_authorized(Endpoint, Permissions.Endpoint_View, "eid")
def view_endpoint(request, eid):
return process_endpoint_view(request, eid, host_view=False)
-@user_is_authorized(Endpoint, Permissions.Endpoint_View, 'eid')
+@user_is_authorized(Endpoint, Permissions.Endpoint_View, "eid")
def view_endpoint_host(request, eid):
return process_endpoint_view(request, eid, host_view=True)
-@user_is_authorized(Endpoint, Permissions.Endpoint_View, 'eid')
+@user_is_authorized(Endpoint, Permissions.Endpoint_View, "eid")
def edit_endpoint(request, eid):
endpoint = get_object_or_404(Endpoint, id=eid)
- if request.method == 'POST':
+ if request.method == "POST":
form = EditEndpointForm(request.POST, instance=endpoint)
if form.is_valid():
- logger.debug('saving endpoint')
+ logger.debug("saving endpoint")
endpoint = form.save()
messages.add_message(request,
messages.SUCCESS,
- 'Endpoint updated successfully.',
- extra_tags='alert-success')
- return HttpResponseRedirect(reverse('view_endpoint', args=(endpoint.id,)))
+ "Endpoint updated successfully.",
+ extra_tags="alert-success")
+ return HttpResponseRedirect(reverse("view_endpoint", args=(endpoint.id,)))
else:
add_breadcrumb(parent=endpoint, title="Edit", top_level=False, request=request)
form = EditEndpointForm(instance=endpoint)
@@ -200,28 +200,28 @@ def edit_endpoint(request, eid):
return render(request,
"dojo/edit_endpoint.html",
{"endpoint": endpoint,
- 'product_tab': product_tab,
+ "product_tab": product_tab,
"form": form,
})
-@user_is_authorized(Endpoint, Permissions.Endpoint_Delete, 'eid')
+@user_is_authorized(Endpoint, Permissions.Endpoint_Delete, "eid")
def delete_endpoint(request, eid):
endpoint = get_object_or_404(Endpoint, pk=eid)
product = endpoint.product
form = DeleteEndpointForm(instance=endpoint)
- if request.method == 'POST':
- if 'id' in request.POST and str(endpoint.id) == request.POST['id']:
+ if request.method == "POST":
+ if "id" in request.POST and str(endpoint.id) == request.POST["id"]:
form = DeleteEndpointForm(request.POST, instance=endpoint)
if form.is_valid():
product = endpoint.product
endpoint.delete()
messages.add_message(request,
messages.SUCCESS,
- 'Endpoint and relationships removed.',
- extra_tags='alert-success')
- return HttpResponseRedirect(reverse('view_product', args=(product.id,)))
+ "Endpoint and relationships removed.",
+ extra_tags="alert-success")
+ return HttpResponseRedirect(reverse("view_product", args=(product.id,)))
collector = NestedObjects(using=DEFAULT_DB_ALIAS)
collector.collect([endpoint])
@@ -229,135 +229,135 @@ def delete_endpoint(request, eid):
product_tab = Product_Tab(endpoint.product, "Delete Endpoint", tab="endpoints")
- return render(request, 'dojo/delete_endpoint.html',
- {'endpoint': endpoint,
- 'product_tab': product_tab,
- 'form': form,
- 'rels': rels,
+ return render(request, "dojo/delete_endpoint.html",
+ {"endpoint": endpoint,
+ "product_tab": product_tab,
+ "form": form,
+ "rels": rels,
})
-@user_is_authorized(Product, Permissions.Endpoint_Add, 'pid')
+@user_is_authorized(Product, Permissions.Endpoint_Add, "pid")
def add_endpoint(request, pid):
product = get_object_or_404(Product, id=pid)
- template = 'dojo/add_endpoint.html'
+ template = "dojo/add_endpoint.html"
form = AddEndpointForm(product=product)
- if request.method == 'POST':
+ if request.method == "POST":
form = AddEndpointForm(request.POST, product=product)
if form.is_valid():
endpoints = form.save()
- tags = request.POST.get('tags')
+ tags = request.POST.get("tags")
for e in endpoints:
e.tags = tags
e.save()
messages.add_message(request,
messages.SUCCESS,
- 'Endpoint added successfully.',
- extra_tags='alert-success')
- return HttpResponseRedirect(reverse('endpoint') + "?product=" + pid)
+ "Endpoint added successfully.",
+ extra_tags="alert-success")
+ return HttpResponseRedirect(reverse("endpoint") + "?product=" + pid)
product_tab = Product_Tab(product, "Add Endpoint", tab="endpoints")
return render(request, template, {
- 'product_tab': product_tab,
- 'name': 'Add Endpoint',
- 'form': form})
+ "product_tab": product_tab,
+ "name": "Add Endpoint",
+ "form": form})
def add_product_endpoint(request):
form = AddEndpointForm()
- if request.method == 'POST':
+ if request.method == "POST":
form = AddEndpointForm(request.POST)
if form.is_valid():
user_has_permission_or_403(request.user, form.product, Permissions.Endpoint_Add)
endpoints = form.save()
- tags = request.POST.get('tags')
+ tags = request.POST.get("tags")
for e in endpoints:
e.tags = tags
e.save()
messages.add_message(request,
messages.SUCCESS,
- 'Endpoint added successfully.',
- extra_tags='alert-success')
- return HttpResponseRedirect(reverse('endpoint') + f"?product={form.product.id}")
+ "Endpoint added successfully.",
+ extra_tags="alert-success")
+ return HttpResponseRedirect(reverse("endpoint") + f"?product={form.product.id}")
add_breadcrumb(title="Add Endpoint", top_level=False, request=request)
return render(request,
- 'dojo/add_endpoint.html',
- {'name': 'Add Endpoint',
- 'form': form,
+ "dojo/add_endpoint.html",
+ {"name": "Add Endpoint",
+ "form": form,
})
-@user_is_authorized(Endpoint, Permissions.Endpoint_Edit, 'eid')
+@user_is_authorized(Endpoint, Permissions.Endpoint_Edit, "eid")
def add_meta_data(request, eid):
endpoint = Endpoint.objects.get(id=eid)
- if request.method == 'POST':
+ if request.method == "POST":
form = DojoMetaDataForm(request.POST, instance=DojoMeta(endpoint=endpoint))
if form.is_valid():
form.save()
messages.add_message(request,
messages.SUCCESS,
- 'Metadata added successfully.',
- extra_tags='alert-success')
- if 'add_another' in request.POST:
- return HttpResponseRedirect(reverse('add_endpoint_meta_data', args=(eid,)))
+ "Metadata added successfully.",
+ extra_tags="alert-success")
+ if "add_another" in request.POST:
+ return HttpResponseRedirect(reverse("add_endpoint_meta_data", args=(eid,)))
else:
- return HttpResponseRedirect(reverse('view_endpoint', args=(eid,)))
+ return HttpResponseRedirect(reverse("view_endpoint", args=(eid,)))
else:
form = DojoMetaDataForm()
add_breadcrumb(parent=endpoint, title="Add Metadata", top_level=False, request=request)
product_tab = Product_Tab(endpoint.product, "Add Metadata", tab="endpoints")
return render(request,
- 'dojo/add_endpoint_meta_data.html',
- {'form': form,
- 'product_tab': product_tab,
- 'endpoint': endpoint,
+ "dojo/add_endpoint_meta_data.html",
+ {"form": form,
+ "product_tab": product_tab,
+ "endpoint": endpoint,
})
-@user_is_authorized(Endpoint, Permissions.Endpoint_Edit, 'eid')
+@user_is_authorized(Endpoint, Permissions.Endpoint_Edit, "eid")
def edit_meta_data(request, eid):
endpoint = Endpoint.objects.get(id=eid)
- if request.method == 'POST':
+ if request.method == "POST":
for key, value in request.POST.items():
- if key.startswith('cfv_'):
- cfv_id = int(key.split('_')[1])
+ if key.startswith("cfv_"):
+ cfv_id = int(key.split("_")[1])
cfv = get_object_or_404(DojoMeta, id=cfv_id)
value = value.strip()
if value:
cfv.value = value
cfv.save()
- if key.startswith('delete_'):
- cfv_id = int(key.split('_')[2])
+ if key.startswith("delete_"):
+ cfv_id = int(key.split("_")[2])
cfv = get_object_or_404(DojoMeta, id=cfv_id)
cfv.delete()
messages.add_message(request,
messages.SUCCESS,
- 'Metadata edited successfully.',
- extra_tags='alert-success')
- return HttpResponseRedirect(reverse('view_endpoint', args=(eid,)))
+ "Metadata edited successfully.",
+ extra_tags="alert-success")
+ return HttpResponseRedirect(reverse("view_endpoint", args=(eid,)))
product_tab = Product_Tab(endpoint.product, "Edit Metadata", tab="endpoints")
return render(request,
- 'dojo/edit_endpoint_meta_data.html',
- {'endpoint': endpoint,
- 'product_tab': product_tab,
+ "dojo/edit_endpoint_meta_data.html",
+ {"endpoint": endpoint,
+ "product_tab": product_tab,
})
# bulk mitigate and delete are combined, so we can't have the nice user_is_authorized decorator
def endpoint_bulk_update_all(request, pid=None):
if request.method == "POST":
- endpoints_to_update = request.POST.getlist('endpoints_to_update')
+ endpoints_to_update = request.POST.getlist("endpoints_to_update")
endpoints = Endpoint.objects.filter(id__in=endpoints_to_update).order_by("endpoint_meta__product__id")
total_endpoint_count = endpoints.count()
- if request.POST.get('delete_bulk_endpoints') and endpoints_to_update:
+ if request.POST.get("delete_bulk_endpoints") and endpoints_to_update:
if pid is not None:
product = get_object_or_404(Product, id=pid)
@@ -374,13 +374,13 @@ def endpoint_bulk_update_all(request, pid=None):
calculate_grade(prod)
if skipped_endpoint_count > 0:
- add_error_message_to_response(f'Skipped deletion of {skipped_endpoint_count} endpoints because you are not authorized.')
+ add_error_message_to_response(f"Skipped deletion of {skipped_endpoint_count} endpoints because you are not authorized.")
if deleted_endpoint_count > 0:
messages.add_message(request,
messages.SUCCESS,
- f'Bulk delete of {deleted_endpoint_count} endpoints was successful.',
- extra_tags='alert-success')
+ f"Bulk delete of {deleted_endpoint_count} endpoints was successful.",
+ extra_tags="alert-success")
else:
if endpoints_to_update:
@@ -394,7 +394,7 @@ def endpoint_bulk_update_all(request, pid=None):
updated_endpoint_count = endpoints.count()
if skipped_endpoint_count > 0:
- add_error_message_to_response(f'Skipped mitigation of {skipped_endpoint_count} endpoints because you are not authorized.')
+ add_error_message_to_response(f"Skipped mitigation of {skipped_endpoint_count} endpoints because you are not authorized.")
eps_count = Endpoint_Status.objects.filter(endpoint__in=endpoints).update(
mitigated=True,
@@ -406,22 +406,22 @@ def endpoint_bulk_update_all(request, pid=None):
if updated_endpoint_count > 0:
messages.add_message(request,
messages.SUCCESS,
- f'Bulk mitigation of {updated_endpoint_count} endpoints ({eps_count} endpoint statuses) was successful.',
- extra_tags='alert-success')
+ f"Bulk mitigation of {updated_endpoint_count} endpoints ({eps_count} endpoint statuses) was successful.",
+ extra_tags="alert-success")
else:
messages.add_message(request,
messages.ERROR,
- 'Unable to process bulk update. Required fields were not selected.',
- extra_tags='alert-danger')
- return HttpResponseRedirect(reverse('endpoint', args=()))
+ "Unable to process bulk update. Required fields were not selected.",
+ extra_tags="alert-danger")
+ return HttpResponseRedirect(reverse("endpoint", args=()))
-@user_is_authorized(Finding, Permissions.Finding_Edit, 'fid')
+@user_is_authorized(Finding, Permissions.Finding_Edit, "fid")
def endpoint_status_bulk_update(request, fid):
if request.method == "POST":
post = request.POST
- endpoints_to_update = post.getlist('endpoints_to_update')
- status_list = ['active', 'false_positive', 'mitigated', 'out_of_scope', 'risk_accepted']
+ endpoints_to_update = post.getlist("endpoints_to_update")
+ status_list = ["active", "false_positive", "mitigated", "out_of_scope", "risk_accepted"]
enable = [item for item in status_list if item in list(post.keys())]
if endpoints_to_update and len(enable) > 0:
@@ -433,7 +433,7 @@ def endpoint_status_bulk_update(request, fid):
for status in status_list:
if status in enable:
endpoint_status.__setattr__(status, True)
- if status == 'mitigated':
+ if status == "mitigated":
endpoint_status.mitigated_by = request.user
endpoint_status.mitigated_time = timezone.now()
else:
@@ -442,70 +442,70 @@ def endpoint_status_bulk_update(request, fid):
endpoint_status.save()
messages.add_message(request,
messages.SUCCESS,
- 'Bulk edit of endpoints was successful. Check to make sure it is what you intended.',
- extra_tags='alert-success')
+ "Bulk edit of endpoints was successful. Check to make sure it is what you intended.",
+ extra_tags="alert-success")
else:
messages.add_message(request,
messages.ERROR,
- 'Unable to process bulk update. Required fields were not selected.',
- extra_tags='alert-danger')
- return redirect(request, post['return_url'])
+ "Unable to process bulk update. Required fields were not selected.",
+ extra_tags="alert-danger")
+ return redirect(request, post["return_url"])
def prefetch_for_endpoints(endpoints):
if isinstance(endpoints, QuerySet):
- endpoints = endpoints.prefetch_related('product', 'tags', 'product__tags')
- endpoints = endpoints.annotate(active_finding_count=Count('finding__id', filter=Q(finding__active=True)))
+ endpoints = endpoints.prefetch_related("product", "tags", "product__tags")
+ endpoints = endpoints.annotate(active_finding_count=Count("finding__id", filter=Q(finding__active=True)))
else:
- logger.debug('unable to prefetch because query was already executed')
+ logger.debug("unable to prefetch because query was already executed")
return endpoints
def migrate_endpoints_view(request):
- view_name = 'Migrate endpoints'
+ view_name = "Migrate endpoints"
- html_log = clean_hosts_run(apps=apps, change=(request.method == 'POST'))
+ html_log = clean_hosts_run(apps=apps, change=(request.method == "POST"))
return render(
- request, 'dojo/migrate_endpoints.html', {
- 'product_tab': None,
+ request, "dojo/migrate_endpoints.html", {
+ "product_tab": None,
"name": view_name,
"html_log": html_log,
})
-@user_is_authorized(Product, Permissions.Endpoint_Edit, 'pid')
+@user_is_authorized(Product, Permissions.Endpoint_Edit, "pid")
def import_endpoint_meta(request, pid):
product = get_object_or_404(Product, id=pid)
form = ImportEndpointMetaForm()
- if request.method == 'POST':
+ if request.method == "POST":
form = ImportEndpointMetaForm(request.POST, request.FILES)
if form.is_valid():
- file = request.FILES.get('file', None)
+ file = request.FILES.get("file", None)
# Make sure size is not too large
if file and is_scan_file_too_large(file):
messages.add_message(
request,
messages.ERROR,
f"Report file is too large. Maximum supported size is {settings.SCAN_FILE_MAX_SIZE} MB",
- extra_tags='alert-danger')
+ extra_tags="alert-danger")
- create_endpoints = form.cleaned_data['create_endpoints']
- create_tags = form.cleaned_data['create_tags']
- create_dojo_meta = form.cleaned_data['create_dojo_meta']
+ create_endpoints = form.cleaned_data["create_endpoints"]
+ create_tags = form.cleaned_data["create_tags"]
+ create_dojo_meta = form.cleaned_data["create_dojo_meta"]
try:
- endpoint_meta_import(file, product, create_endpoints, create_tags, create_dojo_meta, origin='UI', request=request)
+ endpoint_meta_import(file, product, create_endpoints, create_tags, create_dojo_meta, origin="UI", request=request)
except Exception as e:
logger.exception(e)
- add_error_message_to_response(f'An exception error occurred during the report import:{str(e)}')
- return HttpResponseRedirect(reverse('endpoint') + "?product=" + pid)
+ add_error_message_to_response(f"An exception error occurred during the report import:{str(e)}")
+ return HttpResponseRedirect(reverse("endpoint") + "?product=" + pid)
add_breadcrumb(title="Endpoint Meta Importer", top_level=False, request=request)
product_tab = Product_Tab(product, title="Endpoint Meta Importer", tab="endpoints")
- return render(request, 'dojo/endpoint_meta_importer.html', {
- 'product_tab': product_tab,
- 'form': form,
+ return render(request, "dojo/endpoint_meta_importer.html", {
+ "product_tab": product_tab,
+ "form": form,
})
diff --git a/dojo/engagement/queries.py b/dojo/engagement/queries.py
index ea116b5745..9d8e9b6ae4 100644
--- a/dojo/engagement/queries.py
+++ b/dojo/engagement/queries.py
@@ -19,19 +19,19 @@ def get_authorized_engagements(permission):
roles = get_roles_for_permission(permission)
authorized_product_type_roles = Product_Type_Member.objects.filter(
- product_type=OuterRef('product__prod_type_id'),
+ product_type=OuterRef("product__prod_type_id"),
user=user,
role__in=roles)
authorized_product_roles = Product_Member.objects.filter(
- product=OuterRef('product_id'),
+ product=OuterRef("product_id"),
user=user,
role__in=roles)
authorized_product_type_groups = Product_Type_Group.objects.filter(
- product_type=OuterRef('product__prod_type_id'),
+ product_type=OuterRef("product__prod_type_id"),
group__users=user,
role__in=roles)
authorized_product_groups = Product_Group.objects.filter(
- product=OuterRef('product_id'),
+ product=OuterRef("product_id"),
group__users=user,
role__in=roles)
engagements = Engagement.objects.annotate(
diff --git a/dojo/engagement/services.py b/dojo/engagement/services.py
index 0331e87c5b..f11963867a 100644
--- a/dojo/engagement/services.py
+++ b/dojo/engagement/services.py
@@ -12,7 +12,7 @@
def close_engagement(eng):
eng.active = False
- eng.status = 'Completed'
+ eng.status = "Completed"
eng.save()
if jira_helper.get_jira_project(eng):
@@ -21,7 +21,7 @@ def close_engagement(eng):
def reopen_engagement(eng):
eng.active = True
- eng.status = 'In Progress'
+ eng.status = "In Progress"
eng.save()
diff --git a/dojo/engagement/signals.py b/dojo/engagement/signals.py
index 7a8e3352ba..3c5266fda0 100644
--- a/dojo/engagement/signals.py
+++ b/dojo/engagement/signals.py
@@ -13,9 +13,9 @@
@receiver(post_save, sender=Engagement)
def engagement_post_save(sender, instance, created, **kwargs):
if created:
- title = _('Engagement created for "%(product)s": %(name)s') % {'product': instance.product, 'name': instance.name}
- create_notification(event='engagement_added', title=title, engagement=instance, product=instance.product,
- url=reverse('view_engagement', args=(instance.id,)))
+ title = _('Engagement created for "%(product)s": %(name)s') % {"product": instance.product, "name": instance.name}
+ create_notification(event="engagement_added", title=title, engagement=instance, product=instance.product,
+ url=reverse("view_engagement", args=(instance.id,)))
@receiver(pre_save, sender=Engagement)
@@ -23,16 +23,16 @@ def engagement_pre_save(sender, instance, **kwargs):
old = sender.objects.filter(pk=instance.pk).first()
if old and instance.status != old.status:
if instance.status in ["Cancelled", "Completed"]:
- create_notification(event='engagement_closed',
- title=_('Closure of %s') % instance.name,
+ create_notification(event="engagement_closed",
+ title=_("Closure of %s") % instance.name,
description=_('The engagement "%s" was closed') % (instance.name),
- engagement=instance, url=reverse('engagement_all_findings', args=(instance.id, )))
+ engagement=instance, url=reverse("engagement_all_findings", args=(instance.id, )))
elif instance.status in ["In Progress"] and old.status not in ["Not Started"]:
- create_notification(event='engagement_reopened',
- title=_('Reopening of %s') % instance.name,
+ create_notification(event="engagement_reopened",
+ title=_("Reopening of %s") % instance.name,
engagement=instance,
description=_('The engagement "%s" was reopened') % (instance.name),
- url=reverse('view_engagement', args=(instance.id, )))
+ url=reverse("view_engagement", args=(instance.id, )))
@receiver(post_delete, sender=Engagement)
@@ -41,17 +41,17 @@ def engagement_post_delete(sender, instance, using, origin, **kwargs):
if settings.ENABLE_AUDITLOG:
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
- content_type=ContentType.objects.get(app_label='dojo', model='engagement'),
+ content_type=ContentType.objects.get(app_label="dojo", model="engagement"),
object_id=instance.id,
)
description = _('The engagement "%(name)s" was deleted by %(user)s') % {
- 'name': instance.name, 'user': le.actor}
+ "name": instance.name, "user": le.actor}
else:
- description = _('The engagement "%(name)s" was deleted') % {'name': instance.name}
- create_notification(event='engagement_deleted', # template does not exists, it will default to "other" but this event name needs to stay because of unit testing
- title=_('Deletion of %(name)s') % {'name': instance.name},
+ description = _('The engagement "%(name)s" was deleted') % {"name": instance.name}
+ create_notification(event="engagement_deleted", # template does not exists, it will default to "other" but this event name needs to stay because of unit testing
+ title=_("Deletion of %(name)s") % {"name": instance.name},
description=description,
product=instance.product,
- url=reverse('view_product', args=(instance.product.id, )),
+ url=reverse("view_product", args=(instance.product.id, )),
recipients=[instance.lead],
icon="exclamation-triangle")
diff --git a/dojo/engagement/urls.py b/dojo/engagement/urls.py
index df0a7f5af2..c70bb56a95 100644
--- a/dojo/engagement/urls.py
+++ b/dojo/engagement/urls.py
@@ -4,56 +4,56 @@
urlpatterns = [
# engagements and calendar
- re_path(r'^calendar$', views.engagement_calendar, name='calendar'),
- re_path(r'^calendar/engagements$', views.engagement_calendar, name='engagement_calendar'),
- re_path(r'^engagement$', views.engagements, {'view': 'active'}, name='engagement'),
- re_path(r'^engagements_all$', views.engagements_all, name='engagements_all'),
- re_path(r'^engagement/all$', views.engagements, {'view': 'all'}, name='all_engagements'),
- re_path(r'^engagement/active$', views.engagements, {'view': 'active'}, name='active_engagements'),
- re_path(r'^engagement/(?P<eid>\d+)$', views.ViewEngagement.as_view(),
- name='view_engagement'),
- re_path(r'^engagement/(?P<eid>\d+)/ics$', views.engagement_ics,
- name='engagement_ics'),
- re_path(r'^engagement/(?P<eid>\d+)/edit$', views.edit_engagement,
- name='edit_engagement'),
- re_path(r'^engagement/(?P<eid>\d+)/delete$', views.delete_engagement,
- name='delete_engagement'),
- re_path(r'^engagement/(?P<eid>\d+)/copy$', views.copy_engagement,
- name='copy_engagement'),
- re_path(r'^engagement/(?P<eid>\d+)/add_tests$', views.add_tests,
- name='add_tests'),
+ re_path(r"^calendar$", views.engagement_calendar, name="calendar"),
+ re_path(r"^calendar/engagements$", views.engagement_calendar, name="engagement_calendar"),
+ re_path(r"^engagement$", views.engagements, {"view": "active"}, name="engagement"),
+ re_path(r"^engagements_all$", views.engagements_all, name="engagements_all"),
+ re_path(r"^engagement/all$", views.engagements, {"view": "all"}, name="all_engagements"),
+ re_path(r"^engagement/active$", views.engagements, {"view": "active"}, name="active_engagements"),
+ re_path(r"^engagement/(?P\d+)$", views.ViewEngagement.as_view(),
+ name="view_engagement"),
+ re_path(r"^engagement/(?P\d+)/ics$", views.engagement_ics,
+ name="engagement_ics"),
+ re_path(r"^engagement/(?P\d+)/edit$", views.edit_engagement,
+ name="edit_engagement"),
+ re_path(r"^engagement/(?P\d+)/delete$", views.delete_engagement,
+ name="delete_engagement"),
+ re_path(r"^engagement/(?P\d+)/copy$", views.copy_engagement,
+ name="copy_engagement"),
+ re_path(r"^engagement/(?P\d+)/add_tests$", views.add_tests,
+ name="add_tests"),
re_path(
- r'^engagement/(?P<eid>\d+)/import_scan_results$',
+ r"^engagement/(?P<eid>\d+)/import_scan_results$",
views.ImportScanResultsView.as_view(),
- name='import_scan_results'),
- re_path(r'^engagement/(?P<eid>\d+)/close$', views.close_eng,
- name='close_engagement'),
- re_path(r'^engagement/(?P<eid>\d+)/reopen$', views.reopen_eng,
- name='reopen_engagement'),
- re_path(r'^engagement/(?P<eid>\d+)/complete_checklist$',
- views.complete_checklist, name='complete_checklist'),
- re_path(r'^engagement/(?P<eid>\d+)/risk_acceptance/add$',
- views.add_risk_acceptance, name='add_risk_acceptance'),
- re_path(r'^engagement/(?P<eid>\d+)/risk_acceptance/add/(?P<fid>\d+)$',
- views.add_risk_acceptance, name='add_risk_acceptance'),
- re_path(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)$',
- views.view_risk_acceptance, name='view_risk_acceptance'),
- re_path(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/edit$',
- views.edit_risk_acceptance, name='edit_risk_acceptance'),
- re_path(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/expire$',
- views.expire_risk_acceptance, name='expire_risk_acceptance'),
- re_path(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/reinstate$',
- views.reinstate_risk_acceptance, name='reinstate_risk_acceptance'),
- re_path(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/delete$',
- views.delete_risk_acceptance, name='delete_risk_acceptance'),
- re_path(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/download$',
- views.download_risk_acceptance, name='download_risk_acceptance'),
- re_path(r'^engagement/(?P<eid>\d+)/threatmodel$', views.view_threatmodel,
- name='view_threatmodel'),
- re_path(r'^engagement/(?P<eid>\d+)/threatmodel/upload$',
- views.upload_threatmodel, name='upload_threatmodel'),
- re_path(r'^engagement/csv_export$',
- views.csv_export, name='engagement_csv_export'),
- re_path(r'^engagement/excel_export$',
- views.excel_export, name='engagement_excel_export'),
+ name="import_scan_results"),
+ re_path(r"^engagement/(?P\d+)/close$", views.close_eng,
+ name="close_engagement"),
+ re_path(r"^engagement/(?P\d+)/reopen$", views.reopen_eng,
+ name="reopen_engagement"),
+ re_path(r"^engagement/(?P\d+)/complete_checklist$",
+ views.complete_checklist, name="complete_checklist"),
+ re_path(r"^engagement/(?P\d+)/risk_acceptance/add$",
+ views.add_risk_acceptance, name="add_risk_acceptance"),
+ re_path(r"^engagement/(?P\d+)/risk_acceptance/add/(?P\d+)$",
+ views.add_risk_acceptance, name="add_risk_acceptance"),
+ re_path(r"^engagement/(?P\d+)/risk_acceptance/(?P\d+)$",
+ views.view_risk_acceptance, name="view_risk_acceptance"),
+ re_path(r"^engagement/(?P\d+)/risk_acceptance/(?P\d+)/edit$",
+ views.edit_risk_acceptance, name="edit_risk_acceptance"),
+ re_path(r"^engagement/(?P\d+)/risk_acceptance/(?P\d+)/expire$",
+ views.expire_risk_acceptance, name="expire_risk_acceptance"),
+ re_path(r"^engagement/(?P\d+)/risk_acceptance/(?P\d+)/reinstate$",
+ views.reinstate_risk_acceptance, name="reinstate_risk_acceptance"),
+ re_path(r"^engagement/(?P\d+)/risk_acceptance/(?P\d+)/delete$",
+ views.delete_risk_acceptance, name="delete_risk_acceptance"),
+ re_path(r"^engagement/(?P\d+)/risk_acceptance/(?P\d+)/download$",
+ views.download_risk_acceptance, name="download_risk_acceptance"),
+ re_path(r"^engagement/(?P\d+)/threatmodel$", views.view_threatmodel,
+ name="view_threatmodel"),
+ re_path(r"^engagement/(?P\d+)/threatmodel/upload$",
+ views.upload_threatmodel, name="upload_threatmodel"),
+ re_path(r"^engagement/csv_export$",
+ views.csv_export, name="engagement_csv_export"),
+ re_path(r"^engagement/excel_export$",
+ views.excel_export, name="engagement_excel_export"),
]
diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py
index f28a0863fb..a6e47589d0 100644
--- a/dojo/engagement/views.py
+++ b/dojo/engagement/views.py
@@ -113,52 +113,52 @@
@vary_on_cookie
def engagement_calendar(request):
- if not get_system_setting('enable_calendar'):
+ if not get_system_setting("enable_calendar"):
raise Resolver404
- if 'lead' not in request.GET or '0' in request.GET.getlist('lead'):
+ if "lead" not in request.GET or "0" in request.GET.getlist("lead"):
engagements = get_authorized_engagements(Permissions.Engagement_View)
else:
filters = []
- leads = request.GET.getlist('lead', '')
- if '-1' in request.GET.getlist('lead'):
- leads.remove('-1')
+ leads = request.GET.getlist("lead", "")
+ if "-1" in request.GET.getlist("lead"):
+ leads.remove("-1")
filters.append(Q(lead__isnull=True))
filters.append(Q(lead__in=leads))
engagements = get_authorized_engagements(Permissions.Engagement_View).filter(reduce(operator.or_, filters))
- engagements = engagements.select_related('lead')
- engagements = engagements.prefetch_related('product')
+ engagements = engagements.select_related("lead")
+ engagements = engagements.prefetch_related("product")
add_breadcrumb(
title="Engagement Calendar", top_level=True, request=request)
return render(
- request, 'dojo/calendar.html', {
- 'caltype': 'engagements',
- 'leads': request.GET.getlist('lead', ''),
- 'engagements': engagements,
- 'users': get_authorized_users(Permissions.Engagement_View),
+ request, "dojo/calendar.html", {
+ "caltype": "engagements",
+ "leads": request.GET.getlist("lead", ""),
+ "engagements": engagements,
+ "users": get_authorized_users(Permissions.Engagement_View),
})
def get_filtered_engagements(request, view):
- if view not in ['all', 'active']:
- msg = f'View {view} is not allowed'
+ if view not in ["all", "active"]:
+ msg = f"View {view} is not allowed"
raise ValidationError(msg)
- engagements = get_authorized_engagements(Permissions.Engagement_View).order_by('-target_start')
+ engagements = get_authorized_engagements(Permissions.Engagement_View).order_by("-target_start")
- if view == 'active':
+ if view == "active":
engagements = engagements.filter(active=True)
- engagements = engagements.select_related('product', 'product__prod_type') \
- .prefetch_related('lead', 'tags', 'product__tags')
+ engagements = engagements.select_related("product", "product__prod_type") \
+ .prefetch_related("lead", "tags", "product__tags")
if System_Settings.objects.get().enable_jira:
engagements = engagements.prefetch_related(
- 'jira_project__jira_instance',
- 'product__jira_project_set__jira_instance',
+ "jira_project__jira_instance",
+ "product__jira_project_set__jira_instance",
)
filter_string_matching = get_system_setting("filter_string_matching", False)
@@ -172,13 +172,13 @@ def get_test_counts(engagements):
# Get the test counts per engagement. As a separate query, this is much
# faster than annotating the above `engagements` query.
engagement_test_counts = {
- test['engagement']: test['test_count']
+ test["engagement"]: test["test_count"]
for test in Test.objects.filter(
engagement__in=engagements,
).values(
- 'engagement',
+ "engagement",
).annotate(
- test_count=Count('engagement'),
+ test_count=Count("engagement"),
)
}
return engagement_test_counts
@@ -187,13 +187,13 @@ def get_test_counts(engagements):
def engagements(request, view):
if not view:
- view = 'active'
+ view = "active"
filtered_engagements = get_filtered_engagements(request, view)
engs = get_page_items(request, filtered_engagements.qs, 25)
- product_name_words = sorted(get_authorized_products(Permissions.Product_View).values_list('name', flat=True))
- engagement_name_words = sorted(get_authorized_engagements(Permissions.Engagement_View).values_list('name', flat=True).distinct())
+ product_name_words = sorted(get_authorized_products(Permissions.Product_View).values_list("name", flat=True))
+ engagement_name_words = sorted(get_authorized_engagements(Permissions.Engagement_View).values_list("name", flat=True).distinct())
add_breadcrumb(
title=f"{view.capitalize()} Engagements",
@@ -201,13 +201,13 @@ def engagements(request, view):
request=request)
return render(
- request, 'dojo/engagement.html', {
- 'engagements': engs,
- 'engagement_test_counts': get_test_counts(filtered_engagements.qs),
- 'filter_form': filtered_engagements.form,
- 'product_name_words': product_name_words,
- 'engagement_name_words': engagement_name_words,
- 'view': view.capitalize(),
+ request, "dojo/engagement.html", {
+ "engagements": engs,
+ "engagement_test_counts": get_test_counts(filtered_engagements.qs),
+ "filter_form": filtered_engagements.form,
+ "product_name_words": product_name_words,
+ "engagement_name_words": engagement_name_words,
+ "view": view.capitalize(),
})
@@ -217,23 +217,23 @@ def engagements_all(request):
products_with_engagements = products_with_engagements.filter(~Q(engagement=None)).distinct()
# count using prefetch instead of just using 'engagement__set_test_test` to avoid loading all test in memory just to count them
- filter_string_matching = get_system_setting('filter_string_matching', False)
+ filter_string_matching = get_system_setting("filter_string_matching", False)
products_filter_class = ProductEngagementsFilterWithoutObjectLookups if filter_string_matching else ProductEngagementsFilter
- engagement_query = Engagement.objects.annotate(test_count=Count('test__id'))
+ engagement_query = Engagement.objects.annotate(test_count=Count("test__id"))
filter_qs = products_with_engagements.prefetch_related(
- Prefetch('engagement_set', queryset=products_filter_class(request.GET, engagement_query).qs),
+ Prefetch("engagement_set", queryset=products_filter_class(request.GET, engagement_query).qs),
)
filter_qs = filter_qs.prefetch_related(
- 'engagement_set__tags',
- 'prod_type',
- 'engagement_set__lead',
- 'tags',
+ "engagement_set__tags",
+ "prod_type",
+ "engagement_set__lead",
+ "tags",
)
if System_Settings.objects.get().enable_jira:
filter_qs = filter_qs.prefetch_related(
- 'engagement_set__jira_project__jira_instance',
- 'jira_project_set__jira_instance',
+ "engagement_set__jira_project__jira_instance",
+ "jira_project_set__jira_instance",
)
filter_class = EngagementFilterWithoutObjectLookups if filter_string_matching else EngagementFilter
filtered = filter_class(
@@ -243,8 +243,8 @@ def engagements_all(request):
prods = get_page_items(request, filtered.qs, 25)
prods.paginator.count = sum(len(prod.engagement_set.all()) for prod in prods)
- name_words = products_with_engagements.values_list('name', flat=True)
- eng_words = get_authorized_engagements(Permissions.Engagement_View).values_list('name', flat=True).distinct()
+ name_words = products_with_engagements.values_list("name", flat=True)
+ eng_words = get_authorized_engagements(Permissions.Engagement_View).values_list("name", flat=True).distinct()
add_breadcrumb(
title="All Engagements",
@@ -252,15 +252,15 @@ def engagements_all(request):
request=request)
return render(
- request, 'dojo/engagements_all.html', {
- 'products': prods,
- 'filter_form': filtered.form,
- 'name_words': sorted(set(name_words)),
- 'eng_words': sorted(set(eng_words)),
+ request, "dojo/engagements_all.html", {
+ "products": prods,
+ "filter_form": filtered.form,
+ "name_words": sorted(set(name_words)),
+ "eng_words": sorted(set(eng_words)),
})
-@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid")
def edit_engagement(request, eid):
engagement = Engagement.objects.get(pk=eid)
is_ci_cd = engagement.engagement_type == "CI/CD"
@@ -268,14 +268,14 @@ def edit_engagement(request, eid):
jira_epic_form = None
jira_project = None
- if request.method == 'POST':
+ if request.method == "POST":
form = EngForm(request.POST, instance=engagement, cicd=is_ci_cd, product=engagement.product, user=request.user)
jira_project = jira_helper.get_jira_project(engagement, use_inheritance=False)
if form.is_valid():
# first save engagement details
- new_status = form.cleaned_data.get('status')
- engagement.product = form.cleaned_data.get('product')
+ new_status = form.cleaned_data.get("status")
+ engagement.product = form.cleaned_data.get("product")
engagement = form.save(commit=False)
if (new_status == "Cancelled" or new_status == "Completed"):
engagement.active = False
@@ -287,80 +287,80 @@ def edit_engagement(request, eid):
messages.add_message(
request,
messages.SUCCESS,
- 'Engagement updated successfully.',
- extra_tags='alert-success')
+ "Engagement updated successfully.",
+ extra_tags="alert-success")
- success, jira_project_form = jira_helper.process_jira_project_form(request, instance=jira_project, target='engagement', engagement=engagement, product=engagement.product)
+ success, jira_project_form = jira_helper.process_jira_project_form(request, instance=jira_project, target="engagement", engagement=engagement, product=engagement.product)
error = not success
success, jira_epic_form = jira_helper.process_jira_epic_form(request, engagement=engagement)
error = error or not success
if not error:
- if '_Add Tests' in request.POST:
+ if "_Add Tests" in request.POST:
return HttpResponseRedirect(
- reverse('add_tests', args=(engagement.id, )))
+ reverse("add_tests", args=(engagement.id, )))
else:
return HttpResponseRedirect(
- reverse('view_engagement', args=(engagement.id, )))
+ reverse("view_engagement", args=(engagement.id, )))
else:
logger.debug(form.errors)
else:
- form = EngForm(initial={'product': engagement.product}, instance=engagement, cicd=is_ci_cd, product=engagement.product, user=request.user)
+ form = EngForm(initial={"product": engagement.product}, instance=engagement, cicd=is_ci_cd, product=engagement.product, user=request.user)
jira_epic_form = None
- if get_system_setting('enable_jira'):
+ if get_system_setting("enable_jira"):
jira_project = jira_helper.get_jira_project(engagement, use_inheritance=False)
- jira_project_form = JIRAProjectForm(instance=jira_project, target='engagement', product=engagement.product)
- logger.debug('showing jira-epic-form')
+ jira_project_form = JIRAProjectForm(instance=jira_project, target="engagement", product=engagement.product)
+ logger.debug("showing jira-epic-form")
jira_epic_form = JIRAEngagementForm(instance=engagement)
if is_ci_cd:
- title = 'Edit CI/CD Engagement'
+ title = "Edit CI/CD Engagement"
else:
- title = 'Edit Interactive Engagement'
+ title = "Edit Interactive Engagement"
product_tab = Product_Tab(engagement.product, title=title, tab="engagements")
product_tab.setEngagement(engagement)
- return render(request, 'dojo/new_eng.html', {
- 'product_tab': product_tab,
- 'title': title,
- 'form': form,
- 'edit': True,
- 'jira_epic_form': jira_epic_form,
- 'jira_project_form': jira_project_form,
- 'engagement': engagement,
+ return render(request, "dojo/new_eng.html", {
+ "product_tab": product_tab,
+ "title": title,
+ "form": form,
+ "edit": True,
+ "jira_epic_form": jira_epic_form,
+ "jira_project_form": jira_project_form,
+ "engagement": engagement,
})
-@user_is_authorized(Engagement, Permissions.Engagement_Delete, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_Delete, "eid")
def delete_engagement(request, eid):
engagement = get_object_or_404(Engagement, pk=eid)
product = engagement.product
form = DeleteEngagementForm(instance=engagement)
- if request.method == 'POST':
- if 'id' in request.POST and str(engagement.id) == request.POST['id']:
+ if request.method == "POST":
+ if "id" in request.POST and str(engagement.id) == request.POST["id"]:
form = DeleteEngagementForm(request.POST, instance=engagement)
if form.is_valid():
product = engagement.product
if get_setting("ASYNC_OBJECT_DELETE"):
async_del = async_delete()
async_del.delete(engagement)
- message = 'Engagement and relationships will be removed in the background.'
+ message = "Engagement and relationships will be removed in the background."
else:
- message = 'Engagement and relationships removed.'
+ message = "Engagement and relationships removed."
engagement.delete()
messages.add_message(
request,
messages.SUCCESS,
message,
- extra_tags='alert-success')
+ extra_tags="alert-success")
return HttpResponseRedirect(reverse("view_engagements", args=(product.id, )))
- rels = ['Previewing the relationships has been disabled.', '']
- display_preview = get_setting('DELETE_PREVIEW')
+ rels = ["Previewing the relationships has been disabled.", ""]
+ display_preview = get_setting("DELETE_PREVIEW")
if display_preview:
collector = NestedObjects(using=DEFAULT_DB_ALIAS)
collector.collect([engagement])
@@ -368,21 +368,21 @@ def delete_engagement(request, eid):
product_tab = Product_Tab(product, title="Delete Engagement", tab="engagements")
product_tab.setEngagement(engagement)
- return render(request, 'dojo/delete_engagement.html', {
- 'product_tab': product_tab,
- 'engagement': engagement,
- 'form': form,
- 'rels': rels,
+ return render(request, "dojo/delete_engagement.html", {
+ "product_tab": product_tab,
+ "engagement": engagement,
+ "form": form,
+ "rels": rels,
})
-@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid")
def copy_engagement(request, eid):
engagement = get_object_or_404(Engagement, id=eid)
product = engagement.product
form = DoneForm()
- if request.method == 'POST':
+ if request.method == "POST":
form = DoneForm(request.POST)
if form.is_valid():
engagement_copy = engagement.copy()
@@ -390,13 +390,13 @@ def copy_engagement(request, eid):
messages.add_message(
request,
messages.SUCCESS,
- 'Engagement Copied successfully.',
- extra_tags='alert-success')
- create_notification(event='engagement_copied', # TODO - if 'copy' functionality will be supported by API as well, 'create_notification' needs to be migrated to place where it will be able to cover actions from both interfaces
- title=_('Copying of %s') % engagement.name,
+ "Engagement Copied successfully.",
+ extra_tags="alert-success")
+ create_notification(event="engagement_copied", # TODO - if 'copy' functionality will be supported by API as well, 'create_notification' needs to be migrated to place where it will be able to cover actions from both interfaces
+ title=_("Copying of %s") % engagement.name,
description=f'The engagement "{engagement.name}" was copied by {request.user}',
product=product,
- url=request.build_absolute_uri(reverse('view_engagement', args=(engagement_copy.id, ))),
+ url=request.build_absolute_uri(reverse("view_engagement", args=(engagement_copy.id, ))),
recipients=[engagement.lead],
icon="exclamation-triangle")
return redirect_to_return_url_or_else(request, reverse("view_engagements", args=(product.id, )))
@@ -404,26 +404,26 @@ def copy_engagement(request, eid):
messages.add_message(
request,
messages.ERROR,
- 'Unable to copy engagement, please try again.',
- extra_tags='alert-danger')
+ "Unable to copy engagement, please try again.",
+ extra_tags="alert-danger")
product_tab = Product_Tab(product, title="Copy Engagement", tab="engagements")
- return render(request, 'dojo/copy_object.html', {
- 'source': engagement,
- 'source_label': 'Engagement',
- 'destination_label': 'Product',
- 'product_tab': product_tab,
- 'form': form,
+ return render(request, "dojo/copy_object.html", {
+ "source": engagement,
+ "source_label": "Engagement",
+ "destination_label": "Product",
+ "product_tab": product_tab,
+ "form": form,
})
class ViewEngagement(View):
def get_template(self):
- return 'dojo/view_eng.html'
+ return "dojo/view_eng.html"
def get_risks_accepted(self, eng):
- risks_accepted = eng.risk_acceptance.all().select_related('owner').annotate(accepted_findings_count=Count('accepted_findings__id'))
+ risks_accepted = eng.risk_acceptance.all().select_related("owner").annotate(accepted_findings_count=Count("accepted_findings__id"))
return risks_accepted
def get_filtered_tests(
@@ -438,7 +438,7 @@ def get_filtered_tests(
def get(self, request, eid, *args, **kwargs):
eng = get_object_or_404(Engagement, id=eid)
- tests = eng.test_set.all().order_by('test_type__name', '-updated')
+ tests = eng.test_set.all().order_by("test_type__name", "-updated")
default_page_num = 10
tests_filter = self.get_filtered_tests(request, tests, eng)
paged_tests = get_page_items(request, tests_filter.qs, default_page_num)
@@ -471,9 +471,9 @@ def get(self, request, eid, *args, **kwargs):
form = NoteForm()
creds = Cred_Mapping.objects.filter(
- product=eng.product).select_related('cred_id').order_by('cred_id')
+ product=eng.product).select_related("cred_id").order_by("cred_id")
cred_eng = Cred_Mapping.objects.filter(
- engagement=eng.id).select_related('cred_id').order_by('cred_id')
+ engagement=eng.id).select_related("cred_id").order_by("cred_id")
add_breadcrumb(parent=eng, top_level=False, request=request)
@@ -484,28 +484,28 @@ def get(self, request, eid, *args, **kwargs):
product_tab.setEngagement(eng)
return render(
request, self.get_template(), {
- 'eng': eng,
- 'product_tab': product_tab,
- 'system_settings': system_settings,
- 'tests': paged_tests,
- 'filter': tests_filter,
- 'check': check,
- 'threat': eng.tmodel_path,
- 'form': form,
- 'notes': notes,
- 'files': files,
- 'risks_accepted': risks_accepted,
- 'jissue': jissue,
- 'jira_project': jira_project,
- 'creds': creds,
- 'cred_eng': cred_eng,
- 'network': network,
- 'preset_test_type': preset_test_type,
+ "eng": eng,
+ "product_tab": product_tab,
+ "system_settings": system_settings,
+ "tests": paged_tests,
+ "filter": tests_filter,
+ "check": check,
+ "threat": eng.tmodel_path,
+ "form": form,
+ "notes": notes,
+ "files": files,
+ "risks_accepted": risks_accepted,
+ "jissue": jissue,
+ "jira_project": jira_project,
+ "creds": creds,
+ "cred_eng": cred_eng,
+ "network": network,
+ "preset_test_type": preset_test_type,
})
def post(self, request, eid, *args, **kwargs):
eng = get_object_or_404(Engagement, id=eid)
- tests = eng.test_set.all().order_by('test_type__name', '-updated')
+ tests = eng.test_set.all().order_by("test_type__name", "-updated")
default_page_num = 10
@@ -537,7 +537,7 @@ def post(self, request, eid, *args, **kwargs):
form = DoneForm()
files = eng.files.all()
user_has_permission_or_403(request.user, eng, Permissions.Note_Add)
- eng.progress = 'check_list'
+ eng.progress = "check_list"
eng.save()
if note_type_activation:
@@ -557,12 +557,12 @@ def post(self, request, eid, *args, **kwargs):
title = f"Engagement: {eng.name} on {eng.product.name}"
messages.add_message(request,
messages.SUCCESS,
- 'Note added successfully.',
- extra_tags='alert-success')
+ "Note added successfully.",
+ extra_tags="alert-success")
creds = Cred_Mapping.objects.filter(
- product=eng.product).select_related('cred_id').order_by('cred_id')
+ product=eng.product).select_related("cred_id").order_by("cred_id")
cred_eng = Cred_Mapping.objects.filter(
- engagement=eng.id).select_related('cred_id').order_by('cred_id')
+ engagement=eng.id).select_related("cred_id").order_by("cred_id")
add_breadcrumb(parent=eng, top_level=False, request=request)
@@ -573,23 +573,23 @@ def post(self, request, eid, *args, **kwargs):
product_tab.setEngagement(eng)
return render(
request, self.get_template(), {
- 'eng': eng,
- 'product_tab': product_tab,
- 'system_settings': system_settings,
- 'tests': paged_tests,
- 'filter': tests_filter,
- 'check': check,
- 'threat': eng.tmodel_path,
- 'form': form,
- 'notes': notes,
- 'files': files,
- 'risks_accepted': risks_accepted,
- 'jissue': jissue,
- 'jira_project': jira_project,
- 'creds': creds,
- 'cred_eng': cred_eng,
- 'network': network,
- 'preset_test_type': preset_test_type,
+ "eng": eng,
+ "product_tab": product_tab,
+ "system_settings": system_settings,
+ "tests": paged_tests,
+ "filter": tests_filter,
+ "check": check,
+ "threat": eng.tmodel_path,
+ "form": form,
+ "notes": notes,
+ "files": files,
+ "risks_accepted": risks_accepted,
+ "jissue": jissue,
+ "jira_project": jira_project,
+ "creds": creds,
+ "cred_eng": cred_eng,
+ "network": network,
+ "preset_test_type": preset_test_type,
})
@@ -598,40 +598,40 @@ def prefetch_for_view_tests(tests):
if isinstance(tests,
QuerySet): # old code can arrive here with prods being a list because the query was already executed
- prefetched = prefetched.select_related('lead')
- prefetched = prefetched.prefetch_related('tags', 'test_type', 'notes')
- prefetched = prefetched.annotate(count_findings_test_all=Count('finding__id', distinct=True))
- prefetched = prefetched.annotate(count_findings_test_active=Count('finding__id', filter=Q(finding__active=True), distinct=True))
- prefetched = prefetched.annotate(count_findings_test_active_verified=Count('finding__id', filter=Q(finding__active=True) & Q(finding__verified=True), distinct=True))
- prefetched = prefetched.annotate(count_findings_test_mitigated=Count('finding__id', filter=Q(finding__is_mitigated=True), distinct=True))
- prefetched = prefetched.annotate(count_findings_test_dups=Count('finding__id', filter=Q(finding__duplicate=True), distinct=True))
- prefetched = prefetched.annotate(total_reimport_count=Count('test_import__id', filter=Q(test_import__type=Test_Import.REIMPORT_TYPE), distinct=True))
+ prefetched = prefetched.select_related("lead")
+ prefetched = prefetched.prefetch_related("tags", "test_type", "notes")
+ prefetched = prefetched.annotate(count_findings_test_all=Count("finding__id", distinct=True))
+ prefetched = prefetched.annotate(count_findings_test_active=Count("finding__id", filter=Q(finding__active=True), distinct=True))
+ prefetched = prefetched.annotate(count_findings_test_active_verified=Count("finding__id", filter=Q(finding__active=True) & Q(finding__verified=True), distinct=True))
+ prefetched = prefetched.annotate(count_findings_test_mitigated=Count("finding__id", filter=Q(finding__is_mitigated=True), distinct=True))
+ prefetched = prefetched.annotate(count_findings_test_dups=Count("finding__id", filter=Q(finding__duplicate=True), distinct=True))
+ prefetched = prefetched.annotate(total_reimport_count=Count("test_import__id", filter=Q(test_import__type=Test_Import.REIMPORT_TYPE), distinct=True))
else:
- logger.warning('unable to prefetch because query was already executed')
+ logger.warning("unable to prefetch because query was already executed")
return prefetched
-@user_is_authorized(Engagement, Permissions.Test_Add, 'eid')
+@user_is_authorized(Engagement, Permissions.Test_Add, "eid")
def add_tests(request, eid):
eng = Engagement.objects.get(id=eid)
cred_form = CredMappingForm()
cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
- engagement=eng).order_by('cred_id')
+ engagement=eng).order_by("cred_id")
- if request.method == 'POST':
+ if request.method == "POST":
form = TestForm(request.POST, engagement=eng)
cred_form = CredMappingForm(request.POST)
cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
- engagement=eng).order_by('cred_id')
+ engagement=eng).order_by("cred_id")
if form.is_valid():
new_test = form.save(commit=False)
# set default scan_type as it's used in reimport
new_test.scan_type = new_test.test_type.name
new_test.engagement = eng
try:
- new_test.lead = User.objects.get(id=form['lead'].value())
+ new_test.lead = User.objects.get(id=form["lead"].value())
except:
new_test.lead = None
@@ -644,10 +644,10 @@ def add_tests(request, eid):
# Save the credential to the test
if cred_form.is_valid():
- if cred_form.cleaned_data['cred_user']:
+ if cred_form.cleaned_data["cred_user"]:
# Select the credential mapping object from the selected list and only allow if the credential is associated with the product
cred_user = Cred_Mapping.objects.filter(
- pk=cred_form.cleaned_data['cred_user'].id,
+ pk=cred_form.cleaned_data["cred_user"].id,
engagement=eid).first()
new_f = cred_form.save(commit=False)
@@ -658,35 +658,35 @@ def add_tests(request, eid):
messages.add_message(
request,
messages.SUCCESS,
- 'Test added successfully.',
- extra_tags='alert-success')
+ "Test added successfully.",
+ extra_tags="alert-success")
notifications_helper.notify_test_created(new_test)
- if '_Add Another Test' in request.POST:
+ if "_Add Another Test" in request.POST:
return HttpResponseRedirect(
- reverse('add_tests', args=(eng.id, )))
- elif '_Add Findings' in request.POST:
+ reverse("add_tests", args=(eng.id, )))
+ elif "_Add Findings" in request.POST:
return HttpResponseRedirect(
- reverse('add_findings', args=(new_test.id, )))
- elif '_Finished' in request.POST:
+ reverse("add_findings", args=(new_test.id, )))
+ elif "_Finished" in request.POST:
return HttpResponseRedirect(
- reverse('view_engagement', args=(eng.id, )))
+ reverse("view_engagement", args=(eng.id, )))
else:
form = TestForm(engagement=eng)
- form.initial['target_start'] = eng.target_start
- form.initial['target_end'] = eng.target_end
- form.initial['lead'] = request.user
+ form.initial["target_start"] = eng.target_start
+ form.initial["target_end"] = eng.target_end
+ form.initial["lead"] = request.user
add_breadcrumb(
parent=eng, title="Add Tests", top_level=False, request=request)
product_tab = Product_Tab(eng.product, title="Add Tests", tab="engagements")
product_tab.setEngagement(eng)
- return render(request, 'dojo/add_tests.html', {
- 'product_tab': product_tab,
- 'form': form,
- 'cred_form': cred_form,
- 'eid': eid,
- 'eng': eng,
+ return render(request, "dojo/add_tests.html", {
+ "product_tab": product_tab,
+ "form": form,
+ "cred_form": cred_form,
+ "eid": eid,
+ "eng": eng,
})
@@ -727,7 +727,7 @@ def get_engagement_or_product(
product = get_object_or_404(Product, id=product_id)
engagement_or_product = product
else:
- msg = 'Either Engagement or Product has to be provided'
+ msg = "Either Engagement or Product has to be provided"
raise Exception(msg)
# Ensure the supplied user has access to import to the engagement or product
user_has_permission_or_403(user, engagement_or_product, Permissions.Import_Scan_Result)
@@ -768,7 +768,7 @@ def get_credential_form(
initial={
"cred_user_queryset": Cred_Mapping.objects.filter(
engagement=engagement,
- ).order_by('cred_id'),
+ ).order_by("cred_id"),
},
)
@@ -790,12 +790,12 @@ def get_jira_form(
jira_form = JIRAImportScanForm(
request.POST,
push_all=push_all_jira_issues,
- prefix='jiraform',
+ prefix="jiraform",
)
else:
jira_form = JIRAImportScanForm(
push_all=push_all_jira_issues,
- prefix='jiraform',
+ prefix="jiraform",
)
return jira_form, push_all_jira_issues
@@ -905,7 +905,7 @@ def create_engagement(
target_start=timezone.now().date(),
target_end=timezone.now().date(),
product=context.get("product"),
- status='In Progress',
+ status="In Progress",
version=context.get("version"),
branch_tag=context.get("branch_tag"),
build_id=context.get("build_id"),
@@ -974,23 +974,23 @@ def process_form(
self.create_engagement(context)
# close_old_findings_product_scope is a modifier of close_old_findings.
# If it is selected, close_old_findings should also be selected.
- if close_old_findings_product_scope := form.cleaned_data.get('close_old_findings_product_scope', None):
+ if close_old_findings_product_scope := form.cleaned_data.get("close_old_findings_product_scope", None):
context["close_old_findings_product_scope"] = close_old_findings_product_scope
context["close_old_findings"] = True
# Save newly added endpoints
added_endpoints = save_endpoints_to_add(form.endpoints_to_add_list, context.get("engagement").product)
- endpoints_from_form = list(form.cleaned_data['endpoints'])
+ endpoints_from_form = list(form.cleaned_data["endpoints"])
context["endpoints_to_add"] = endpoints_from_form + added_endpoints
# Override the form values of active and verified
- if activeChoice := form.cleaned_data.get('active', None):
- if activeChoice == 'force_to_true':
+ if activeChoice := form.cleaned_data.get("active", None):
+ if activeChoice == "force_to_true":
context["active"] = True
- elif activeChoice == 'force_to_false':
+ elif activeChoice == "force_to_false":
context["active"] = False
- if verifiedChoice := form.cleaned_data.get('verified', None):
- if verifiedChoice == 'force_to_true':
+ if verifiedChoice := form.cleaned_data.get("verified", None):
+ if verifiedChoice == "force_to_true":
context["verified"] = True
- elif verifiedChoice == 'force_to_false':
+ elif verifiedChoice == "force_to_false":
context["verified"] = False
return None
@@ -1019,7 +1019,7 @@ def process_credentials_form(
"""
Process the credentials form by creating
"""
- if cred_user := form.cleaned_data['cred_user']:
+ if cred_user := form.cleaned_data["cred_user"]:
# Select the credential mapping object from the selected list and only allow if the credential is associated with the product
cred_user = Cred_Mapping.objects.filter(
pk=cred_user.id,
@@ -1111,27 +1111,27 @@ def post(
return self.success_redirect(context)
-@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid")
def close_eng(request, eid):
eng = Engagement.objects.get(id=eid)
close_engagement(eng)
messages.add_message(
request,
messages.SUCCESS,
- 'Engagement closed successfully.',
- extra_tags='alert-success')
+ "Engagement closed successfully.",
+ extra_tags="alert-success")
return HttpResponseRedirect(reverse("view_engagements", args=(eng.product.id, )))
-@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid")
def reopen_eng(request, eid):
eng = Engagement.objects.get(id=eid)
reopen_engagement(eng)
messages.add_message(
request,
messages.SUCCESS,
- 'Engagement reopened successfully.',
- extra_tags='alert-success')
+ "Engagement reopened successfully.",
+ extra_tags="alert-success")
return HttpResponseRedirect(reverse("view_engagements", args=(eng.product.id, )))
@@ -1142,7 +1142,7 @@ def reopen_eng(request, eid):
"""
-@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid")
def complete_checklist(request, eid):
eng = get_object_or_404(Engagement, id=eid)
try:
@@ -1155,7 +1155,7 @@ def complete_checklist(request, eid):
title="Complete checklist",
top_level=False,
request=request)
- if request.method == 'POST':
+ if request.method == "POST":
tests = Test.objects.filter(engagement=eng)
findings = Finding.objects.filter(test__in=tests).all()
form = CheckForm(request.POST, instance=checklist, findings=findings)
@@ -1173,10 +1173,10 @@ def complete_checklist(request, eid):
messages.add_message(
request,
messages.SUCCESS,
- 'Checklist saved.',
- extra_tags='alert-success')
+ "Checklist saved.",
+ extra_tags="alert-success")
return HttpResponseRedirect(
- reverse('view_engagement', args=(eid, )))
+ reverse("view_engagement", args=(eid, )))
else:
tests = Test.objects.filter(engagement=eng)
findings = Finding.objects.filter(test__in=tests).all()
@@ -1184,15 +1184,15 @@ def complete_checklist(request, eid):
product_tab = Product_Tab(eng.product, title="Checklist", tab="engagements")
product_tab.setEngagement(eng)
- return render(request, 'dojo/checklist.html', {
- 'form': form,
- 'product_tab': product_tab,
- 'eid': eng.id,
- 'findings': findings,
+ return render(request, "dojo/checklist.html", {
+ "form": form,
+ "product_tab": product_tab,
+ "eid": eng.id,
+ "findings": findings,
})
-@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
+@user_is_authorized(Engagement, Permissions.Risk_Acceptance, "eid")
def add_risk_acceptance(request, eid, fid=None):
eng = get_object_or_404(Engagement, id=eid)
finding = None
@@ -1202,19 +1202,19 @@ def add_risk_acceptance(request, eid, fid=None):
if not eng.product.enable_full_risk_acceptance:
raise PermissionDenied
- if request.method == 'POST':
+ if request.method == "POST":
form = RiskAcceptanceForm(request.POST, request.FILES)
if form.is_valid():
# first capture notes param as it cannot be saved directly as m2m
notes = None
- if form.cleaned_data['notes']:
+ if form.cleaned_data["notes"]:
notes = Notes(
- entry=form.cleaned_data['notes'],
+ entry=form.cleaned_data["notes"],
author=request.user,
date=timezone.now())
notes.save()
- del form.cleaned_data['notes']
+ del form.cleaned_data["notes"]
try:
# we sometimes see a weird exception here, but are unable to reproduce.
@@ -1232,42 +1232,42 @@ def add_risk_acceptance(request, eid, fid=None):
eng.risk_acceptance.add(risk_acceptance)
- findings = form.cleaned_data['accepted_findings']
+ findings = form.cleaned_data["accepted_findings"]
risk_acceptance = ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings)
messages.add_message(
request,
messages.SUCCESS,
- 'Risk acceptance saved.',
- extra_tags='alert-success')
+ "Risk acceptance saved.",
+ extra_tags="alert-success")
- return redirect_to_return_url_or_else(request, reverse('view_engagement', args=(eid, )))
+ return redirect_to_return_url_or_else(request, reverse("view_engagement", args=(eid, )))
else:
- risk_acceptance_title_suggestion = f'Accept: {finding}'
- form = RiskAcceptanceForm(initial={'owner': request.user, 'name': risk_acceptance_title_suggestion})
+ risk_acceptance_title_suggestion = f"Accept: {finding}"
+ form = RiskAcceptanceForm(initial={"owner": request.user, "name": risk_acceptance_title_suggestion})
- finding_choices = Finding.objects.filter(duplicate=False, test__engagement=eng).filter(NOT_ACCEPTED_FINDINGS_QUERY).order_by('title')
+ finding_choices = Finding.objects.filter(duplicate=False, test__engagement=eng).filter(NOT_ACCEPTED_FINDINGS_QUERY).order_by("title")
- form.fields['accepted_findings'].queryset = finding_choices
+ form.fields["accepted_findings"].queryset = finding_choices
if fid:
- form.fields['accepted_findings'].initial = {fid}
+ form.fields["accepted_findings"].initial = {fid}
product_tab = Product_Tab(eng.product, title="Risk Acceptance", tab="engagements")
product_tab.setEngagement(eng)
- return render(request, 'dojo/add_risk_acceptance.html', {
- 'eng': eng,
- 'product_tab': product_tab,
- 'form': form,
+ return render(request, "dojo/add_risk_acceptance.html", {
+ "eng": eng,
+ "product_tab": product_tab,
+ "form": form,
})
-@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_View, "eid")
def view_risk_acceptance(request, eid, raid):
return view_edit_risk_acceptance(request, eid=eid, raid=raid, edit_mode=False)
-@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
+@user_is_authorized(Engagement, Permissions.Risk_Acceptance, "eid")
def edit_risk_acceptance(request, eid, raid):
return view_edit_risk_acceptance(request, eid=eid, raid=raid, edit_mode=True)
@@ -1283,13 +1283,13 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
risk_acceptance_form = None
errors = False
- if request.method == 'POST':
+ if request.method == "POST":
# deleting before instantiating the form otherwise django messes up and we end up with an empty path value
if len(request.FILES) > 0:
- logger.debug('new proof uploaded')
+ logger.debug("new proof uploaded")
risk_acceptance.path.delete()
- if 'decision' in request.POST:
+ if "decision" in request.POST:
old_expiration_date = risk_acceptance.expiration_date
risk_acceptance_form = EditRiskAcceptanceForm(request.POST, request.FILES, instance=risk_acceptance)
errors = errors or not risk_acceptance_form.is_valid()
@@ -1305,10 +1305,10 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
messages.add_message(
request,
messages.SUCCESS,
- 'Risk Acceptance saved successfully.',
- extra_tags='alert-success')
+ "Risk Acceptance saved successfully.",
+ extra_tags="alert-success")
- if 'entry' in request.POST:
+ if "entry" in request.POST:
note_form = NoteForm(request.POST)
errors = errors or not note_form.is_valid()
if not errors:
@@ -1320,39 +1320,39 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
messages.add_message(
request,
messages.SUCCESS,
- 'Note added successfully.',
- extra_tags='alert-success')
+ "Note added successfully.",
+ extra_tags="alert-success")
- if 'delete_note' in request.POST:
- note = get_object_or_404(Notes, pk=request.POST['delete_note_id'])
+ if "delete_note" in request.POST:
+ note = get_object_or_404(Notes, pk=request.POST["delete_note_id"])
if note.author.username == request.user.username:
risk_acceptance.notes.remove(note)
note.delete()
messages.add_message(
request,
messages.SUCCESS,
- 'Note deleted successfully.',
- extra_tags='alert-success')
+ "Note deleted successfully.",
+ extra_tags="alert-success")
else:
messages.add_message(
request,
messages.ERROR,
"Since you are not the note's author, it was not deleted.",
- extra_tags='alert-danger')
+ extra_tags="alert-danger")
- if 'remove_finding' in request.POST:
+ if "remove_finding" in request.POST:
finding = get_object_or_404(
- Finding, pk=request.POST['remove_finding_id'])
+ Finding, pk=request.POST["remove_finding_id"])
ra_helper.remove_finding_from_risk_acceptance(risk_acceptance, finding)
messages.add_message(
request,
messages.SUCCESS,
- 'Finding removed successfully from risk acceptance.',
- extra_tags='alert-success')
+ "Finding removed successfully from risk acceptance.",
+ extra_tags="alert-success")
- if 'replace_file' in request.POST:
+ if "replace_file" in request.POST:
replace_form = ReplaceRiskAcceptanceProofForm(
request.POST, request.FILES, instance=risk_acceptance)
@@ -1363,17 +1363,17 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
messages.add_message(
request,
messages.SUCCESS,
- 'New Proof uploaded successfully.',
- extra_tags='alert-success')
+ "New Proof uploaded successfully.",
+ extra_tags="alert-success")
else:
logger.error(replace_form.errors)
- if 'add_findings' in request.POST:
+ if "add_findings" in request.POST:
add_findings_form = AddFindingsRiskAcceptanceForm(
request.POST, request.FILES, instance=risk_acceptance)
errors = errors or not add_findings_form.is_valid()
if not errors:
- findings = add_findings_form.cleaned_data['accepted_findings']
+ findings = add_findings_form.cleaned_data["accepted_findings"]
ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings)
@@ -1381,12 +1381,12 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
request,
messages.SUCCESS,
f"Finding{'s' if len(findings) > 1 else ''} added successfully.",
- extra_tags='alert-success')
+ extra_tags="alert-success")
if not errors:
- logger.debug('redirecting to return_url')
+ logger.debug("redirecting to return_url")
return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
else:
- logger.error('errors found')
+ logger.error("errors found")
else:
if edit_mode:
@@ -1396,12 +1396,12 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
replace_form = ReplaceRiskAcceptanceProofForm(instance=risk_acceptance)
add_findings_form = AddFindingsRiskAcceptanceForm(instance=risk_acceptance)
- accepted_findings = risk_acceptance.accepted_findings.order_by('numerical_severity')
+ accepted_findings = risk_acceptance.accepted_findings.order_by("numerical_severity")
fpage = get_page_items(request, accepted_findings, 15)
unaccepted_findings = Finding.objects.filter(test__in=eng.test_set.all(), risk_accepted=False) \
.exclude(id__in=accepted_findings).order_by("title")
- add_fpage = get_page_items(request, unaccepted_findings, 25, 'apage')
+ add_fpage = get_page_items(request, unaccepted_findings, 25, "apage")
# on this page we need to add unaccepted findings as possible findings to add as accepted
add_findings_form.fields[
@@ -1414,26 +1414,26 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
product_tab = Product_Tab(eng.product, title="Risk Acceptance", tab="engagements")
product_tab.setEngagement(eng)
return render(
- request, 'dojo/view_risk_acceptance.html', {
- 'risk_acceptance': risk_acceptance,
- 'engagement': eng,
- 'product_tab': product_tab,
- 'accepted_findings': fpage,
- 'notes': risk_acceptance.notes.all(),
- 'eng': eng,
- 'edit_mode': edit_mode,
- 'risk_acceptance_form': risk_acceptance_form,
- 'note_form': note_form,
- 'replace_form': replace_form,
- 'add_findings_form': add_findings_form,
+ request, "dojo/view_risk_acceptance.html", {
+ "risk_acceptance": risk_acceptance,
+ "engagement": eng,
+ "product_tab": product_tab,
+ "accepted_findings": fpage,
+ "notes": risk_acceptance.notes.all(),
+ "eng": eng,
+ "edit_mode": edit_mode,
+ "risk_acceptance_form": risk_acceptance_form,
+ "note_form": note_form,
+ "replace_form": replace_form,
+ "add_findings_form": add_findings_form,
# 'show_add_findings_form': len(unaccepted_findings),
- 'request': request,
- 'add_findings': add_fpage,
- 'return_url': get_return_url(request),
+ "request": request,
+ "add_findings": add_fpage,
+ "return_url": get_return_url(request),
})
-@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
+@user_is_authorized(Engagement, Permissions.Risk_Acceptance, "eid")
def expire_risk_acceptance(request, eid, raid):
risk_acceptance = get_object_or_404(prefetch_for_expiration(Risk_Acceptance.objects.all()), pk=raid)
# Validate the engagement ID exists before moving forward
@@ -1444,7 +1444,7 @@ def expire_risk_acceptance(request, eid, raid):
return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
-@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
+@user_is_authorized(Engagement, Permissions.Risk_Acceptance, "eid")
def reinstate_risk_acceptance(request, eid, raid):
risk_acceptance = get_object_or_404(prefetch_for_expiration(Risk_Acceptance.objects.all()), pk=raid)
eng = get_object_or_404(Engagement, pk=eid)
@@ -1457,7 +1457,7 @@ def reinstate_risk_acceptance(request, eid, raid):
return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
-@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
+@user_is_authorized(Engagement, Permissions.Risk_Acceptance, "eid")
def delete_risk_acceptance(request, eid, raid):
risk_acceptance = get_object_or_404(Risk_Acceptance, pk=raid)
eng = get_object_or_404(Engagement, pk=eid)
@@ -1467,12 +1467,12 @@ def delete_risk_acceptance(request, eid, raid):
messages.add_message(
request,
messages.SUCCESS,
- 'Risk acceptance deleted successfully.',
- extra_tags='alert-success')
+ "Risk acceptance deleted successfully.",
+ extra_tags="alert-success")
return HttpResponseRedirect(reverse("view_engagement", args=(eng.id, )))
-@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_View, "eid")
def download_risk_acceptance(request, eid, raid):
import mimetypes
@@ -1482,10 +1482,10 @@ def download_risk_acceptance(request, eid, raid):
response = StreamingHttpResponse(
FileIterWrapper(
- open(settings.MEDIA_ROOT + "/" + risk_acceptance.path.name, mode='rb')))
- response['Content-Disposition'] = f'attachment; filename="{risk_acceptance.filename()}"'
+ open(settings.MEDIA_ROOT + "/" + risk_acceptance.path.name, mode="rb")))
+ response["Content-Disposition"] = f'attachment; filename="{risk_acceptance.filename()}"'
mimetype, _encoding = mimetypes.guess_type(risk_acceptance.path.name)
- response['Content-Type'] = mimetype
+ response["Content-Type"] = mimetype
return response
@@ -1497,7 +1497,7 @@ def download_risk_acceptance(request, eid, raid):
"""
-@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid")
def upload_threatmodel(request, eid):
eng = Engagement.objects.get(id=eid)
add_breadcrumb(
@@ -1506,38 +1506,38 @@ def upload_threatmodel(request, eid):
top_level=False,
request=request)
- if request.method == 'POST':
+ if request.method == "POST":
form = UploadThreatForm(request.POST, request.FILES)
if form.is_valid():
- handle_uploaded_threat(request.FILES['file'], eng)
- eng.progress = 'other'
+ handle_uploaded_threat(request.FILES["file"], eng)
+ eng.progress = "other"
eng.threat_model = True
eng.save()
messages.add_message(
request,
messages.SUCCESS,
- 'Threat model saved.',
- extra_tags='alert-success')
+ "Threat model saved.",
+ extra_tags="alert-success")
return HttpResponseRedirect(
- reverse('view_engagement', args=(eid, )))
+ reverse("view_engagement", args=(eid, )))
else:
form = UploadThreatForm()
product_tab = Product_Tab(eng.product, title="Upload Threat Model", tab="engagements")
- return render(request, 'dojo/up_threat.html', {
- 'form': form,
- 'product_tab': product_tab,
- 'eng': eng,
+ return render(request, "dojo/up_threat.html", {
+ "form": form,
+ "product_tab": product_tab,
+ "eng": eng,
})
-@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_View, "eid")
def view_threatmodel(request, eid):
eng = get_object_or_404(Engagement, pk=eid)
- response = FileResponse(open(eng.tmodel_path, 'rb'))
+ response = FileResponse(open(eng.tmodel_path, "rb"))
return response
-@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_View, "eid")
def engagement_ics(request, eid):
eng = get_object_or_404(Engagement, id=eid)
start_date = datetime.combine(eng.target_start, datetime.min.time())
@@ -1555,8 +1555,8 @@ def engagement_ics(request, eid):
)
output = cal.serialize()
response = HttpResponse(content=output)
- response['Content-Type'] = 'text/calendar'
- response['Content-Disposition'] = f'attachment; filename={eng.name}.ics'
+ response["Content-Type"] = "text/calendar"
+ response["Content-Disposition"] = f"attachment; filename={eng.name}.ics"
return response
@@ -1569,26 +1569,26 @@ def get_list_index(list, index):
def get_engagements(request):
- url = request.META.get('QUERY_STRING')
+ url = request.META.get("QUERY_STRING")
if not url:
- msg = 'Please use the export button when exporting engagements'
+ msg = "Please use the export button when exporting engagements"
raise ValidationError(msg)
else:
- if url.startswith('url='):
+ if url.startswith("url="):
url = url[4:]
- path_items = list(filter(None, re.split(r'/|\?', url)))
+ path_items = list(filter(None, re.split(r"/|\?", url)))
- if not path_items or path_items[0] != 'engagement':
- msg = 'URL is not an engagement view'
+ if not path_items or path_items[0] != "engagement":
+ msg = "URL is not an engagement view"
raise ValidationError(msg)
view = query = None
- if get_list_index(path_items, 1) in ['active', 'all']:
+ if get_list_index(path_items, 1) in ["active", "all"]:
view = get_list_index(path_items, 1)
query = get_list_index(path_items, 2)
else:
- view = 'active'
+ view = "active"
query = get_list_index(path_items, 1)
request.GET = QueryDict(query)
@@ -1599,19 +1599,19 @@ def get_engagements(request):
def get_excludes():
- return ['is_ci_cd', 'jira_issue', 'jira_project', 'objects', 'unaccepted_open_findings']
+ return ["is_ci_cd", "jira_issue", "jira_project", "objects", "unaccepted_open_findings"]
def get_foreign_keys():
- return ['build_server', 'lead', 'orchestration_engine', 'preset', 'product',
- 'report_type', 'requester', 'source_code_management_server']
+ return ["build_server", "lead", "orchestration_engine", "preset", "product",
+ "report_type", "requester", "source_code_management_server"]
def csv_export(request):
engagements, test_counts = get_engagements(request)
- response = HttpResponse(content_type='text/csv')
- response['Content-Disposition'] = 'attachment; filename=engagements.csv'
+ response = HttpResponse(content_type="text/csv")
+ response["Content-Disposition"] = "attachment; filename=engagements.csv"
writer = csv.writer(response)
@@ -1620,9 +1620,9 @@ def csv_export(request):
if first_row:
fields = []
for key in dir(engagement):
- if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
+ if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith("_"):
fields.append(key)
- fields.append('tests')
+ fields.append("tests")
writer.writerow(fields)
@@ -1630,12 +1630,12 @@ def csv_export(request):
if not first_row:
fields = []
for key in dir(engagement):
- if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
+ if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith("_"):
value = engagement.__dict__.get(key)
if key in get_foreign_keys() and getattr(engagement, key):
value = str(getattr(engagement, key))
if value and isinstance(value, str):
- value = value.replace('\n', ' NEWLINE ').replace('\r', '')
+ value = value.replace("\n", " NEWLINE ").replace("\r", "")
fields.append(value)
fields.append(test_counts.get(engagement.id, 0))
@@ -1650,7 +1650,7 @@ def excel_export(request):
workbook = Workbook()
workbook.iso_dates = True
worksheet = workbook.active
- worksheet.title = 'Engagements'
+ worksheet.title = "Engagements"
font_bold = Font(bold=True)
@@ -1659,17 +1659,17 @@ def excel_export(request):
if row_num == 1:
col_num = 1
for key in dir(engagement):
- if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
+ if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith("_"):
cell = worksheet.cell(row=row_num, column=col_num, value=key)
cell.font = font_bold
col_num += 1
- cell = worksheet.cell(row=row_num, column=col_num, value='tests')
+ cell = worksheet.cell(row=row_num, column=col_num, value="tests")
cell.font = font_bold
row_num = 2
if row_num > 1:
col_num = 1
for key in dir(engagement):
- if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
+ if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith("_"):
value = engagement.__dict__.get(key)
if key in get_foreign_keys() and getattr(engagement, key):
value = str(getattr(engagement, key))
@@ -1687,7 +1687,7 @@ def excel_export(request):
response = HttpResponse(
content=stream,
- content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
+ content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
)
- response['Content-Disposition'] = 'attachment; filename=engagements.xlsx'
+ response["Content-Disposition"] = "attachment; filename=engagements.xlsx"
return response
diff --git a/dojo/filters.py b/dojo/filters.py
index 6d124d67f0..6c3da70e2b 100644
--- a/dojo/filters.py
+++ b/dojo/filters.py
@@ -94,30 +94,30 @@
logger = logging.getLogger(__name__)
-local_tz = pytz.timezone(get_system_setting('time_zone'))
+local_tz = pytz.timezone(get_system_setting("time_zone"))
-BOOLEAN_CHOICES = (('false', 'No'), ('true', 'Yes'))
+BOOLEAN_CHOICES = (("false", "No"), ("true", "Yes"))
EARLIEST_FINDING = None
def custom_filter(queryset, name, value):
- values = value.split(',')
- filter = (f'{name}__in')
+ values = value.split(",")
+ filter = (f"{name}__in")
return queryset.filter(Q(**{filter: values}))
def custom_vulnerability_id_filter(queryset, name, value):
- values = value.split(',')
+ values = value.split(",")
ids = Vulnerability_Id.objects \
.filter(vulnerability_id__in=values) \
- .values_list('finding_id', flat=True)
+ .values_list("finding_id", flat=True)
return queryset.filter(id__in=ids)
def vulnerability_id_filter(queryset, name, value):
ids = Vulnerability_Id.objects \
.filter(vulnerability_id=value) \
- .values_list('finding_id', flat=True)
+ .values_list("finding_id", flat=True)
return queryset.filter(id__in=ids)
@@ -163,19 +163,19 @@ def under_review(self, qs, name):
return qs.filter(UNDER_REVIEW_QUERY)
options = {
- None: (_('Any'), any),
- 0: (_('Open'), open),
- 1: (_('Verified'), verified),
- 2: (_('Out Of Scope'), out_of_scope),
- 3: (_('False Positive'), false_positive),
- 4: (_('Inactive'), inactive),
- 5: (_('Risk Accepted'), risk_accepted),
- 6: (_('Closed'), closed),
- 7: (_('Under Review'), under_review),
+ None: (_("Any"), any),
+ 0: (_("Open"), open),
+ 1: (_("Verified"), verified),
+ 2: (_("Out Of Scope"), out_of_scope),
+ 3: (_("False Positive"), false_positive),
+ 4: (_("Inactive"), inactive),
+ 5: (_("Risk Accepted"), risk_accepted),
+ 6: (_("Closed"), closed),
+ 7: (_("Under Review"), under_review),
}
def __init__(self, *args, **kwargs):
- kwargs['choices'] = [
+ kwargs["choices"] = [
(key, value[0]) for key, value in six.iteritems(self.options)]
super().__init__(*args, **kwargs)
@@ -217,13 +217,13 @@ def sla_violated(self, qs, name):
)
options = {
- None: (_('Any'), any),
- 0: (_('False'), sla_satisfied),
- 1: (_('True'), sla_violated),
+ None: (_("Any"), any),
+ 0: (_("False"), sla_satisfied),
+ 1: (_("True"), sla_violated),
}
def __init__(self, *args, **kwargs):
- kwargs['choices'] = [
+ kwargs["choices"] = [
(key, value[0]) for key, value in six.iteritems(self.options)]
super().__init__(*args, **kwargs)
@@ -252,13 +252,13 @@ def sla_violated(self, qs, name):
return qs
options = {
- None: (_('Any'), any),
- 0: (_('False'), sla_satisifed),
- 1: (_('True'), sla_violated),
+ None: (_("Any"), any),
+ 0: (_("False"), sla_satisifed),
+ 1: (_("True"), sla_violated),
}
def __init__(self, *args, **kwargs):
- kwargs['choices'] = [
+ kwargs["choices"] = [
(key, value[0]) for key, value in six.iteritems(self.options)]
super().__init__(*args, **kwargs)
@@ -275,7 +275,7 @@ def get_earliest_finding(queryset=None):
queryset = Finding.objects.all()
try:
- EARLIEST_FINDING = queryset.earliest('date')
+ EARLIEST_FINDING = queryset.earliest("date")
except (Finding.DoesNotExist, Endpoint_Status.DoesNotExist):
EARLIEST_FINDING = None
return EARLIEST_FINDING
@@ -284,7 +284,7 @@ def get_earliest_finding(queryset=None):
def cwe_options(queryset):
cwe = {}
cwe = dict([cwe, cwe]
- for cwe in queryset.order_by().values_list('cwe', flat=True).distinct()
+ for cwe in queryset.order_by().values_list("cwe", flat=True).distinct()
if isinstance(cwe, int) and cwe is not None and cwe > 0)
cwe = collections.OrderedDict(sorted(cwe.items()))
return list(cwe.items())
@@ -294,10 +294,10 @@ class DojoFilter(FilterSet):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- for field in ['tags', 'test__tags', 'test__engagement__tags', 'test__engagement__product__tags',
- 'not_tags', 'not_test__tags', 'not_test__engagement__tags', 'not_test__engagement__product__tags']:
+ for field in ["tags", "test__tags", "test__engagement__tags", "test__engagement__product__tags",
+ "not_tags", "not_test__tags", "not_test__engagement__tags", "not_test__engagement__product__tags"]:
if field in self.form.fields:
- tags_filter = self.filters['tags']
+ tags_filter = self.filters["tags"]
model = tags_filter.model
self.form.fields[field] = model._meta.get_field("tags").formfield()
@@ -305,34 +305,34 @@ def __init__(self, *args, **kwargs):
# and form.js would then apply select2 multiple times, resulting in duplicated fields
# the initialization now happens in filter_js_snippet.html
self.form.fields[field].widget.tag_options = \
- self.form.fields[field].widget.tag_options + tagulous.models.options.TagOptions(autocomplete_settings={'width': '200px', 'defer': True})
+ self.form.fields[field].widget.tag_options + tagulous.models.options.TagOptions(autocomplete_settings={"width": "200px", "defer": True})
tagged_model, exclude = get_tags_model_from_field_name(field)
if tagged_model: # only if not the normal tags field
self.form.fields[field].label = get_tags_label_from_model(tagged_model)
- self.form.fields[field].autocomplete_tags = tagged_model.tags.tag_model.objects.all().order_by('name')
+ self.form.fields[field].autocomplete_tags = tagged_model.tags.tag_model.objects.all().order_by("name")
if exclude:
- self.form.fields[field].label = 'Not ' + self.form.fields[field].label
+ self.form.fields[field].label = "Not " + self.form.fields[field].label
def get_tags_model_from_field_name(field):
exclude = False
- if field.startswith('not_'):
- field = field.replace('not_', '')
+ if field.startswith("not_"):
+ field = field.replace("not_", "")
exclude = True
try:
- parts = field.split('__')
+ parts = field.split("__")
model_name = parts[-2]
- return apps.get_model(f'dojo.{model_name}', require_ready=True), exclude
+ return apps.get_model(f"dojo.{model_name}", require_ready=True), exclude
except Exception:
return None, exclude
def get_tags_label_from_model(model):
if model:
- return f'Tags ({model.__name__.title()})'
+ return f"Tags ({model.__name__.title()})"
else:
- return 'Tags (Unknown)'
+ return "Tags (Unknown)"
def get_finding_filterset_fields(metrics=False, similar=False, filter_string_matching=False):
@@ -340,106 +340,106 @@ def get_finding_filterset_fields(metrics=False, similar=False, filter_string_mat
if similar:
fields.extend([
- 'id',
- 'hash_code',
+ "id",
+ "hash_code",
])
- fields.extend(['title', 'component_name', 'component_version'])
+ fields.extend(["title", "component_name", "component_version"])
if metrics:
fields.extend([
- 'start_date',
- 'end_date',
+ "start_date",
+ "end_date",
])
fields.extend([
- 'date',
- 'cwe',
- 'severity',
- 'last_reviewed',
- 'last_status_update',
- 'mitigated',
- 'reporter',
- 'reviewers',
+ "date",
+ "cwe",
+ "severity",
+ "last_reviewed",
+ "last_status_update",
+ "mitigated",
+ "reporter",
+ "reviewers",
])
if filter_string_matching:
fields.extend([
- 'reporter',
- 'reviewers',
- 'test__engagement__product__prod_type__name',
- 'test__engagement__product__name',
- 'test__engagement__name',
- 'test__title',
+ "reporter",
+ "reviewers",
+ "test__engagement__product__prod_type__name",
+ "test__engagement__product__name",
+ "test__engagement__name",
+ "test__title",
])
else:
fields.extend([
- 'reporter',
- 'reviewers',
- 'test__engagement__product__prod_type',
- 'test__engagement__product',
- 'test__engagement',
- 'test',
+ "reporter",
+ "reviewers",
+ "test__engagement__product__prod_type",
+ "test__engagement__product",
+ "test__engagement",
+ "test",
])
fields.extend([
- 'test__test_type',
- 'test__engagement__version',
- 'test__version',
- 'endpoints',
- 'status',
- 'active',
- 'verified',
- 'duplicate',
- 'is_mitigated',
- 'out_of_scope',
- 'false_p',
- 'has_component',
- 'has_notes',
- 'file_path',
- 'unique_id_from_tool',
- 'vuln_id_from_tool',
- 'service',
- 'epss_score',
- 'epss_score_range',
- 'epss_percentile',
- 'epss_percentile_range',
+ "test__test_type",
+ "test__engagement__version",
+ "test__version",
+ "endpoints",
+ "status",
+ "active",
+ "verified",
+ "duplicate",
+ "is_mitigated",
+ "out_of_scope",
+ "false_p",
+ "has_component",
+ "has_notes",
+ "file_path",
+ "unique_id_from_tool",
+ "vuln_id_from_tool",
+ "service",
+ "epss_score",
+ "epss_score_range",
+ "epss_percentile",
+ "epss_percentile_range",
])
if similar:
fields.extend([
- 'id',
+ "id",
])
fields.extend([
- 'param',
- 'payload',
- 'risk_acceptance',
+ "param",
+ "payload",
+ "risk_acceptance",
])
- if get_system_setting('enable_jira'):
+ if get_system_setting("enable_jira"):
fields.extend([
- 'has_jira_issue',
- 'jira_creation',
- 'jira_change',
- 'jira_issue__jira_key',
+ "has_jira_issue",
+ "jira_creation",
+ "jira_change",
+ "jira_issue__jira_key",
])
if is_finding_groups_enabled():
if filter_string_matching:
fields.extend([
- 'has_finding_group',
- 'finding_group__name',
+ "has_finding_group",
+ "finding_group__name",
])
else:
fields.extend([
- 'has_finding_group',
- 'finding_group',
+ "has_finding_group",
+ "finding_group",
])
- if get_system_setting('enable_jira'):
+ if get_system_setting("enable_jira"):
fields.extend([
- 'has_jira_group_issue',
+ "has_jira_group_issue",
])
return fields
@@ -605,39 +605,39 @@ def __init__(self, *args, **kwargs):
class DateRangeFilter(ChoiceFilter):
options = {
- None: (_('Any date'), lambda qs, name: qs.all()),
- 1: (_('Today'), lambda qs, name: qs.filter(**{
- f'{name}__year': now().year,
- f'{name}__month': now().month,
- f'{name}__day': now().day,
+ None: (_("Any date"), lambda qs, name: qs.all()),
+ 1: (_("Today"), lambda qs, name: qs.filter(**{
+ f"{name}__year": now().year,
+ f"{name}__month": now().month,
+ f"{name}__day": now().day,
})),
- 2: (_('Past 7 days'), lambda qs, name: qs.filter(**{
- f'{name}__gte': _truncate(now() - timedelta(days=7)),
- f'{name}__lt': _truncate(now() + timedelta(days=1)),
+ 2: (_("Past 7 days"), lambda qs, name: qs.filter(**{
+ f"{name}__gte": _truncate(now() - timedelta(days=7)),
+ f"{name}__lt": _truncate(now() + timedelta(days=1)),
})),
- 3: (_('Past 30 days'), lambda qs, name: qs.filter(**{
- f'{name}__gte': _truncate(now() - timedelta(days=30)),
- f'{name}__lt': _truncate(now() + timedelta(days=1)),
+ 3: (_("Past 30 days"), lambda qs, name: qs.filter(**{
+ f"{name}__gte": _truncate(now() - timedelta(days=30)),
+ f"{name}__lt": _truncate(now() + timedelta(days=1)),
})),
- 4: (_('Past 90 days'), lambda qs, name: qs.filter(**{
- f'{name}__gte': _truncate(now() - timedelta(days=90)),
- f'{name}__lt': _truncate(now() + timedelta(days=1)),
+ 4: (_("Past 90 days"), lambda qs, name: qs.filter(**{
+ f"{name}__gte": _truncate(now() - timedelta(days=90)),
+ f"{name}__lt": _truncate(now() + timedelta(days=1)),
})),
- 5: (_('Current month'), lambda qs, name: qs.filter(**{
- f'{name}__year': now().year,
- f'{name}__month': now().month,
+ 5: (_("Current month"), lambda qs, name: qs.filter(**{
+ f"{name}__year": now().year,
+ f"{name}__month": now().month,
})),
- 6: (_('Current year'), lambda qs, name: qs.filter(**{
- f'{name}__year': now().year,
+ 6: (_("Current year"), lambda qs, name: qs.filter(**{
+ f"{name}__year": now().year,
})),
- 7: (_('Past year'), lambda qs, name: qs.filter(**{
- f'{name}__gte': _truncate(now() - timedelta(days=365)),
- f'{name}__lt': _truncate(now() + timedelta(days=1)),
+ 7: (_("Past year"), lambda qs, name: qs.filter(**{
+ f"{name}__gte": _truncate(now() - timedelta(days=365)),
+ f"{name}__lt": _truncate(now() + timedelta(days=1)),
})),
}
def __init__(self, *args, **kwargs):
- kwargs['choices'] = [
+ kwargs["choices"] = [
(key, value[0]) for key, value in six.iteritems(self.options)]
super().__init__(*args, **kwargs)
@@ -651,55 +651,55 @@ def filter(self, qs, value):
class DateRangeOmniFilter(ChoiceFilter):
options = {
- None: (_('Any date'), lambda qs, name: qs.all()),
- 1: (_('Today'), lambda qs, name: qs.filter(**{
- f'{name}__year': now().year,
- f'{name}__month': now().month,
- f'{name}__day': now().day,
+ None: (_("Any date"), lambda qs, name: qs.all()),
+ 1: (_("Today"), lambda qs, name: qs.filter(**{
+ f"{name}__year": now().year,
+ f"{name}__month": now().month,
+ f"{name}__day": now().day,
})),
- 2: (_('Next 7 days'), lambda qs, name: qs.filter(**{
- f'{name}__gte': _truncate(now() + timedelta(days=1)),
- f'{name}__lt': _truncate(now() + timedelta(days=7)),
+ 2: (_("Next 7 days"), lambda qs, name: qs.filter(**{
+ f"{name}__gte": _truncate(now() + timedelta(days=1)),
+ f"{name}__lt": _truncate(now() + timedelta(days=7)),
})),
- 3: (_('Next 30 days'), lambda qs, name: qs.filter(**{
- f'{name}__gte': _truncate(now() + timedelta(days=1)),
- f'{name}__lt': _truncate(now() + timedelta(days=30)),
+ 3: (_("Next 30 days"), lambda qs, name: qs.filter(**{
+ f"{name}__gte": _truncate(now() + timedelta(days=1)),
+ f"{name}__lt": _truncate(now() + timedelta(days=30)),
})),
- 4: (_('Next 90 days'), lambda qs, name: qs.filter(**{
- f'{name}__gte': _truncate(now() + timedelta(days=1)),
- f'{name}__lt': _truncate(now() + timedelta(days=90)),
+ 4: (_("Next 90 days"), lambda qs, name: qs.filter(**{
+ f"{name}__gte": _truncate(now() + timedelta(days=1)),
+ f"{name}__lt": _truncate(now() + timedelta(days=90)),
})),
- 5: (_('Past 7 days'), lambda qs, name: qs.filter(**{
- f'{name}__gte': _truncate(now() - timedelta(days=7)),
- f'{name}__lt': _truncate(now() + timedelta(days=1)),
+ 5: (_("Past 7 days"), lambda qs, name: qs.filter(**{
+ f"{name}__gte": _truncate(now() - timedelta(days=7)),
+ f"{name}__lt": _truncate(now() + timedelta(days=1)),
})),
- 6: (_('Past 30 days'), lambda qs, name: qs.filter(**{
- f'{name}__gte': _truncate(now() - timedelta(days=30)),
- f'{name}__lt': _truncate(now() + timedelta(days=1)),
+ 6: (_("Past 30 days"), lambda qs, name: qs.filter(**{
+ f"{name}__gte": _truncate(now() - timedelta(days=30)),
+ f"{name}__lt": _truncate(now() + timedelta(days=1)),
})),
- 7: (_('Past 90 days'), lambda qs, name: qs.filter(**{
- f'{name}__gte': _truncate(now() - timedelta(days=90)),
- f'{name}__lt': _truncate(now() + timedelta(days=1)),
+ 7: (_("Past 90 days"), lambda qs, name: qs.filter(**{
+ f"{name}__gte": _truncate(now() - timedelta(days=90)),
+ f"{name}__lt": _truncate(now() + timedelta(days=1)),
})),
- 8: (_('Current month'), lambda qs, name: qs.filter(**{
- f'{name}__year': now().year,
- f'{name}__month': now().month,
+ 8: (_("Current month"), lambda qs, name: qs.filter(**{
+ f"{name}__year": now().year,
+ f"{name}__month": now().month,
})),
- 9: (_('Past year'), lambda qs, name: qs.filter(**{
- f'{name}__gte': _truncate(now() - timedelta(days=365)),
- f'{name}__lt': _truncate(now() + timedelta(days=1)),
+ 9: (_("Past year"), lambda qs, name: qs.filter(**{
+ f"{name}__gte": _truncate(now() - timedelta(days=365)),
+ f"{name}__lt": _truncate(now() + timedelta(days=1)),
})),
- 10: (_('Current year'), lambda qs, name: qs.filter(**{
- f'{name}__year': now().year,
+ 10: (_("Current year"), lambda qs, name: qs.filter(**{
+ f"{name}__year": now().year,
})),
- 11: (_('Next year'), lambda qs, name: qs.filter(**{
- f'{name}__gte': _truncate(now() + timedelta(days=1)),
- f'{name}__lt': _truncate(now() + timedelta(days=365)),
+ 11: (_("Next year"), lambda qs, name: qs.filter(**{
+ f"{name}__gte": _truncate(now() + timedelta(days=1)),
+ f"{name}__lt": _truncate(now() + timedelta(days=365)),
})),
}
def __init__(self, *args, **kwargs):
- kwargs['choices'] = [
+ kwargs["choices"] = [
(key, value[0]) for key, value in six.iteritems(self.options)]
super().__init__(*args, **kwargs)
@@ -713,17 +713,17 @@ def filter(self, qs, value):
class ReportBooleanFilter(ChoiceFilter):
options = {
- None: (_('Either'), lambda qs, name: qs.all()),
- 1: (_('Yes'), lambda qs, name: qs.filter(**{
- f'{name}': True,
+ None: (_("Either"), lambda qs, name: qs.all()),
+ 1: (_("Yes"), lambda qs, name: qs.filter(**{
+ f"{name}": True,
})),
- 2: (_('No'), lambda qs, name: qs.filter(**{
- f'{name}': False,
+ 2: (_("No"), lambda qs, name: qs.filter(**{
+ f"{name}": False,
})),
}
def __init__(self, *args, **kwargs):
- kwargs['choices'] = [
+ kwargs["choices"] = [
(key, value[0]) for key, value in six.iteritems(self.options)]
super().__init__(*args, **kwargs)
@@ -751,14 +751,14 @@ def was_accepted(self, qs, name):
return qs.filter(WAS_ACCEPTED_FINDINGS_QUERY)
options = {
- None: (_('Either'), any),
- 1: (_('Yes'), accepted),
- 2: (_('No'), not_accepted),
- 3: (_('Expired'), was_accepted),
+ None: (_("Either"), any),
+ 1: (_("Yes"), accepted),
+ 2: (_("No"), not_accepted),
+ 3: (_("Expired"), was_accepted),
}
def __init__(self, *args, **kwargs):
- kwargs['choices'] = [
+ kwargs["choices"] = [
(key, value[0]) for key, value in six.iteritems(self.options)]
super().__init__(*args, **kwargs)
@@ -786,8 +786,8 @@ def current_month(self, qs, name):
datetime(now().year, now().month, 1, 0, 0, 0))
self.end_date = now()
return qs.filter(**{
- f'{name}__year': self.start_date.year,
- f'{name}__month': self.start_date.month,
+ f"{name}__year": self.start_date.year,
+ f"{name}__month": self.start_date.month,
})
def current_year(self, qs, name):
@@ -795,15 +795,15 @@ def current_year(self, qs, name):
datetime(now().year, 1, 1, 0, 0, 0))
self.end_date = now()
return qs.filter(**{
- f'{name}__year': now().year,
+ f"{name}__year": now().year,
})
def past_x_days(self, qs, name, days):
self.start_date = _truncate(now() - timedelta(days=days))
self.end_date = _truncate(now() + timedelta(days=1))
return qs.filter(**{
- f'{name}__gte': self.start_date,
- f'{name}__lt': self.end_date,
+ f"{name}__gte": self.start_date,
+ f"{name}__lt": self.end_date,
})
def past_seven_days(self, qs, name):
@@ -822,18 +822,18 @@ def past_year(self, qs, name):
return self.past_x_days(qs, name, 365)
options = {
- None: (_('Past 30 days'), past_thirty_days),
- 1: (_('Past 7 days'), past_seven_days),
- 2: (_('Past 90 days'), past_ninety_days),
- 3: (_('Current month'), current_month),
- 4: (_('Current year'), current_year),
- 5: (_('Past 6 Months'), past_six_months),
- 6: (_('Past year'), past_year),
- 7: (_('Any date'), any),
+ None: (_("Past 30 days"), past_thirty_days),
+ 1: (_("Past 7 days"), past_seven_days),
+ 2: (_("Past 90 days"), past_ninety_days),
+ 3: (_("Current month"), current_month),
+ 4: (_("Current year"), current_year),
+ 5: (_("Past 6 Months"), past_six_months),
+ 6: (_("Past year"), past_year),
+ 7: (_("Any date"), any),
}
def __init__(self, *args, **kwargs):
- kwargs['choices'] = [
+ kwargs["choices"] = [
(key, value[0]) for key, value in six.iteritems(self.options)]
super().__init__(*args, **kwargs)
@@ -855,23 +855,23 @@ def filter(self, qs, value):
class ProductComponentFilter(DojoFilter):
- component_name = CharFilter(lookup_expr='icontains', label="Module Name")
- component_version = CharFilter(lookup_expr='icontains', label="Module Version")
+ component_name = CharFilter(lookup_expr="icontains", label="Module Name")
+ component_version = CharFilter(lookup_expr="icontains", label="Module Version")
o = OrderingFilter(
fields=(
- ('component_name', 'component_name'),
- ('component_version', 'component_version'),
- ('active', 'active'),
- ('duplicate', 'duplicate'),
- ('total', 'total'),
+ ("component_name", "component_name"),
+ ("component_version", "component_version"),
+ ("active", "active"),
+ ("duplicate", "duplicate"),
+ ("total", "total"),
),
field_labels={
- 'component_name': 'Component Name',
- 'component_version': 'Component Version',
- 'active': 'Active',
- 'duplicate': 'Duplicate',
- 'total': 'Total',
+ "component_name": "Component Name",
+ "component_version": "Component Version",
+ "active": "Active",
+ "duplicate": "Duplicate",
+ "total": "Total",
},
)
@@ -910,9 +910,9 @@ class ComponentFilter(ProductComponentFilter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.form.fields[
- 'test__engagement__product__prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_View)
+ "test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View)
self.form.fields[
- 'test__engagement__product'].queryset = get_authorized_products(Permissions.Product_View)
+ "test__engagement__product"].queryset = get_authorized_products(Permissions.Product_View)
class EngagementDirectFilterHelper(FilterSet):
@@ -1049,26 +1049,26 @@ class EngagementFilter(EngagementFilterHelper, DojoFilter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- self.form.fields['prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_View)
- self.form.fields['engagement__lead'].queryset = get_authorized_users(Permissions.Product_Type_View) \
+ self.form.fields["prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View)
+ self.form.fields["engagement__lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \
.filter(engagement__lead__isnull=False).distinct()
class Meta:
model = Product
- fields = ['name', 'prod_type']
+ fields = ["name", "prod_type"]
class ProductEngagementsFilter(DojoFilter):
- engagement__name = CharFilter(field_name='name', lookup_expr='icontains', label='Engagement name contains')
- engagement__lead = ModelChoiceFilter(field_name='lead', queryset=Dojo_User.objects.none(), label="Lead")
- engagement__version = CharFilter(field_name='version', lookup_expr='icontains', label='Engagement version')
- engagement__test__version = CharFilter(field_name='test__version', lookup_expr='icontains', label='Test version')
- engagement__status = MultipleChoiceFilter(field_name='status', choices=ENGAGEMENT_STATUS_CHOICES,
+ engagement__name = CharFilter(field_name="name", lookup_expr="icontains", label="Engagement name contains")
+ engagement__lead = ModelChoiceFilter(field_name="lead", queryset=Dojo_User.objects.none(), label="Lead")
+ engagement__version = CharFilter(field_name="version", lookup_expr="icontains", label="Engagement version")
+ engagement__test__version = CharFilter(field_name="test__version", lookup_expr="icontains", label="Test version")
+ engagement__status = MultipleChoiceFilter(field_name="status", choices=ENGAGEMENT_STATUS_CHOICES,
label="Status")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- self.form.fields['engagement__lead'].queryset = get_authorized_users(Permissions.Product_Type_View) \
+ self.form.fields["engagement__lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \
.filter(engagement__lead__isnull=False).distinct()
class Meta:
@@ -1108,36 +1108,36 @@ class EngagementFilterWithoutObjectLookups(EngagementFilterHelper):
class Meta:
model = Product
- fields = ['name']
+ fields = ["name"]
class ProductEngagementFilterHelper(FilterSet):
- version = CharFilter(lookup_expr='icontains', label='Engagement version')
- test__version = CharFilter(field_name='test__version', lookup_expr='icontains', label='Test version')
- name = CharFilter(lookup_expr='icontains')
+ version = CharFilter(lookup_expr="icontains", label="Engagement version")
+ test__version = CharFilter(field_name="test__version", lookup_expr="icontains", label="Test version")
+ name = CharFilter(lookup_expr="icontains")
status = MultipleChoiceFilter(choices=ENGAGEMENT_STATUS_CHOICES, label="Status")
target_start = DateRangeFilter()
target_end = DateRangeFilter()
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains')
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True)
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains")
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('name', 'name'),
- ('version', 'version'),
- ('target_start', 'target_start'),
- ('target_end', 'target_end'),
- ('status', 'status'),
- ('lead', 'lead'),
+ ("name", "name"),
+ ("version", "version"),
+ ("target_start", "target_start"),
+ ("target_end", "target_end"),
+ ("status", "status"),
+ ("lead", "lead"),
),
field_labels={
- 'name': 'Engagement Name',
+ "name": "Engagement Name",
},
)
class Meta:
model = Product
- fields = ['name']
+ fields = ["name"]
class ProductEngagementFilter(ProductEngagementFilterHelper, DojoFilter):
@@ -1172,87 +1172,87 @@ class ProductEngagementFilterWithoutObjectLookups(ProductEngagementFilterHelper,
class ApiEngagementFilter(DojoFilter):
- product__prod_type = NumberInFilter(field_name='product__prod_type', lookup_expr='in')
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
- tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags')
- product__tags = CharFieldInFilter(field_name='product__tags__name',
- lookup_expr='in',
- help_text='Comma separated list of exact tags present on product')
-
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
- not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags not present on model', exclude='True')
- not_product__tags = CharFieldInFilter(field_name='product__tags__name',
- lookup_expr='in',
- help_text='Comma separated list of exact tags not present on product',
- exclude='True')
- has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags')
+ product__prod_type = NumberInFilter(field_name="product__prod_type", lookup_expr="in")
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains")
+ tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags")
+ product__tags = CharFieldInFilter(field_name="product__tags__name",
+ lookup_expr="in",
+ help_text="Comma separated list of exact tags present on product")
+
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True")
+ not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags not present on model", exclude="True")
+ not_product__tags = CharFieldInFilter(field_name="product__tags__name",
+ lookup_expr="in",
+ help_text="Comma separated list of exact tags not present on product",
+ exclude="True")
+ has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('name', 'name'),
- ('version', 'version'),
- ('target_start', 'target_start'),
- ('target_end', 'target_end'),
- ('status', 'status'),
- ('lead', 'lead'),
- ('created', 'created'),
- ('updated', 'updated'),
+ ("name", "name"),
+ ("version", "version"),
+ ("target_start", "target_start"),
+ ("target_end", "target_end"),
+ ("status", "status"),
+ ("lead", "lead"),
+ ("created", "created"),
+ ("updated", "updated"),
),
field_labels={
- 'name': 'Engagement Name',
+ "name": "Engagement Name",
},
)
class Meta:
model = Engagement
- fields = ['id', 'active', 'target_start',
- 'target_end', 'requester', 'report_type',
- 'updated', 'threat_model', 'api_test',
- 'pen_test', 'status', 'product', 'name', 'version', 'tags']
+ fields = ["id", "active", "target_start",
+ "target_end", "requester", "report_type",
+ "updated", "threat_model", "api_test",
+ "pen_test", "status", "product", "name", "version", "tags"]
class ProductFilterHelper(FilterSet):
- name = CharFilter(lookup_expr='icontains', label="Product Name")
- name_exact = CharFilter(field_name='name', lookup_expr='iexact', label="Exact Product Name")
+ name = CharFilter(lookup_expr="icontains", label="Product Name")
+ name_exact = CharFilter(field_name="name", lookup_expr="iexact", label="Exact Product Name")
business_criticality = MultipleChoiceFilter(choices=Product.BUSINESS_CRITICALITY_CHOICES, null_label="Empty")
platform = MultipleChoiceFilter(choices=Product.PLATFORM_CHOICES, null_label="Empty")
lifecycle = MultipleChoiceFilter(choices=Product.LIFECYCLE_CHOICES, null_label="Empty")
origin = MultipleChoiceFilter(choices=Product.ORIGIN_CHOICES, null_label="Empty")
- external_audience = BooleanFilter(field_name='external_audience')
- internet_accessible = BooleanFilter(field_name='internet_accessible')
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label="Tag contains")
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True)
+ external_audience = BooleanFilter(field_name="external_audience")
+ internet_accessible = BooleanFilter(field_name="internet_accessible")
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag contains")
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)
outside_of_sla = ProductSLAFilter(label="Outside of SLA")
- has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags')
+ has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('name', 'name'),
- ('name_exact', 'name_exact'),
- ('prod_type__name', 'prod_type__name'),
- ('business_criticality', 'business_criticality'),
- ('platform', 'platform'),
- ('lifecycle', 'lifecycle'),
- ('origin', 'origin'),
- ('external_audience', 'external_audience'),
- ('internet_accessible', 'internet_accessible'),
- ('findings_count', 'findings_count'),
+ ("name", "name"),
+ ("name_exact", "name_exact"),
+ ("prod_type__name", "prod_type__name"),
+ ("business_criticality", "business_criticality"),
+ ("platform", "platform"),
+ ("lifecycle", "lifecycle"),
+ ("origin", "origin"),
+ ("external_audience", "external_audience"),
+ ("internet_accessible", "internet_accessible"),
+ ("findings_count", "findings_count"),
),
field_labels={
- 'name': 'Product Name',
- 'name_exact': 'Exact Product Name',
- 'prod_type__name': 'Product Type',
- 'business_criticality': 'Business Criticality',
- 'platform': 'Platform ',
- 'lifecycle': 'Lifecycle ',
- 'origin': 'Origin ',
- 'external_audience': 'External Audience ',
- 'internet_accessible': 'Internet Accessible ',
- 'findings_count': 'Findings Count ',
+ "name": "Product Name",
+ "name_exact": "Exact Product Name",
+ "prod_type__name": "Product Type",
+ "business_criticality": "Business Criticality",
+ "platform": "Platform ",
+ "lifecycle": "Lifecycle ",
+ "origin": "Origin ",
+ "external_audience": "External Audience ",
+ "internet_accessible": "Internet Accessible ",
+ "findings_count": "Findings Count ",
},
)
@@ -1313,35 +1313,35 @@ class Meta:
class ApiProductFilter(DojoFilter):
# BooleanFilter
- external_audience = BooleanFilter(field_name='external_audience')
- internet_accessible = BooleanFilter(field_name='internet_accessible')
+ external_audience = BooleanFilter(field_name="external_audience")
+ internet_accessible = BooleanFilter(field_name="internet_accessible")
# CharFilter
- name = CharFilter(lookup_expr='icontains')
- name_exact = CharFilter(field_name='name', lookup_expr='iexact')
- description = CharFilter(lookup_expr='icontains')
- business_criticality = CharFilter(method=custom_filter, field_name='business_criticality')
- platform = CharFilter(method=custom_filter, field_name='platform')
- lifecycle = CharFilter(method=custom_filter, field_name='lifecycle')
- origin = CharFilter(method=custom_filter, field_name='origin')
+ name = CharFilter(lookup_expr="icontains")
+ name_exact = CharFilter(field_name="name", lookup_expr="iexact")
+ description = CharFilter(lookup_expr="icontains")
+ business_criticality = CharFilter(method=custom_filter, field_name="business_criticality")
+ platform = CharFilter(method=custom_filter, field_name="platform")
+ lifecycle = CharFilter(method=custom_filter, field_name="lifecycle")
+ origin = CharFilter(method=custom_filter, field_name="origin")
# NumberInFilter
- id = NumberInFilter(field_name='id', lookup_expr='in')
- product_manager = NumberInFilter(field_name='product_manager', lookup_expr='in')
- technical_contact = NumberInFilter(field_name='technical_contact', lookup_expr='in')
- team_manager = NumberInFilter(field_name='team_manager', lookup_expr='in')
- prod_type = NumberInFilter(field_name='prod_type', lookup_expr='in')
- tid = NumberInFilter(field_name='tid', lookup_expr='in')
- prod_numeric_grade = NumberInFilter(field_name='prod_numeric_grade', lookup_expr='in')
- user_records = NumberInFilter(field_name='user_records', lookup_expr='in')
- regulations = NumberInFilter(field_name='regulations', lookup_expr='in')
-
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains')
- tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags')
-
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
- not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags not present on product', exclude='True')
- has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags')
+ id = NumberInFilter(field_name="id", lookup_expr="in")
+ product_manager = NumberInFilter(field_name="product_manager", lookup_expr="in")
+ technical_contact = NumberInFilter(field_name="technical_contact", lookup_expr="in")
+ team_manager = NumberInFilter(field_name="team_manager", lookup_expr="in")
+ prod_type = NumberInFilter(field_name="prod_type", lookup_expr="in")
+ tid = NumberInFilter(field_name="tid", lookup_expr="in")
+ prod_numeric_grade = NumberInFilter(field_name="prod_numeric_grade", lookup_expr="in")
+ user_records = NumberInFilter(field_name="user_records", lookup_expr="in")
+ regulations = NumberInFilter(field_name="regulations", lookup_expr="in")
+
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains")
+ tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags")
+
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True")
+ not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags not present on product", exclude="True")
+ has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")
outside_of_sla = extend_schema_field(OpenApiTypes.NUMBER)(ProductSLAFilter())
# DateRangeFilter
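Reviewer note (not part of the patch): several of the not_* filters above pass exclude="True" as a string rather than the boolean True, and the patch keeps that as-is, changing only the quotes. The exclusion still takes effect because django-filter only checks the flag's truthiness, and any non-empty string is truthy, as this small sketch shows:

    # Any non-empty string is truthy, so exclude="True" behaves like exclude=True.
    # The catch is that exclude="False" would ALSO enable exclusion, which is why
    # the boolean form used by has_tags (exclude=True) is the safer spelling.
    for flag in (True, "True", "False", "", False):
        print(repr(flag), "->", "exclude" if flag else "filter")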
@@ -1353,173 +1353,173 @@ class ApiProductFilter(DojoFilter):
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('id', 'id'),
- ('tid', 'tid'),
- ('name', 'name'),
- ('created', 'created'),
- ('prod_numeric_grade', 'prod_numeric_grade'),
- ('business_criticality', 'business_criticality'),
- ('platform', 'platform'),
- ('lifecycle', 'lifecycle'),
- ('origin', 'origin'),
- ('revenue', 'revenue'),
- ('external_audience', 'external_audience'),
- ('internet_accessible', 'internet_accessible'),
- ('product_manager', 'product_manager'),
- ('product_manager__first_name', 'product_manager__first_name'),
- ('product_manager__last_name', 'product_manager__last_name'),
- ('technical_contact', 'technical_contact'),
- ('technical_contact__first_name', 'technical_contact__first_name'),
- ('technical_contact__last_name', 'technical_contact__last_name'),
- ('team_manager', 'team_manager'),
- ('team_manager__first_name', 'team_manager__first_name'),
- ('team_manager__last_name', 'team_manager__last_name'),
- ('prod_type', 'prod_type'),
- ('prod_type__name', 'prod_type__name'),
- ('updated', 'updated'),
- ('user_records', 'user_records'),
+ ("id", "id"),
+ ("tid", "tid"),
+ ("name", "name"),
+ ("created", "created"),
+ ("prod_numeric_grade", "prod_numeric_grade"),
+ ("business_criticality", "business_criticality"),
+ ("platform", "platform"),
+ ("lifecycle", "lifecycle"),
+ ("origin", "origin"),
+ ("revenue", "revenue"),
+ ("external_audience", "external_audience"),
+ ("internet_accessible", "internet_accessible"),
+ ("product_manager", "product_manager"),
+ ("product_manager__first_name", "product_manager__first_name"),
+ ("product_manager__last_name", "product_manager__last_name"),
+ ("technical_contact", "technical_contact"),
+ ("technical_contact__first_name", "technical_contact__first_name"),
+ ("technical_contact__last_name", "technical_contact__last_name"),
+ ("team_manager", "team_manager"),
+ ("team_manager__first_name", "team_manager__first_name"),
+ ("team_manager__last_name", "team_manager__last_name"),
+ ("prod_type", "prod_type"),
+ ("prod_type__name", "prod_type__name"),
+ ("updated", "updated"),
+ ("user_records", "user_records"),
),
)
class ApiFindingFilter(DojoFilter):
# BooleanFilter
- active = BooleanFilter(field_name='active')
- duplicate = BooleanFilter(field_name='duplicate')
- dynamic_finding = BooleanFilter(field_name='dynamic_finding')
- false_p = BooleanFilter(field_name='false_p')
- is_mitigated = BooleanFilter(field_name='is_mitigated')
- out_of_scope = BooleanFilter(field_name='out_of_scope')
- static_finding = BooleanFilter(field_name='static_finding')
- under_defect_review = BooleanFilter(field_name='under_defect_review')
- under_review = BooleanFilter(field_name='under_review')
- verified = BooleanFilter(field_name='verified')
- has_jira = BooleanFilter(field_name='jira_issue', lookup_expr='isnull', exclude=True)
+ active = BooleanFilter(field_name="active")
+ duplicate = BooleanFilter(field_name="duplicate")
+ dynamic_finding = BooleanFilter(field_name="dynamic_finding")
+ false_p = BooleanFilter(field_name="false_p")
+ is_mitigated = BooleanFilter(field_name="is_mitigated")
+ out_of_scope = BooleanFilter(field_name="out_of_scope")
+ static_finding = BooleanFilter(field_name="static_finding")
+ under_defect_review = BooleanFilter(field_name="under_defect_review")
+ under_review = BooleanFilter(field_name="under_review")
+ verified = BooleanFilter(field_name="verified")
+ has_jira = BooleanFilter(field_name="jira_issue", lookup_expr="isnull", exclude=True)
# CharFilter
- component_version = CharFilter(lookup_expr='icontains')
- component_name = CharFilter(lookup_expr='icontains')
+ component_version = CharFilter(lookup_expr="icontains")
+ component_name = CharFilter(lookup_expr="icontains")
vulnerability_id = CharFilter(method=custom_vulnerability_id_filter)
- description = CharFilter(lookup_expr='icontains')
- file_path = CharFilter(lookup_expr='icontains')
- hash_code = CharFilter(lookup_expr='icontains')
- impact = CharFilter(lookup_expr='icontains')
- mitigation = CharFilter(lookup_expr='icontains')
- numerical_severity = CharFilter(method=custom_filter, field_name='numerical_severity')
- param = CharFilter(lookup_expr='icontains')
- payload = CharFilter(lookup_expr='icontains')
- references = CharFilter(lookup_expr='icontains')
- severity = CharFilter(method=custom_filter, field_name='severity')
- severity_justification = CharFilter(lookup_expr='icontains')
- steps_to_reproduce = CharFilter(lookup_expr='icontains')
- unique_id_from_tool = CharFilter(lookup_expr='icontains')
- title = CharFilter(lookup_expr='icontains')
- product_name = CharFilter(lookup_expr='engagement__product__name__iexact', field_name='test', label='exact product name')
- product_name_contains = CharFilter(lookup_expr='engagement__product__name__icontains', field_name='test', label='exact product name')
- product_lifecycle = CharFilter(method=custom_filter, lookup_expr='engagement__product__lifecycle',
- field_name='test__engagement__product__lifecycle', label='Comma separated list of exact product lifecycles')
+ description = CharFilter(lookup_expr="icontains")
+ file_path = CharFilter(lookup_expr="icontains")
+ hash_code = CharFilter(lookup_expr="icontains")
+ impact = CharFilter(lookup_expr="icontains")
+ mitigation = CharFilter(lookup_expr="icontains")
+ numerical_severity = CharFilter(method=custom_filter, field_name="numerical_severity")
+ param = CharFilter(lookup_expr="icontains")
+ payload = CharFilter(lookup_expr="icontains")
+ references = CharFilter(lookup_expr="icontains")
+ severity = CharFilter(method=custom_filter, field_name="severity")
+ severity_justification = CharFilter(lookup_expr="icontains")
+ steps_to_reproduce = CharFilter(lookup_expr="icontains")
+ unique_id_from_tool = CharFilter(lookup_expr="icontains")
+ title = CharFilter(lookup_expr="icontains")
+ product_name = CharFilter(lookup_expr="engagement__product__name__iexact", field_name="test", label="exact product name")
+ product_name_contains = CharFilter(lookup_expr="engagement__product__name__icontains", field_name="test", label="exact product name")
+ product_lifecycle = CharFilter(method=custom_filter, lookup_expr="engagement__product__lifecycle",
+ field_name="test__engagement__product__lifecycle", label="Comma separated list of exact product lifecycles")
# DateRangeFilter
created = DateRangeFilter()
date = DateRangeFilter()
- on = DateFilter(field_name='date', lookup_expr='exact')
- before = DateFilter(field_name='date', lookup_expr='lt')
- after = DateFilter(field_name='date', lookup_expr='gt')
- jira_creation = DateRangeFilter(field_name='jira_issue__jira_creation')
- jira_change = DateRangeFilter(field_name='jira_issue__jira_change')
+ on = DateFilter(field_name="date", lookup_expr="exact")
+ before = DateFilter(field_name="date", lookup_expr="lt")
+ after = DateFilter(field_name="date", lookup_expr="gt")
+ jira_creation = DateRangeFilter(field_name="jira_issue__jira_creation")
+ jira_change = DateRangeFilter(field_name="jira_issue__jira_change")
last_reviewed = DateRangeFilter()
mitigated = DateRangeFilter()
# NumberInFilter
- cwe = NumberInFilter(field_name='cwe', lookup_expr='in')
- defect_review_requested_by = NumberInFilter(field_name='defect_review_requested_by', lookup_expr='in')
- endpoints = NumberInFilter(field_name='endpoints', lookup_expr='in')
- found_by = NumberInFilter(field_name='found_by', lookup_expr='in')
- id = NumberInFilter(field_name='id', lookup_expr='in')
- last_reviewed_by = NumberInFilter(field_name='last_reviewed_by', lookup_expr='in')
- mitigated_by = NumberInFilter(field_name='mitigated_by', lookup_expr='in')
- nb_occurences = NumberInFilter(field_name='nb_occurences', lookup_expr='in')
- reporter = NumberInFilter(field_name='reporter', lookup_expr='in')
- scanner_confidence = NumberInFilter(field_name='scanner_confidence', lookup_expr='in')
- review_requested_by = NumberInFilter(field_name='review_requested_by', lookup_expr='in')
- reviewers = NumberInFilter(field_name='reviewers', lookup_expr='in')
- sast_source_line = NumberInFilter(field_name='sast_source_line', lookup_expr='in')
- sonarqube_issue = NumberInFilter(field_name='sonarqube_issue', lookup_expr='in')
- test__test_type = NumberInFilter(field_name='test__test_type', lookup_expr='in', label='Test Type')
- test__engagement = NumberInFilter(field_name='test__engagement', lookup_expr='in')
- test__engagement__product = NumberInFilter(field_name='test__engagement__product', lookup_expr='in')
- test__engagement__product__prod_type = NumberInFilter(field_name='test__engagement__product__prod_type', lookup_expr='in')
- finding_group = NumberInFilter(field_name='finding_group', lookup_expr='in')
+ cwe = NumberInFilter(field_name="cwe", lookup_expr="in")
+ defect_review_requested_by = NumberInFilter(field_name="defect_review_requested_by", lookup_expr="in")
+ endpoints = NumberInFilter(field_name="endpoints", lookup_expr="in")
+ found_by = NumberInFilter(field_name="found_by", lookup_expr="in")
+ id = NumberInFilter(field_name="id", lookup_expr="in")
+ last_reviewed_by = NumberInFilter(field_name="last_reviewed_by", lookup_expr="in")
+ mitigated_by = NumberInFilter(field_name="mitigated_by", lookup_expr="in")
+ nb_occurences = NumberInFilter(field_name="nb_occurences", lookup_expr="in")
+ reporter = NumberInFilter(field_name="reporter", lookup_expr="in")
+ scanner_confidence = NumberInFilter(field_name="scanner_confidence", lookup_expr="in")
+ review_requested_by = NumberInFilter(field_name="review_requested_by", lookup_expr="in")
+ reviewers = NumberInFilter(field_name="reviewers", lookup_expr="in")
+ sast_source_line = NumberInFilter(field_name="sast_source_line", lookup_expr="in")
+ sonarqube_issue = NumberInFilter(field_name="sonarqube_issue", lookup_expr="in")
+ test__test_type = NumberInFilter(field_name="test__test_type", lookup_expr="in", label="Test Type")
+ test__engagement = NumberInFilter(field_name="test__engagement", lookup_expr="in")
+ test__engagement__product = NumberInFilter(field_name="test__engagement__product", lookup_expr="in")
+ test__engagement__product__prod_type = NumberInFilter(field_name="test__engagement__product__prod_type", lookup_expr="in")
+ finding_group = NumberInFilter(field_name="finding_group", lookup_expr="in")
# ReportRiskAcceptanceFilter
risk_acceptance = extend_schema_field(OpenApiTypes.NUMBER)(ReportRiskAcceptanceFilter())
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
- tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags')
- test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', help_text='Comma separated list of exact tags present on test')
- test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags present on engagement')
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains")
+ tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags")
+ test__tags = CharFieldInFilter(field_name="test__tags__name", lookup_expr="in", help_text="Comma separated list of exact tags present on test")
+ test__engagement__tags = CharFieldInFilter(field_name="test__engagement__tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags present on engagement")
test__engagement__product__tags = CharFieldInFilter(
- field_name='test__engagement__product__tags__name',
- lookup_expr='in',
- help_text='Comma separated list of exact tags present on product')
-
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
- not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags not present on model', exclude='True')
- not_test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', exclude='True', help_text='Comma separated list of exact tags present on test')
- not_test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags not present on engagement',
- exclude='True')
+ field_name="test__engagement__product__tags__name",
+ lookup_expr="in",
+ help_text="Comma separated list of exact tags present on product")
+
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True")
+ not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags not present on model", exclude="True")
+ not_test__tags = CharFieldInFilter(field_name="test__tags__name", lookup_expr="in", exclude="True", help_text="Comma separated list of exact tags present on test")
+ not_test__engagement__tags = CharFieldInFilter(field_name="test__engagement__tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags not present on engagement",
+ exclude="True")
not_test__engagement__product__tags = CharFieldInFilter(
- field_name='test__engagement__product__tags__name',
- lookup_expr='in',
- help_text='Comma separated list of exact tags not present on product',
- exclude='True')
- has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags')
+ field_name="test__engagement__product__tags__name",
+ lookup_expr="in",
+ help_text="Comma separated list of exact tags not present on product",
+ exclude="True")
+ has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")
outside_of_sla = extend_schema_field(OpenApiTypes.NUMBER)(FindingSLAFilter())
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('active', 'active'),
- ('component_name', 'component_name'),
- ('component_version', 'component_version'),
- ('created', 'created'),
- ('last_status_update', 'last_status_update'),
- ('last_reviewed', 'last_reviewed'),
- ('cwe', 'cwe'),
- ('date', 'date'),
- ('duplicate', 'duplicate'),
- ('dynamic_finding', 'dynamic_finding'),
- ('false_p', 'false_p'),
- ('found_by', 'found_by'),
- ('id', 'id'),
- ('is_mitigated', 'is_mitigated'),
- ('numerical_severity', 'numerical_severity'),
- ('out_of_scope', 'out_of_scope'),
- ('severity', 'severity'),
- ('reviewers', 'reviewers'),
- ('static_finding', 'static_finding'),
- ('test__engagement__product__name', 'test__engagement__product__name'),
- ('title', 'title'),
- ('under_defect_review', 'under_defect_review'),
- ('under_review', 'under_review'),
- ('verified', 'verified'),
+ ("active", "active"),
+ ("component_name", "component_name"),
+ ("component_version", "component_version"),
+ ("created", "created"),
+ ("last_status_update", "last_status_update"),
+ ("last_reviewed", "last_reviewed"),
+ ("cwe", "cwe"),
+ ("date", "date"),
+ ("duplicate", "duplicate"),
+ ("dynamic_finding", "dynamic_finding"),
+ ("false_p", "false_p"),
+ ("found_by", "found_by"),
+ ("id", "id"),
+ ("is_mitigated", "is_mitigated"),
+ ("numerical_severity", "numerical_severity"),
+ ("out_of_scope", "out_of_scope"),
+ ("severity", "severity"),
+ ("reviewers", "reviewers"),
+ ("static_finding", "static_finding"),
+ ("test__engagement__product__name", "test__engagement__product__name"),
+ ("title", "title"),
+ ("under_defect_review", "under_defect_review"),
+ ("under_review", "under_review"),
+ ("verified", "verified"),
),
)
class Meta:
model = Finding
- exclude = ['url', 'thread_id', 'notes', 'files',
- 'line', 'cve']
+ exclude = ["url", "thread_id", "notes", "files",
+ "line", "cve"]
class PercentageFilter(NumberFilter):
def __init__(self, *args, **kwargs):
- kwargs['method'] = self.filter_percentage
+ kwargs["method"] = self.filter_percentage
super().__init__(*args, **kwargs)
def filter_percentage(self, queryset, name, value):
- value = value / decimal.Decimal('100.0')
+ value = value / decimal.Decimal("100.0")
# Provide some wiggle room for filtering since the UI rounds to two places (and because floats):
# a user may enter 0.15, but we'll return everything in [0.0015, 0.0016).
# To do this, add to our value 1^(whatever the exponent for our least significant digit place is), but ensure
@@ -1535,15 +1535,15 @@ def filter_percentage(self, queryset, name, value):
class PercentageRangeFilter(RangeFilter):
def filter(self, qs, value):
if value is not None:
- start = value.start / decimal.Decimal('100.0') if value.start else None
- stop = value.stop / decimal.Decimal('100.0') if value.stop else None
+ start = value.start / decimal.Decimal("100.0") if value.start else None
+ stop = value.stop / decimal.Decimal("100.0") if value.stop else None
value = slice(start, stop)
return super().filter(qs, value)
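A small standard-library sketch (illustrative, not part of the patch) of the arithmetic described in the filter_percentage comment above: the UI supplies a percentage rounded to two places, the filter divides it by Decimal("100.0"), PercentageRangeFilter applies the same division to both bounds, and one unit in the least significant digit place gives the wiggle room:

    import decimal

    # The UI shows 0.15 (a percentage rounded to two places); the database
    # stores plain fractions such as 0.001523.
    user_input = decimal.Decimal("0.15")
    lower = user_input / decimal.Decimal("100.0")         # Decimal("0.0015")

    # Add 1 at the least significant digit place to get the upper bound,
    # so everything in [0.0015, 0.0016) is treated as a match.
    wiggle = decimal.Decimal(1).scaleb(lower.as_tuple().exponent)
    upper = lower + wiggle                                 # Decimal("0.0016")
    print(lower, upper)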
class FindingFilterHelper(FilterSet):
title = CharFilter(lookup_expr="icontains")
- date = DateFromToRangeFilter(field_name='date', label="Date Discovered")
+ date = DateFromToRangeFilter(field_name="date", label="Date Discovered")
on = DateFilter(field_name="date", lookup_expr="exact", label="On")
before = DateFilter(field_name="date", lookup_expr="lt", label="Before")
after = DateFilter(field_name="date", lookup_expr="gt", label="After")
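Side note (illustrative, not part of the patch): the before and after filters use the strict "lt" and "gt" lookups, so a finding dated exactly on the chosen day matches neither; only on (lookup "exact") matches it. In plain Python terms:

    import datetime

    finding_date = datetime.date(2024, 7, 1)
    boundary = datetime.date(2024, 7, 1)

    # "lt" and "gt" are strict comparisons, so a boundary date is matched
    # only by the "exact" lookup used by the on filter.
    print("before:", finding_date < boundary)   # False
    print("after: ", finding_date > boundary)   # False
    print("on:    ", finding_date == boundary)  # True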
@@ -1560,7 +1560,7 @@ class FindingFilterHelper(FilterSet):
file_path = CharFilter(lookup_expr="icontains")
param = CharFilter(lookup_expr="icontains")
payload = CharFilter(lookup_expr="icontains")
- test__test_type = ModelMultipleChoiceFilter(queryset=Test_Type.objects.all(), label='Test Type')
+ test__test_type = ModelMultipleChoiceFilter(queryset=Test_Type.objects.all(), label="Test Type")
endpoints__host = CharFilter(lookup_expr="icontains", label="Endpoint Host")
service = CharFilter(lookup_expr="icontains")
test__engagement__version = CharFilter(lookup_expr="icontains", label="Engagement Version")
@@ -1583,10 +1583,10 @@ class FindingFilterHelper(FilterSet):
if is_finding_groups_enabled():
has_finding_group = BooleanFilter(
- field_name='finding_group',
- lookup_expr='isnull',
+ field_name="finding_group",
+ lookup_expr="isnull",
exclude=True,
- label='Is Grouped')
+ label="Is Grouped")
if get_system_setting("enable_jira"):
has_jira_issue = BooleanFilter(
@@ -1630,28 +1630,28 @@ class FindingFilterHelper(FilterSet):
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('numerical_severity', 'numerical_severity'),
- ('date', 'date'),
- ('mitigated', 'mitigated'),
- ('risk_acceptance__created__date',
- 'risk_acceptance__created__date'),
- ('last_reviewed', 'last_reviewed'),
- ('title', 'title'),
- ('test__engagement__product__name',
- 'test__engagement__product__name'),
- ('service', 'service'),
- ('epss_score', 'epss_score'),
- ('epss_percentile', 'epss_percentile'),
+ ("numerical_severity", "numerical_severity"),
+ ("date", "date"),
+ ("mitigated", "mitigated"),
+ ("risk_acceptance__created__date",
+ "risk_acceptance__created__date"),
+ ("last_reviewed", "last_reviewed"),
+ ("title", "title"),
+ ("test__engagement__product__name",
+ "test__engagement__product__name"),
+ ("service", "service"),
+ ("epss_score", "epss_score"),
+ ("epss_percentile", "epss_percentile"),
),
field_labels={
- 'numerical_severity': 'Severity',
- 'date': 'Date',
- 'risk_acceptance__created__date': 'Acceptance Date',
- 'mitigated': 'Mitigated Date',
- 'title': 'Finding Name',
- 'test__engagement__product__name': 'Product Name',
- 'epss_score': 'EPSS Score',
- 'epss_percentile': 'EPSS Percentile',
+ "numerical_severity": "Severity",
+ "date": "Date",
+ "risk_acceptance__created__date": "Acceptance Date",
+ "mitigated": "Mitigated Date",
+ "title": "Finding Name",
+ "test__engagement__product__name": "Product Name",
+ "epss_score": "EPSS Score",
+ "epss_percentile": "EPSS Percentile",
},
)
@@ -1659,11 +1659,11 @@ def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def set_date_fields(self, *args: list, **kwargs: dict):
- date_input_widget = forms.DateInput(attrs={'class': 'datepicker', 'placeholder': 'YYYY-MM-DD'}, format="%Y-%m-%d")
- self.form.fields['on'].widget = date_input_widget
- self.form.fields['before'].widget = date_input_widget
- self.form.fields['after'].widget = date_input_widget
- self.form.fields['cwe'].choices = cwe_options(self.queryset)
+ date_input_widget = forms.DateInput(attrs={"class": "datepicker", "placeholder": "YYYY-MM-DD"}, format="%Y-%m-%d")
+ self.form.fields["on"].widget = date_input_widget
+ self.form.fields["before"].widget = date_input_widget
+ self.form.fields["after"].widget = date_input_widget
+ self.form.fields["cwe"].choices = cwe_options(self.queryset)
class FindingFilterWithoutObjectLookups(FindingFilterHelper, FindingTagStringFilter):
@@ -1746,34 +1746,34 @@ class Meta:
model = Finding
fields = get_finding_filterset_fields(filter_string_matching=True)
- exclude = ['url', 'description', 'mitigation', 'impact',
- 'endpoints', 'references',
- 'thread_id', 'notes', 'scanner_confidence',
- 'numerical_severity', 'line', 'duplicate_finding',
- 'hash_code', 'reviewers', 'created', 'files',
- 'sla_start_date', 'sla_expiration_date', 'cvssv3',
- 'severity_justification', 'steps_to_reproduce']
+ exclude = ["url", "description", "mitigation", "impact",
+ "endpoints", "references",
+ "thread_id", "notes", "scanner_confidence",
+ "numerical_severity", "line", "duplicate_finding",
+ "hash_code", "reviewers", "created", "files",
+ "sla_start_date", "sla_expiration_date", "cvssv3",
+ "severity_justification", "steps_to_reproduce"]
def __init__(self, *args, **kwargs):
self.user = None
self.pid = None
- if 'user' in kwargs:
- self.user = kwargs.pop('user')
+ if "user" in kwargs:
+ self.user = kwargs.pop("user")
- if 'pid' in kwargs:
- self.pid = kwargs.pop('pid')
+ if "pid" in kwargs:
+ self.pid = kwargs.pop("pid")
super().__init__(*args, **kwargs)
# Set some date fields
self.set_date_fields(*args, **kwargs)
# Don't show the product filter on the product finding view
if self.pid:
- del self.form.fields['test__engagement__product__name']
- del self.form.fields['test__engagement__product__name_contains']
- del self.form.fields['test__engagement__product__prod_type__name']
- del self.form.fields['test__engagement__product__prod_type__name_contains']
+ del self.form.fields["test__engagement__product__name"]
+ del self.form.fields["test__engagement__product__name_contains"]
+ del self.form.fields["test__engagement__product__prod_type__name"]
+ del self.form.fields["test__engagement__product__prod_type__name_contains"]
else:
- del self.form.fields['test__name']
- del self.form.fields['test__name_contains']
+ del self.form.fields["test__name"]
+ del self.form.fields["test__name_contains"]
class FindingFilter(FindingFilterHelper, FindingTagFilter):
@@ -1784,7 +1784,7 @@ class FindingFilter(FindingFilterHelper, FindingTagFilter):
label="Product Type")
test__engagement__product__lifecycle = MultipleChoiceFilter(
choices=Product.LIFECYCLE_CHOICES,
- label='Product lifecycle')
+ label="Product lifecycle")
test__engagement__product = ModelMultipleChoiceFilter(
queryset=Product.objects.none(),
label="Product")
@@ -1804,22 +1804,22 @@ class Meta:
model = Finding
fields = get_finding_filterset_fields()
- exclude = ['url', 'description', 'mitigation', 'impact',
- 'endpoints', 'references',
- 'thread_id', 'notes', 'scanner_confidence',
- 'numerical_severity', 'line', 'duplicate_finding',
- 'hash_code', 'reviewers', 'created', 'files',
- 'sla_start_date', 'sla_expiration_date', 'cvssv3',
- 'severity_justification', 'steps_to_reproduce']
+ exclude = ["url", "description", "mitigation", "impact",
+ "endpoints", "references",
+ "thread_id", "notes", "scanner_confidence",
+ "numerical_severity", "line", "duplicate_finding",
+ "hash_code", "reviewers", "created", "files",
+ "sla_start_date", "sla_expiration_date", "cvssv3",
+ "severity_justification", "steps_to_reproduce"]
def __init__(self, *args, **kwargs):
self.user = None
self.pid = None
- if 'user' in kwargs:
- self.user = kwargs.pop('user')
+ if "user" in kwargs:
+ self.user = kwargs.pop("user")
- if 'pid' in kwargs:
- self.pid = kwargs.pop('pid')
+ if "pid" in kwargs:
+ self.pid = kwargs.pop("pid")
super().__init__(*args, **kwargs)
# Set some date fields
self.set_date_fields(*args, **kwargs)
@@ -1828,25 +1828,25 @@ def __init__(self, *args, **kwargs):
def set_related_object_fields(self, *args: list, **kwargs: dict):
if self.pid is not None:
- del self.form.fields['test__engagement__product']
- del self.form.fields['test__engagement__product__prod_type']
+ del self.form.fields["test__engagement__product"]
+ del self.form.fields["test__engagement__product__prod_type"]
# TODO add authorized check to be sure
- self.form.fields['test__engagement'].queryset = Engagement.objects.filter(
+ self.form.fields["test__engagement"].queryset = Engagement.objects.filter(
product_id=self.pid,
).all()
- self.form.fields['test'].queryset = get_authorized_tests(Permissions.Test_View, product=self.pid).prefetch_related('test_type')
+ self.form.fields["test"].queryset = get_authorized_tests(Permissions.Test_View, product=self.pid).prefetch_related("test_type")
else:
self.form.fields[
- 'test__engagement__product__prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_View)
- self.form.fields['test__engagement'].queryset = get_authorized_engagements(Permissions.Engagement_View)
- del self.form.fields['test']
+ "test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View)
+ self.form.fields["test__engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View)
+ del self.form.fields["test"]
- if self.form.fields.get('test__engagement__product'):
- self.form.fields['test__engagement__product'].queryset = get_authorized_products(Permissions.Product_View)
- if self.form.fields.get('finding_group', None):
- self.form.fields['finding_group'].queryset = get_authorized_finding_groups(Permissions.Finding_Group_View)
- self.form.fields['reporter'].queryset = get_authorized_users(Permissions.Finding_View)
- self.form.fields['reviewers'].queryset = self.form.fields['reporter'].queryset
+ if self.form.fields.get("test__engagement__product"):
+ self.form.fields["test__engagement__product"].queryset = get_authorized_products(Permissions.Product_View)
+ if self.form.fields.get("finding_group", None):
+ self.form.fields["finding_group"].queryset = get_authorized_finding_groups(Permissions.Finding_Group_View)
+ self.form.fields["reporter"].queryset = get_authorized_users(Permissions.Finding_View)
+ self.form.fields["reviewers"].queryset = self.form.fields["reporter"].queryset
class AcceptedFindingFilter(FindingFilter):
@@ -1860,8 +1860,8 @@ class AcceptedFindingFilter(FindingFilter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- self.form.fields['risk_acceptance__owner'].queryset = get_authorized_users(Permissions.Finding_View)
- self.form.fields['risk_acceptance'].queryset = get_authorized_risk_acceptances(Permissions.Risk_Acceptance)
+ self.form.fields["risk_acceptance__owner"].queryset = get_authorized_users(Permissions.Finding_View)
+ self.form.fields["risk_acceptance"].queryset = get_authorized_risk_acceptances(Permissions.Risk_Acceptance)
class AcceptedFindingFilterWithoutObjectLookups(FindingFilterWithoutObjectLookups):
@@ -1890,7 +1890,7 @@ class AcceptedFindingFilterWithoutObjectLookups(FindingFilterWithoutObjectLookup
class SimilarFindingHelper(FilterSet):
hash_code = MultipleChoiceFilter()
- vulnerability_ids = CharFilter(method=custom_vulnerability_id_filter, label='Vulnerability Ids')
+ vulnerability_ids = CharFilter(method=custom_vulnerability_id_filter, label="Vulnerability Ids")
def update_data(self, data: dict, *args: list, **kwargs: dict):
# if filterset is bound, use initial values as defaults
@@ -1900,20 +1900,20 @@ def update_data(self, data: dict, *args: list, **kwargs: dict):
# get a mutable copy of the QueryDict
data = data.copy()
- data['vulnerability_ids'] = ','.join(self.finding.vulnerability_ids)
- data['cwe'] = self.finding.cwe
- data['file_path'] = self.finding.file_path
- data['line'] = self.finding.line
- data['unique_id_from_tool'] = self.finding.unique_id_from_tool
- data['test__test_type'] = self.finding.test.test_type
- data['test__engagement__product'] = self.finding.test.engagement.product
- data['test__engagement__product__prod_type'] = self.finding.test.engagement.product.prod_type
+ data["vulnerability_ids"] = ",".join(self.finding.vulnerability_ids)
+ data["cwe"] = self.finding.cwe
+ data["file_path"] = self.finding.file_path
+ data["line"] = self.finding.line
+ data["unique_id_from_tool"] = self.finding.unique_id_from_tool
+ data["test__test_type"] = self.finding.test.test_type
+ data["test__engagement__product"] = self.finding.test.engagement.product
+ data["test__engagement__product__prod_type"] = self.finding.test.engagement.product.prod_type
self.has_changed = False
def set_hash_codes(self, *args: list, **kwargs: dict):
if self.finding and self.finding.hash_code:
- self.form.fields['hash_code'] = forms.MultipleChoiceField(choices=[(self.finding.hash_code, self.finding.hash_code[:24] + '...')], required=False, initial=[])
+ self.form.fields["hash_code"] = forms.MultipleChoiceField(choices=[(self.finding.hash_code, self.finding.hash_code[:24] + "...")], required=False, initial=[])
def filter_queryset(self, *args: list, **kwargs: dict):
queryset = super().filter_queryset(*args, **kwargs)
@@ -1930,11 +1930,11 @@ class Meta(FindingFilter.Meta):
def __init__(self, data=None, *args, **kwargs):
self.user = None
- if 'user' in kwargs:
- self.user = kwargs.pop('user')
+ if "user" in kwargs:
+ self.user = kwargs.pop("user")
self.finding = None
- if 'finding' in kwargs:
- self.finding = kwargs.pop('finding')
+ if "finding" in kwargs:
+ self.finding = kwargs.pop("finding")
self.update_data(data, *args, **kwargs)
super().__init__(data, *args, **kwargs)
self.set_hash_codes(*args, **kwargs)
@@ -1948,133 +1948,133 @@ class Meta(FindingFilterWithoutObjectLookups.Meta):
def __init__(self, data=None, *args, **kwargs):
self.user = None
- if 'user' in kwargs:
- self.user = kwargs.pop('user')
+ if "user" in kwargs:
+ self.user = kwargs.pop("user")
self.finding = None
- if 'finding' in kwargs:
- self.finding = kwargs.pop('finding')
+ if "finding" in kwargs:
+ self.finding = kwargs.pop("finding")
self.update_data(data, *args, **kwargs)
super().__init__(data, *args, **kwargs)
self.set_hash_codes(*args, **kwargs)
class TemplateFindingFilter(DojoFilter):
- title = CharFilter(lookup_expr='icontains')
+ title = CharFilter(lookup_expr="icontains")
cwe = MultipleChoiceFilter(choices=[])
severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES)
tags = ModelMultipleChoiceFilter(
- field_name='tags__name',
- to_field_name='name',
- queryset=Finding.tags.tag_model.objects.all().order_by('name'),
+ field_name="tags__name",
+ to_field_name="name",
+ queryset=Finding.tags.tag_model.objects.all().order_by("name"),
# label='tags', # doesn't work with tagulous, need to set in __init__ below
)
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains')
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains")
not_tags = ModelMultipleChoiceFilter(
- field_name='tags__name',
- to_field_name='name',
+ field_name="tags__name",
+ to_field_name="name",
exclude=True,
- queryset=Finding.tags.tag_model.objects.all().order_by('name'),
+ queryset=Finding.tags.tag_model.objects.all().order_by("name"),
# label='tags', # doesn't work with tagulous, need to set in __init__ below
)
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True)
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('cwe', 'cwe'),
- ('title', 'title'),
- ('numerical_severity', 'numerical_severity'),
+ ("cwe", "cwe"),
+ ("title", "title"),
+ ("numerical_severity", "numerical_severity"),
),
field_labels={
- 'numerical_severity': 'Severity',
+ "numerical_severity": "Severity",
},
)
class Meta:
model = Finding_Template
- exclude = ['description', 'mitigation', 'impact',
- 'references', 'numerical_severity']
+ exclude = ["description", "mitigation", "impact",
+ "references", "numerical_severity"]
not_test__tags = ModelMultipleChoiceFilter(
- field_name='test__tags__name',
- to_field_name='name',
+ field_name="test__tags__name",
+ to_field_name="name",
exclude=True,
- label='Test without tags',
- queryset=Test.tags.tag_model.objects.all().order_by('name'),
+ label="Test without tags",
+ queryset=Test.tags.tag_model.objects.all().order_by("name"),
# label='tags', # doesn't work with tagulous, need to set in __init__ below
)
not_test__engagement__tags = ModelMultipleChoiceFilter(
- field_name='test__engagement__tags__name',
- to_field_name='name',
+ field_name="test__engagement__tags__name",
+ to_field_name="name",
exclude=True,
- label='Engagement without tags',
- queryset=Engagement.tags.tag_model.objects.all().order_by('name'),
+ label="Engagement without tags",
+ queryset=Engagement.tags.tag_model.objects.all().order_by("name"),
# label='tags', # doesn't work with tagulous, need to set in __init__ below
)
not_test__engagement__product__tags = ModelMultipleChoiceFilter(
- field_name='test__engagement__product__tags__name',
- to_field_name='name',
+ field_name="test__engagement__product__tags__name",
+ to_field_name="name",
exclude=True,
- label='Product without tags',
- queryset=Product.tags.tag_model.objects.all().order_by('name'),
+ label="Product without tags",
+ queryset=Product.tags.tag_model.objects.all().order_by("name"),
# label='tags', # doesn't work with tagulous, need to set in __init__ below
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- self.form.fields['cwe'].choices = cwe_options(self.queryset)
+ self.form.fields["cwe"].choices = cwe_options(self.queryset)
class ApiTemplateFindingFilter(DojoFilter):
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
- tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags')
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains")
+ tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags")
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
- not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags not present on model', exclude='True')
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True")
+ not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags not present on model", exclude="True")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('title', 'title'),
- ('cwe', 'cwe'),
+ ("title", "title"),
+ ("cwe", "cwe"),
),
)
class Meta:
model = Finding_Template
- fields = ['id', 'title', 'cwe', 'severity', 'description',
- 'mitigation']
+ fields = ["id", "title", "cwe", "severity", "description",
+ "mitigation"]
class MetricsFindingFilter(FindingFilter):
- start_date = DateFilter(field_name='date', label='Start Date', lookup_expr=('gt'))
- end_date = DateFilter(field_name='date', label='End Date', lookup_expr=('lt'))
+ start_date = DateFilter(field_name="date", label="Start Date", lookup_expr=("gt"))
+ end_date = DateFilter(field_name="date", label="End Date", lookup_expr=("lt"))
date = MetricsDateRangeFilter()
- vulnerability_id = CharFilter(method=vulnerability_id_filter, label='Vulnerability Id')
+ vulnerability_id = CharFilter(method=vulnerability_id_filter, label="Vulnerability Id")
not_tags = ModelMultipleChoiceFilter(
- field_name='tags__name',
- to_field_name='name',
+ field_name="tags__name",
+ to_field_name="name",
exclude=True,
- queryset=Endpoint.tags.tag_model.objects.all().order_by('name'),
+ queryset=Endpoint.tags.tag_model.objects.all().order_by("name"),
# label='tags', # doesn't work with tagulous, need to set in __init__ below
)
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True)
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)
def __init__(self, *args, **kwargs):
if args[0]:
- if args[0].get('start_date', '') != '' or args[0].get('end_date', '') != '':
+ if args[0].get("start_date", "") != "" or args[0].get("end_date", "") != "":
args[0]._mutable = True
- args[0]['date'] = 8
+ args[0]["date"] = 8
args[0]._mutable = False
super().__init__(*args, **kwargs)
@@ -2085,26 +2085,26 @@ class Meta(FindingFilter.Meta):
class MetricsFindingFilterWithoutObjectLookups(FindingFilterWithoutObjectLookups):
- start_date = DateFilter(field_name='date', label='Start Date', lookup_expr=('gt'))
- end_date = DateFilter(field_name='date', label='End Date', lookup_expr=('lt'))
+ start_date = DateFilter(field_name="date", label="Start Date", lookup_expr=("gt"))
+ end_date = DateFilter(field_name="date", label="End Date", lookup_expr=("lt"))
date = MetricsDateRangeFilter()
- vulnerability_id = CharFilter(method=vulnerability_id_filter, label='Vulnerability Id')
+ vulnerability_id = CharFilter(method=vulnerability_id_filter, label="Vulnerability Id")
not_tags = ModelMultipleChoiceFilter(
- field_name='tags__name',
- to_field_name='name',
+ field_name="tags__name",
+ to_field_name="name",
exclude=True,
- queryset=Endpoint.tags.tag_model.objects.all().order_by('name'),
+ queryset=Endpoint.tags.tag_model.objects.all().order_by("name"),
# label='tags', # doesn't work with tagulous, need to set in __init__ below
)
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True)
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)
def __init__(self, *args, **kwargs):
if args[0]:
- if args[0].get('start_date', '') != '' or args[0].get('end_date', '') != '':
+ if args[0].get("start_date", "") != "" or args[0].get("end_date", "") != "":
args[0]._mutable = True
- args[0]['date'] = 8
+ args[0]["date"] = 8
args[0]._mutable = False
super().__init__(*args, **kwargs)
@@ -2134,60 +2134,60 @@ class MetricsEndpointFilter(MetricsEndpointFilterHelper):
queryset=Engagement.objects.none(),
label="Engagement")
endpoint__tags = ModelMultipleChoiceFilter(
- field_name='endpoint__tags__name',
- to_field_name='name',
- label='Endpoint tags',
- queryset=Endpoint.tags.tag_model.objects.all().order_by('name'))
+ field_name="endpoint__tags__name",
+ to_field_name="name",
+ label="Endpoint tags",
+ queryset=Endpoint.tags.tag_model.objects.all().order_by("name"))
finding__tags = ModelMultipleChoiceFilter(
- field_name='finding__tags__name',
- to_field_name='name',
- label='Finding tags',
- queryset=Finding.tags.tag_model.objects.all().order_by('name'))
+ field_name="finding__tags__name",
+ to_field_name="name",
+ label="Finding tags",
+ queryset=Finding.tags.tag_model.objects.all().order_by("name"))
finding__test__tags = ModelMultipleChoiceFilter(
- field_name='finding__test__tags__name',
- to_field_name='name',
- label='Test tags',
- queryset=Test.tags.tag_model.objects.all().order_by('name'))
+ field_name="finding__test__tags__name",
+ to_field_name="name",
+ label="Test tags",
+ queryset=Test.tags.tag_model.objects.all().order_by("name"))
finding__test__engagement__tags = ModelMultipleChoiceFilter(
- field_name='finding__test__engagement__tags__name',
- to_field_name='name',
- label='Engagement tags',
- queryset=Engagement.tags.tag_model.objects.all().order_by('name'))
+ field_name="finding__test__engagement__tags__name",
+ to_field_name="name",
+ label="Engagement tags",
+ queryset=Engagement.tags.tag_model.objects.all().order_by("name"))
finding__test__engagement__product__tags = ModelMultipleChoiceFilter(
- field_name='finding__test__engagement__product__tags__name',
- to_field_name='name',
- label='Product tags',
- queryset=Product.tags.tag_model.objects.all().order_by('name'))
+ field_name="finding__test__engagement__product__tags__name",
+ to_field_name="name",
+ label="Product tags",
+ queryset=Product.tags.tag_model.objects.all().order_by("name"))
not_endpoint__tags = ModelMultipleChoiceFilter(
- field_name='endpoint__tags__name',
- to_field_name='name',
+ field_name="endpoint__tags__name",
+ to_field_name="name",
exclude=True,
- label='Endpoint without tags',
- queryset=Endpoint.tags.tag_model.objects.all().order_by('name'))
+ label="Endpoint without tags",
+ queryset=Endpoint.tags.tag_model.objects.all().order_by("name"))
not_finding__tags = ModelMultipleChoiceFilter(
- field_name='finding__tags__name',
- to_field_name='name',
+ field_name="finding__tags__name",
+ to_field_name="name",
exclude=True,
- label='Finding without tags',
- queryset=Finding.tags.tag_model.objects.all().order_by('name'))
+ label="Finding without tags",
+ queryset=Finding.tags.tag_model.objects.all().order_by("name"))
not_finding__test__tags = ModelMultipleChoiceFilter(
- field_name='finding__test__tags__name',
- to_field_name='name',
+ field_name="finding__test__tags__name",
+ to_field_name="name",
exclude=True,
- label='Test without tags',
- queryset=Test.tags.tag_model.objects.all().order_by('name'))
+ label="Test without tags",
+ queryset=Test.tags.tag_model.objects.all().order_by("name"))
not_finding__test__engagement__tags = ModelMultipleChoiceFilter(
- field_name='finding__test__engagement__tags__name',
- to_field_name='name',
+ field_name="finding__test__engagement__tags__name",
+ to_field_name="name",
exclude=True,
- label='Engagement without tags',
- queryset=Engagement.tags.tag_model.objects.all().order_by('name'))
+ label="Engagement without tags",
+ queryset=Engagement.tags.tag_model.objects.all().order_by("name"))
not_finding__test__engagement__product__tags = ModelMultipleChoiceFilter(
- field_name='finding__test__engagement__product__tags__name',
- to_field_name='name',
+ field_name="finding__test__engagement__product__tags__name",
+ to_field_name="name",
exclude=True,
- label='Product without tags',
- queryset=Product.tags.tag_model.objects.all().order_by('name'))
+ label="Product without tags",
+ queryset=Product.tags.tag_model.objects.all().order_by("name"))
def __init__(self, *args, **kwargs):
if args[0]:
@@ -2370,21 +2370,21 @@ class Meta:
class EndpointFilterHelper(FilterSet):
- protocol = CharFilter(lookup_expr='icontains')
- userinfo = CharFilter(lookup_expr='icontains')
- host = CharFilter(lookup_expr='icontains')
+ protocol = CharFilter(lookup_expr="icontains")
+ userinfo = CharFilter(lookup_expr="icontains")
+ host = CharFilter(lookup_expr="icontains")
port = NumberFilter()
- path = CharFilter(lookup_expr='icontains')
- query = CharFilter(lookup_expr='icontains')
- fragment = CharFilter(lookup_expr='icontains')
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains')
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True)
- has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags')
+ path = CharFilter(lookup_expr="icontains")
+ query = CharFilter(lookup_expr="icontains")
+ fragment = CharFilter(lookup_expr="icontains")
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains")
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)
+ has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('product', 'product'),
- ('host', 'host'),
+ ("product", "product"),
+ ("host", "host"),
),
)
@@ -2394,67 +2394,67 @@ class EndpointFilter(EndpointFilterHelper, DojoFilter):
queryset=Product.objects.none(),
label="Product")
tags = ModelMultipleChoiceFilter(
- field_name='tags__name',
- to_field_name='name',
+ field_name="tags__name",
+ to_field_name="name",
label="Endpoint Tags",
- queryset=Endpoint.tags.tag_model.objects.all().order_by('name'))
+ queryset=Endpoint.tags.tag_model.objects.all().order_by("name"))
findings__tags = ModelMultipleChoiceFilter(
- field_name='findings__tags__name',
- to_field_name='name',
+ field_name="findings__tags__name",
+ to_field_name="name",
label="Finding Tags",
- queryset=Finding.tags.tag_model.objects.all().order_by('name'))
+ queryset=Finding.tags.tag_model.objects.all().order_by("name"))
findings__test__tags = ModelMultipleChoiceFilter(
- field_name='findings__test__tags__name',
- to_field_name='name',
+ field_name="findings__test__tags__name",
+ to_field_name="name",
label="Test Tags",
- queryset=Test.tags.tag_model.objects.all().order_by('name'))
+ queryset=Test.tags.tag_model.objects.all().order_by("name"))
findings__test__engagement__tags = ModelMultipleChoiceFilter(
- field_name='findings__test__engagement__tags__name',
- to_field_name='name',
+ field_name="findings__test__engagement__tags__name",
+ to_field_name="name",
label="Engagement Tags",
- queryset=Engagement.tags.tag_model.objects.all().order_by('name'))
+ queryset=Engagement.tags.tag_model.objects.all().order_by("name"))
findings__test__engagement__product__tags = ModelMultipleChoiceFilter(
- field_name='findings__test__engagement__product__tags__name',
- to_field_name='name',
+ field_name="findings__test__engagement__product__tags__name",
+ to_field_name="name",
label="Product Tags",
- queryset=Product.tags.tag_model.objects.all().order_by('name'))
+ queryset=Product.tags.tag_model.objects.all().order_by("name"))
not_tags = ModelMultipleChoiceFilter(
- field_name='tags__name',
- to_field_name='name',
+ field_name="tags__name",
+ to_field_name="name",
label="Not Endpoint Tags",
exclude=True,
- queryset=Endpoint.tags.tag_model.objects.all().order_by('name'))
+ queryset=Endpoint.tags.tag_model.objects.all().order_by("name"))
not_findings__tags = ModelMultipleChoiceFilter(
- field_name='findings__tags__name',
- to_field_name='name',
+ field_name="findings__tags__name",
+ to_field_name="name",
label="Not Finding Tags",
exclude=True,
- queryset=Finding.tags.tag_model.objects.all().order_by('name'))
+ queryset=Finding.tags.tag_model.objects.all().order_by("name"))
not_findings__test__tags = ModelMultipleChoiceFilter(
- field_name='findings__test__tags__name',
- to_field_name='name',
+ field_name="findings__test__tags__name",
+ to_field_name="name",
label="Not Test Tags",
exclude=True,
- queryset=Test.tags.tag_model.objects.all().order_by('name'))
+ queryset=Test.tags.tag_model.objects.all().order_by("name"))
not_findings__test__engagement__tags = ModelMultipleChoiceFilter(
- field_name='findings__test__engagement__tags__name',
- to_field_name='name',
+ field_name="findings__test__engagement__tags__name",
+ to_field_name="name",
label="Not Engagement Tags",
exclude=True,
- queryset=Engagement.tags.tag_model.objects.all().order_by('name'))
+ queryset=Engagement.tags.tag_model.objects.all().order_by("name"))
not_findings__test__engagement__product__tags = ModelMultipleChoiceFilter(
- field_name='findings__test__engagement__product__tags__name',
- to_field_name='name',
+ field_name="findings__test__engagement__product__tags__name",
+ to_field_name="name",
label="Not Product Tags",
exclude=True,
- queryset=Product.tags.tag_model.objects.all().order_by('name'))
+ queryset=Product.tags.tag_model.objects.all().order_by("name"))
def __init__(self, *args, **kwargs):
self.user = None
- if 'user' in kwargs:
- self.user = kwargs.pop('user')
+ if "user" in kwargs:
+ self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
- self.form.fields['product'].queryset = get_authorized_products(Permissions.Product_View)
+ self.form.fields["product"].queryset = get_authorized_products(Permissions.Product_View)
@property
def qs(self):
@@ -2593,8 +2593,8 @@ class EndpointFilterWithoutObjectLookups(EndpointFilterHelper):
def __init__(self, *args, **kwargs):
self.user = None
- if 'user' in kwargs:
- self.user = kwargs.pop('user')
+ if "user" in kwargs:
+ self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
@property
@@ -2608,67 +2608,67 @@ class Meta:
class ApiEndpointFilter(DojoFilter):
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
- tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags')
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains")
+ tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags")
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
- not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags not present on model', exclude='True')
- has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags')
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True")
+ not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags not present on model", exclude="True")
+ has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('host', 'host'),
- ('product', 'product'),
+ ("host", "host"),
+ ("product", "product"),
),
)
class Meta:
model = Endpoint
- fields = ['id', 'protocol', 'userinfo', 'host', 'port', 'path', 'query', 'fragment', 'product']
+ fields = ["id", "protocol", "userinfo", "host", "port", "path", "query", "fragment", "product"]
class ApiRiskAcceptanceFilter(DojoFilter):
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('name', 'name'),
+ ("name", "name"),
),
)
class Meta:
model = Risk_Acceptance
fields = [
- 'name', 'accepted_findings', 'recommendation', 'recommendation_details',
- 'decision', 'decision_details', 'accepted_by', 'owner', 'expiration_date',
- 'expiration_date_warned', 'expiration_date_handled', 'reactivate_expired',
- 'restart_sla_expired', 'notes',
+ "name", "accepted_findings", "recommendation", "recommendation_details",
+ "decision", "decision_details", "accepted_by", "owner", "expiration_date",
+ "expiration_date_warned", "expiration_date_handled", "reactivate_expired",
+ "restart_sla_expired", "notes",
]
class EngagementTestFilterHelper(FilterSet):
- version = CharFilter(lookup_expr='icontains', label='Version')
+ version = CharFilter(lookup_expr="icontains", label="Version")
if settings.TRACK_IMPORT_HISTORY:
- test_import__version = CharFilter(field_name='test_import__version', lookup_expr='icontains', label='Reimported Version')
+ test_import__version = CharFilter(field_name="test_import__version", lookup_expr="icontains", label="Reimported Version")
target_start = DateRangeFilter()
target_end = DateRangeFilter()
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains')
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True)
- has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags')
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains")
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)
+ has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('title', 'title'),
- ('version', 'version'),
- ('target_start', 'target_start'),
- ('target_end', 'target_end'),
- ('lead', 'lead'),
- ('api_scan_configuration', 'api_scan_configuration'),
+ ("title", "title"),
+ ("version", "version"),
+ ("target_start", "target_start"),
+ ("target_end", "target_end"),
+ ("lead", "lead"),
+ ("api_scan_configuration", "api_scan_configuration"),
),
field_labels={
- 'name': 'Test Name',
+ "name": "Test Name",
},
)
@@ -2697,11 +2697,11 @@ class Meta:
]
def __init__(self, *args, **kwargs):
- self.engagement = kwargs.pop('engagement')
+ self.engagement = kwargs.pop("engagement")
super(DojoFilter, self).__init__(*args, **kwargs)
- self.form.fields['test_type'].queryset = Test_Type.objects.filter(test__engagement=self.engagement).distinct().order_by('name')
- self.form.fields['api_scan_configuration'].queryset = Product_API_Scan_Configuration.objects.filter(product=self.engagement.product).distinct()
- self.form.fields['lead'].queryset = get_authorized_users(Permissions.Product_Type_View) \
+ self.form.fields["test_type"].queryset = Test_Type.objects.filter(test__engagement=self.engagement).distinct().order_by("name")
+ self.form.fields["api_scan_configuration"].queryset = Product_API_Scan_Configuration.objects.filter(product=self.engagement.product).distinct()
+ self.form.fields["lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \
.filter(test__lead__isnull=False).distinct()
@@ -2757,123 +2757,123 @@ class Meta:
]
def __init__(self, *args, **kwargs):
- self.engagement = kwargs.pop('engagement')
+ self.engagement = kwargs.pop("engagement")
super().__init__(*args, **kwargs)
- self.form.fields['test_type'].queryset = Test_Type.objects.filter(test__engagement=self.engagement).distinct().order_by('name')
+ self.form.fields["test_type"].queryset = Test_Type.objects.filter(test__engagement=self.engagement).distinct().order_by("name")
class ApiTestFilter(DojoFilter):
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
- tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags')
- engagement__tags = CharFieldInFilter(field_name='engagement__tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags present on engagement')
- engagement__product__tags = CharFieldInFilter(field_name='engagement__product__tags__name',
- lookup_expr='in',
- help_text='Comma separated list of exact tags present on product')
-
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
- not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags not present on model', exclude='True')
- not_engagement__tags = CharFieldInFilter(field_name='engagement__tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags not present on engagement',
- exclude='True')
- not_engagement__product__tags = CharFieldInFilter(field_name='engagement__product__tags__name',
- lookup_expr='in',
- help_text='Comma separated list of exact tags not present on product',
- exclude='True')
- has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags')
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains")
+ tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags")
+ engagement__tags = CharFieldInFilter(field_name="engagement__tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags present on engagement")
+ engagement__product__tags = CharFieldInFilter(field_name="engagement__product__tags__name",
+ lookup_expr="in",
+ help_text="Comma separated list of exact tags present on product")
+
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True")
+ not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags not present on model", exclude="True")
+ not_engagement__tags = CharFieldInFilter(field_name="engagement__tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags not present on engagement",
+ exclude="True")
+ not_engagement__product__tags = CharFieldInFilter(field_name="engagement__product__tags__name",
+ lookup_expr="in",
+ help_text="Comma separated list of exact tags not present on product",
+ exclude="True")
+ has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('title', 'title'),
- ('version', 'version'),
- ('target_start', 'target_start'),
- ('target_end', 'target_end'),
- ('test_type', 'test_type'),
- ('lead', 'lead'),
- ('version', 'version'),
- ('branch_tag', 'branch_tag'),
- ('build_id', 'build_id'),
- ('commit_hash', 'commit_hash'),
- ('api_scan_configuration', 'api_scan_configuration'),
- ('engagement', 'engagement'),
- ('created', 'created'),
- ('updated', 'updated'),
+ ("title", "title"),
+ ("version", "version"),
+ ("target_start", "target_start"),
+ ("target_end", "target_end"),
+ ("test_type", "test_type"),
+ ("lead", "lead"),
+ ("version", "version"),
+ ("branch_tag", "branch_tag"),
+ ("build_id", "build_id"),
+ ("commit_hash", "commit_hash"),
+ ("api_scan_configuration", "api_scan_configuration"),
+ ("engagement", "engagement"),
+ ("created", "created"),
+ ("updated", "updated"),
),
field_labels={
- 'name': 'Test Name',
+ "name": "Test Name",
},
)
class Meta:
model = Test
- fields = ['id', 'title', 'test_type', 'target_start',
- 'target_end', 'notes', 'percent_complete',
- 'actual_time', 'engagement', 'version',
- 'branch_tag', 'build_id', 'commit_hash',
- 'api_scan_configuration', 'scan_type']
+ fields = ["id", "title", "test_type", "target_start",
+ "target_end", "notes", "percent_complete",
+ "actual_time", "engagement", "version",
+ "branch_tag", "build_id", "commit_hash",
+ "api_scan_configuration", "scan_type"]
class ApiAppAnalysisFilter(DojoFilter):
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
- tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags')
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains")
+ tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags")
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
- not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma separated list of exact tags not present on model', exclude='True')
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True")
+ not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+ help_text="Comma separated list of exact tags not present on model", exclude="True")
class Meta:
model = App_Analysis
- fields = ['product', 'name', 'user', 'version']
+ fields = ["product", "name", "user", "version"]
class ApiCredentialsFilter(DojoFilter):
class Meta:
model = Cred_Mapping
- fields = '__all__'
+ fields = "__all__"
class EndpointReportFilter(DojoFilter):
- protocol = CharFilter(lookup_expr='icontains')
- userinfo = CharFilter(lookup_expr='icontains')
- host = CharFilter(lookup_expr='icontains')
+ protocol = CharFilter(lookup_expr="icontains")
+ userinfo = CharFilter(lookup_expr="icontains")
+ host = CharFilter(lookup_expr="icontains")
port = NumberFilter()
- path = CharFilter(lookup_expr='icontains')
- query = CharFilter(lookup_expr='icontains')
- fragment = CharFilter(lookup_expr='icontains')
- finding__severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES, label='Severity')
- finding__mitigated = ReportBooleanFilter(label='Finding Mitigated')
+ path = CharFilter(lookup_expr="icontains")
+ query = CharFilter(lookup_expr="icontains")
+ fragment = CharFilter(lookup_expr="icontains")
+ finding__severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES, label="Severity")
+ finding__mitigated = ReportBooleanFilter(label="Finding Mitigated")
tags = ModelMultipleChoiceFilter(
- field_name='tags__name',
- to_field_name='name',
- queryset=Endpoint.tags.tag_model.objects.all().order_by('name'),
+ field_name="tags__name",
+ to_field_name="name",
+ queryset=Endpoint.tags.tag_model.objects.all().order_by("name"),
# label='tags', # doesn't work with tagulous, need to set in __init__ below
)
- tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains')
+ tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains")
not_tags = ModelMultipleChoiceFilter(
- field_name='tags__name',
- to_field_name='name',
+ field_name="tags__name",
+ to_field_name="name",
exclude=True,
- queryset=Endpoint.tags.tag_model.objects.all().order_by('name'),
+ queryset=Endpoint.tags.tag_model.objects.all().order_by("name"),
# label='tags', # doesn't work with tagulous, need to set in __init__ below
)
- not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True)
+ not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)
class Meta:
model = Endpoint
- exclude = ['product']
+ exclude = ["product"]
class ReportFindingFilterHelper(FilterSet):
- title = CharFilter(lookup_expr='icontains', label='Name')
- date = DateFromToRangeFilter(field_name='date', label="Date Discovered")
+ title = CharFilter(lookup_expr="icontains", label="Name")
+ date = DateFromToRangeFilter(field_name="date", label="Date Discovered")
severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES)
active = ReportBooleanFilter()
is_mitigated = ReportBooleanFilter()
@@ -2884,30 +2884,30 @@ class ReportFindingFilterHelper(FilterSet):
duplicate = ReportBooleanFilter()
out_of_scope = ReportBooleanFilter()
outside_of_sla = FindingSLAFilter(label="Outside of SLA")
- file_path = CharFilter(lookup_expr='icontains')
+ file_path = CharFilter(lookup_expr="icontains")
class Meta:
model = Finding
# exclude sonarqube issue as by default it will show all without checking permissions
- exclude = ['date', 'cwe', 'url', 'description', 'mitigation', 'impact',
- 'references', 'sonarqube_issue', 'duplicate_finding',
- 'thread_id', 'notes', 'inherited_tags', 'endpoints',
- 'numerical_severity', 'reporter', 'last_reviewed',
- 'jira_creation', 'jira_change', 'files']
+ exclude = ["date", "cwe", "url", "description", "mitigation", "impact",
+ "references", "sonarqube_issue", "duplicate_finding",
+ "thread_id", "notes", "inherited_tags", "endpoints",
+ "numerical_severity", "reporter", "last_reviewed",
+ "jira_creation", "jira_change", "files"]
def manage_kwargs(self, kwargs):
self.prod_type = None
self.product = None
self.engagement = None
self.test = None
- if 'prod_type' in kwargs:
- self.prod_type = kwargs.pop('prod_type')
- if 'product' in kwargs:
- self.product = kwargs.pop('product')
- if 'engagement' in kwargs:
- self.engagement = kwargs.pop('engagement')
- if 'test' in kwargs:
- self.test = kwargs.pop('test')
+ if "prod_type" in kwargs:
+ self.prod_type = kwargs.pop("prod_type")
+ if "product" in kwargs:
+ self.product = kwargs.pop("product")
+ if "engagement" in kwargs:
+ self.engagement = kwargs.pop("engagement")
+ if "test" in kwargs:
+ self.test = kwargs.pop("test")
@property
def qs(self):
@@ -2931,36 +2931,36 @@ def __init__(self, *args, **kwargs):
    # duplicate_finding queryset needs to be restricted in line with permissions
# and inline with report scope to avoid a dropdown with 100K entries
- duplicate_finding_query_set = self.form.fields['duplicate_finding'].queryset
+ duplicate_finding_query_set = self.form.fields["duplicate_finding"].queryset
duplicate_finding_query_set = get_authorized_findings(Permissions.Finding_View, duplicate_finding_query_set)
if self.test:
duplicate_finding_query_set = duplicate_finding_query_set.filter(test=self.test)
- del self.form.fields['test__tags']
- del self.form.fields['test__engagement__tags']
- del self.form.fields['test__engagement__product__tags']
+ del self.form.fields["test__tags"]
+ del self.form.fields["test__engagement__tags"]
+ del self.form.fields["test__engagement__product__tags"]
if self.engagement:
duplicate_finding_query_set = duplicate_finding_query_set.filter(test__engagement=self.engagement)
- del self.form.fields['test__engagement__tags']
- del self.form.fields['test__engagement__product__tags']
+ del self.form.fields["test__engagement__tags"]
+ del self.form.fields["test__engagement__product__tags"]
elif self.product:
duplicate_finding_query_set = duplicate_finding_query_set.filter(test__engagement__product=self.product)
- del self.form.fields['test__engagement__product']
- del self.form.fields['test__engagement__product__tags']
+ del self.form.fields["test__engagement__product"]
+ del self.form.fields["test__engagement__product__tags"]
elif self.prod_type:
duplicate_finding_query_set = duplicate_finding_query_set.filter(test__engagement__product__prod_type=self.prod_type)
- del self.form.fields['test__engagement__product__prod_type']
+ del self.form.fields["test__engagement__product__prod_type"]
- self.form.fields['duplicate_finding'].queryset = duplicate_finding_query_set
+ self.form.fields["duplicate_finding"].queryset = duplicate_finding_query_set
- if 'test__engagement__product__prod_type' in self.form.fields:
+ if "test__engagement__product__prod_type" in self.form.fields:
self.form.fields[
- 'test__engagement__product__prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_View)
- if 'test__engagement__product' in self.form.fields:
+ "test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View)
+ if "test__engagement__product" in self.form.fields:
self.form.fields[
- 'test__engagement__product'].queryset = get_authorized_products(Permissions.Product_View)
- if 'test__engagement' in self.form.fields:
- self.form.fields['test__engagement'].queryset = get_authorized_engagements(Permissions.Engagement_View)
+ "test__engagement__product"].queryset = get_authorized_products(Permissions.Product_View)
+ if "test__engagement" in self.form.fields:
+ self.form.fields["test__engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View)
class ReportFindingFilterWithoutObjectLookups(ReportFindingFilterHelper, FindingTagStringFilter):
@@ -3120,62 +3120,62 @@ def __init__(self, *args, **kwargs):
class UserFilter(DojoFilter):
- first_name = CharFilter(lookup_expr='icontains')
- last_name = CharFilter(lookup_expr='icontains')
- username = CharFilter(lookup_expr='icontains')
- email = CharFilter(lookup_expr='icontains')
+ first_name = CharFilter(lookup_expr="icontains")
+ last_name = CharFilter(lookup_expr="icontains")
+ username = CharFilter(lookup_expr="icontains")
+ email = CharFilter(lookup_expr="icontains")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('username', 'username'),
- ('last_name', 'last_name'),
- ('first_name', 'first_name'),
- ('email', 'email'),
- ('is_active', 'is_active'),
- ('is_superuser', 'is_superuser'),
- ('date_joined', 'date_joined'),
- ('last_login', 'last_login'),
+ ("username", "username"),
+ ("last_name", "last_name"),
+ ("first_name", "first_name"),
+ ("email", "email"),
+ ("is_active", "is_active"),
+ ("is_superuser", "is_superuser"),
+ ("date_joined", "date_joined"),
+ ("last_login", "last_login"),
),
field_labels={
- 'username': 'User Name',
- 'is_active': 'Active',
- 'is_superuser': 'Superuser',
+ "username": "User Name",
+ "is_active": "Active",
+ "is_superuser": "Superuser",
},
)
class Meta:
model = Dojo_User
- fields = ['is_superuser', 'is_active', 'first_name', 'last_name', 'username', 'email']
+ fields = ["is_superuser", "is_active", "first_name", "last_name", "username", "email"]
class GroupFilter(DojoFilter):
- name = CharFilter(lookup_expr='icontains')
- description = CharFilter(lookup_expr='icontains')
+ name = CharFilter(lookup_expr="icontains")
+ description = CharFilter(lookup_expr="icontains")
class Meta:
model = Dojo_Group
- fields = ['name', 'description']
- exclude = ['users']
+ fields = ["name", "description"]
+ exclude = ["users"]
class TestImportFilter(DojoFilter):
- version = CharFilter(field_name='version', lookup_expr='icontains')
- version_exact = CharFilter(field_name='version', lookup_expr='iexact', label='Version Exact')
- branch_tag = CharFilter(lookup_expr='icontains', label='Branch/Tag')
- build_id = CharFilter(lookup_expr='icontains', label="Build ID")
- commit_hash = CharFilter(lookup_expr='icontains', label="Commit hash")
+ version = CharFilter(field_name="version", lookup_expr="icontains")
+ version_exact = CharFilter(field_name="version", lookup_expr="iexact", label="Version Exact")
+ branch_tag = CharFilter(lookup_expr="icontains", label="Branch/Tag")
+ build_id = CharFilter(lookup_expr="icontains", label="Build ID")
+ commit_hash = CharFilter(lookup_expr="icontains", label="Commit hash")
- findings_affected = BooleanFilter(field_name='findings_affected', lookup_expr='isnull', exclude=True, label='Findings affected')
+ findings_affected = BooleanFilter(field_name="findings_affected", lookup_expr="isnull", exclude=True, label="Findings affected")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('date', 'date'),
- ('version', 'version'),
- ('branch_tag', 'branch_tag'),
- ('build_id', 'build_id'),
- ('commit_hash', 'commit_hash'),
+ ("date", "date"),
+ ("version", "version"),
+ ("branch_tag", "branch_tag"),
+ ("build_id", "build_id"),
+ ("commit_hash", "commit_hash"),
),
)
@@ -3190,7 +3190,7 @@ class TestImportFindingActionFilter(DojoFilter):
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('action', 'action'),
+ ("action", "action"),
),
)
@@ -3208,87 +3208,87 @@ class LogEntryFilter(DojoFilter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- self.form.fields['actor'].queryset = get_authorized_users(Permissions.Product_View)
+ self.form.fields["actor"].queryset = get_authorized_users(Permissions.Product_View)
class Meta:
model = LogEntry
- exclude = ['content_type', 'object_pk', 'object_id', 'object_repr',
- 'changes', 'additional_data', 'remote_addr']
+ exclude = ["content_type", "object_pk", "object_id", "object_repr",
+ "changes", "additional_data", "remote_addr"]
filter_overrides = {
JSONField: {
- 'filter_class': CharFilter,
- 'extra': lambda f: {
- 'lookup_expr': 'icontains',
+ "filter_class": CharFilter,
+ "extra": lambda f: {
+ "lookup_expr": "icontains",
},
},
}
class ProductTypeFilter(DojoFilter):
- name = CharFilter(lookup_expr='icontains')
+ name = CharFilter(lookup_expr="icontains")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('name', 'name'),
+ ("name", "name"),
),
)
class Meta:
model = Product_Type
exclude = []
- include = ('name',)
+ include = ("name",)
class TestTypeFilter(DojoFilter):
- name = CharFilter(lookup_expr='icontains')
+ name = CharFilter(lookup_expr="icontains")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('name', 'name'),
+ ("name", "name"),
),
)
class Meta:
model = Test_Type
exclude = []
- include = ('name',)
+ include = ("name",)
class DevelopmentEnvironmentFilter(DojoFilter):
- name = CharFilter(lookup_expr='icontains')
+ name = CharFilter(lookup_expr="icontains")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('name', 'name'),
+ ("name", "name"),
),
)
class Meta:
model = Development_Environment
exclude = []
- include = ('name',)
+ include = ("name",)
class NoteTypesFilter(DojoFilter):
- name = CharFilter(lookup_expr='icontains')
+ name = CharFilter(lookup_expr="icontains")
o = OrderingFilter(
# tuple-mapping retains order
fields=(
- ('name', 'name'),
- ('description', 'description'),
- ('is_single', 'is_single'),
- ('is_mandatory', 'is_mandatory'),
+ ("name", "name"),
+ ("description", "description"),
+ ("is_single", "is_single"),
+ ("is_mandatory", "is_mandatory"),
),
)
class Meta:
model = Note_Type
exclude = []
- include = ('name', 'is_single', 'description')
+ include = ("name", "is_single", "description")
# ==============================
# Defect Dojo Engagement Surveys
@@ -3296,13 +3296,13 @@ class Meta:
class QuestionnaireFilter(FilterSet):
- name = CharFilter(lookup_expr='icontains')
- description = CharFilter(lookup_expr='icontains')
+ name = CharFilter(lookup_expr="icontains")
+ description = CharFilter(lookup_expr="icontains")
active = BooleanFilter()
class Meta:
model = Engagement_Survey
- exclude = ['questions']
+ exclude = ["questions"]
survey_set = FilterSet
@@ -3318,13 +3318,13 @@ def choice_question(self, qs, name):
return qs.filter(polymorphic_ctype=ContentType.objects.get_for_model(ChoiceQuestion))
options = {
- None: (_('Any'), any),
- 1: (_('Text Question'), text_question),
- 2: (_('Choice Question'), choice_question),
+ None: (_("Any"), any),
+ 1: (_("Text Question"), text_question),
+ 2: (_("Choice Question"), choice_question),
}
def __init__(self, *args, **kwargs):
- kwargs['choices'] = [
+ kwargs["choices"] = [
(key, value[0]) for key, value in six.iteritems(self.options)]
super().__init__(*args, **kwargs)
@@ -3338,11 +3338,11 @@ def filter(self, qs, value):
with warnings.catch_warnings(action="ignore", category=ManagerInheritanceWarning):
class QuestionFilter(FilterSet):
- text = CharFilter(lookup_expr='icontains')
+ text = CharFilter(lookup_expr="icontains")
type = QuestionTypeFilter()
class Meta:
model = Question
- exclude = ['polymorphic_ctype', 'created', 'modified', 'order']
+ exclude = ["polymorphic_ctype", "created", "modified", "order"]
question_set = FilterSet
diff --git a/dojo/finding/helper.py b/dojo/finding/helper.py
index 571e640790..d3ed4e3b70 100644
--- a/dojo/finding/helper.py
+++ b/dojo/finding/helper.py
@@ -51,7 +51,7 @@ def pre_save_finding_status_change(sender, instance, changed_fields=None, **kwar
# logger.debug('ignoring save of finding without id')
# return
- logger.debug('%i: changed status fields pre_save: %s', instance.id or 0, changed_fields)
+ logger.debug("%i: changed status fields pre_save: %s", instance.id or 0, changed_fields)
for field, (old, new) in changed_fields.items():
logger.debug("%i: %s changed from %s to %s" % (instance.id or 0, field, old, new))
@@ -82,9 +82,9 @@ def pre_save_finding_status_change(sender, instance, changed_fields=None, **kwar
def update_finding_status(new_state_finding, user, changed_fields=None):
now = timezone.now()
- logger.debug('changed fields: %s', changed_fields)
+ logger.debug("changed fields: %s", changed_fields)
- is_new_finding = not changed_fields or (changed_fields and len(changed_fields) == 1 and 'id' in changed_fields)
+ is_new_finding = not changed_fields or (changed_fields and len(changed_fields) == 1 and "id" in changed_fields)
# activated
# reactivated
@@ -94,11 +94,11 @@ def update_finding_status(new_state_finding, user, changed_fields=None):
# marked as duplicate
# marked as original
- if is_new_finding or 'is_mitigated' in changed_fields:
+ if is_new_finding or "is_mitigated" in changed_fields:
# finding is being mitigated
if new_state_finding.is_mitigated:
 # when mitigating a finding, the meta fields can only be edited if allowed
- logger.debug('finding being mitigated, set mitigated and mitigated_by fields')
+ logger.debug("finding being mitigated, set mitigated and mitigated_by fields")
if can_edit_mitigated_data(user):
# only set if it was not already set by user
@@ -117,7 +117,7 @@ def update_finding_status(new_state_finding, user, changed_fields=None):
new_state_finding.mitigated = new_state_finding.mitigated or now
new_state_finding.mitigated_by = new_state_finding.mitigated_by or user
- if is_new_finding or 'active' in changed_fields:
+ if is_new_finding or "active" in changed_fields:
# finding is being (re)activated
if new_state_finding.active:
new_state_finding.false_p = False
@@ -129,10 +129,10 @@ def update_finding_status(new_state_finding, user, changed_fields=None):
# finding is being deactivated
pass
- if is_new_finding or 'verified' in changed_fields:
+ if is_new_finding or "verified" in changed_fields:
pass
- if is_new_finding or 'false_p' in changed_fields or 'out_of_scope' in changed_fields:
+ if is_new_finding or "false_p" in changed_fields or "out_of_scope" in changed_fields:
# existing behaviour is that false_p or out_of_scope implies mitigated
if new_state_finding.false_p or new_state_finding.out_of_scope:
new_state_finding.mitigated = new_state_finding.mitigated or now
@@ -154,12 +154,12 @@ def can_edit_mitigated_data(user):
def create_finding_group(finds, finding_group_name):
- logger.debug('creating finding_group_create')
+ logger.debug("creating finding_group_create")
if not finds or len(finds) == 0:
- msg = 'cannot create empty Finding Group'
+ msg = "cannot create empty Finding Group"
raise ValueError(msg)
- finding_group_name_dummy = 'bulk group ' + strftime("%a, %d %b %Y %X", timezone.now().timetuple())
+ finding_group_name_dummy = "bulk group " + strftime("%a, %d %b %Y %X", timezone.now().timetuple())
finding_group = Finding_Group(test=finds[0].test)
finding_group.creator = get_current_user()
@@ -192,7 +192,7 @@ def add_to_finding_group(finding_group, finds):
# Now update the JIRA to add the finding to the finding group
if finding_group.has_jira_issue and jira_helper.get_jira_instance(finding_group).finding_jira_sync:
- logger.debug('pushing to jira from finding.finding_bulk_update_all()')
+ logger.debug("pushing to jira from finding.finding_bulk_update_all()")
jira_helper.push_to_jira(finding_group)
added = len(available_findings)
@@ -219,7 +219,7 @@ def remove_from_finding_group(finds):
# Now update the JIRA to remove the finding from the finding group
for group in affected_groups:
if group.has_jira_issue and jira_helper.get_jira_instance(group).finding_jira_sync:
- logger.debug('pushing to jira from finding.finding_bulk_update_all()')
+ logger.debug("pushing to jira from finding.finding_bulk_update_all()")
jira_helper.push_to_jira(group)
return affected_groups, removed, skipped
@@ -230,36 +230,36 @@ def update_finding_group(finding, finding_group):
if finding_group is not None:
if finding_group != finding.finding_group:
if finding.finding_group:
- logger.debug('removing finding %d from finding_group %s', finding.id, finding.finding_group)
+ logger.debug("removing finding %d from finding_group %s", finding.id, finding.finding_group)
finding.finding_group.findings.remove(finding)
- logger.debug('adding finding %d to finding_group %s', finding.id, finding_group)
+ logger.debug("adding finding %d to finding_group %s", finding.id, finding_group)
finding_group.findings.add(finding)
else:
if finding.finding_group:
- logger.debug('removing finding %d from finding_group %s', finding.id, finding.finding_group)
+ logger.debug("removing finding %d from finding_group %s", finding.id, finding.finding_group)
finding.finding_group.findings.remove(finding)
def get_group_by_group_name(finding, finding_group_by_option):
group_name = None
- if finding_group_by_option == 'component_name':
+ if finding_group_by_option == "component_name":
group_name = finding.component_name
- elif finding_group_by_option == 'component_name+component_version':
+ elif finding_group_by_option == "component_name+component_version":
if finding.component_name or finding.component_version:
- group_name = '{}:{}'.format((finding.component_name if finding.component_name else 'None'),
- (finding.component_version if finding.component_version else 'None'))
- elif finding_group_by_option == 'file_path':
+ group_name = "{}:{}".format((finding.component_name if finding.component_name else "None"),
+ (finding.component_version if finding.component_version else "None"))
+ elif finding_group_by_option == "file_path":
if finding.file_path:
- group_name = f'Filepath {finding.file_path}'
- elif finding_group_by_option == 'finding_title':
+ group_name = f"Filepath {finding.file_path}"
+ elif finding_group_by_option == "finding_title":
group_name = finding.title
else:
msg = f"Invalid group_by option {finding_group_by_option}"
raise ValueError(msg)
if group_name:
- return f'Findings in: {group_name}'
+ return f"Findings in: {group_name}"
return group_name
@@ -296,7 +296,7 @@ def group_findings_by(finds, finding_group_by_option):
# Now update the JIRA to add the finding to the finding group
for group in affected_groups:
if group.has_jira_issue and jira_helper.get_jira_instance(group).finding_jira_sync:
- logger.debug('pushing to jira from finding.finding_bulk_update_all()')
+ logger.debug("pushing to jira from finding.finding_bulk_update_all()")
jira_helper.push_to_jira(group)
return affected_groups, grouped, skipped, groups_created
@@ -306,14 +306,14 @@ def add_findings_to_auto_group(name, findings, group_by, create_finding_groups_f
if name is not None and findings is not None and len(findings) > 0:
creator = get_current_user()
if not creator:
- creator = kwargs.get('async_user', None)
+ creator = kwargs.get("async_user", None)
test = findings[0].test
if create_finding_groups_for_all_findings or len(findings) > 1:
# Only create a finding group if we have more than one finding for a given finding group, unless configured otherwise
finding_group, created = Finding_Group.objects.get_or_create(test=test, creator=creator, name=name)
if created:
- logger.debug('Created Finding Group %d:%s for test %d:%s', finding_group.id, finding_group, test.id, test)
+ logger.debug("Created Finding Group %d:%s for test %d:%s", finding_group.id, finding_group, test.id, test)
# See if we have old findings in the same test that were created without a finding group
# that should be added to this new group
old_findings = Finding.objects.filter(test=test)
@@ -385,7 +385,7 @@ def post_process_finding_save(finding, dedupe_option=True, rules_option=True, pr
# Adding a snippet here for push to JIRA so that it's in one place
if push_to_jira:
- logger.debug('pushing finding %s to jira from finding.save()', finding.pk)
+ logger.debug("pushing finding %s to jira from finding.save()", finding.pk)
import dojo.jira_link.helper as jira_helper
# current approach is that whenever a finding is in a group, the group will be pushed to JIRA
@@ -399,7 +399,7 @@ def post_process_finding_save(finding, dedupe_option=True, rules_option=True, pr
@receiver(pre_delete, sender=Finding)
def finding_pre_delete(sender, instance, **kwargs):
- logger.debug('finding pre_delete: %d', instance.id)
+ logger.debug("finding pre_delete: %d", instance.id)
# this shouldn't be necessary as Django should remove any Many-To-Many entries automatically, might be a bug in Django?
# https://code.djangoproject.com/ticket/154
@@ -407,7 +407,7 @@ def finding_pre_delete(sender, instance, **kwargs):
def finding_delete(instance, **kwargs):
- logger.debug('finding delete, instance: %s', instance.id)
+ logger.debug("finding delete, instance: %s", instance.id)
# the idea is that the engagement/test pre delete already prepared all the duplicates inside
# the test/engagement to no longer point to any original so they can be safely deleted.
@@ -415,7 +415,7 @@ def finding_delete(instance, **kwargs):
 # a manual / single finding delete, or a bulk delete of findings
# in which case we have to process all the duplicates
 # TODO: should we add the preprocessing also to the bulk edit form?
- logger.debug('finding_delete: refresh from db: pk: %d', instance.pk)
+ logger.debug("finding_delete: refresh from db: pk: %d", instance.pk)
try:
instance.refresh_from_db()
@@ -428,17 +428,17 @@ def finding_delete(instance, **kwargs):
if duplicate_cluster:
reconfigure_duplicate_cluster(instance, duplicate_cluster)
else:
- logger.debug('no duplicate cluster found for finding: %d, so no need to reconfigure', instance.id)
+ logger.debug("no duplicate cluster found for finding: %d, so no need to reconfigure", instance.id)
# this shouldn't be necessary as Django should remove any Many-To-Many entries automatically, might be a bug in Django?
# https://code.djangoproject.com/ticket/154
- logger.debug('finding delete: clearing found by')
+ logger.debug("finding delete: clearing found by")
instance.found_by.clear()
@receiver(post_delete, sender=Finding)
def finding_post_delete(sender, instance, **kwargs):
- logger.debug('finding post_delete, sender: %s instance: %s', to_str_typed(sender), to_str_typed(instance))
+ logger.debug("finding post_delete, sender: %s instance: %s", to_str_typed(sender), to_str_typed(instance))
# calculate_grade(instance.test.engagement.product)
@@ -448,7 +448,7 @@ def reset_duplicate_before_delete(dupe):
def reset_duplicates_before_delete(qs):
- mass_model_updater(Finding, qs, lambda f: reset_duplicate_before_delete(f), fields=['duplicate', 'duplicate_finding'])
+ mass_model_updater(Finding, qs, lambda f: reset_duplicate_before_delete(f), fields=["duplicate", "duplicate_finding"])
def set_new_original(finding, new_original):
@@ -466,13 +466,13 @@ def reconfigure_duplicate_cluster(original, cluster_outside):
return
if settings.DUPLICATE_CLUSTER_CASCADE_DELETE:
- cluster_outside.order_by('-id').delete()
+ cluster_outside.order_by("-id").delete()
else:
- logger.debug('reconfigure_duplicate_cluster: cluster_outside: %s', cluster_outside)
+ logger.debug("reconfigure_duplicate_cluster: cluster_outside: %s", cluster_outside)
# set new original to first finding in cluster (ordered by id)
- new_original = cluster_outside.order_by('id').first()
+ new_original = cluster_outside.order_by("id").first()
if new_original:
- logger.debug('changing original of duplicate cluster %d to: %s:%s', original.id, new_original.id, new_original.title)
+ logger.debug("changing original of duplicate cluster %d to: %s:%s", original.id, new_original.id, new_original.title)
new_original.duplicate = False
new_original.duplicate_finding = None
@@ -488,13 +488,13 @@ def reconfigure_duplicate_cluster(original, cluster_outside):
# find.duplicate_finding = new_original
# find.save_no_options()
- mass_model_updater(Finding, cluster_outside, lambda f: set_new_original(f, new_original), fields=['duplicate_finding'])
+ mass_model_updater(Finding, cluster_outside, lambda f: set_new_original(f, new_original), fields=["duplicate_finding"])
def prepare_duplicates_for_delete(test=None, engagement=None):
- logger.debug('prepare duplicates for delete, test: %s, engagement: %s', test.id if test else None, engagement.id if engagement else None)
+ logger.debug("prepare duplicates for delete, test: %s, engagement: %s", test.id if test else None, engagement.id if engagement else None)
if test is None and engagement is None:
- logger.warning('nothing to prepare as test and engagement are None')
+ logger.warning("nothing to prepare as test and engagement are None")
fix_loop_duplicates()
@@ -509,7 +509,7 @@ def prepare_duplicates_for_delete(test=None, engagement=None):
originals = originals.distinct()
if len(originals) == 0:
- logger.debug('no originals found, so no duplicates to prepare for deletion of original')
+ logger.debug("no originals found, so no duplicates to prepare for deletion of original")
return
# remove the link to the original from the duplicates inside the cluster so they can be safely deleted by the django framework
@@ -518,7 +518,7 @@ def prepare_duplicates_for_delete(test=None, engagement=None):
# logger.debug('originals: %s', [original.id for original in originals])
for original in originals:
i += 1
- logger.debug('%d/%d: preparing duplicate cluster for deletion of original: %d', i, total, original.id)
+ logger.debug("%d/%d: preparing duplicate cluster for deletion of original: %d", i, total, original.id)
cluster_inside = original.original_finding.all()
if engagement:
cluster_inside = cluster_inside.filter(test__engagement=engagement)
@@ -540,29 +540,29 @@ def prepare_duplicates_for_delete(test=None, engagement=None):
if len(cluster_outside) > 0:
reconfigure_duplicate_cluster(original, cluster_outside)
- logger.debug('done preparing duplicate cluster for deletion of original: %d', original.id)
+ logger.debug("done preparing duplicate cluster for deletion of original: %d", original.id)
@receiver(pre_delete, sender=Test)
def test_pre_delete(sender, instance, **kwargs):
- logger.debug('test pre_delete, sender: %s instance: %s', to_str_typed(sender), to_str_typed(instance))
+ logger.debug("test pre_delete, sender: %s instance: %s", to_str_typed(sender), to_str_typed(instance))
prepare_duplicates_for_delete(test=instance)
@receiver(post_delete, sender=Test)
def test_post_delete(sender, instance, **kwargs):
- logger.debug('test post_delete, sender: %s instance: %s', to_str_typed(sender), to_str_typed(instance))
+ logger.debug("test post_delete, sender: %s instance: %s", to_str_typed(sender), to_str_typed(instance))
@receiver(pre_delete, sender=Engagement)
def engagement_pre_delete(sender, instance, **kwargs):
- logger.debug('engagement pre_delete, sender: %s instance: %s', to_str_typed(sender), to_str_typed(instance))
+ logger.debug("engagement pre_delete, sender: %s instance: %s", to_str_typed(sender), to_str_typed(instance))
prepare_duplicates_for_delete(engagement=instance)
@receiver(post_delete, sender=Engagement)
def engagement_post_delete(sender, instance, **kwargs):
- logger.debug('engagement post_delete, sender: %s instance: %s', to_str_typed(sender), to_str_typed(instance))
+ logger.debug("engagement post_delete, sender: %s instance: %s", to_str_typed(sender), to_str_typed(instance))
def fix_loop_duplicates():
@@ -574,7 +574,7 @@ def fix_loop_duplicates():
if loop_count > 0:
deduplicationLogger.info("Identified %d Findings with Loops" % len(candidates))
- for find_id in candidates.values_list('id', flat=True):
+ for find_id in candidates.values_list("id", flat=True):
removeLoop(find_id, 50)
new_originals = Finding.objects.filter(duplicate_finding__isnull=True, duplicate=True)
@@ -634,12 +634,12 @@ def add_endpoints(new_finding, form):
for endpoint in added_endpoints:
endpoint_ids.append(endpoint.id)
- new_finding.endpoints.set(form.cleaned_data['endpoints'] | Endpoint.objects.filter(id__in=endpoint_ids))
+ new_finding.endpoints.set(form.cleaned_data["endpoints"] | Endpoint.objects.filter(id__in=endpoint_ids))
for endpoint in new_finding.endpoints.all():
_eps, _created = Endpoint_Status.objects.get_or_create(
finding=new_finding,
- endpoint=endpoint, defaults={'date': form.cleaned_data['date'] or timezone.now()})
+ endpoint=endpoint, defaults={"date": form.cleaned_data["date"] or timezone.now()})
def save_vulnerability_ids(finding, vulnerability_ids):
diff --git a/dojo/finding/queries.py b/dojo/finding/queries.py
index 35f54df1a4..7f213805a4 100644
--- a/dojo/finding/queries.py
+++ b/dojo/finding/queries.py
@@ -16,19 +16,19 @@
def get_authorized_groups(permission, user=None):
roles = get_roles_for_permission(permission)
authorized_product_type_roles = Product_Type_Member.objects.filter(
- product_type=OuterRef('test__engagement__product__prod_type_id'),
+ product_type=OuterRef("test__engagement__product__prod_type_id"),
user=user,
role__in=roles)
authorized_product_roles = Product_Member.objects.filter(
- product=OuterRef('test__engagement__product_id'),
+ product=OuterRef("test__engagement__product_id"),
user=user,
role__in=roles)
authorized_product_type_groups = Product_Type_Group.objects.filter(
- product_type=OuterRef('test__engagement__product__prod_type_id'),
+ product_type=OuterRef("test__engagement__product__prod_type_id"),
group__users=user,
role__in=roles)
authorized_product_groups = Product_Group.objects.filter(
- product=OuterRef('test__engagement__product_id'),
+ product=OuterRef("test__engagement__product_id"),
group__users=user,
role__in=roles)
@@ -131,19 +131,19 @@ def get_authorized_vulnerability_ids(permission, queryset=None, user=None):
roles = get_roles_for_permission(permission)
authorized_product_type_roles = Product_Type_Member.objects.filter(
- product_type=OuterRef('finding__test__engagement__product__prod_type_id'),
+ product_type=OuterRef("finding__test__engagement__product__prod_type_id"),
user=user,
role__in=roles)
authorized_product_roles = Product_Member.objects.filter(
- product=OuterRef('finding__test__engagement__product_id'),
+ product=OuterRef("finding__test__engagement__product_id"),
user=user,
role__in=roles)
authorized_product_type_groups = Product_Type_Group.objects.filter(
- product_type=OuterRef('finding__test__engagement__product__prod_type_id'),
+ product_type=OuterRef("finding__test__engagement__product__prod_type_id"),
group__users=user,
role__in=roles)
authorized_product_groups = Product_Group.objects.filter(
- product=OuterRef('finding__test__engagement__product_id'),
+ product=OuterRef("finding__test__engagement__product_id"),
group__users=user,
role__in=roles)
vulnerability_ids = vulnerability_ids.annotate(
diff --git a/dojo/finding/urls.py b/dojo/finding/urls.py
index 5e20fd1b6f..3b59624029 100644
--- a/dojo/finding/urls.py
+++ b/dojo/finding/urls.py
@@ -5,188 +5,188 @@
urlpatterns = [
# CRUD operations
re_path(
- r'^finding/(?P\d+)$',
+ r"^finding/(?P\d+)$",
views.ViewFinding.as_view(),
- name='view_finding',
+ name="view_finding",
),
re_path(
- r'^finding/(?P\d+)/edit$',
+ r"^finding/(?P\d+)/edit$",
views.EditFinding.as_view(),
- name='edit_finding',
+ name="edit_finding",
),
re_path(
- r'^finding/(?P\d+)/delete$',
+ r"^finding/(?P\d+)/delete$",
views.DeleteFinding.as_view(),
- name='delete_finding',
+ name="delete_finding",
),
# Listing operations
re_path(
- r'^finding$',
+ r"^finding$",
views.ListFindings.as_view(),
- name='all_findings',
+ name="all_findings",
),
re_path(
- r'^finding/open$',
+ r"^finding/open$",
views.ListOpenFindings.as_view(),
- name='open_findings',
+ name="open_findings",
),
re_path(
- r'^finding/verified$',
+ r"^finding/verified$",
views.ListVerifiedFindings.as_view(),
- name='verified_findings',
+ name="verified_findings",
),
re_path(
- r'^finding/closed$',
+ r"^finding/closed$",
views.ListClosedFindings.as_view(),
- name='closed_findings',
+ name="closed_findings",
),
re_path(
- r'^finding/accepted$',
+ r"^finding/accepted$",
views.ListAcceptedFindings.as_view(),
- name='accepted_findings',
+ name="accepted_findings",
),
re_path(
- r'^product/(?P\d+)/finding/open$',
+ r"^product/(?P\d+)/finding/open$",
views.ListOpenFindings.as_view(),
- name='product_open_findings',
+ name="product_open_findings",
),
re_path(
- r'^product/(?P\d+)/findings$',
+ r"^product/(?P\d+)/findings$",
views.ListOpenFindings.as_view(),
- name='view_product_findings_old',
+ name="view_product_findings_old",
),
re_path(
- r'^product/(?P\d+)/finding/verified$',
+ r"^product/(?P\d+)/finding/verified$",
views.ListVerifiedFindings.as_view(),
- name='product_verified_findings',
+ name="product_verified_findings",
),
re_path(
- r'^product/(?P\d+)/finding/out_of_scope$',
+ r"^product/(?P\d+)/finding/out_of_scope$",
views.ListOutOfScopeFindings.as_view(),
- name='product_out_of_scope_findings',
+ name="product_out_of_scope_findings",
),
re_path(
- r'^product/(?P\d+)/finding/inactive$',
+ r"^product/(?P\d+)/finding/inactive$",
views.ListInactiveFindings.as_view(),
- name='product_inactive_findings',
+ name="product_inactive_findings",
),
re_path(
- r'^product/(?P\d+)/finding/all$',
+ r"^product/(?P\d+)/finding/all$",
views.ListFindings.as_view(),
- name='product_all_findings',
+ name="product_all_findings",
),
re_path(
- r'^product/(?P\d+)/finding/closed$',
+ r"^product/(?P\d+)/finding/closed$",
views.ListClosedFindings.as_view(),
- name='product_closed_findings',
+ name="product_closed_findings",
),
re_path(
- r'^product/(?P\d+)/finding/false_positive$',
+ r"^product/(?P\d+)/finding/false_positive$",
views.ListFalsePositiveFindings.as_view(),
- name='product_false_positive_findings',
+ name="product_false_positive_findings",
),
re_path(
- r'^product/(?P\d+)/finding/accepted$',
+ r"^product/(?P\d+)/finding/accepted$",
views.ListAcceptedFindings.as_view(),
- name='product_accepted_findings',
+ name="product_accepted_findings",
),
re_path(
- r'^engagement/(?P\d+)/finding/open$',
+ r"^engagement/(?P\d+)/finding/open$",
views.ListOpenFindings.as_view(),
- name='engagement_open_findings',
+ name="engagement_open_findings",
),
re_path(
- r'^engagement/(?P\d+)/finding/closed$',
+ r"^engagement/(?P\d+)/finding/closed$",
views.ListClosedFindings.as_view(),
- name='engagement_closed_findings',
+ name="engagement_closed_findings",
),
re_path(
- r'^engagement/(?P\d+)/finding/verified$',
+ r"^engagement/(?P\d+)/finding/verified$",
views.ListVerifiedFindings.as_view(),
- name='engagement_verified_findings',
+ name="engagement_verified_findings",
),
re_path(
- r'^engagement/(?P\d+)/finding/accepted$',
+ r"^engagement/(?P\d+)/finding/accepted$",
views.ListAcceptedFindings.as_view(),
- name='engagement_accepted_findings',
+ name="engagement_accepted_findings",
),
re_path(
- r'^engagement/(?P\d+)/finding/all$',
+ r"^engagement/(?P\d+)/finding/all$",
views.ListFindings.as_view(),
- name='engagement_all_findings',
+ name="engagement_all_findings",
),
# findings
- re_path(r'^finding/bulk$', views.finding_bulk_update_all,
- name='finding_bulk_update_all'),
- re_path(r'^product/(?P\d+)/finding/bulk_product$', views.finding_bulk_update_all,
- name='finding_bulk_update_all_product'),
+ re_path(r"^finding/bulk$", views.finding_bulk_update_all,
+ name="finding_bulk_update_all"),
+ re_path(r"^product/(?P\d+)/finding/bulk_product$", views.finding_bulk_update_all,
+ name="finding_bulk_update_all_product"),
# re_path(r'^test/(?P\d+)/bulk', views.finding_bulk_update_all,
# name='finding_bulk_update_all_test'),
- re_path(r'^finding/(?P\d+)/touch$',
- views.touch_finding, name='touch_finding'),
- re_path(r'^finding/(?P\d+)/simple_risk_accept$',
- views.simple_risk_accept, name='simple_risk_accept_finding'),
- re_path(r'^finding/(?P\d+)/simple_risk_unaccept$',
- views.risk_unaccept, name='risk_unaccept_finding'),
- re_path(r'^finding/(?P\d+)/request_review$',
- views.request_finding_review, name='request_finding_review'),
- re_path(r'^finding/(?P\d+)/review$',
- views.clear_finding_review, name='clear_finding_review'),
- re_path(r'^finding/(?P\d+)/copy$',
- views.copy_finding, name='copy_finding'),
- re_path(r'^finding/(?P\d+)/apply_cwe$',
- views.apply_template_cwe, name='apply_template_cwe'),
- re_path(r'^finding/(?P\d+)/mktemplate$', views.mktemplate,
- name='mktemplate'),
- re_path(r'^finding/(?P\d+)/find_template_to_apply$', views.find_template_to_apply,
- name='find_template_to_apply'),
- re_path(r'^finding/(?P\d+)/(?P\d+)/choose_finding_template_options$', views.choose_finding_template_options,
- name='choose_finding_template_options'),
- re_path(r'^finding/(?P\d+)/(?P\d+)/apply_template_to_finding$',
- views.apply_template_to_finding, name='apply_template_to_finding'),
- re_path(r'^finding/(?P\d+)/close$', views.close_finding,
- name='close_finding'),
- re_path(r'^finding/(?P\d+)/defect_review$',
- views.defect_finding_review, name='defect_finding_review'),
- re_path(r'^finding/(?P\d+)/open$', views.reopen_finding,
- name='reopen_finding'),
- re_path(r'^finding/image/(?P[^/]+)$', views.download_finding_pic,
- name='download_finding_pic'),
- re_path(r'^finding/(?P\d+)/merge$',
- views.merge_finding_product, name='merge_finding'),
- re_path(r'^product/(?P\d+)/merge$', views.merge_finding_product,
- name='merge_finding_product'),
- re_path(r'^finding/(?P\d+)/duplicate/(?P\d+)$',
- views.mark_finding_duplicate, name='mark_finding_duplicate'),
- re_path(r'^finding/(?P\d+)/duplicate/reset$',
- views.reset_finding_duplicate_status, name='reset_finding_duplicate_status'),
- re_path(r'^finding/(?P\d+)/original/(?P\d+)$',
- views.set_finding_as_original, name='set_finding_as_original'),
- re_path(r'^finding/(?P\d+)/remediation_date$', views.remediation_date,
- name='remediation_date'),
+ re_path(r"^finding/(?P\d+)/touch$",
+ views.touch_finding, name="touch_finding"),
+ re_path(r"^finding/(?P\d+)/simple_risk_accept$",
+ views.simple_risk_accept, name="simple_risk_accept_finding"),
+ re_path(r"^finding/(?P\d+)/simple_risk_unaccept$",
+ views.risk_unaccept, name="risk_unaccept_finding"),
+ re_path(r"^finding/(?P\d+)/request_review$",
+ views.request_finding_review, name="request_finding_review"),
+ re_path(r"^finding/(?P\d+)/review$",
+ views.clear_finding_review, name="clear_finding_review"),
+ re_path(r"^finding/(?P\d+)/copy$",
+ views.copy_finding, name="copy_finding"),
+ re_path(r"^finding/(?P\d+)/apply_cwe$",
+ views.apply_template_cwe, name="apply_template_cwe"),
+ re_path(r"^finding/(?P\d+)/mktemplate$", views.mktemplate,
+ name="mktemplate"),
+ re_path(r"^finding/(?P\d+)/find_template_to_apply$", views.find_template_to_apply,
+ name="find_template_to_apply"),
+ re_path(r"^finding/(?P\d+)/(?P\d+)/choose_finding_template_options$", views.choose_finding_template_options,
+ name="choose_finding_template_options"),
+ re_path(r"^finding/(?P\d+)/(?P\d+)/apply_template_to_finding$",
+ views.apply_template_to_finding, name="apply_template_to_finding"),
+ re_path(r"^finding/(?P\d+)/close$", views.close_finding,
+ name="close_finding"),
+ re_path(r"^finding/(?P\d+)/defect_review$",
+ views.defect_finding_review, name="defect_finding_review"),
+ re_path(r"^finding/(?P\d+)/open$", views.reopen_finding,
+ name="reopen_finding"),
+ re_path(r"^finding/image/(?P[^/]+)$", views.download_finding_pic,
+ name="download_finding_pic"),
+ re_path(r"^finding/(?P\d+)/merge$",
+ views.merge_finding_product, name="merge_finding"),
+ re_path(r"^product/(?P\d+)/merge$", views.merge_finding_product,
+ name="merge_finding_product"),
+ re_path(r"^finding/(?P\d+)/duplicate/(?P\d+)$",
+ views.mark_finding_duplicate, name="mark_finding_duplicate"),
+ re_path(r"^finding/(?P\d+)/duplicate/reset$",
+ views.reset_finding_duplicate_status, name="reset_finding_duplicate_status"),
+ re_path(r"^finding/(?P\d+)/original/(?P\d+)$",
+ views.set_finding_as_original, name="set_finding_as_original"),
+ re_path(r"^finding/(?P\d+)/remediation_date$", views.remediation_date,
+ name="remediation_date"),
# stub findings
- re_path(r'^stub_finding/(?P\d+)/add$',
- views.add_stub_finding, name='add_stub_finding'),
- re_path(r'^stub_finding/(?P\d+)/promote$',
- views.promote_to_finding, name='promote_to_finding'),
- re_path(r'^stub_finding/(?P\d+)/delete$',
- views.delete_stub_finding, name='delete_stub_finding'),
+ re_path(r"^stub_finding/(?P