From 5438663896ff78837f14917122b2a45fd9609a76 Mon Sep 17 00:00:00 2001
From: Florent Clarret
Date: Wed, 3 Jul 2024 08:00:52 +0200
Subject: [PATCH] fix

---
 tasks/build_tags.py                     |  4 ++--
 tasks/diff.py                           |  2 +-
 tasks/ebpf.py                           | 10 +++++-----
 tasks/gotest.py                         |  2 +-
 tasks/kernel_matrix_testing/vmconfig.py | 18 +++++++++---------
 tasks/kmt.py                            | 28 ++++++++++++++--------------
 tasks/libs/common/junit_upload_core.py  |  3 ++-
 tasks/libs/json.py                      |  2 +-
 tasks/libs/pipeline/tools.py            |  4 ++--
 tasks/test_core.py                      |  2 +-
 10 files changed, 38 insertions(+), 37 deletions(-)

diff --git a/tasks/build_tags.py b/tasks/build_tags.py
index 81dda4ac88e5b..0e2e964f670bc 100644
--- a/tasks/build_tags.py
+++ b/tasks/build_tags.py
@@ -219,8 +219,8 @@ def compute_build_tags_for_flavor(
     build: str,
-    build_include: list[str],
-    build_exclude: list[str],
+    build_include: str | None,
+    build_exclude: str | None,
     flavor: AgentFlavor = AgentFlavor.base,
     include_sds: bool = False,
 ):
diff --git a/tasks/diff.py b/tasks/diff.py
index 99d926ea56d53..8f9e0aef57f19 100644
--- a/tasks/diff.py
+++ b/tasks/diff.py
@@ -15,7 +15,7 @@
 from tasks.libs.common.git import check_uncommitted_changes, get_commit_sha, get_current_branch
 from tasks.release import _get_release_json_value

-BINARIES = {
+BINARIES: dict[str, dict] = {
     "agent": {
         "entrypoint": "cmd/agent",
         "platforms": ["linux/x64", "linux/arm64", "win32/x64", "darwin/x64", "darwin/arm64"],
diff --git a/tasks/ebpf.py b/tasks/ebpf.py
index 25fcb0f6915a0..3106f185da71a 100644
--- a/tasks/ebpf.py
+++ b/tasks/ebpf.py
@@ -175,12 +175,12 @@ def collect_verification_stats(
     ctx.run(f"{sudo} chmod a+wr -R {VERIFIER_DATA_DIR}")
     ctx.run(f"{sudo} find {VERIFIER_DATA_DIR} -type d -exec chmod a+xr {{}} +")

-    with open(VERIFIER_STATS, "r+") as f:
-        verifier_stats = json.load(f)
+    with open(VERIFIER_STATS, "r+") as file:
+        verifier_stats = json.load(file)
         cleaned_up = format_verifier_stats(verifier_stats)
-        f.seek(0)
-        json.dump(cleaned_up, f, indent=4)
-        f.truncate()
+        file.seek(0)
+        json.dump(cleaned_up, file, indent=4)
+        file.truncate()


 @task(
diff --git a/tasks/gotest.py b/tasks/gotest.py
index b6ee686cafeb8..4b368ba9d27d3 100644
--- a/tasks/gotest.py
+++ b/tasks/gotest.py
@@ -200,7 +200,7 @@ def sanitize_env_vars():
             del os.environ[env]


-def process_test_result(test_results: ModuleTestResult, junit_tar: str, flavor: AgentFlavor, test_washer: bool) -> bool:
+def process_test_result(test_results, junit_tar: str, flavor: AgentFlavor, test_washer: bool) -> bool:
     if junit_tar:
         junit_files = [
             module_test_result.junit_file_path
diff --git a/tasks/kernel_matrix_testing/vmconfig.py b/tasks/kernel_matrix_testing/vmconfig.py
index 9bc303a0e393c..6c6489ff51c60 100644
--- a/tasks/kernel_matrix_testing/vmconfig.py
+++ b/tasks/kernel_matrix_testing/vmconfig.py
@@ -597,20 +597,20 @@ def generate_vmconfig(
             vmset.tags,
         )

-    for vmset in vm_config["vmsets"]:
-        add_vcpu(vmset, vcpu)
-        add_memory(vmset, memory)
-        add_machine_type(vmconfig_template, vmset)
+    for vmset_config in vm_config["vmsets"]:
+        add_vcpu(vmset_config, vcpu)
+        add_memory(vmset_config, memory)
+        add_machine_type(vmconfig_template, vmset_config)

-        if vmset.get("recipe", "") != "custom":
-            add_disks(vmconfig_template, vmset)
+        if vmset_config.get("recipe", "") != "custom":
+            add_disks(vmconfig_template, vmset_config)

         # For local VMs we want to read images from the filesystem
-        if vmset.get("arch") == local_arch:
-            image_source_to_path(vmset)
+        if vmset_config.get("arch") == local_arch:
+            image_source_to_path(vmset_config)

         if ci:
-            add_console(vmset)
+            add_console(vmset_config)

     return vm_config

diff --git a/tasks/kmt.py b/tasks/kmt.py
index 52cd569b66ece..d33498386b87a 100644
--- a/tasks/kmt.py
+++ b/tasks/kmt.py
@@ -196,15 +196,15 @@ def gen_config_from_ci_pipeline(
         info(f"[+] setting vcpu to {vcpu}")

     failed_packages: set[str] = set()
-    for job in test_jobs:
-        if job.status == "failed" and job.component == vmconfig_template:
-            vm_arch = job.arch
+    for test_job in test_jobs:
+        if test_job.status == "failed" and test_job.component == vmconfig_template:
+            vm_arch = test_job.arch
             if use_local_if_possible and vm_arch == local_arch:
                 vm_arch = local_arch

-            failed_tests = job.get_test_results()
+            failed_tests = test_job.get_test_results()
             failed_packages.update({test.split(':')[0] for test in failed_tests.keys()})
-            vms.add(f"{vm_arch}-{job.distro}-distro")
+            vms.add(f"{vm_arch}-{test_job.distro}-distro")

     info(f"[+] generating {output_file} file for VMs {vms}")
     vcpu = DEFAULT_VCPU if vcpu is None else vcpu
@@ -1515,12 +1515,12 @@ def explain_ci_failure(_, pipeline: str):
         return

     # Compute a reason for failure for each test run job
-    for job in failed_jobs:
-        if job.failure_reason == "script_failure":
+    for failed_job in failed_jobs:
+        if failed_job.failure_reason == "script_failure":
             failreason = testfail  # By default, we assume it's a test failure

             # Now check the artifacts, we'll guess why the job failed based on the size
-            for artifact in job.job.artifacts:
+            for artifact in failed_job.job.artifacts:  # type: ignore
                 if artifact.get("filename") == "artifacts.zip":
                     fsize = artifact.get("size", 0)
                     if fsize < 1500:
@@ -1529,9 +1529,9 @@ def explain_ci_failure(_, pipeline: str):
                         failreason = infrafail
                         break
         else:
-            failreason = job.failure_reason
+            failreason = failed_job.failure_reason

-        failreasons[job.name] = failreason
+        failreasons[failed_job.name] = failreason

     # Check setup-env jobs that failed, they are infra failures for all related test jobs
     for job in failed_setup_jobs:
@@ -1555,14 +1555,14 @@ def groupby_comp_vmset(job: KMTTestRunJob) -> tuple[str, str]:

         # Build the distro table with all jobs for this component and vmset, to correctly
         # differentiate between skipped and ok jobs
-        for job in test_jobs:
-            if job.component != component or job.vmset != vmset:
+        for test_job in test_jobs:
+            if test_job.component != component or test_job.vmset != vmset:
                 continue

             failreason = failreasons.get(job.name, ok)
-            distros[job.distro][job.arch] = failreason
+            distros[test_job.distro][test_job.arch] = failreason
             if failreason == testfail:
-                distro_arch_with_test_failures.append((job.distro, job.arch))
+                distro_arch_with_test_failures.append((test_job.distro, test_job.arch))

         # Filter out distros with no failures
         distros = {d: v for d, v in distros.items() if any(r == testfail or r == infrafail for r in v.values())}
diff --git a/tasks/libs/common/junit_upload_core.py b/tasks/libs/common/junit_upload_core.py
index 8ac7c578ef0d7..4365aac4dbc26 100644
--- a/tasks/libs/common/junit_upload_core.py
+++ b/tasks/libs/common/junit_upload_core.py
@@ -3,6 +3,7 @@
 import os
 import platform
 import re
+import sys
 import tarfile
 import tempfile
 import xml.etree.ElementTree as ET
@@ -227,7 +228,7 @@ def upload_junitxmls(team_dir: Path):
     print(stdout)
     print(f" Uploaded {len(tuple(team_dir.iterdir()))} files for {team_dir.name}")
     if stderr:
-        print(f"Failed uploading junit:\n{stderr}", file=os.sys.stderr)
+        print(f"Failed uploading junit:\n{stderr}", file=sys.stderr)
         raise CalledProcessError(process.returncode, DATADOG_CI_COMMAND)

     return ""  # For ThreadPoolExecutor.map. Without this it prints None in the log output.
diff --git a/tasks/libs/json.py b/tasks/libs/json.py
index 7645be76de30a..ae705e67211a0 100644
--- a/tasks/libs/json.py
+++ b/tasks/libs/json.py
@@ -1,5 +1,5 @@
 import json
-from json.decoder import WHITESPACE
+from json.decoder import WHITESPACE  # type: ignore
 from typing import Any


diff --git a/tasks/libs/pipeline/tools.py b/tasks/libs/pipeline/tools.py
index bd52ee6c4bf78..326c8c6121c23 100644
--- a/tasks/libs/pipeline/tools.py
+++ b/tasks/libs/pipeline/tools.py
@@ -93,8 +93,8 @@ def gracefully_cancel_pipeline(repo: Project, pipeline: ProjectPipeline, force_c
         if job.name.startswith("kmt_setup_env") or job.name.startswith("kmt_run"):
             component = "sysprobe" if "sysprobe" in job.name else "secagent"
             arch = "x64" if "x64" in job.name else "arm64"
-            cleanup_job = f"kmt_{component}_cleanup_{arch}_manual"
-            kmt_cleanup_jobs_to_run.add(cleanup_job)
+            cleanup_job_name = f"kmt_{component}_cleanup_{arch}_manual"
+            kmt_cleanup_jobs_to_run.add(cleanup_job_name)

     # Run manual cleanup jobs for KMT. If we canceled the setup env or the tests job,
     # the cleanup job will not run automatically. We need to trigger the manual variants
diff --git a/tasks/test_core.py b/tasks/test_core.py
index df9afc06c073e..8017dbdef4b7b 100644
--- a/tasks/test_core.py
+++ b/tasks/test_core.py
@@ -194,7 +194,7 @@ def process_input_args(
     return modules, flavor


-def process_module_results(flavor: AgentFlavor, module_results: dict[str, dict[str, list[ModuleResult]]]):
+def process_module_results(flavor: AgentFlavor, module_results):
     """
     Prints failures in module results, and returns False if at least one module failed.
     """
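
Note on the renames in tasks/kmt.py and tasks/kernel_matrix_testing/vmconfig.py: the inner loops reused a name (`job`, `vmset`) that was already bound earlier in the same function, so any reference the rename misses silently reads the stale outer value instead of raising an error. A minimal sketch of that pitfall follows, with hypothetical names that are not taken from the patch:

# Minimal sketch (hypothetical names) of the stale-binding pitfall that the
# test_job/failed_job/vmset_config renames in this patch are meant to avoid.

def summarize(setup_jobs: list[str], test_jobs: list[str]) -> list[str]:
    for job in setup_jobs:  # 'job' stays bound after this loop finishes
        print(f"setting up {job}")

    results = []
    for test_job in test_jobs:
        # Bug: 'job' still refers to the last setup job, not the current test job.
        results.append(job)  # should be: results.append(test_job)
    return results


print(summarize(["setup-a", "setup-b"], ["test-1", "test-2"]))
# Prints ['setup-b', 'setup-b'] instead of ['test-1', 'test-2'].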