Enable mypy on Invoke tasks (#27254)
FlorentClarret authored Jul 3, 2024
1 parent 319ff1b commit 73d6e33
Showing 14 changed files with 66 additions and 43 deletions.
2 changes: 1 addition & 1 deletion .circleci/config.yml
@@ -15,7 +15,7 @@ experimental:
templates:
job_template: &job_template
docker:
-- image: gcr.io/datadoghq/agent-circleci-runner:v37111741-34301f4a
+- image: gcr.io/datadoghq/agent-circleci-runner:v37150619-8250a4fc
environment:
USE_SYSTEM_LIBS: "1"
working_directory: /go/src/github.com/DataDog/datadog-agent
10 changes: 5 additions & 5 deletions .gitlab-ci.yml
@@ -164,15 +164,15 @@ variables:
# To use images from datadog-agent-buildimages dev branches, set the corresponding
# SUFFIX variable to _test_only
DATADOG_AGENT_BUILDIMAGES_SUFFIX: ""
-DATADOG_AGENT_BUILDIMAGES: v37111741-34301f4a
+DATADOG_AGENT_BUILDIMAGES: v37150619-8250a4fc
DATADOG_AGENT_WINBUILDIMAGES_SUFFIX: ""
-DATADOG_AGENT_WINBUILDIMAGES: v37111741-34301f4a
+DATADOG_AGENT_WINBUILDIMAGES: v37150619-8250a4fc
DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX: ""
-DATADOG_AGENT_ARMBUILDIMAGES: v37111741-34301f4a
+DATADOG_AGENT_ARMBUILDIMAGES: v37150619-8250a4fc
DATADOG_AGENT_SYSPROBE_BUILDIMAGES_SUFFIX: ""
-DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v37111741-34301f4a
+DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v37150619-8250a4fc
DATADOG_AGENT_BTF_GEN_BUILDIMAGES_SUFFIX: ""
-DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v37111741-34301f4a
+DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v37150619-8250a4fc

DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded
DEB_GPG_KEY_ID: c0962c7d
21 changes: 21 additions & 0 deletions pyproject.toml
@@ -65,6 +65,27 @@ disallow_untyped_defs = false
show_column_numbers = true
# Exclude unit tests for now
exclude = ['tasks/unit-tests']
+files = ['tasks']
+disable_error_code = [
+# TODO: fix them later
+"assignment",
+"index",
+"operator",
+"arg-type",
+"typeddict-item",
+"annotation-unchecked",
+"call-overload",
+"var-annotated",
+"misc",
+"type-arg",
+"union-attr",
+"override",
+"list-item",
+"return-value",
+"return",
+"str-bytes-safe",
+"name-match",
+]

[tool.vulture]
ignore_decorators = ["@task"]
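The new `[tool.mypy]` entries scope the checker to the `tasks` directory and temporarily silence the listed error codes, so the gate can be turned on before every existing finding is fixed. As a purely illustrative sketch (none of it is repository code), this is the kind of issue two of the disabled codes cover and what fixing them looks like:

```python
# Hypothetical examples of two error codes listed in disable_error_code.

def needs_annotation() -> dict[str, int]:
    counts = {}  # mypy: Need type annotation for "counts"  [var-annotated]
    return counts


def needs_annotation_fixed() -> dict[str, int]:
    counts: dict[str, int] = {}  # explicit annotation resolves the error
    return counts


def wrong_return() -> int:
    return "oops"  # mypy: Incompatible return value type (got "str", expected "int")  [return-value]
```

Once the findings behind a code are fixed under `tasks/`, that code can presumably be dropped from `disable_error_code` so regressions fail the lint task.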
4 changes: 2 additions & 2 deletions tasks/build_tags.py
@@ -219,8 +219,8 @@

def compute_build_tags_for_flavor(
build: str,
-build_include: list[str],
-build_exclude: list[str],
+build_include: str | None,
+build_exclude: str | None,
flavor: AgentFlavor = AgentFlavor.base,
include_sds: bool = False,
):
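`compute_build_tags_for_flavor` sits behind Invoke tasks whose `--build-include`/`--build-exclude` options arrive as a single comma-separated string or `None`, not as a list, so the annotation change appears to record the actual runtime types rather than alter behavior. A hedged sketch of that pattern (names and parsing are illustrative, not the repository's exact code):

```python
def compute_tags(build_include: str | None, default_tags: set[str]) -> set[str]:
    """Illustrative only: turn an optional comma-separated CLI value into a tag set."""
    if build_include is None:
        return set(default_tags)
    return {tag.strip() for tag in build_include.split(",") if tag.strip()}


print(compute_tags("zlib,python", {"zlib"}))  # {'zlib', 'python'}
print(compute_tags(None, {"zlib"}))           # {'zlib'}
```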
2 changes: 1 addition & 1 deletion tasks/diff.py
@@ -15,7 +15,7 @@
from tasks.libs.common.git import check_uncommitted_changes, get_commit_sha, get_current_branch
from tasks.release import _get_release_json_value

-BINARIES = {
+BINARIES: dict[str, dict] = {
"agent": {
"entrypoint": "cmd/agent",
"platforms": ["linux/x64", "linux/arm64", "win32/x64", "darwin/x64", "darwin/arm64"],
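Annotating `BINARIES` as `dict[str, dict]` keeps the value type deliberately loose: the nested dicts do not all share the same keys, so a type inferred from the literal can be awkward for later lookups. A minimal, hypothetical illustration:

```python
# Hypothetical table mirroring the shape of BINARIES in tasks/diff.py.
BINARIES: dict[str, dict] = {
    "agent": {"entrypoint": "cmd/agent", "platforms": ["linux/x64", "linux/arm64"]},
    "dogstatsd": {"entrypoint": "cmd/dogstatsd"},  # entries do not all share the same keys
}

for name, details in BINARIES.items():
    # details is a plain dict, so heterogeneous entries type-check without per-key unions
    print(name, details.get("platforms", ["linux/x64"]))
```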
10 changes: 5 additions & 5 deletions tasks/ebpf.py
@@ -175,12 +175,12 @@ def collect_verification_stats(
ctx.run(f"{sudo} chmod a+wr -R {VERIFIER_DATA_DIR}")
ctx.run(f"{sudo} find {VERIFIER_DATA_DIR} -type d -exec chmod a+xr {{}} +")

-with open(VERIFIER_STATS, "r+") as f:
-verifier_stats = json.load(f)
+with open(VERIFIER_STATS, "r+") as file:
+verifier_stats = json.load(file)
cleaned_up = format_verifier_stats(verifier_stats)
-f.seek(0)
-json.dump(cleaned_up, f, indent=4)
-f.truncate()
+file.seek(0)
+json.dump(cleaned_up, file, indent=4)
+file.truncate()


@task(
2 changes: 1 addition & 1 deletion tasks/gotest.py
@@ -200,7 +200,7 @@ def sanitize_env_vars():
del os.environ[env]


-def process_test_result(test_results: ModuleTestResult, junit_tar: str, flavor: AgentFlavor, test_washer: bool) -> bool:
+def process_test_result(test_results, junit_tar: str, flavor: AgentFlavor, test_washer: bool) -> bool:
if junit_tar:
junit_files = [
module_test_result.junit_file_path
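The dropped annotation looked wrong rather than merely imprecise: the body iterates over its first argument, so a single `ModuleTestResult` could not describe it, and removing the hint is the quickest way to quiet mypy. A hedged sketch of what a precise, collection-typed signature might look like (assumed for illustration, not taken from the commit):

```python
from collections.abc import Iterable


class ModuleTestResult:
    """Stand-in with an assumed shape; the real class lives in the tasks helpers."""

    def __init__(self, junit_file_path: str, failed: bool) -> None:
        self.junit_file_path = junit_file_path
        self.failed = failed


def any_failures(test_results: Iterable[ModuleTestResult]) -> bool:
    # A collection-typed parameter matches how the results are actually consumed.
    return any(result.failed for result in test_results)


print(any_failures([ModuleTestResult("junit.xml", failed=False)]))  # False
```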
18 changes: 9 additions & 9 deletions tasks/kernel_matrix_testing/vmconfig.py
@@ -597,20 +597,20 @@ def generate_vmconfig(
vmset.tags,
)

-for vmset in vm_config["vmsets"]:
-add_vcpu(vmset, vcpu)
-add_memory(vmset, memory)
-add_machine_type(vmconfig_template, vmset)
+for vmset_config in vm_config["vmsets"]:
+add_vcpu(vmset_config, vcpu)
+add_memory(vmset_config, memory)
+add_machine_type(vmconfig_template, vmset_config)

-if vmset.get("recipe", "") != "custom":
-add_disks(vmconfig_template, vmset)
+if vmset_config.get("recipe", "") != "custom":
+add_disks(vmconfig_template, vmset_config)

# For local VMs we want to read images from the filesystem
-if vmset.get("arch") == local_arch:
-image_source_to_path(vmset)
+if vmset_config.get("arch") == local_arch:
+image_source_to_path(vmset_config)

if ci:
-add_console(vmset)
+add_console(vmset_config)

return vm_config

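This rename is the recurring pattern in the commit: reusing one name for values of different types in the same scope (apparently the earlier `vmset` objects versus the raw dict entries of `vm_config["vmsets"]`) makes mypy report assignment and attribute errors, and giving the second use its own name is the least invasive fix. A small, self-contained illustration, not repository code:

```python
def before() -> None:
    item = "x86_64"                   # inferred as str
    for item in [{"arch": "arm64"}]:  # mypy: Incompatible types in assignment  [assignment]
        print(item)


def after() -> None:
    arch_name = "x86_64"
    for item_config in [{"arch": "arm64"}]:  # distinct name, distinct type: no error
        print(arch_name, item_config["arch"])


before()
after()
```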
28 changes: 14 additions & 14 deletions tasks/kmt.py
@@ -196,15 +196,15 @@ def gen_config_from_ci_pipeline(
info(f"[+] setting vcpu to {vcpu}")

failed_packages: set[str] = set()
-for job in test_jobs:
-if job.status == "failed" and job.component == vmconfig_template:
-vm_arch = job.arch
+for test_job in test_jobs:
+if test_job.status == "failed" and test_job.component == vmconfig_template:
+vm_arch = test_job.arch
if use_local_if_possible and vm_arch == local_arch:
vm_arch = local_arch

-failed_tests = job.get_test_results()
+failed_tests = test_job.get_test_results()
failed_packages.update({test.split(':')[0] for test in failed_tests.keys()})
-vms.add(f"{vm_arch}-{job.distro}-distro")
+vms.add(f"{vm_arch}-{test_job.distro}-distro")

info(f"[+] generating {output_file} file for VMs {vms}")
vcpu = DEFAULT_VCPU if vcpu is None else vcpu
@@ -1515,12 +1515,12 @@ def explain_ci_failure(_, pipeline: str):
return

# Compute a reason for failure for each test run job
-for job in failed_jobs:
-if job.failure_reason == "script_failure":
+for failed_job in failed_jobs:
+if failed_job.failure_reason == "script_failure":
failreason = testfail # By default, we assume it's a test failure

# Now check the artifacts, we'll guess why the job failed based on the size
-for artifact in job.job.artifacts:
+for artifact in failed_job.job.artifacts: # type: ignore
if artifact.get("filename") == "artifacts.zip":
fsize = artifact.get("size", 0)
if fsize < 1500:
@@ -1529,9 +1529,9 @@
failreason = infrafail
break
else:
-failreason = job.failure_reason
+failreason = failed_job.failure_reason

-failreasons[job.name] = failreason
+failreasons[failed_job.name] = failreason

# Check setup-env jobs that failed, they are infra failures for all related test jobs
for job in failed_setup_jobs:
@@ -1555,14 +1555,14 @@ def groupby_comp_vmset(job: KMTTestRunJob) -> tuple[str, str]:

# Build the distro table with all jobs for this component and vmset, to correctly
# differentiate between skipped and ok jobs
-for job in test_jobs:
-if job.component != component or job.vmset != vmset:
+for test_job in test_jobs:
+if test_job.component != component or test_job.vmset != vmset:
continue

failreason = failreasons.get(job.name, ok)
-distros[job.distro][job.arch] = failreason
+distros[test_job.distro][test_job.arch] = failreason
if failreason == testfail:
-distro_arch_with_test_failures.append((job.distro, job.arch))
+distro_arch_with_test_failures.append((test_job.distro, test_job.arch))

# Filter out distros with no failures
distros = {d: v for d, v in distros.items() if any(r == testfail or r == infrafail for r in v.values())}
3 changes: 2 additions & 1 deletion tasks/libs/common/junit_upload_core.py
@@ -3,6 +3,7 @@
import os
import platform
import re
+import sys
import tarfile
import tempfile
import xml.etree.ElementTree as ET
@@ -227,7 +228,7 @@ def upload_junitxmls(team_dir: Path):
print(stdout)
print(f" Uploaded {len(tuple(team_dir.iterdir()))} files for {team_dir.name}")
if stderr:
print(f"Failed uploading junit:\n{stderr}", file=os.sys.stderr)
print(f"Failed uploading junit:\n{stderr}", file=sys.stderr)
raise CalledProcessError(process.returncode, DATADOG_CI_COMMAND)
return "" # For ThreadPoolExecutor.map. Without this it prints None in the log output.

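`os.sys` only works because the `os` module happens to import `sys` internally; it is an implementation detail that neither the type stubs nor PEP 8 endorse, so mypy rejects it and importing `sys` directly is the supported spelling. A minimal illustration:

```python
import os
import sys

# Runs on CPython, but mypy flags os.sys (attr-defined): it is not part of the typed API.
print(os.sys is sys)

# The supported way to write to stderr:
print("Failed uploading junit", file=sys.stderr)
```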
2 changes: 1 addition & 1 deletion tasks/libs/json.py
@@ -1,5 +1,5 @@
import json
-from json.decoder import WHITESPACE
+from json.decoder import WHITESPACE # type: ignore
from typing import Any


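`WHITESPACE` is a private, unannotated detail of `json.decoder` that typeshed does not declare, so the import only passes with a suppression. The bare `# type: ignore` used here works; mypy also accepts error-code-qualified ignores, which are narrower, as sketched below (whether to scope the ignore is a style choice, not something this commit does):

```python
import re

from json.decoder import WHITESPACE  # type: ignore
# Narrower alternative, assuming the complaint is attr-defined:
# from json.decoder import WHITESPACE  # type: ignore[attr-defined]

# At runtime, WHITESPACE is the compiled regex the stdlib decoder uses to skip leading blanks.
assert isinstance(WHITESPACE, re.Pattern)
```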
4 changes: 2 additions & 2 deletions tasks/libs/pipeline/tools.py
@@ -93,8 +93,8 @@ def gracefully_cancel_pipeline(repo: Project, pipeline: ProjectPipeline, force_c
if job.name.startswith("kmt_setup_env") or job.name.startswith("kmt_run"):
component = "sysprobe" if "sysprobe" in job.name else "secagent"
arch = "x64" if "x64" in job.name else "arm64"
cleanup_job = f"kmt_{component}_cleanup_{arch}_manual"
kmt_cleanup_jobs_to_run.add(cleanup_job)
cleanup_job_name = f"kmt_{component}_cleanup_{arch}_manual"
kmt_cleanup_jobs_to_run.add(cleanup_job_name)

# Run manual cleanup jobs for KMT. If we canceled the setup env or the tests job,
# the cleanup job will not run automatically. We need to trigger the manual variants
1 change: 1 addition & 0 deletions tasks/linter.py
@@ -59,6 +59,7 @@ def python(ctx):
ctx.run("ruff check --fix .")

ctx.run("vulture")
ctx.run("mypy")


@task
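With `ctx.run("mypy")` added, the `python` lint task now runs mypy next to ruff and vulture, and mypy picks up the `[tool.mypy]` section (files, exclusions, disabled error codes) from pyproject.toml at the repository root. If the same check ever needs to be driven from Python instead of the shell, mypy ships a small programmatic entry point; a hedged sketch:

```python
# Illustrative only: invoke mypy programmatically with the same config discovery as the
# CLI. With no extra arguments it relies on the "files" setting in pyproject.toml.
from mypy import api

stdout, stderr, exit_status = api.run([])
print(stdout, end="")
if exit_status != 0:
    raise SystemExit(exit_status)
```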
2 changes: 1 addition & 1 deletion tasks/test_core.py
@@ -194,7 +194,7 @@ def process_input_args(
return modules, flavor


-def process_module_results(flavor: AgentFlavor, module_results: dict[str, dict[str, list[ModuleResult]]]):
+def process_module_results(flavor: AgentFlavor, module_results):
"""
Prints failures in module results, and returns False if at least one module failed.
"""
