Commit 747294b
WIP
ewjoachim committed Dec 29, 2023
1 parent e162b70 commit 747294b
Showing 12 changed files with 426 additions and 123 deletions.
18 changes: 10 additions & 8 deletions coverage_comment/badge.py
@@ -25,15 +25,16 @@ def get_badge_color(
 
 
 def get_evolution_badge_color(
-    rate_before: decimal.Decimal | None,
-    rate_after: decimal.Decimal,
+    delta: decimal.Decimal | int,
+    up_is_good: bool,
     neutral_color: str = "grey",
 ) -> str:
-    if rate_before is None or rate_after > rate_before:
+    if delta == 0:
+        return neutral_color
+    elif (delta > 0) is up_is_good:
         return "brightgreen"
-    elif rate_after == rate_before:
-        return "blue"
     else:
-        return "orange"
+        return "red"
 
 
 def compute_badge_endpoint_data(
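
The evolution badge color is now computed from a signed delta and an `up_is_good` flag rather than from the before/after rates. A quick sketch of the resulting behaviour, calling the function exactly as defined above (the delta values are invented):

```python
import decimal

from coverage_comment.badge import get_evolution_badge_color

# Coverage delta is positive and "up" is good: bright green.
assert get_evolution_badge_color(delta=decimal.Decimal("2.5"), up_is_good=True) == "brightgreen"

# No change: the neutral color, grey by default.
assert get_evolution_badge_color(delta=0, up_is_good=True) == "grey"

# More missing statements (delta > 0) where "up" is bad: red.
assert get_evolution_badge_color(delta=3, up_is_good=False) == "red"
```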
@@ -66,9 +67,10 @@ def compute_badge_image(
 
 
 def get_static_badge_url(label: str, message: str, color: str) -> str:
-    return "https://img.shields.io/badge/" + urllib.parse.quote(
-        f"{label}-{message}-{color}.svg"
+    code = "-".join(
+        e.replace("_", "__").replace("-", "--") for e in (label, message, color)
     )
+    return "https://img.shields.io/badge/" + urllib.parse.quote(f"{code}.svg")
 
 
 def get_endpoint_url(endpoint_url: str) -> str:
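
`get_static_badge_url` now escapes dashes and underscores before quoting, since shields.io static badges use `-` as the label/message/color separator (with `--` and `__` as escapes for literal characters). A standalone sketch that mirrors the new body, with an invented label and message:

```python
import urllib.parse


def get_static_badge_url(label: str, message: str, color: str) -> str:
    # Same logic as the diff above: escape "-" and "_" so they survive the
    # dash-separated "label-message-color" badge path, then percent-quote the rest.
    code = "-".join(
        e.replace("_", "__").replace("-", "--") for e in (label, message, color)
    )
    return "https://img.shields.io/badge/" + urllib.parse.quote(f"{code}.svg")


print(get_static_badge_url("Coverage evolution", "+2.5%", "brightgreen"))
# https://img.shields.io/badge/Coverage%20evolution-%2B2.5%25-brightgreen.svg
```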
39 changes: 24 additions & 15 deletions coverage_comment/coverage.py
@@ -7,19 +7,14 @@
 import itertools
 import json
 import pathlib
-from collections.abc import Sequence
+from collections.abc import Iterable, Sequence
 
 from coverage_comment import log, subprocess
 
 
-def collapse_lines(lines: list[int]) -> list[tuple[int, int]]:
-    # All consecutive line numbers have the same difference between their list index and their value.
-    # Grouping by this difference therefore leads to buckets of consecutive numbers.
-    for _, it in itertools.groupby(enumerate(lines), lambda x: x[1] - x[0]):
-        t = list(it)
-        yield t[0][1], t[-1][1]
-
-
+# The dataclasses in this module are accessible in the template, which is overridable by the user.
+# As a courtesy, we should do our best to keep the existing fields for backward compatibility,
+# and if we really can't, and can't add properties either, at least bump the major version.
 @dataclasses.dataclass
 class CoverageMetadata:
     version: str
@@ -67,13 +62,17 @@ class Coverage:
 class FileDiffCoverage:
     path: pathlib.Path
     percent_covered: decimal.Decimal
-    missing_lines: list[int]
+    covered_statements: list[int]
+    missing_statements: list[int]
+    added_statements: list[int]
+    # Added lines tracks all the lines that were added in the diff, not just
+    # the statements (so it includes comments, blank lines, etc.)
     added_lines: list[int]
 
     # for backward compatibility
     @property
     def violation_lines(self) -> list[int]:
-        return self.missing_lines
+        return self.missing_statements
 
     @functools.cached_property
     def violation_lines_collapsed(self):
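
Since the comment template is user-overridable, the renamed data stays reachable under the old name through the `violation_lines` property. A hand-built instance with invented values, to show how the new fields line up:

```python
import decimal
import pathlib

from coverage_comment.coverage import FileDiffCoverage

file_diff = FileDiffCoverage(
    path=pathlib.Path("coverage_comment/badge.py"),
    percent_covered=decimal.Decimal("0.75"),
    covered_statements=[12, 13, 15],
    missing_statements=[14],
    added_statements=[12, 13, 14, 15],
    # added_lines also carries non-statements: comments, blank lines, etc.
    added_lines=[12, 13, 14, 15, 16],
)

# Templates written against the old field keep working:
assert file_diff.violation_lines == [14]
```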
@@ -259,9 +258,9 @@ def get_diff_coverage_info(
 
         missing = set(file.missing_lines) & set(added_lines_for_file)
         count_missing = len(missing)
-        # Even partially covered lines are considered as covered, no line
-        # appears in both counts
-        count_total = count_executed + count_missing
+
+        added = executed | missing
+        count_total = len(added)
 
         total_num_lines += count_total
         total_num_violations += count_missing
@@ -273,7 +272,9 @@
         files[path] = FileDiffCoverage(
             path=path,
             percent_covered=percent_covered,
-            missing_lines=sorted(missing),
+            covered_statements=sorted(executed),
+            missing_statements=sorted(missing),
+            added_statements=sorted(added),
             added_lines=added_lines_for_file,
         )
     final_percentage = compute_coverage(
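
The per-file total is now the number of distinct added statements (the union of executed and missing) rather than the sum of the two counts; the two agree whenever the sets are disjoint, but the union cannot double-count a statement that would appear on both sides. A toy run of the arithmetic with invented line numbers:

```python
# Statements added by the diff, split by coverage status (invented line numbers).
executed = {260, 261, 262, 265}
missing = {263, 264}

added = executed | missing
count_missing = len(missing)  # 2
count_total = len(added)      # 6, equal to len(executed) + count_missing here,
                              # because the two sets do not overlap
```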
@@ -327,3 +328,11 @@ def parse_line_number_diff_line(line: str) -> Sequence[int]:
     """
     start, length = (int(i) for i in (line.split()[2][1:] + ",1").split(",")[:2])
     return range(start, start + length)
+
+
+def collapse_lines(lines: list[int]) -> Iterable[tuple[int, int]]:
+    # All consecutive line numbers have the same difference between their list index and their value.
+    # Grouping by this difference therefore leads to buckets of consecutive numbers.
+    for _, it in itertools.groupby(enumerate(lines), lambda x: x[1] - x[0]):
+        t = list(it)
+        yield t[0][1], t[-1][1]
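
`collapse_lines` itself is unchanged apart from the move and the more precise `Iterable` return annotation: pairing each line number with its index means consecutive numbers share the same value-minus-index difference, so `itertools.groupby` buckets them into runs. Restated here with a sample call:

```python
import itertools
from collections.abc import Iterable


def collapse_lines(lines: list[int]) -> Iterable[tuple[int, int]]:
    # Consecutive numbers share the same (value - index) difference, so grouping
    # by that difference yields one bucket per run of consecutive line numbers.
    for _, it in itertools.groupby(enumerate(lines), lambda x: x[1] - x[0]):
        t = list(it)
        yield t[0][1], t[-1][1]


print(list(collapse_lines([3, 4, 5, 9, 12, 13])))
# [(3, 5), (9, 9), (12, 13)]
```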
@@ -12,7 +12,7 @@
 
 
 @dataclasses.dataclass(frozen=True)
-class Annotation:
+class Group:
     file: pathlib.Path
     line_start: int
     line_end: int
@@ -68,10 +68,10 @@ def reducer(
     return functools.reduce(reducer, contiguous_groups, [])
 
 
-def group_annotations(
+def get_diff_missing_groups(
     coverage: coverage_module.Coverage,
     diff_coverage: coverage_module.DiffCoverage,
-) -> Iterable[Annotation]:
+) -> Iterable[Group]:
     for path, diff_file in diff_coverage.files.items():
         coverage_file = coverage.files[path]
 
@@ -88,11 +88,11 @@ def group_annotations(
         joiners = set(diff_file.added_lines) - separators
 
         for start, end in compute_contiguous_groups(
-            values=diff_file.missing_lines,
+            values=diff_file.missing_statements,
             separators=separators,
             joiners=joiners,
         ):
-            yield Annotation(
+            yield Group(
                 file=path,
                 line_start=start,
                 line_end=end,
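
Later in the commit, `main.py` imports this module as `diff_grouper` and hands the yielded groups to the annotation step. Purely to illustrate the renamed data shape, here is a hand-built `Group`; the import path and values are assumptions, since this file's header is not shown in the diff:

```python
import pathlib

# Assumed module path, inferred from the `diff_grouper` import added in main.py below.
from coverage_comment.diff_grouper import Group

# One contiguous run of added-but-missing statements, as yielded by
# get_diff_missing_groups(coverage=..., diff_coverage=...).
group = Group(
    file=pathlib.Path("coverage_comment/coverage.py"),
    line_start=262,
    line_end=264,
)
print(f"{group.file}:{group.line_start}-{group.line_end}")
```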
11 changes: 9 additions & 2 deletions coverage_comment/files.py
@@ -108,10 +108,17 @@ def compute_datafile(
     )
 
 
-def parse_datafile(contents) -> decimal.Decimal:
-    return decimal.Decimal(str(json.loads(contents)["coverage"])) / decimal.Decimal(
+def parse_datafile(contents) -> tuple[coverage.Coverage | None, decimal.Decimal]:
+    file_contents = json.loads(contents)
+    coverage_rate = decimal.Decimal(str(file_contents["coverage"])) / decimal.Decimal(
         "100"
     )
+    try:
+        return coverage.extract_info(
+            data=file_contents["raw_data"], coverage_path=file_contents["coverage_path"]
+        ), coverage_rate
+    except KeyError:
+        return None, coverage_rate
 
 
 class ImageURLs(TypedDict):
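
`parse_datafile` now returns a `(Coverage | None, rate)` tuple: datafiles that predate the `raw_data`/`coverage_path` keys trigger the `KeyError` branch and yield `None` for the first element, so older stored data keeps parsing. A minimal sketch of the fallback case (the JSON payload is invented, but only the `coverage` key is needed for it):

```python
import decimal
import json

from coverage_comment import files

# Old-style datafile: only the stored coverage percentage is present, so the
# Coverage half of the tuple falls back to None.
old_contents = json.dumps({"coverage": 85.0})
previous_coverage, previous_coverage_rate = files.parse_datafile(contents=old_contents)
assert previous_coverage is None
assert previous_coverage_rate == decimal.Decimal("0.85")

# Newer datafiles are expected to also carry "raw_data" and "coverage_path",
# in which case a full Coverage object is rebuilt via coverage.extract_info().
```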
15 changes: 8 additions & 7 deletions coverage_comment/main.py
@@ -8,12 +8,10 @@
 import httpx
 
 from coverage_comment import activity as activity_module
-from coverage_comment import (
-    annotations as annotations_module,
-)
 from coverage_comment import (
     comment_file,
     communication,
+    diff_grouper,
     files,
     github,
     github_client,
@@ -151,16 +149,19 @@ def process_pr(
         branch=config.FINAL_COVERAGE_DATA_BRANCH,
     )
 
-    previous_coverage = None
+    previous_coverage, previous_coverage_rate = None, None
     if previous_coverage_data_file:
-        previous_coverage = files.parse_datafile(contents=previous_coverage_data_file)
+        previous_coverage, previous_coverage_rate = files.parse_datafile(
+            contents=previous_coverage_data_file
+        )
 
     marker = template.get_marker(marker_id=config.SUBPROJECT_ID)
     try:
         comment = template.get_comment_markdown(
             coverage=coverage,
             diff_coverage=diff_coverage,
-            previous_coverage_rate=previous_coverage,
+            previous_coverage=previous_coverage,
+            previous_coverage_rate=previous_coverage_rate,
             minimum_green=config.MINIMUM_GREEN,
             minimum_orange=config.MINIMUM_ORANGE,
             repo_name=config.GITHUB_REPOSITORY,
@@ -207,7 +208,7 @@ def process_pr(
         pr_number = None
 
     if pr_number is not None and config.ANNOTATE_MISSING_LINES:
-        annotations = annotations_module.group_annotations(
+        annotations = diff_grouper.get_diff_missing_groups(
            coverage=coverage, diff_coverage=diff_coverage
         )
         github.create_missing_coverage_annotations(