Remove all "labels"-related code #782

Draft · wants to merge 5 commits into base: main
Changes from all commits
73 changes: 0 additions & 73 deletions helpers/labels.py

This file was deleted.

6 changes: 0 additions & 6 deletions rollouts/__init__.py
@@ -4,12 +4,6 @@
FLAKY_TEST_DETECTION = Feature("flaky_test_detection")
FLAKY_SHADOW_MODE = Feature("flaky_shadow_mode")

# Eventually we want all repos to use this
# This flag will just help us with the rollout process
USE_LABEL_INDEX_IN_REPORT_PROCESSING_BY_REPO_ID = Feature(
"use_label_index_in_report_processing"
)

PARALLEL_UPLOAD_PROCESSING_BY_REPO = Feature("parallel_upload_processing")

CARRYFORWARD_BASE_SEARCH_RANGE_BY_OWNER = Feature("carryforward_base_search_range")
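Note: the flag deleted above is what fed the label-index code path in report processing (see `should_use_label_index` in the diffs below). A hypothetical sketch of the gating pattern that goes away with it — `Feature.check_value` and this call site are assumptions for illustration, not code from this repository:

# Hypothetical sketch of how a repo-scoped rollout flag like the removed one
# typically gates a code path; check_value and its signature are assumed.
class Feature:
    def __init__(self, name: str) -> None:
        self.name = name

    def check_value(self, identifier: int, default: bool = False) -> bool:
        # A real implementation would consult the rollout backend.
        return default


USE_LABEL_INDEX_IN_REPORT_PROCESSING_BY_REPO_ID = Feature(
    "use_label_index_in_report_processing"
)


def should_use_label_index(repoid: int) -> bool:
    return USE_LABEL_INDEX_IN_REPORT_PROCESSING_BY_REPO_ID.check_value(
        repoid, default=False
    )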
16 changes: 6 additions & 10 deletions services/report/__init__.py
@@ -50,10 +50,7 @@
RAW_UPLOAD_RAW_REPORT_COUNT,
RAW_UPLOAD_SIZE,
)
from services.report.raw_upload_processor import (
SessionAdjustmentResult,
process_raw_upload,
)
from services.report.raw_upload_processor import process_raw_upload
from services.repository import get_repo_provider_service
from services.yaml.reader import get_paths_from_flags, read_yaml_field

@@ -73,7 +70,7 @@ class ProcessingResult:
session: Session
report: Report | None = None
error: ProcessingError | None = None
session_adjustment: SessionAdjustmentResult | None = None
deleted_sessions: set[int] | None = None


@dataclass
@@ -717,7 +714,7 @@ def build_report_from_raw_content(
upload=upload,
)
result.report = process_result.report
result.session_adjustment = process_result.session_adjustment
result.deleted_sessions = process_result.deleted_sessions

log.info(
"Successfully processed report",
@@ -812,11 +809,10 @@ def update_upload_with_processing_result(
# delete all the carryforwarded `Upload` records corresponding to `Session`s
# which have been removed from the report.
# we always have a `session_adjustment` in the non-error case.
assert processing_result.session_adjustment
deleted_sessions = (
processing_result.session_adjustment.fully_deleted_sessions
assert processing_result.deleted_sessions is not None
delete_uploads_by_sessionid(
upload, list(processing_result.deleted_sessions)
)
delete_uploads_by_sessionid(upload, deleted_sessions)

else:
error = processing_result.error
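Net effect of this file's changes: `ProcessingResult` now carries the fully deleted session IDs directly as a `set[int]` instead of wrapping them in a `SessionAdjustmentResult`. A minimal, self-contained sketch of the resulting shape — the `Session`/`Report`/`ProcessingError` stand-ins and the stubbed `delete_uploads_by_sessionid` are placeholders, not the real implementations:

from dataclasses import dataclass


# Placeholder types and a stubbed helper so the sketch runs on its own;
# the real definitions live elsewhere in this codebase.
class Session: ...
class Report: ...
class ProcessingError: ...


def delete_uploads_by_sessionid(upload: object, session_ids: list[int]) -> None:
    print(f"deleting carryforwarded uploads for sessions {session_ids}")


@dataclass
class ProcessingResult:
    session: Session
    report: Report | None = None
    error: ProcessingError | None = None
    # previously: session_adjustment: SessionAdjustmentResult | None = None
    deleted_sessions: set[int] | None = None


# Mirrors the non-error branch of update_upload_with_processing_result:
# the set is always present there, so it is asserted and handed straight on.
result = ProcessingResult(session=Session(), deleted_sessions={2, 5})
assert result.deleted_sessions is not None
delete_uploads_by_sessionid(upload=object(), session_ids=list(result.deleted_sessions))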
81 changes: 3 additions & 78 deletions services/report/languages/pycoverage.py
@@ -1,7 +1,7 @@
import sentry_sdk

from services.report.languages.base import BaseLanguageProcessor
from services.report.report_builder import ReportBuilderSession, SpecialLabelsEnum
from services.report.report_builder import ReportBuilderSession

COVERAGE_HIT = 1
COVERAGE_MISS = 0
@@ -16,8 +16,6 @@ def matches_content(self, content: dict, first_line: str, name: str) -> bool:
def process(
self, content: dict, report_builder_session: ReportBuilderSession
) -> None:
labels_table = LabelsTable(report_builder_session, content)

for filename, file_coverage in content["files"].items():
_file = report_builder_session.create_coverage_file(filename)
if _file is None:
@@ -28,79 +26,6 @@ def process(
] + [(COVERAGE_MISS, ln) for ln in file_coverage["missing_lines"]]
for cov, ln in lines_and_coverage:
if ln > 0:
label_list_of_lists: list[list[str]] | list[list[int]] = []
if report_builder_session.should_use_label_index:
label_list_of_lists = [
[single_id]
for single_id in labels_table._get_list_of_label_ids(
report_builder_session.label_index,
file_coverage.get("contexts", {}).get(str(ln), []),
)
]
else:
label_list_of_lists = [
[labels_table._normalize_label(testname)]
for testname in file_coverage.get("contexts", {}).get(
str(ln), []
)
]
_file.append(
ln,
report_builder_session.create_coverage_line(
cov,
labels_list_of_lists=label_list_of_lists,
),
)
_line = report_builder_session.create_coverage_line(cov)
_file.append(ln, _line)
report_builder_session.append(_file)


class LabelsTable:
def __init__(
self, report_builder_session: ReportBuilderSession, content: dict
) -> None:
self.labels_table: dict[str, str] = {}
self.reverse_table: dict[str, int] = {}
self.are_labels_already_encoded = False

# Compressed pycoverage files will include a labels_table
if "labels_table" in content:
self.labels_table = content["labels_table"]
# We can pre-populate some of the indexes that will be used
for idx, testname in self.labels_table.items():
clean_label = self._normalize_label(testname)
report_builder_session.label_index[int(idx)] = clean_label
self.are_labels_already_encoded = True

def _normalize_label(self, testname: int | float | str) -> str:
if isinstance(testname, int) or isinstance(testname, float):
# This is from a compressed report.
# Pull label from the labels_table
# But the labels_table keys are strings, because of JSON format
testname = self.labels_table[str(testname)]
if testname == "":
return SpecialLabelsEnum.CODECOV_ALL_LABELS_PLACEHOLDER.corresponding_label
return testname.split("|", 1)[0]

def _get_list_of_label_ids(
self,
current_label_idx: dict[int, str],
line_contexts: list[str | int],
) -> list[int]:
if self.are_labels_already_encoded:
# The line contexts already include indexes in the table.
# We can re-use the table and don't have to do anything with contexts.
return sorted(map(int, line_contexts))

# In this case we do need to fix the labels
label_ids_for_line = set()
for testname in line_contexts:
clean_label = self._normalize_label(testname)
if clean_label in self.reverse_table:
label_ids_for_line.add(self.reverse_table[clean_label])
else:
label_id = max([*current_label_idx.keys(), 0]) + 1
current_label_idx[label_id] = clean_label
self.reverse_table[clean_label] = label_id
label_ids_for_line.add(label_id)

return sorted(label_ids_for_line)
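For context on what the deleted `LabelsTable` parsed: a pycoverage JSON upload carries per-line "contexts" (raw test names, or integer indexes when the report is compressed) plus, in the compressed case, a top-level "labels_table" mapping those indexes back to test names. After this PR only "files", "executed_lines" and "missing_lines" are consumed. An illustrative payload — the file name, test name, and line numbers are made up:

# Hypothetical example of the upload shape the removed label handling consumed.
content = {
    "files": {
        "awesome/code.py": {
            "executed_lines": [1, 2, 5],
            "missing_lines": [8],
            # Per-line dynamic contexts: label indexes in a compressed report,
            # raw test names (split on "|" by the removed _normalize_label) otherwise.
            "contexts": {"1": [0], "2": [1], "5": [1]},
        }
    },
    # Only present in compressed reports; maps context indexes to test names.
    # An empty string mapped to the CODECOV_ALL_LABELS_PLACEHOLDER special label.
    "labels_table": {"0": "", "1": "tests/test_code.py::test_add|run"},
}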
2 changes: 0 additions & 2 deletions services/report/languages/tests/unit/__init__.py
@@ -6,7 +6,6 @@ def create_report_builder_session(
path_fixer: PathFixer | None = None,
filename: str = "filename",
current_yaml: dict | None = None,
should_use_label_index: bool = False,
) -> ReportBuilderSession:
def fixes(filename, bases_to_try=None):
return filename
@@ -16,6 +15,5 @@ def fixes(filename, bases_to_try=None):
ignored_lines={},
sessionid=0,
current_yaml=current_yaml,
should_use_label_index=should_use_label_index,
)
return report_builder.create_report_builder_session(filename)