about summary refs log tree commit diff
path: root/.ci/generate_test_report_lib.py
diff options
context:
space:
mode:
Diffstat (limited to '.ci/generate_test_report_lib.py')
-rw-r--r--  .ci/generate_test_report_lib.py  98
1 file changed, 74 insertions, 24 deletions
diff --git a/.ci/generate_test_report_lib.py b/.ci/generate_test_report_lib.py
index 7820fbd..5edde25 100644
--- a/.ci/generate_test_report_lib.py
+++ b/.ci/generate_test_report_lib.py
@@ -3,8 +3,22 @@
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Library to parse JUnit XML files and return a markdown report."""
+from typing import TypedDict, Optional
+import platform
+
from junitparser import JUnitXml, Failure
+
+# This data structure should match the definition in llvm-zorg in
+# premerge/advisor/advisor_lib.py
+# TODO(boomanaiden154): Drop the Optional here and switch to str | None when
+# we require Python 3.10.
+class FailureExplanation(TypedDict):
+ name: str
+ explained: bool
+ reason: Optional[str]
+
+
SEE_BUILD_FILE_STR = "Download the build's log file to see the details."
UNRELATED_FAILURES_STR = (
"If these failures are unrelated to your changes (for example "
@@ -48,6 +62,18 @@ def _parse_ninja_log(ninja_log: list[str]) -> list[tuple[str, str]]:
# aligned with the failure.
failing_action = ninja_log[index].split("FAILED: ")[1]
failure_log = []
+
+ # Parse the lines above the FAILED: string if the line does not come
+ # immediately after a progress indicator to ensure that we capture the
+ # entire failure message.
+ if not ninja_log[index - 1].startswith("["):
+ before_index = index - 1
+ while before_index > 0 and not ninja_log[before_index].startswith("["):
+ failure_log.append(ninja_log[before_index])
+ before_index = before_index - 1
+ failure_log.reverse()
+
+ # Parse the failure information, which comes after the FAILED: tag.
while (
index < len(ninja_log)
and not ninja_log[index].startswith("[")
@@ -82,16 +108,29 @@ def find_failure_in_ninja_logs(ninja_logs: list[list[str]]) -> list[tuple[str, s
return failures
-def _format_ninja_failures(ninja_failures: list[tuple[str, str]]) -> list[str]:
- """Formats ninja failures into summary views for the report."""
+def _format_failures(
+ failures: list[tuple[str, str]], failure_explanations: dict[str, FailureExplanation]
+) -> list[str]:
+ """Formats failures into summary views for the report."""
output = []
- for build_failure in ninja_failures:
+ for build_failure in failures:
failed_action, failure_message = build_failure
+ failure_explanation = None
+ if failed_action in failure_explanations:
+ failure_explanation = failure_explanations[failed_action]
+ output.append("<details>")
+ if failure_explanation:
+ output.extend(
+ [
+ f"<summary>{failed_action} (Likely Already Failing)</summary>" "",
+ failure_explanation["reason"],
+ "",
+ ]
+ )
+ else:
+ output.extend([f"<summary>{failed_action}</summary>", ""])
output.extend(
[
- "<details>",
- f"<summary>{failed_action}</summary>",
- "",
"```",
failure_message,
"```",
@@ -100,6 +139,7 @@ def _format_ninja_failures(ninja_failures: list[tuple[str, str]]) -> list[str]:
)
return output
+
def get_failures(junit_objects) -> dict[str, list[tuple[str, str]]]:
failures = {}
for results in junit_objects:
@@ -131,12 +171,19 @@ def generate_report(
ninja_logs: list[list[str]],
size_limit=1024 * 1024,
list_failures=True,
+ failure_explanations_list: list[FailureExplanation] = [],
):
failures = get_failures(junit_objects)
tests_run = 0
tests_skipped = 0
tests_failed = 0
+ failure_explanations: dict[str, FailureExplanation] = {}
+ for failure_explanation in failure_explanations_list:
+ if not failure_explanation["explained"]:
+ continue
+ failure_explanations[failure_explanation["name"]] = failure_explanation
+
for results in junit_objects:
for testsuite in results:
tests_run += testsuite.tests
@@ -149,8 +196,8 @@ def generate_report(
if return_code == 0:
report.extend(
[
- "The build succeeded and no tests ran. This is expected in some "
- "build configurations."
+ ":white_check_mark: The build succeeded and no tests ran. "
+ "This is expected in some build configurations."
]
)
else:
@@ -175,7 +222,7 @@ def generate_report(
"",
]
)
- report.extend(_format_ninja_failures(ninja_failures))
+ report.extend(_format_failures(ninja_failures, failure_explanations))
report.extend(
[
"",
@@ -211,18 +258,7 @@ def generate_report(
for testsuite_name, failures in failures.items():
report.extend(["", f"### {testsuite_name}"])
- for name, output in failures:
- report.extend(
- [
- "<details>",
- f"<summary>{name}</summary>",
- "",
- "```",
- output,
- "```",
- "</details>",
- ]
- )
+ report.extend(_format_failures(failures, failure_explanations))
elif return_code != 0:
# No tests failed but the build was in a failed state. Bring this to the user's
# attention.
@@ -231,7 +267,7 @@ def generate_report(
report.extend(
[
"",
- "All tests passed but another part of the build **failed**. "
+ "All executed tests passed, but another part of the build **failed**. "
"Information about the build failure could not be automatically "
"obtained.",
"",
@@ -242,12 +278,16 @@ def generate_report(
report.extend(
[
"",
- "All tests passed but another part of the build **failed**. Click on "
+ "All executed tests passed, but another part of the build **failed**. Click on "
"a failure below to see the details.",
"",
]
)
- report.extend(_format_ninja_failures(ninja_failures))
+ report.extend(_format_failures(ninja_failures, failure_explanations))
+ else:
+ report.extend(
+ ["", ":white_check_mark: The build succeeded and all tests passed."]
+ )
if failures or return_code != 0:
report.extend(["", UNRELATED_FAILURES_STR])
@@ -284,3 +324,13 @@ def load_info_from_files(build_log_files):
def generate_report_from_files(title, return_code, build_log_files):
junit_objects, ninja_logs = load_info_from_files(build_log_files)
return generate_report(title, return_code, junit_objects, ninja_logs)
+
+
def compute_platform_title() -> str:
    """Return a markdown report title with an OS emoji and normalized arch.

    Uses a window emoji for Windows and a penguin for everything else, and
    collapses the two spellings of the x86-64 architecture into "x64".
    """
    system = platform.system()
    logo = ":window:" if system == "Windows" else ":penguin:"
    machine = platform.machine()
    # Linux reports the machine as x86_64 while Windows reports AMD64;
    # both refer to the same architecture.
    arch = "x64" if machine in ("x86_64", "AMD64") else machine
    return f"{logo} {system} {arch} Test Results"