Diffstat (limited to '.ci')
-rw-r--r--  .ci/compute_projects.py       62
-rw-r--r--  .ci/compute_projects_test.py  62
-rw-r--r--  .ci/metrics/metrics_test.py   75
3 files changed, 178 insertions, 21 deletions
diff --git a/.ci/compute_projects.py b/.ci/compute_projects.py
index 8e25fd6..2dc5629 100644
--- a/.ci/compute_projects.py
+++ b/.ci/compute_projects.py
@@ -144,6 +144,23 @@ PROJECT_CHECK_TARGETS = {
 RUNTIMES = {"libcxx", "libcxxabi", "libunwind", "compiler-rt", "libc"}
 
+# Meta projects are projects that need explicit handling but do not reside
+# in their own top-level folder. To add a meta project, map the start of its
+# path to the name of the project below. Multiple paths can map to the same
+# meta project.
+META_PROJECTS = {
+    ("clang", "lib", "CIR"): "CIR",
+    ("clang", "test", "CIR"): "CIR",
+    ("clang", "include", "clang", "CIR"): "CIR",
+    ("*", "docs"): "docs",
+    ("llvm", "utils", "gn"): "gn",
+    (".github", "workflows", "premerge.yaml"): ".ci",
+    ("third-party",): ".ci",
+}
+
+# Projects that should not run any tests. These must be meta projects above.
+SKIP_PROJECTS = ["docs", "gn"]
+
 
 def _add_dependencies(projects: Set[str], runtimes: Set[str]) -> Set[str]:
     projects_with_dependents = set(projects)
@@ -236,29 +253,34 @@ def _compute_runtimes_to_build(
     return _exclude_projects(runtimes_to_build, platform)
 
 
+def _path_matches(matcher: tuple[str, ...], file_path: tuple[str, ...]) -> bool:
+    # A matcher is a path prefix; "*" matches any single path component.
+    if len(file_path) < len(matcher):
+        return False
+    for match_part, file_part in zip(matcher, file_path):
+        if match_part == "*" or file_part == "*":
+            continue
+        if match_part != file_part:
+            return False
+    return True
+
+
+def _get_modified_projects_for_file(modified_file: str) -> Set[str]:
+    modified_projects = set()
+    path_parts = pathlib.Path(modified_file).parts
+    for meta_project_files, meta_project in META_PROJECTS.items():
+        if _path_matches(meta_project_files, path_parts):
+            if meta_project in SKIP_PROJECTS:
+                return set()
+            modified_projects.add(meta_project)
+    modified_projects.add(path_parts[0])
+    return modified_projects
+
+
 def _get_modified_projects(modified_files: list[str]) -> Set[str]:
     modified_projects = set()
     for modified_file in modified_files:
-        path_parts = pathlib.Path(modified_file).parts
-        # Exclude files in the docs directory. They do not impact an test
-        # targets and there is a separate workflow used for ensuring the
-        # documentation builds.
-        if len(path_parts) > 2 and path_parts[1] == "docs":
-            continue
-        # Exclude files for the gn build. We do not test it within premerge
-        # and changes occur often enough that they otherwise take up
-        # capacity.
-        if len(path_parts) > 3 and path_parts[:3] == ("llvm", "utils", "gn"):
-            continue
-        # If the file is in the clang/lib/CIR directory, add the CIR project.
-        if len(path_parts) > 3 and (
-            path_parts[:3] == ("clang", "lib", "CIR")
-            or path_parts[:3] == ("clang", "test", "CIR")
-            or path_parts[:4] == ("clang", "include", "clang", "CIR")
-        ):
-            modified_projects.add("CIR")
-        # Fall through to add clang.
-        modified_projects.add(pathlib.Path(modified_file).parts[0])
+        modified_projects.update(_get_modified_projects_for_file(modified_file))
     return modified_projects
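
For illustration, here is a minimal standalone sketch of how the new matcher and the meta-project tables combine. It inlines trimmed copies of META_PROJECTS and SKIP_PROJECTS from the hunk above, so the names are real but the tables are abbreviated:

import pathlib

# Trimmed copies of the tables from the diff above.
META_PROJECTS = {
    ("clang", "lib", "CIR"): "CIR",
    ("*", "docs"): "docs",
    ("llvm", "utils", "gn"): "gn",
}
SKIP_PROJECTS = ["docs", "gn"]

def _path_matches(matcher, file_path):
    # A matcher is a path prefix; "*" matches any single component.
    if len(file_path) < len(matcher):
        return False
    return all(m == "*" or f == "*" or m == f for m, f in zip(matcher, file_path))

def _get_modified_projects_for_file(modified_file):
    modified_projects = set()
    path_parts = pathlib.Path(modified_file).parts
    for matcher, meta_project in META_PROJECTS.items():
        if _path_matches(matcher, path_parts):
            if meta_project in SKIP_PROJECTS:
                return set()  # e.g. docs and gn changes trigger no testing
            modified_projects.add(meta_project)
    modified_projects.add(path_parts[0])
    return modified_projects

# CIR files map to both the CIR meta project and clang itself.
assert _get_modified_projects_for_file("clang/lib/CIR/CodeGen/foo.cpp") == {"CIR", "clang"}
# The ("*", "docs") matcher catches docs directories under any top-level project.
assert _get_modified_projects_for_file("lldb/docs/index.rst") == set()
# Ordinary files just implicate their top-level project.
assert _get_modified_projects_for_file("llvm/lib/IR/Core.cpp") == {"llvm"}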
diff --git a/.ci/compute_projects_test.py b/.ci/compute_projects_test.py
index 1bbcd8a..11c4aea9 100644
--- a/.ci/compute_projects_test.py
+++ b/.ci/compute_projects_test.py
@@ -203,7 +203,7 @@ class TestComputeProjects(unittest.TestCase):
     def test_invalid_subproject(self):
         env_variables = compute_projects.get_env_variables(
-            ["third-party/benchmark/CMakeLists.txt"], "Linux"
+            ["llvm-libgcc/CMakeLists.txt"], "Linux"
         )
         self.assertEqual(env_variables["projects_to_build"], "")
         self.assertEqual(env_variables["project_check_targets"], "")
@@ -308,6 +308,66 @@ class TestComputeProjects(unittest.TestCase):
         self.assertEqual(env_variables["runtimes_check_targets"], "check-libc")
         self.assertEqual(env_variables["runtimes_check_targets_needs_reconfig"], "")
 
+    def test_premerge_workflow(self):
+        env_variables = compute_projects.get_env_variables(
+            [".github/workflows/premerge.yaml"], "Linux"
+        )
+        self.assertEqual(
+            env_variables["projects_to_build"],
+            "bolt;clang;clang-tools-extra;flang;libclc;lld;lldb;llvm;mlir;polly",
+        )
+        self.assertEqual(
+            env_variables["project_check_targets"],
+            "check-bolt check-clang check-clang-cir check-clang-tools check-flang check-lld check-lldb check-llvm check-mlir check-polly",
+        )
+        self.assertEqual(
+            env_variables["runtimes_to_build"],
+            "compiler-rt;libc;libcxx;libcxxabi;libunwind",
+        )
+        self.assertEqual(
+            env_variables["runtimes_check_targets"],
+            "check-compiler-rt check-libc",
+        )
+        self.assertEqual(
+            env_variables["runtimes_check_targets_needs_reconfig"],
+            "check-cxx check-cxxabi check-unwind",
+        )
+
+    def test_other_github_workflow(self):
+        env_variables = compute_projects.get_env_variables(
+            [".github/workflows/docs.yml"], "Linux"
+        )
+        self.assertEqual(env_variables["projects_to_build"], "")
+        self.assertEqual(env_variables["project_check_targets"], "")
+        self.assertEqual(env_variables["runtimes_to_build"], "")
+        self.assertEqual(env_variables["runtimes_check_targets"], "")
+        self.assertEqual(env_variables["runtimes_check_targets_needs_reconfig"], "")
+
+    def test_third_party_benchmark(self):
+        env_variables = compute_projects.get_env_variables(
+            ["third-party/benchmark/CMakeLists.txt"], "Linux"
+        )
+        self.assertEqual(
+            env_variables["projects_to_build"],
+            "bolt;clang;clang-tools-extra;flang;libclc;lld;lldb;llvm;mlir;polly",
+        )
+        self.assertEqual(
+            env_variables["project_check_targets"],
+            "check-bolt check-clang check-clang-cir check-clang-tools check-flang check-lld check-lldb check-llvm check-mlir check-polly",
+        )
+        self.assertEqual(
+            env_variables["runtimes_to_build"],
+            "compiler-rt;libc;libcxx;libcxxabi;libunwind",
+        )
+        self.assertEqual(
+            env_variables["runtimes_check_targets"],
+            "check-compiler-rt check-libc",
+        )
+        self.assertEqual(
+            env_variables["runtimes_check_targets_needs_reconfig"],
+            "check-cxx check-cxxabi check-unwind",
+        )
+
 
 if __name__ == "__main__":
     unittest.main()
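
The expected values these tests pin down can also be reproduced interactively; a minimal sketch, assuming it is run from the monorepo root so the .ci directory is importable:

import sys

sys.path.insert(0, ".ci")  # assumes the monorepo root as the working directory
import compute_projects

# premerge.yaml maps to the ".ci" meta project, which triggers the full
# build matrix rather than any single subproject.
env = compute_projects.get_env_variables(
    [".github/workflows/premerge.yaml"], "Linux"
)
print(env["projects_to_build"])
# bolt;clang;clang-tools-extra;flang;libclc;lld;lldb;llvm;mlir;polly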
diff --git a/.ci/metrics/metrics_test.py b/.ci/metrics/metrics_test.py
new file mode 100644
index 0000000..259e55f
--- /dev/null
+++ b/.ci/metrics/metrics_test.py
@@ -0,0 +1,75 @@
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+"""Tests for metrics.py"""
+
+from dataclasses import dataclass
+import requests
+import unittest
+import unittest.mock
+
+import metrics
+
+
+class TestMetrics(unittest.TestCase):
+    def test_upload_gauge_metric(self):
+        """Test that we can upload a gauge metric correctly.
+
+        Also verify that we pass around parameters like API keys and user IDs
+        correctly to the HTTP POST request.
+        """
+        test_metrics = [metrics.GaugeMetric("gauge_test", 5, 1000)]
+        return_value = requests.Response()
+        return_value.status_code = 204
+        with unittest.mock.patch(
+            "requests.post", return_value=return_value
+        ) as post_mock:
+            metrics.upload_metrics(test_metrics, "test_userid", "test_api_key")
+        self.assertSequenceEqual(post_mock.call_args.args, [metrics.GRAFANA_URL])
+        self.assertEqual(
+            post_mock.call_args.kwargs["data"], "gauge_test value=5 1000"
+        )
+        self.assertEqual(
+            post_mock.call_args.kwargs["auth"], ("test_userid", "test_api_key")
+        )
+
+    def test_upload_job_metric(self):
+        """Test that we can upload a job metric correctly."""
+        test_metrics = [
+            metrics.JobMetrics("test_job", 5, 10, 1, 1000, 7, "test_workflow")
+        ]
+        return_value = requests.Response()
+        return_value.status_code = 204
+        with unittest.mock.patch(
+            "requests.post", return_value=return_value
+        ) as post_mock:
+            metrics.upload_metrics(test_metrics, "test_userid", "test_api_key")
+        self.assertEqual(
+            post_mock.call_args.kwargs["data"],
+            "test_job queue_time=5,run_time=10,status=1 1000",
+        )
+
+    def test_upload_unknown_metric(self):
+        """Test we report an error if we encounter an unknown metric type."""
+
+        @dataclass
+        class FakeMetric:
+            fake_data: str
+
+        test_metrics = [FakeMetric("test")]
+
+        with self.assertRaises(ValueError):
+            metrics.upload_metrics(test_metrics, "test_userid", "test_api_key")
+
+    def test_bad_response_code(self):
+        """Test that we gracefully handle HTTP response errors."""
+        test_metrics = [metrics.GaugeMetric("gauge_test", 5, 1000)]
+        return_value = requests.Response()
+        return_value.status_code = 403
+        # Just assert that we continue running here and do not raise anything.
+        with unittest.mock.patch("requests.post", return_value=return_value) as _:
+            metrics.upload_metrics(test_metrics, "test_userid", "test_api_key")
+
+
+if __name__ == "__main__":
+    unittest.main()
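
The assertions above pin down the wire format: upload_metrics serializes each metric as an InfluxDB line-protocol string and POSTs it to metrics.GRAFANA_URL with HTTP basic auth. Below is a minimal sketch of an implementation consistent with these tests; the dataclass field names and the placeholder URL are assumptions inferred from the test constructor calls, not copied from metrics.py:

from dataclasses import dataclass

import requests

# Assumed field names, inferred from the constructor calls in the tests.
@dataclass
class GaugeMetric:
    name: str
    value: int
    time_ns: int

@dataclass
class JobMetrics:
    job_name: str
    queue_time: int
    run_time: int
    status: int
    created_at_ns: int
    workflow_id: int
    workflow_name: str

GRAFANA_URL = "https://grafana.example/api/v1/push/influx/write"  # placeholder

def upload_metrics(workflow_metrics, metrics_userid, api_key):
    for metric in workflow_metrics:
        if isinstance(metric, GaugeMetric):
            # Line protocol: "<name> value=<v> <timestamp-ns>"
            metric_data = f"{metric.name} value={metric.value} {metric.time_ns}"
        elif isinstance(metric, JobMetrics):
            metric_data = (
                f"{metric.job_name} queue_time={metric.queue_time},"
                f"run_time={metric.run_time},status={metric.status} "
                f"{metric.created_at_ns}"
            )
        else:
            raise ValueError(f"Unsupported metric type: {type(metric)}")
        response = requests.post(
            GRAFANA_URL, data=metric_data, auth=(metrics_userid, api_key)
        )
        if response.status_code < 200 or response.status_code >= 300:
            # Log and keep going, matching test_bad_response_code above.
            print(f"Failed to upload metric: {response.status_code}")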