aboutsummaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
authorSimon Glass <sjg@chromium.org>2022-10-20 18:22:50 -0600
committerTom Rini <trini@konsulko.com>2022-10-31 11:02:44 -0400
commitcbd71fad6d468018727ab04b2bb912989aec0785 (patch)
tree64787584abda89116e91efe410fcce1ab326935d /test
parentc43635bdbc6cb1e4ba2d9e2f28f7f3cb3b287bf8 (diff)
downloadu-boot-cbd71fad6d468018727ab04b2bb912989aec0785.zip
u-boot-cbd71fad6d468018727ab04b2bb912989aec0785.tar.gz
u-boot-cbd71fad6d468018727ab04b2bb912989aec0785.tar.bz2
test: Support tests which can only be run manually
At present we normally write tests either in Python or in C. But most Python tests end up doing a lot of checks which would be better done in C. Checks done in C are orders of magnitude faster and it is possible to get full access to U-Boot's internal workings, rather than just relying on the command line. The model is to have a Python test set up some things and then use C code (in a unit test) to check that they were done correctly. But we don't want those checks to happen as part of normal test running, since each C unit test is dependent on the associated Python test, so cannot run without it. To achieve this, add a new UT_TESTF_MANUAL flag to use with the C 'check' tests, so that they can be skipped by default when the 'ut' command is used. Require that tests have a name ending with '_norun', so that pytest knows to skip them. Signed-off-by: Simon Glass <sjg@chromium.org>
Diffstat (limited to 'test')
-rw-r--r--test/cmd_ut.c16
-rw-r--r--test/dm/test-dm.c2
-rw-r--r--test/py/conftest.py8
-rw-r--r--test/test-main.c27
4 files changed, 47 insertions, 6 deletions
diff --git a/test/cmd_ut.c b/test/cmd_ut.c
index dc88c5f..beebd5c 100644
--- a/test/cmd_ut.c
+++ b/test/cmd_ut.c
@@ -19,16 +19,26 @@ int cmd_ut_category(const char *name, const char *prefix,
int argc, char *const argv[])
{
int runs_per_text = 1;
+ bool force_run = false;
int ret;
- if (argc > 1 && !strncmp("-r", argv[1], 2)) {
- runs_per_text = dectoul(argv[1] + 2, NULL);
+ while (argc > 1 && *argv[1] == '-') {
+ const char *str = argv[1];
+
+ switch (str[1]) {
+ case 'r':
+ runs_per_text = dectoul(str + 2, NULL);
+ break;
+ case 'f':
+ force_run = true;
+ break;
+ }
argv++;
argc--;
}
ret = ut_run_list(name, prefix, tests, n_ents,
- argc > 1 ? argv[1] : NULL, runs_per_text);
+ argc > 1 ? argv[1] : NULL, runs_per_text, force_run);
return ret ? CMD_RET_FAILURE : 0;
}
diff --git a/test/dm/test-dm.c b/test/dm/test-dm.c
index eb35813..66cc2bc 100644
--- a/test/dm/test-dm.c
+++ b/test/dm/test-dm.c
@@ -36,7 +36,7 @@ static int dm_test_run(const char *test_name, int runs_per_text)
int ret;
ret = ut_run_list("driver model", "dm_test_", tests, n_ents, test_name,
- runs_per_text);
+ runs_per_text, false);
return ret ? CMD_RET_FAILURE : 0;
}
diff --git a/test/py/conftest.py b/test/py/conftest.py
index 304e931..fc9dd3a 100644
--- a/test/py/conftest.py
+++ b/test/py/conftest.py
@@ -289,7 +289,13 @@ def generate_ut_subtest(metafunc, fixture_name, sym_path):
m = re_ut_test_list.search(l)
if not m:
continue
- vals.append(m.group(1) + ' ' + m.group(2))
+ suite, name = m.groups()
+
+ # Tests marked with _norun should only be run manually using 'ut -f'
+ if name.endswith('_norun'):
+ continue
+
+ vals.append(f'{suite} {name}')
ids = ['ut_' + s.replace(' ', '_') for s in vals]
metafunc.parametrize(fixture_name, vals, ids=ids)
diff --git a/test/test-main.c b/test/test-main.c
index 2323cba..ddfd89c 100644
--- a/test/test-main.c
+++ b/test/test-main.c
@@ -508,6 +508,30 @@ static int ut_run_tests(struct unit_test_state *uts, const char *prefix,
if (!test_matches(prefix, test_name, select_name))
continue;
+
+ if (test->flags & UT_TESTF_MANUAL) {
+ int len;
+
+ /*
+ * manual tests must have a name ending "_norun" as this
+ * is how pytest knows to skip them. See
+ * generate_ut_subtest() for this check.
+ */
+ len = strlen(test_name);
+ if (len < 6 || strcmp(test_name + len - 6, "_norun")) {
+ printf("Test %s is manual so must have a name ending in _norun\n",
+ test_name);
+ uts->fail_count++;
+ return -EBADF;
+ }
+ if (!uts->force_run) {
+ if (select_name) {
+ printf("Test %s skipped as it is manual (use -f to run it)\n",
+ test_name);
+ }
+ continue;
+ }
+ }
old_fail_count = uts->fail_count;
for (i = 0; i < uts->runs_per_test; i++)
ret = ut_run_test_live_flat(uts, test, select_name);
@@ -529,7 +553,7 @@ static int ut_run_tests(struct unit_test_state *uts, const char *prefix,
int ut_run_list(const char *category, const char *prefix,
struct unit_test *tests, int count, const char *select_name,
- int runs_per_test)
+ int runs_per_test, bool force_run)
{
struct unit_test_state uts = { .fail_count = 0 };
bool has_dm_tests = false;
@@ -563,6 +587,7 @@ int ut_run_list(const char *category, const char *prefix,
}
memcpy(uts.fdt_copy, gd->fdt_blob, uts.fdt_size);
}
+ uts.force_run = force_run;
ret = ut_run_tests(&uts, prefix, tests, count, select_name);
/* Best efforts only...ignore errors */