author    Nicolas Schneider <nioncode+github@gmail.com>  2016-04-07 19:26:53 +0200
committer Jussi Pakkanen <jpakkane@gmail.com>  2016-04-07 20:26:53 +0300
commit    1d2b4ed8e9c2a8aec792ca302adf8ef4ac485273 (patch)
tree      e21acb18ec8d1ed25f31efd546fa678709aeffef /run_tests.py
parent    3b5dcdbd42f21bce3f7ca4036d9bd3bb44897b17 (diff)
simplify unit test output (#506)
Print status on a single line: only print a single line for succeeded tests and two lines for failed tests. This makes it easier to scan the output for failed tests.
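As a rough illustration of the reporting pattern this commit introduces, here is a minimal, self-contained Python sketch (the test names, result tuples, and failure messages are hypothetical placeholders, not the actual run_tests.py code):

# Minimal sketch of the one-line-success / two-line-failure reporting.
# Test names and failure messages below are hypothetical placeholders.
passing_tests = 0
failing_tests = 0
results = [('common/1 trivial', ''),
           ('common/2 cpp', 'Compiler threw an exception.')]
for name, msg in results:
    if msg != '':
        # Failures take two lines: the status and the reason.
        print('Failed test: %s' % name)
        print('Reason:', msg)
        failing_tests += 1
    else:
        # Successes take a single line, so failures stand out when scanning.
        print('Succeeded test: %s' % name)
        passing_tests += 1

Run as-is, this prints one line for the succeeding test and a 'Failed test' / 'Reason:' pair for the failing one.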
Diffstat (limited to 'run_tests.py')
-rwxr-xr-x  run_tests.py  21
1 file changed, 11 insertions, 10 deletions
diff --git a/run_tests.py b/run_tests.py
index 556a0a5..ad2450e 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -156,14 +156,8 @@ def validate_install(srcdir, installdir):
             return 'Found extra file %s.' % fname
     return ''
 
-def log_text_file(logfile, testdir, msg, stdo, stde):
-    global passing_tests, failing_tests, stop
-    if msg != '':
-        print('Fail:', msg)
-        failing_tests += 1
-    else:
-        print('Success')
-        passing_tests += 1
+def log_text_file(logfile, testdir, stdo, stde):
+    global stop
     logfile.write('%s\nstdout\n\n---\n' % testdir)
     logfile.write(stdo)
     logfile.write('\n\n---\n\nstderr\n\n---\n')
@@ -306,6 +300,7 @@ def detect_tests_to_run():
     return all_tests
 
 def run_tests(extra_args):
+    global passing_tests, failing_tests, stop
     all_tests = detect_tests_to_run()
     logfile = open('meson-test-run.txt', 'w', encoding="utf_8")
     junit_root = ET.Element('testsuites')
@@ -340,12 +335,18 @@ def run_tests(extra_args):
                 skipped_tests += 1
             else:
                 without_install = "" if len(install_commands) > 0 else " (without install)"
-                print('Running test%s: %s' % (without_install, t))
+                if result.msg != '':
+                    print('Failed test%s: %s' % (without_install, t))
+                    print('Reason:', result.msg)
+                    failing_tests += 1
+                else:
+                    print('Succeeded test%s: %s' % (without_install, t))
+                    passing_tests += 1
                 conf_time += result.conftime
                 build_time += result.buildtime
                 test_time += result.testtime
                 total_time = conf_time + build_time + test_time
-                log_text_file(logfile, t, result.msg, result.stdo, result.stde)
+                log_text_file(logfile, t, result.stdo, result.stde)
                 current_test = ET.SubElement(current_suite, 'testcase', {'name' : testname,
                                              'classname' : name,
                                              'time' : '%.3f' % total_time})
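For context, the testcase element touched above feeds the JUnit-style XML report that run_tests.py writes with xml.etree.ElementTree. A minimal sketch of that pattern follows (the suite name, test name, timing value, and output filename are illustrative placeholders, not the exact report structure):

import xml.etree.ElementTree as ET

# Build a small JUnit-style report the same way the code above does:
# a 'testsuites' root, one 'testsuite' per group, one 'testcase' per test.
junit_root = ET.Element('testsuites')
current_suite = ET.SubElement(junit_root, 'testsuite', {'name': 'common'})
ET.SubElement(current_suite, 'testcase', {'name': '1 trivial',
                                          'classname': 'common',
                                          'time': '%.3f' % 0.123})
ET.ElementTree(junit_root).write('meson-test-run.xml')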