author     Michael Buch <michaelbuch12@gmail.com>    2024-09-16 10:15:52 +0100
committer  GitHub <noreply@github.com>               2024-09-16 10:15:52 +0100
commit     7e5fe3ec5aed001c3b8f0bf59167b6472b91b9cc (patch)
tree       a7da4b39392c980bc6d75830e2a676d56b599a9c /lldb/packages/Python/lldbsuite
parent     9e9b1178ca435f690381ffe8241e4bf1bb7e60fb (diff)
[lldb][test] Remove benchmark API tests (#108629)
These benchmarks don't get run as part of the regular API test suite, and I'm not aware of any CI that runs them. I also haven't managed to run them locally using the `bench.py` script. They appear to be obsolete, so I'm proposing to remove the infrastructure around them entirely. If anyone does know of a use for these, do let me know.
Diffstat (limited to 'lldb/packages/Python/lldbsuite')
-rw-r--r--  lldb/packages/Python/lldbsuite/test/bench.py       77
-rw-r--r--  lldb/packages/Python/lldbsuite/test/decorators.py  12
2 files changed, 0 insertions, 89 deletions
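
For context, a benchmark API test was marked with the `@benchmarks_test` decorator that this commit removes from decorators.py. The sketch below is illustrative only: the class and method names are hypothetical (TestStartupDelays.py is merely one of the files referenced by the removed bench.py runner), and the imports assume the usual lldbsuite test layout.

    # Illustrative sketch only: how a benchmark test was typically marked with
    # the decorator removed by this commit. Class/method names are hypothetical.
    from lldbsuite.test.decorators import benchmarks_test
    from lldbsuite.test.lldbtest import TestBase


    class TestStartupDelays(TestBase):
        @benchmarks_test
        def test_startup_delay(self):
            """Hypothetical benchmark body; the real test measured the delay
            of creating a target, setting a breakpoint, and running to the
            breakpoint stop."""
            pass
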
diff --git a/lldb/packages/Python/lldbsuite/test/bench.py b/lldb/packages/Python/lldbsuite/test/bench.py
deleted file mode 100644
index 1a11b3e..0000000
--- a/lldb/packages/Python/lldbsuite/test/bench.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-
-"""
-A simple bench runner which delegates to the ./dotest.py test driver to run the
-benchmarks defined in the list named 'benches'.
-
-You need to hand edit 'benches' to modify/change the command lines passed to the
-test driver.
-
-Use the following to get only the benchmark results in your terminal output:
-
- ./bench.py -e /Volumes/data/lldb/svn/regression/build/Debug/lldb -x '-F Driver::MainLoop()' 2>&1 | grep -P '^lldb.*benchmark:'
-"""
-
-import os
-from optparse import OptionParser
-
-# dotest.py invocation with no '-e exe-path' uses lldb as the inferior program,
-# unless there is a mentioning of custom executable program.
-benches = [
- # Measure startup delays creating a target, setting a breakpoint, and run
- # to breakpoint stop.
- "./dotest.py -v +b %E %X -n -p TestStartupDelays.py",
- # Measure 'frame variable' response after stopping at a breakpoint.
- "./dotest.py -v +b %E %X -n -p TestFrameVariableResponse.py",
- # Measure stepping speed after stopping at a breakpoint.
- "./dotest.py -v +b %E %X -n -p TestSteppingSpeed.py",
- # Measure expression cmd response with a simple custom executable program.
- "./dotest.py +b -n -p TestExpressionCmd.py",
- # Attach to a spawned process then run disassembly benchmarks.
- "./dotest.py -v +b -n %E -p TestDoAttachThenDisassembly.py",
-]
-
-
-def main():
- """Read the items from 'benches' and run the command line one by one."""
- parser = OptionParser(
- usage="""\
-%prog [options]
-Run the standard benchmarks defined in the list named 'benches'.\
-"""
- )
- parser.add_option(
- "-e",
- "--executable",
- type="string",
- action="store",
- dest="exe",
- help="The target program launched by lldb.",
- )
- parser.add_option(
- "-x",
- "--breakpoint-spec",
- type="string",
- action="store",
- dest="break_spec",
- help="The lldb breakpoint spec for the target program.",
- )
-
- # Parses the options, if any.
- opts, args = parser.parse_args()
-
- print("Starting bench runner....")
-
- for item in benches:
- command = item.replace("%E", '-e "%s"' % opts.exe if opts.exe else "")
- command = command.replace(
- "%X", '-x "%s"' % opts.break_spec if opts.break_spec else ""
- )
- print("Running %s" % (command))
- os.system(command)
-
- print("Bench runner done.")
-
-
-if __name__ == "__main__":
- main()
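
As a quick illustration of the `%E`/`%X` placeholder expansion performed by the loop in main() above, using the example executable and breakpoint spec from the removed module's docstring:

    # Standalone sketch of the substitution done by the removed runner's main()
    # loop; the paths below are the examples from the module docstring.
    exe = "/Volumes/data/lldb/svn/regression/build/Debug/lldb"
    break_spec = "-F Driver::MainLoop()"

    item = "./dotest.py -v +b %E %X -n -p TestStartupDelays.py"
    command = item.replace("%E", '-e "%s"' % exe)
    command = command.replace("%X", '-x "%s"' % break_spec)
    print(command)
    # Prints (on one line):
    # ./dotest.py -v +b -e "/Volumes/data/lldb/svn/regression/build/Debug/lldb"
    #   -x "-F Driver::MainLoop()" -n -p TestStartupDelays.py
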
diff --git a/lldb/packages/Python/lldbsuite/test/decorators.py b/lldb/packages/Python/lldbsuite/test/decorators.py
index 834f01a..34319e2 100644
--- a/lldb/packages/Python/lldbsuite/test/decorators.py
+++ b/lldb/packages/Python/lldbsuite/test/decorators.py
@@ -426,18 +426,6 @@ def add_test_categories(cat):
return impl
-def benchmarks_test(func):
- """Decorate the item as a benchmarks test."""
-
- def should_skip_benchmarks_test():
- return "benchmarks test"
-
- # Mark this function as such to separate them from the regular tests.
- result = skipTestIfFn(should_skip_benchmarks_test)(func)
- result.__benchmarks_test__ = True
- return result
-
-
def no_debug_info_test(func):
"""Decorate the item as a test what don't use any debug info. If this annotation is specified
then the test runner won't generate a separate test for each debug info format."""
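
The `no_debug_info_test` decorator left in the trailing context above is applied the same way; a minimal, illustrative sketch (class and method names are hypothetical):

    # Illustrative sketch only: a test marked so the runner does not generate a
    # separate variant per debug-info format. Names are hypothetical.
    from lldbsuite.test.decorators import no_debug_info_test
    from lldbsuite.test.lldbtest import TestBase


    class TestSomething(TestBase):
        @no_debug_info_test
        def test_without_debug_variants(self):
            """Hypothetical test that does not depend on debug info."""
            pass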