author     Siddhesh Poyarekar <siddhesh@redhat.com>  2013-04-16 17:37:24 +0530
committer  Siddhesh Poyarekar <siddhesh@redhat.com>  2013-06-11 15:08:13 +0530
commit     c1f75dc386d533806d29b7e94118363a7b50eed8 (patch)
tree       38d917f7a99bf569704a09e56a0dd5b7b4475be5 /benchtests/Makefile
parent     50fd745b4dec07e8e213cf2703b5cabcfa128225 (diff)
Begin porting string performance tests to benchtests
This is the initial support for string function performance tests, along with copying tests for memcpy and memcpy-ifunc as a proof of concept. The string function benchmarks perform operations at different alignments and for different sizes, and compare performance between the plain operations and the optimized string operations. Due to this, their output is incompatible with the function benchmarks, where we're interested in the fastest time, throughput, etc. In the future, the correctness checks in the benchmark tests can be removed. The same goes for the performance measurements in string/test-*.
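As a rough usage sketch (assuming an already-configured glibc build; the exact invocation and output paths depend on the configured object directory and test wrapper), the bench target now runs both benchmark sets, and each string benchmark writes its results to its own file next to the binary:

    # Builds and runs the math function benchmarks (bench-func) and the
    # new string benchmarks (bench-set).
    make bench

    # Each string benchmark leaves a per-binary output file, e.g.
    # (path shown for illustration; $(objpfx) is the benchtests build dir):
    cat $(objpfx)bench-memcpy.out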
Diffstat (limited to 'benchtests/Makefile')
-rw-r--r--   benchtests/Makefile   27
1 file changed, 24 insertions, 3 deletions
diff --git a/benchtests/Makefile b/benchtests/Makefile
index 680440f..27d83f4 100644
--- a/benchtests/Makefile
+++ b/benchtests/Makefile
@@ -23,6 +23,13 @@ subdir := benchtests
bench := acos acosh asin asinh atan atanh cos cosh exp log modf pow rint sin \
sinh tan tanh
+# String function benchmarks.
+string-bench := memcpy
+string-bench-ifunc := $(addsuffix -ifunc, $(string-bench))
+string-bench-all := $(string-bench) $(string-bench-ifunc)
+
+benchset := $(string-bench-all)
+
acos-ARGLIST = double
acos-RET = double
LDFLAGS-bench-acos = -lm
@@ -92,10 +99,15 @@ LDFLAGS-bench-tanh = -lm
# Rules to build and execute the benchmarks. Do not put any benchmark
# parameters beyond this point.
+# We don't want the benchmark programs to run in parallel since that could
+# affect their performance.
+.NOTPARALLEL:
+
include ../Makeconfig
include ../Rules
binaries-bench := $(addprefix $(objpfx)bench-,$(bench))
+binaries-benchset := $(addprefix $(objpfx)bench-,$(benchset))
# The default duration: 10 seconds.
ifndef BENCH_DURATION
@@ -112,7 +124,7 @@ endif
# This makes sure CPPFLAGS-nonlib and CFLAGS-nonlib are passed
# for all these modules.
-cpp-srcs-left := $(binaries-bench:=.c)
+cpp-srcs-left := $(binaries-benchset:=.c) $(binaries-bench:=.c)
lib := nonlib
include $(patsubst %,$(..)cppflags-iterator.mk,$(cpp-srcs-left))
@@ -124,8 +136,17 @@ run-bench = $(test-wrapper-env) \
bench-clean:
rm -f $(binaries-bench) $(addsuffix .o,$(binaries-bench))
+ rm -f $(binaries-benchset) $(addsuffix .o,$(binaries-benchset))
+
+bench: bench-set bench-func
+
+bench-set: $(binaries-benchset)
+ for run in $^; do \
+ echo "Running $${run}"; \
+ $(run-bench) > $${run}.out; \
+ done
-bench: $(binaries-bench)
+bench-func: $(binaries-bench)
{ for run in $^; do \
echo "Running $${run}" >&2; \
$(run-bench); \
@@ -135,7 +156,7 @@ bench: $(binaries-bench)
fi; \
mv -f $(objpfx)bench.out-tmp $(objpfx)bench.out
-$(binaries-bench): %: %.o \
+$(binaries-bench) $(binaries-benchset): %: %.o \
$(sort $(filter $(common-objpfx)lib%,$(link-libc))) \
$(addprefix $(csu-objpfx),start.o) $(+preinit) $(+postinit)
$(+link)
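With the string-bench value above, $(binaries-benchset) expands to bench-memcpy and bench-memcpy-ifunc, so the bench-set recipe effectively boils down to commands along these lines (a sketch; the real invocation goes through $(run-bench), which adds the test wrapper and environment, and $(objpfx) stands for the benchtests build directory):

    echo "Running $(objpfx)bench-memcpy"
    $(objpfx)bench-memcpy > $(objpfx)bench-memcpy.out
    echo "Running $(objpfx)bench-memcpy-ifunc"
    $(objpfx)bench-memcpy-ifunc > $(objpfx)bench-memcpy-ifunc.out

Unlike bench-func, which aggregates its results into a single $(objpfx)bench.out, each string benchmark keeps its own .out file, since its alignment/size tables do not fit the per-function timing format. The .NOTPARALLEL special target added above also ensures the benchmark programs run one at a time, so they do not skew each other's measurements.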