Diffstat (limited to 'tools/perf/tests/shell')
-rwxr-xr-x  tools/perf/tests/shell/buildid.sh                     203
-rwxr-xr-x  tools/perf/tests/shell/c2c.sh                          62
-rwxr-xr-x  tools/perf/tests/shell/evlist.sh                       79
-rwxr-xr-x  tools/perf/tests/shell/jitdump-python.sh               81
-rwxr-xr-x  tools/perf/tests/shell/kallsyms.sh                     56
-rwxr-xr-x  tools/perf/tests/shell/kvm.sh                         154
-rw-r--r--  tools/perf/tests/shell/lib/perf_json_output_lint.py     9
-rw-r--r--  tools/perf/tests/shell/lib/stat_output.sh                2
-rwxr-xr-x  tools/perf/tests/shell/record_weak_term.sh              37
-rwxr-xr-x  tools/perf/tests/shell/script_dlfilter.sh              107
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_output.sh                2
-rwxr-xr-x  tools/perf/tests/shell/stat+json_output.sh               2
-rwxr-xr-x  tools/perf/tests/shell/stat+shadow_stat.sh               4
-rwxr-xr-x  tools/perf/tests/shell/stat+std_output.sh                4
-rwxr-xr-x  tools/perf/tests/shell/stat.sh                          45
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metricgroups.sh          3
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metrics.sh              30
-rwxr-xr-x  tools/perf/tests/shell/test_event_open_fallback.sh      71
-rwxr-xr-x  tools/perf/tests/shell/timechart.sh                     67
-rwxr-xr-x  tools/perf/tests/shell/top.sh                           74
20 files changed, 1044 insertions, 48 deletions
diff --git a/tools/perf/tests/shell/buildid.sh b/tools/perf/tests/shell/buildid.sh
index d2eb213da01d..102808cca9db 100755
--- a/tools/perf/tests/shell/buildid.sh
+++ b/tools/perf/tests/shell/buildid.sh
@@ -36,16 +36,69 @@ if [ ${run_pe} -eq 1 ]; then
unset WAYLAND_DISPLAY
fi
-ex_md5=$(mktemp /tmp/perf.ex.MD5.XXX)
-ex_sha1=$(mktemp /tmp/perf.ex.SHA1.XXX)
+build_id_dir=
+ex_source=$(mktemp /tmp/perf_buildid_test.ex.XXX.c)
+ex_md5=$(mktemp /tmp/perf_buildid_test.ex.MD5.XXX)
+ex_sha1=$(mktemp /tmp/perf_buildid_test.ex.SHA1.XXX)
ex_pe=$(dirname $0)/../pe-file.exe
+data=$(mktemp /tmp/perf_buildid_test.data.XXX)
+log_out=$(mktemp /tmp/perf_buildid_test.log.out.XXX)
+log_err=$(mktemp /tmp/perf_buildid_test.log.err.XXX)
-echo 'int main(void) { return 0; }' | cc -Wl,--build-id=sha1 -o ${ex_sha1} -x c -
-echo 'int main(void) { return 0; }' | cc -Wl,--build-id=md5 -o ${ex_md5} -x c -
+cleanup() {
+ rm -f ${ex_source} ${ex_md5} ${ex_sha1} ${data} ${log_out} ${log_err}
+ if [ ${run_pe} -eq 1 ]; then
+ rm -r ${wineprefix}
+ fi
+ if [ -d "${build_id_dir}" ]; then
+ rm -rf ${build_id_dir}
+ fi
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+# Test program based on the noploop workload.
+cat <<EOF > ${ex_source}
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+static volatile sig_atomic_t done;
+
+static void sighandler(int sig)
+{
+ (void)sig;
+ done = 1;
+}
+
+int main(int argc, const char **argv)
+{
+ int sec = 1;
+
+ if (argc > 1)
+ sec = atoi(argv[1]);
+
+ signal(SIGINT, sighandler);
+ signal(SIGALRM, sighandler);
+ alarm(sec);
+
+ while (!done)
+ continue;
+
+ return 0;
+}
+EOF
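+# Compile the test program twice: once with a SHA1 build-id and once with MD5.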
+cc -Wl,--build-id=sha1 -o ${ex_sha1} ${ex_source}
+cc -Wl,--build-id=md5 -o ${ex_md5} ${ex_source}
echo "test binaries: ${ex_sha1} ${ex_md5} ${ex_pe}"
-check()
+get_build_id()
{
case $1 in
*.exe)
@@ -64,6 +117,15 @@ check()
id=`readelf -n ${1} 2>/dev/null | grep 'Build ID' | awk '{print $3}'`
;;
esac
+ echo ${id}
+}
+
+check()
+{
+ file=$1
+ perf_data=$2
+
+ id=$(get_build_id $file)
echo "build id: ${id}"
id_file=${id#??}
@@ -76,45 +138,53 @@ check()
exit 1
fi
- file=${build_id_dir}/.build-id/$id_dir/`readlink ${link}`/elf
- echo "file: ${file}"
+ cached_file=${build_id_dir}/.build-id/$id_dir/`readlink ${link}`/elf
+ echo "file: ${cached_file}"
# Check for file permission of original file
# in case of pe-file.exe file
echo $1 | grep ".exe"
if [ $? -eq 0 ]; then
- if [ -x $1 ] && [ ! -x $file ]; then
- echo "failed: file ${file} executable does not exist"
+ if [ -x $1 ] && [ ! -x $cached_file ]; then
+ echo "failed: file ${cached_file} executable does not exist"
exit 1
fi
- if [ ! -x $file ] && [ ! -e $file ]; then
- echo "failed: file ${file} does not exist"
+ if [ ! -x $cached_file ] && [ ! -e $cached_file ]; then
+ echo "failed: file ${cached_file} does not exist"
exit 1
fi
- elif [ ! -x $file ]; then
- echo "failed: file ${file} does not exist"
+ elif [ ! -x $cached_file ]; then
+ echo "failed: file ${cached_file} does not exist"
exit 1
fi
- diff ${file} ${1}
+ diff ${cached_file} ${1}
if [ $? -ne 0 ]; then
- echo "failed: ${file} do not match"
+ echo "failed: ${cached_file} do not match"
exit 1
fi
- ${perf} buildid-cache -l | grep ${id}
+ ${perf} buildid-cache -l | grep -q ${id}
if [ $? -ne 0 ]; then
echo "failed: ${id} is not reported by \"perf buildid-cache -l\""
exit 1
fi
+ if [ -n "${perf_data}" ]; then
+ ${perf} buildid-list -i ${perf_data} | grep -q ${id}
+ if [ $? -ne 0 ]; then
+ echo "failed: ${id} is not reported by \"perf buildid-list -i ${perf_data}\""
+ exit 1
+ fi
+ fi
+
echo "OK for ${1}"
}
test_add()
{
- build_id_dir=$(mktemp -d /tmp/perf.debug.XXX)
+ build_id_dir=$(mktemp -d /tmp/perf_buildid_test.debug.XXX)
perf="perf --buildid-dir ${build_id_dir}"
${perf} buildid-cache -v -a ${1}
@@ -128,12 +198,88 @@ test_add()
rm -rf ${build_id_dir}
}
+test_remove()
+{
+ build_id_dir=$(mktemp -d /tmp/perf_buildid_test.debug.XXX)
+ perf="perf --buildid-dir ${build_id_dir}"
+
+ ${perf} buildid-cache -v -a ${1}
+ if [ $? -ne 0 ]; then
+ echo "failed: add ${1} to build id cache"
+ exit 1
+ fi
+
+ id=$(get_build_id ${1})
+ if ! ${perf} buildid-cache -l | grep -q ${id}; then
+ echo "failed: ${id} not in cache"
+ exit 1
+ fi
+
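+ # Remove the binary from the cache by path, then confirm its build-id is gone.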
+ ${perf} buildid-cache -v -r ${1}
+ if [ $? -ne 0 ]; then
+ echo "failed: remove ${id} from build id cache"
+ exit 1
+ fi
+
+ if ${perf} buildid-cache -l | grep -q ${id}; then
+ echo "failed: ${id} still in cache after remove"
+ exit 1
+ fi
+
+ echo "remove: OK"
+ rm -rf ${build_id_dir}
+}
+
+test_purge()
+{
+ build_id_dir=$(mktemp -d /tmp/perf_buildid_test.debug.XXX)
+ perf="perf --buildid-dir ${build_id_dir}"
+
+ id1=$(get_build_id ${ex_sha1})
+ ${perf} buildid-cache -v -a ${ex_sha1}
+ if ! ${perf} buildid-cache -l | grep -q ${id1}; then
+ echo "failed: ${id1} not in cache"
+ exit 1
+ fi
+
+ id2=$(get_build_id ${ex_md5})
+ ${perf} buildid-cache -v -a ${ex_md5}
+ if ! ${perf} buildid-cache -l | grep -q ${id2}; then
+ echo "failed: ${id2} not in cache"
+ exit 1
+ fi
+
+ # Purge by path
+ ${perf} buildid-cache -v -p ${ex_sha1}
+ if [ $? -ne 0 ]; then
+ echo "failed: purge build id cache of ${ex_sha1}"
+ exit 1
+ fi
+
+ ${perf} buildid-cache -v -p ${ex_md5}
+ if [ $? -ne 0 ]; then
+ echo "failed: purge build id cache of ${ex_md5}"
+ exit 1
+ fi
+
+ # Verify both are gone
+ if ${perf} buildid-cache -l | grep -q ${id1}; then
+ echo "failed: ${id1} still in cache after purge"
+ exit 1
+ fi
+
+ if ${perf} buildid-cache -l | grep -q ${id2}; then
+ echo "failed: ${id2} still in cache after purge"
+ exit 1
+ fi
+
+ echo "purge: OK"
+ rm -rf ${build_id_dir}
+}
+
test_record()
{
- data=$(mktemp /tmp/perf.data.XXX)
- build_id_dir=$(mktemp -d /tmp/perf.debug.XXX)
- log_out=$(mktemp /tmp/perf.log.out.XXX)
- log_err=$(mktemp /tmp/perf.log.err.XXX)
+ build_id_dir=$(mktemp -d /tmp/perf_buildid_test.debug.XXX)
perf="perf --buildid-dir ${build_id_dir}"
echo "running: perf record $*"
@@ -145,7 +291,7 @@ test_record()
fi
args="$*"
- check ${args##* }
+ check ${args##* } ${data}
rm -f ${log_out} ${log_err}
rm -rf ${build_id_dir}
@@ -166,10 +312,15 @@ if [ ${run_pe} -eq 1 ]; then
test_record wine ${ex_pe}
fi
-# cleanup
-rm ${ex_sha1} ${ex_md5}
-if [ ${run_pe} -eq 1 ]; then
- rm -r ${wineprefix}
+# remove binaries manually via perf buildid-cache -r
+test_remove ${ex_sha1}
+test_remove ${ex_md5}
+if [ ${add_pe} -eq 1 ]; then
+ test_remove ${ex_pe}
fi
+# purge binaries manually via perf buildid-cache -p
+test_purge
+
+cleanup
exit 0
diff --git a/tools/perf/tests/shell/c2c.sh b/tools/perf/tests/shell/c2c.sh
new file mode 100755
index 000000000000..2471d44595c3
--- /dev/null
+++ b/tools/perf/tests/shell/c2c.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# perf c2c tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_c2c_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+check_c2c_support() {
+ # Check if perf c2c record works.
+ if ! perf c2c record -o "${perfdata}" -- true > /dev/null 2>&1 ; then
+ return 1
+ fi
+ return 0
+}
+
+test_c2c_record_report() {
+ echo "c2c record and report test"
+ if ! check_c2c_support ; then
+ echo "c2c record and report test [Skipped: perf c2c record failed (possibly missing hardware support)]"
+ err=2
+ return
+ fi
+
+ # Run a workload that does some memory operations.
+ if ! perf c2c record -o "${perfdata}" -- perf test -w datasym 1 > /dev/null 2>&1 ; then
+ echo "c2c record and report test [Skipped: perf c2c record failed during workload]"
+ return
+ fi
+
+ if ! perf c2c report -i "${perfdata}" --stdio > /dev/null 2>&1 ; then
+ echo "c2c record and report test [Failed: report failed]"
+ err=1
+ return
+ fi
+
+ if ! perf c2c report -i "${perfdata}" -N > /dev/null 2>&1 ; then
+ echo "c2c record and report test [Failed: report -N failed]"
+ err=1
+ return
+ fi
+
+ echo "c2c record and report test [Success]"
+}
+
+test_c2c_record_report
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/evlist.sh b/tools/perf/tests/shell/evlist.sh
new file mode 100755
index 000000000000..140f099e75c1
--- /dev/null
+++ b/tools/perf/tests/shell/evlist.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+# perf evlist tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_evlist_simple() {
+ echo "Simple evlist test"
+ if ! perf record -e cycles -o "${perfdata}" true 2> /dev/null
+ then
+ echo "Simple evlist [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf evlist -i "${perfdata}" | grep -q "cycles"
+ then
+ echo "Simple evlist [Failed to list event]"
+ err=1
+ return
+ fi
+ echo "Simple evlist test [Success]"
+}
+
+test_evlist_group() {
+ echo "Group evlist test"
+ if ! perf record -e "{cycles,instructions}" -o "${perfdata}" true 2> /dev/null
+ then
+ echo "Group evlist [Skipped event group recording failed]"
+ return
+ fi
+
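+ # "perf evlist -g" prints events together with their group, e.g. "{cycles,instructions}".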
+ if ! perf evlist -i "${perfdata}" -g | grep -q "{.*cycles.*,.*instructions.*}"
+ then
+ echo "Group evlist [Failed to list event group]"
+ err=1
+ return
+ fi
+ echo "Group evlist test [Success]"
+}
+
+test_evlist_verbose() {
+ echo "Event configuration evlist test"
+ if ! perf record -e cycles -o "${perfdata}" true 2> /dev/null
+ then
+ echo "Event configuration evlist [Failed record]"
+ err=1
+ return
+ fi
+
+ if ! perf evlist -i "${perfdata}" -v | grep -q "config:"
+ then
+ echo "Event configuration evlist [Failed to list verbose info]"
+ err=1
+ return
+ fi
+ echo "Event configuration evlist test [Success]"
+}
+
+test_evlist_simple
+test_evlist_group
+test_evlist_verbose
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/jitdump-python.sh b/tools/perf/tests/shell/jitdump-python.sh
new file mode 100755
index 000000000000..ae86203b14a2
--- /dev/null
+++ b/tools/perf/tests/shell/jitdump-python.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+# python profiling with jitdump
+# SPDX-License-Identifier: GPL-2.0
+
+SHELLDIR=$(dirname $0)
+# shellcheck source=lib/setup_python.sh
+. "${SHELLDIR}"/lib/setup_python.sh
+
+OUTPUT=$(${PYTHON} -Xperf_jit -c 'import os, sys; print(os.getpid(), sys.is_stack_trampoline_active())' 2> /dev/null)
+PID=${OUTPUT% *}
+HAS_PERF_JIT=${OUTPUT#* }
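+# With -Xperf_jit, CPython writes a jitdump file (/tmp/jit-<pid>.dump) that perf inject
+# can consume; sys.is_stack_trampoline_active() reports whether that support is enabled.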
+
+rm -f /tmp/jit-${PID}.dump 2> /dev/null
+if [ "${HAS_PERF_JIT}" != "True" ]; then
+ echo "SKIP: python JIT dump is not available"
+ exit 2
+fi
+
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXXX)
+
+cleanup() {
+ echo "Cleaning up files..."
+ rm -f ${PERF_DATA} ${PERF_DATA}.jit /tmp/jit-${PID}.dump /tmp/jitted-${PID}-*.so 2> /dev/null
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected termination"
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+echo "Run python with -Xperf_jit"
+cat <<EOF | perf record -k 1 -g --call-graph dwarf -o "${PERF_DATA}" \
+ -- ${PYTHON} -Xperf_jit
+def foo(n):
+ result = 0
+ for _ in range(n):
+ result += 1
+ return result
+
+def bar(n):
+ foo(n)
+
+def baz(n):
+ bar(n)
+
+if __name__ == "__main__":
+ baz(1000000)
+EOF
+
+# extract PID of the target process from the data
+_PID=$(perf report -i "${PERF_DATA}" -F pid -q -g none | cut -d: -f1 -s)
+PID=$(echo -n $_PID) # remove newlines
+
+echo "Generate JIT-ed DSOs using perf inject"
+DEBUGINFOD_URLS='' perf inject -i "${PERF_DATA}" -j -o "${PERF_DATA}.jit"
+
+echo "Add JIT-ed DSOs to the build-ID cache"
+for F in /tmp/jitted-${PID}-*.so; do
+ perf buildid-cache -a "${F}"
+done
+
+echo "Check the symbol containing the function/module name"
+NUM=$(perf report -i "${PERF_DATA}.jit" -s sym | grep -cE 'py::(foo|bar|baz):<stdin>')
+
+echo "Found ${NUM} matching lines"
+
+echo "Remove JIT-ed DSOs from the build-ID cache"
+for F in /tmp/jitted-${PID}-*.so; do
+ perf buildid-cache -r "${F}"
+done
+
+cleanup
+
+if [ "${NUM}" -eq 0 ]; then
+ exit 1
+fi
diff --git a/tools/perf/tests/shell/kallsyms.sh b/tools/perf/tests/shell/kallsyms.sh
new file mode 100755
index 000000000000..d0eb99753f47
--- /dev/null
+++ b/tools/perf/tests/shell/kallsyms.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+# perf kallsyms tests
+# SPDX-License-Identifier: GPL-2.0
+
+err=0
+
+test_kallsyms() {
+ echo "Basic perf kallsyms test"
+
+ # Check if /proc/kallsyms is readable
+ if [ ! -r /proc/kallsyms ]; then
+ echo "Basic perf kallsyms test [Skipped: /proc/kallsyms not readable]"
+ err=2
+ return
+ fi
+
+ # Use a symbol that is definitely a function and present in all kernels, e.g. schedule
+ symbol="schedule"
+
+ # Run perf kallsyms
+ # It prints "address symbol_name"
+ output=$(perf kallsyms $symbol 2>&1)
+ ret=$?
+
+ if [ $ret -ne 0 ] || [ -z "$output" ]; then
+ # If empty or failed, it might be due to permissions (kptr_restrict)
+ # Check if we can grep the symbol from /proc/kallsyms directly
+ if grep -q "$symbol" /proc/kallsyms 2>/dev/null; then
+ # If it's in /proc/kallsyms but perf kallsyms returned empty/error,
+ # it likely means perf couldn't parse it or access it correctly (e.g. kptr_restrict=2).
+ echo "Basic perf kallsyms test [Skipped: $symbol found in /proc/kallsyms but perf kallsyms failed (output: '$output')]"
+ err=2
+ return
+ else
+ echo "Basic perf kallsyms test [Skipped: $symbol not found in /proc/kallsyms]"
+ err=2
+ return
+ fi
+ fi
+
+ if echo "$output" | grep -q "not found"; then
+ echo "Basic perf kallsyms test [Failed: output '$output' does not contain $symbol]"
+ err=1
+ return
+ fi
+
+ if perf kallsyms ErlingHaaland | grep -vq "not found"; then
+ echo "Basic perf kallsyms test [Failed: ErlingHaaland found in the output]"
+ err=1
+ return
+ fi
+ echo "Basic perf kallsyms test [Success]"
+}
+
+test_kallsyms
+exit $err
diff --git a/tools/perf/tests/shell/kvm.sh b/tools/perf/tests/shell/kvm.sh
new file mode 100755
index 000000000000..2fafde1a29cc
--- /dev/null
+++ b/tools/perf/tests/shell/kvm.sh
@@ -0,0 +1,154 @@
+#!/bin/bash
+# perf kvm tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_kvm_test.perf.data.XXXXX)
+qemu_pid_file=$(mktemp /tmp/__perf_kvm_test.qemu.pid.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ if [ -f "${qemu_pid_file}" ]; then
+ if [ -s "${qemu_pid_file}" ]; then
+ qemu_pid=$(cat "${qemu_pid_file}")
+ if [ -n "${qemu_pid}" ]; then
+ kill "${qemu_pid}" 2>/dev/null || true
+ fi
+ fi
+ rm -f "${qemu_pid_file}"
+ fi
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+skip() {
+ echo "Skip: $1"
+ cleanup
+ exit 2
+}
+
+test_kvm_stat() {
+ echo "Testing perf kvm stat"
+
+ echo "Recording kvm events for pid ${qemu_pid}..."
+ if ! perf kvm stat record -p "${qemu_pid}" -o "${perfdata}" sleep 1; then
+ echo "Failed to record kvm events"
+ err=1
+ return
+ fi
+
+ echo "Reporting kvm events..."
+ if ! perf kvm -i "${perfdata}" stat report 2>&1 | grep -q "VM-EXIT"; then
+ echo "Failed to find VM-EXIT in report"
+ perf kvm -i "${perfdata}" stat report 2>&1
+ err=1
+ return
+ fi
+
+ echo "perf kvm stat test [Success]"
+}
+
+test_kvm_record_report() {
+ echo "Testing perf kvm record/report"
+
+ echo "Recording kvm profile for pid ${qemu_pid}..."
+ # Use --host to avoid needing guest symbols/mounts for this simple test
+ # We just want to verify the command runs and produces data
+ # We run in background and kill it because 'perf kvm record' appends options
+ # after the command, which breaks 'sleep' (e.g. it gets '-e cycles').
+ perf kvm --host record -p "${qemu_pid}" -o "${perfdata}" &
+ rec_pid=$!
+ sleep 1
+ kill -INT "${rec_pid}"
+ wait "${rec_pid}" || true
+
+ echo "Reporting kvm profile..."
+ # Check for some standard output from report
+ if ! perf kvm -i "${perfdata}" report --stdio 2>&1 | grep -q "Event count"; then
+ echo "Failed to report kvm profile"
+ perf kvm -i "${perfdata}" report --stdio 2>&1
+ err=1
+ return
+ fi
+
+ echo "perf kvm record/report test [Success]"
+}
+
+test_kvm_buildid_list() {
+ echo "Testing perf kvm buildid-list"
+
+ # We reuse the perf.data from the previous record test
+ if ! perf kvm --host -i "${perfdata}" buildid-list 2>&1 | grep -q "."; then
+ echo "Failed to list buildids"
+ perf kvm --host -i "${perfdata}" buildid-list 2>&1
+ err=1
+ return
+ fi
+
+ echo "perf kvm buildid-list test [Success]"
+}
+
+setup_qemu() {
+ # Find qemu
+ if [ "$(uname -m)" = "x86_64" ]; then
+ qemu="qemu-system-x86_64"
+ elif [ "$(uname -m)" = "aarch64" ]; then
+ qemu="qemu-system-aarch64"
+ elif [ "$(uname -m)" = "s390x" ]; then
+ qemu="qemu-system-s390x"
+ elif [ "$(uname -m)" = "ppc64le" ]; then
+ qemu="qemu-system-ppc64"
+ else
+ qemu="qemu-system-$(uname -m)"
+ fi
+
+ if ! command -v "$qemu" > /dev/null 2>&1; then
+ skip "$qemu not found"
+ fi
+
+ if [ ! -r /dev/kvm ] || [ ! -w /dev/kvm ]; then
+ skip "/dev/kvm not accessible"
+ fi
+
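+ # Probe with a short system-wide "perf kvm stat record" to verify the kvm
+ # tracepoints can be recorded with the current permissions.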
+ if ! perf kvm stat record -a sleep 0.01 >/dev/null 2>&1; then
+ skip "No permission to record kvm events"
+ fi
+
+ echo "Starting $qemu..."
+ # Start qemu in background, detached, with pidfile
+ # We use -display none -daemonize and a monitor to keep it alive/controllable if needed
+ # We don't need a real kernel, just KVM active.
+ if ! $qemu -enable-kvm -display none -daemonize -pidfile "${qemu_pid_file}" -monitor none; then
+ echo "Failed to start qemu"
+ err=1
+ return
+ fi
+
+ # Wait a bit for qemu to start
+ sleep 1
+ qemu_pid=$(cat "${qemu_pid_file}")
+
+ if ! kill -0 "${qemu_pid}" 2>/dev/null; then
+ echo "Qemu process failed to stay alive"
+ err=1
+ return
+ fi
+}
+
+setup_qemu
+if [ $err -eq 0 ]; then
+ test_kvm_stat
+ test_kvm_record_report
+ test_kvm_buildid_list
+fi
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
index c6750ef06c0f..dafbde56cc76 100644
--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -43,6 +43,9 @@ def isint(num):
def is_counter_value(num):
return isfloat(num) or num == '<not counted>' or num == '<not supported>'
+def is_metric_value(num):
+ return isfloat(num) or num == 'none'
+
def check_json_output(expected_items):
checks = {
'counters': lambda x: isfloat(x),
@@ -57,7 +60,7 @@ def check_json_output(expected_items):
'event-runtime': lambda x: isfloat(x),
'interval': lambda x: isfloat(x),
'metric-unit': lambda x: True,
- 'metric-value': lambda x: isfloat(x),
+ 'metric-value': lambda x: is_metric_value(x),
'metric-threshold': lambda x: x in ['unknown', 'good', 'less good', 'nearly bad', 'bad'],
'metricgroup': lambda x: True,
'node': lambda x: True,
@@ -65,8 +68,6 @@ def check_json_output(expected_items):
'socket': lambda x: True,
'thread': lambda x: True,
'unit': lambda x: True,
- 'insn per cycle': lambda x: isfloat(x),
- 'GHz': lambda x: True, # FIXME: it seems unintended for --metric-only
}
input = '[\n' + ','.join(Lines) + '\n]'
for item in json.loads(input):
@@ -88,6 +89,8 @@ def check_json_output(expected_items):
f' in \'{item}\'')
for key, value in item.items():
if key not in checks:
+ if args.metric_only:
+ continue
raise RuntimeError(f'Unexpected key: key={key} value={value}')
if not checks[key](value):
raise RuntimeError(f'Check failed for: key={key} value={value}')
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
index c2ec7881ec1d..3c36e80fe422 100644
--- a/tools/perf/tests/shell/lib/stat_output.sh
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -156,7 +156,7 @@ check_metric_only()
echo "[Skip] CPU-measurement counter facility not installed"
return
fi
- perf stat --metric-only $2 -e instructions,cycles true
+ perf stat --metric-only $2 -M page_faults_per_second true
commachecker --metric-only
echo "[Success]"
}
diff --git a/tools/perf/tests/shell/record_weak_term.sh b/tools/perf/tests/shell/record_weak_term.sh
new file mode 100755
index 000000000000..811b00ffb47a
--- /dev/null
+++ b/tools/perf/tests/shell/record_weak_term.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# record weak terms
+# SPDX-License-Identifier: GPL-2.0
+# Test that command line options override weak terms from sysfs or inbuilt json.
+set -e
+
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
+
+# Find the first event with a specified period, such as
+# "cpu_core/event=0x24,period=200003,umask=0xff/"
+event=$(perf list --json | $PYTHON -c '
+import json, sys
+for e in json.load(sys.stdin):
+ if "EventName" not in e or "/modifier" in e["EventName"]:
+ continue
+ if "Encoding" in e and "period=" in e["Encoding"]:
+ print(e["EventName"])
+ break
+')
+if [[ "$event" = "" ]]
+then
+ echo "Skip: No sysfs/json events with inbuilt period."
+ exit 2
+fi
+
+echo "Testing that for $event the period is overridden with 1000"
+perf list --detail "$event"
+if ! perf record -c 1000 -vv -e "$event" -o /dev/null true 2>&1 | \
+ grep -q -F '{ sample_period, sample_freq } 1000'
+then
+ echo "Fail: Unexpected verbose output and sample period"
+ exit 1
+fi
+echo "Success"
+exit 0
diff --git a/tools/perf/tests/shell/script_dlfilter.sh b/tools/perf/tests/shell/script_dlfilter.sh
new file mode 100755
index 000000000000..45c97d4a7d5f
--- /dev/null
+++ b/tools/perf/tests/shell/script_dlfilter.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+# perf script --dlfilter tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
+
+# skip if there's no compiler
+if ! [ -x "$(command -v cc)" ]; then
+ echo "failed: no compiler, install gcc"
+ exit 2
+fi
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+dlfilter_c=$(mktemp /tmp/__perf_test.dlfilter.test.c.XXXXX)
+dlfilter_so=$(mktemp /tmp/__perf_test.dlfilter.so.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${dlfilter_c}"
+ rm -f "${dlfilter_so}"
+ rm -f "${dlfilter_so}.o"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+cat <<EOF > "${dlfilter_c}"
+#include <perf/perf_dlfilter.h>
+#include <string.h>
+#include <stdio.h>
+
+struct perf_dlfilter_fns perf_dlfilter_fns;
+
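+/* Returning non-zero filters the sample out; keep only samples resolving to test_loop. */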
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+ const struct perf_dlfilter_al *al;
+
+ if (!sample->ip)
+ return 0;
+
+ al = perf_dlfilter_fns.resolve_ip(ctx);
+ if (!al || !al->sym || strcmp(al->sym, "test_loop"))
+ return 1;
+
+ return 0;
+}
+EOF
+
+test_dlfilter() {
+ echo "Basic --dlfilter test"
+ # Generate perf.data file
+ if ! perf record -o "${perfdata}" perf test -w thloop 1 2> /dev/null
+ then
+ echo "Basic --dlfilter test [Failed record]"
+ err=1
+ return
+ fi
+
+ # Build the dlfilter
+ if ! cc -c -I tools/perf/include -fpic -x c "${dlfilter_c}" -o "${dlfilter_so}.o"
+ then
+ echo "Basic --dlfilter test [Failed to build dlfilter object]"
+ err=1
+ return
+ fi
+
+ if ! cc -shared -o "${dlfilter_so}" "${dlfilter_so}.o"
+ then
+ echo "Basic --dlfilter test [Failed to link dlfilter shared object]"
+ err=1
+ return
+ fi
+
+ # Check that the output contains "test_loop"
+ if ! perf script -i "${perfdata}" --dlfilter "${dlfilter_so}" | grep -q "test_loop"
+ then
+ echo "Basic --dlfilter test [Failed missing output]"
+ err=1
+ return
+ fi
+
+ # The filter should filter out everything except test_loop, so ensure no other symbols are present
+ # This is a simple check; we could be more rigorous
+ if perf script -i "${perfdata}" --dlfilter "${dlfilter_so}" | grep -v "test_loop" | grep -q "perf"
+ then
+ echo "Basic --dlfilter test [Failed filtering]"
+ err=1
+ return
+ fi
+
+ echo "Basic --dlfilter test [Success]"
+}
+
+test_dlfilter
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index 7a6f6e177402..cd6fff597091 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -44,7 +44,7 @@ function commachecker()
;; "--per-die") exp=8
;; "--per-cluster") exp=8
;; "--per-cache") exp=8
- ;; "--metric-only") exp=2
+ ;; "--metric-only") exp=1
esac
while read line
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
index 98fb65274ac4..85d1ad7186c6 100755
--- a/tools/perf/tests/shell/stat+json_output.sh
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -181,7 +181,7 @@ check_metric_only()
echo "[Skip] CPU-measurement counter facility not installed"
return
fi
- perf stat -j --metric-only -e instructions,cycles -o "${stat_output}" true
+ perf stat -j --metric-only -M page_faults_per_second -o "${stat_output}" true
$PYTHON $pythonchecker --metric-only --file "${stat_output}"
echo "[Success]"
}
diff --git a/tools/perf/tests/shell/stat+shadow_stat.sh b/tools/perf/tests/shell/stat+shadow_stat.sh
index 8824f445d343..cabbbf17c662 100755
--- a/tools/perf/tests/shell/stat+shadow_stat.sh
+++ b/tools/perf/tests/shell/stat+shadow_stat.sh
@@ -14,7 +14,7 @@ perf stat -a -e cycles sleep 1 2>&1 | grep -e cpu_core && exit 2
test_global_aggr()
{
- perf stat -a --no-big-num -e cycles,instructions sleep 1 2>&1 | \
+ perf stat -a --no-big-num -M insn_per_cycle sleep 1 2>&1 | \
grep -e cycles -e instructions | \
while read num evt _ ipc rest
do
@@ -53,7 +53,7 @@ test_global_aggr()
test_no_aggr()
{
- perf stat -a -A --no-big-num -e cycles,instructions sleep 1 2>&1 | \
+ perf stat -a -A --no-big-num -M insn_per_cycle sleep 1 2>&1 | \
grep ^CPU | \
while read cpu num evt _ ipc rest
do
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
index ec41f24299d9..9c4b92ecf448 100755
--- a/tools/perf/tests/shell/stat+std_output.sh
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -12,8 +12,8 @@ set -e
stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX)
event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses)
-event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches")
-skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound" "TopdownL1" "percent of slots")
+event_metric=("CPUs_utilized" "CPUs_utilized" "cs/sec" "migrations/sec" "faults/sec" "frontend_cycles_idle" "backend_cycles_idle" "GHz" "insn_per_cycle" "/sec" "branch_miss_rate")
+skip_metric=("tma_" "TopdownL1")
cleanup() {
rm -f "${stat_output}"
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
index 8a100a7f2dc1..0b2f0f88ca16 100755
--- a/tools/perf/tests/shell/stat.sh
+++ b/tools/perf/tests/shell/stat.sh
@@ -16,9 +16,46 @@ test_default_stat() {
echo "Basic stat command test [Success]"
}
+test_null_stat() {
+ echo "Null stat command test"
+ if ! perf stat --null true 2>&1 | grep -E -q "Performance counter stats for 'true':"
+ then
+ echo "Null stat command test [Failed]"
+ err=1
+ return
+ fi
+ echo "Null stat command test [Success]"
+}
+
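+# Find the index of the first CPU that is offline or not present at all;
+# "perf stat -C" on such a CPU is expected to find no supported events.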
+find_offline_cpu() {
+ for i in $(seq 1 4096)
+ do
+ if [[ ! -f /sys/devices/system/cpu/cpu$i/online || \
+ $(cat /sys/devices/system/cpu/cpu$i/online) == "0" ]]
+ then
+ echo $i
+ return
+ fi
+ done
+ echo "Failed to find offline CPU"
+ exit 1
+}
+
+test_offline_cpu_stat() {
+ cpu=$(find_offline_cpu)
+ echo "Offline CPU stat command test (cpu $cpu)"
+ if ! perf stat "-C$cpu" -e cycles true 2>&1 | grep -E -q "No supported events found."
+ then
+ echo "Offline CPU stat command test [Failed]"
+ err=1
+ return
+ fi
+ echo "Offline CPU stat command test [Success]"
+}
+
test_stat_record_report() {
echo "stat record and report test"
- if ! perf stat record -o - true | perf stat report -i - 2>&1 | \
+ if ! perf stat record -e task-clock -o - true | perf stat report -i - 2>&1 | \
grep -E -q "Performance counter stats for 'pipe':"
then
echo "stat record and report test [Failed]"
@@ -30,7 +67,7 @@ test_stat_record_report() {
test_stat_record_script() {
echo "stat record and script test"
- if ! perf stat record -o - true | perf script -i - 2>&1 | \
+ if ! perf stat record -e task-clock -o - true | perf script -i - 2>&1 | \
grep -E -q "CPU[[:space:]]+THREAD[[:space:]]+VAL[[:space:]]+ENA[[:space:]]+RUN[[:space:]]+TIME[[:space:]]+EVENT"
then
echo "stat record and script test [Failed]"
@@ -196,7 +233,7 @@ test_hybrid() {
fi
# Run default Perf stat
- cycles_events=$(perf stat -- true 2>&1 | grep -E "/cycles/[uH]*| cycles[:uH]* " -c)
+ cycles_events=$(perf stat -a -- sleep 0.1 2>&1 | grep -E "/cpu-cycles/[uH]*| cpu-cycles[:uH]* " -c)
# The expectation is that default output will have a cycles events on each
# hybrid PMU. In situations with no cycles PMU events, like virtualized, this
@@ -212,6 +249,8 @@ test_hybrid() {
}
test_default_stat
+test_null_stat
+test_offline_cpu_stat
test_stat_record_report
test_stat_record_script
test_stat_repeat_weak_groups
diff --git a/tools/perf/tests/shell/stat_all_metricgroups.sh b/tools/perf/tests/shell/stat_all_metricgroups.sh
index c6d61a4ac3e7..1400880ec01f 100755
--- a/tools/perf/tests/shell/stat_all_metricgroups.sh
+++ b/tools/perf/tests/shell/stat_all_metricgroups.sh
@@ -37,6 +37,9 @@ do
then
err=2 # Skip
fi
+ elif [[ "$m" == @(Default2|Default3|Default4) ]]
+ then
+ echo "Ignoring failures in $m that may contain unsupported legacy events"
else
echo "Metric group $m failed"
echo $result
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
index 6fa585a1e34c..3dabb39c7cc8 100755
--- a/tools/perf/tests/shell/stat_all_metrics.sh
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -25,16 +25,22 @@ for m in $(perf list --raw-dump metrics); do
# No error result and metric shown.
continue
fi
- if [[ "$result" =~ "Cannot resolve IDs for" ]]
+ if [[ "$result" =~ "Cannot resolve IDs for" || "$result" =~ "No supported events found" ]]
then
- echo "Metric contains missing events"
+ if [[ $(perf list --raw-dump $m) == "Default"* ]]
+ then
+ echo "[Ignored $m] failed but as a Default metric this can be expected"
+ echo $result
+ continue
+ fi
+ echo "[Failed $m] Metric contains missing events"
echo $result
err=1 # Fail
continue
elif [[ "$result" =~ \
"Access to performance monitoring and observability operations is limited" ]]
then
- echo "Permission failure"
+ echo "[Skipped $m] Permission failure"
echo $result
if [[ $err -eq 0 ]]
then
@@ -43,7 +49,7 @@ for m in $(perf list --raw-dump metrics); do
continue
elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
then
- echo "Permissions - need system wide mode"
+ echo "[Skipped $m] Permissions - need system wide mode"
echo $result
if [[ $err -eq 0 ]]
then
@@ -52,7 +58,13 @@ for m in $(perf list --raw-dump metrics); do
continue
elif [[ "$result" =~ "<not supported>" ]]
then
- echo "Not supported events"
+ if [[ $(perf list --raw-dump $m) == "Default"* ]]
+ then
+ echo "[Ignored $m] failed but as a Default metric this can be expected"
+ echo $result
+ continue
+ fi
+ echo "[Skipped $m] Not supported events"
echo $result
if [[ $err -eq 0 ]]
then
@@ -61,7 +73,7 @@ for m in $(perf list --raw-dump metrics); do
continue
elif [[ "$result" =~ "<not counted>" ]]
then
- echo "Not counted events"
+ echo "[Skipped $m] Not counted events"
echo $result
if [[ $err -eq 0 ]]
then
@@ -70,7 +82,7 @@ for m in $(perf list --raw-dump metrics); do
continue
elif [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
then
- echo "FP issues"
+ echo "[Skipped $m] FP issues"
echo $result
if [[ $err -eq 0 ]]
then
@@ -79,7 +91,7 @@ for m in $(perf list --raw-dump metrics); do
continue
elif [[ "$result" =~ "PMM" ]]
then
- echo "Optane memory issues"
+ echo "[Skipped $m] Optane memory issues"
echo $result
if [[ $err -eq 0 ]]
then
@@ -96,7 +108,7 @@ for m in $(perf list --raw-dump metrics); do
# No error result and metric shown.
continue
fi
- echo "Metric '$m' has non-zero error '$result_err' or not printed in:"
+ echo "[Failed $m] has non-zero error '$result_err' or not printed in:"
echo "$result"
err=1
done
diff --git a/tools/perf/tests/shell/test_event_open_fallback.sh b/tools/perf/tests/shell/test_event_open_fallback.sh
new file mode 100755
index 000000000000..9420a7557c13
--- /dev/null
+++ b/tools/perf/tests/shell/test_event_open_fallback.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+# Perf event open fallback test
+# SPDX-License-Identifier: GPL-2.0
+
+skip_cnt=0
+ok_cnt=0
+err_cnt=0
+
+perf_record()
+{
+ perf record -o /dev/null "$@" -- true 1>/dev/null 2>&1
+}
+
+test_decrease_precise_ip()
+{
+ echo "Decrease precise ip test"
+
+ perf list pmu | grep -q 'cycles' || return 2
+
+ if ! perf_record -e cycles; then
+ return 2
+ fi
+
+ # It should reduce precision level down to 0 if needed.
+ if ! perf_record -e cycles:P; then
+ return 1
+ fi
+ return 0
+}
+
+test_decrease_precise_ip_complicated()
+{
+ echo "Decrease precise ip test (complicated case)"
+
+ perf list pmu | grep -q 'mem-loads-aux' || return 2
+
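+ # On some Intel CPUs, mem-loads sampling needs the mem-loads-aux auxiliary event as
+ # group leader; the fallback should still find a usable precise_ip for the whole group.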
+ if ! perf_record -e '{mem-loads-aux:S,mem-loads:PS}'; then
+ return 1
+ fi
+ return 0
+}
+
+count_result()
+{
+ if [ "$1" -eq 2 ] ; then
+ skip_cnt=$((skip_cnt + 1))
+ return
+ fi
+ if [ "$1" -eq 0 ] ; then
+ ok_cnt=$((ok_cnt + 1))
+ return
+ fi
+ err_cnt=$((err_cnt + 1))
+}
+
+ret=0
+test_decrease_precise_ip || ret=$? ; count_result $ret ; ret=0
+test_decrease_precise_ip_complicated || ret=$? ; count_result $ret ; ret=0
+
+if [ ${err_cnt} -gt 0 ] ; then
+ exit 1
+fi
+
+if [ ${ok_cnt} -gt 0 ] ; then
+ exit 0
+fi
+
+# Skip
+exit 2
diff --git a/tools/perf/tests/shell/timechart.sh b/tools/perf/tests/shell/timechart.sh
new file mode 100755
index 000000000000..b14b3472c284
--- /dev/null
+++ b/tools/perf/tests/shell/timechart.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+# perf timechart tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_timechart_test.perf.data.XXXXX)
+output=$(mktemp /tmp/__perf_timechart_test.output.XXXXX.svg)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${output}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_timechart() {
+ echo "Basic perf timechart test"
+
+ # Try to record timechart data.
+ # perf timechart record uses system-wide recording and specific tracepoints.
+ # If it fails (e.g. permissions, missing tracepoints), skip the test.
+ if ! perf timechart record -o "${perfdata}" true > /dev/null 2>&1; then
+ echo "Basic perf timechart test [Skipped: perf timechart record failed (permissions/events?)]"
+ return
+ fi
+
+ # Generate the timechart
+ if ! perf timechart -i "${perfdata}" -o "${output}" > /dev/null 2>&1; then
+ echo "Basic perf timechart test [Failed: perf timechart command failed]"
+ err=1
+ return
+ fi
+
+ # Check if output file exists and is not empty
+ if [ ! -s "${output}" ]; then
+ echo "Basic perf timechart test [Failed: output file is empty or missing]"
+ err=1
+ return
+ fi
+
+ # Check if it looks like an SVG
+ if ! grep -q "svg" "${output}"; then
+ echo "Basic perf timechart test [Failed: output doesn't look like SVG]"
+ err=1
+ return
+ fi
+
+ echo "Basic perf timechart test [Success]"
+}
+
+if ! perf check feature -q libtraceevent ; then
+ echo "perf timechart is not supported. Skip."
+ cleanup
+ exit 2
+fi
+
+test_timechart
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/top.sh b/tools/perf/tests/shell/top.sh
new file mode 100755
index 000000000000..768ebcf7a89c
--- /dev/null
+++ b/tools/perf/tests/shell/top.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+# perf top tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+log_file=$(mktemp /tmp/perf.top.log.XXXXX)
+
+cleanup() {
+ rm -f "${log_file}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_basic_perf_top() {
+ echo "Basic perf top test"
+
+ # Start a workload that spins to generate samples
+ # thloop runs for the specified number of seconds
+ perf test -w thloop 20 &
+ PID=$!
+
+ # Allow it to start
+ sleep 0.1
+
+ # Run perf top for 5 seconds, monitoring that PID
+ # Use --stdio to avoid TUI and redirect output
+ # Use -d 1 to avoid flooding output
+ # Use -e cpu-clock to ensure we get samples
+ # Use sleep to keep stdin open but silent, preventing EOF loop or interactive spam
+ retval=0
+ sleep 10 | timeout 5s perf top --stdio -d 1 -e cpu-clock -p $PID > "${log_file}" 2>&1 || retval=$?
+ # timeout exits with 124 when it has to stop perf top after 5 seconds, which is expected
+ if [ $retval -ne 124 ] && [ $retval -ne 0 ]; then
+ echo "Basic perf top test [Failed: perf top failed to start or run (ret=$retval)]"
+ head -n 50 "${log_file}"
+ kill $PID
+ wait $PID 2>/dev/null || true
+ err=1
+ return
+ fi
+
+ kill $PID
+ wait $PID 2>/dev/null || true
+
+ # Check for some sample data (percentage)
+ if ! grep -E -q "[0-9]+\.[0-9]+%" "${log_file}"; then
+ echo "Basic perf top test [Failed: no sample percentage found]"
+ head -n 50 "${log_file}"
+ err=1
+ return
+ fi
+
+ # Check for the symbol
+ if ! grep -q "test_loop" "${log_file}"; then
+ echo "Basic perf top test [Failed: test_loop symbol not found]"
+ head -n 50 "${log_file}"
+ err=1
+ return
+ fi
+
+ echo "Basic perf top test [Success]"
+}
+
+test_basic_perf_top
+cleanup
+exit $err