path: root/tools/perf/builtin-record.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2025-06-03 15:11:44 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-06-03 15:11:44 -0700
commit	0939bd2fcf337243133b0271335a2838857c319f (patch)
tree	57324f5cc62bd878f248f69e23d06ec49b197c18	/tools/perf/builtin-record.c
parent	70087d2200d4a3bd31812ab4578c9ec70ea344af (diff)
parent	a913ef6fd883c05bd6538ed21ee1e773f0d750b7 (diff)
Merge tag 'perf-tools-for-v6.16-1-2025-06-03' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools
Pull perf tools updates from Arnaldo Carvalho de Melo:

 "perf report/top/annotate TUI:

   - Accept the left arrow key as a Zoom out if done on the first column

   - Show the source code toggle status in the title, to help spot bugs
     with the various disassemblers (capstone, llvm, objdump)

   - Provide feedback on unhandled hotkeys

  Build:

   - Better inform when certain features are not available, with
     warnings in the build process and in 'perf version --build-options'
     or 'perf -vv'

  perf record:

   - Improve the --off-cpu code by synthesizing events for switch-out ->
     switch-in intervals using a BPF program. This can be fine tuned
     using a --off-cpu-thresh knob

  perf report:

   - Add 'tgid' sort key

  perf mem/c2c:

   - Add 'op', 'cache', 'snoop', 'dtlb' output fields

   - Add support for 'ldlat' on AMD IBS (Instruction Based Sampling)

  perf ftrace:

   - Use process/session specific trace settings instead of messing with
     the global ftrace knobs

  perf trace:

   - Implement syscall summary in BPF

   - Support --summary-mode=cgroup

   - Always print the return value for syscalls returning a pid

   - The rseq and set_robust_list syscalls don't return a pid, just
     -errno

  perf lock contention:

   - Symbolize zone->lock using BTF

   - Add the -J/--inject-delay option to estimate the impact on
     application performance of optimizing kernel locking behavior

  perf stat:

   - Improve hybrid support for the NMI watchdog warning

  Symbol resolution:

   - Handle 'u' and 'l' symbols in /proc/kallsyms, resolving some Rust
     symbols

   - Improve the Rust demangler

  Hardware tracing:

   Intel PT:

    - Fix PEBS-via-PT data_src

    - Do not default to recording all switch events

    - Fix pattern matching with python3 on the SQL viewer script

   arm64:

    - Fixups for the hip08 hha PMU

  Vendor events:

   - Update Intel events/metrics files for alderlake, alderlaken,
     arrowlake, bonnell, broadwell, broadwellde, broadwellx,
     cascadelakex, clearwaterforest, elkhartlake, emeraldrapids,
     grandridge, graniterapids, haswell, haswellx, icelake, icelakex,
     ivybridge, ivytown, jaketown, lunarlake, meteorlake, nehalemep,
     nehalemex, rocketlake, sandybridge, sapphirerapids, sierraforest,
     skylake, skylakex, snowridgex, tigerlake, westmereep-dp,
     westmereep-sp, westmereep-sx

  python support:

   - Add support for event counts in the python binding, add a
     counting.py example

  perf list:

   - Display the PMU name associated with a perf metric in JSON

  perf test:

   - Hybrid improvements for the metric value validation test

   - Fix the LBR test by ignoring the idle task

   - Add AMD IBS sw filter and 'ldlat' tests

   - Add a 'perf trace --summary-mode=cgroup' test

   - Add tests for the various language symbol demanglers

  Miscellaneous:

   - Allow specifying the CPU an event will be tied to using
     '-e event/cpu=N/'

   - Sync various headers with the kernel sources

   - Add annotations to use clang's -Wthread-safety and fix some
     problems it detected

   - Make dump_stack() use perf's symbol resolution to provide better
     backtraces

   - Intel TPEBS support cleanups and fixes. TPEBS stands for Timed PEBS
     (Precision Event-Based Sampling), which adds timing info: the
     retirement latency of instructions

   - Various memory allocation (some detected by ASAN) and reference
     counting fixes

   - Add an 8-byte aligned PERF_RECORD_COMPRESSED2 record to replace
     PERF_RECORD_COMPRESSED

   - Skip unsupported event types in perf.data files instead of stopping
     when one is found

   - Improve lookups using hashmaps and binary searches"

* tag 'perf-tools-for-v6.16-1-2025-06-03' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools: (206 commits)
  perf callchain: Always populate the addr_location map when adding IP
  perf lock contention: Reject more than 10ms delays for safety
  perf trace: Set errpid to false for rseq and set_robust_list
  perf symbol: Move demangling code out of symbol-elf.c
  perf trace: Always print return value for syscalls returning a pid
  perf script: Print PERF_AUX_FLAG_COLLISION flag
  perf mem: Show absolute percent in mem_stat output
  perf mem: Display sort order only if it's available
  perf mem: Describe overhead calculation in brief
  perf record: Fix incorrect --user-regs comments
  Revert "perf thread: Ensure comm_lock held for comm_list"
  perf test trace_summary: Skip --bpf-summary tests if no libbpf
  perf test intel-pt: Skip jitdump test if no libelf
  perf intel-tpebs: Avoid race when evlist is being deleted
  perf test demangle-java: Don't segv if demangling fails
  perf symbol: Fix use-after-free in filename__read_build_id
  perf pmu: Avoid segv for missing name/alias_name in wildcarding
  perf machine: Factor creating a "live" machine out of dwarf-unwind
  perf test: Add AMD IBS sw filter test
  perf mem: Count L2 HITM for c2c statistic
  ...
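For reference, the builtin-record.c hunks below pad each compressed record up to the next 8-byte boundary (PERF_ALIGN(compressed, sizeof(u64))) and then write that many zero bytes after the payload, which is what makes PERF_RECORD_COMPRESSED2 8-byte aligned. The following is a minimal standalone sketch of that padding arithmetic only, not code from the patch; the align_up() helper and the example length are hypothetical stand-ins for what the perf tools PERF_ALIGN() macro computes.

  /* Sketch of the round-up-to-8-bytes padding used for PERF_RECORD_COMPRESSED2. */
  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical helper mirroring PERF_ALIGN(): round len up to a multiple of align (a power of two). */
  static size_t align_up(size_t len, size_t align)
  {
  	return (len + align - 1) & ~(align - 1);
  }

  int main(void)
  {
  	size_t compressed = 1037;                              /* hypothetical compressed record length */
  	size_t aligned = align_up(compressed, sizeof(uint64_t));
  	size_t padding = aligned - compressed;                 /* 0..7 zero bytes appended after the payload */

  	/* The header advertises the aligned size; the padding bytes follow the compressed payload. */
  	printf("aligned size %zu, payload %zu, padding %zu byte(s)\n",
  	       aligned, compressed, padding);
  	return 0;
  }

With the example length of 1037 bytes this prints an aligned size of 1040 and 3 bytes of padding; in the diff those padding bytes come from the zero-filled pad[8] buffer written by the second record__write() call.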
Diffstat (limited to 'tools/perf/builtin-record.c')
-rw-r--r--	tools/perf/builtin-record.c	80
1 file changed, 70 insertions(+), 10 deletions(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index ba20bf7c011d..8059bce85a51 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -26,6 +26,7 @@
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
+#include "util/stat.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
@@ -51,6 +52,7 @@
#include "util/clockid.h"
#include "util/off_cpu.h"
#include "util/bpf-filter.h"
+#include "util/strbuf.h"
#include "asm/bug.h"
#include "perf.h"
#include "cputopo.h"
@@ -648,14 +650,27 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
struct record *rec = to;
if (record__comp_enabled(rec)) {
+ struct perf_record_compressed2 *event = map->data;
+ size_t padding = 0;
+ u8 pad[8] = {0};
ssize_t compressed = zstd_compress(rec->session, map, map->data,
mmap__mmap_len(map), bf, size);
if (compressed < 0)
return (int)compressed;
- size = compressed;
- bf = map->data;
+ bf = event;
+ thread->samples++;
+
+ /*
+ * The record from `zstd_compress` is not 8 bytes aligned, which would cause asan
+ * error. We make it aligned here.
+ */
+ event->data_size = compressed - sizeof(struct perf_record_compressed2);
+ event->header.size = PERF_ALIGN(compressed, sizeof(u64));
+ padding = event->header.size - compressed;
+ return record__write(rec, map, bf, compressed) ||
+ record__write(rec, map, &pad, padding);
}
thread->samples++;
@@ -1534,7 +1549,7 @@ static void record__adjust_affinity(struct record *rec, struct mmap *map)
static size_t process_comp_header(void *record, size_t increment)
{
- struct perf_record_compressed *event = record;
+ struct perf_record_compressed2 *event = record;
size_t size = sizeof(*event);
if (increment) {
@@ -1542,7 +1557,7 @@ static size_t process_comp_header(void *record, size_t increment)
return increment;
}
- event->header.type = PERF_RECORD_COMPRESSED;
+ event->header.type = PERF_RECORD_COMPRESSED2;
event->header.size = size;
return size;
@@ -1552,7 +1567,7 @@ static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
void *dst, size_t dst_size, void *src, size_t src_size)
{
ssize_t compressed;
- size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
+ size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed2) - 1;
struct zstd_data *zstd_data = &session->zstd_data;
if (map && map->file)
@@ -2483,7 +2498,11 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
pr_warning("WARNING: --timestamp-filename option is not available in pipe mode.\n");
}
- evlist__uniquify_name(rec->evlist);
+ /*
+ * Use global stat_config that is zero meaning aggr_mode is AGGR_NONE
+ * and hybrid_merge is false.
+ */
+ evlist__uniquify_evsel_names(rec->evlist, &stat_config);
evlist__config(rec->evlist, opts, &callchain_param);
@@ -2569,6 +2588,13 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
evlist__enable(rec->evlist);
/*
+ * offcpu-time does not call execve, so enable_on_exe wouldn't work
+ * when recording a workload, do it manually
+ */
+ if (rec->off_cpu)
+ evlist__enable_evsel(rec->evlist, (char *)OFFCPU_EVENT);
+
+ /*
* Let the child rip
*/
if (forks) {
@@ -2784,13 +2810,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
record__auxtrace_snapshot_exit(rec);
if (forks && workload_exec_errno) {
- char msg[STRERR_BUFSIZE], strevsels[2048];
+ char msg[STRERR_BUFSIZE];
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
+ struct strbuf sb = STRBUF_INIT;
- evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
+ evlist__format_evsels(rec->evlist, &sb, 2048);
pr_err("Failed to collect '%s' for the '%s' workload: %s\n",
- strevsels, argv[0], emsg);
+ sb.buf, argv[0], emsg);
+ strbuf_release(&sb);
err = -1;
goto out_child;
}
@@ -3155,6 +3183,28 @@ out_free:
return ret;
}
+static int record__parse_off_cpu_thresh(const struct option *opt,
+ const char *str,
+ int unset __maybe_unused)
+{
+ struct record_opts *opts = opt->value;
+ char *endptr;
+ u64 off_cpu_thresh_ms;
+
+ if (!str)
+ return -EINVAL;
+
+ off_cpu_thresh_ms = strtoull(str, &endptr, 10);
+
+ /* the threshold isn't string "0", yet strtoull() returns 0, parsing failed */
+ if (*endptr || (off_cpu_thresh_ms == 0 && strcmp(str, "0")))
+ return -EINVAL;
+ else
+ opts->off_cpu_thresh_ns = off_cpu_thresh_ms * NSEC_PER_MSEC;
+
+ return 0;
+}
+
void __weak arch__add_leaf_frame_record_opts(struct record_opts *opts __maybe_unused)
{
}
@@ -3348,6 +3398,7 @@ static struct record record = {
.ctl_fd = -1,
.ctl_fd_ack = -1,
.synth = PERF_SYNTH_ALL,
+ .off_cpu_thresh_ns = OFFCPU_THRESH,
},
};
@@ -3436,6 +3487,8 @@ static struct option __record_options[] = {
"Record the sampled data address data page size"),
OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
"Record the sampled code address (ip) page size"),
+ OPT_BOOLEAN(0, "sample-mem-info", &record.opts.sample_data_src,
+ "Record the data source for memory operations"),
OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
"Record the sample identifier"),
@@ -3480,7 +3533,7 @@ static struct option __record_options[] = {
"sample selected machine registers on interrupt,"
" use '-I?' to list register names", parse_intr_regs),
OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
- "sample selected machine registers on interrupt,"
+ "sample selected machine registers in user space,"
" use '--user-regs=?' to list register names", parse_user_regs),
OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
"Record running/enabled time of read (:S) events"),
@@ -3573,6 +3626,9 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
OPT_STRING(0, "setup-filter", &record.filter_action, "pin|unpin",
"BPF filter action"),
+ OPT_CALLBACK(0, "off-cpu-thresh", &record.opts, "ms",
+ "Dump off-cpu samples if off-cpu time exceeds this threshold (in milliseconds). (Default: 500ms)",
+ record__parse_off_cpu_thresh),
OPT_END()
};
@@ -4130,6 +4186,10 @@ int cmd_record(int argc, const char **argv)
goto out_opts;
}
+ /* For backward compatibility, -d implies --mem-info */
+ if (rec->opts.sample_address)
+ rec->opts.sample_data_src = true;
+
/*
* Allow aliases to facilitate the lookup of symbols for address
* filters. Refer to auxtrace_parse_filters().