Diffstat (limited to 'tools/perf')
-rw-r--r--  tools/perf/Documentation/perf-kmem.txt  |  13
-rw-r--r--  tools/perf/Documentation/perf-probe.txt |  21
-rw-r--r--  tools/perf/builtin-buildid-list.c       |   2
-rw-r--r--  tools/perf/builtin-kmem.c               | 123
-rw-r--r--  tools/perf/builtin-probe.c              |  80
-rw-r--r--  tools/perf/builtin-report.c             |  39
-rw-r--r--  tools/perf/builtin-sched.c              | 212
-rw-r--r--  tools/perf/builtin-timechart.c          |  54
-rw-r--r--  tools/perf/builtin-trace.c              |  48
-rw-r--r--  tools/perf/util/data_map.c              |   4
-rw-r--r--  tools/perf/util/data_map.h              |   2
-rw-r--r--  tools/perf/util/event.c                 |  67
-rw-r--r--  tools/perf/util/event.h                 |  17
-rw-r--r--  tools/perf/util/header.c                |   9
-rw-r--r--  tools/perf/util/parse-events.c          |  17
-rw-r--r--  tools/perf/util/parse-options.c         |   3
-rw-r--r--  tools/perf/util/probe-event.c           | 133
-rw-r--r--  tools/perf/util/probe-event.h           |   1
-rw-r--r--  tools/perf/util/probe-finder.c          |   2
-rw-r--r--  tools/perf/util/symbol.c                |   5
-rw-r--r--  tools/perf/util/trace-event-parse.c     |   4
-rw-r--r--  tools/perf/util/trace-event-perl.c      |  67
-rw-r--r--  tools/perf/util/trace-event-perl.h      |   4
-rw-r--r--  tools/perf/util/trace-event-read.c      |   3
24 files changed, 578 insertions(+), 352 deletions(-)
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt
index 44b0ce35c28a..eac4d852e7cd 100644
--- a/tools/perf/Documentation/perf-kmem.txt
+++ b/tools/perf/Documentation/perf-kmem.txt
@@ -8,16 +8,16 @@ perf-kmem - Tool to trace/measure kernel memory(slab) properties
SYNOPSIS
--------
[verse]
-'perf kmem' {record} [<options>]
+'perf kmem' {record|stat} [<options>]
DESCRIPTION
-----------
-There's two variants of perf kmem:
+There are two variants of perf kmem:
'perf kmem record <command>' to record the kmem events
of an arbitrary workload.
- 'perf kmem' to report kernel memory statistics.
+ 'perf kmem stat' to report kernel memory statistics.
OPTIONS
-------
@@ -25,8 +25,11 @@ OPTIONS
--input=<file>::
Select the input file (default: perf.data)
---stat=<caller|alloc>::
- Select per callsite or per allocation statistics
+--caller::
+ Show per-callsite statistics
+
+--alloc::
+ Show per-allocation statistics
-s <key[,key2...]>::
--sort=<key[,key2...]>::
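A hedged usage sketch of the reworked interface (the workload and sort keys below are made-up examples; option spellings follow the documentation above):

    perf kmem record sleep 10                  # record kmem events of an arbitrary workload
    perf kmem stat --caller --sort bytes,hit   # per-callsite statistics, sorted by bytes then hits
    perf kmem stat --alloc -l 20               # per-allocation statistics, first 20 lines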
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 9270594e6dfd..8fa6bf99fcb5 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -8,10 +8,13 @@ perf-probe - Define new dynamic tracepoints
SYNOPSIS
--------
[verse]
-'perf probe' [options] --add 'PROBE' [--add 'PROBE' ...]
+'perf probe' [options] --add='PROBE' [...]
or
-'perf probe' [options] 'PROBE' ['PROBE' ...]
-
+'perf probe' [options] PROBE
+or
+'perf probe' [options] --del='[GROUP:]EVENT' [...]
+or
+'perf probe' --list
DESCRIPTION
-----------
@@ -31,8 +34,16 @@ OPTIONS
Be more verbose (show parsed arguments, etc).
-a::
---add::
- Define a probe point (see PROBE SYNTAX for detail)
+--add=::
+ Define a probe event (see PROBE SYNTAX for detail).
+
+-d::
+--del=::
+ Delete a probe event.
+
+-l::
+--list::
+ List up current probe events.
PROBE SYNTAX
------------
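An illustrative add/list/del round trip with the new options (event and group names are examples; the default group is assumed here to be "probe", i.e. whatever PERFPROBE_GROUP expands to):

    perf probe --add='schedule'           # define a probe event at schedule()
    perf probe --list                     # list currently defined probe events
    perf probe --del='probe:schedule'     # delete it again, given as [GROUP:]EVENT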
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 7dee9d19ab7a..dcb6143a0002 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -19,7 +19,7 @@ static char const *input_name = "perf.data";
static int force;
static const char *const buildid_list_usage[] = {
- "perf report [<options>]",
+ "perf buildid-list [<options>]",
NULL
};
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 047fef74bd52..5f209514f657 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -57,11 +57,6 @@ static struct rb_root root_caller_sorted;
static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;
-struct raw_event_sample {
- u32 size;
- char data[0];
-};
-
#define PATH_SYS_NODE "/sys/devices/system/node"
static void init_cpunode_map(void)
@@ -201,7 +196,7 @@ static void insert_caller_stat(unsigned long call_site,
}
}
-static void process_alloc_event(struct raw_event_sample *raw,
+static void process_alloc_event(void *data,
struct event *event,
int cpu,
u64 timestamp __used,
@@ -214,10 +209,10 @@ static void process_alloc_event(struct raw_event_sample *raw,
int bytes_alloc;
int node1, node2;
- ptr = raw_field_value(event, "ptr", raw->data);
- call_site = raw_field_value(event, "call_site", raw->data);
- bytes_req = raw_field_value(event, "bytes_req", raw->data);
- bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data);
+ ptr = raw_field_value(event, "ptr", data);
+ call_site = raw_field_value(event, "call_site", data);
+ bytes_req = raw_field_value(event, "bytes_req", data);
+ bytes_alloc = raw_field_value(event, "bytes_alloc", data);
insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
insert_caller_stat(call_site, bytes_req, bytes_alloc);
@@ -227,7 +222,7 @@ static void process_alloc_event(struct raw_event_sample *raw,
if (node) {
node1 = cpunode_map[cpu];
- node2 = raw_field_value(event, "node", raw->data);
+ node2 = raw_field_value(event, "node", data);
if (node1 != node2)
nr_cross_allocs++;
}
@@ -262,7 +257,7 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
return NULL;
}
-static void process_free_event(struct raw_event_sample *raw,
+static void process_free_event(void *data,
struct event *event,
int cpu,
u64 timestamp __used,
@@ -271,7 +266,7 @@ static void process_free_event(struct raw_event_sample *raw,
unsigned long ptr;
struct alloc_stat *s_alloc, *s_caller;
- ptr = raw_field_value(event, "ptr", raw->data);
+ ptr = raw_field_value(event, "ptr", data);
s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
if (!s_alloc)
@@ -289,66 +284,53 @@ static void process_free_event(struct raw_event_sample *raw,
}
static void
-process_raw_event(event_t *raw_event __used, void *more_data,
+process_raw_event(event_t *raw_event __used, void *data,
int cpu, u64 timestamp, struct thread *thread)
{
- struct raw_event_sample *raw = more_data;
struct event *event;
int type;
- type = trace_parse_common_type(raw->data);
+ type = trace_parse_common_type(data);
event = trace_find_event(type);
if (!strcmp(event->name, "kmalloc") ||
!strcmp(event->name, "kmem_cache_alloc")) {
- process_alloc_event(raw, event, cpu, timestamp, thread, 0);
+ process_alloc_event(data, event, cpu, timestamp, thread, 0);
return;
}
if (!strcmp(event->name, "kmalloc_node") ||
!strcmp(event->name, "kmem_cache_alloc_node")) {
- process_alloc_event(raw, event, cpu, timestamp, thread, 1);
+ process_alloc_event(data, event, cpu, timestamp, thread, 1);
return;
}
if (!strcmp(event->name, "kfree") ||
!strcmp(event->name, "kmem_cache_free")) {
- process_free_event(raw, event, cpu, timestamp, thread);
+ process_free_event(data, event, cpu, timestamp, thread);
return;
}
}
static int process_sample_event(event_t *event)
{
- u64 ip = event->ip.ip;
- u64 timestamp = -1;
- u32 cpu = -1;
- u64 period = 1;
- void *more_data = event->ip.__more_data;
- struct thread *thread = threads__findnew(event->ip.pid);
+ struct sample_data data;
+ struct thread *thread;
- if (sample_type & PERF_SAMPLE_TIME) {
- timestamp = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
-
- if (sample_type & PERF_SAMPLE_CPU) {
- cpu = *(u32 *)more_data;
- more_data += sizeof(u32);
- more_data += sizeof(u32); /* reserved */
- }
+ memset(&data, 0, sizeof(data));
+ data.time = -1;
+ data.cpu = -1;
+ data.period = 1;
- if (sample_type & PERF_SAMPLE_PERIOD) {
- period = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
+ event__parse_sample(event, sample_type, &data);
dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
event->header.misc,
- event->ip.pid, event->ip.tid,
- (void *)(long)ip,
- (long long)period);
+ data.pid, data.tid,
+ (void *)(long)data.ip,
+ (long long)data.period);
+ thread = threads__findnew(event->ip.pid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
@@ -357,7 +339,8 @@ static int process_sample_event(event_t *event)
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
- process_raw_event(event, more_data, cpu, timestamp, thread);
+ process_raw_event(event, data.raw_data, data.cpu,
+ data.time, thread);
return 0;
}
@@ -543,7 +526,7 @@ static int __cmd_kmem(void)
}
static const char * const kmem_usage[] = {
- "perf kmem [<options>] {record}",
+ "perf kmem [<options>] {record|stat}",
NULL
};
@@ -703,18 +686,17 @@ static int parse_sort_opt(const struct option *opt __used,
return 0;
}
-static int parse_stat_opt(const struct option *opt __used,
- const char *arg, int unset __used)
+static int parse_caller_opt(const struct option *opt __used,
+ const char *arg __used, int unset __used)
{
- if (!arg)
- return -1;
+ caller_flag = (alloc_flag + 1);
+ return 0;
+}
- if (strcmp(arg, "alloc") == 0)
- alloc_flag = (caller_flag + 1);
- else if (strcmp(arg, "caller") == 0)
- caller_flag = (alloc_flag + 1);
- else
- return -1;
+static int parse_alloc_opt(const struct option *opt __used,
+ const char *arg __used, int unset __used)
+{
+ alloc_flag = (caller_flag + 1);
return 0;
}
@@ -739,14 +721,17 @@ static int parse_line_opt(const struct option *opt __used,
static const struct option kmem_options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
- OPT_CALLBACK(0, "stat", NULL, "<alloc>|<caller>",
- "stat selector, Pass 'alloc' or 'caller'.",
- parse_stat_opt),
+ OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
+ "show per-callsite statistics",
+ parse_caller_opt),
+ OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
+ "show per-allocation statistics",
+ parse_alloc_opt),
OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
"sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
parse_sort_opt),
OPT_CALLBACK('l', "line", NULL, "num",
- "show n lins",
+ "show n lines",
parse_line_opt),
OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
OPT_END()
@@ -790,18 +775,22 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __used)
argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);
- if (argc && !strncmp(argv[0], "rec", 3))
- return __cmd_record(argc, argv);
- else if (argc)
+ if (!argc)
usage_with_options(kmem_usage, kmem_options);
- if (list_empty(&caller_sort))
- setup_sorting(&caller_sort, default_sort_order);
- if (list_empty(&alloc_sort))
- setup_sorting(&alloc_sort, default_sort_order);
+ if (!strncmp(argv[0], "rec", 3)) {
+ return __cmd_record(argc, argv);
+ } else if (!strcmp(argv[0], "stat")) {
+ setup_cpunode_map();
+
+ if (list_empty(&caller_sort))
+ setup_sorting(&caller_sort, default_sort_order);
+ if (list_empty(&alloc_sort))
+ setup_sorting(&alloc_sort, default_sort_order);
- setup_cpunode_map();
+ return __cmd_kmem();
+ }
- return __cmd_kmem();
+ return 0;
}
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index a58e11b7ea80..5a47c1e11f77 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -35,6 +35,7 @@
#include "perf.h"
#include "builtin.h"
#include "util/util.h"
+#include "util/strlist.h"
#include "util/event.h"
#include "util/debug.h"
#include "util/parse-options.h"
@@ -43,11 +44,12 @@
#include "util/probe-event.h"
/* Default vmlinux search paths */
-#define NR_SEARCH_PATH 3
+#define NR_SEARCH_PATH 4
const char *default_search_path[NR_SEARCH_PATH] = {
"/lib/modules/%s/build/vmlinux", /* Custom build kernel */
"/usr/lib/debug/lib/modules/%s/vmlinux", /* Red Hat debuginfo */
"/boot/vmlinux-debug-%s", /* Ubuntu */
+"./vmlinux", /* CWD */
};
#define MAX_PATH_LEN 256
@@ -60,6 +62,7 @@ static struct {
int need_dwarf;
int nr_probe;
struct probe_point probes[MAX_PROBES];
+ struct strlist *dellist;
} session;
static bool listing;
@@ -79,6 +82,25 @@ static void parse_probe_event(const char *str)
pr_debug("%d arguments\n", pp->nr_args);
}
+static void parse_probe_event_argv(int argc, const char **argv)
+{
+ int i, len;
+ char *buf;
+
+ /* Bind up rest arguments */
+ len = 0;
+ for (i = 0; i < argc; i++)
+ len += strlen(argv[i]) + 1;
+ buf = zalloc(len + 1);
+ if (!buf)
+ die("Failed to allocate memory for binding arguments.");
+ len = 0;
+ for (i = 0; i < argc; i++)
+ len += sprintf(&buf[len], "%s ", argv[i]);
+ parse_probe_event(buf);
+ free(buf);
+}
+
static int opt_add_probe_event(const struct option *opt __used,
const char *str, int unset __used)
{
@@ -87,6 +109,17 @@ static int opt_add_probe_event(const struct option *opt __used,
return 0;
}
+static int opt_del_probe_event(const struct option *opt __used,
+ const char *str, int unset __used)
+{
+ if (str) {
+ if (!session.dellist)
+ session.dellist = strlist__new(true, NULL);
+ strlist__add(session.dellist, str);
+ }
+ return 0;
+}
+
#ifndef NO_LIBDWARF
static int open_default_vmlinux(void)
{
@@ -121,6 +154,7 @@ static int open_default_vmlinux(void)
static const char * const probe_usage[] = {
"perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]",
"perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
+ "perf probe [<options>] --del '[GROUP:]EVENT' ...",
"perf probe --list",
NULL
};
@@ -132,7 +166,9 @@ static const struct option options[] = {
OPT_STRING('k', "vmlinux", &session.vmlinux, "file",
"vmlinux/module pathname"),
#endif
- OPT_BOOLEAN('l', "list", &listing, "list up current probes"),
+ OPT_BOOLEAN('l', "list", &listing, "list up current probe events"),
+ OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.",
+ opt_del_probe_event),
OPT_CALLBACK('a', "add", NULL,
#ifdef NO_LIBDWARF
"FUNC[+OFFS|%return] [ARG ...]",
@@ -160,7 +196,7 @@ static const struct option options[] = {
int cmd_probe(int argc, const char **argv, const char *prefix __used)
{
- int i, j, ret;
+ int i, ret;
#ifndef NO_LIBDWARF
int fd;
#endif
@@ -168,40 +204,52 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
argc = parse_options(argc, argv, options, probe_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- for (i = 0; i < argc; i++)
- parse_probe_event(argv[i]);
+ if (argc > 0)
+ parse_probe_event_argv(argc, argv);
- if ((session.nr_probe == 0 && !listing) ||
- (session.nr_probe != 0 && listing))
+ if ((session.nr_probe == 0 && !session.dellist && !listing))
usage_with_options(probe_usage, options);
if (listing) {
+ if (session.nr_probe != 0 || session.dellist) {
+ pr_warning(" Error: Don't use --list with"
+ " --add/--del.\n");
+ usage_with_options(probe_usage, options);
+ }
show_perf_probe_events();
return 0;
}
+ if (session.dellist) {
+ del_trace_kprobe_events(session.dellist);
+ strlist__delete(session.dellist);
+ if (session.nr_probe == 0)
+ return 0;
+ }
+
if (session.need_dwarf)
#ifdef NO_LIBDWARF
die("Debuginfo-analysis is not supported");
#else /* !NO_LIBDWARF */
pr_debug("Some probes require debuginfo.\n");
- if (session.vmlinux)
+ if (session.vmlinux) {
+ pr_debug("Try to open %s.", session.vmlinux);
fd = open(session.vmlinux, O_RDONLY);
- else
+ } else
fd = open_default_vmlinux();
if (fd < 0) {
if (session.need_dwarf)
- die("Could not open vmlinux/module file.");
+ die("Could not open debuginfo file.");
- pr_warning("Could not open vmlinux/module file."
- " Try to use symbols.\n");
+ pr_debug("Could not open vmlinux/module file."
+ " Try to use symbols.\n");
goto end_dwarf;
}
/* Searching probe points */
- for (j = 0; j < session.nr_probe; j++) {
- pp = &session.probes[j];
+ for (i = 0; i < session.nr_probe; i++) {
+ pp = &session.probes[i];
if (pp->found)
continue;
@@ -223,8 +271,8 @@ end_dwarf:
#endif /* !NO_LIBDWARF */
/* Synthesize probes without dwarf */
- for (j = 0; j < session.nr_probe; j++) {
- pp = &session.probes[j];
+ for (i = 0; i < session.nr_probe; i++) {
+ pp = &session.probes[i];
if (pp->found) /* This probe is already found. */
continue;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 383c4ab4f9af..2b9eb3a553ed 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -605,44 +605,41 @@ static int validate_chain(struct ip_callchain *chain, event_t *event)
static int process_sample_event(event_t *event)
{
- u64 ip = event->ip.ip;
- u64 period = 1;
- void *more_data = event->ip.__more_data;
- struct ip_callchain *chain = NULL;
+ struct sample_data data;
int cpumode;
struct addr_location al;
- struct thread *thread = threads__findnew(event->ip.pid);
+ struct thread *thread;
- if (sample_type & PERF_SAMPLE_PERIOD) {
- period = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
+ memset(&data, 0, sizeof(data));
+ data.period = 1;
+
+ event__parse_sample(event, sample_type, &data);
dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
event->header.misc,
- event->ip.pid, event->ip.tid,
- (void *)(long)ip,
- (long long)period);
+ data.pid, data.tid,
+ (void *)(long)data.ip,
+ (long long)data.period);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
unsigned int i;
- chain = (void *)more_data;
-
- dump_printf("... chain: nr:%Lu\n", chain->nr);
+ dump_printf("... chain: nr:%Lu\n", data.callchain->nr);
- if (validate_chain(chain, event) < 0) {
+ if (validate_chain(data.callchain, event) < 0) {
pr_debug("call-chain problem with event, "
"skipping it.\n");
return 0;
}
if (dump_trace) {
- for (i = 0; i < chain->nr; i++)
- dump_printf("..... %2d: %016Lx\n", i, chain->ips[i]);
+ for (i = 0; i < data.callchain->nr; i++)
+ dump_printf("..... %2d: %016Lx\n",
+ i, data.callchain->ips[i]);
}
}
+ thread = threads__findnew(data.pid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
@@ -657,7 +654,7 @@ static int process_sample_event(event_t *event)
cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
thread__find_addr_location(thread, cpumode,
- MAP__FUNCTION, ip, &al, NULL);
+ MAP__FUNCTION, data.ip, &al, NULL);
/*
* We have to do this here as we may have a dso with no symbol hit that
* has a name longer than the ones with symbols sampled.
@@ -675,12 +672,12 @@ static int process_sample_event(event_t *event)
if (sym_list && al.sym && !strlist__has_entry(sym_list, al.sym->name))
return 0;
- if (hist_entry__add(&al, chain, period)) {
+ if (hist_entry__add(&al, data.callchain, data.period)) {
pr_debug("problem incrementing symbol count, skipping event\n");
return -1;
}
- event__stats.total += period;
+ event__stats.total += data.period;
return 0;
}
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 26b782f26ee1..7cca7c15b40a 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -13,7 +13,6 @@
#include "util/debug.h"
#include "util/data_map.h"
-#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
@@ -141,6 +140,7 @@ struct work_atoms {
struct thread *thread;
struct rb_node node;
u64 max_lat;
+ u64 max_lat_at;
u64 total_lat;
u64 nb_atoms;
u64 total_runtime;
@@ -414,34 +414,33 @@ static u64 get_cpu_usage_nsec_parent(void)
return sum;
}
-static u64 get_cpu_usage_nsec_self(void)
+static int self_open_counters(void)
{
- char filename [] = "/proc/1234567890/sched";
- unsigned long msecs, nsecs;
- char *line = NULL;
- u64 total = 0;
- size_t len = 0;
- ssize_t chars;
- FILE *file;
- int ret;
+ struct perf_event_attr attr;
+ int fd;
- sprintf(filename, "/proc/%d/sched", getpid());
- file = fopen(filename, "r");
- BUG_ON(!file);
+ memset(&attr, 0, sizeof(attr));
- while ((chars = getline(&line, &len, file)) != -1) {
- ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
- &msecs, &nsecs);
- if (ret == 2) {
- total = msecs*1e6 + nsecs;
- break;
- }
- }
- if (line)
- free(line);
- fclose(file);
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_TASK_CLOCK;
+
+ fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
- return total;
+ if (fd < 0)
+ die("Error: sys_perf_event_open() syscall returned"
+ "with %d (%s)\n", fd, strerror(errno));
+ return fd;
+}
+
+static u64 get_cpu_usage_nsec_self(int fd)
+{
+ u64 runtime;
+ int ret;
+
+ ret = read(fd, &runtime, sizeof(runtime));
+ BUG_ON(ret != sizeof(runtime));
+
+ return runtime;
}
static void *thread_func(void *ctx)
@@ -450,9 +449,11 @@ static void *thread_func(void *ctx)
u64 cpu_usage_0, cpu_usage_1;
unsigned long i, ret;
char comm2[22];
+ int fd;
sprintf(comm2, ":%s", this_task->comm);
prctl(PR_SET_NAME, comm2);
+ fd = self_open_counters();
again:
ret = sem_post(&this_task->ready_for_work);
@@ -462,16 +463,15 @@ again:
ret = pthread_mutex_unlock(&start_work_mutex);
BUG_ON(ret);
- cpu_usage_0 = get_cpu_usage_nsec_self();
+ cpu_usage_0 = get_cpu_usage_nsec_self(fd);
for (i = 0; i < this_task->nr_events; i++) {
this_task->curr_event = i;
process_sched_event(this_task, this_task->atoms[i]);
}
- cpu_usage_1 = get_cpu_usage_nsec_self();
+ cpu_usage_1 = get_cpu_usage_nsec_self(fd);
this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
-
ret = sem_post(&this_task->work_done_sem);
BUG_ON(ret);
@@ -628,11 +628,6 @@ static void test_calibrations(void)
printf("the sleep test took %Ld nsecs\n", T1-T0);
}
-struct raw_event_sample {
- u32 size;
- char data[0];
-};
-
#define FILL_FIELD(ptr, field, event, data) \
ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
@@ -1019,8 +1014,10 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
delta = atom->sched_in_time - atom->wake_up_time;
atoms->total_lat += delta;
- if (delta > atoms->max_lat)
+ if (delta > atoms->max_lat) {
atoms->max_lat = delta;
+ atoms->max_lat_at = timestamp;
+ }
atoms->nb_atoms++;
}
@@ -1216,10 +1213,11 @@ static void output_lat_thread(struct work_atoms *work_list)
avg = work_list->total_lat / work_list->nb_atoms;
- printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
+ printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
(double)work_list->total_runtime / 1e6,
work_list->nb_atoms, (double)avg / 1e6,
- (double)work_list->max_lat / 1e6);
+ (double)work_list->max_lat / 1e6,
+ (double)work_list->max_lat_at / 1e9);
}
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
@@ -1356,7 +1354,7 @@ static void sort_lat(void)
static struct trace_sched_handler *trace_handler;
static void
-process_sched_wakeup_event(struct raw_event_sample *raw,
+process_sched_wakeup_event(void *data,
struct event *event,
int cpu __used,
u64 timestamp __used,
@@ -1364,13 +1362,13 @@ process_sched_wakeup_event(struct raw_event_sample *raw,
{
struct trace_wakeup_event wakeup_event;
- FILL_COMMON_FIELDS(wakeup_event, event, raw->data);
+ FILL_COMMON_FIELDS(wakeup_event, event, data);
- FILL_ARRAY(wakeup_event, comm, event, raw->data);
- FILL_FIELD(wakeup_event, pid, event, raw->data);
- FILL_FIELD(wakeup_event, prio, event, raw->data);
- FILL_FIELD(wakeup_event, success, event, raw->data);
- FILL_FIELD(wakeup_event, cpu, event, raw->data);
+ FILL_ARRAY(wakeup_event, comm, event, data);
+ FILL_FIELD(wakeup_event, pid, event, data);
+ FILL_FIELD(wakeup_event, prio, event, data);
+ FILL_FIELD(wakeup_event, success, event, data);
+ FILL_FIELD(wakeup_event, cpu, event, data);
if (trace_handler->wakeup_event)
trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
@@ -1469,7 +1467,7 @@ map_switch_event(struct trace_switch_event *switch_event,
static void
-process_sched_switch_event(struct raw_event_sample *raw,
+process_sched_switch_event(void *data,
struct event *event,
int this_cpu,
u64 timestamp __used,
@@ -1477,15 +1475,15 @@ process_sched_switch_event(struct raw_event_sample *raw,
{
struct trace_switch_event switch_event;
- FILL_COMMON_FIELDS(switch_event, event, raw->data);
+ FILL_COMMON_FIELDS(switch_event, event, data);
- FILL_ARRAY(switch_event, prev_comm, event, raw->data);
- FILL_FIELD(switch_event, prev_pid, event, raw->data);
- FILL_FIELD(switch_event, prev_prio, event, raw->data);
- FILL_FIELD(switch_event, prev_state, event, raw->data);
- FILL_ARRAY(switch_event, next_comm, event, raw->data);
- FILL_FIELD(switch_event, next_pid, event, raw->data);
- FILL_FIELD(switch_event, next_prio, event, raw->data);
+ FILL_ARRAY(switch_event, prev_comm, event, data);
+ FILL_FIELD(switch_event, prev_pid, event, data);
+ FILL_FIELD(switch_event, prev_prio, event, data);
+ FILL_FIELD(switch_event, prev_state, event, data);
+ FILL_ARRAY(switch_event, next_comm, event, data);
+ FILL_FIELD(switch_event, next_pid, event, data);
+ FILL_FIELD(switch_event, next_prio, event, data);
if (curr_pid[this_cpu] != (u32)-1) {
/*
@@ -1502,7 +1500,7 @@ process_sched_switch_event(struct raw_event_sample *raw,
}
static void
-process_sched_runtime_event(struct raw_event_sample *raw,
+process_sched_runtime_event(void *data,
struct event *event,
int cpu __used,
u64 timestamp __used,
@@ -1510,17 +1508,17 @@ process_sched_runtime_event(struct raw_event_sample *raw,
{
struct trace_runtime_event runtime_event;
- FILL_ARRAY(runtime_event, comm, event, raw->data);
- FILL_FIELD(runtime_event, pid, event, raw->data);
- FILL_FIELD(runtime_event, runtime, event, raw->data);
- FILL_FIELD(runtime_event, vruntime, event, raw->data);
+ FILL_ARRAY(runtime_event, comm, event, data);
+ FILL_FIELD(runtime_event, pid, event, data);
+ FILL_FIELD(runtime_event, runtime, event, data);
+ FILL_FIELD(runtime_event, vruntime, event, data);
if (trace_handler->runtime_event)
trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
}
static void
-process_sched_fork_event(struct raw_event_sample *raw,
+process_sched_fork_event(void *data,
struct event *event,
int cpu __used,
u64 timestamp __used,
@@ -1528,12 +1526,12 @@ process_sched_fork_event(struct raw_event_sample *raw,
{
struct trace_fork_event fork_event;
- FILL_COMMON_FIELDS(fork_event, event, raw->data);
+ FILL_COMMON_FIELDS(fork_event, event, data);
- FILL_ARRAY(fork_event, parent_comm, event, raw->data);
- FILL_FIELD(fork_event, parent_pid, event, raw->data);
- FILL_ARRAY(fork_event, child_comm, event, raw->data);
- FILL_FIELD(fork_event, child_pid, event, raw->data);
+ FILL_ARRAY(fork_event, parent_comm, event, data);
+ FILL_FIELD(fork_event, parent_pid, event, data);
+ FILL_ARRAY(fork_event, child_comm, event, data);
+ FILL_FIELD(fork_event, child_pid, event, data);
if (trace_handler->fork_event)
trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
@@ -1550,7 +1548,7 @@ process_sched_exit_event(struct event *event,
}
static void
-process_sched_migrate_task_event(struct raw_event_sample *raw,
+process_sched_migrate_task_event(void *data,
struct event *event,
int cpu __used,
u64 timestamp __used,
@@ -1558,80 +1556,66 @@ process_sched_migrate_task_event(struct raw_event_sample *raw,
{
struct trace_migrate_task_event migrate_task_event;
- FILL_COMMON_FIELDS(migrate_task_event, event, raw->data);
+ FILL_COMMON_FIELDS(migrate_task_event, event, data);
- FILL_ARRAY(migrate_task_event, comm, event, raw->data);
- FILL_FIELD(migrate_task_event, pid, event, raw->data);
- FILL_FIELD(migrate_task_event, prio, event, raw->data);
- FILL_FIELD(migrate_task_event, cpu, event, raw->data);
+ FILL_ARRAY(migrate_task_event, comm, event, data);
+ FILL_FIELD(migrate_task_event, pid, event, data);
+ FILL_FIELD(migrate_task_event, prio, event, data);
+ FILL_FIELD(migrate_task_event, cpu, event, data);
if (trace_handler->migrate_task_event)
trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
}
static void
-process_raw_event(event_t *raw_event __used, void *more_data,
+process_raw_event(event_t *raw_event __used, void *data,
int cpu, u64 timestamp, struct thread *thread)
{
- struct raw_event_sample *raw = more_data;
struct event *event;
int type;
- type = trace_parse_common_type(raw->data);
+
+ type = trace_parse_common_type(data);
event = trace_find_event(type);
if (!strcmp(event->name, "sched_switch"))
- process_sched_switch_event(raw, event, cpu, timestamp, thread);
+ process_sched_switch_event(data, event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_stat_runtime"))
- process_sched_runtime_event(raw, event, cpu, timestamp, thread);
+ process_sched_runtime_event(data, event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_wakeup"))
- process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
+ process_sched_wakeup_event(data, event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_wakeup_new"))
- process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
+ process_sched_wakeup_event(data, event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_process_fork"))
- process_sched_fork_event(raw, event, cpu, timestamp, thread);
+ process_sched_fork_event(data, event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_process_exit"))
process_sched_exit_event(event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_migrate_task"))
- process_sched_migrate_task_event(raw, event, cpu, timestamp, thread);
+ process_sched_migrate_task_event(data, event, cpu, timestamp, thread);
}
static int process_sample_event(event_t *event)
{
+ struct sample_data data;
struct thread *thread;
- u64 ip = event->ip.ip;
- u64 timestamp = -1;
- u32 cpu = -1;
- u64 period = 1;
- void *more_data = event->ip.__more_data;
if (!(sample_type & PERF_SAMPLE_RAW))
return 0;
- thread = threads__findnew(event->ip.pid);
+ memset(&data, 0, sizeof(data));
+ data.time = -1;
+ data.cpu = -1;
+ data.period = -1;
- if (sample_type & PERF_SAMPLE_TIME) {
- timestamp = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
-
- if (sample_type & PERF_SAMPLE_CPU) {
- cpu = *(u32 *)more_data;
- more_data += sizeof(u32);
- more_data += sizeof(u32); /* reserved */
- }
-
- if (sample_type & PERF_SAMPLE_PERIOD) {
- period = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
+ event__parse_sample(event, sample_type, &data);
dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
event->header.misc,
- event->ip.pid, event->ip.tid,
- (void *)(long)ip,
- (long long)period);
+ data.pid, data.tid,
+ (void *)(long)data.ip,
+ (long long)data.period);
+ thread = threads__findnew(data.pid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
@@ -1640,10 +1624,10 @@ static int process_sample_event(event_t *event)
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
- if (profile_cpu != -1 && profile_cpu != (int) cpu)
+ if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
return 0;
- process_raw_event(event, more_data, cpu, timestamp, thread);
+ process_raw_event(event, data.raw_data, data.cpu, data.time, thread);
return 0;
}
@@ -1724,9 +1708,9 @@ static void __cmd_lat(void)
read_events();
sort_lat();
- printf("\n -----------------------------------------------------------------------------------------\n");
- printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms |\n");
- printf(" -----------------------------------------------------------------------------------------\n");
+ printf("\n ---------------------------------------------------------------------------------------------------------------\n");
+ printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
+ printf(" ---------------------------------------------------------------------------------------------------------------\n");
next = rb_first(&sorted_atom_root);
@@ -1902,13 +1886,18 @@ static int __cmd_record(int argc, const char **argv)
int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
- symbol__init(0);
-
argc = parse_options(argc, argv, sched_options, sched_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(sched_usage, sched_options);
+ /*
+ * Aliased to 'perf trace' for now:
+ */
+ if (!strcmp(argv[0], "trace"))
+ return cmd_trace(argc, argv, prefix);
+
+ symbol__init(0);
if (!strncmp(argv[0], "rec", 3)) {
return __cmd_record(argc, argv);
} else if (!strncmp(argv[0], "lat", 3)) {
@@ -1932,11 +1921,6 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used)
usage_with_options(replay_usage, replay_options);
}
__cmd_replay();
- } else if (!strcmp(argv[0], "trace")) {
- /*
- * Aliased to 'perf trace' for now:
- */
- return cmd_trace(argc, argv, prefix);
} else {
usage_with_options(sched_usage, sched_options);
}
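The replay threads now measure their own CPU time with a task-clock software counter instead of parsing /proc/<pid>/sched. A minimal standalone sketch of the same technique, using the raw syscall rather than perf's internal sys_perf_event_open() wrapper (error handling trimmed; nothing here is taken verbatim from the patch):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t ns;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size   = sizeof(attr);
            attr.type   = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_TASK_CLOCK;

            /* pid 0 = calling task, cpu -1 = any cpu, group_fd -1, flags 0 */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            /* ... run the work to be measured ... */

            if (read(fd, &ns, sizeof(ns)) != sizeof(ns))
                    return 1;
            printf("task clock: %llu ns\n", (unsigned long long)ns);
            close(fd);
            return 0;
    }

Reading the fd returns the accumulated counter value as a u64; for PERF_COUNT_SW_TASK_CLOCK that is the task's CPU time in nanoseconds, which is exactly what get_cpu_usage_nsec_self() needs.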
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index cb58b6605fcc..f472df9561ee 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -302,12 +302,11 @@ process_exit_event(event_t *event)
}
struct trace_entry {
- u32 size;
unsigned short type;
unsigned char flags;
unsigned char preempt_count;
int pid;
- int tgid;
+ int lock_depth;
};
struct power_entry {
@@ -484,43 +483,22 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
static int
process_sample_event(event_t *event)
{
- int cursor = 0;
- u64 addr = 0;
- u64 stamp = 0;
- u32 cpu = 0;
- u32 pid = 0;
+ struct sample_data data;
struct trace_entry *te;
- if (sample_type & PERF_SAMPLE_IP)
- cursor++;
-
- if (sample_type & PERF_SAMPLE_TID) {
- pid = event->sample.array[cursor]>>32;
- cursor++;
- }
- if (sample_type & PERF_SAMPLE_TIME) {
- stamp = event->sample.array[cursor++];
+ memset(&data, 0, sizeof(data));
- if (!first_time || first_time > stamp)
- first_time = stamp;
- if (last_time < stamp)
- last_time = stamp;
+ event__parse_sample(event, sample_type, &data);
+ if (sample_type & PERF_SAMPLE_TIME) {
+ if (!first_time || first_time > data.time)
+ first_time = data.time;
+ if (last_time < data.time)
+ last_time = data.time;
}
- if (sample_type & PERF_SAMPLE_ADDR)
- addr = event->sample.array[cursor++];
- if (sample_type & PERF_SAMPLE_ID)
- cursor++;
- if (sample_type & PERF_SAMPLE_STREAM_ID)
- cursor++;
- if (sample_type & PERF_SAMPLE_CPU)
- cpu = event->sample.array[cursor++] & 0xFFFFFFFF;
- if (sample_type & PERF_SAMPLE_PERIOD)
- cursor++;
-
- te = (void *)&event->sample.array[cursor];
- if (sample_type & PERF_SAMPLE_RAW && te->size > 0) {
+ te = (void *)data.raw_data;
+ if (sample_type & PERF_SAMPLE_RAW && data.raw_size > 0) {
char *event_str;
struct power_entry *pe;
@@ -532,19 +510,19 @@ process_sample_event(event_t *event)
return 0;
if (strcmp(event_str, "power:power_start") == 0)
- c_state_start(cpu, stamp, pe->value);
+ c_state_start(data.cpu, data.time, pe->value);
if (strcmp(event_str, "power:power_end") == 0)
- c_state_end(cpu, stamp);
+ c_state_end(data.cpu, data.time);
if (strcmp(event_str, "power:power_frequency") == 0)
- p_state_change(cpu, stamp, pe->value);
+ p_state_change(data.cpu, data.time, pe->value);
if (strcmp(event_str, "sched:sched_wakeup") == 0)
- sched_wakeup(cpu, stamp, pid, te);
+ sched_wakeup(data.cpu, data.time, data.pid, te);
if (strcmp(event_str, "sched:sched_switch") == 0)
- sched_switch(cpu, stamp, te);
+ sched_switch(data.cpu, data.time, te);
}
return 0;
}
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index abb914aa7be6..c2fcc34486f5 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -66,58 +66,40 @@ static u64 sample_type;
static int process_sample_event(event_t *event)
{
- u64 ip = event->ip.ip;
- u64 timestamp = -1;
- u32 cpu = -1;
- u64 period = 1;
- void *more_data = event->ip.__more_data;
- struct thread *thread = threads__findnew(event->ip.pid);
-
- if (sample_type & PERF_SAMPLE_TIME) {
- timestamp = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
+ struct sample_data data;
+ struct thread *thread;
- if (sample_type & PERF_SAMPLE_CPU) {
- cpu = *(u32 *)more_data;
- more_data += sizeof(u32);
- more_data += sizeof(u32); /* reserved */
- }
+ memset(&data, 0, sizeof(data));
+ data.time = -1;
+ data.cpu = -1;
+ data.period = 1;
- if (sample_type & PERF_SAMPLE_PERIOD) {
- period = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
+ event__parse_sample(event, sample_type, &data);
dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
event->header.misc,
- event->ip.pid, event->ip.tid,
- (void *)(long)ip,
- (long long)period);
+ data.pid, data.tid,
+ (void *)(long)data.ip,
+ (long long)data.period);
+ thread = threads__findnew(event->ip.pid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
if (sample_type & PERF_SAMPLE_RAW) {
- struct {
- u32 size;
- char data[0];
- } *raw = more_data;
-
/*
* FIXME: better resolve from pid from the struct trace_entry
* field, although it should be the same than this perf
* event pid
*/
- scripting_ops->process_event(cpu, raw->data, raw->size,
- timestamp, thread->comm);
+ scripting_ops->process_event(data.cpu, data.raw_data,
+ data.raw_size,
+ data.time, thread->comm);
}
- event__stats.total += period;
+ event__stats.total += data.period;
return 0;
}
diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c
index ca0bedf637c2..59b65d0bd7c1 100644
--- a/tools/perf/util/data_map.c
+++ b/tools/perf/util/data_map.c
@@ -100,11 +100,11 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
}
}
-int perf_header__read_build_ids(int input, off_t offset, off_t size)
+int perf_header__read_build_ids(int input, u64 offset, u64 size)
{
struct build_id_event bev;
char filename[PATH_MAX];
- off_t limit = offset + size;
+ u64 limit = offset + size;
int err = -1;
while (offset < limit) {
diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h
index 3180ff7e3633..258a87bcc4fb 100644
--- a/tools/perf/util/data_map.h
+++ b/tools/perf/util/data_map.h
@@ -27,6 +27,6 @@ int mmap_dispatch_perf_file(struct perf_header **pheader,
int full_paths,
int *cwdlen,
char **cwd);
-int perf_header__read_build_ids(int input, off_t offset, off_t file_size);
+int perf_header__read_build_ids(int input, u64 offset, u64 file_size);
#endif
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 414b89d1bde9..4dcecafa85dc 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -310,3 +310,70 @@ int event__preprocess_sample(const event_t *self, struct addr_location *al,
al->level == 'H' ? "[hypervisor]" : "<not found>");
return 0;
}
+
+int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
+{
+ u64 *array = event->sample.array;
+
+ if (type & PERF_SAMPLE_IP) {
+ data->ip = event->ip.ip;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u32 *p = (u32 *)array;
+ data->pid = p[0];
+ data->tid = p[1];
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ data->time = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ADDR) {
+ data->addr = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ data->id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ data->stream_id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u32 *p = (u32 *)array;
+ data->cpu = *p;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ data->period = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_READ) {
+ pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
+ return -1;
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ data->callchain = (struct ip_callchain *)array;
+ array += 1 + data->callchain->nr;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ u32 *p = (u32 *)array;
+ data->raw_size = *p;
+ p++;
+ data->raw_data = p;
+ }
+
+ return 0;
+}
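All of the tools converted above now call this helper instead of hand-walking event->ip.__more_data. A minimal caller, following the pattern used in builtin-kmem.c (handle_sample() is a made-up name; sample_type is whatever the data file's header advertised, and the defaults keep absent fields at sane values):

    static int handle_sample(event_t *event, u64 sample_type)
    {
            struct sample_data data;

            memset(&data, 0, sizeof(data));
            data.time   = -1;       /* if PERF_SAMPLE_TIME is not set */
            data.cpu    = -1;       /* if PERF_SAMPLE_CPU is not set */
            data.period = 1;        /* if PERF_SAMPLE_PERIOD is not set */

            if (event__parse_sample(event, sample_type, &data) < 0)
                    return -1;      /* e.g. PERF_SAMPLE_READ, not handled yet */

            if ((sample_type & PERF_SAMPLE_RAW) && data.raw_size > 0)
                    process_raw_event(event, data.raw_data, data.cpu,
                                      data.time, threads__findnew(data.pid));
            return 0;
    }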
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index a4cc8105cf67..c7a78eef8e52 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -56,11 +56,25 @@ struct read_event {
u64 id;
};
-struct sample_event{
+struct sample_event {
struct perf_event_header header;
u64 array[];
};
+struct sample_data {
+ u64 ip;
+ u32 pid, tid;
+ u64 time;
+ u64 addr;
+ u64 id;
+ u64 stream_id;
+ u32 cpu;
+ u64 period;
+ struct ip_callchain *callchain;
+ u32 raw_size;
+ void *raw_data;
+};
+
#define BUILD_ID_SIZE 20
struct build_id_event {
@@ -155,5 +169,6 @@ int event__process_task(event_t *self);
struct addr_location;
int event__preprocess_sample(const event_t *self, struct addr_location *al,
symbol_filter_t filter);
+int event__parse_sample(event_t *event, u64 type, struct sample_data *data);
#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 4805e6dfd23c..59a9c0b3033e 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -187,7 +187,9 @@ static int do_write(int fd, const void *buf, size_t size)
static int __dsos__write_buildid_table(struct list_head *head, int fd)
{
+#define NAME_ALIGN 64
struct dso *pos;
+ static const char zero_buf[NAME_ALIGN];
list_for_each_entry(pos, head, node) {
int err;
@@ -197,14 +199,17 @@ static int __dsos__write_buildid_table(struct list_head *head, int fd)
if (!pos->has_build_id)
continue;
len = pos->long_name_len + 1;
- len = ALIGN(len, 64);
+ len = ALIGN(len, NAME_ALIGN);
memset(&b, 0, sizeof(b));
memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
b.header.size = sizeof(b) + len;
err = do_write(fd, &b, sizeof(b));
if (err < 0)
return err;
- err = do_write(fd, pos->long_name, len);
+ err = do_write(fd, pos->long_name, pos->long_name_len + 1);
+ if (err < 0)
+ return err;
+ err = do_write(fd, zero_buf, len - pos->long_name_len - 1);
if (err < 0)
return err;
}
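What the hunk above fixes: each build-id table entry pads the DSO name out to NAME_ALIGN bytes, but the old code wrote `len` bytes straight from pos->long_name, reading past the end of the string to supply the padding. A sketch of the intended on-disk layout (sizes are illustrative, not from the patch):

    size_t name_len = strlen(name) + 1;            /* include the NUL */
    size_t padded   = ALIGN(name_len, NAME_ALIGN); /* e.g. 11 -> 64 */

    do_write(fd, &b, sizeof(b));                   /* build_id_event header */
    do_write(fd, name, name_len);                  /* the real name bytes */
    do_write(fd, zero_buf, padded - name_len);     /* NUL padding only */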
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 9e5dbd66d34d..e5bc0fb016b2 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -197,7 +197,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
if (id == config) {
closedir(evt_dir);
closedir(sys_dir);
- path = zalloc(sizeof(path));
+ path = zalloc(sizeof(*path));
path->system = malloc(MAX_EVENT_LENGTH);
if (!path->system) {
free(path);
@@ -467,7 +467,6 @@ parse_subsystem_tracepoint_event(char *sys_name, char *flags)
while ((evt_ent = readdir(evt_dir))) {
char event_opt[MAX_EVOPT_LEN + 1];
int len;
- unsigned int rem = MAX_EVOPT_LEN;
if (!strcmp(evt_ent->d_name, ".")
|| !strcmp(evt_ent->d_name, "..")
@@ -475,20 +474,12 @@ parse_subsystem_tracepoint_event(char *sys_name, char *flags)
|| !strcmp(evt_ent->d_name, "filter"))
continue;
- len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s", sys_name,
- evt_ent->d_name);
+ len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name,
+ evt_ent->d_name, flags ? ":" : "",
+ flags ?: "");
if (len < 0)
return EVT_FAILED;
- rem -= len;
- if (flags) {
- if (rem < strlen(flags) + 1)
- return EVT_FAILED;
-
- strcat(event_opt, ":");
- strcat(event_opt, flags);
- }
-
if (parse_events(NULL, event_opt, 0))
return EVT_FAILED;
}
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 6d8af48c925e..efebd5b476b3 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -430,6 +430,9 @@ int usage_with_options_internal(const char * const *usagestr,
pos = fprintf(stderr, " ");
if (opts->short_name)
pos += fprintf(stderr, "-%c", opts->short_name);
+ else
+ pos += fprintf(stderr, " ");
+
if (opts->long_name && opts->short_name)
pos += fprintf(stderr, ", ");
if (opts->long_name)
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index cd7fbda5e2a5..d14a4585bcaf 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -48,6 +48,9 @@
/* If there is no space to write, returns -E2BIG. */
static int e_snprintf(char *str, size_t size, const char *format, ...)
+ __attribute__((format(printf, 3, 4)));
+
+static int e_snprintf(char *str, size_t size, const char *format, ...)
{
int ret;
va_list ap;
@@ -258,7 +261,7 @@ int synthesize_perf_probe_event(struct probe_point *pp)
ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s", pp->function,
offs, pp->retprobe ? "%return" : "", line);
else
- ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s", pp->file, line);
+ ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", pp->file, line);
if (ret <= 0)
goto error;
len = ret;
@@ -373,14 +376,32 @@ static void clear_probe_point(struct probe_point *pp)
free(pp->args);
for (i = 0; i < pp->found; i++)
free(pp->probes[i]);
- memset(pp, 0, sizeof(pp));
+ memset(pp, 0, sizeof(*pp));
+}
+
+/* Show an event */
+static void show_perf_probe_event(const char *group, const char *event,
+ const char *place, struct probe_point *pp)
+{
+ int i;
+ char buf[128];
+
+ e_snprintf(buf, 128, "%s:%s", group, event);
+ printf(" %-40s (on %s", buf, place);
+
+ if (pp->nr_args > 0) {
+ printf(" with");
+ for (i = 0; i < pp->nr_args; i++)
+ printf(" %s", pp->args[i]);
+ }
+ printf(")\n");
}
/* List up current perf-probe events */
void show_perf_probe_events(void)
{
unsigned int i;
- int fd;
+ int fd, nr;
char *group, *event;
struct probe_point pp;
struct strlist *rawlist;
@@ -393,8 +414,13 @@ void show_perf_probe_events(void)
for (i = 0; i < strlist__nr_entries(rawlist); i++) {
ent = strlist__entry(rawlist, i);
parse_trace_kprobe_event(ent->s, &group, &event, &pp);
+ /* Synthesize only event probe point */
+ nr = pp.nr_args;
+ pp.nr_args = 0;
synthesize_perf_probe_event(&pp);
- printf("[%s:%s]\t%s\n", group, event, pp.probes[0]);
+ pp.nr_args = nr;
+ /* Show an event */
+ show_perf_probe_event(group, event, pp.probes[0], &pp);
free(group);
free(event);
clear_probe_point(&pp);
@@ -404,21 +430,28 @@ void show_perf_probe_events(void)
}
/* Get current perf-probe event names */
-static struct strlist *get_perf_event_names(int fd)
+static struct strlist *get_perf_event_names(int fd, bool include_group)
{
unsigned int i;
char *group, *event;
+ char buf[128];
struct strlist *sl, *rawlist;
struct str_node *ent;
rawlist = get_trace_kprobe_event_rawlist(fd);
- sl = strlist__new(false, NULL);
+ sl = strlist__new(true, NULL);
for (i = 0; i < strlist__nr_entries(rawlist); i++) {
ent = strlist__entry(rawlist, i);
parse_trace_kprobe_event(ent->s, &group, &event, NULL);
- strlist__add(sl, event);
+ if (include_group) {
+ if (e_snprintf(buf, 128, "%s:%s", group, event) < 0)
+ die("Failed to copy group:event name.");
+ strlist__add(sl, buf);
+ } else
+ strlist__add(sl, event);
free(group);
+ free(event);
}
strlist__delete(rawlist);
@@ -426,24 +459,30 @@ static struct strlist *get_perf_event_names(int fd)
return sl;
}
-static int write_trace_kprobe_event(int fd, const char *buf)
+static void write_trace_kprobe_event(int fd, const char *buf)
{
int ret;
+ pr_debug("Writing event: %s\n", buf);
ret = write(fd, buf, strlen(buf));
if (ret <= 0)
- die("Failed to create event.");
- else
- printf("Added new event: %s\n", buf);
-
- return ret;
+ die("Failed to write event: %s", strerror(errno));
}
static void get_new_event_name(char *buf, size_t len, const char *base,
struct strlist *namelist)
{
int i, ret;
- for (i = 0; i < MAX_EVENT_INDEX; i++) {
+
+ /* Try no suffix */
+ ret = e_snprintf(buf, len, "%s", base);
+ if (ret < 0)
+ die("snprintf() failed: %s", strerror(-ret));
+ if (!strlist__has_entry(namelist, buf))
+ return;
+
+ /* Try to add suffix */
+ for (i = 1; i < MAX_EVENT_INDEX; i++) {
ret = e_snprintf(buf, len, "%s_%d", base, i);
if (ret < 0)
die("snprintf() failed: %s", strerror(-ret));
@@ -464,7 +503,7 @@ void add_trace_kprobe_events(struct probe_point *probes, int nr_probes)
fd = open_kprobe_events(O_RDWR, O_APPEND);
/* Get current event names */
- namelist = get_perf_event_names(fd);
+ namelist = get_perf_event_names(fd, false);
for (j = 0; j < nr_probes; j++) {
pp = probes + j;
@@ -476,9 +515,73 @@ void add_trace_kprobe_events(struct probe_point *probes, int nr_probes)
PERFPROBE_GROUP, event,
pp->probes[i]);
write_trace_kprobe_event(fd, buf);
+ printf("Added new event:\n");
+ /* Get the first parameter (probe-point) */
+ sscanf(pp->probes[i], "%s", buf);
+ show_perf_probe_event(PERFPROBE_GROUP, event,
+ buf, pp);
/* Add added event name to namelist */
strlist__add(namelist, event);
}
}
+ /* Show how to use the event. */
+ printf("\nYou can now use it on all perf tools, such as:\n\n");
+ printf("\tperf record -e %s:%s -a sleep 1\n\n", PERFPROBE_GROUP, event);
+
+ strlist__delete(namelist);
+ close(fd);
+}
+
+static void del_trace_kprobe_event(int fd, const char *group,
+ const char *event, struct strlist *namelist)
+{
+ char buf[128];
+
+ if (e_snprintf(buf, 128, "%s:%s", group, event) < 0)
+ die("Failed to copy event.");
+ if (!strlist__has_entry(namelist, buf)) {
+ pr_warning("Warning: event \"%s\" is not found.\n", buf);
+ return;
+ }
+ /* Convert from perf-probe event to trace-kprobe event */
+ if (e_snprintf(buf, 128, "-:%s/%s", group, event) < 0)
+ die("Failed to copy event.");
+
+ write_trace_kprobe_event(fd, buf);
+ printf("Remove event: %s:%s\n", group, event);
+}
+
+void del_trace_kprobe_events(struct strlist *dellist)
+{
+ int fd;
+ unsigned int i;
+ const char *group, *event;
+ char *p, *str;
+ struct str_node *ent;
+ struct strlist *namelist;
+
+ fd = open_kprobe_events(O_RDWR, O_APPEND);
+ /* Get current event names */
+ namelist = get_perf_event_names(fd, true);
+
+ for (i = 0; i < strlist__nr_entries(dellist); i++) {
+ ent = strlist__entry(dellist, i);
+ str = strdup(ent->s);
+ if (!str)
+ die("Failed to copy event.");
+ p = strchr(str, ':');
+ if (p) {
+ group = str;
+ *p = '\0';
+ event = p + 1;
+ } else {
+ group = PERFPROBE_GROUP;
+ event = str;
+ }
+ del_trace_kprobe_event(fd, group, event, namelist);
+ free(str);
+ }
+ strlist__delete(namelist);
close(fd);
}
+
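del_trace_kprobe_event() drives the same ftrace text interface that can be poked by hand. Roughly, the strings involved look like this (the "p:" definition format comes from the kprobe tracer documentation, not from this hunk; group and event names are examples):

    p:probe/schedule schedule     # defines group "probe", event "schedule" at schedule()
    -:probe/schedule              # removes it again (the "-:%s/%s" string built above)

Both lines are written to <debugfs>/tracing/kprobe_events.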
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 0c6fe56fe38a..f752159124ae 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -10,6 +10,7 @@ extern void parse_trace_kprobe_event(const char *str, char **group,
char **event, struct probe_point *pp);
extern int synthesize_trace_kprobe_event(struct probe_point *pp);
extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes);
+extern void del_trace_kprobe_events(struct strlist *dellist);
extern void show_perf_probe_events(void);
/* Maximum index number of event-name postfix */
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 293cdfc1b8ca..4585f1d86792 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -106,7 +106,7 @@ static int strtailcmp(const char *s1, const char *s2)
{
int i1 = strlen(s1);
int i2 = strlen(s2);
- while (--i1 > 0 && --i2 > 0) {
+ while (--i1 >= 0 && --i2 >= 0) {
if (s1[i1] != s2[i2])
return s1[i1] - s2[i2];
}
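The boundary change matters for the first character: with the old `> 0` tests the loop stopped before index 0 was ever compared, so two paths differing only in their first byte looked identical. With hypothetical inputs:

    strtailcmp("a.c", "b.c");   /* old: 0 (only ".c" compared), new: 'a' - 'b' */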
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index fffcb937cdcb..e7508ad3450f 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -938,8 +938,9 @@ static bool __dsos__read_build_ids(struct list_head *head)
bool dsos__read_build_ids(void)
{
- return __dsos__read_build_ids(&dsos__kernel) ||
- __dsos__read_build_ids(&dsos__user);
+ bool kbuildids = __dsos__read_build_ids(&dsos__kernel),
+ ubuildids = __dsos__read_build_ids(&dsos__user);
+ return kbuildids || ubuildids;
}
/*
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 0302405aa2ca..c5c32be040bf 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -177,7 +177,7 @@ void parse_proc_kallsyms(char *file, unsigned int size __unused)
func_count++;
}
- func_list = malloc_or_die(sizeof(*func_list) * func_count + 1);
+ func_list = malloc_or_die(sizeof(*func_list) * (func_count + 1));
i = 0;
while (list) {
@@ -1477,7 +1477,7 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok)
goto out_free;
field = malloc_or_die(sizeof(*field));
- memset(field, 0, sizeof(field));
+ memset(field, 0, sizeof(*field));
value = arg_eval(arg);
field->value = strdup(value);
diff --git a/tools/perf/util/trace-event-perl.c b/tools/perf/util/trace-event-perl.c
index 51e833fd58c3..a5ffe60db5d6 100644
--- a/tools/perf/util/trace-event-perl.c
+++ b/tools/perf/util/trace-event-perl.c
@@ -32,9 +32,6 @@
void xs_init(pTHX);
-void boot_Perf__Trace__Context(pTHX_ CV *cv);
-void boot_DynaLoader(pTHX_ CV *cv);
-
void xs_init(pTHX)
{
const char *file = __FILE__;
@@ -573,26 +570,72 @@ struct scripting_ops perl_scripting_ops = {
.generate_script = perl_generate_script,
};
-#ifdef NO_LIBPERL
-void setup_perl_scripting(void)
+static void print_unsupported_msg(void)
{
fprintf(stderr, "Perl scripting not supported."
- " Install libperl and rebuild perf to enable it. e.g. "
- "apt-get install libperl-dev (ubuntu), yum install "
- "perl-ExtUtils-Embed (Fedora), etc.\n");
+ " Install libperl and rebuild perf to enable it.\n"
+ "For example:\n # apt-get install libperl-dev (ubuntu)"
+ "\n # yum install perl-ExtUtils-Embed (Fedora)"
+ "\n etc.\n");
}
-#else
-void setup_perl_scripting(void)
+
+static int perl_start_script_unsupported(const char *script __unused)
+{
+ print_unsupported_msg();
+
+ return -1;
+}
+
+static int perl_stop_script_unsupported(void)
+{
+ return 0;
+}
+
+static void perl_process_event_unsupported(int cpu __unused,
+ void *data __unused,
+ int size __unused,
+ unsigned long long nsecs __unused,
+ char *comm __unused)
+{
+}
+
+static int perl_generate_script_unsupported(const char *outfile __unused)
+{
+ print_unsupported_msg();
+
+ return -1;
+}
+
+struct scripting_ops perl_scripting_unsupported_ops = {
+ .name = "Perl",
+ .start_script = perl_start_script_unsupported,
+ .stop_script = perl_stop_script_unsupported,
+ .process_event = perl_process_event_unsupported,
+ .generate_script = perl_generate_script_unsupported,
+};
+
+static void register_perl_scripting(struct scripting_ops *scripting_ops)
{
int err;
- err = script_spec_register("Perl", &perl_scripting_ops);
+ err = script_spec_register("Perl", scripting_ops);
if (err)
die("error registering Perl script extension");
- err = script_spec_register("pl", &perl_scripting_ops);
+ err = script_spec_register("pl", scripting_ops);
if (err)
die("error registering pl script extension");
scripting_context = malloc(sizeof(struct scripting_context));
}
+
+#ifdef NO_LIBPERL
+void setup_perl_scripting(void)
+{
+ register_perl_scripting(&perl_scripting_unsupported_ops);
+}
+#else
+void setup_perl_scripting(void)
+{
+ register_perl_scripting(&perl_scripting_ops);
+}
#endif
diff --git a/tools/perf/util/trace-event-perl.h b/tools/perf/util/trace-event-perl.h
index 8fe0d866fe1a..e88fb26137bb 100644
--- a/tools/perf/util/trace-event-perl.h
+++ b/tools/perf/util/trace-event-perl.h
@@ -34,9 +34,13 @@ typedef int INTERP;
#define dXSUB_SYS
#define pTHX_
static inline void newXS(const char *a, void *b, const char *c) {}
+static void boot_Perf__Trace__Context(pTHX_ CV *cv) {}
+static void boot_DynaLoader(pTHX_ CV *cv) {}
#else
#include <EXTERN.h>
#include <perl.h>
+void boot_Perf__Trace__Context(pTHX_ CV *cv);
+void boot_DynaLoader(pTHX_ CV *cv);
typedef PerlInterpreter * INTERP;
#endif
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 342dfdd43f87..1744422cafcb 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -145,8 +145,9 @@ static void read_proc_kallsyms(void)
if (!size)
return;
- buf = malloc_or_die(size);
+ buf = malloc_or_die(size + 1);
read_or_die(buf, size);
+ buf[size] = '\0';
parse_proc_kallsyms(buf, size);