Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/ftrace.c                 | 29
-rw-r--r--   kernel/trace/trace.c                  |  8
-rw-r--r--   kernel/trace/trace_events_hist.c      |  9
-rw-r--r--   kernel/trace/trace_events_synth.c     |  8
-rw-r--r--   kernel/trace/trace_functions_graph.c  |  2
5 files changed, 36 insertions, 20 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ef2d5dca6f70..aa758efc3731 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1148,7 +1148,6 @@ struct ftrace_page {
};
#define ENTRY_SIZE sizeof(struct dyn_ftrace)
-#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;
@@ -3834,7 +3833,8 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
return 0;
}
-static int ftrace_allocate_records(struct ftrace_page *pg, int count)
+static int ftrace_allocate_records(struct ftrace_page *pg, int count,
+ unsigned long *num_pages)
{
int order;
int pages;
@@ -3844,7 +3844,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
return -EINVAL;
/* We want to fill as much as possible, with no empty pages */
- pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
+ pages = DIV_ROUND_UP(count * ENTRY_SIZE, PAGE_SIZE);
order = fls(pages) - 1;
again:
@@ -3859,6 +3859,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
}
ftrace_number_of_pages += 1 << order;
+ *num_pages += 1 << order;
ftrace_number_of_groups++;
cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
@@ -3887,12 +3888,14 @@ static void ftrace_free_pages(struct ftrace_page *pages)
}
static struct ftrace_page *
-ftrace_allocate_pages(unsigned long num_to_init)
+ftrace_allocate_pages(unsigned long num_to_init, unsigned long *num_pages)
{
struct ftrace_page *start_pg;
struct ftrace_page *pg;
int cnt;
+ *num_pages = 0;
+
if (!num_to_init)
return NULL;
@@ -3906,7 +3909,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
* waste as little space as possible.
*/
for (;;) {
- cnt = ftrace_allocate_records(pg, num_to_init);
+ cnt = ftrace_allocate_records(pg, num_to_init, num_pages);
if (cnt < 0)
goto free_pages;
@@ -7192,8 +7195,6 @@ static int ftrace_process_locs(struct module *mod,
if (!count)
return 0;
- pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
-
/*
* Sorting mcount in vmlinux at build time depend on
* CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
@@ -7206,7 +7207,7 @@ static int ftrace_process_locs(struct module *mod,
test_is_sorted(start, count);
}
- start_pg = ftrace_allocate_pages(count);
+ start_pg = ftrace_allocate_pages(count, &pages);
if (!start_pg)
return -ENOMEM;
@@ -7305,27 +7306,27 @@ static int ftrace_process_locs(struct module *mod,
/* We should have used all pages unless we skipped some */
if (pg_unuse) {
unsigned long pg_remaining, remaining = 0;
- unsigned long skip;
+ long skip;
/* Count the number of entries unused and compare it to skipped. */
- pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index;
+ pg_remaining = (PAGE_SIZE << pg->order) / ENTRY_SIZE - pg->index;
if (!WARN(skipped < pg_remaining, "Extra allocated pages for ftrace")) {
skip = skipped - pg_remaining;
- for (pg = pg_unuse; pg; pg = pg->next)
+ for (pg = pg_unuse; pg && skip > 0; pg = pg->next) {
remaining += 1 << pg->order;
+ skip -= (PAGE_SIZE << pg->order) / ENTRY_SIZE;
+ }
pages -= remaining;
- skip = DIV_ROUND_UP(skip, ENTRIES_PER_PAGE);
-
/*
* Check to see if the number of pages remaining would
* just fit the number of entries skipped.
*/
- WARN(skip != remaining, "Extra allocated pages for ftrace: %lu with %lu skipped",
+ WARN(pg || skip > 0, "Extra allocated pages for ftrace: %lu with %lu skipped",
remaining, skipped);
}
/* Need to synchronize with ftrace_location_range() */
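
Note on the ftrace.c change above: ftrace_allocate_records() packs records contiguously into a block of 2^order pages (cnt = (PAGE_SIZE << order) / ENTRY_SIZE), so records may straddle page boundaries inside one allocation. Rounding the total byte size up to whole pages can therefore need fewer pages than the old per-page entry count, which truncated PAGE_SIZE / ENTRY_SIZE first. A minimal userspace sketch of the two formulas, using a hypothetical 24-byte record size so they actually diverge:

/* Userspace sketch (not kernel code): compare the old per-page entry count
 * against the new byte-based rounding. ENTRY_SIZE here is a hypothetical
 * sizeof(struct dyn_ftrace); the real size is architecture dependent.
 */
#include <stdio.h>

#define PAGE_SIZE          4096UL
#define ENTRY_SIZE         24UL                      /* hypothetical record size */
#define ENTRIES_PER_PAGE   (PAGE_SIZE / ENTRY_SIZE)  /* truncates to 170 */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long count = 100000;

	/* Old: assumes each page holds only a whole number of records. */
	unsigned long old_pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);

	/* New: rounds the total byte size up to whole pages; records in one
	 * contiguous allocation may straddle page boundaries, so this can
	 * come out smaller.
	 */
	unsigned long new_pages = DIV_ROUND_UP(count * ENTRY_SIZE, PAGE_SIZE);

	printf("old=%lu new=%lu\n", old_pages, new_pages);  /* old=589 new=586 */
	return 0;
}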
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index baec63134ab6..8bd4ec08fb36 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6115,10 +6115,10 @@ static int cmp_mod_entry(const void *key, const void *pivot)
unsigned long addr = (unsigned long)key;
const struct trace_mod_entry *ent = pivot;
- if (addr >= ent[0].mod_addr && addr < ent[1].mod_addr)
- return 0;
- else
- return addr - ent->mod_addr;
+ if (addr < ent[0].mod_addr)
+ return -1;
+
+ return addr >= ent[1].mod_addr;
}
/**
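
Note on the trace.c change above: cmp_mod_entry() is a bsearch() comparator over an array of ascending module start addresses, where consecutive entries form the half-open range [ent[0].mod_addr, ent[1].mod_addr). The old "return addr - ent->mod_addr" truncated an unsigned long difference to int, which can report the wrong sign and steer the binary search the wrong way; the fix returns an explicit below/inside/above result instead. A minimal userspace sketch of the same pattern, with illustrative names and a hypothetical end sentinel:

/* Userspace sketch (not kernel code): range lookup via bsearch() on
 * boundary addresses, using the same comparator shape as the fix.
 */
#include <stdio.h>
#include <stdlib.h>

struct mod_entry { unsigned long mod_addr; };

static int cmp_entry(const void *key, const void *pivot)
{
	unsigned long addr = (unsigned long)key;
	const struct mod_entry *ent = pivot;

	if (addr < ent[0].mod_addr)
		return -1;              /* address is below this range: search lower half */

	return addr >= ent[1].mod_addr; /* 0: inside [ent[0], ent[1]), 1: search upper half */
}

int main(void)
{
	/* Last element is an end sentinel so ent[1] is valid for every searched entry. */
	struct mod_entry table[] = { {0x1000}, {0x4000}, {0x9000}, {0xf000} };
	struct mod_entry *hit;

	hit = bsearch((void *)0x5000UL, table, 3, sizeof(*table), cmp_entry);
	printf("range start: %#lx\n", hit ? hit->mod_addr : 0UL); /* prints 0x4000 */
	return 0;
}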
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 5e6e70540eef..c97bb2fda5c0 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -2057,6 +2057,15 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
hist_field->fn_num = HIST_FIELD_FN_RELDYNSTRING;
else
hist_field->fn_num = HIST_FIELD_FN_PSTRING;
+ } else if (field->filter_type == FILTER_STACKTRACE) {
+ flags |= HIST_FIELD_FL_STACKTRACE;
+
+ hist_field->size = MAX_FILTER_STR_VAL;
+ hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
+ if (!hist_field->type)
+ goto free;
+
+ hist_field->fn_num = HIST_FIELD_FN_STACK;
} else {
hist_field->size = field->size;
hist_field->is_signed = field->is_signed;
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 4554c458b78c..45c187e77e21 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -130,7 +130,9 @@ static int synth_event_define_fields(struct trace_event_call *call)
struct synth_event *event = call->data;
unsigned int i, size, n_u64;
char *name, *type;
+ int filter_type;
bool is_signed;
+ bool is_stack;
int ret = 0;
for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
@@ -138,8 +140,12 @@ static int synth_event_define_fields(struct trace_event_call *call)
is_signed = event->fields[i]->is_signed;
type = event->fields[i]->type;
name = event->fields[i]->name;
+ is_stack = event->fields[i]->is_stack;
+
+ filter_type = is_stack ? FILTER_STACKTRACE : FILTER_OTHER;
+
ret = trace_define_field(call, type, name, offset, size,
- is_signed, FILTER_OTHER);
+ is_signed, filter_type);
if (ret)
break;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b1e9c9913309..1de6f1573621 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -901,7 +901,7 @@ static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entr
trace_seq_printf(s, "%ps", func);
if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long)) {
- print_function_args(s, entry->args, (unsigned long)func);
+ print_function_args(s, FGRAPH_ENTRY_ARGS(entry), (unsigned long)func);
trace_seq_putc(s, ';');
} else
trace_seq_puts(s, "();");