Diffstat (limited to 'kernel/events')
 kernel/events/callchain.c | 43 ++++++++++++++++++++++++++++++++++++++++++-
 kernel/events/uprobes.c   | 33 ++++++++++++++++++++++++++-------
 2 files changed, 68 insertions(+), 8 deletions(-)
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index ad57944b6c40..8d57255e5b29 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -11,6 +11,7 @@
 #include <linux/perf_event.h>
 #include <linux/slab.h>
 #include <linux/sched/task_stack.h>
+#include <linux/uprobes.h>
 
 #include "internal.h"
 
@@ -176,13 +177,51 @@ put_callchain_entry(int rctx)
 	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
 }
 
+static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entry,
+					       int start_entry_idx)
+{
+#ifdef CONFIG_UPROBES
+	struct uprobe_task *utask = current->utask;
+	struct return_instance *ri;
+	__u64 *cur_ip, *last_ip, tramp_addr;
+
+	if (likely(!utask || !utask->return_instances))
+		return;
+
+	cur_ip = &entry->ip[start_entry_idx];
+	last_ip = &entry->ip[entry->nr - 1];
+	ri = utask->return_instances;
+	tramp_addr = uprobe_get_trampoline_vaddr();
+
+	/*
+	 * If there are pending uretprobes for the current thread, they are
+	 * recorded in a list inside utask->return_instances; each such
+	 * pending uretprobe replaces traced user function's return address on
+	 * the stack, so when stack trace is captured, instead of seeing
+	 * actual function's return address, we'll have one or many uretprobe
+	 * trampoline addresses in the stack trace, which are not helpful and
+	 * misleading to users.
+	 * So here we go over the pending list of uretprobes, and each
+	 * encountered trampoline address is replaced with actual return
+	 * address.
+	 */
+	while (ri && cur_ip <= last_ip) {
+		if (*cur_ip == tramp_addr) {
+			*cur_ip = ri->orig_ret_vaddr;
+			ri = ri->next;
+		}
+		cur_ip++;
+	}
+#endif
+}
+
 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 		   u32 max_stack, bool crosstask, bool add_mark)
 {
 	struct perf_callchain_entry *entry;
 	struct perf_callchain_entry_ctx ctx;
-	int rctx;
+	int rctx, start_entry_idx;
 
 	entry = get_callchain_entry(&rctx);
 	if (!entry)
@@ -215,7 +254,9 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 			if (add_mark)
 				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
 
+			start_entry_idx = entry->nr;
 			perf_callchain_user(&ctx, regs);
+			fixup_uretprobe_trampoline_entries(entry, start_entry_idx);
 		}
 	}
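
The replacement pass in fixup_uretprobe_trampoline_entries() can be exercised in isolation. Below is a stand-alone user-space sketch of the same algorithm, not kernel code: the addresses, the fixup_entries() helper, and the mock return_instance are made up for illustration. The pass relies on the captured trace (innermost frame first) and the utask->return_instances list (most recently hijacked frame first) being ordered the same way, so one linear scan pairs each trampoline entry with one pending instance.

/* Stand-alone illustration of the fixup pass above; not kernel code. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct return_instance {
	uint64_t orig_ret_vaddr;	/* return address that was hijacked */
	struct return_instance *next;	/* next (outer) pending uretprobe */
};

static void fixup_entries(uint64_t *ip, size_t nr, uint64_t tramp_addr,
			  struct return_instance *ri)
{
	/* single pass: every trampoline entry consumes one list node */
	for (size_t i = 0; ri && i < nr; i++) {
		if (ip[i] == tramp_addr) {
			ip[i] = ri->orig_ret_vaddr;
			ri = ri->next;
		}
	}
}

int main(void)
{
	uint64_t tramp = 0x7fffb000;	/* pretend XOL trampoline address */
	/* two nested uretprobed calls: both return addresses on the stack
	 * were replaced by the trampoline when the trace was captured */
	uint64_t ips[] = { 0x401111, tramp, 0x402222, tramp, 0x400000 };
	struct return_instance outer = { 0x403333, NULL };
	struct return_instance inner = { 0x404444, &outer };

	fixup_entries(ips, sizeof(ips) / sizeof(ips[0]), tramp, &inner);
	for (size_t i = 0; i < sizeof(ips) / sizeof(ips[0]); i++)
		printf("%#llx\n", (unsigned long long)ips[i]);
	return 0;
}
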
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 2c83ba776fc7..99be2adedbc0 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1474,11 +1474,20 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
 	return ret;
 }
 
+void * __weak arch_uprobe_trampoline(unsigned long *psize)
+{
+	static uprobe_opcode_t insn = UPROBE_SWBP_INSN;
+
+	*psize = UPROBE_SWBP_INSN_SIZE;
+	return &insn;
+}
+
 static struct xol_area *__create_xol_area(unsigned long vaddr)
 {
 	struct mm_struct *mm = current->mm;
-	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
+	unsigned long insns_size;
 	struct xol_area *area;
+	void *insns;
 
 	area = kmalloc(sizeof(*area), GFP_KERNEL);
 	if (unlikely(!area))
@@ -1502,7 +1511,8 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
 	/* Reserve the 1st slot for get_trampoline_vaddr() */
 	set_bit(0, area->bitmap);
 	atomic_set(&area->slot_count, 1);
-	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
+	insns = arch_uprobe_trampoline(&insns_size);
+	arch_uprobe_copy_ixol(area->pages[0], 0, insns, insns_size);
 
 	if (!xol_add_vma(mm, area))
 		return area;
@@ -1827,7 +1837,7 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
  *
  * Returns -1 in case the xol_area is not allocated.
  */
-static unsigned long get_trampoline_vaddr(void)
+unsigned long uprobe_get_trampoline_vaddr(void)
 {
 	struct xol_area *area;
 	unsigned long trampoline_vaddr = -1;
@@ -1878,7 +1888,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 	if (!ri)
 		return;
 
-	trampoline_vaddr = get_trampoline_vaddr();
+	trampoline_vaddr = uprobe_get_trampoline_vaddr();
 	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
 	if (orig_ret_vaddr == -1)
 		goto fail;
@@ -2123,7 +2133,7 @@ static struct return_instance *find_next_ret_chain(struct return_instance *ri)
 	return ri;
 }
 
-static void handle_trampoline(struct pt_regs *regs)
+void uprobe_handle_trampoline(struct pt_regs *regs)
 {
 	struct uprobe_task *utask;
 	struct return_instance *ri, *next;
@@ -2149,6 +2159,15 @@ static void handle_trampoline(struct pt_regs *regs)
 
 		instruction_pointer_set(regs, ri->orig_ret_vaddr);
 		do {
+			/* pop current instance from the stack of pending return instances,
+			 * as it's not pending anymore: we just fixed up original
+			 * instruction pointer in regs and are about to call handlers;
+			 * this allows fixup_uretprobe_trampoline_entries() to properly fix up
+			 * captured stack traces from uretprobe handlers, in which pending
+			 * trampoline addresses on the stack are replaced with correct
+			 * original return addresses
+			 */
+			utask->return_instances = ri->next;
 			if (valid)
 				handle_uretprobe_chain(ri, regs);
 			ri = free_ret_instance(ri);
@@ -2187,8 +2206,8 @@ static void handle_swbp(struct pt_regs *regs)
 	int is_swbp;
 
 	bp_vaddr = uprobe_get_swbp_addr(regs);
-	if (bp_vaddr == get_trampoline_vaddr())
-		return handle_trampoline(regs);
+	if (bp_vaddr == uprobe_get_trampoline_vaddr())
+		return uprobe_handle_trampoline(regs);
 
 	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
 	if (!uprobe) {
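
The __weak arch_uprobe_trampoline() keeps the historical default (a single breakpoint instruction in the reserved XOL slot) while letting an architecture substitute its own trampoline code. A hypothetical override might look like the sketch below; the symbol names are placeholders, not part of this patch:

/* Hypothetical arch-side override of the weak hook; names are made up.
 * Whatever this returns is copied into the reserved XOL slot by
 * __create_xol_area() instead of the lone UPROBE_SWBP_INSN.
 */
extern char my_arch_tramp_start[];	/* assembled trampoline code */
extern char my_arch_tramp_end[];

void *arch_uprobe_trampoline(unsigned long *psize)
{
	*psize = my_arch_tramp_end - my_arch_tramp_start;
	return my_arch_tramp_start;
}
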

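The observable effect is on user stack traces captured from within uretprobe handlers. A minimal BPF-side sketch of such a consumer follows, assuming libbpf-style auto-attach section syntax; the binary path and function name are placeholders. bpf_get_stack() with BPF_F_USER_STACK goes through get_perf_callchain(), so with this patch the returned entries hold real return addresses rather than repeated trampoline addresses.

// SPDX-License-Identifier: GPL-2.0
/* Sketch of a uretprobe consumer; /usr/bin/mybin:myfunc is a placeholder. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define MAX_DEPTH 32

__u64 stack[MAX_DEPTH];	/* scratch buffer for the captured user stack */

SEC("uretprobe//usr/bin/mybin:myfunc")
int myfunc_exit(struct pt_regs *ctx)
{
	/* asks get_perf_callchain() for the user call chain; entries that
	 * previously read as the uretprobe trampoline are now fixed up */
	long n = bpf_get_stack(ctx, stack, sizeof(stack), BPF_F_USER_STACK);

	if (n > 0)
		bpf_printk("got %ld bytes of user stack", n);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";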