author     Ingo Molnar <mingo@kernel.org>    2014-05-22 11:39:08 +0200
committer  Ingo Molnar <mingo@kernel.org>    2014-05-22 11:39:08 +0200
commit     a03b1e1c372b60183b8141cdd161316429fab5ac (patch)
tree       d0db097668940dce698fec8b00d228cd29c1dee0 /kernel
parent     8e02ae573e6ae86930d08662790827a938203e70 (diff)
parent     b02ef20a9fba08948e643d3eec0efadf1da01a44 (diff)
Merge branch 'uprobes/core' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc into perf/uprobes
Pull uprobes fixes and changes from Oleg Nesterov:
" Denys found another nasty old bug in uprobes/x86: div, mul, shifts with
count in CL, and cmpxchg are not handled correctly.
Plus a couple of other minor fixes. Nobody acked the changes in x86/traps,
hopefully they are simple enough, and I believe that they make sense anyway
and allow to do more cleanups."
Signed-off-by: Ingo Molnar <mingo@kernel.org>
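For context, the instructions named in the pull message all implicitly use fixed x86 registers: DIV/MUL use RAX/RDX, variable-count shifts take their count in CL, and CMPXCHG compares against RAX. The x86 uprobes RIP-relative fixup historically borrowed those same registers as scratch while single-stepping a copied instruction, which is the bug class addressed on the arch/x86 side of this series (not visible in this kernel-only diffstat). Below is a minimal, illustrative C fragment of code that can compile to such an instruction; the names are hypothetical and not taken from the series.

/* Illustrative sketch only -- not part of the patches being merged. */
static unsigned long scale = 3;	/* global: a PIC build may reference it as scale(%rip) */

unsigned long scaled_div(unsigned long x)
{
	/*
	 * This division may compile to "divq scale(%rip)": a RIP-relative
	 * memory operand on an instruction that implicitly uses RAX/RDX.
	 * If the uprobes RIP-relative fixup picks RAX as its scratch
	 * register for the out-of-line copy, the stepped instruction sees
	 * a clobbered dividend and its quotient is thrown away when the
	 * scratch register is restored.
	 */
	return x / scale;
}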
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/events/uprobes.c   33
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 7716c40f2c50..3b02c72938a8 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -279,18 +279,13 @@ static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t
  * supported by that architecture then we need to modify is_trap_at_addr and
  * uprobe_write_opcode accordingly. This would never be a problem for archs
  * that have fixed length instructions.
- */
-
-/*
+ *
  * uprobe_write_opcode - write the opcode at a given virtual address.
  * @mm: the probed process address space.
  * @vaddr: the virtual address to store the opcode.
  * @opcode: opcode to be written at @vaddr.
  *
- * Called with mm->mmap_sem held (for read and with a reference to
- * mm).
- *
- * For mm @mm, write the opcode at @vaddr.
+ * Called with mm->mmap_sem held for write.
  * Return 0 (success) or a negative errno.
  */
 int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
@@ -310,21 +305,25 @@ retry:
 	if (ret <= 0)
 		goto put_old;
 
+	ret = anon_vma_prepare(vma);
+	if (ret)
+		goto put_old;
+
 	ret = -ENOMEM;
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
 	if (!new_page)
 		goto put_old;
 
-	__SetPageUptodate(new_page);
+	if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))
+		goto put_new;
 
+	__SetPageUptodate(new_page);
 	copy_highpage(new_page, old_page);
 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 
-	ret = anon_vma_prepare(vma);
-	if (ret)
-		goto put_new;
-
 	ret = __replace_page(vma, vaddr, old_page, new_page);
+	if (ret)
+		mem_cgroup_uncharge_page(new_page);
 
 put_new:
 	page_cache_release(new_page);
@@ -1352,6 +1351,16 @@ unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
 	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
 }
 
+unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	if (unlikely(utask && utask->active_uprobe))
+		return utask->vaddr;
+
+	return instruction_pointer(regs);
+}
+
 /*
  * Called with no locks held.
  * Called in context of a exiting or a exec-ing thread.
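The uprobe_get_trap_addr() helper added at the end of the diff lets fault reporting hide the out-of-line (XOL) single-step area: while a probed instruction is executing from its XOL slot, instruction_pointer(regs) points into that slot, and the helper returns the original probe address (utask->vaddr) instead. Below is a minimal sketch of how a caller might use it, in the spirit of the x86 traps changes elsewhere in this series; the wrapper name is hypothetical.

#include <linux/uprobes.h>
#include <linux/ptrace.h>

/*
 * Hypothetical wrapper: report where a trapping instruction "really" lives.
 * uprobe_get_trap_addr() returns utask->vaddr while an active uprobe is
 * being single-stepped out of line, and plain instruction_pointer(regs)
 * otherwise, so an address reported to userspace (e.g. siginfo.si_addr)
 * points at the probed instruction rather than into the XOL area.
 */
static void __user *trapping_instruction_addr(struct pt_regs *regs)
{
	return (void __user *)uprobe_get_trap_addr(regs);
}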