From ee1b58d36aa1b5a79eaba11f5c3633c88231da83 Mon Sep 17 00:00:00 2001
From: Qiaowei Ren
Date: Fri, 14 Nov 2014 07:18:19 -0800
Subject: mpx: Extend siginfo structure to include bound violation information

This patch adds new fields describing a bound violation to the siginfo
structure: si_lower and si_upper hold the lower and upper bounds that
were in effect when the violation occurred.

Signed-off-by: Qiaowei Ren
Signed-off-by: Dave Hansen
Cc: linux-mm@kvack.org
Cc: linux-mips@linux-mips.org
Cc: Dave Hansen
Link: http://lkml.kernel.org/r/20141114151819.1908C900@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner
---
 include/uapi/asm-generic/siginfo.h | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index ba5be7fdbdfe..1e3552037a5a 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -91,6 +91,10 @@ typedef struct siginfo {
 			int _trapno;	/* TRAP # which caused the signal */
 #endif
 			short _addr_lsb; /* LSB of the reported address */
+			struct {
+				void __user *_lower;
+				void __user *_upper;
+			} _addr_bnd;
 		} _sigfault;

 	/* SIGPOLL */
@@ -131,6 +135,8 @@ typedef struct siginfo {
 #define si_trapno	_sifields._sigfault._trapno
 #endif
 #define si_addr_lsb	_sifields._sigfault._addr_lsb
+#define si_lower	_sifields._sigfault._addr_bnd._lower
+#define si_upper	_sifields._sigfault._addr_bnd._upper
 #define si_band		_sifields._sigpoll._band
 #define si_fd		_sifields._sigpoll._fd
 #ifdef __ARCH_SIGSYS
@@ -199,7 +205,8 @@ typedef struct siginfo {
  */
 #define SEGV_MAPERR	(__SI_FAULT|1)	/* address not mapped to object */
 #define SEGV_ACCERR	(__SI_FAULT|2)	/* invalid permissions for mapped object */
-#define NSIGSEGV	2
+#define SEGV_BNDERR	(__SI_FAULT|3)	/* failed address bound checks */
+#define NSIGSEGV	3

 /*
  * SIGBUS si_codes
--
cgit v1.2.3

From 4aae7e436fa51faf4bf5d11b175aea82cfe8224a Mon Sep 17 00:00:00 2001
From: Qiaowei Ren
Date: Fri, 14 Nov 2014 07:18:25 -0800
Subject: x86, mpx: Introduce VM_MPX to indicate that a VMA is MPX specific

MPX-enabled applications using large swaths of memory can potentially
have large numbers of bounds tables in process address space to save
bounds information. These tables can take up huge amounts of memory
(as much as 80% of the memory on the system) even if we clean them up
aggressively. In the worst-case scenario, the tables can be 4x the
size of the data structure being tracked. IOW, a 1-page structure can
require 4 bounds-table pages.

Being this huge, our expectation is that folks using MPX are going to
be keen on figuring out how much memory is being dedicated to it, so
we need a way to track memory use for MPX.

If we want to specifically track MPX VMAs, we need to be able to
distinguish them from normal VMAs and keep them from getting merged
with normal VMAs. A new VM_ flag set only on MPX VMAs does both of
those things. With this flag, MPX bounds-table VMAs can be
distinguished from other VMAs, and userspace can also walk
/proc/$pid/smaps to get memory usage for MPX (a sketch of this
follows below).

In addition to this flag, we also introduce a special ->vm_ops
specific to MPX VMAs (see the patch "add MPX specific mmap
interface"), but currently different ->vm_ops do not by themselves
prevent VMA merging, so we still need this flag.

We understand that VM_ flags are scarce and are open to other options.
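As an illustrative sketch (not part of this patch): assuming a kernel
that also decodes VM_MPX as the "mp" tag in the VmFlags line of
/proc/$pid/smaps — that decoding lives in fs/proc, outside these
include/-only hunks — a userspace tally of MPX memory could look like
this:

/* Sketch only: sums the Size of every VMA whose VmFlags contains
 * "mp" (assumed VM_MPX tag).  Size appears before VmFlags in each
 * smaps entry, so cur_kb is always set before the flags line. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/smaps", "r");
	char line[256];
	unsigned long kb, cur_kb = 0, mpx_kb = 0;

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "Size: %lu kB", &kb) == 1)
			cur_kb = kb;		/* size of current VMA */
		else if (!strncmp(line, "VmFlags:", 8) &&
			 strstr(line, " mp "))	/* VMA has the MPX flag */
			mpx_kb += cur_kb;
	}
	fclose(f);
	printf("MPX bounds-table memory: %lu kB\n", mpx_kb);
	return 0;
}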
Signed-off-by: Qiaowei Ren
Signed-off-by: Dave Hansen
Cc: linux-mm@kvack.org
Cc: linux-mips@linux-mips.org
Cc: Dave Hansen
Link: http://lkml.kernel.org/r/20141114151825.565625B3@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner
---
 include/linux/mm.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include')

diff --git a/include/linux/mm.h b/include/linux/mm.h
index b46461116cd2..f7606d3a0915 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -128,6 +128,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
+#define VM_ARCH_2	0x02000000
 #define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

 #ifdef CONFIG_MEM_SOFT_DIRTY
@@ -155,6 +156,11 @@ extern unsigned int kobjsize(const void *objp);
 # define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
 #endif

+#if defined(CONFIG_X86)
+/* MPX specific bounds table or bounds directory */
+# define VM_MPX		VM_ARCH_2
+#endif
+
 #ifndef VM_GROWSUP
 # define VM_GROWSUP	VM_NONE
 #endif
--
cgit v1.2.3

From fe3d197f84319d3bce379a9c0dc17b1f48ad358c Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Fri, 14 Nov 2014 07:18:29 -0800
Subject: x86, mpx: On-demand kernel allocation of bounds tables

This is really the meat of the MPX patch set. If there is one patch to
review in the entire series, this is the one. There is a new ABI here,
and this kernel code also interacts with userspace memory in a
relatively unusual manner. (Small FAQ below.)

Long Description:

This patch adds two prctl() commands that enable or disable kernel
management of bounds tables: on-demand allocation (see the patch
"on-demand kernel allocation of bounds tables") and cleanup (see the
patch "cleanup unused bound tables").

Applications do not strictly need the kernel to manage bounds tables,
and we expect some applications to use MPX without taking advantage of
this kernel support. This means the kernel can not simply infer from
the MPX registers whether an application wants bounds-table
management; the prctl() is an explicit signal from userspace. (A
usage sketch follows the FAQ below.)

PR_MPX_ENABLE_MANAGEMENT is meant to be a signal from userspace that
it requires the kernel's help in managing bounds tables.
PR_MPX_DISABLE_MANAGEMENT is the opposite, meaning that userspace no
longer wants the kernel's help. With PR_MPX_DISABLE_MANAGEMENT, the
kernel won't allocate and free bounds tables even if the CPU supports
MPX.

PR_MPX_ENABLE_MANAGEMENT will fetch the base address of the bounds
directory out of a userspace register (bndcfgu) and then cache it in a
new field (->bd_addr) in the 'mm_struct'. PR_MPX_DISABLE_MANAGEMENT
will set "bd_addr" to an invalid address. Using this scheme, we can
use "bd_addr" alone to determine whether kernel management of bounds
tables is enabled.

Also, the only way to access the bndcfgu register is via xsaves, which
can be expensive. Caching "bd_addr" like this also helps reduce the
cost of those xsaves when doing table cleanup at munmap() time.
Unfortunately, we can not apply this optimization at #BR fault time
because there we need an xsave anyway to get the value of BNDSTATUS.

==== Why does the hardware even have these Bounds Tables? ====

MPX only has 4 hardware registers for storing bounds information. If
MPX-enabled code needs more than these 4 registers, it needs to spill
them somewhere.
It has two special instructions for this which allow the bounds to be
moved between the bounds registers and some new "bounds tables". These
#BR exceptions are conceptually similar to a page fault: the MPX
hardware raises them both on bounds violations and when the spill
instructions touch tables that are not present.

This patch handles those #BR exceptions for not-present tables by
carving the space out of the normal process's address space
(essentially calling the new mmap() interface introduced earlier in
this patch set) and then pointing the bounds directory at it.

The tables *need* to be accessed and controlled by userspace because
the instructions for moving bounds in and out of them are extremely
frequent. They potentially happen every time a register pointing to
memory is dereferenced. Any direct kernel involvement (like a syscall)
to access the tables would obviously destroy performance.

==== Why not do this in userspace? ====

This patch is obviously doing this allocation in the kernel. However,
MPX does not strictly *require* anything in the kernel. It can
theoretically be done completely from userspace. Here are a few ways
this *could* be done. I don't think any of them are practical in the
real world, but here they are.

Q: Can virtual space simply be reserved for the bounds tables so that
   we never have to allocate them?
A: As noted earlier, these tables are *HUGE*. An X-GB virtual area
   needs 4*X GB of virtual space, plus 2GB for the bounds directory.
   If we were to preallocate them for the 128TB of user virtual
   address space, we would need to reserve 512TB+2GB, which is larger
   than the entire virtual address space today. This means they can
   not be reserved ahead of time. Also, a single process's
   pre-populated bounds directory consumes 2GB of virtual *AND*
   physical memory. IOW, it's completely infeasible to prepopulate
   bounds directories.

Q: Can we preallocate bounds-table space at the same time memory is
   allocated which might contain pointers that might eventually need
   bounds tables?
A: This would work if we could hook the site of each and every memory
   allocation syscall. This can be done for small, constrained
   applications. But it isn't practical at a larger scale since a
   given app has no way of controlling how all the parts of the app
   might allocate memory (think libraries). The kernel is really the
   only place to intercept these calls.

Q: Could a bounds fault be handed to userspace and the tables
   allocated there in a signal handler instead of in the kernel?
A: (thanks to tglx) mmap() is not on the list of safe async handler
   functions, and even if mmap() did work, it would still require
   locking or nasty tricks to keep track of the allocation state
   there.

Having ruled out all of the userspace-only approaches for managing
bounds tables that we could think of, we create them on demand in the
kernel.
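As a usage sketch of this new ABI (illustrative only: it assumes the
compiler/runtime has already enabled MPX via XSAVE and set up the
bounds directory, and it hard-codes the prctl numbers from the
prctl.h hunk below for libcs that lack them):

/* Minimal sketch of opting in/out of kernel bounds-table management. */
#include <sys/prctl.h>

#ifndef PR_MPX_ENABLE_MANAGEMENT
#define PR_MPX_ENABLE_MANAGEMENT	43
#define PR_MPX_DISABLE_MANAGEMENT	44
#endif

int enable_mpx_management(void)
{
	/* Ask the kernel to allocate/free bounds tables on our behalf.
	 * Returns -1 with errno set if the CPU or kernel lacks MPX. */
	return prctl(PR_MPX_ENABLE_MANAGEMENT, 0, 0, 0, 0);
}

int disable_mpx_management(void)
{
	/* Tell the kernel we no longer want its help. */
	return prctl(PR_MPX_DISABLE_MANAGEMENT, 0, 0, 0, 0);
}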
Based-on-patch-by: Qiaowei Ren
Signed-off-by: Dave Hansen
Cc: linux-mm@kvack.org
Cc: linux-mips@linux-mips.org
Cc: Dave Hansen
Link: http://lkml.kernel.org/r/20141114151829.AD4310DE@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner
---
 include/asm-generic/mmu_context.h | 5 +++++
 include/linux/mm_types.h          | 4 ++++
 include/uapi/linux/prctl.h        | 6 ++++++
 3 files changed, 15 insertions(+)

(limited to 'include')

diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
index a7eec910ba6c..1f2a8f9c9264 100644
--- a/include/asm-generic/mmu_context.h
+++ b/include/asm-generic/mmu_context.h
@@ -42,4 +42,9 @@ static inline void activate_mm(struct mm_struct *prev_mm,
 {
 }

+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+				     struct vm_area_struct *vma)
+{
+}
+
 #endif /* __ASM_GENERIC_MMU_CONTEXT_H */

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6e0b286649f1..004e9d17b47e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -454,6 +454,10 @@ struct mm_struct {
 	bool tlb_flush_pending;
 #endif
 	struct uprobes_state uprobes_state;
+#ifdef CONFIG_X86_INTEL_MPX
+	/* address of the bounds directory */
+	void __user *bd_addr;
+#endif
 };

 static inline void mm_init_cpumask(struct mm_struct *mm)

diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 513df75d0fc9..89f63503f903 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -179,4 +179,10 @@ struct prctl_mm_map {
 #define PR_SET_THP_DISABLE	41
 #define PR_GET_THP_DISABLE	42

+/*
+ * Tell the kernel to start/stop helping userspace manage bounds tables.
+ */
+#define PR_MPX_ENABLE_MANAGEMENT	43
+#define PR_MPX_DISABLE_MANAGEMENT	44
+
 #endif /* _LINUX_PRCTL_H */
--
cgit v1.2.3

From 1de4fa14ee25a8edf287855513b61c3945c8878a Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Fri, 14 Nov 2014 07:18:31 -0800
Subject: x86, mpx: Cleanup unused bound tables

The previous patch allocates bounds tables on-demand. As noted in an
earlier description, these can add up to *HUGE* amounts of memory,
which has caused OOMs in practice when running tests.

This patch adds support for freeing bounds tables when they are no
longer in use. There are two types of mappings in play when unmapping
tables:

 1. The mapping with the actual data, which userspace is munmap()ing
    or brk()ing away, etc...
 2. The mapping for the bounds table *backing* the data (tagged with
    VM_MPX; see the patch "add MPX specific mmap interface").

If userspace uses the prctl() introduced earlier in this patch set to
enable kernel management of bounds tables, then when it unmaps the
first type of mapping (the actual data), the kernel needs to free the
mapping for the bounds table backing it. This patch hooks in at the
very end of do_munmap() to do so: we look at the addresses being
unmapped and find the bounds directory entries and tables which cover
those addresses. If an entire table is unused, we clear the associated
directory entry and free the table.

Once we unmap a bounds table, we would be left with a bounds directory
entry pointing at empty address space. That address space might now be
allocated for some other (random) use, and the MPX hardware might now
try to walk it as if it were a bounds table. That would be bad. So any
unmapping of an entire bounds table has to be accompanied by a
corresponding write to the bounds directory entry to invalidate it.
That write to the bounds directory can fault, which causes the
following problem: since we are doing the freeing from munmap() (and
other paths like it), we hold mmap_sem for write. If we fault, the
page fault handler will attempt to acquire mmap_sem for read, and we
will deadlock. To avoid the deadlock, we pagefault_disable() when
touching the bounds directory entry and use get_user_pages() to
resolve the fault.

The unmapping of bounds tables happens under vm_munmap(), and we also
(indirectly) call vm_munmap() to _do_ the unmapping of the bounds
tables. We avoid unbounded recursion by disallowing freeing of bounds
tables *for* bounds tables. This would not occur normally, so it
should not have any practical impact. Being strict about it here helps
ensure that we do not have an exploitable stack overflow.

Based-on-patch-by: Qiaowei Ren
Signed-off-by: Dave Hansen
Cc: linux-mm@kvack.org
Cc: linux-mips@linux-mips.org
Cc: Dave Hansen
Link: http://lkml.kernel.org/r/20141114151831.E4531C4A@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner
---
 include/asm-generic/mmu_context.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include')

diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
index 1f2a8f9c9264..aa2d8ba35b20 100644
--- a/include/asm-generic/mmu_context.h
+++ b/include/asm-generic/mmu_context.h
@@ -47,4 +47,10 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 {
 }

+static inline void arch_unmap(struct mm_struct *mm,
+			      struct vm_area_struct *vma,
+			      unsigned long start, unsigned long end)
+{
+}
+
 #endif /* __ASM_GENERIC_MMU_CONTEXT_H */
--
cgit v1.2.3

From 62e88b1c00de9cb30d937841ed5debed871070b8 Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Tue, 18 Nov 2014 10:23:50 -0800
Subject: mm: Make arch_unmap()/bprm_mm_init() available to all architectures

The x86 MPX patch set calls arch_unmap() and arch_bprm_mm_init() from
fs/exec.c, so we need at least a stub for them in all architectures.
They are only called under an #ifdef for CONFIG_MMU=y, so we can at
least restrict this to architectures with MMU support.

blackfin/c6x have no MMU support, so they do not call arch_unmap().
They also do not include mm_hooks.h or mmu_context.h at all and do
not need to be touched.

s390, um and unicore32 do not use asm-generic/mm_hooks.h, so they got
their own arch_unmap() versions. (I also moved um's arch_dup_mmap()
to be closer to the other mm_hooks.h functions.)

xtensa only includes mm_hooks when MMU=y, which should be fine since
arch_unmap() is called only from MMU=y code.

For the rest, we use the stub copies of these functions in
asm-generic/mm_hooks.h.

I cross-compiled defconfigs for cris (to check NOMMU) and s390 to
make sure that this works. I also checked a 64-bit build of UML and
all my normal x86 builds, including PARAVIRT on and off.
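For reference, a sketch of how an architecture overrides these no-op
hooks — roughly what the x86 side of the MPX series does in
arch/x86/include/asm/mmu_context.h. The arch/ code is outside these
include/-only hunks, and the helper names mpx_notify_unmap() and
mpx_mm_init() are recalled from that side of the series, so treat this
as illustrative rather than an exact copy:

/* Illustrative x86-style overrides of the generic no-op hooks. */
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/* Free any bounds tables backing the range being unmapped. */
	mpx_notify_unmap(mm, vma, start, end);
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	/* Start the freshly exec()'d mm with MPX management reset. */
	mpx_mm_init(mm);
}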
Signed-off-by: Dave Hansen
Cc: Dave Hansen
Cc: linux-arch@vger.kernel.org
Cc: x86@kernel.org
Link: http://lkml.kernel.org/r/20141118182350.8B4AA2C2@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner
---
 include/asm-generic/mm_hooks.h    | 17 ++++++++++++++---
 include/asm-generic/mmu_context.h |  6 ------
 2 files changed, 14 insertions(+), 9 deletions(-)

(limited to 'include')

diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 67dea8123683..866aa461efa5 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -1,7 +1,7 @@
 /*
- * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap, to
- * be included in asm-FOO/mmu_context.h for any arch FOO which doesn't
- * need to hook these.
+ * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap
+ * and arch_unmap to be included in asm-FOO/mmu_context.h for any
+ * arch FOO which doesn't need to hook these.
  */
 #ifndef _ASM_GENERIC_MM_HOOKS_H
 #define _ASM_GENERIC_MM_HOOKS_H
@@ -15,4 +15,15 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
 {
 }

+static inline void arch_unmap(struct mm_struct *mm,
+			      struct vm_area_struct *vma,
+			      unsigned long start, unsigned long end)
+{
+}
+
+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+				     struct vm_area_struct *vma)
+{
+}
+
 #endif /* _ASM_GENERIC_MM_HOOKS_H */

diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
index aa2d8ba35b20..1f2a8f9c9264 100644
--- a/include/asm-generic/mmu_context.h
+++ b/include/asm-generic/mmu_context.h
@@ -47,10 +47,4 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 {
 }

-static inline void arch_unmap(struct mm_struct *mm,
-			      struct vm_area_struct *vma,
-			      unsigned long start, unsigned long end)
-{
-}
-
 #endif /* __ASM_GENERIC_MMU_CONTEXT_H */
--
cgit v1.2.3

From 9f7789f845cc100dd0d94fa1aa083e3373dc03db Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Sat, 22 Nov 2014 08:37:11 -0800
Subject: asm-generic: Remove asm-generic arch_bprm_mm_init()

This is a follow-on to commit 62e88b1c00de ("mm: Make
arch_unmap()/bprm_mm_init() available to all architectures").

I removed the asm-generic version of arch_unmap() in that patch, but
missed arch_bprm_mm_init(). This broke the build for architectures
that use asm-generic/mmu_context.h and actually have an MMU.

Fixes: 62e88b1c00de ("mm: Make arch_unmap()/bprm_mm_init() available to all architectures")
Signed-off-by: Dave Hansen
Cc: Dave Hansen
Cc: linux-arch@vger.kernel.org
Cc: x86@kernel.org
Link: http://lkml.kernel.org/r/20141122163711.0F037EE6@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner
---
 include/asm-generic/mmu_context.h | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'include')

diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
index 1f2a8f9c9264..a7eec910ba6c 100644
--- a/include/asm-generic/mmu_context.h
+++ b/include/asm-generic/mmu_context.h
@@ -42,9 +42,4 @@ static inline void activate_mm(struct mm_struct *prev_mm,
 {
 }

-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-				     struct vm_area_struct *vma)
-{
-}
-
 #endif /* __ASM_GENERIC_MMU_CONTEXT_H */
--
cgit v1.2.3
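To close out the series from the application side, a minimal sketch of
consuming the SEGV_BNDERR ABI added by the first patch. It assumes a
libc that already exposes SEGV_BNDERR and the si_lower/si_upper
fields; older libcs would need to read the raw siginfo layout
themselves:

/* Sketch: report the violated bounds when MPX raises SEGV_BNDERR. */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void bnd_handler(int sig, siginfo_t *si, void *ctx)
{
	if (si->si_code == SEGV_BNDERR)
		/* fprintf() is not async-signal-safe; tolerable only
		 * because this demo handler exits immediately. */
		fprintf(stderr, "bound violation: addr=%p bounds=[%p, %p]\n",
			si->si_addr, si->si_lower, si->si_upper);
	_exit(1);	/* never return to the faulting instruction */
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction	= bnd_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGSEGV, &sa, NULL);
	/* ... MPX-instrumented code would run here ... */
	return 0;
}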