| author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-05-01 14:41:04 -0700 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-05-01 14:41:04 -0700 |
| commit | 5db6db0d400edd8bec274e34960cfa22838e1df5 (patch) | |
| tree | 3d7934f2eb27a2b72b87eae3c2918cf2e635d814 /include | |
| parent | 5fab10041b4389b61de7e7a49893190bae686241 (diff) | |
| parent | 2fefc97b2180518bac923fba3f79fdca1f41dc15 (diff) | |
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
"This is the uaccess unification pile. It's _not_ the end of uaccess
work, but the next batch of that will go into the next cycle. This one
mostly takes copy_from_user() and friends out of arch/* and gets the
zero-padding behaviour in sync for all architectures.
Dealing with the nocache/writethrough mess is for the next cycle;
fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
sold on access_ok() in there, BTW; just not in this pile), same for
reducing __copy_... callsites, strn*... stuff, etc. - there will be a
pile about as large as this one in the next merge window.
This one sat in -next for weeks. -3KLoC"
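For readers outside arch/ land, the "zero-padding behaviour" being synchronized is caller-visible: copy_from_user() returns the number of bytes it could *not* copy, and on a short copy it zero-fills the tail of the kernel buffer instead of leaving stale data there. A minimal sketch of that contract (demo_read() and its 64-byte buffer are invented for illustration, not code from this series):

```c
#include <linux/uaccess.h>

/* Sketch: rely on the unified copy_from_user() semantics -- a non-zero
 * return means a partial copy, and the uncopied tail of the kernel
 * buffer has been zeroed, so buf is always fully initialized. */
static int demo_read(const void __user *uptr, size_t len)
{
	char buf[64];

	if (len > sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, uptr, len))
		return -EFAULT;
	/* ... parse buf ... */
	return 0;
}
```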
* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
HAVE_ARCH_HARDENED_USERCOPY is unconditional now
CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
m32r: switch to RAW_COPY_USER
hexagon: switch to RAW_COPY_USER
microblaze: switch to RAW_COPY_USER
get rid of padding, switch to RAW_COPY_USER
ia64: get rid of copy_in_user()
ia64: sanitize __access_ok()
ia64: get rid of 'segment' argument of __do_{get,put}_user()
ia64: get rid of 'segment' argument of __{get,put}_user_check()
ia64: add extable.h
powerpc: get rid of zeroing, switch to RAW_COPY_USER
esas2r: don't open-code memdup_user()
alpha: fix stack smashing in old_adjtimex(2)
don't open-code kernel_setsockopt()
mips: switch to RAW_COPY_USER
mips: get rid of tail-zeroing in primitives
mips: make copy_from_user() zero tail explicitly
mips: clean and reorder the forest of macros...
mips: consolidate __invoke_... wrappers
...
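The per-architecture "switch to RAW_COPY_USER" commits above all follow one recipe: select ARCH_HAS_RAW_COPY_USER in Kconfig, delete the arch's private copy_{to,from}_user() plumbing, and provide just the two raw primitives that the generic <linux/uaccess.h> (see the diff below) builds everything else on. A rough sketch of the arch side, where __arch_copy_{from,to}_user() are hypothetical stand-ins for an architecture's real low-level copy routines:

```c
/* arch/foo/include/asm/uaccess.h (sketch of a converted architecture).
 * The raw primitives may assume access_ok() already succeeded, must
 * return the number of bytes left uncopied, and do no zero-padding --
 * the generic copy_from_user() handles tail-zeroing itself. */
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __arch_copy_from_user(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __arch_copy_to_user(to, from, n);
}
```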
Diffstat (limited to 'include')
-rw-r--r-- | include/asm-generic/extable.h | 26
-rw-r--r-- | include/asm-generic/uaccess.h | 135
-rw-r--r-- | include/linux/uaccess.h | 197
-rw-r--r-- | include/rdma/ib.h | 2
4 files changed, 226 insertions, 134 deletions
diff --git a/include/asm-generic/extable.h b/include/asm-generic/extable.h
new file mode 100644
index 000000000000..ca14c6664027
--- /dev/null
+++ b/include/asm-generic/extable.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_GENERIC_EXTABLE_H
+#define __ASM_GENERIC_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+	unsigned long insn, fixup;
+};
+
+
+struct pt_regs;
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index cc6bb319e464..bbe4bb438e39
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -6,7 +6,6 @@
  * on any machine that has kernel and user data in the same
  * address space, e.g. all NOMMU machines.
  */
-#include <linux/sched.h>
 #include <linux/string.h>
 
 #include <asm/segment.h>
@@ -35,9 +34,6 @@ static inline void set_fs(mm_segment_t fs)
 #define segment_eq(a, b) ((a).seg == (b).seg)
 #endif
 
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
 
 /*
@@ -52,87 +48,6 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
 #endif
 
 /*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-/*
- * architectures with an MMU should override these two
- */
-#ifndef __copy_from_user
-static inline __must_check long __copy_from_user(void *to,
-		const void __user * from, unsigned long n)
-{
-	if (__builtin_constant_p(n)) {
-		switch(n) {
-		case 1:
-			*(u8 *)to = *(u8 __force *)from;
-			return 0;
-		case 2:
-			*(u16 *)to = *(u16 __force *)from;
-			return 0;
-		case 4:
-			*(u32 *)to = *(u32 __force *)from;
-			return 0;
-#ifdef CONFIG_64BIT
-		case 8:
-			*(u64 *)to = *(u64 __force *)from;
-			return 0;
-#endif
-		default:
-			break;
-		}
-	}
-
-	memcpy(to, (const void __force *)from, n);
-	return 0;
-}
-#endif
-
-#ifndef __copy_to_user
-static inline __must_check long __copy_to_user(void __user *to,
-		const void *from, unsigned long n)
-{
-	if (__builtin_constant_p(n)) {
-		switch(n) {
-		case 1:
-			*(u8 __force *)to = *(u8 *)from;
-			return 0;
-		case 2:
-			*(u16 __force *)to = *(u16 *)from;
-			return 0;
-		case 4:
-			*(u32 __force *)to = *(u32 *)from;
-			return 0;
-#ifdef CONFIG_64BIT
-		case 8:
-			*(u64 __force *)to = *(u64 *)from;
-			return 0;
-#endif
-		default:
-			break;
-		}
-	}
-
-	memcpy((void __force *)to, from, n);
-	return 0;
-}
-#endif
-
-/*
  * These are the main single-value transfer routines. They automatically
  * use the right size if we just have the right pointer type.
  * This version just falls back to copy_{from,to}_user, which should
@@ -171,8 +86,7 @@ static inline __must_check long __copy_to_user(void __user *to,
 
 static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
 {
-	size = __copy_to_user(ptr, x, size);
-	return size ? -EFAULT : size;
+	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
 }
 
 #define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)
@@ -187,28 +101,28 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 	__chk_user_ptr(ptr);					\
 	switch (sizeof(*(ptr))) {				\
 	case 1: {						\
-		unsigned char __x;				\
+		unsigned char __x = 0;				\
 		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
 					 ptr, &__x);		\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 2: {						\
-		unsigned short __x;				\
+		unsigned short __x = 0;				\
 		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
 					 ptr, &__x);		\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 4: {						\
-		unsigned int __x;				\
+		unsigned int __x = 0;				\
 		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
 					 ptr, &__x);		\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 8: {						\
-		unsigned long long __x;				\
+		unsigned long long __x = 0;			\
 		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
 					 ptr, &__x);		\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
@@ -233,12 +147,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 #ifndef __get_user_fn
 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 {
-	size_t n = __copy_from_user(x, ptr, size);
-	if (unlikely(n)) {
-		memset(x + (size - n), 0, n);
-		return -EFAULT;
-	}
-	return 0;
+	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
 }
 
 #define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)
@@ -247,36 +156,6 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 
 extern int __get_user_bad(void) __attribute__((noreturn));
 
-#ifndef __copy_from_user_inatomic
-#define __copy_from_user_inatomic __copy_from_user
-#endif
-
-#ifndef __copy_to_user_inatomic
-#define __copy_to_user_inatomic __copy_to_user
-#endif
-
-static inline long copy_from_user(void *to,
-		const void __user * from, unsigned long n)
-{
-	unsigned long res = n;
-	might_fault();
-	if (likely(access_ok(VERIFY_READ, from, n)))
-		res = __copy_from_user(to, from, n);
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
-}
-
-static inline long copy_to_user(void __user *to,
-		const void *from, unsigned long n)
-{
-	might_fault();
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_to_user(to, from, n);
-	else
-		return n;
-}
-
 /*
  * Copy a null terminated string from userspace.
  */
@@ -348,4 +227,6 @@ clear_user(void __user *to, unsigned long n)
 	return __clear_user(to, n);
 }
 
+#include <asm/extable.h>
+
 #endif /* __ASM_GENERIC_UACCESS_H */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index f30c187ed785..e0cbfb09e60f
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,8 +2,199 @@
 #define __LINUX_UACCESS_H__
 
 #include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/kasan-checks.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
+
 #include <asm/uaccess.h>
 
+/*
+ * Architectures should provide two primitives (raw_copy_{to,from}_user())
+ * and get rid of their private instances of copy_{to,from}_user() and
+ * __copy_{to,from}_user{,_inatomic}().
+ *
+ * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
+ * return the amount left to copy. They should assume that access_ok() has
+ * already been checked (and succeeded); they should *not* zero-pad anything.
+ * No KASAN or object size checks either - those belong here.
+ *
+ * Both of these functions should attempt to copy size bytes starting at from
+ * into the area starting at to. They must not fetch or store anything
+ * outside of those areas. Return value must be between 0 (everything
+ * copied successfully) and size (nothing copied).
+ *
+ * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
+ * at to must become equal to the bytes fetched from the corresponding area
+ * starting at from. All data past to + size - N must be left unmodified.
+ *
+ * If copying succeeds, the return value must be 0. If some data cannot be
+ * fetched, it is permitted to copy less than had been fetched; the only
+ * hard requirement is that not storing anything at all (i.e. returning size)
+ * should happen only when nothing could be copied. In other words, you don't
+ * have to squeeze as much as possible - it is allowed, but not necessary.
+ *
+ * For raw_copy_from_user() to always points to kernel memory and no faults
+ * on store should happen. Interpretation of from is affected by set_fs().
+ * For raw_copy_to_user() it's the other way round.
+ *
+ * Both can be inlined - it's up to architectures whether it wants to bother
+ * with that. They should not be used directly; they are used to implement
+ * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user_inatomic())
+ * that are used instead. Out of those, __... ones are inlined. Plain
+ * copy_{to,from}_user() might or might not be inlined. If you want them
+ * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
+ *
+ * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
+ * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
+ * at all; their callers absolutely must check the return value.
+ *
+ * Biarch ones should also provide raw_copy_in_user() - similar to the above,
+ * but both source and destination are __user pointers (affected by set_fs()
+ * as usual) and both source and destination can trigger faults.
+ */
+
+static __always_inline unsigned long
+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+	kasan_check_write(to, n);
+	check_object_size(to, n, false);
+	return raw_copy_from_user(to, from, n);
+}
+
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	might_fault();
+	kasan_check_write(to, n);
+	check_object_size(to, n, false);
+	return raw_copy_from_user(to, from, n);
+}
+
+/**
+ * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
+ * @to:   Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.
+ *
+ * Copy data from kernel space to user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ * The caller should also make sure he pins the user space address
+ * so that we don't result in page fault and sleep.
+ */
+static __always_inline unsigned long
+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+{
+	kasan_check_read(from, n);
+	check_object_size(from, n, true);
+	return raw_copy_to_user(to, from, n);
+}
+
+static __always_inline unsigned long
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	might_fault();
+	kasan_check_read(from, n);
+	check_object_size(from, n, true);
+	return raw_copy_to_user(to, from, n);
+}
+
+#ifdef INLINE_COPY_FROM_USER
+static inline unsigned long
+_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned long res = n;
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = raw_copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
+}
+#else
+extern unsigned long
+_copy_from_user(void *, const void __user *, unsigned long);
+#endif
+
+#ifdef INLINE_COPY_TO_USER
+static inline unsigned long
+_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (access_ok(VERIFY_WRITE, to, n))
+		n = raw_copy_to_user(to, from, n);
+	return n;
+}
+#else
+extern unsigned long
+_copy_to_user(void __user *, const void *, unsigned long);
+#endif
+
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
+static __always_inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	int sz = __compiletime_object_size(to);
+
+	might_fault();
+	kasan_check_write(to, n);
+
+	if (likely(sz < 0 || sz >= n)) {
+		check_object_size(to, n, false);
+		n = _copy_from_user(to, from, n);
+	} else if (!__builtin_constant_p(n))
+		copy_user_overflow(sz, n);
+	else
+		__bad_copy_user();
+
+	return n;
+}
+
+static __always_inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	int sz = __compiletime_object_size(from);
+
+	kasan_check_read(from, n);
+	might_fault();
+
+	if (likely(sz < 0 || sz >= n)) {
+		check_object_size(from, n, true);
+		n = _copy_to_user(to, from, n);
+	} else if (!__builtin_constant_p(n))
+		copy_user_overflow(sz, n);
+	else
+		__bad_copy_user();
+
+	return n;
+}
+#ifdef CONFIG_COMPAT
+static __always_inline unsigned long __must_check
+__copy_in_user(void __user *to, const void *from, unsigned long n)
+{
+	might_fault();
+	return raw_copy_in_user(to, from, n);
+}
+static __always_inline unsigned long __must_check
+copy_in_user(void __user *to, const void *from, unsigned long n)
+{
+	might_fault();
+	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
+		n = raw_copy_in_user(to, from, n);
+	return n;
+}
+#endif
+
 static __always_inline void pagefault_disabled_inc(void)
 {
 	current->pagefault_disabled++;
@@ -67,12 +258,6 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
 	return __copy_from_user_inatomic(to, from, n);
 }
 
-static inline unsigned long __copy_from_user_nocache(void *to,
-				const void __user *from, unsigned long n)
-{
-	return __copy_from_user(to, from, n);
-}
-
 #endif	/* ARCH_HAS_NOCACHE_UACCESS */
 
 /*
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index 9b4c22a36931..66dbed0c146d
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -100,7 +100,7 @@ struct sockaddr_ib {
  */
 static inline bool ib_safe_file_access(struct file *filp)
 {
-	return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+	return filp->f_cred == current_cred() && !uaccess_kernel();
 }
 
 #endif /* _RDMA_IB_H */
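The include/rdma/ib.h hunk is this pile's one caller converted to the new uaccess_kernel() helper, which the linux/uaccess.h hunk above defines as segment_eq(get_fs(), KERNEL_DS); since get_fs() is either USER_DS or KERNEL_DS, !uaccess_kernel() is equivalent to the old open-coded segment_eq(get_fs(), USER_DS). The same guard in a hypothetical driver:

```c
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

/* Hypothetical driver-side copy of the ib_safe_file_access() test:
 * refuse the operation if the fd was opened under different
 * credentials, or while a set_fs(KERNEL_DS) section is active --
 * either would let user-controlled addresses escape the usual
 * uaccess checks. */
static bool demo_safe_file_access(struct file *filp)
{
	return filp->f_cred == current_cred() && !uaccess_kernel();
}
```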