From 627f9c3af3845789a31cab587b49037c62255093 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 17 Nov 2017 15:28:08 -0800
Subject: lib/int_sqrt: optimize initial value compute

commit f8ae107eef209bff29a5816bc1aad40d5cd69a80 upstream.

The initial value (@m) compute is:

	m = 1UL << (BITS_PER_LONG - 2);
	while (m > x)
		m >>= 2;

which is a linear search for the highest even bit smaller than or equal
to @x. We can implement this using a binary search using __fls() (or
better when it's hardware implemented):

	m = 1UL << (__fls(x) & ~1UL);

Especially for small values of @x, which are the more common arguments
when doing a CDF on idle times, the linear search is close to the worst
case, while the binary search of __fls() is a constant 6 (or 5 on 32-bit)
branches.

                cycles:                  branches:               branch-misses:

  PRE:
    hot:   43.633557 +- 0.034373    45.333132 +- 0.002277    0.023529 +- 0.000681
    cold: 207.438411 +- 0.125840    45.333132 +- 0.002277    6.976486 +- 0.004219

  SOFTWARE FLS:
    hot:   29.576176 +- 0.028850    26.666730 +- 0.004511    0.019463 +- 0.000663
    cold: 165.947136 +- 0.188406    26.666746 +- 0.004511    6.133897 +- 0.004386

  HARDWARE FLS:
    hot:   24.720922 +- 0.025161    20.666784 +- 0.004509    0.020836 +- 0.000677
    cold: 132.777197 +- 0.127471    20.666776 +- 0.004509    5.080285 +- 0.003874

Averages computed over all values <128k using an LFSR to generate order.
Cold numbers have an LFSR-based branch trace buffer 'confuser' run between
each int_sqrt() invocation.

Link: http://lkml.kernel.org/r/20171020164644.936577234@infradead.org
Signed-off-by: Peter Zijlstra (Intel)
Suggested-by: Joe Perches
Acked-by: Will Deacon
Acked-by: Linus Torvalds
Cc: Anshul Garg
Cc: Davidlohr Bueso
Cc: David Miller
Cc: Ingo Molnar
Cc: Kees Cook
Cc: Matthew Wilcox
Cc: Michael Davidson
Cc: Thomas Gleixner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
Cc: Joe Perches
Signed-off-by: Greg Kroah-Hartman
---
 lib/int_sqrt.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'lib')

diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index 1afb545a37c5..6d35274170bc 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -7,6 +7,7 @@

 #include
 #include
+#include

 /**
  * int_sqrt - rough approximation to sqrt
@@ -21,10 +22,7 @@ unsigned long int_sqrt(unsigned long x)
 	if (x <= 1)
 		return x;

-	m = 1UL << (BITS_PER_LONG - 2);
-	while (m > x)
-		m >>= 2;
-
+	m = 1UL << (__fls(x) & ~1UL);
 	while (m != 0) {
 		b = y + m;
 		y >>= 1;
-- cgit v1.2.3

From 9c24366f05459c566c7915068d8834b94418a883 Mon Sep 17 00:00:00 2001
From: Andrea Righi
Date: Wed, 13 Feb 2019 01:15:34 +0900
Subject: kprobes: Prohibit probing on bsearch()

[ Upstream commit 02106f883cd745523f7766d90a739f983f19e650 ]

Since the kprobe breakpoint handler uses bsearch(), probing this routine
can cause a recursive breakpoint problem:

  int3
  ->do_int3()
    ->ftrace_int3_handler()
      ->ftrace_location()
        ->ftrace_location_range()
          ->bsearch() -> int3

Prohibit probing on bsearch().
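
For illustration only (this sketch is not part of the patch, and the helper
name is made up), the same blacklisting pattern applies to any routine that
is reachable from the breakpoint handler, which is exactly what the hunk
below does for bsearch():

  #include <linux/kprobes.h>

  /* Any helper called (directly or indirectly) from the int3/kprobe path
   * must be excluded from probing, otherwise placing a probe on it
   * re-enters the breakpoint handler recursively.
   */
  static int fictional_lookup_helper(const void *key)
  {
          /* ... lookup work used by the breakpoint handler ... */
          return 0;
  }
  NOKPROBE_SYMBOL(fictional_lookup_helper);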
Signed-off-by: Andrea Righi Acked-by: Masami Hiramatsu Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Mathieu Desnoyers Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/154998813406.31052.8791425358974650922.stgit@devbox Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin --- lib/bsearch.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'lib') diff --git a/lib/bsearch.c b/lib/bsearch.c index e33c179089db..d50048446b77 100644 --- a/lib/bsearch.c +++ b/lib/bsearch.c @@ -11,6 +11,7 @@ #include #include +#include /* * bsearch - binary search an array of elements @@ -51,3 +52,4 @@ void *bsearch(const void *key, const void *base, size_t num, size_t size, return NULL; } EXPORT_SYMBOL(bsearch); +NOKPROBE_SYMBOL(bsearch); -- cgit v1.2.3 From 1de344caee73ec863de5e9ead6665aea83857def Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Sat, 2 Feb 2019 03:34:36 +0100 Subject: ARM: 8833/1: Ensure that NEON code always compiles with Clang [ Upstream commit de9c0d49d85dc563549972edc5589d195cd5e859 ] While building arm32 allyesconfig, I ran into the following errors: arch/arm/lib/xor-neon.c:17:2: error: You should compile this file with '-mfloat-abi=softfp -mfpu=neon' In file included from lib/raid6/neon1.c:27: /home/nathan/cbl/prebuilt/lib/clang/8.0.0/include/arm_neon.h:28:2: error: "NEON support not enabled" Building V=1 showed NEON_FLAGS getting passed along to Clang but __ARM_NEON__ was not getting defined. Ultimately, it boils down to Clang only defining __ARM_NEON__ when targeting armv7, rather than armv6k, which is the '-march' value for allyesconfig. >From lib/Basic/Targets/ARM.cpp in the Clang source: // This only gets set when Neon instructions are actually available, unlike // the VFP define, hence the soft float and arch check. This is subtly // different from gcc, we follow the intent which was that it should be set // when Neon instructions are actually available. if ((FPU & NeonFPU) && !SoftFloat && ArchVersion >= 7) { Builder.defineMacro("__ARM_NEON", "1"); Builder.defineMacro("__ARM_NEON__"); // current AArch32 NEON implementations do not support double-precision // floating-point even when it is present in VFP. Builder.defineMacro("__ARM_NEON_FP", "0x" + Twine::utohexstr(HW_FP & ~HW_FP_DP)); } Ard Biesheuvel recommended explicitly adding '-march=armv7-a' at the beginning of the NEON_FLAGS definitions so that __ARM_NEON__ always gets definined by Clang. This doesn't functionally change anything because that code will only run where NEON is supported, which is implicitly armv7. 
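
For reference, the error quoted above comes from a preprocessor guard of
roughly this shape (a sketch, not the exact contents of
arch/arm/lib/xor-neon.c), which is why __ARM_NEON__ has to end up defined
once the NEON flags are passed:

  /* Sketch of the guard that produces the quoted build error when the
   * compiler does not define __ARM_NEON__, as Clang does whenever the
   * effective -march is below armv7.
   */
  #ifndef __ARM_NEON__
  #error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
  #endif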
Link: https://github.com/ClangBuiltLinux/linux/issues/287 Suggested-by: Ard Biesheuvel Signed-off-by: Nathan Chancellor Acked-by: Nicolas Pitre Reviewed-by: Nick Desaulniers Reviewed-by: Stefan Agner Signed-off-by: Russell King Signed-off-by: Sasha Levin --- lib/raid6/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index 3057011f5599..10ca69475611 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -24,7 +24,7 @@ endif ifeq ($(CONFIG_KERNEL_MODE_NEON),y) NEON_FLAGS := -ffreestanding ifeq ($(ARCH),arm) -NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon +NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon endif ifeq ($(ARCH),arm64) CFLAGS_REMOVE_neon1.o += -mgeneral-regs-only -- cgit v1.2.3 From 7eceaf5bbfc0ecc0efc89b1f7318e44ea87391cc Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Fri, 5 Apr 2019 18:38:45 -0700 Subject: lib/string.c: implement a basic bcmp [ Upstream commit 5f074f3e192f10c9fade898b9b3b8812e3d83342 ] A recent optimization in Clang (r355672) lowers comparisons of the return value of memcmp against zero to comparisons of the return value of bcmp against zero. This helps some platforms that implement bcmp more efficiently than memcmp. glibc simply aliases bcmp to memcmp, but an optimized implementation is in the works. This results in linkage failures for all targets with Clang due to the undefined symbol. For now, just implement bcmp as a tailcail to memcmp to unbreak the build. This routine can be further optimized in the future. Other ideas discussed: * A weak alias was discussed, but breaks for architectures that define their own implementations of memcmp since aliases to declarations are not permitted (only definitions). Arch-specific memcmp implementations typically declare memcmp in C headers, but implement them in assembly. * -ffreestanding also is used sporadically throughout the kernel. * -fno-builtin-bcmp doesn't work when doing LTO. Link: https://bugs.llvm.org/show_bug.cgi?id=41035 Link: https://code.woboq.org/userspace/glibc/string/memcmp.c.html#bcmp Link: https://github.com/llvm/llvm-project/commit/8e16d73346f8091461319a7dfc4ddd18eedcff13 Link: https://github.com/ClangBuiltLinux/linux/issues/416 Link: http://lkml.kernel.org/r/20190313211335.165605-1-ndesaulniers@google.com Signed-off-by: Nick Desaulniers Reported-by: Nathan Chancellor Reported-by: Adhemerval Zanella Suggested-by: Arnd Bergmann Suggested-by: James Y Knight Suggested-by: Masahiro Yamada Suggested-by: Nathan Chancellor Suggested-by: Rasmus Villemoes Acked-by: Steven Rostedt (VMware) Reviewed-by: Nathan Chancellor Tested-by: Nathan Chancellor Reviewed-by: Masahiro Yamada Reviewed-by: Andy Shevchenko Cc: David Laight Cc: Rasmus Villemoes Cc: Namhyung Kim Cc: Greg Kroah-Hartman Cc: Alexander Shishkin Cc: Dan Williams Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- lib/string.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'lib') diff --git a/lib/string.c b/lib/string.c index ed83562a53ae..1cd9757291b1 100644 --- a/lib/string.c +++ b/lib/string.c @@ -772,6 +772,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count) EXPORT_SYMBOL(memcmp); #endif +#ifndef __HAVE_ARCH_BCMP +/** + * bcmp - returns 0 if and only if the buffers have identical contents. + * @a: pointer to first buffer. + * @b: pointer to second buffer. + * @len: size of buffers. 
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+#undef bcmp
+int bcmp(const void *a, const void *b, size_t len)
+{
+	return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
+
 #ifndef __HAVE_ARCH_MEMSCAN
 /**
  * memscan - Find a character in an area of memory.
-- cgit v1.2.3

From afac7da6d8560c78c6a6d61adb27fbeb064f7a26 Mon Sep 17 00:00:00 2001
From: Stanislaw Gruszka
Date: Thu, 7 Mar 2019 16:28:18 -0800
Subject: lib/div64.c: off by one in shift

[ Upstream commit cdc94a37493135e355dfc0b0e086d84e3eadb50d ]

fls() counts bits from 1 up to 32 (and returns 0 for a zero argument).
If we add 1, we shift right by one bit more and lose precision from the
divisor, which causes the function to return incorrect results for some
numbers. The corrected code was tested in user space, see bugzilla:
https://bugzilla.kernel.org/show_bug.cgi?id=202391

Link: http://lkml.kernel.org/r/1548686944-11891-1-git-send-email-sgruszka@redhat.com
Fixes: 658716d19f8f ("div64_u64(): improve precision on 32bit platforms")
Signed-off-by: Stanislaw Gruszka
Reported-by: Siarhei Volkau
Tested-by: Siarhei Volkau
Acked-by: Oleg Nesterov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
Signed-off-by: Sasha Levin
---
 lib/div64.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/div64.c b/lib/div64.c
index 7f345259c32f..c1c1a4c36dd5 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -102,7 +102,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
 		quot = div_u64_rem(dividend, divisor, &rem32);
 		*remainder = rem32;
 	} else {
-		int n = 1 + fls(high);
+		int n = fls(high);
 		quot = div_u64(dividend >> n, divisor >> n);

 		if (quot != 0)
@@ -140,7 +140,7 @@ u64 div64_u64(u64 dividend, u64 divisor)
 	if (high == 0) {
 		quot = div_u64(dividend, divisor);
 	} else {
-		int n = 1 + fls(high);
+		int n = fls(high);
 		quot = div_u64(dividend >> n, divisor >> n);

 		if (quot != 0)
-- cgit v1.2.3

From fe71230d9a02d267742c893e8acfd870beadc01e Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Tue, 6 Feb 2018 15:36:48 -0800
Subject: kasan: remove redundant initialization of variable 'real_size'

commit 48c232395431c23d35cf3b4c5a090bd793316578 upstream.

The variable real_size is initialized with a value that is never read;
it is re-assigned a new value later on, so the initialization is
redundant and can be removed.
Cleans up clang warning: lib/test_kasan.c:422:21: warning: Value stored to 'real_size' during its initialization is never read Link: http://lkml.kernel.org/r/20180206144950.32457-1-colin.king@canonical.com Signed-off-by: Colin Ian King Acked-by: Andrey Ryabinin Reviewed-by: Andrew Morton Cc: Alexander Potapenko Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Andrey Konovalov Signed-off-by: Greg Kroah-Hartman --- lib/test_kasan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/test_kasan.c b/lib/test_kasan.c index fbdf87920093..4ba4cbe169a8 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -355,7 +355,7 @@ static noinline void __init kasan_stack_oob(void) static noinline void __init ksize_unpoisons_memory(void) { char *ptr; - size_t size = 123, real_size = size; + size_t size = 123, real_size; pr_info("ksize() unpoisons the whole allocated chunk\n"); ptr = kmalloc(size, GFP_KERNEL); -- cgit v1.2.3 From 5d01a64da40355b3274f0f7f746019809184b72c Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 10 Apr 2018 16:30:39 -0700 Subject: kasan: prevent compiler from optimizing away memset in tests commit 69ca372c100fba99c78ef826a1795aa86e4f01a8 upstream. A compiler can optimize away memset calls by replacing them with mov instructions. There are KASAN tests that specifically test that KASAN correctly handles memset calls so we don't want this optimization to happen. The solution is to add -fno-builtin flag to test_kasan.ko Link: http://lkml.kernel.org/r/105ec9a308b2abedb1a0d1fdced0c22d765e4732.1519924383.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Acked-by: Andrey Ryabinin Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Geert Uytterhoeven Cc: Nick Terrell Cc: Chris Mason Cc: Yury Norov Cc: Al Viro Cc: "Luis R . Rodriguez" Cc: Palmer Dabbelt Cc: "Paul E . McKenney" Cc: Jeff Layton Cc: "Jason A . Donenfeld" Cc: Kostya Serebryany Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Andrey Konovalov Signed-off-by: Greg Kroah-Hartman --- lib/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'lib') diff --git a/lib/Makefile b/lib/Makefile index 50144a3aeebd..2447a218fff8 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_TEST_BPF) += test_bpf.o obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o obj-$(CONFIG_TEST_HASH) += test_hash.o obj-$(CONFIG_TEST_KASAN) += test_kasan.o +CFLAGS_test_kasan.o += -fno-builtin obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o obj-$(CONFIG_TEST_LKM) += test_module.o obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o -- cgit v1.2.3 From b34d6553ad81eba86fd3ce7ea2b85d9421e32fbe Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Mon, 6 May 2019 13:45:26 +0300 Subject: ubsan: Fix nasty -Wbuiltin-declaration-mismatch GCC-9 warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit f0996bc2978e02d2ea898101462b960f6119b18f upstream. Building lib/ubsan.c with gcc-9 results in a ton of nasty warnings like this one: lib/ubsan.c warning: conflicting types for built-in function ‘__ubsan_handle_negate_overflow’; expected ‘void(void *, void *)’ [-Wbuiltin-declaration-mismatch] The kernel's declarations of __ubsan_handle_*() often uses 'unsigned long' types in parameters while GCC these parameters as 'void *' types, hence the mismatch. Fix this by using 'void *' to match GCC's declarations. 
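
As a concrete illustration of the mismatch (a sketch using one of the
handlers touched below; the first prototype shows the old kernel-side
type, the second the shape GCC's builtin declaration assumes):

  /* Before: kernel definition disagrees with GCC's expected
   * void (void *, void *) signature, triggering the warning. */
  void __ubsan_handle_negate_overflow(struct overflow_data *data,
                                      unsigned long old_val);

  /* After: value arguments are passed as 'void *' and converted
   * internally, matching what GCC emits at the call sites. */
  void __ubsan_handle_negate_overflow(struct overflow_data *data,
                                      void *old_val);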
Reported-by: Linus Torvalds Signed-off-by: Andrey Ryabinin Fixes: c6d308534aef ("UBSAN: run-time undefined behavior sanity checker") Cc: Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- lib/ubsan.c | 49 +++++++++++++++++++++++-------------------------- 1 file changed, 23 insertions(+), 26 deletions(-) (limited to 'lib') diff --git a/lib/ubsan.c b/lib/ubsan.c index 60e108c5c173..c652b4a820cc 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -86,11 +86,13 @@ static bool is_inline_int(struct type_descriptor *type) return bits <= inline_bits; } -static s_max get_signed_val(struct type_descriptor *type, unsigned long val) +static s_max get_signed_val(struct type_descriptor *type, void *val) { if (is_inline_int(type)) { unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type); - return ((s_max)val) << extra_bits >> extra_bits; + unsigned long ulong_val = (unsigned long)val; + + return ((s_max)ulong_val) << extra_bits >> extra_bits; } if (type_bit_width(type) == 64) @@ -99,15 +101,15 @@ static s_max get_signed_val(struct type_descriptor *type, unsigned long val) return *(s_max *)val; } -static bool val_is_negative(struct type_descriptor *type, unsigned long val) +static bool val_is_negative(struct type_descriptor *type, void *val) { return type_is_signed(type) && get_signed_val(type, val) < 0; } -static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val) +static u_max get_unsigned_val(struct type_descriptor *type, void *val) { if (is_inline_int(type)) - return val; + return (unsigned long)val; if (type_bit_width(type) == 64) return *(u64 *)val; @@ -116,7 +118,7 @@ static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val) } static void val_to_string(char *str, size_t size, struct type_descriptor *type, - unsigned long value) + void *value) { if (type_is_int(type)) { if (type_bit_width(type) == 128) { @@ -168,8 +170,8 @@ static void ubsan_epilogue(unsigned long *flags) current->in_ubsan--; } -static void handle_overflow(struct overflow_data *data, unsigned long lhs, - unsigned long rhs, char op) +static void handle_overflow(struct overflow_data *data, void *lhs, + void *rhs, char op) { struct type_descriptor *type = data->type; @@ -196,8 +198,7 @@ static void handle_overflow(struct overflow_data *data, unsigned long lhs, } void __ubsan_handle_add_overflow(struct overflow_data *data, - unsigned long lhs, - unsigned long rhs) + void *lhs, void *rhs) { handle_overflow(data, lhs, rhs, '+'); @@ -205,23 +206,21 @@ void __ubsan_handle_add_overflow(struct overflow_data *data, EXPORT_SYMBOL(__ubsan_handle_add_overflow); void __ubsan_handle_sub_overflow(struct overflow_data *data, - unsigned long lhs, - unsigned long rhs) + void *lhs, void *rhs) { handle_overflow(data, lhs, rhs, '-'); } EXPORT_SYMBOL(__ubsan_handle_sub_overflow); void __ubsan_handle_mul_overflow(struct overflow_data *data, - unsigned long lhs, - unsigned long rhs) + void *lhs, void *rhs) { handle_overflow(data, lhs, rhs, '*'); } EXPORT_SYMBOL(__ubsan_handle_mul_overflow); void __ubsan_handle_negate_overflow(struct overflow_data *data, - unsigned long old_val) + void *old_val) { unsigned long flags; char old_val_str[VALUE_LENGTH]; @@ -242,8 +241,7 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow); void __ubsan_handle_divrem_overflow(struct overflow_data *data, - unsigned long lhs, - unsigned long rhs) + void *lhs, void *rhs) { unsigned long flags; char rhs_val_str[VALUE_LENGTH]; @@ -328,7 +326,7 @@ static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, } void 
__ubsan_handle_type_mismatch(struct type_mismatch_data *data, - unsigned long ptr) + void *ptr) { struct type_mismatch_data_common common_data = { .location = &data->location, @@ -337,12 +335,12 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, .type_check_kind = data->type_check_kind }; - ubsan_type_mismatch_common(&common_data, ptr); + ubsan_type_mismatch_common(&common_data, (unsigned long)ptr); } EXPORT_SYMBOL(__ubsan_handle_type_mismatch); void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data, - unsigned long ptr) + void *ptr) { struct type_mismatch_data_common common_data = { @@ -352,7 +350,7 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data, .type_check_kind = data->type_check_kind }; - ubsan_type_mismatch_common(&common_data, ptr); + ubsan_type_mismatch_common(&common_data, (unsigned long)ptr); } EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); @@ -376,7 +374,7 @@ void __ubsan_handle_nonnull_return(struct nonnull_return_data *data) EXPORT_SYMBOL(__ubsan_handle_nonnull_return); void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, - unsigned long bound) + void *bound) { unsigned long flags; char bound_str[VALUE_LENGTH]; @@ -393,8 +391,7 @@ void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, } EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive); -void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, - unsigned long index) +void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index) { unsigned long flags; char index_str[VALUE_LENGTH]; @@ -412,7 +409,7 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, - unsigned long lhs, unsigned long rhs) + void *lhs, void *rhs) { unsigned long flags; struct type_descriptor *rhs_type = data->rhs_type; @@ -463,7 +460,7 @@ void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, - unsigned long val) + void *val) { unsigned long flags; char val_str[VALUE_LENGTH]; -- cgit v1.2.3 From 50013eadda59b9bbbc48eee40ecbfccefc875cc6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 24 Apr 2019 09:19:25 +0200 Subject: mm/uaccess: Use 'unsigned long' to placate UBSAN warnings on older GCC versions [ Upstream commit 29da93fea3ea39ab9b12270cc6be1b70ef201c9e ] Randy reported objtool triggered on his (GCC-7.4) build: lib/strncpy_from_user.o: warning: objtool: strncpy_from_user()+0x315: call to __ubsan_handle_add_overflow() with UACCESS enabled lib/strnlen_user.o: warning: objtool: strnlen_user()+0x337: call to __ubsan_handle_sub_overflow() with UACCESS enabled This is due to UBSAN generating signed-overflow-UB warnings where it should not. Prior to GCC-8 UBSAN ignored -fwrapv (which the kernel uses through -fno-strict-overflow). Make the functions use 'unsigned long' throughout. 
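
For reference, a minimal illustration (not from the patch) of why the
choice of type matters here:

  /* Signed overflow is undefined behaviour in C, so UBSAN instruments
   * signed add/sub with __ubsan_handle_*_overflow() calls - and before
   * GCC 8 it did so even with -fno-strict-overflow/-fwrapv in effect.
   * Unsigned arithmetic wraps modulo 2^N with defined semantics, so no
   * handler call is emitted for it.
   */
  static long may_trip_ubsan(long x)
  {
          return x + 1;   /* LONG_MAX + 1 is UB: UBSAN adds a check */
  }

  static unsigned long wraps_fine(unsigned long x)
  {
          return x + 1;   /* ULONG_MAX + 1 == 0, defined, no UBSAN hook */
  }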
Reported-by: Randy Dunlap Signed-off-by: Peter Zijlstra (Intel) Acked-by: Randy Dunlap # build-tested Acked-by: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: luto@kernel.org Link: http://lkml.kernel.org/r/20190424072208.754094071@infradead.org Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin --- lib/strncpy_from_user.c | 5 +++-- lib/strnlen_user.c | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index 7e35fc450c5b..5a07f19059c3 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -22,10 +22,11 @@ * hit it), 'max' is the address space maximum (and we return * -EFAULT if we hit it). */ -static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max) +static inline long do_strncpy_from_user(char *dst, const char __user *src, + unsigned long count, unsigned long max) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; - long res = 0; + unsigned long res = 0; /* * Truncate 'max' to the user-specified limit, so that diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 8e105ed4df12..9ff4f3bbb1aa 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -27,7 +27,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; - long align, res = 0; + unsigned long align, res = 0; unsigned long c; /* @@ -41,7 +41,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, * Do everything aligned. But that means that we * need to also expand the maximum.. */ - align = (sizeof(long) - 1) & (unsigned long)src; + align = (sizeof(unsigned long) - 1) & (unsigned long)src; src -= align; max += align; -- cgit v1.2.3 From ea68394342605bad4a1c2bc8c30de4209b4ffac3 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 24 Jun 2019 18:32:26 +0800 Subject: lib/mpi: Fix karactx leak in mpi_powm commit c8ea9fce2baf7b643384f36f29e4194fa40d33a6 upstream. Sometimes mpi_powm will leak karactx because a memory allocation failure causes a bail-out that skips the freeing of karactx. This patch moves the freeing of karactx to the end of the function like everything else so that it can't be skipped. 
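
Reduced to a sketch (heavily simplified, and the allocation helper below
is made up, standing in for the real limb-space allocation), the resulting
shape of the function is the usual init-at-declaration plus single-exit
cleanup, as the hunk below implements:

  static int powm_like_sketch(void)
  {
          struct karatsuba_ctx karactx = {};      /* zero-init up front */
          int rc = -ENOMEM;
          void *xp;

          xp = fictional_alloc_limb_space();      /* illustrative only */
          if (!xp)
                  goto out;                       /* cleanup still runs */

          /* ... exponentiation work that may populate karactx ... */
          rc = 0;
  out:
          mpihelp_release_karatsuba_ctx(&karactx);  /* single exit path */
          return rc;
  }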
Reported-by: syzbot+f7baccc38dcc1e094e77@syzkaller.appspotmail.com Fixes: cdec9cb5167a ("crypto: GnuPG based MPI lib - source files...") Cc: Signed-off-by: Herbert Xu Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- lib/mpi/mpi-pow.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c index 468fb7cd1221..edf345b7f06b 100644 --- a/lib/mpi/mpi-pow.c +++ b/lib/mpi/mpi-pow.c @@ -37,6 +37,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) { mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL; + struct karatsuba_ctx karactx = {}; mpi_ptr_t xp_marker = NULL; mpi_ptr_t tspace = NULL; mpi_ptr_t rp, ep, mp, bp; @@ -164,13 +165,11 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) int c; mpi_limb_t e; mpi_limb_t carry_limb; - struct karatsuba_ctx karactx; xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1)); if (!xp) goto enomem; - memset(&karactx, 0, sizeof karactx); negative_result = (ep[0] & 1) && base->sign; i = esize - 1; @@ -295,8 +294,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) if (mod_shift_cnt) mpihelp_rshift(rp, rp, rsize, mod_shift_cnt); MPN_NORMALIZE(rp, rsize); - - mpihelp_release_karatsuba_ctx(&karactx); } if (negative_result && rsize) { @@ -313,6 +310,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) leave: rc = 0; enomem: + mpihelp_release_karatsuba_ctx(&karactx); if (assign_rp) mpi_assign_limb_space(res, rp, size); if (mp_marker) -- cgit v1.2.3 From 5c345e2afe52d23499c861999894ba564486beb0 Mon Sep 17 00:00:00 2001 From: Ferdinand Blomqvist Date: Thu, 20 Jun 2019 17:10:34 +0300 Subject: rslib: Fix decoding of shortened codes [ Upstream commit 2034a42d1747fc1e1eeef2c6f1789c4d0762cb9c ] The decoding of shortenend codes is broken. It only works as expected if there are no erasures. When decoding with erasures, Lambda (the error and erasure locator polynomial) is initialized from the given erasure positions. The pad parameter is not accounted for by the initialisation code, and hence Lambda is initialized from incorrect erasure positions. The fix is to adjust the erasure positions by the supplied pad. Signed-off-by: Ferdinand Blomqvist Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20190620141039.9874-3-ferdinand.blomqvist@gmail.com Signed-off-by: Sasha Levin --- lib/reed_solomon/decode_rs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c index 0ec3f257ffdf..8eed0f9ac495 100644 --- a/lib/reed_solomon/decode_rs.c +++ b/lib/reed_solomon/decode_rs.c @@ -99,9 +99,9 @@ if (no_eras > 0) { /* Init lambda to be the erasure locator polynomial */ lambda[1] = alpha_to[rs_modnn(rs, - prim * (nn - 1 - eras_pos[0]))]; + prim * (nn - 1 - (eras_pos[0] + pad)))]; for (i = 1; i < no_eras; i++) { - u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i])); + u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad))); for (j = i + 1; j > 0; j--) { tmp = index_of[lambda[j - 1]]; if (tmp != nn) { -- cgit v1.2.3 From 2419d391d84366bb330c844220a1efd5a89d6a14 Mon Sep 17 00:00:00 2001 From: Ferdinand Blomqvist Date: Thu, 20 Jun 2019 17:10:37 +0300 Subject: rslib: Fix handling of of caller provided syndrome [ Upstream commit ef4d6a8556b637ad27c8c2a2cff1dda3da38e9a9 ] Check if the syndrome provided by the caller is zero, and act accordingly. 
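
A stand-alone sketch of that check (the in-tree code open-codes the same
loop, as the hunk below shows; a syndrome supplied in index form uses the
value nn to encode zero):

  #include <stdbool.h>
  #include <stdint.h>

  /* An error-free codeword has an all-zero syndrome, which in index
   * form means every entry equals nn. */
  static bool syndrome_is_zero(const uint16_t *s, int nroots, int nn)
  {
          int i;

          for (i = 0; i < nroots; i++)
                  if (s[i] != nn)
                          return false;

          return true;
  }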
Signed-off-by: Ferdinand Blomqvist Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20190620141039.9874-6-ferdinand.blomqvist@gmail.com Signed-off-by: Sasha Levin --- lib/reed_solomon/decode_rs.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c index 8eed0f9ac495..a5d313381539 100644 --- a/lib/reed_solomon/decode_rs.c +++ b/lib/reed_solomon/decode_rs.c @@ -42,8 +42,18 @@ BUG_ON(pad < 0 || pad >= nn); /* Does the caller provide the syndrome ? */ - if (s != NULL) - goto decode; + if (s != NULL) { + for (i = 0; i < nroots; i++) { + /* The syndrome is in index form, + * so nn represents zero + */ + if (s[i] != nn) + goto decode; + } + + /* syndrome is zero, no errors to correct */ + return 0; + } /* form the syndromes; i.e., evaluate data(x) at roots of * g(x) */ -- cgit v1.2.3 From 8fc18666c06ade5f132a995a7e1806f43b6bbda6 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 24 Jun 2019 07:20:14 +0000 Subject: lib/scatterlist: Fix mapping iterator when sg->offset is greater than PAGE_SIZE commit aeb87246537a83c2aff482f3f34a2e0991e02cbc upstream. All mapping iterator logic is based on the assumption that sg->offset is always lower than PAGE_SIZE. But there are situations where sg->offset is such that the SG item is on the second page. In that case sg_copy_to_buffer() fails properly copying the data into the buffer. One of the reason is that the data will be outside the kmapped area used to access that data. This patch fixes the issue by adjusting the mapping iterator offset and pgoffset fields such that offset is always lower than PAGE_SIZE. Signed-off-by: Christophe Leroy Fixes: 4225fc8555a9 ("lib/scatterlist: use page iterator in the mapping iterator") Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- lib/scatterlist.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 004fc70fc56a..a854cc39f084 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -496,17 +496,18 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter) { if (!miter->__remaining) { struct scatterlist *sg; - unsigned long pgoffset; if (!__sg_page_iter_next(&miter->piter)) return false; sg = miter->piter.sg; - pgoffset = miter->piter.sg_pgoffset; - miter->__offset = pgoffset ? 0 : sg->offset; + miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset; + miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT; + miter->__offset &= PAGE_SIZE - 1; miter->__remaining = sg->offset + sg->length - - (pgoffset << PAGE_SHIFT) - miter->__offset; + (miter->piter.sg_pgoffset << PAGE_SHIFT) - + miter->__offset; miter->__remaining = min_t(unsigned long, miter->__remaining, PAGE_SIZE - miter->__offset); } -- cgit v1.2.3 From 8dd8b4d7a00d9795e4451c170f5e2aaac2f5aaef Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Thu, 1 Feb 2018 21:00:50 +0300 Subject: lib/strscpy: Shut up KASAN false-positives in strscpy() [ Upstream commit 1a3241ff10d038ecd096d03380327f2a0b5840a6 ] strscpy() performs the word-at-a-time optimistic reads. So it may may access the memory past the end of the object, which is perfectly fine since strscpy() doesn't use that (past-the-end) data and makes sure the optimistic read won't cross a page boundary. Use new read_word_at_a_time() to shut up the KASAN. Note that this potentially could hide some bugs. 
In example bellow, stscpy() will copy more than we should (1-3 extra uninitialized bytes): char dst[8]; char *src; src = kmalloc(5, GFP_KERNEL); memset(src, 0xff, 5); strscpy(dst, src, 8); Signed-off-by: Andrey Ryabinin Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- lib/string.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/string.c b/lib/string.c index 1cd9757291b1..8f1a2a04e22f 100644 --- a/lib/string.c +++ b/lib/string.c @@ -202,7 +202,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count) while (max >= sizeof(unsigned long)) { unsigned long c, data; - c = *(unsigned long *)(src+res); + c = read_word_at_a_time(src+res); if (has_zero(c, &data, &constants)) { data = prep_zero_mask(c, data, &constants); data = create_zero_mask(data); -- cgit v1.2.3 From 53e054b3cd1bd3dde2212c3e279ab4a3eefac6bb Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sat, 17 Aug 2019 00:01:12 +0100 Subject: siphash: add cryptographically secure PRF commit 2c956a60778cbb6a27e0c7a8a52a91378c90e1d1 upstream. SipHash is a 64-bit keyed hash function that is actually a cryptographically secure PRF, like HMAC. Except SipHash is super fast, and is meant to be used as a hashtable keyed lookup function, or as a general PRF for short input use cases, such as sequence numbers or RNG chaining. For the first usage: There are a variety of attacks known as "hashtable poisoning" in which an attacker forms some data such that the hash of that data will be the same, and then preceeds to fill up all entries of a hashbucket. This is a realistic and well-known denial-of-service vector. Currently hashtables use jhash, which is fast but not secure, and some kind of rotating key scheme (or none at all, which isn't good). SipHash is meant as a replacement for jhash in these cases. There are a modicum of places in the kernel that are vulnerable to hashtable poisoning attacks, either via userspace vectors or network vectors, and there's not a reliable mechanism inside the kernel at the moment to fix it. The first step toward fixing these issues is actually getting a secure primitive into the kernel for developers to use. Then we can, bit by bit, port things over to it as deemed appropriate. While SipHash is extremely fast for a cryptographically secure function, it is likely a bit slower than the insecure jhash, and so replacements will be evaluated on a case-by-case basis based on whether or not the difference in speed is negligible and whether or not the current jhash usage poses a real security risk. For the second usage: A few places in the kernel are using MD5 or SHA1 for creating secure sequence numbers, syn cookies, port numbers, or fast random numbers. SipHash is a faster and more fitting, and more secure replacement for MD5 in those situations. Replacing MD5 and SHA1 with SipHash for these uses is obvious and straight-forward, and so is submitted along with this patch series. There shouldn't be much of a debate over its efficacy. Dozens of languages are already using this internally for their hash tables and PRFs. Some of the BSDs already use this in their kernels. SipHash is a widely known high-speed solution to a widely known set of problems, and it's time we catch-up. Signed-off-by: Jason A. Donenfeld Reviewed-by: Jean-Philippe Aumasson Cc: Linus Torvalds Cc: Eric Biggers Cc: David Laight Cc: Eric Dumazet Signed-off-by: David S. 
Miller [bwh: Backported to 4.9 as dependency of commits df453700e8d8 "inet: switch IP ID generator to siphash" and 3c79107631db "netfilter: ctnetlink: don't use conntrack/expect object addresses as id"] Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- lib/Kconfig.debug | 6 +- lib/Makefile | 5 +- lib/siphash.c | 232 +++++++++++++++++++++++++++++++++++++++++++++++++++++ lib/test_siphash.c | 131 ++++++++++++++++++++++++++++++ 4 files changed, 369 insertions(+), 5 deletions(-) create mode 100644 lib/siphash.c create mode 100644 lib/test_siphash.c (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 58a22ca10f33..4f561860bf41 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1822,9 +1822,9 @@ config TEST_HASH tristate "Perform selftest on hash functions" default n help - Enable this option to test the kernel's integer () - and string () hash functions on boot - (or module load). + Enable this option to test the kernel's integer (), + string (), and siphash () + hash functions on boot (or module load). This is intended to help people writing architecture-specific optimized versions. If unsure, say N. diff --git a/lib/Makefile b/lib/Makefile index 2447a218fff8..452d2956a5a2 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -22,7 +22,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ sha1.o chacha20.o md5.o irq_regs.o argv_split.o \ flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ - earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o win_minmax.o + earlycpio.o seq_buf.o siphash.o \ + nmi_backtrace.o nodemask.o win_minmax.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o @@ -44,7 +45,7 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o obj-y += kstrtox.o obj-$(CONFIG_TEST_BPF) += test_bpf.o obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o -obj-$(CONFIG_TEST_HASH) += test_hash.o +obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o obj-$(CONFIG_TEST_KASAN) += test_kasan.o CFLAGS_test_kasan.o += -fno-builtin obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o diff --git a/lib/siphash.c b/lib/siphash.c new file mode 100644 index 000000000000..c43cf406e71b --- /dev/null +++ b/lib/siphash.c @@ -0,0 +1,232 @@ +/* Copyright (C) 2016 Jason A. Donenfeld . All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. + * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4. 
+ */ + +#include +#include + +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 +#include +#include +#endif + +#define SIPROUND \ + do { \ + v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \ + v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \ + v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \ + v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \ + } while (0) + +#define PREAMBLE(len) \ + u64 v0 = 0x736f6d6570736575ULL; \ + u64 v1 = 0x646f72616e646f6dULL; \ + u64 v2 = 0x6c7967656e657261ULL; \ + u64 v3 = 0x7465646279746573ULL; \ + u64 b = ((u64)(len)) << 56; \ + v3 ^= key->key[1]; \ + v2 ^= key->key[0]; \ + v1 ^= key->key[1]; \ + v0 ^= key->key[0]; + +#define POSTAMBLE \ + v3 ^= b; \ + SIPROUND; \ + SIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + +u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + PREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = le64_to_cpup(data); + v3 ^= m; + SIPROUND; + SIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; + case 4: b |= le32_to_cpup(data); break; + case 3: b |= ((u64)end[2]) << 16; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } +#endif + POSTAMBLE +} +EXPORT_SYMBOL(__siphash_aligned); + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + PREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = get_unaligned_le64(data); + v3 ^= m; + SIPROUND; + SIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; + case 4: b |= get_unaligned_le32(end); break; + case 3: b |= ((u64)end[2]) << 16; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } +#endif + POSTAMBLE +} +EXPORT_SYMBOL(__siphash_unaligned); +#endif + +/** + * siphash_1u64 - compute 64-bit siphash PRF value of a u64 + * @first: first u64 + * @key: the siphash key + */ +u64 siphash_1u64(const u64 first, const siphash_key_t *key) +{ + PREAMBLE(8) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + POSTAMBLE +} +EXPORT_SYMBOL(siphash_1u64); + +/** + * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64 + * @first: first u64 + * @second: second u64 + * @key: the siphash key + */ +u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key) +{ + PREAMBLE(16) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + POSTAMBLE +} +EXPORT_SYMBOL(siphash_2u64); + +/** + * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64 + * @first: first u64 + * @second: second u64 + * @third: third u64 + * @key: the siphash key + */ +u64 siphash_3u64(const u64 first, const u64 second, const u64 third, + const 
siphash_key_t *key) +{ + PREAMBLE(24) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + v3 ^= third; + SIPROUND; + SIPROUND; + v0 ^= third; + POSTAMBLE +} +EXPORT_SYMBOL(siphash_3u64); + +/** + * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64 + * @first: first u64 + * @second: second u64 + * @third: third u64 + * @forth: forth u64 + * @key: the siphash key + */ +u64 siphash_4u64(const u64 first, const u64 second, const u64 third, + const u64 forth, const siphash_key_t *key) +{ + PREAMBLE(32) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + v3 ^= third; + SIPROUND; + SIPROUND; + v0 ^= third; + v3 ^= forth; + SIPROUND; + SIPROUND; + v0 ^= forth; + POSTAMBLE +} +EXPORT_SYMBOL(siphash_4u64); + +u64 siphash_1u32(const u32 first, const siphash_key_t *key) +{ + PREAMBLE(4) + b |= first; + POSTAMBLE +} +EXPORT_SYMBOL(siphash_1u32); + +u64 siphash_3u32(const u32 first, const u32 second, const u32 third, + const siphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + PREAMBLE(12) + v3 ^= combined; + SIPROUND; + SIPROUND; + v0 ^= combined; + b |= third; + POSTAMBLE +} +EXPORT_SYMBOL(siphash_3u32); diff --git a/lib/test_siphash.c b/lib/test_siphash.c new file mode 100644 index 000000000000..d972acfc15e4 --- /dev/null +++ b/lib/test_siphash.c @@ -0,0 +1,131 @@ +/* Test cases for siphash.c + * + * Copyright (C) 2016 Jason A. Donenfeld . All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. + * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +/* Test vectors taken from official reference source available at: + * https://131002.net/siphash/siphash24.c + */ + +static const siphash_key_t test_key_siphash = + {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }}; + +static const u64 test_vectors_siphash[64] = { + 0x726fdb47dd0e0e31ULL, 0x74f839c593dc67fdULL, 0x0d6c8009d9a94f5aULL, + 0x85676696d7fb7e2dULL, 0xcf2794e0277187b7ULL, 0x18765564cd99a68dULL, + 0xcbc9466e58fee3ceULL, 0xab0200f58b01d137ULL, 0x93f5f5799a932462ULL, + 0x9e0082df0ba9e4b0ULL, 0x7a5dbbc594ddb9f3ULL, 0xf4b32f46226bada7ULL, + 0x751e8fbc860ee5fbULL, 0x14ea5627c0843d90ULL, 0xf723ca908e7af2eeULL, + 0xa129ca6149be45e5ULL, 0x3f2acc7f57c29bdbULL, 0x699ae9f52cbe4794ULL, + 0x4bc1b3f0968dd39cULL, 0xbb6dc91da77961bdULL, 0xbed65cf21aa2ee98ULL, + 0xd0f2cbb02e3b67c7ULL, 0x93536795e3a33e88ULL, 0xa80c038ccd5ccec8ULL, + 0xb8ad50c6f649af94ULL, 0xbce192de8a85b8eaULL, 0x17d835b85bbb15f3ULL, + 0x2f2e6163076bcfadULL, 0xde4daaaca71dc9a5ULL, 0xa6a2506687956571ULL, + 0xad87a3535c49ef28ULL, 0x32d892fad841c342ULL, 0x7127512f72f27cceULL, + 0xa7f32346f95978e3ULL, 0x12e0b01abb051238ULL, 0x15e034d40fa197aeULL, + 0x314dffbe0815a3b4ULL, 0x027990f029623981ULL, 0xcadcd4e59ef40c4dULL, + 0x9abfd8766a33735cULL, 0x0e3ea96b5304a7d0ULL, 0xad0c42d6fc585992ULL, + 0x187306c89bc215a9ULL, 0xd4a60abcf3792b95ULL, 0xf935451de4f21df2ULL, + 0xa9538f0419755787ULL, 0xdb9acddff56ca510ULL, 0xd06c98cd5c0975ebULL, + 0xe612a3cb9ecba951ULL, 0xc766e62cfcadaf96ULL, 0xee64435a9752fe72ULL, + 0xa192d576b245165aULL, 0x0a8787bf8ecb74b2ULL, 0x81b3e73d20b49b6fULL, + 0x7fa8220ba3b2eceaULL, 0x245731c13ca42499ULL, 0xb78dbfaf3a8d83bdULL, + 0xea1ad565322a1a0bULL, 0x60e61c23a3795013ULL, 0x6606d7e446282b93ULL, + 0x6ca4ecb15c5f91e1ULL, 0x9f626da15c9625f3ULL, 
0xe51b38608ef25f57ULL, + 0x958a324ceb064572ULL +}; + +static int __init siphash_test_init(void) +{ + u8 in[64] __aligned(SIPHASH_ALIGNMENT); + u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT); + u8 i; + int ret = 0; + + for (i = 0; i < 64; ++i) { + in[i] = i; + in_unaligned[i + 1] = i; + if (siphash(in, i, &test_key_siphash) != + test_vectors_siphash[i]) { + pr_info("siphash self-test aligned %u: FAIL\n", i + 1); + ret = -EINVAL; + } + if (siphash(in_unaligned + 1, i, &test_key_siphash) != + test_vectors_siphash[i]) { + pr_info("siphash self-test unaligned %u: FAIL\n", i + 1); + ret = -EINVAL; + } + } + if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) != + test_vectors_siphash[8]) { + pr_info("siphash self-test 1u64: FAIL\n"); + ret = -EINVAL; + } + if (siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL, + &test_key_siphash) != test_vectors_siphash[16]) { + pr_info("siphash self-test 2u64: FAIL\n"); + ret = -EINVAL; + } + if (siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL, + 0x1716151413121110ULL, &test_key_siphash) != + test_vectors_siphash[24]) { + pr_info("siphash self-test 3u64: FAIL\n"); + ret = -EINVAL; + } + if (siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL, + 0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL, + &test_key_siphash) != test_vectors_siphash[32]) { + pr_info("siphash self-test 4u64: FAIL\n"); + ret = -EINVAL; + } + if (siphash_1u32(0x03020100U, &test_key_siphash) != + test_vectors_siphash[4]) { + pr_info("siphash self-test 1u32: FAIL\n"); + ret = -EINVAL; + } + if (siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash) != + test_vectors_siphash[8]) { + pr_info("siphash self-test 2u32: FAIL\n"); + ret = -EINVAL; + } + if (siphash_3u32(0x03020100U, 0x07060504U, + 0x0b0a0908U, &test_key_siphash) != + test_vectors_siphash[12]) { + pr_info("siphash self-test 3u32: FAIL\n"); + ret = -EINVAL; + } + if (siphash_4u32(0x03020100U, 0x07060504U, + 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash) != + test_vectors_siphash[16]) { + pr_info("siphash self-test 4u32: FAIL\n"); + ret = -EINVAL; + } + if (!ret) + pr_info("self-tests: pass\n"); + return ret; +} + +static void __exit siphash_test_exit(void) +{ +} + +module_init(siphash_test_init); +module_exit(siphash_test_exit); + +MODULE_AUTHOR("Jason A. Donenfeld "); +MODULE_LICENSE("Dual BSD/GPL"); -- cgit v1.2.3 From 175a407ce432088d827b822b8a47afd8360a8dbe Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sat, 17 Aug 2019 00:01:19 +0100 Subject: siphash: implement HalfSipHash1-3 for hash tables commit 1ae2324f732c9c4e2fa4ebd885fa1001b70d52e1 upstream. HalfSipHash, or hsiphash, is a shortened version of SipHash, which generates 32-bit outputs using a weaker 64-bit key. It has *much* lower security margins, and shouldn't be used for anything too sensitive, but it could be used as a hashtable key function replacement, if the output is never exposed, and if the security requirement is not too high. The goal is to make this something that performance-critical jhash users would be willing to use. On 64-bit machines, HalfSipHash1-3 is slower than SipHash1-3, so we alias SipHash1-3 to HalfSipHash1-3 on those systems. 
64-bit x86_64: [ 0.509409] test_siphash: SipHash2-4 cycles: 4049181 [ 0.510650] test_siphash: SipHash1-3 cycles: 2512884 [ 0.512205] test_siphash: HalfSipHash1-3 cycles: 3429920 [ 0.512904] test_siphash: JenkinsHash cycles: 978267 So, we map hsiphash() -> SipHash1-3 32-bit x86: [ 0.509868] test_siphash: SipHash2-4 cycles: 14812892 [ 0.513601] test_siphash: SipHash1-3 cycles: 9510710 [ 0.515263] test_siphash: HalfSipHash1-3 cycles: 3856157 [ 0.515952] test_siphash: JenkinsHash cycles: 1148567 So, we map hsiphash() -> HalfSipHash1-3 hsiphash() is roughly 3 times slower than jhash(), but comes with a considerable security improvement. Signed-off-by: Jason A. Donenfeld Reviewed-by: Jean-Philippe Aumasson Signed-off-by: David S. Miller [bwh: Backported to 4.9 to avoid regression for WireGuard with only half the siphash API present] Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- lib/siphash.c | 321 ++++++++++++++++++++++++++++++++++++++++++++++++++++- lib/test_siphash.c | 98 +++++++++++++++- 2 files changed, 415 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/siphash.c b/lib/siphash.c index c43cf406e71b..3ae58b4edad6 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -5,7 +5,9 @@ * SipHash: a fast short-input PRF * https://131002.net/siphash/ * - * This implementation is specifically for SipHash2-4. + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. */ #include @@ -230,3 +232,320 @@ u64 siphash_3u32(const u32 first, const u32 second, const u32 third, POSTAMBLE } EXPORT_SYMBOL(siphash_3u32); + +#if BITS_PER_LONG == 64 +/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for + * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3. 
+ */ + +#define HSIPROUND SIPROUND +#define HPREAMBLE(len) PREAMBLE(len) +#define HPOSTAMBLE \ + v3 ^= b; \ + HSIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + HSIPROUND; \ + HSIPROUND; \ + HSIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + +u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = le64_to_cpup(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; + case 4: b |= le32_to_cpup(data); break; + case 3: b |= ((u64)end[2]) << 16; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } +#endif + HPOSTAMBLE +} +EXPORT_SYMBOL(__hsiphash_aligned); + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = get_unaligned_le64(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; + case 4: b |= get_unaligned_le32(end); break; + case 3: b |= ((u64)end[2]) << 16; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } +#endif + HPOSTAMBLE +} +EXPORT_SYMBOL(__hsiphash_unaligned); +#endif + +/** + * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 + * @first: first u32 + * @key: the hsiphash key + */ +u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) +{ + HPREAMBLE(4) + b |= first; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_1u32); + +/** + * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 + * @first: first u32 + * @second: second u32 + * @key: the hsiphash key + */ +u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(8) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_2u32); + +/** + * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @key: the hsiphash key + */ +u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, + const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(12) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + b |= third; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_3u32); + +/** + * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @forth: forth u32 + * @key: the hsiphash key + */ +u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, + const u32 forth, const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(16) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + combined = (u64)forth << 32 | third; + v3 ^= combined; + HSIPROUND; 
+ v0 ^= combined; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_4u32); +#else +#define HSIPROUND \ + do { \ + v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \ + v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \ + v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \ + v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \ + } while (0) + +#define HPREAMBLE(len) \ + u32 v0 = 0; \ + u32 v1 = 0; \ + u32 v2 = 0x6c796765U; \ + u32 v3 = 0x74656462U; \ + u32 b = ((u32)(len)) << 24; \ + v3 ^= key->key[1]; \ + v2 ^= key->key[0]; \ + v1 ^= key->key[1]; \ + v0 ^= key->key[0]; + +#define HPOSTAMBLE \ + v3 ^= b; \ + HSIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + HSIPROUND; \ + HSIPROUND; \ + HSIPROUND; \ + return v1 ^ v3; + +u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u32)); + const u8 left = len & (sizeof(u32) - 1); + u32 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u32)) { + m = le32_to_cpup(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } + switch (left) { + case 3: b |= ((u32)end[2]) << 16; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } + HPOSTAMBLE +} +EXPORT_SYMBOL(__hsiphash_aligned); + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u32)); + const u8 left = len & (sizeof(u32) - 1); + u32 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u32)) { + m = get_unaligned_le32(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } + switch (left) { + case 3: b |= ((u32)end[2]) << 16; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } + HPOSTAMBLE +} +EXPORT_SYMBOL(__hsiphash_unaligned); +#endif + +/** + * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 + * @first: first u32 + * @key: the hsiphash key + */ +u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) +{ + HPREAMBLE(4) + v3 ^= first; + HSIPROUND; + v0 ^= first; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_1u32); + +/** + * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 + * @first: first u32 + * @second: second u32 + * @key: the hsiphash key + */ +u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) +{ + HPREAMBLE(8) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_2u32); + +/** + * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @key: the hsiphash key + */ +u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, + const hsiphash_key_t *key) +{ + HPREAMBLE(12) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + v3 ^= third; + HSIPROUND; + v0 ^= third; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_3u32); + +/** + * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @forth: forth u32 + * @key: the hsiphash key + */ +u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, + const u32 forth, const hsiphash_key_t *key) +{ + HPREAMBLE(16) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + v3 ^= third; + HSIPROUND; + v0 ^= third; + v3 ^= forth; + HSIPROUND; + v0 ^= forth; + HPOSTAMBLE +} +EXPORT_SYMBOL(hsiphash_4u32); +#endif diff --git a/lib/test_siphash.c b/lib/test_siphash.c index d972acfc15e4..a6d854d933bf 
100644 --- a/lib/test_siphash.c +++ b/lib/test_siphash.c @@ -7,7 +7,9 @@ * SipHash: a fast short-input PRF * https://131002.net/siphash/ * - * This implementation is specifically for SipHash2-4. + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -18,8 +20,8 @@ #include #include -/* Test vectors taken from official reference source available at: - * https://131002.net/siphash/siphash24.c +/* Test vectors taken from reference source available at: + * https://github.com/veorq/SipHash */ static const siphash_key_t test_key_siphash = @@ -50,6 +52,64 @@ static const u64 test_vectors_siphash[64] = { 0x958a324ceb064572ULL }; +#if BITS_PER_LONG == 64 +static const hsiphash_key_t test_key_hsiphash = + {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }}; + +static const u32 test_vectors_hsiphash[64] = { + 0x050fc4dcU, 0x7d57ca93U, 0x4dc7d44dU, + 0xe7ddf7fbU, 0x88d38328U, 0x49533b67U, + 0xc59f22a7U, 0x9bb11140U, 0x8d299a8eU, + 0x6c063de4U, 0x92ff097fU, 0xf94dc352U, + 0x57b4d9a2U, 0x1229ffa7U, 0xc0f95d34U, + 0x2a519956U, 0x7d908b66U, 0x63dbd80cU, + 0xb473e63eU, 0x8d297d1cU, 0xa6cce040U, + 0x2b45f844U, 0xa320872eU, 0xdae6c123U, + 0x67349c8cU, 0x705b0979U, 0xca9913a5U, + 0x4ade3b35U, 0xef6cd00dU, 0x4ab1e1f4U, + 0x43c5e663U, 0x8c21d1bcU, 0x16a7b60dU, + 0x7a8ff9bfU, 0x1f2a753eU, 0xbf186b91U, + 0xada26206U, 0xa3c33057U, 0xae3a36a1U, + 0x7b108392U, 0x99e41531U, 0x3f1ad944U, + 0xc8138825U, 0xc28949a6U, 0xfaf8876bU, + 0x9f042196U, 0x68b1d623U, 0x8b5114fdU, + 0xdf074c46U, 0x12cc86b3U, 0x0a52098fU, + 0x9d292f9aU, 0xa2f41f12U, 0x43a71ed0U, + 0x73f0bce6U, 0x70a7e980U, 0x243c6d75U, + 0xfdb71513U, 0xa67d8a08U, 0xb7e8f148U, + 0xf7a644eeU, 0x0f1837f2U, 0x4b6694e0U, + 0xb7bbb3a8U +}; +#else +static const hsiphash_key_t test_key_hsiphash = + {{ 0x03020100U, 0x07060504U }}; + +static const u32 test_vectors_hsiphash[64] = { + 0x5814c896U, 0xe7e864caU, 0xbc4b0e30U, + 0x01539939U, 0x7e059ea6U, 0x88e3d89bU, + 0xa0080b65U, 0x9d38d9d6U, 0x577999b1U, + 0xc839caedU, 0xe4fa32cfU, 0x959246eeU, + 0x6b28096cU, 0x66dd9cd6U, 0x16658a7cU, + 0xd0257b04U, 0x8b31d501U, 0x2b1cd04bU, + 0x06712339U, 0x522aca67U, 0x911bb605U, + 0x90a65f0eU, 0xf826ef7bU, 0x62512debU, + 0x57150ad7U, 0x5d473507U, 0x1ec47442U, + 0xab64afd3U, 0x0a4100d0U, 0x6d2ce652U, + 0x2331b6a3U, 0x08d8791aU, 0xbc6dda8dU, + 0xe0f6c934U, 0xb0652033U, 0x9b9851ccU, + 0x7c46fb7fU, 0x732ba8cbU, 0xf142997aU, + 0xfcc9aa1bU, 0x05327eb2U, 0xe110131cU, + 0xf9e5e7c0U, 0xa7d708a6U, 0x11795ab1U, + 0x65671619U, 0x9f5fff91U, 0xd89c5267U, + 0x007783ebU, 0x95766243U, 0xab639262U, + 0x9c7e1390U, 0xc368dda6U, 0x38ddc455U, + 0xfa13d379U, 0x979ea4e8U, 0x53ecd77eU, + 0x2ee80657U, 0x33dbb66aU, 0xae3f0577U, + 0x88b4c4ccU, 0x3e7f480bU, 0x74c1ebf8U, + 0x87178304U +}; +#endif + static int __init siphash_test_init(void) { u8 in[64] __aligned(SIPHASH_ALIGNMENT); @@ -70,6 +130,16 @@ static int __init siphash_test_init(void) pr_info("siphash self-test unaligned %u: FAIL\n", i + 1); ret = -EINVAL; } + if (hsiphash(in, i, &test_key_hsiphash) != + test_vectors_hsiphash[i]) { + pr_info("hsiphash self-test aligned %u: FAIL\n", i + 1); + ret = -EINVAL; + } + if (hsiphash(in_unaligned + 1, i, &test_key_hsiphash) != + test_vectors_hsiphash[i]) { + pr_info("hsiphash self-test unaligned %u: FAIL\n", i + 1); + ret = -EINVAL; + } } if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) != test_vectors_siphash[8]) { @@ -115,6 +185,28 @@ static int __init 
siphash_test_init(void) pr_info("siphash self-test 4u32: FAIL\n"); ret = -EINVAL; } + if (hsiphash_1u32(0x03020100U, &test_key_hsiphash) != + test_vectors_hsiphash[4]) { + pr_info("hsiphash self-test 1u32: FAIL\n"); + ret = -EINVAL; + } + if (hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash) != + test_vectors_hsiphash[8]) { + pr_info("hsiphash self-test 2u32: FAIL\n"); + ret = -EINVAL; + } + if (hsiphash_3u32(0x03020100U, 0x07060504U, + 0x0b0a0908U, &test_key_hsiphash) != + test_vectors_hsiphash[12]) { + pr_info("hsiphash self-test 3u32: FAIL\n"); + ret = -EINVAL; + } + if (hsiphash_4u32(0x03020100U, 0x07060504U, + 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash) != + test_vectors_hsiphash[16]) { + pr_info("hsiphash self-test 4u32: FAIL\n"); + ret = -EINVAL; + } if (!ret) pr_info("self-tests: pass\n"); return ret; -- cgit v1.2.3 From 198d33ad87d963a85e425bd2b9d6e8cb4e4c48f3 Mon Sep 17 00:00:00 2001 From: Nicolas Boichat Date: Mon, 23 Sep 2019 15:33:55 -0700 Subject: kmemleak: increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE default to 16K [ Upstream commit b751c52bb587ae66f773b15204ef7a147467f4c7 ] The current default value (400) is too low on many systems (e.g. some ARM64 platform takes up 1000+ entries). syzbot uses 16000 as default value, and has proved to be enough on beefy configurations, so let's pick that value. This consumes more RAM on boot (each entry is 160 bytes, so in total ~2.5MB of RAM), but the memory would later be freed (early_log is __initdata). Link: http://lkml.kernel.org/r/20190730154027.101525-1-drinkcat@chromium.org Signed-off-by: Nicolas Boichat Suggested-by: Dmitry Vyukov Acked-by: Catalin Marinas Acked-by: Dmitry Vyukov Cc: Masahiro Yamada Cc: Kees Cook Cc: Petr Mladek Cc: Thomas Gleixner Cc: Tetsuo Handa Cc: Joe Lawrence Cc: Uladzislau Rezki Cc: Andy Shevchenko Cc: Stephen Rothwell Cc: Andrey Ryabinin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 4f561860bf41..bc5ff3a53d4a 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -535,7 +535,7 @@ config DEBUG_KMEMLEAK_EARLY_LOG_SIZE int "Maximum kmemleak early log entries" depends on DEBUG_KMEMLEAK range 200 40000 - default 400 + default 16000 help Kmemleak must track all the memory allocations to avoid reporting false positives. Since memory may be allocated or -- cgit v1.2.3 From 1bb77722ad2a9ad419066c4a724b77d0ca59b9f4 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Tue, 5 Nov 2019 21:16:57 -0800 Subject: dump_stack: avoid the livelock of the dump_lock commit 5cbf2fff3bba8d3c6a4d47c1754de1cf57e2b01f upstream. In the current code, we use the atomic_cmpxchg() to serialize the output of the dump_stack(), but this implementation suffers the thundering herd problem. We have observed such kind of livelock on a Marvell cn96xx board(24 cpus) when heavily using the dump_stack() in a kprobe handler. Actually we can let the competitors to wait for the releasing of the lock before jumping to atomic_cmpxchg(). This will definitely mitigate the thundering herd problem. Thanks Linus for the suggestion. 
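The scheme described here amounts to a test-and-test-and-set lock: losing CPUs spin on a plain read and only retry the expensive atomic_cmpxchg() once the owner appears to have dropped the lock. A minimal sketch of the idea, written against C11 atomics rather than the kernel's atomic_t API (the function names and user-space setting are illustrative, not the actual implementation):

  /* Illustrative test-and-test-and-set sketch; not the kernel code. */
  #include <stdatomic.h>

  static atomic_int dump_lock = -1;    /* -1 == unlocked, otherwise the owning CPU id */

  static void dump_lock_acquire(int cpu)
  {
      int expected;

  retry:
      expected = -1;
      if (atomic_compare_exchange_strong(&dump_lock, &expected, cpu))
          return;                      /* lock acquired */
      if (expected == cpu)
          return;                      /* nested call on the CPU that already owns it */
      /* Spin on a cheap load first; retry the cmpxchg only once the lock looks free. */
      while (atomic_load(&dump_lock) != -1)
          ;                            /* cpu_relax() in the kernel */
      goto retry;
  }

  static void dump_lock_release(void)
  {
      atomic_store(&dump_lock, -1);
  }

Spinning on the read-only load keeps the lock's cache line shared among the waiters; the read-for-ownership traffic of the cmpxchg then happens once per release instead of continuously.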
[akpm@linux-foundation.org: fix comment] Link: http://lkml.kernel.org/r/20191030031637.6025-1-haokexin@gmail.com Fixes: b58d977432c8 ("dump_stack: serialize the output from dump_stack()") Signed-off-by: Kevin Hao Suggested-by: Linus Torvalds Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- lib/dump_stack.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/dump_stack.c b/lib/dump_stack.c index c30d07e99dba..72de6444934d 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -44,7 +44,12 @@ retry: was_locked = 1; } else { local_irq_restore(flags); - cpu_relax(); + /* + * Wait for the lock to release before jumping to + * atomic_cmpxchg() in order to mitigate the thundering herd + * problem. + */ + do { cpu_relax(); } while (atomic_read(&dump_lock) != -1); goto retry; } -- cgit v1.2.3 From bd5d9cd97ca55bba497a446f7a5d6a47b00869d5 Mon Sep 17 00:00:00 2001 From: Alexey Skidanov Date: Thu, 3 Jan 2019 15:26:44 -0800 Subject: lib/genalloc.c: fix allocation of aligned buffer from non-aligned chunk [ Upstream commit 52fbf1134d479234d7e64ba9dcbaea23405f229e ] gen_pool_alloc_algo() uses different allocation functions implementing different allocation algorithms. With gen_pool_first_fit_align() allocation function, the returned address should be aligned on the requested boundary. If chunk start address isn't aligned on the requested boundary, the returned address isn't aligned too. The only way to get properly aligned address is to initialize the pool with chunks aligned on the requested boundary. If want to have an ability to allocate buffers aligned on different boundaries (for example, 4K, 1MB, ...), the chunk start address should be aligned on the max possible alignment. This happens because gen_pool_first_fit_align() looks for properly aligned memory block without taking into account the chunk start address alignment. To fix this, we provide chunk start address to gen_pool_first_fit_align() and change its implementation such that it starts looking for properly aligned block with appropriate offset (exactly as is done in CMA). 
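Concretely, the chunk's own misalignment has to be folded into the bitmap search as an offset. A simplified sketch of that arithmetic, extracted from the change below (the helper name is invented for illustration):

  /* Simplified sketch of the offset math; the helper name is invented. */
  unsigned long chunk_align_off(unsigned long start_addr,
                                unsigned long align, int min_alloc_order)
  {
      /*
       * Distance from the chunk start back to the previous 'align' boundary,
       * expressed in allocation units of (1 << min_alloc_order) bytes.
       */
      return (start_addr & (align - 1)) >> min_alloc_order;
  }

Handing this offset to bitmap_find_next_zero_area_off() makes the search return a bit index whose address, chunk->start_addr + (index << min_alloc_order), falls on the requested boundary even when the chunk itself does not.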
Link: https://lkml.kernel.org/lkml/a170cf65-6884-3592-1de9-4c235888cc8a@intel.com Link: http://lkml.kernel.org/r/1541690953-4623-1-git-send-email-alexey.skidanov@intel.com Signed-off-by: Alexey Skidanov Reviewed-by: Andrew Morton Cc: Logan Gunthorpe Cc: Daniel Mentz Cc: Mathieu Desnoyers Cc: Laura Abbott Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- lib/genalloc.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/genalloc.c b/lib/genalloc.c index ca06adc4f445..5deb25c40a5a 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -311,7 +311,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, end_bit = chunk_size(chunk) >> order; retry: start_bit = algo(chunk->bits, end_bit, start_bit, - nbits, data, pool); + nbits, data, pool, chunk->start_addr); if (start_bit >= end_bit) continue; remain = bitmap_set_ll(chunk->bits, start_bit, nbits); @@ -525,7 +525,7 @@ EXPORT_SYMBOL(gen_pool_set_algo); */ unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool) + struct gen_pool *pool, unsigned long start_addr) { return bitmap_find_next_zero_area(map, size, start, nr, 0); } @@ -543,16 +543,19 @@ EXPORT_SYMBOL(gen_pool_first_fit); */ unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool) + struct gen_pool *pool, unsigned long start_addr) { struct genpool_data_align *alignment; - unsigned long align_mask; + unsigned long align_mask, align_off; int order; alignment = data; order = pool->min_alloc_order; align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1; - return bitmap_find_next_zero_area(map, size, start, nr, align_mask); + align_off = (start_addr & (alignment->align - 1)) >> order; + + return bitmap_find_next_zero_area_off(map, size, start, nr, + align_mask, align_off); } EXPORT_SYMBOL(gen_pool_first_fit_align); @@ -567,7 +570,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_align); */ unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool) + struct gen_pool *pool, unsigned long start_addr) { struct genpool_data_fixed *fixed_data; int order; @@ -601,7 +604,8 @@ EXPORT_SYMBOL(gen_pool_fixed_alloc); */ unsigned long gen_pool_first_fit_order_align(unsigned long *map, unsigned long size, unsigned long start, - unsigned int nr, void *data, struct gen_pool *pool) + unsigned int nr, void *data, struct gen_pool *pool, + unsigned long start_addr) { unsigned long align_mask = roundup_pow_of_two(nr) - 1; @@ -624,7 +628,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align); */ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool) + struct gen_pool *pool, unsigned long start_addr) { unsigned long start_bit = size; unsigned long len = size + 1; -- cgit v1.2.3 From d84f622a917d3737dcaedf9c981e660e6bfdb19b Mon Sep 17 00:00:00 2001 From: Huang Shijie Date: Thu, 3 Jan 2019 15:26:51 -0800 Subject: lib/genalloc.c: use vzalloc_node() to allocate the bitmap [ Upstream commit 6862d2fc81859f88c1f3f660886427893f2b4f3f ] Some devices may have big memory on chip, such as over 1G. In some cases, the nbytes maybe bigger then 4M which is the bounday of the memory buddy system (4K default). So use vzalloc_node() to allocate the bitmap. 
Also use vfree to free it. Link: http://lkml.kernel.org/r/20181225015701.6289-1-sjhuang@iluvatar.ai Signed-off-by: Huang Shijie Reviewed-by: Andrew Morton Cc: Alexey Skidanov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- lib/genalloc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/genalloc.c b/lib/genalloc.c index 5deb25c40a5a..f365d71cdc77 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -187,7 +187,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy int nbytes = sizeof(struct gen_pool_chunk) + BITS_TO_LONGS(nbits) * sizeof(long); - chunk = kzalloc_node(nbytes, GFP_KERNEL, nid); + chunk = vzalloc_node(nbytes, nid); if (unlikely(chunk == NULL)) return -ENOMEM; @@ -251,7 +251,7 @@ void gen_pool_destroy(struct gen_pool *pool) bit = find_next_bit(chunk->bits, end_bit, 0); BUG_ON(bit < end_bit); - kfree(chunk); + vfree(chunk); } kfree_const(pool->name); kfree(pool); -- cgit v1.2.3 From 4964ffcbac63267a065fb81af62fbeea5da10ee4 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Sat, 5 Jan 2019 13:21:18 -0800 Subject: lib/genalloc.c: include vmalloc.h [ Upstream commit 35004f2e55807a1a1491db24ab512dd2f770a130 ] Fixes build break on most ARM/ARM64 defconfigs: lib/genalloc.c: In function 'gen_pool_add_virt': lib/genalloc.c:190:10: error: implicit declaration of function 'vzalloc_node'; did you mean 'kzalloc_node'? lib/genalloc.c:190:8: warning: assignment to 'struct gen_pool_chunk *' from 'int' makes pointer from integer without a cast [-Wint-conversion] lib/genalloc.c: In function 'gen_pool_destroy': lib/genalloc.c:254:3: error: implicit declaration of function 'vfree'; did you mean 'kfree'? Fixes: 6862d2fc8185 ('lib/genalloc.c: use vzalloc_node() to allocate the bitmap') Cc: Huang Shijie Cc: Andrew Morton Cc: Alexey Skidanov Signed-off-by: Olof Johansson Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- lib/genalloc.c | 1 + 1 file changed, 1 insertion(+) (limited to 'lib') diff --git a/lib/genalloc.c b/lib/genalloc.c index f365d71cdc77..7e85d1e37a6e 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -35,6 +35,7 @@ #include #include #include +#include static inline size_t chunk_size(const struct gen_pool_chunk *chunk) { -- cgit v1.2.3 From 4d1f99ad23b677c15237578f2017a27a8e185064 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 6 Dec 2019 16:26:00 +0100 Subject: lib: raid6: fix awk build warnings commit 702600eef73033ddd4eafcefcbb6560f3e3a90f7 upstream. Newer versions of awk spit out these fun warnings: awk: ../lib/raid6/unroll.awk:16: warning: regexp escape sequence `\#' is not a known regexp operator As commit 700c1018b86d ("x86/insn: Fix awk regexp warnings") showed, it turns out that there are a number of awk strings that do not need to be escaped and newer versions of awk now warn about this. Fix the string up so that no warning is produced. The exact same kernel module gets created before and after this patch, showing that it wasn't needed. 
Link: https://lore.kernel.org/r/20191206152600.GA75093@kroah.com Signed-off-by: Greg Kroah-Hartman --- lib/raid6/unroll.awk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk index c6aa03631df8..0809805a7e23 100644 --- a/lib/raid6/unroll.awk +++ b/lib/raid6/unroll.awk @@ -13,7 +13,7 @@ BEGIN { for (i = 0; i < rep; ++i) { tmp = $0 gsub(/\$\$/, i, tmp) - gsub(/\$\#/, n, tmp) + gsub(/\$#/, n, tmp) gsub(/\$\*/, "$", tmp) print tmp } -- cgit v1.2.3 From bbed8ffa4164ec9a85de6e45e0b9ce553c7d098c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 28 Oct 2019 14:56:46 -0700 Subject: dma-debug: add a schedule point in debug_dma_dump_mappings() [ Upstream commit 9ff6aa027dbb98755f0265695354f2dd07c0d1ce ] debug_dma_dump_mappings() can take a lot of cpu cycles : lpk43:/# time wc -l /sys/kernel/debug/dma-api/dump 163435 /sys/kernel/debug/dma-api/dump real 0m0.463s user 0m0.003s sys 0m0.459s Let's add a cond_resched() to avoid holding cpu for too long. Signed-off-by: Eric Dumazet Cc: Corentin Labbe Cc: Christoph Hellwig Cc: Marek Szyprowski Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin --- lib/dma-debug.c | 1 + 1 file changed, 1 insertion(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 8971370bfb16..4435bec55fb5 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -435,6 +435,7 @@ void debug_dma_dump_mappings(struct device *dev) } spin_unlock_irqrestore(&bucket->lock, flags); + cond_resched(); } } EXPORT_SYMBOL(debug_dma_dump_mappings); -- cgit v1.2.3 From 475c1471f11a455a5332879cb28a3ae03d353598 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 23 Mar 2017 01:37:01 +0100 Subject: kobject: Export kobject_get_unless_zero() commit c70c176ff8c3ff0ac6ef9a831cd591ea9a66bd1a upstream. Make the function available for outside use and fortify it against NULL kobject. CC: Greg Kroah-Hartman Reviewed-by: Bart Van Assche Acked-by: Tejun Heo Signed-off-by: Jan Kara Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- lib/kobject.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/kobject.c b/lib/kobject.c index f58c7f2b229c..bbbb067de8ec 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -599,12 +599,15 @@ struct kobject *kobject_get(struct kobject *kobj) } EXPORT_SYMBOL(kobject_get); -static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj) +struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj) { + if (!kobj) + return NULL; if (!kref_get_unless_zero(&kobj->kref)) kobj = NULL; return kobj; } +EXPORT_SYMBOL(kobject_get_unless_zero); /* * kobject_cleanup - free kobject resources. -- cgit v1.2.3 From db62dded6dcb7c78d65a588882a7f3da39d007e2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 28 Jun 2019 16:59:45 +0200 Subject: devres: allow const resource arguments [ Upstream commit 9dea44c91469512d346e638694c22c30a5273992 ] devm_ioremap_resource() does not currently take 'const' arguments, which results in a warning from the first driver trying to do it anyway: drivers/gpio/gpio-amd-fch.c: In function 'amd_fch_gpio_probe': drivers/gpio/gpio-amd-fch.c:171:49: error: passing argument 2 of 'devm_ioremap_resource' discards 'const' qualifier from pointer target type [-Werror=discarded-qualifiers] priv->base = devm_ioremap_resource(&pdev->dev, &amd_fch_gpio_iores); ^~~~~~~~~~~~~~~~~~~ Change the prototype to allow it, as there is no real reason not to. 
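A sketch of the call pattern that triggered the warning, mirroring the gpio-amd-fch example quoted above but with invented names and register values, now compiles cleanly because the resource table can stay const:

  /* Illustrative only: the device, addresses and sizes are invented. */
  #include <linux/device.h>
  #include <linux/err.h>
  #include <linux/ioport.h>
  #include <linux/platform_device.h>

  static const struct resource example_iores =
          DEFINE_RES_MEM(0xfed80000, 0x100);

  static int example_probe(struct platform_device *pdev)
  {
      void __iomem *base;

      base = devm_ioremap_resource(&pdev->dev, &example_iores);
      if (IS_ERR(base))
          return PTR_ERR(base);
      /* ... map registers, register the device, etc. ... */
      return 0;
  }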
Fixes: 9bb2e0452508 ("gpio: amd: Make resource struct const") Signed-off-by: Arnd Bergmann Link: https://lore.kernel.org/r/20190628150049.1108048-1-arnd@arndb.de Acked-by: Greg Kroah-Hartman Reviwed-By: Enrico Weigelt Signed-off-by: Linus Walleij Signed-off-by: Sasha Levin --- lib/devres.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/devres.c b/lib/devres.c index cb1464c411a2..38912892053c 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -131,7 +131,8 @@ EXPORT_SYMBOL(devm_iounmap); * if (IS_ERR(base)) * return PTR_ERR(base); */ -void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) +void __iomem *devm_ioremap_resource(struct device *dev, + const struct resource *res) { resource_size_t size; const char *name; -- cgit v1.2.3 From 1d06b246fa4338ee3ce12efaba8f5521bf55263e Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 30 Aug 2019 18:47:15 -0700 Subject: Partially revert "kfifo: fix kfifo_alloc() and kfifo_init()" [ Upstream commit ab9bb6318b0967671e0c9b6537c1537d51ca4f45 ] Commit dfe2a77fd243 ("kfifo: fix kfifo_alloc() and kfifo_init()") made the kfifo code round the number of elements up. That was good for __kfifo_alloc(), but it's actually wrong for __kfifo_init(). The difference? __kfifo_alloc() will allocate the rounded-up number of elements, but __kfifo_init() uses an allocation done by the caller. We can't just say "use more elements than the caller allocated", and have to round down. The good news? All the normal cases will be using power-of-two arrays anyway, and most users of kfifo's don't use kfifo_init() at all, but one of the helper macros to declare a KFIFO that enforce the proper power-of-two behavior. But it looks like at least ibmvscsis might be affected. The bad news? Will Deacon refers to an old thread and points points out that the memory ordering in kfifo's is questionable. See https://lore.kernel.org/lkml/20181211034032.32338-1-yuleixzhang@tencent.com/ for more. Fixes: dfe2a77fd243 ("kfifo: fix kfifo_alloc() and kfifo_init()") Reported-by: laokz Cc: Stefani Seibold Cc: Andrew Morton Cc: Dan Carpenter Cc: Greg KH Cc: Kees Cook Cc: Will Deacon Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- lib/kfifo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/kfifo.c b/lib/kfifo.c index 90ba1eb1df06..a94227c55551 100644 --- a/lib/kfifo.c +++ b/lib/kfifo.c @@ -82,7 +82,8 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer, { size /= esize; - size = roundup_pow_of_two(size); + if (!is_power_of_2(size)) + size = rounddown_pow_of_two(size); fifo->in = 0; fifo->out = 0; -- cgit v1.2.3 From 33a451d9d8ba52c2ffe6c1690fc49d798868ba2a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 1 Aug 2018 15:42:56 -0700 Subject: bitmap: Add bitmap_alloc(), bitmap_zalloc() and bitmap_free() commit c42b65e363ce97a828f81b59033c3558f8fa7f70 upstream. A lot of code become ugly because of open coding allocations for bitmaps. Introduce three helpers to allow users be more clear of intention and keep their code neat. Note, due to multiple circular dependencies we may not provide the helpers as inliners. For now we keep them exported and, perhaps, at some point in the future we will sort out header inclusion and inheritance. 
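As a usage sketch (the bit count, caller and error handling are invented), the helpers replace the open-coded BITS_TO_LONGS() arithmetic that the changelog complains about:

  /* Illustrative usage of the new helpers. */
  #include <linux/bitmap.h>
  #include <linux/bitops.h>
  #include <linux/errno.h>
  #include <linux/slab.h>

  static int example_bitmap_user(unsigned int nbits)
  {
      unsigned long *mask;

      /* Previously open-coded as:
       * kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
       *               GFP_KERNEL | __GFP_ZERO);
       */
      mask = bitmap_zalloc(nbits, GFP_KERNEL);
      if (!mask)
          return -ENOMEM;

      set_bit(3, mask);            /* use it like any other bitmap */
      bitmap_free(mask);
      return 0;
  }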
Signed-off-by: Andy Shevchenko Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- lib/bitmap.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'lib') diff --git a/lib/bitmap.c b/lib/bitmap.c index 0b66f0e5eb6b..a44b20a829dd 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -1212,3 +1213,22 @@ void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int n } EXPORT_SYMBOL(bitmap_copy_le); #endif + +unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags) +{ + return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), + flags); +} +EXPORT_SYMBOL(bitmap_alloc); + +unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags) +{ + return bitmap_alloc(nbits, flags | __GFP_ZERO); +} +EXPORT_SYMBOL(bitmap_zalloc); + +void bitmap_free(const unsigned long *bitmap) +{ + kfree(bitmap); +} +EXPORT_SYMBOL(bitmap_free); -- cgit v1.2.3 From 02012baea99d3958cfd9132137bd92bdb30480b7 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 30 Jan 2020 22:13:51 -0800 Subject: lib/test_kasan.c: fix memory leak in kmalloc_oob_krealloc_more() commit 3e21d9a501bf99aee2e5835d7f34d8c823f115b5 upstream. In case memory resources for _ptr2_ were allocated, release them before return. Notice that in case _ptr1_ happens to be NULL, krealloc() behaves exactly like kmalloc(). Addresses-Coverity-ID: 1490594 ("Resource leak") Link: http://lkml.kernel.org/r/20200123160115.GA4202@embeddedor Fixes: 3f15801cdc23 ("lib: add kasan test module") Signed-off-by: Gustavo A. R. Silva Reviewed-by: Dmitry Vyukov Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- lib/test_kasan.c | 1 + 1 file changed, 1 insertion(+) (limited to 'lib') diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 4ba4cbe169a8..6e76a448867d 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -124,6 +124,7 @@ static noinline void __init kmalloc_oob_krealloc_more(void) if (!ptr1 || !ptr2) { pr_err("Allocation failed\n"); kfree(ptr1); + kfree(ptr2); return; } -- cgit v1.2.3 From 1cf357ff6cca415eba9efc214700937b0f69b583 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Thu, 30 Jan 2020 22:16:37 -0800 Subject: lib/scatterlist.c: adjust indentation in __sg_alloc_table [ Upstream commit 4e456fee215677584cafa7f67298a76917e89c64 ] Clang warns: ../lib/scatterlist.c:314:5: warning: misleading indentation; statement is not part of the previous 'if' [-Wmisleading-indentation] return -ENOMEM; ^ ../lib/scatterlist.c:311:4: note: previous statement is here if (prv) ^ 1 warning generated. This warning occurs because there is a space before the tab on this line. Remove it so that the indentation is consistent with the Linux kernel coding style and clang no longer warns. 
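For context, the warning fires on the generic shape below (a made-up example, not kernel code), where a statement is indented as though the preceding if controlled it:

  #include <errno.h>

  /* Made-up example of the shape clang's -Wmisleading-indentation flags. */
  int example(int error, int *count)
  {
      if (error)
          (*count)++;
          return -ENOMEM;          /* lines up with the if-body, but always executes */
  }

In the scatterlist case the culprit was merely a stray space before the tab, so only whitespace changes and the generated code is identical.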
Link: http://lkml.kernel.org/r/20191218033606.11942-1-natechancellor@gmail.com Link: https://github.com/ClangBuiltLinux/linux/issues/830 Fixes: edce6820a9fd ("scatterlist: prevent invalid free when alloc fails") Signed-off-by: Nathan Chancellor Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- lib/scatterlist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/scatterlist.c b/lib/scatterlist.c index a854cc39f084..ef8c14a56d0a 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -317,7 +317,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, if (prv) table->nents = ++table->orig_nents; - return -ENOMEM; + return -ENOMEM; } sg_init_table(sg, alloc_size); -- cgit v1.2.3 From 73e82ee9f04808bf2938b67ab8cbf134d97b7b31 Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Thu, 20 Feb 2020 20:04:30 -0800 Subject: lib/stackdepot.c: fix global out-of-bounds in stack_slabs [ Upstream commit 305e519ce48e935702c32241f07d393c3c8fed3e ] Walter Wu has reported a potential case in which init_stack_slab() is called after stack_slabs[STACK_ALLOC_MAX_SLABS - 1] has already been initialized. In that case init_stack_slab() will overwrite stack_slabs[STACK_ALLOC_MAX_SLABS], which may result in a memory corruption. Link: http://lkml.kernel.org/r/20200218102950.260263-1-glider@google.com Fixes: cd11016e5f521 ("mm, kasan: stackdepot implementation. Enable stackdepot for SLAB") Signed-off-by: Alexander Potapenko Reported-by: Walter Wu Cc: Dmitry Vyukov Cc: Matthias Brugger Cc: Thomas Gleixner Cc: Josh Poimboeuf Cc: Kate Stewart Cc: Greg Kroah-Hartman Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- lib/stackdepot.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/stackdepot.c b/lib/stackdepot.c index f87d138e9672..759ff419fe61 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -92,15 +92,19 @@ static bool init_stack_slab(void **prealloc) return true; if (stack_slabs[depot_index] == NULL) { stack_slabs[depot_index] = *prealloc; + *prealloc = NULL; } else { - stack_slabs[depot_index + 1] = *prealloc; + /* If this is the last depot slab, do not touch the next one. */ + if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) { + stack_slabs[depot_index + 1] = *prealloc; + *prealloc = NULL; + } /* * This smp_store_release pairs with smp_load_acquire() from * |next_slab_inited| above and in depot_save_stack(). */ smp_store_release(&next_slab_inited, 1); } - *prealloc = NULL; return true; } -- cgit v1.2.3
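The fix above is an instance of a more general guarded-handoff pattern: only store the preallocated buffer when a valid slot exists, and clear the caller's pointer solely on the paths that actually consume it. A simplified sketch with invented names (not the stackdepot code itself):

  #include <stddef.h>

  /* Simplified, illustrative sketch of the guarded handoff; names are invented. */
  static void install_prealloc(void **slots, int index, int max_slots, void **prealloc)
  {
      if (!slots[index]) {
          slots[index] = *prealloc;
          *prealloc = NULL;                /* consumed */
      } else if (index + 1 < max_slots) {
          slots[index + 1] = *prealloc;    /* never write past the end of slots[] */
          *prealloc = NULL;
      }
      /* Otherwise the caller keeps ownership of *prealloc and can reuse or free it. */
  }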