From 4a67b82250b481f0591ba83b2584cf18385fa059 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Fri, 22 Jun 2018 14:54:49 -0700
Subject: scsi: klist: Make it safe to use klists in atomic context

[ Upstream commit 624fa7790f80575a4ec28fbdb2034097dc18d051 ]

The scsi_transport_srp implementation cannot avoid iterating over a
klist from atomic context when using the legacy block layer instead of
blk-mq. Hence this patch, which makes it safe to use klists in atomic
context.

This patch avoids lockdep reporting the following:

 WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected

 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&(&k->k_lock)->rlock);
                               local_irq_disable();
                               lock(&(&q->__queue_lock)->rlock);
                               lock(&(&k->k_lock)->rlock);
  lock(&(&q->__queue_lock)->rlock);

 stack backtrace:
 Workqueue: kblockd blk_timeout_work
 Call Trace:
  dump_stack+0xa4/0xf5
  check_usage+0x6e6/0x700
  __lock_acquire+0x185d/0x1b50
  lock_acquire+0xd2/0x260
  _raw_spin_lock+0x32/0x50
  klist_next+0x47/0x190
  device_for_each_child+0x8e/0x100
  srp_timed_out+0xaf/0x1d0 [scsi_transport_srp]
  scsi_times_out+0xd4/0x410 [scsi_mod]
  blk_rq_timed_out+0x36/0x70
  blk_timeout_work+0x1b5/0x220
  process_one_work+0x4fe/0xad0
  worker_thread+0x63/0x5a0
  kthread+0x1c1/0x1e0
  ret_from_fork+0x24/0x30

See also commit c9ddf73476ff ("scsi: scsi_transport_srp: Fix shost to
rport translation").

Signed-off-by: Bart Van Assche
Cc: Martin K. Petersen
Cc: James Bottomley
Acked-by: Greg Kroah-Hartman
Signed-off-by: Martin K. Petersen
Signed-off-by: Sasha Levin
Signed-off-by: Greg Kroah-Hartman
---
 lib/klist.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'lib')

diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5d84c5..f6b547812fe3 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
 	void (*put)(struct klist_node *) = i->i_klist->put;
 	struct klist_node *last = i->i_cur;
 	struct klist_node *prev;
+	unsigned long flags;
 
-	spin_lock(&i->i_klist->k_lock);
+	spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
 	if (last) {
 		prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i)
 		prev = to_klist_node(prev->n_node.prev);
 	}
 
-	spin_unlock(&i->i_klist->k_lock);
+	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
 	if (put && last)
 		put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
 	void (*put)(struct klist_node *) = i->i_klist->put;
 	struct klist_node *last = i->i_cur;
 	struct klist_node *next;
+	unsigned long flags;
 
-	spin_lock(&i->i_klist->k_lock);
+	spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
 	if (last) {
 		next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i)
 		next = to_klist_node(next->n_node.next);
 	}
 
-	spin_unlock(&i->i_klist->k_lock);
+	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
 	if (put && last)
 		put(last);
--
cgit v1.2.3
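For readers skimming the patch above: the change is the standard conversion from spin_lock() to spin_lock_irqsave() so the same lock can also be taken from softirq/timeout context. A minimal sketch of that pattern follows; the lock and function names (example_lock, example_walk) are made up for illustration and are not part of the patch.

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, not from the patch */

static void example_walk(void)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() disables local interrupts and remembers their
	 * previous state in 'flags', so this critical section cannot be
	 * re-entered from timer/softirq context, and atomic callers may take
	 * the same lock without risking the deadlock lockdep warned about.
	 */
	spin_lock_irqsave(&example_lock, flags);
	/* ... walk or update the shared structure here ... */
	spin_unlock_irqrestore(&example_lock, flags);
}
```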
From fb19348bd709e3f948825ed995bdc477a0414772 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Wed, 10 Oct 2018 12:29:55 -0700
Subject: rhashtable: add schedule points

Rehashing and destroying a large hash table takes a lot of time and
happens in process context. It is safe to add cond_resched() in
rhashtable_rehash_table() and rhashtable_free_and_destroy().

Signed-off-by: Eric Dumazet
Acked-by: Herbert Xu
Signed-off-by: David S. Miller
(cherry picked from commit ae6da1f503abb5a5081f9f6c4a6881de97830f3e)
Signed-off-by: Greg Kroah-Hartman
---
 lib/rhashtable.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 101dac085c62..fdffd6232365 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -251,8 +251,10 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
 	if (!new_tbl)
 		return 0;
 
-	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
+	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
 		rhashtable_rehash_chain(ht, old_hash);
+		cond_resched();
+	}
 
 	/* Publish the new table pointer. */
 	rcu_assign_pointer(ht->tbl, new_tbl);
@@ -993,6 +995,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
 		for (i = 0; i < tbl->size; i++) {
 			struct rhash_head *pos, *next;
 
+			cond_resched();
 			for (pos = rht_dereference(tbl->buckets[i], ht),
 			     next = !rht_is_a_nulls(pos) ?
 					rht_dereference(pos->next, ht) : NULL;
--
cgit v1.2.3

From acfbd2866fe960eaca671b20329927c7a5697acc Mon Sep 17 00:00:00 2001
From: Thadeu Lima de Souza Cascardo
Date: Tue, 20 Mar 2018 09:58:51 -0300
Subject: test_bpf: Fix testing with CONFIG_BPF_JIT_ALWAYS_ON=y on other arches

[ Upstream commit 52fda36d63bfc8c8e8ae5eda8eb5ac6f52cd67ed ]

Function bpf_fill_maxinsns11 is designed so that it cannot be JITed on
x86_64. It therefore fails when CONFIG_BPF_JIT_ALWAYS_ON=y, and commit
09584b406742 ("bpf: fix selftests/bpf test_kmod.sh failure when
CONFIG_BPF_JIT_ALWAYS_ON=y") makes sure that failure is detected in
that case.

However, it does not fail on other architectures, which have a
different JIT compiler design. As a result, test_bpf has started to
fail to load on those architectures.

After this fix, test_bpf loads fine on both x86_64 and ppc64el.

Fixes: 09584b406742 ("bpf: fix selftests/bpf test_kmod.sh failure when CONFIG_BPF_JIT_ALWAYS_ON=y")
Signed-off-by: Thadeu Lima de Souza Cascardo
Reviewed-by: Yonghong Song
Signed-off-by: Daniel Borkmann
Signed-off-by: Sasha Levin
---
 lib/test_bpf.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 1586dfdea809..960d4d627361 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -4874,7 +4874,7 @@ static struct bpf_test tests[] = {
 	{
 		"BPF_MAXINSNS: Jump, gap, jump, ...",
 		{ },
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 #else
 		CLASSIC | FLAG_NO_DATA,
--
cgit v1.2.3
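The rhashtable patch above uses the usual idiom of sprinkling cond_resched() through a long process-context loop so other tasks can run. Below is a rough, self-contained sketch of that idiom; struct example_table and example_free_all() are invented for illustration and do not exist in the kernel.

```c
#include <linux/sched.h>
#include <linux/slab.h>

/* Hypothetical table type, only for illustration. */
struct example_table {
	unsigned int size;
	void **buckets;
};

static void example_free_all(struct example_table *tbl)
{
	unsigned int i;

	for (i = 0; i < tbl->size; i++) {
		/*
		 * Give the scheduler a chance between buckets so tearing down
		 * a huge table does not hog the CPU. cond_resched() is only
		 * legal in process context, never under a spinlock or in IRQ
		 * context.
		 */
		cond_resched();
		kfree(tbl->buckets[i]);
	}
	kfree(tbl->buckets);
}
```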
From 81301a15a267ba132c07e01c70d5fbe91069bdfb Mon Sep 17 00:00:00 2001
From: Waiman Long
Date: Thu, 18 Oct 2018 21:45:17 -0400
Subject: locking/lockdep: Fix debug_locks off performance problem

[ Upstream commit 9506a7425b094d2f1d9c877ed5a78f416669269b ]

It was found that when debug_locks was turned off because of a problem
detected by the lockdep code, system performance could drop quite
significantly when the lock_stat code was also configured into the
kernel. For instance, parallel kernel build time on a 4-socket x86-64
server nearly doubled.

Further analysis traced the slowdown back to the frequent calls to
debug_locks_off() from the __lock_acquired() function, probably due to
some inconsistent lockdep state with debug_locks off. The
debug_locks_off() function did an unconditional atomic xchg to write a
0 value into debug_locks, which had already been set to 0. This led to
severe cacheline contention on the cacheline holding debug_locks. As
debug_locks is referenced in quite a few different places in the
kernel, this greatly slowed down system performance.

To prevent that thrashing of the debug_locks cacheline, lock_acquired()
and lock_contended() now check the state of debug_locks before
proceeding. The debug_locks_off() function is also modified to check
debug_locks before calling __debug_locks_off().

Signed-off-by: Waiman Long
Cc: Andrew Morton
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Will Deacon
Link: http://lkml.kernel.org/r/1539913518-15598-1-git-send-email-longman@redhat.com
Signed-off-by: Ingo Molnar
Signed-off-by: Sasha Levin
Signed-off-by: Greg Kroah-Hartman
---
 lib/debug_locks.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 96c4c633d95e..124fdf238b3d 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
  */
 int debug_locks_off(void)
 {
-	if (__debug_locks_off()) {
+	if (debug_locks && __debug_locks_off()) {
 		if (!debug_locks_silent) {
 			console_verbose();
 			return 1;
--
cgit v1.2.3

From e133c33edf3b63193fceb58a5d5249eed63cb572 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Fri, 16 Nov 2018 15:08:35 -0800
Subject: lib/ubsan.c: don't mark __ubsan_handle_builtin_unreachable as noreturn

commit 1c23b4108d716cc848b38532063a8aca4f86add8 upstream.

gcc-8 complains about the prototype for this function:

  lib/ubsan.c:432:1: error: ignoring attribute 'noreturn' in declaration of a built-in function '__ubsan_handle_builtin_unreachable' because it conflicts with attribute 'const' [-Werror=attributes]

This is actually a GCC bug: in GCC internals,
__ubsan_handle_builtin_unreachable() is declared with both 'noreturn'
and 'const' attributes instead of only 'noreturn':

  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84210

Work around this by removing the noreturn attribute.

[aryabinin: add information about GCC bug in changelog]
Link: http://lkml.kernel.org/r/20181107144516.4587-1-aryabinin@virtuozzo.com
Signed-off-by: Arnd Bergmann
Signed-off-by: Andrey Ryabinin
Acked-by: Olof Johansson
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
Signed-off-by: Greg Kroah-Hartman
---
 lib/ubsan.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/ubsan.c b/lib/ubsan.c
index 50d1d5c25deb..60e108c5c173 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -451,8 +451,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
 EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
 
-void __noreturn
-__ubsan_handle_builtin_unreachable(struct unreachable_data *data)
+void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
 {
 	unsigned long flags;
--
cgit v1.2.3
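The debug_locks fix above is a classic "read before atomic write" optimisation: skip the expensive atomic operation when a plain load already shows there is nothing to do. The standalone C11 sketch below reproduces the idea in userspace; debug_enabled and debug_off() are stand-in names, not the kernel's debug_locks/debug_locks_off().

```c
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int debug_enabled = 1;	/* stand-in for debug_locks */

static bool debug_off(void)
{
	/*
	 * Read the flag first with a cheap load: once it is already 0, hot
	 * callers return immediately instead of issuing an atomic exchange
	 * that would keep bouncing the cacheline between CPUs.
	 */
	if (!atomic_load_explicit(&debug_enabled, memory_order_relaxed))
		return false;

	/* Only one caller observes the 1 -> 0 transition. */
	return atomic_exchange(&debug_enabled, 0) != 0;
}
```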
From d05e26d49ed30201299ca60eec2de3872d43a3d2 Mon Sep 17 00:00:00 2001
From: Jeremy Linton
Date: Mon, 5 Nov 2018 18:14:41 -0600
Subject: lib/raid6: Fix arm64 test build

[ Upstream commit 313a06e636808387822af24c507cba92703568b1 ]

The lib/raid6/test code fails to build the neon objects on arm64
because the correct machine type is 'aarch64'. Once this is correctly
enabled, the neon recovery objects need to be added to the build.

Reviewed-by: Ard Biesheuvel
Signed-off-by: Jeremy Linton
Signed-off-by: Catalin Marinas
Signed-off-by: Sasha Levin
---
 lib/raid6/test/Makefile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 2c7b60edea04..1faeef0c30b9 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -26,7 +26,7 @@ ifeq ($(ARCH),arm)
         CFLAGS += -I../../../arch/arm/include -mfpu=neon
         HAS_NEON = yes
 endif
-ifeq ($(ARCH),arm64)
+ifeq ($(ARCH),aarch64)
         CFLAGS += -I../../../arch/arm64/include
         HAS_NEON = yes
 endif
@@ -40,7 +40,7 @@ ifeq ($(IS_X86),yes)
                     gcc -c -x assembler - >&/dev/null &&	\
                     rm ./-.o && echo -DCONFIG_AS_AVX512=1)
 else ifeq ($(HAS_NEON),yes)
-        OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o
+        OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
         CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
 else
         HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
--
cgit v1.2.3

From d041ed258e58e48aff222ad562bb44fdcfcc1482 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Fri, 30 Nov 2018 12:13:15 -0800
Subject: test_hexdump: use memcpy instead of strncpy

commit b1286ed7158e9b62787508066283ab0b8850b518 upstream.

New versions of gcc reasonably warn about the odd pattern of

	strncpy(p, q, strlen(q));

which really doesn't make sense: the strncpy() ends up being just a
slow and odd way to write memcpy() in this case.

Apparently there was a patch for this floating around earlier, but it
got lost.

Acked-again-by: Andy Shevchenko
Signed-off-by: Linus Torvalds
Signed-off-by: Greg Kroah-Hartman
---
 lib/test_hexdump.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 3f415d8101f3..1c3c513add77 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -81,7 +81,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
 		const char *q = *result++;
 		size_t amount = strlen(q);
 
-		strncpy(p, q, amount);
+		memcpy(p, q, amount);
 		p += amount;
 		*p++ = ' ';
--
cgit v1.2.3

From cd39e296c4d65a4598c882d17373ca5ec99d7416 Mon Sep 17 00:00:00 2001
From: Guenter Roeck
Date: Sun, 1 Jul 2018 13:57:16 -0700
Subject: kobject: Replace strncpy with memcpy

commit 77d2a24b6107bd9b3bf2403a65c1428a9da83dd0 upstream.

gcc 8.1.0 complains:

  lib/kobject.c:128:3: warning: 'strncpy' output truncated before terminating nul copying as many bytes from a string as its length [-Wstringop-truncation]
  lib/kobject.c: In function 'kobject_get_path':
  lib/kobject.c:125:13: note: length computed here

Using strncpy() is indeed less than perfect since the length of data to
be copied has already been determined with strlen(). Replace strncpy()
with memcpy() to address the warning and optimize the code a little.

Signed-off-by: Guenter Roeck
Signed-off-by: Greg Kroah-Hartman
Signed-off-by: Greg Kroah-Hartman
---
 lib/kobject.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/kobject.c b/lib/kobject.c
index b733a83e5294..f58c7f2b229c 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -127,7 +127,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
 		int cur = strlen(kobject_name(parent));
 		/* back up enough to print this name with '/' */
 		length -= cur;
-		strncpy(path + length, kobject_name(parent), cur);
+		memcpy(path + length, kobject_name(parent), cur);
 		*(path + --length) = '/';
--
cgit v1.2.3
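Both strncpy()-to-memcpy() conversions above rest on the same observation: when the copy length was computed with strlen(), strncpy() adds nothing but overhead and a truncation warning. A small standalone example of the resulting pattern follows; the buffer and names are illustrative, not kernel code.

```c
/* Illustration only; not taken from the patches above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name = "example";
	char buf[32];
	size_t len = strlen(name);

	/*
	 * strncpy(buf, name, len) would copy exactly len bytes and add no
	 * NUL terminator, so it is just a slower, warning-prone memcpy()
	 * here; copy the bytes directly and terminate explicitly.
	 */
	memcpy(buf, name, len);
	buf[len] = '\0';

	printf("%s\n", buf);
	return 0;
}
```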