From 8e18be7610aebea50e9327c11afcd5eeaaa06644 Mon Sep 17 00:00:00 2001
From: Yang Li
Date: Mon, 7 Nov 2022 14:26:23 +0800
Subject: lib: Fix some kernel-doc comments

Change the description of @policy to @p in nla_policy_len() to clear
the warnings below:

lib/nlattr.c:660: warning: Function parameter or member 'p' not described in 'nla_policy_len'
lib/nlattr.c:660: warning: Excess function parameter 'policy' description in 'nla_policy_len'

Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=2736
Reported-by: Abaci Robot
Signed-off-by: Yang Li
Link: https://lore.kernel.org/r/20221107062623.6709-1-yang.lee@linux.alibaba.com
Signed-off-by: Jakub Kicinski
---
 lib/nlattr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/nlattr.c b/lib/nlattr.c
index b67a53e29b8f..9055e8b4d144 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -646,7 +646,7 @@ EXPORT_SYMBOL(__nla_validate);
 
 /**
  * nla_policy_len - Determine the max. length of a policy
- * @policy: policy to use
+ * @p: policy to use
  * @n: number of policies
  *
  * Determines the max. length of the policy. It is currently used
--
cgit v1.2.3

From 21780f89d65837e23fef825c79aa836c1cb3a8e9 Mon Sep 17 00:00:00 2001
From: Andy Shevchenko
Date: Tue, 8 Nov 2022 16:11:08 +0200
Subject: mac_pton: Don't access memory over expected length

strlen() may go too far when estimating the length of the given string.
In some cases it may run past the buffer boundary and crash the system,
as was the case in commit 13a55372b64e ("ARM: orion5x: Revert commit
4904dbda41c8.").

Rectify this by switching to strnlen() with the expected maximum length
of the string.

Signed-off-by: Andy Shevchenko
Link: https://lore.kernel.org/r/20221108141108.62974-1-andriy.shevchenko@linux.intel.com
Signed-off-by: Jakub Kicinski
---
 lib/net_utils.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/net_utils.c b/lib/net_utils.c
index af525353395d..c17201df3d08 100644
--- a/lib/net_utils.c
+++ b/lib/net_utils.c
@@ -6,10 +6,11 @@
 
 bool mac_pton(const char *s, u8 *mac)
 {
+	size_t maxlen = 3 * ETH_ALEN - 1;
 	int i;
 
 	/* XX:XX:XX:XX:XX:XX */
-	if (strlen(s) < 3 * ETH_ALEN - 1)
+	if (strnlen(s, maxlen) < maxlen)
 		return false;
 
 	/* Don't dirty result unless string is valid MAC. */
--
cgit v1.2.3
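The mac_pton fix above only tightens how far the length check may read: strnlen() stops after maxlen bytes even when the input is not NUL-terminated, while the accept/reject result is unchanged for well-formed strings. The following minimal userspace sketch illustrates that check; it is not the kernel code, ETH_ALEN is hard-coded to 6, and the helper name is invented for illustration.

/*
 * Userspace sketch of the bounded length check (not the kernel code).
 * strnlen() never reads more than maxlen bytes, so a short unterminated
 * buffer cannot be overrun, yet the decision is the same as strlen()'s
 * for ordinary NUL-terminated strings.
 */
#define _POSIX_C_SOURCE 200809L	/* for the strnlen() prototype */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool mac_string_long_enough(const char *s)
{
	size_t maxlen = 3 * ETH_ALEN - 1;	/* "XX:XX:XX:XX:XX:XX" = 17 chars */

	/* Long enough only if strnlen() hits the maxlen bound. */
	return strnlen(s, maxlen) >= maxlen;
}

int main(void)
{
	printf("%d\n", mac_string_long_enough("00:11:22:33:44:55"));	/* 1 */
	printf("%d\n", mac_string_long_enough("00:11:22"));		/* 0 */
	return 0;
}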
From 354259fa73e2aac92ae5e19522adb69a92c15b49 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Wed, 9 Nov 2022 09:57:58 +0000
Subject: net: remove skb->vlan_present

skb->vlan_present seems redundant.

We can instead derive it from this boolean expression:

vlan_present = skb->vlan_proto != 0 || skb->vlan_tci != 0

Add a new union, to access both fields in a single load/store
when possible.

	union {
		u32	vlan_all;
		struct {
			__be16	vlan_proto;
			__u16	vlan_tci;
		};
	};

This allows the following patch to remove a conditional test in the GRO
stack.

Note: We move remcsum_offload to keep TC_AT_INGRESS_MASK and
SKB_MONO_DELIVERY_TIME_MASK unchanged.

Signed-off-by: Eric Dumazet
Acked-by: Yonghong Song
Acked-by: Martin KaFai Lau
Signed-off-by: Jakub Kicinski
---
 lib/test_bpf.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'lib')

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 5820704165a6..ade9ac672adb 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -14346,7 +14346,6 @@ static struct sk_buff *populate_skb(char *buf, int size)
 	skb->hash = SKB_HASH;
 	skb->queue_mapping = SKB_QUEUE_MAP;
 	skb->vlan_tci = SKB_VLAN_TCI;
-	skb->vlan_present = SKB_VLAN_PRESENT;
 	skb->vlan_proto = htons(ETH_P_IP);
 	dev_net_set(&dev, &init_net);
 	skb->dev = &dev;
--
cgit v1.2.3
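A simplified userspace model of the union described in the commit message is shown below. It is not the real struct sk_buff, and the type and helper names are made up; it only illustrates how overlaying vlan_all on vlan_proto and vlan_tci lets "VLAN present" be derived from one 32-bit load instead of a dedicated flag bit.

/*
 * Simplified model of the anonymous union above (not struct sk_buff).
 * vlan_all aliases the two 16-bit fields, so vlan_all != 0 is the same
 * test as vlan_proto != 0 || vlan_tci != 0.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_skb {
	union {
		uint32_t vlan_all;
		struct {
			uint16_t vlan_proto;	/* __be16 in the kernel */
			uint16_t vlan_tci;
		};
	};
};

static bool fake_skb_vlan_present(const struct fake_skb *skb)
{
	/* Single load replaces the old vlan_present bit. */
	return skb->vlan_all != 0;
}

int main(void)
{
	struct fake_skb skb;

	skb.vlan_all = 0;
	printf("%d\n", fake_skb_vlan_present(&skb));	/* 0 */
	skb.vlan_tci = 0x0123;
	printf("%d\n", fake_skb_vlan_present(&skb));	/* 1 */
	return 0;
}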
From b084f6cc3563faf4f4d16c98852c0c734fe18914 Mon Sep 17 00:00:00 2001
From: Jiapeng Chong
Date: Wed, 23 Nov 2022 17:37:02 +0800
Subject: lib/test_rhashtable: Remove set but unused variable 'insert_retries'

Variable 'insert_retries' is not effectively used in the function,
so delete it.

lib/test_rhashtable.c:437:18: warning: variable 'insert_retries' set but not used.

Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=3242
Reported-by: Abaci Robot
Signed-off-by: Jiapeng Chong
Acked-by: Herbert Xu
Signed-off-by: David S. Miller
---
 lib/test_rhashtable.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'lib')

diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index f2ba5787055a..3ae3399f3651 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -434,7 +434,7 @@ out_free:
 static int __init test_rhashtable_max(struct test_obj *array,
 				      unsigned int entries)
 {
-	unsigned int i, insert_retries = 0;
+	unsigned int i;
 	int err;
 
 	test_rht_params.max_size = roundup_pow_of_two(entries / 8);
@@ -447,9 +447,7 @@ static int __init test_rhashtable_max(struct test_obj *array,
 
 		obj->value.id = i * 2;
 		err = insert_retry(&ht, obj, test_rht_params);
-		if (err > 0)
-			insert_retries += err;
-		else if (err)
+		if (err < 0)
 			return err;
 	}
--
cgit v1.2.3

From e47877c7aa821c413b45e05f804819579bdfa1a3 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 6 Dec 2022 11:36:32 -1000
Subject: rhashtable: Allow rhashtable to be used from irq-safe contexts

rhashtable currently only does bh-safe synchronization making it
impossible to use from irq-safe contexts. Switch it to use irq-safe
synchronization to remove the restriction.

v2: Update the lock functions to return the ulong flags value and unlock
    functions to take the value directly instead of passing around the
    pointer. Suggested by Linus.

Signed-off-by: Tejun Heo
Reviewed-by: David Vernet
Acked-by: Josh Don
Acked-by: Hao Luo
Acked-by: Barret Rhoden
Cc: Linus Torvalds
Signed-off-by: David S. Miller
---
 lib/rhashtable.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

(limited to 'lib')

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index e12bbfb240b8..6ae2ba8e06a2 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -231,6 +231,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 	struct rhash_head *head, *next, *entry;
 	struct rhash_head __rcu **pprev = NULL;
 	unsigned int new_hash;
+	unsigned long flags;
 
 	if (new_tbl->nest)
 		goto out;
@@ -253,13 +254,14 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 
 	new_hash = head_hashfn(ht, new_tbl, entry);
 
-	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
+	flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
+				SINGLE_DEPTH_NESTING);
 
 	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
 
 	RCU_INIT_POINTER(entry->next, head);
 
-	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
+	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);
 
 	if (pprev)
 		rcu_assign_pointer(*pprev, next);
@@ -276,18 +278,19 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+	unsigned long flags;
 	int err;
 
 	if (!bkt)
 		return 0;
-	rht_lock(old_tbl, bkt);
+	flags = rht_lock(old_tbl, bkt);
 
 	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
 		;
 
 	if (err == -ENOENT)
 		err = 0;
-	rht_unlock(old_tbl, bkt);
+	rht_unlock(old_tbl, bkt, flags);
 
 	return err;
 }
@@ -590,6 +593,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 	struct bucket_table *new_tbl;
 	struct bucket_table *tbl;
 	struct rhash_lock_head __rcu **bkt;
+	unsigned long flags;
 	unsigned int hash;
 	void *data;
 
@@ -607,7 +611,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 			data = ERR_PTR(-EAGAIN);
 		} else {
-			rht_lock(tbl, bkt);
+			flags = rht_lock(tbl, bkt);
 			data = rhashtable_lookup_one(ht, bkt, tbl,
 						     hash, key, obj);
 			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
@@ -615,7 +619,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 			if (PTR_ERR(new_tbl) != -EEXIST)
 				data = ERR_CAST(new_tbl);
 
-			rht_unlock(tbl, bkt);
+			rht_unlock(tbl, bkt, flags);
 		}
 	} while (!IS_ERR_OR_NULL(new_tbl));
--
cgit v1.2.3
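The rhashtable diff above shows the calling-convention change from the v2 note: the lock helpers now return the saved flags by value and the unlock helpers take them back, in the same spirit as spin_lock_irqsave()/spin_unlock_irqrestore(). The userspace sketch below imitates only that interface shape; the names are invented, a pthread mutex stands in for the bucket lock, and the flags value is a placeholder for the CPU interrupt state the kernel would actually save.

/*
 * Sketch of the "return flags by value, pass them back on unlock" API
 * shape. None of this is the real rhashtable code.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static int bucket_value;

/* Stand-in for rht_lock(): acquire and return the saved "flags". */
static unsigned long fake_rht_lock(void)
{
	unsigned long flags = 0xace;	/* pretend this is the saved irq state */

	pthread_mutex_lock(&bucket_lock);
	return flags;
}

/* Stand-in for rht_unlock(): consume the flags value on release. */
static void fake_rht_unlock(unsigned long flags)
{
	pthread_mutex_unlock(&bucket_lock);
	(void)flags;	/* the kernel would restore the irq state here */
}

int main(void)
{
	unsigned long flags;

	flags = fake_rht_lock();
	bucket_value++;			/* critical section */
	fake_rht_unlock(flags);

	printf("%d\n", bucket_value);	/* 1 */
	return 0;
}

Returning the flags by value keeps every call site a one-liner instead of threading an output pointer through the helpers, which appears to be the point of the v2 note above.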
From 1280d4b76f3402645aa7075a53f49a3a14be07a8 Mon Sep 17 00:00:00 2001
From: Uladzislau Koshchanka
Date: Sat, 10 Dec 2022 03:44:23 +0300
Subject: lib: packing: replace bit_reverse() with bitrev8()

Remove the bit_reverse() function and use bitrev8() from linux/bitrev.h
plus a bit shift instead. This reduces code repetition.

Signed-off-by: Uladzislau Koshchanka
Link: https://lore.kernel.org/r/20221210004423.32332-1-koshchanka@gmail.com
Signed-off-by: Jakub Kicinski
---
 lib/Kconfig   |  1 +
 lib/packing.c | 16 ++--------------
 2 files changed, 3 insertions(+), 14 deletions(-)

(limited to 'lib')

diff --git a/lib/Kconfig b/lib/Kconfig
index 9bbf8a4b2108..cc969ef58a2a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -24,6 +24,7 @@ config LINEAR_RANGES
 
 config PACKING
 	bool "Generic bitfield packing and unpacking"
+	select BITREVERSE
 	default n
 	help
 	  This option provides the packing() helper function, which permits
diff --git a/lib/packing.c b/lib/packing.c
index 9a72f4bbf0e2..a96169237ae6 100644
--- a/lib/packing.c
+++ b/lib/packing.c
@@ -7,6 +7,7 @@
 #include <linux/bitops.h>
 #include <linux/errno.h>
 #include <linux/types.h>
+#include <linux/bitrev.h>
 
 static int get_le_offset(int offset)
 {
@@ -29,19 +30,6 @@ static int get_reverse_lsw32_offset(int offset, size_t len)
 	return word_index * 4 + offset;
 }
 
-static u64 bit_reverse(u64 val, unsigned int width)
-{
-	u64 new_val = 0;
-	unsigned int bit;
-	unsigned int i;
-
-	for (i = 0; i < width; i++) {
-		bit = (val & (1 << i)) != 0;
-		new_val |= (bit << (width - i - 1));
-	}
-	return new_val;
-}
-
 static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
 				       int *box_end_bit, u8 *box_mask)
 {
@@ -49,7 +37,7 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
 	int new_box_start_bit, new_box_end_bit;
 
 	*to_write >>= *box_end_bit;
-	*to_write = bit_reverse(*to_write, box_bit_width);
+	*to_write = bitrev8(*to_write) >> (8 - box_bit_width);
 	*to_write <<= *box_end_bit;
 
 	new_box_end_bit = box_bit_width - *box_start_bit - 1;
--
cgit v1.2.3
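For widths of at most 8 bits (which the bitrev8()-based replacement implies is all the packing code needs here), reversing the low 'width' bits with bitrev8() works because the reversed bits land in the top of the byte and can be shifted back down by 8 - width. The small userspace program below, which models bitrev8() with a local helper since linux/bitrev.h is not available outside the kernel, compares the removed open-coded loop against the replacement over all such values.

/*
 * Userspace check that bit_reverse(val, width) and
 * bitrev8(val) >> (8 - width) agree for width <= 8 and val < 2^width.
 */
#include <stdint.h>
#include <stdio.h>

/* Reverse all 8 bits of a byte, like the kernel's bitrev8(). */
static uint8_t fake_bitrev8(uint8_t byte)
{
	uint8_t out = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (byte & (1u << i))
			out |= 1u << (7 - i);
	return out;
}

/* The removed open-coded version, exercised only for width <= 8 here. */
static uint64_t bit_reverse(uint64_t val, unsigned int width)
{
	uint64_t new_val = 0;
	unsigned int bit, i;

	for (i = 0; i < width; i++) {
		bit = (val & (1ull << i)) != 0;
		new_val |= ((uint64_t)bit << (width - i - 1));
	}
	return new_val;
}

int main(void)
{
	unsigned int width, val;

	for (width = 1; width <= 8; width++)
		for (val = 0; val < (1u << width); val++)
			if (bit_reverse(val, width) !=
			    (uint64_t)(fake_bitrev8((uint8_t)val) >> (8 - width)))
				printf("mismatch: width=%u val=%u\n", width, val);

	printf("done\n");	/* no mismatch lines expected */
	return 0;
}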