summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
author: Daniel Borkmann <daniel@iogearbox.net> 2026-01-20 13:55:01 +0100
committer: Andrii Nakryiko <andrii@kernel.org> 2026-01-20 11:28:02 -0800
commit: 713edc71449f122491f8860be49b40f27d5f46b5 (patch)
tree: 2f49a50a7b77db9cb757c726165d5bf171bdc68c /kernel
parent: ef7d4e42d16f74b123c86c9195ba5136046cee57 (diff)
bpf: Remove leftover accounting in htab_map_mem_usage after rqspinlock
After commit 4fa8d68aa53e ("bpf: Convert hashtab.c to rqspinlock") we no longer use HASHTAB_MAP_LOCK_{COUNT,MASK}, as the per-CPU map_locked[HASHTAB_MAP_LOCK_COUNT] array got removed from struct bpf_htab. Right now it is still accounted for in htab_map_mem_usage.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/09703eb6bb249f12b1d5253b5a50a0c4fa239d27.1768913513.git.daniel@iogearbox.net
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/bpf/hashtab.c | 11
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 441ff5bc54ac..3b9d297a53be 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -82,9 +82,6 @@ struct bucket {
rqspinlock_t raw_lock;
};
-#define HASHTAB_MAP_LOCK_COUNT 8
-#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
-
struct bpf_htab {
struct bpf_map map;
struct bpf_mem_alloc ma;
@@ -2237,11 +2234,11 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
bool prealloc = htab_is_prealloc(htab);
bool percpu = htab_is_percpu(htab);
bool lru = htab_is_lru(htab);
- u64 num_entries;
- u64 usage = sizeof(struct bpf_htab);
+ u64 num_entries, usage;
+
+ usage = sizeof(struct bpf_htab) +
+ sizeof(struct bucket) * htab->n_buckets;
- usage += sizeof(struct bucket) * htab->n_buckets;
- usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
if (prealloc) {
num_entries = map->max_entries;
if (htab_has_extra_elems(htab))