author     Linus Torvalds <torvalds@linux-foundation.org>  2026-02-19 10:36:54 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2026-02-19 10:36:54 -0800
commit     4f13d0dabc87fb585b96d90cc4b29f67a2995405 (patch)
tree       a84b90a0f0473bd81ce7af04582fbfc5342cc603
parent     2b7a25df823dc7d8f56f8ce7c2d2dac391cea9c2 (diff)
parent     1e5c009126952f673ffa2427acbd69e57493f0d2 (diff)
Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Pull bpf fixes from Alexei Starovoitov:

 - Fix invalid write loop logic in libbpf's bpf_linker__add_buf() (Amery Hung)

 - Fix a potential use-after-free of BTF object (Anton Protopopov)

 - Add feature detection to libbpf and avoid moving arena global
   variables on older kernels (Emil Tsalapatis)

 - Remove extern declaration of bpf_stream_vprintk() from libbpf
   headers (Ihor Solodrai)

 - Fix truncated netlink dumps in bpftool (Jakub Kicinski)

 - Fix map_kptr grace period wait in bpf selftests (Kumar Kartikeya Dwivedi)

 - Remove hexdump dependency while building bpf selftests (Matthieu Baerts)

 - Complete fsession support in BPF trampolines on riscv (Menglong Dong)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: Remove hexdump dependency
  libbpf: Remove extern declaration of bpf_stream_vprintk()
  selftests/bpf: Use vmlinux.h in test_xdp_meta
  bpftool: Fix truncated netlink dumps
  libbpf: Delay feature gate check until object prepare time
  libbpf: Do not use PROG_TYPE_TRACEPOINT program for feature gating
  bpf: Add a map/btf from a fd array more consistently
  selftests/bpf: Fix map_kptr grace period wait
  selftests/bpf: enable fsession_test on riscv64
  selftests/bpf: Adjust selftest due to function rename
  bpf, riscv: add fsession support for trampolines
  bpf: Fix a potential use-after-free of BTF object
  bpf, riscv: introduce emit_store_stack_imm64() for trampoline
  libbpf: Fix invalid write loop logic in bpf_linker__add_buf()
  libbpf: Add gating for arena globals relocation feature
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c  97
-rw-r--r--  kernel/bpf/verifier.c  48
-rw-r--r--  tools/bpf/bpftool/net.c  5
-rw-r--r--  tools/lib/bpf/bpf_helpers.h  3
-rw-r--r--  tools/lib/bpf/features.c  65
-rw-r--r--  tools/lib/bpf/libbpf.c  17
-rw-r--r--  tools/lib/bpf/libbpf_internal.h  2
-rw-r--r--  tools/lib/bpf/linker.c  2
-rw-r--r--  tools/lib/bpf/netlink.c  4
-rw-r--r--  tools/testing/selftests/bpf/Makefile  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/map_kptr.c  15
-rw-r--r--  tools/testing/selftests/bpf/progs/get_func_args_test.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/get_func_ip_test.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/profiler.h  2
-rw-r--r--  tools/testing/selftests/bpf/progs/profiler.inc.h  6
-rw-r--r--  tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c  30
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_meta.c  12
-rw-r--r--  tools/testing/selftests/bpf/test_kmods/bpf_testmod.c  28
-rw-r--r--  tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h  1
19 files changed, 242 insertions, 101 deletions
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 37888abee70c..f065b45e8d8f 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -926,6 +926,14 @@ static void restore_stack_args(int nr_stack_args, int args_off, int stk_arg_off,
}
}
+static void emit_store_stack_imm64(u8 reg, int stack_off, u64 imm64,
+ struct rv_jit_context *ctx)
+{
+ /* Load imm64 into reg and store it at [FP + stack_off]. */
+ emit_imm(reg, (s64)imm64, ctx);
+ emit_sd(RV_REG_FP, stack_off, reg, ctx);
+}
+
static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off,
int run_ctx_off, bool save_ret, struct rv_jit_context *ctx)
{
@@ -933,12 +941,10 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
struct bpf_prog *p = l->link.prog;
int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
- if (l->cookie) {
- emit_imm(RV_REG_T1, l->cookie, ctx);
- emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_T1, ctx);
- } else {
+ if (l->cookie)
+ emit_store_stack_imm64(RV_REG_T1, -run_ctx_off + cookie_off, l->cookie, ctx);
+ else
emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_ZERO, ctx);
- }
/* arg1: prog */
emit_imm(RV_REG_A0, (const s64)p, ctx);
@@ -990,6 +996,29 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
return ret;
}
+static int invoke_bpf(struct bpf_tramp_links *tl, int args_off, int retval_off,
+ int run_ctx_off, int func_meta_off, bool save_ret, u64 func_meta,
+ int cookie_off, struct rv_jit_context *ctx)
+{
+ int i, cur_cookie = (cookie_off - args_off) / 8;
+
+ for (i = 0; i < tl->nr_links; i++) {
+ int err;
+
+ if (bpf_prog_calls_session_cookie(tl->links[i])) {
+ u64 meta = func_meta | ((u64)cur_cookie << BPF_TRAMP_COOKIE_INDEX_SHIFT);
+
+ emit_store_stack_imm64(RV_REG_T1, -func_meta_off, meta, ctx);
+ cur_cookie--;
+ }
+ err = invoke_bpf_prog(tl->links[i], args_off, retval_off, run_ctx_off,
+ save_ret, ctx);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
const struct btf_func_model *m,
struct bpf_tramp_links *tlinks,
@@ -999,13 +1028,15 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
int i, ret, offset;
int *branches_off = NULL;
int stack_size = 0, nr_arg_slots = 0;
- int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off, stk_arg_off;
+ int retval_off, args_off, func_meta_off, ip_off, run_ctx_off, sreg_off, stk_arg_off;
+ int cookie_off, cookie_cnt;
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
void *orig_call = func_addr;
bool save_ret;
+ u64 func_meta;
u32 insn;
/* Two types of generated trampoline stack layout:
@@ -1036,10 +1067,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
* [ ... ]
* FP - args_off [ arg1 ]
*
- * FP - nregs_off [ regs count ]
+ * FP - func_meta_off [ regs count, etc ]
*
* FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG
*
+ * [ stack cookie N ]
+ * [ ... ]
+ * FP - cookie_off [ stack cookie 1 ]
+ *
* FP - run_ctx_off [ bpf_tramp_run_ctx ]
*
* FP - sreg_off [ callee saved reg ]
@@ -1071,14 +1106,20 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
stack_size += nr_arg_slots * 8;
args_off = stack_size;
+ /* function metadata, such as regs count */
stack_size += 8;
- nregs_off = stack_size;
+ func_meta_off = stack_size;
if (flags & BPF_TRAMP_F_IP_ARG) {
stack_size += 8;
ip_off = stack_size;
}
+ cookie_cnt = bpf_fsession_cookie_cnt(tlinks);
+ /* room for session cookies */
+ stack_size += cookie_cnt * 8;
+ cookie_off = stack_size;
+
stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
run_ctx_off = stack_size;
@@ -1123,16 +1164,22 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
emit_sd(RV_REG_FP, -sreg_off, RV_REG_S1, ctx);
/* store ip address of the traced function */
- if (flags & BPF_TRAMP_F_IP_ARG) {
- emit_imm(RV_REG_T1, (const s64)func_addr, ctx);
- emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx);
- }
+ if (flags & BPF_TRAMP_F_IP_ARG)
+ emit_store_stack_imm64(RV_REG_T1, -ip_off, (u64)func_addr, ctx);
- emit_li(RV_REG_T1, nr_arg_slots, ctx);
- emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx);
+ func_meta = nr_arg_slots;
+ emit_store_stack_imm64(RV_REG_T1, -func_meta_off, func_meta, ctx);
store_args(nr_arg_slots, args_off, ctx);
+ if (bpf_fsession_cnt(tlinks)) {
+ /* clear all session cookies' value */
+ for (i = 0; i < cookie_cnt; i++)
+ emit_sd(RV_REG_FP, -cookie_off + 8 * i, RV_REG_ZERO, ctx);
+ /* clear return value to make sure fentry always get 0 */
+ emit_sd(RV_REG_FP, -retval_off, RV_REG_ZERO, ctx);
+ }
+
if (flags & BPF_TRAMP_F_CALL_ORIG) {
emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx);
ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
@@ -1140,9 +1187,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
return ret;
}
- for (i = 0; i < fentry->nr_links; i++) {
- ret = invoke_bpf_prog(fentry->links[i], args_off, retval_off, run_ctx_off,
- flags & BPF_TRAMP_F_RET_FENTRY_RET, ctx);
+ if (fentry->nr_links) {
+ ret = invoke_bpf(fentry, args_off, retval_off, run_ctx_off, func_meta_off,
+ flags & BPF_TRAMP_F_RET_FENTRY_RET, func_meta, cookie_off, ctx);
if (ret)
return ret;
}
@@ -1189,9 +1236,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
*(u32 *)(ctx->insns + branches_off[i]) = insn;
}
- for (i = 0; i < fexit->nr_links; i++) {
- ret = invoke_bpf_prog(fexit->links[i], args_off, retval_off,
- run_ctx_off, false, ctx);
+ /* set "is_return" flag for fsession */
+ func_meta |= (1ULL << BPF_TRAMP_IS_RETURN_SHIFT);
+ if (bpf_fsession_cnt(tlinks))
+ emit_store_stack_imm64(RV_REG_T1, -func_meta_off, func_meta, ctx);
+
+ if (fexit->nr_links) {
+ ret = invoke_bpf(fexit, args_off, retval_off, run_ctx_off, func_meta_off,
+ false, func_meta, cookie_off, ctx);
if (ret)
goto out;
}
@@ -2091,3 +2143,8 @@ bool bpf_jit_inlines_helper_call(s32 imm)
return false;
}
}
+
+bool bpf_jit_supports_fsession(void)
+{
+ return true;
+}
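For readers unfamiliar with the fsession layout: the register count, the per-program cookie slot index and the is-return flag are all packed into the single 64-bit func_meta word stored at FP - func_meta_off. A minimal sketch of that packing, using placeholder shift values (the real BPF_TRAMP_COOKIE_INDEX_SHIFT and BPF_TRAMP_IS_RETURN_SHIFT constants come from the kernel's BPF headers, not from this sketch):

/* Sketch only: placeholder shift values, not the kernel's definitions. */
#include <stdint.h>

#define BPF_TRAMP_COOKIE_INDEX_SHIFT	32	/* placeholder */
#define BPF_TRAMP_IS_RETURN_SHIFT	63	/* placeholder */

static uint64_t fsession_func_meta(uint64_t nr_arg_slots, uint64_t cookie_idx,
				   int is_return)
{
	uint64_t meta = nr_arg_slots;	/* low bits: argument slot count */

	/* which stack cookie slot the next fsession program should use */
	meta |= cookie_idx << BPF_TRAMP_COOKIE_INDEX_SHIFT;

	/* set for the fexit half of a session, clear for the fentry half */
	if (is_return)
		meta |= 1ULL << BPF_TRAMP_IS_RETURN_SHIFT;

	return meta;
}

This also explains why the trampoline stores func_meta twice: once before the fentry programs run, and again with the is-return bit set before the fexit programs run.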
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index edf5342b982f..dbaafb64d3bd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -21333,29 +21333,29 @@ static int find_btf_percpu_datasec(struct btf *btf)
}
/*
- * Add btf to the used_btfs array and return the index. (If the btf was
- * already added, then just return the index.) Upon successful insertion
- * increase btf refcnt, and, if present, also refcount the corresponding
- * kernel module.
+ * Add btf to the env->used_btfs array. If needed, refcount the
+ * corresponding kernel module. To simplify caller's logic
+ * in case of error or if btf was added before the function
+ * decreases the btf refcount.
*/
static int __add_used_btf(struct bpf_verifier_env *env, struct btf *btf)
{
struct btf_mod_pair *btf_mod;
+ int ret = 0;
int i;
/* check whether we recorded this BTF (and maybe module) already */
for (i = 0; i < env->used_btf_cnt; i++)
if (env->used_btfs[i].btf == btf)
- return i;
+ goto ret_put;
if (env->used_btf_cnt >= MAX_USED_BTFS) {
verbose(env, "The total number of btfs per program has reached the limit of %u\n",
MAX_USED_BTFS);
- return -E2BIG;
+ ret = -E2BIG;
+ goto ret_put;
}
- btf_get(btf);
-
btf_mod = &env->used_btfs[env->used_btf_cnt];
btf_mod->btf = btf;
btf_mod->module = NULL;
@@ -21364,12 +21364,18 @@ static int __add_used_btf(struct bpf_verifier_env *env, struct btf *btf)
if (btf_is_module(btf)) {
btf_mod->module = btf_try_get_module(btf);
if (!btf_mod->module) {
- btf_put(btf);
- return -ENXIO;
+ ret = -ENXIO;
+ goto ret_put;
}
}
- return env->used_btf_cnt++;
+ env->used_btf_cnt++;
+ return 0;
+
+ret_put:
+ /* Either error or this BTF was already added */
+ btf_put(btf);
+ return ret;
}
/* replace pseudo btf_id with kernel symbol address */
@@ -21466,9 +21472,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
btf_fd = insn[1].imm;
if (btf_fd) {
- CLASS(fd, f)(btf_fd);
-
- btf = __btf_get_by_fd(f);
+ btf = btf_get_by_fd(btf_fd);
if (IS_ERR(btf)) {
verbose(env, "invalid module BTF object FD specified.\n");
return -EINVAL;
@@ -21478,17 +21482,17 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
return -EINVAL;
}
+ btf_get(btf_vmlinux);
btf = btf_vmlinux;
}
err = __check_pseudo_btf_id(env, insn, aux, btf);
- if (err)
+ if (err) {
+ btf_put(btf);
return err;
+ }
- err = __add_used_btf(env, btf);
- if (err < 0)
- return err;
- return 0;
+ return __add_used_btf(env, btf);
}
static bool is_tracing_prog_type(enum bpf_prog_type type)
@@ -25370,10 +25374,8 @@ static int add_fd_from_fd_array(struct bpf_verifier_env *env, int fd)
btf = __btf_get_by_fd(f);
if (!IS_ERR(btf)) {
- err = __add_used_btf(env, btf);
- if (err < 0)
- return err;
- return 0;
+ btf_get(btf);
+ return __add_used_btf(env, btf);
}
verbose(env, "fd %d is not pointing to valid bpf_map or btf\n", fd);
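The new contract is that __add_used_btf() always consumes the reference handed to it, dropping it on error or when the BTF was already recorded, so callers simply take a reference and return the function's result. A small self-contained sketch of that ownership convention (toy types, not kernel code):

#include <stdlib.h>

struct obj { int refcnt; };		/* stand-in for struct btf */

static void obj_get(struct obj *o) { o->refcnt++; }

static void obj_put(struct obj *o)
{
	if (--o->refcnt == 0)
		free(o);
}

/* Callee owns the passed-in reference on every path, like __add_used_btf(). */
int add_used(struct obj **table, int *cnt, int max, struct obj *o)
{
	for (int i = 0; i < *cnt; i++) {
		if (table[i] == o) {
			obj_put(o);	/* duplicate: drop the extra ref */
			return 0;
		}
	}
	if (*cnt >= max) {
		obj_put(o);		/* error: drop the ref */
		return -1;
	}
	table[(*cnt)++] = o;		/* success: keep the ref */
	return 0;
}

/* Caller-side pattern, mirroring check_pseudo_btf_id()/add_fd_from_fd_array(). */
int record(struct obj **table, int *cnt, int max, struct obj *o)
{
	obj_get(o);	/* take the reference that add_used() will consume */
	return add_used(table, cnt, max, o);
}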
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
index f25d66c8395e..974189da8a91 100644
--- a/tools/bpf/bpftool/net.c
+++ b/tools/bpf/bpftool/net.c
@@ -156,7 +156,7 @@ static int netlink_recv(int sock, __u32 nl_pid, __u32 seq,
bool multipart = true;
struct nlmsgerr *err;
struct nlmsghdr *nh;
- char buf[4096];
+ char buf[8192];
int len, ret;
while (multipart) {
@@ -201,6 +201,9 @@ static int netlink_recv(int sock, __u32 nl_pid, __u32 seq,
return ret;
}
}
+
+ if (len)
+ p_err("Invalid message or trailing data in Netlink response: %d bytes left", len);
}
ret = 0;
done:
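The larger buffer and the new warning target the same failure mode: when the kernel's dump does not fit (or a message is malformed), the standard netlink walk stops with bytes still unaccounted for. A minimal standalone sketch of that detection, using the stock <linux/netlink.h> macros (Linux-only, illustrative buffer handling):

#include <linux/netlink.h>
#include <stdio.h>
#include <sys/socket.h>

int drain_netlink(int sock)
{
	char buf[8192];
	struct nlmsghdr *nh;
	int len;

	len = recv(sock, buf, sizeof(buf), 0);
	if (len < 0)
		return -1;

	/* NLMSG_NEXT() subtracts each message's aligned size from len. */
	for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len); nh = NLMSG_NEXT(nh, len)) {
		/* process nh->nlmsg_type / payload here */
	}

	/* A clean dump consumes the buffer exactly; leftovers mean truncation. */
	if (len)
		fprintf(stderr, "netlink: %d trailing bytes left unparsed\n", len);

	return 0;
}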
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index c145da05a67c..9d160b5b9c0e 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -315,9 +315,6 @@ enum libbpf_tristate {
___param, sizeof(___param)); \
})
-extern int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args,
- __u32 len__sz) __weak __ksym;
-
#define bpf_stream_printk(stream_id, fmt, args...) \
({ \
static const char ___fmt[] = fmt; \
diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
index b842b83e2480..2fa434f09cce 100644
--- a/tools/lib/bpf/features.c
+++ b/tools/lib/bpf/features.c
@@ -506,6 +506,68 @@ static int probe_kern_arg_ctx_tag(int token_fd)
return probe_fd(prog_fd);
}
+static int probe_ldimm64_full_range_off(int token_fd)
+{
+ char log_buf[1024];
+ int prog_fd, map_fd;
+ int ret;
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts,
+ .token_fd = token_fd,
+ .map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ .log_buf = log_buf,
+ .log_size = sizeof(log_buf),
+ );
+ struct bpf_insn insns[] = {
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 1UL << 30),
+ BPF_EXIT_INSN(),
+ };
+ int insn_cnt = ARRAY_SIZE(insns);
+
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr", sizeof(int), 1, 1, &map_opts);
+ if (map_fd < 0) {
+ ret = -errno;
+ pr_warn("Error in %s(): %s. Couldn't create simple array map.\n",
+ __func__, errstr(ret));
+ return ret;
+ }
+ insns[0].imm = map_fd;
+
+ log_buf[0] = '\0';
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "global_reloc", "GPL", insns, insn_cnt, &prog_opts);
+ ret = -errno;
+
+ close(map_fd);
+
+ if (prog_fd >= 0) {
+ pr_warn("Error in %s(): Program loading unexpectedly succeeded.\n", __func__);
+ close(prog_fd);
+ return -EINVAL;
+ }
+
+ /*
+ * Feature is allowed if we're not failing with the error message
+ * "direct value offset of %u is not allowed" removed in
+ * 12a1fe6e12db ("bpf/verifier: Do not limit maximum direct offset into arena map").
+ * We should instead fail with "invalid access to map value pointer".
+ * Ensure we match with one of the two and we're not failing with a
+ * different, unexpected message.
+ */
+ if (strstr(log_buf, "direct value offset of"))
+ return 0;
+
+ if (!strstr(log_buf, "invalid access to map value pointer")) {
+ pr_warn("Error in %s(): Program unexpectedly failed with message: %s.\n",
+ __func__, log_buf);
+ return ret;
+ }
+
+ return 1;
+}
+
typedef int (*feature_probe_fn)(int /* token_fd */);
static struct kern_feature_cache feature_cache;
@@ -581,6 +643,9 @@ static struct kern_feature_desc {
[FEAT_BTF_QMARK_DATASEC] = {
"BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
},
+ [FEAT_LDIMM64_FULL_RANGE_OFF] = {
+ "full range LDIMM64 support", probe_ldimm64_full_range_off,
+ },
};
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 0c8bf0b5cce4..0be7017800fe 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -3009,9 +3009,6 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
memcpy(obj->arena_data, data, data_sz);
obj->arena_data_sz = data_sz;
- /* place globals at the end of the arena */
- obj->arena_data_off = mmap_sz - data_alloc_sz;
-
/* make bpf_map__init_value() work for ARENA maps */
map->mmaped = obj->arena_data;
@@ -4669,7 +4666,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
reloc_desc->type = RELO_DATA;
reloc_desc->insn_idx = insn_idx;
reloc_desc->map_idx = obj->arena_map_idx;
- reloc_desc->sym_off = sym->st_value + obj->arena_data_off;
+ reloc_desc->sym_off = sym->st_value;
map = &obj->maps[obj->arena_map_idx];
pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n",
@@ -6383,6 +6380,10 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
case RELO_DATA:
map = &obj->maps[relo->map_idx];
insn[1].imm = insn[0].imm + relo->sym_off;
+
+ if (relo->map_idx == obj->arena_map_idx)
+ insn[1].imm += obj->arena_data_off;
+
if (obj->gen_loader) {
insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
insn[0].imm = relo->map_idx;
@@ -7384,6 +7385,14 @@ static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_pat
bpf_object__sort_relos(obj);
}
+ /* place globals at the end of the arena (if supported) */
+ if (obj->arena_map_idx >= 0 && kernel_supports(obj, FEAT_LDIMM64_FULL_RANGE_OFF)) {
+ struct bpf_map *arena_map = &obj->maps[obj->arena_map_idx];
+
+ obj->arena_data_off = bpf_map_mmap_sz(arena_map) -
+ roundup(obj->arena_data_sz, sysconf(_SC_PAGE_SIZE));
+ }
+
/* Before relocating calls pre-process relocations and mark
* few ld_imm64 instructions that points to subprogs.
* Otherwise bpf_object__reloc_code() later would have to consider
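With the feature gate in place, arena globals are shifted to the very end of the arena mapping, so the offset is just the page-rounded data size subtracted from the mapping size. A worked example with illustrative numbers (a 4 MiB arena, 100 bytes of globals, 4 KiB pages):

#include <stdio.h>

static long round_up_to(long v, long align)
{
	return (v + align - 1) / align * align;
}

int main(void)
{
	long mmap_sz = 4L * 1024 * 1024;	/* bpf_map_mmap_sz(arena_map); illustrative */
	long data_sz = 100;			/* obj->arena_data_sz; illustrative */
	long page_sz = 4096;			/* sysconf(_SC_PAGE_SIZE); illustrative */

	/* Globals land in the last page of the arena: 4194304 - 4096 = 4190208. */
	printf("arena_data_off = %ld\n", mmap_sz - round_up_to(data_sz, page_sz));
	return 0;
}

On kernels without FEAT_LDIMM64_FULL_RANGE_OFF the offset stays 0 and globals remain at the start of the arena, which is why the relocation code now adds arena_data_off only for arena map relocations, at prepare time.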
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index fc59b21b51b5..974147e8a8aa 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -392,6 +392,8 @@ enum kern_feature_id {
FEAT_ARG_CTX_TAG,
/* Kernel supports '?' at the front of datasec names */
FEAT_BTF_QMARK_DATASEC,
+ /* Kernel supports LDIMM64 imm offsets past 512 MiB. */
+ FEAT_LDIMM64_FULL_RANGE_OFF,
__FEAT_CNT,
};
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index f4403e3cf994..78f92c39290a 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -581,7 +581,7 @@ int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz,
written = 0;
while (written < buf_sz) {
- ret = write(fd, buf, buf_sz);
+ ret = write(fd, buf + written, buf_sz - written);
if (ret < 0) {
ret = -errno;
pr_warn("failed to write '%s': %s\n", filename, errstr(ret));
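The one-line fix restores the usual partial-write pattern: advance into the buffer and shrink the remaining size after every short write(). As a standalone helper it looks roughly like this (a sketch, not the exact libbpf code; the EINTR retry is an extra nicety):

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

int write_all(int fd, const void *buf, size_t buf_sz)
{
	size_t written = 0;

	while (written < buf_sz) {
		ssize_t ret = write(fd, (const char *)buf + written, buf_sz - written);

		if (ret < 0) {
			if (errno == EINTR)
				continue;	/* interrupted, retry */
			return -errno;
		}
		written += ret;
	}
	return 0;
}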
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index c997e69d507f..c9a78fb16f11 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -143,7 +143,7 @@ static int libbpf_netlink_recv(int sock, __u32 nl_pid, int seq,
struct nlmsghdr *nh;
int len, ret;
- ret = alloc_iov(&iov, 4096);
+ ret = alloc_iov(&iov, 8192);
if (ret)
goto done;
@@ -212,6 +212,8 @@ start:
}
}
}
+ if (len)
+ pr_warn("Invalid message or trailing data in Netlink response: %d bytes left\n", len);
}
ret = 0;
done:
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index c6bf4dfb1495..6776158f1f3e 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -723,7 +723,7 @@ $(VERIFICATION_CERT) $(PRIVATE_KEY): $(VERIFY_SIG_SETUP)
# Generates a header with C array declaration, containing test_progs_verification_cert bytes
$(VERIFY_SIG_HDR): $(VERIFICATION_CERT)
$(Q)(echo "unsigned char test_progs_verification_cert[] = {"; \
- hexdump -v -e '12/1 " 0x%02x," "\n"' $< | sed 's/0x ,//g; $$s/,$$//'; \
+ od -v -t 'xC' -w12 $< | sed 's/ \(\S\+\)/ 0x\1,/g;s/^\S\+/ /;$$d'; \
echo "};"; \
echo "unsigned int test_progs_verification_cert_len = $$(wc -c < $<);") > $@
diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
index f372162c0280..03b46f17cf53 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_kptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
@@ -118,15 +118,16 @@ exit:
static int kern_sync_rcu_tasks_trace(struct rcu_tasks_trace_gp *rcu)
{
- long gp_seq = READ_ONCE(rcu->bss->gp_seq);
LIBBPF_OPTS(bpf_test_run_opts, opts);
+ int ret;
- if (!ASSERT_OK(bpf_prog_test_run_opts(bpf_program__fd(rcu->progs.do_call_rcu_tasks_trace),
- &opts), "do_call_rcu_tasks_trace"))
+ WRITE_ONCE(rcu->bss->done, 0);
+ ret = bpf_prog_test_run_opts(bpf_program__fd(rcu->progs.call_rcu_tasks_trace), &opts);
+ if (!ASSERT_OK(ret, "call_rcu_tasks_trace"))
return -EFAULT;
- if (!ASSERT_OK(opts.retval, "opts.retval == 0"))
+ if (!ASSERT_OK(opts.retval, "call_rcu_tasks_trace retval"))
return -EFAULT;
- while (gp_seq == READ_ONCE(rcu->bss->gp_seq))
+ while (!READ_ONCE(rcu->bss->done))
sched_yield();
return 0;
}
@@ -159,8 +160,6 @@ void serial_test_map_kptr(void)
skel = rcu_tasks_trace_gp__open_and_load();
if (!ASSERT_OK_PTR(skel, "rcu_tasks_trace_gp__open_and_load"))
return;
- if (!ASSERT_OK(rcu_tasks_trace_gp__attach(skel), "rcu_tasks_trace_gp__attach"))
- goto end;
if (test__start_subtest("success-map")) {
test_map_kptr_success(true);
@@ -180,7 +179,5 @@ void serial_test_map_kptr(void)
test_map_kptr_success(true);
}
-end:
rcu_tasks_trace_gp__destroy(skel);
- return;
}
diff --git a/tools/testing/selftests/bpf/progs/get_func_args_test.c b/tools/testing/selftests/bpf/progs/get_func_args_test.c
index 180ba5098ca1..075a1180ec26 100644
--- a/tools/testing/selftests/bpf/progs/get_func_args_test.c
+++ b/tools/testing/selftests/bpf/progs/get_func_args_test.c
@@ -167,7 +167,7 @@ int BPF_PROG(tp_test2)
}
__u64 test7_result = 0;
-#if defined(bpf_target_x86) || defined(bpf_target_arm64)
+#if defined(bpf_target_x86) || defined(bpf_target_arm64) || defined(bpf_target_riscv)
SEC("fsession/bpf_fentry_test1")
int BPF_PROG(test7)
{
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
index 43ff836a8ed8..45eaa54d1ac7 100644
--- a/tools/testing/selftests/bpf/progs/get_func_ip_test.c
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
@@ -106,7 +106,7 @@ int BPF_URETPROBE(test8, int ret)
__u64 test9_entry_result = 0;
__u64 test9_exit_result = 0;
-#if defined(bpf_target_x86) || defined(bpf_target_arm64)
+#if defined(bpf_target_x86) || defined(bpf_target_arm64) || defined(bpf_target_riscv)
SEC("fsession/bpf_fentry_test1")
int BPF_PROG(test9, int a)
{
diff --git a/tools/testing/selftests/bpf/progs/profiler.h b/tools/testing/selftests/bpf/progs/profiler.h
index 3bac4fdd4bdf..637fbf2c2652 100644
--- a/tools/testing/selftests/bpf/progs/profiler.h
+++ b/tools/testing/selftests/bpf/progs/profiler.h
@@ -169,7 +169,7 @@ enum bpf_function_id {
profiler_bpf_sched_process_exec,
profiler_bpf_sched_process_exit,
profiler_bpf_sys_enter_kill,
- profiler_bpf_do_filp_open_ret,
+ profiler_bpf_do_file_open_ret,
profiler_bpf_sched_process_fork,
profiler_bpf_vfs_link,
profiler_bpf_vfs_symlink,
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index 813143b4985d..9044dd8aff11 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -751,11 +751,11 @@ out:
return 0;
}
-SEC("kretprobe/do_filp_open")
-int kprobe_ret__do_filp_open(struct pt_regs* ctx)
+SEC("kretprobe/do_file_open")
+int kprobe_ret__do_file_open(struct pt_regs *ctx)
{
struct bpf_func_stats_ctx stats_ctx;
- bpf_stats_enter(&stats_ctx, profiler_bpf_do_filp_open_ret);
+ bpf_stats_enter(&stats_ctx, profiler_bpf_do_file_open_ret);
struct file* filp = (struct file*)PT_REGS_RC_CORE(ctx);
diff --git a/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c b/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c
index df4873558634..189c05c6abcc 100644
--- a/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c
+++ b/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c
@@ -1,36 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
-struct task_ls_map {
- __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
- __uint(map_flags, BPF_F_NO_PREALLOC);
- __type(key, int);
- __type(value, int);
-} task_ls_map SEC(".maps");
-
-long gp_seq;
+int done;
SEC("syscall")
-int do_call_rcu_tasks_trace(void *ctx)
-{
- struct task_struct *current;
- int *v;
-
- current = bpf_get_current_task_btf();
- v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
- if (!v)
- return 1;
- /* Invoke call_rcu_tasks_trace */
- return bpf_task_storage_delete(&task_ls_map, current);
-}
-
-SEC("kprobe/rcu_tasks_trace_postgp")
-int rcu_tasks_trace_postgp(void *ctx)
+int call_rcu_tasks_trace(void *ctx)
{
- __sync_add_and_fetch(&gp_seq, 1);
- return 0;
+ return bpf_kfunc_call_test_call_rcu_tasks_trace(&done);
}
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
index 0a0f371a2dec..fa73b17cb999 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_meta.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
@@ -1,12 +1,12 @@
-#include <stdbool.h>
-#include <linux/bpf.h>
-#include <linux/errno.h>
-#include <linux/if_ether.h>
-#include <linux/pkt_cls.h>
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
+#include <errno.h>
+
#include "bpf_kfuncs.h"
+#include "bpf_tracing_net.h"
#define META_SIZE 32
@@ -42,7 +42,7 @@ static bool check_metadata(const char *file, int line, __u8 *meta_have)
if (!__builtin_memcmp(meta_have, meta_want, META_SIZE))
return true;
- bpf_stream_printk(BPF_STREAM_STDERR,
+ bpf_stream_printk(BPF_STDERR,
"FAIL:%s:%d: metadata mismatch\n"
" have:\n %pI6\n %pI6\n"
" want:\n %pI6\n %pI6\n",
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
index 186a25ab429a..e62c6b78657f 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
@@ -18,6 +18,7 @@
#include <linux/in6.h>
#include <linux/un.h>
#include <linux/filter.h>
+#include <linux/rcupdate_trace.h>
#include <net/sock.h>
#include <linux/namei.h>
#include "bpf_testmod.h"
@@ -885,6 +886,32 @@ __bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
{
}
+struct bpf_kfunc_rcu_tasks_trace_data {
+ struct rcu_head rcu;
+ int *done;
+};
+
+static void bpf_kfunc_rcu_tasks_trace_cb(struct rcu_head *rhp)
+{
+ struct bpf_kfunc_rcu_tasks_trace_data *data;
+
+ data = container_of(rhp, struct bpf_kfunc_rcu_tasks_trace_data, rcu);
+ WRITE_ONCE(*data->done, 1);
+ kfree(data);
+}
+
+__bpf_kfunc int bpf_kfunc_call_test_call_rcu_tasks_trace(int *done)
+{
+ struct bpf_kfunc_rcu_tasks_trace_data *data;
+
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+ if (!data)
+ return -ENOMEM;
+ data->done = done;
+ call_rcu_tasks_trace(&data->rcu, bpf_kfunc_rcu_tasks_trace_cb);
+ return 0;
+}
+
__bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
{
int proto;
@@ -1222,6 +1249,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_call_rcu_tasks_trace)
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
index d5c5454e257e..b393bf771131 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
@@ -118,6 +118,7 @@ void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym;
void bpf_kfunc_call_test_destructive(void) __ksym;
void bpf_kfunc_call_test_sleepable(void) __ksym;
+int bpf_kfunc_call_test_call_rcu_tasks_trace(int *done) __ksym;
void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p);
struct prog_test_member *bpf_kfunc_call_memb_acquire(void);