From 5964d1e4594eb1dbfc1e2a34ec89eb48f6b03e75 Mon Sep 17 00:00:00 2001
From: Li kunyu
Date: Sat, 5 Aug 2023 01:59:29 +0800
Subject: bpf: bpf_struct_ops: Remove unnecessary initial values of variables

err and tlinks are assigned before they are read, so they do not need to
be initialized at declaration.

Signed-off-by: Li kunyu
Reviewed-by: Simon Horman
Link: https://lore.kernel.org/r/20230804175929.2867-1-kunyu@nfschina.com
Signed-off-by: Martin KaFai Lau
---
 kernel/bpf/bpf_struct_ops.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'kernel')

diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 116a0ce378ec..eaff04eefb31 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -374,9 +374,9 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 	struct bpf_struct_ops_value *uvalue, *kvalue;
 	const struct btf_member *member;
 	const struct btf_type *t = st_ops->type;
-	struct bpf_tramp_links *tlinks = NULL;
+	struct bpf_tramp_links *tlinks;
 	void *udata, *kdata;
-	int prog_fd, err = 0;
+	int prog_fd, err;
 	void *image, *image_end;
 	u32 i;
@@ -815,7 +815,7 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
 	struct bpf_struct_ops_map *st_map, *old_st_map;
 	struct bpf_map *old_map;
 	struct bpf_struct_ops_link *st_link;
-	int err = 0;
+	int err;

 	st_link = container_of(link, struct bpf_struct_ops_link, link);
 	st_map = container_of(new_map, struct bpf_struct_ops_map, map);
-- cgit v1.2.3

From d210f9735e13e9b1ef6ffbb636ee051f615bd109 Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Fri, 4 Aug 2023 15:11:11 +0200
Subject: bpf: Fix mprog detachment for empty mprog entry

syzbot reported an UBSAN array-index-out-of-bounds access in
bpf_mprog_read() upon bpf_mprog_detach(). While it did not have a
reproducer, I was able to manually reproduce through an empty mprog
entry which just has miniq present. The latter is important given
otherwise we get an ENOENT error as tcx detaches the whole mprog entry.

The index 4294967295 was triggered via NULL dtuple.prog which then
attempts to detach from the back. bpf_mprog_fetch() in this case hit
idx == total and therefore tried to grab the entry at idx - 1.

Fix it by adding an explicit bpf_mprog_total() check in
bpf_mprog_detach() and bailing out early with ENOENT.

Fixes: 053c8e1f235d ("bpf: Add generic attach/detach/query API for multi-progs")
Reported-by: syzbot+0c06ba0f831fe07a8f27@syzkaller.appspotmail.com
Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/r/20230804131112.11012-1-daniel@iogearbox.net
Signed-off-by: Martin KaFai Lau
---
 kernel/bpf/mprog.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'kernel')

diff --git a/kernel/bpf/mprog.c b/kernel/bpf/mprog.c
index f7816d2bc3e4..32d2c4829eb8 100644
--- a/kernel/bpf/mprog.c
+++ b/kernel/bpf/mprog.c
@@ -337,6 +337,8 @@ int bpf_mprog_detach(struct bpf_mprog_entry *entry,
 		return -EINVAL;
 	if (revision && revision != bpf_mprog_revision(entry))
 		return -ESTALE;
+	if (!bpf_mprog_total(entry))
+		return -ENOENT;
 	ret = bpf_mprog_tuple_relative(&rtuple, id_or_fd, flags,
 				       prog ? prog->type : BPF_PROG_TYPE_UNSPEC);
-- cgit v1.2.3
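
For context on the detach fix above, the failure mode reduces to a generic
"remove the last element" pattern: with an unsigned index, an empty array
turns "last" into 0 - 1 = 4294967295. A minimal userspace C sketch of the
hazard and of the guard (illustrative only; none of the names below are the
kernel's):

  #include <stdio.h>

  /* Mimics detaching from the back of a prog array. */
  static int detach_last(const int *progs, unsigned int total)
  {
      if (!total)        /* the analogue of the bpf_mprog_total() check */
          return -1;     /* the kernel bails out with -ENOENT here */

      /* Without the guard above, total - 1 wraps around when total == 0. */
      printf("detaching entry %u (prog %d)\n", total - 1, progs[total - 1]);
      return 0;
  }

  int main(void)
  {
      int progs[] = { 10, 20, 30 };

      detach_last(progs, 3);  /* ok: removes index 2 */
      detach_last(progs, 0);  /* caught by the guard instead of reading progs[4294967295] */
      return 0;
  }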

From 5426700e6841bf72e652e34b5cec68eadf442435 Mon Sep 17 00:00:00 2001
From: Kui-Feng Lee
Date: Thu, 3 Aug 2023 16:12:06 -0700
Subject: bpf: fix bpf_dynptr_slice() to stop returning an ERR_PTR.

Verify that the pointer obtained from bpf_xdp_pointer() is either an
error or NULL before returning it.

The function bpf_dynptr_slice() mistakenly returned an ERR_PTR. Instead
of solely checking for NULL, it should also verify whether the pointer
returned by bpf_xdp_pointer() is an error or NULL.

Reported-by: Dan Carpenter
Closes: https://lore.kernel.org/bpf/d1360219-85c3-4a03-9449-253ea905f9d1@moroto.mountain/
Fixes: 66e3a13e7c2c ("bpf: Add bpf_dynptr_slice and bpf_dynptr_slice_rdwr")
Suggested-by: Alexei Starovoitov
Signed-off-by: Kui-Feng Lee
Acked-by: Yonghong Song
Link: https://lore.kernel.org/r/20230803231206.1060485-1-thinker.li@gmail.com
Signed-off-by: Martin KaFai Lau
---
 kernel/bpf/helpers.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 56ce5008aedd..eb91cae0612a 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2270,7 +2270,7 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset
 	case BPF_DYNPTR_TYPE_XDP:
 	{
 		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);

-		if (xdp_ptr)
+		if (!IS_ERR_OR_NULL(xdp_ptr))
 			return xdp_ptr;
 		if (!buffer__opt)
-- cgit v1.2.3
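
The pattern behind the one-line fix above is ordinary <linux/err.h> usage
rather than anything dynptr-specific: a callee that can hand back a valid
pointer, NULL, or an ERR_PTR-encoded errno must be checked with
IS_ERR_OR_NULL() instead of a plain NULL test. A sketch with a made-up
helper (illustrative only, not the patched code):

  #include <linux/err.h>
  #include <linux/errno.h>

  struct foo { int val; };

  /* Hypothetical lookup: may return a valid pointer, NULL, or ERR_PTR(-E...). */
  static struct foo *lookup_foo(int id);

  static int use_foo(int id)
  {
      struct foo *p = lookup_foo(id);

      /*
       * A bare "if (p)" would accept ERR_PTR(-EINVAL) as a valid pointer and
       * dereference a poisoned address; IS_ERR_OR_NULL() rejects both failure
       * encodings.
       */
      if (IS_ERR_OR_NULL(p))
          return p ? PTR_ERR(p) : -ENOENT;

      return p->val;
  }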

From 1e8e2efb34023c5113ec679eae3c59ae2cd57b13 Mon Sep 17 00:00:00 2001
From: Yang Yingliang
Date: Thu, 3 Aug 2023 10:31:28 +0800
Subject: bpf: change bpf_alu_sign_string and bpf_movsx_string to static

The bpf_alu_sign_string and bpf_movsx_string arrays introduced in commit
f835bb622299 ("bpf: Add kernel/bpftool asm support for new instructions")
are now only used in disasm.c, so change them to static.

Signed-off-by: Yang Yingliang
Reported-by: kernel test robot
Closes: https://lore.kernel.org/oe-kbuild-all/202308050615.wxAn1v2J-lkp@intel.com/
Acked-by: Yonghong Song
Link: https://lore.kernel.org/r/20230803023128.3753323-1-yangyingliang@huawei.com
Signed-off-by: Martin KaFai Lau
---
 kernel/bpf/disasm.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index ef7c107c7b8f..49940c26a227 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -87,12 +87,12 @@ const char *const bpf_alu_string[16] = {
 	[BPF_END >> 4]  = "endian",
 };

-const char *const bpf_alu_sign_string[16] = {
+static const char *const bpf_alu_sign_string[16] = {
 	[BPF_DIV >> 4]  = "s/=",
 	[BPF_MOD >> 4]  = "s%=",
 };

-const char *const bpf_movsx_string[4] = {
+static const char *const bpf_movsx_string[4] = {
 	[0] = "(s8)",
 	[1] = "(s16)",
 	[3] = "(s32)",
-- cgit v1.2.3

From db2baf82b098aa10ac16f34e44732ec450fb11c7 Mon Sep 17 00:00:00 2001
From: Yonghong Song
Date: Mon, 7 Aug 2023 10:57:21 -0700
Subject: bpf: Fix an incorrect verification success with movsx insn

syzbot reports a verifier bug which triggers a runtime panic.
The test bpf program is:

   0: (62) *(u32 *)(r10 -8) = 553656332
   1: (bf) r1 = (s16)r10
   2: (07) r1 += -8
   3: (b7) r2 = 3
   4: (bd) if r2 <= r1 goto pc+0
   5: (85) call bpf_trace_printk#-138320
   6: (b7) r0 = 0
   7: (95) exit

At insn 1, the current implementation keeps 'r1' as a frame pointer, which
caused the later bpf_trace_printk helper call to crash since the frame
pointer address is no longer valid. Note that at insn 4, the
'pointer vs. scalar' comparison is allowed for privileged prog run.

To fix the problem with the above insn 1, the patch adopts a pattern
similar to the existing 'R1 = (u32) R2' handling. For unprivileged prog
run, verification will fail with 'R sign-extension part of pointer'. For
privileged prog run, the dst_reg 'r1' will be marked as an unknown scalar,
so the later 'bpf_trace_printk' helper will complain since it expects
certain pointers.

Reported-by: syzbot+d61b595e9205573133b3@syzkaller.appspotmail.com
Fixes: 8100928c8814 ("bpf: Support new sign-extension mov insns")
Signed-off-by: Yonghong Song
Acked-by: Eduard Zingerman
Link: https://lore.kernel.org/r/20230807175721.671696-1-yonghong.song@linux.dev
Signed-off-by: Martin KaFai Lau
---
 kernel/bpf/verifier.c | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

(limited to 'kernel')

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 132f25dab931..4ccca1f6c998 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -13165,17 +13165,26 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 				dst_reg->subreg_def = DEF_NOT_SUBREG;
 			} else {
 				/* case: R1 = (s8, s16 s32)R2 */
-				bool no_sext;
-
-				no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
-				if (no_sext && need_id)
-					src_reg->id = ++env->id_gen;
-				copy_register_state(dst_reg, src_reg);
-				if (!no_sext)
-					dst_reg->id = 0;
-				coerce_reg_to_size_sx(dst_reg, insn->off >> 3);
-				dst_reg->live |= REG_LIVE_WRITTEN;
-				dst_reg->subreg_def = DEF_NOT_SUBREG;
+				if (is_pointer_value(env, insn->src_reg)) {
+					verbose(env,
+						"R%d sign-extension part of pointer\n",
+						insn->src_reg);
+					return -EACCES;
+				} else if (src_reg->type == SCALAR_VALUE) {
+					bool no_sext;
+
+					no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
+					if (no_sext && need_id)
+						src_reg->id = ++env->id_gen;
+					copy_register_state(dst_reg, src_reg);
+					if (!no_sext)
+						dst_reg->id = 0;
+					coerce_reg_to_size_sx(dst_reg, insn->off >> 3);
+					dst_reg->live |= REG_LIVE_WRITTEN;
+					dst_reg->subreg_def = DEF_NOT_SUBREG;
+				} else {
+					mark_reg_unknown(env, regs, insn->dst_reg);
+				}
 			}
 		} else {
 			/* R1 = (u32) R2 */
-- cgit v1.2.3
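
For readers who have not seen the new instruction, the scalar semantics that
coerce_reg_to_size_sx() models for 'r1 = (s16)r2' are ordinary C sign
extension of the low bits into a 64-bit register; applying that to a frame
pointer yields a meaningless value, which is why the verifier now rejects it
or downgrades the destination to an unknown scalar. A rough userspace sketch
(illustrative only, not kernel code):

  #include <stdint.h>
  #include <stdio.h>

  /* Roughly what "r1 = (s16)r2" does to a scalar: take the low 16 bits of
   * the source and sign-extend them into the 64-bit destination.
   */
  static uint64_t movsx16(uint64_t src)
  {
      return (uint64_t)(int64_t)(int16_t)(src & 0xffff);
  }

  int main(void)
  {
      printf("%#llx\n", (unsigned long long)movsx16(0x7fff));  /* 0x7fff */
      printf("%#llx\n", (unsigned long long)movsx16(0x8000));  /* 0xffffffffffff8000 */
      return 0;
  }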

From a3c485a5d8d47af5d2d1a0e5c3b7a1ed223669f9 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Mon, 7 Aug 2023 10:59:54 +0200
Subject: bpf: Add support for bpf_get_func_ip helper for uprobe program

Adding support for bpf_get_func_ip helper for uprobe program to return
probed address for both uprobe and return uprobe.

We discussed this in [1] and agreed that uprobe can have special use of
bpf_get_func_ip helper that differs from kprobe.

The kprobe bpf_get_func_ip returns:
  - address of the function if the probe is attached on function entry,
    for both kprobe and return kprobe
  - 0 if the probe is not attached on function entry

The uprobe bpf_get_func_ip returns:
  - address of the probe for both uprobe and return uprobe

The reason for this semantic change is that the kernel can't really tell
whether the probed user space address is a function entry.

The uprobe program is actually a kprobe type program attached as uprobe.
One of the consequences of this design is that uprobes do not have their
own set of helpers, but share them with kprobes.

As we need different functionality for the bpf_get_func_ip helper for
uprobe, I'm adding a bool value to bpf_trace_run_ctx, so the helper can
detect that it's executed in uprobe context and call specific code.

The is_uprobe bool is set to true in bpf_prog_run_array_sleepable, which
is currently used only for executing bpf programs in uprobe.

Renaming bpf_prog_run_array_sleepable to bpf_prog_run_array_uprobe to
reflect that it's only used for uprobes and that it sets
run_ctx.is_uprobe, as suggested by Yafang Shao.

Suggested-by: Andrii Nakryiko
Tested-by: Alan Maguire
[1] https://lore.kernel.org/bpf/CAEf4BzZ=xLVkG5eurEuvLU79wAMtwho7ReR+XJAgwhFF4M-7Cg@mail.gmail.com/
Signed-off-by: Jiri Olsa
Tested-by: Viktor Malik
Acked-by: Yonghong Song
Link: https://lore.kernel.org/r/20230807085956.2344866-2-jolsa@kernel.org
Signed-off-by: Martin KaFai Lau
---
 kernel/trace/bpf_trace.c    | 11 ++++++++++-
 kernel/trace/trace_probe.h  |  5 +++++
 kernel/trace/trace_uprobe.c |  7 +------
 3 files changed, 16 insertions(+), 7 deletions(-)

(limited to 'kernel')

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d6296d51a826..792445e1f3f0 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1055,7 +1055,16 @@ static unsigned long get_entry_ip(unsigned long fentry_ip)

 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
 {
-	struct kprobe *kp = kprobe_running();
+	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
+	struct kprobe *kp;
+
+#ifdef CONFIG_UPROBES
+	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
+	if (run_ctx->is_uprobe)
+		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
+#endif
+
+	kp = kprobe_running();

 	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
 		return 0;
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 01ea148723de..7dde806be91e 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -519,3 +519,8 @@ void __trace_probe_log_err(int offset, int err);

 #define trace_probe_log_err(offs, err)	\
 	__trace_probe_log_err(offs, TP_ERR_##err)
+
+struct uprobe_dispatch_data {
+	struct trace_uprobe	*tu;
+	unsigned long		bp_addr;
+};
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 555c223c3232..576b3bcb8ebd 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -88,11 +88,6 @@ static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
 static int register_uprobe_event(struct trace_uprobe *tu);
 static int unregister_uprobe_event(struct trace_uprobe *tu);

-struct uprobe_dispatch_data {
-	struct trace_uprobe	*tu;
-	unsigned long		bp_addr;
-};
-
 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
 static int uretprobe_dispatcher(struct uprobe_consumer *con,
 				unsigned long func, struct pt_regs *regs);
@@ -1352,7 +1347,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
 	if (bpf_prog_array_valid(call)) {
 		u32 ret;

-		ret = bpf_prog_run_array_sleepable(call->prog_array, regs, bpf_prog_run);
+		ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);
 		if (!ret)
 			return;
 	}
-- cgit v1.2.3
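
To make the new semantics concrete, a minimal libbpf-style sketch of a
uprobe program using the helper (not part of the patch; the target binary
and function name are placeholders):

  // SPDX-License-Identifier: GPL-2.0
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char LICENSE[] SEC("license") = "GPL";

  /* Entry uprobe: bpf_get_func_ip() now returns the probed user-space address. */
  SEC("uprobe//usr/bin/example:target_func")
  int BPF_KPROBE(uprobe_target)
  {
      unsigned long ip = bpf_get_func_ip(ctx);

      bpf_printk("uprobe at %lx", ip);
      return 0;
  }

  /* Return uprobe: the helper reports the same probed address, not the
   * return address.
   */
  SEC("uretprobe//usr/bin/example:target_func")
  int BPF_KRETPROBE(uretprobe_target)
  {
      unsigned long ip = bpf_get_func_ip(ctx);

      bpf_printk("uretprobe, probed address %lx", ip);
      return 0;
  }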

From 526bc5ba19e8701a2b03fdbd2c01e684f0cf9010 Mon Sep 17 00:00:00 2001
From: Yue Haibing
Date: Tue, 8 Aug 2023 22:55:31 +0800
Subject: bpf: lru: Remove unused declaration bpf_lru_promote()

Commit 3a08c2fd7634 ("bpf: LRU List") declared but never implemented this.

Signed-off-by: Yue Haibing
Link: https://lore.kernel.org/r/20230808145531.19692-1-yuehaibing@huawei.com
Signed-off-by: Martin KaFai Lau
---
 kernel/bpf/bpf_lru_list.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/bpf/bpf_lru_list.h b/kernel/bpf/bpf_lru_list.h
index 8f3c8b2b4490..cbd8d3720c2b 100644
--- a/kernel/bpf/bpf_lru_list.h
+++ b/kernel/bpf/bpf_lru_list.h
@@ -75,6 +75,5 @@ void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
 void bpf_lru_destroy(struct bpf_lru *lru);
 struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash);
 void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node);
-void bpf_lru_promote(struct bpf_lru *lru, struct bpf_lru_node *node);

 #endif
-- cgit v1.2.3