author    Hengqi Chen <hengqi.chen@gmail.com>    2026-04-23 12:49:36 +0800
committer Huacai Chen <chenhuacai@loongson.cn>   2026-04-23 12:49:36 +0800
commit    e815df29b6a5e59293500085a010d5882374cb3e (patch)
tree      bdeb7066709bd9621f902740b865a9211c8ed037
parent    6ef04707e8eee09360f70812c0ac63c712460bd0 (diff)
LoongArch: BPF: Add fsession support for trampolines
Implement BPF_TRACE_FSESSION support in the LoongArch BPF JIT. The logic
here is almost identical to what has been done in the RISC-V JIT. The key
changes are:

- Allocate stack space for function meta and session cookies
- Introduce invoke_bpf() as a wrapper around invoke_bpf_prog() that
  populates session cookies before each invocation
- Implement the bpf_jit_supports_fsession() callback

Tested-by: Vincent Li <vincent.mc.li@gmail.com>
Reviewed-by: Menglong Dong <menglong8.dong@gmail.com>
Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
-rw-r--r--    arch/loongarch/net/bpf_jit.c | 76
1 file changed, 66 insertions(+), 10 deletions(-)
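
Before the diff, for context, a minimal sketch of the program side this trampoline change serves. It assumes BPF_TRACE_FSESSION programs reuse the bpf_session_cookie() and bpf_session_is_return() kfuncs already exposed to kprobe session programs, with the cookie pointer backed by the per-call stack slot reserved in this patch; the SEC() name and the traced function are illustrative, not taken from this commit.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/* kfuncs already used by kprobe session programs */
extern __u64 *bpf_session_cookie(void) __ksym;
extern bool bpf_session_is_return(void) __ksym;

SEC("fsession/do_nanosleep")	/* section name is an assumption */
int BPF_PROG(measure_latency)
{
	__u64 *cookie = bpf_session_cookie();	/* per-call trampoline stack slot */

	if (!cookie)
		return 0;

	if (!bpf_session_is_return())
		*cookie = bpf_ktime_get_ns();	/* entry pass: stash a timestamp */
	else
		bpf_printk("latency: %llu ns", bpf_ktime_get_ns() - *cookie);

	return 0;
}
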
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index a6c001583083..ec3c25b45882 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1737,6 +1737,29 @@ static int invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
return ret;
}
+static int invoke_bpf(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
+ int args_off, int retval_off, int run_ctx_off,
+ int func_meta_off, bool save_ret, u64 func_meta, int cookie_off)
+{
+ int i, cur_cookie = (cookie_off - args_off) / 8;
+
+ for (i = 0; i < tl->nr_links; i++) {
+ int err;
+
+ if (bpf_prog_calls_session_cookie(tl->links[i])) {
+ u64 meta = func_meta | ((u64)cur_cookie << BPF_TRAMP_COOKIE_INDEX_SHIFT);
+
+ emit_store_stack_imm64(ctx, LOONGARCH_GPR_T1, -func_meta_off, meta);
+ cur_cookie--;
+ }
+ err = invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off, run_ctx_off, save_ret);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
void *arch_alloc_bpf_trampoline(unsigned int size)
{
return bpf_prog_pack_alloc(size, jit_fill_hole);
@@ -1788,8 +1811,10 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
void *func_addr, u32 flags)
{
int i, ret, save_ret;
+ int cookie_cnt, cookie_off;
int stack_size, args_off, stk_args_off, nr_arg_slots = 0;
- int retval_off, nregs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off;
+ int retval_off, func_meta_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off;
+ unsigned long long func_meta;
bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
void *orig_call = func_addr;
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
@@ -1813,10 +1838,14 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
* [ ... ]
* FP - args_off [ arg reg1 ]
*
- * FP - nregs_off [ arg regs count ]
+ * FP - func_meta_off [ regs count, etc ]
*
* FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG
*
+ * [ stack cookie N ]
+ * [ ... ]
+ * FP - cookie_off [ stack cookie 1 ]
+ *
* FP - run_ctx_off [ bpf_tramp_run_ctx ]
*
* FP - sreg_off [ callee saved reg ]
@@ -1859,9 +1888,9 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
stack_size += nr_arg_slots * 8;
args_off = stack_size;
- /* Room of trampoline frame to store args number */
+ /* Room of function metadata, such as regs count */
stack_size += 8;
- nregs_off = stack_size;
+ func_meta_off = stack_size;
/* Room of trampoline frame to store ip address */
if (flags & BPF_TRAMP_F_IP_ARG) {
@@ -1869,6 +1898,12 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
ip_off = stack_size;
}
+ cookie_cnt = bpf_fsession_cookie_cnt(tlinks);
+
+ /* Room for session cookies */
+ stack_size += cookie_cnt * 8;
+ cookie_off = stack_size;
+
/* Room of trampoline frame to store struct bpf_tramp_run_ctx */
stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
run_ctx_off = stack_size;
@@ -1929,10 +1964,20 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
emit_store_stack_imm64(ctx, LOONGARCH_GPR_T1, -ip_off, (u64)func_addr);
/* store arg regs count */
- emit_store_stack_imm64(ctx, LOONGARCH_GPR_T1, -nregs_off, nr_arg_slots);
+ func_meta = nr_arg_slots;
+ emit_store_stack_imm64(ctx, LOONGARCH_GPR_T1, -func_meta_off, func_meta);
store_args(ctx, nr_arg_slots, args_off);
+ if (bpf_fsession_cnt(tlinks)) {
+ /* clear all session cookies' value */
+ for (i = 0; i < cookie_cnt; i++)
+ emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -cookie_off + 8 * i);
+
+ /* clear return value to make sure fentry always get 0 */
+ emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -retval_off);
+ }
+
/* To traced function */
/* Ftrace jump skips 2 NOP instructions */
if (is_kernel_text((unsigned long)orig_call) ||
@@ -1949,9 +1994,9 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
return ret;
}
- for (i = 0; i < fentry->nr_links; i++) {
- ret = invoke_bpf_prog(ctx, fentry->links[i], args_off, retval_off,
- run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET);
+ if (fentry->nr_links) {
+ ret = invoke_bpf(ctx, fentry, args_off, retval_off, run_ctx_off, func_meta_off,
+ flags & BPF_TRAMP_F_RET_FENTRY_RET, func_meta, cookie_off);
if (ret)
return ret;
}
@@ -1995,8 +2040,14 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
*branches[i] = larch_insn_gen_bne(LOONGARCH_GPR_T1, LOONGARCH_GPR_ZERO, offset);
}
- for (i = 0; i < fexit->nr_links; i++) {
- ret = invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off, run_ctx_off, false);
+ /* Set "is_return" flag for fsession */
+ func_meta |= (1ULL << BPF_TRAMP_IS_RETURN_SHIFT);
+ if (bpf_fsession_cnt(tlinks))
+ emit_store_stack_imm64(ctx, LOONGARCH_GPR_T1, -func_meta_off, func_meta);
+
+ if (fexit->nr_links) {
+ ret = invoke_bpf(ctx, fexit, args_off, retval_off, run_ctx_off,
+ func_meta_off, false, func_meta, cookie_off);
if (ret)
goto out;
}
@@ -2331,6 +2382,11 @@ bool bpf_jit_supports_arena(void)
return true;
}
+bool bpf_jit_supports_fsession(void)
+{
+ return true;
+}
+
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
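
A closing note on the metadata word used above: the value stored at FP - func_meta_off starts out as the argument slot count, gains a per-program cookie index in invoke_bpf(), and has an "is_return" bit set before the fexit pass. A self-contained sketch of that packing follows; the shift values are placeholders so the fragment compiles on its own (the real ones come from the shared BPF trampoline definitions), and the function name is made up for illustration.

#include <stdbool.h>
#include <stdint.h>

/* Placeholder shift values, NOT the kernel's definitions. */
#define BPF_TRAMP_COOKIE_INDEX_SHIFT	32
#define BPF_TRAMP_IS_RETURN_SHIFT	63

static uint64_t pack_func_meta(uint64_t nr_arg_slots, uint64_t cookie_index, bool is_return)
{
	uint64_t meta = nr_arg_slots;				/* low bits: argument slot count */

	meta |= cookie_index << BPF_TRAMP_COOKIE_INDEX_SHIFT;	/* which stack cookie slot to use */
	if (is_return)
		meta |= 1ULL << BPF_TRAMP_IS_RETURN_SHIFT;	/* set only for the fexit pass */
	return meta;
}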