author    Alexei Starovoitov <ast@kernel.org>    2026-01-20 16:15:58 -0800
committer Alexei Starovoitov <ast@kernel.org>    2026-01-20 16:22:38 -0800
commit    b236134f70ba1e98a85d00623ea8fafb41dacf7f (patch)
tree      8d9bf4462f03a5f2599978c564601043240e62ad /kernel
parent    2e6690d4f7fc41c4fae7d0a4c0bf11f1973e5650 (diff)
parent    74bc4f6127207624ec06f0d0984b280a390992aa (diff)
Merge branch 'bpf-kernel-functions-with-kf_implicit_args'
Ihor Solodrai says:

====================
bpf: Kernel functions with KF_IMPLICIT_ARGS

This series implements a generic "implicit arguments" feature for BPF
kernel functions. For context, see prior work [1][2].

A mechanism is created for kfuncs to have arguments that are not visible
to BPF programs and are provided to the kernel function implementation
by the verifier. This mechanism is then used in the kfuncs that have a
parameter with the __prog annotation [3], which is the current way of
passing a struct bpf_prog_aux pointer to kfuncs.

A function with implicit arguments is declared with the KF_IMPLICIT_ARGS
flag in its BTF_ID_FLAGS entry. In this series, only a pointer to struct
bpf_prog_aux can be implicit, although it is simple to extend this to
more types.

The verifier handles a kfunc with KF_IMPLICIT_ARGS by resolving it to a
different (actual) BTF prototype early in verification (patch #3). A
<kfunc>_impl function generated in BTF for a kfunc with implicit args
has neither a "bpf_kfunc" decl tag nor a kernel address, so the verifier
will reject a program trying to call such an _impl kfunc. Calling
<kfunc>_impl functions from BPF is only allowed for kfuncs with an
explicit kernel (or kmodule) declaration, that is, in "legacy" cases.
As of this series there are no legacy kernel functions, since all __prog
users are migrated to KF_IMPLICIT_ARGS; however, the implementation
allows for legacy cases in principle.

The series removes the following BPF kernel functions:

  - bpf_stream_vprintk_impl
  - bpf_task_work_schedule_resume_impl
  - bpf_task_work_schedule_signal_impl
  - bpf_wq_set_callback_impl

This will break existing BPF programs calling these functions on new
kernels (the verifier will refuse to load them). To mitigate, BPF users
are advised to use the following pattern [4], which can be wrapped in a
macro:

  if (xxx_impl)
          xxx_impl(..., NULL);
  else
          xxx(...);
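For instance, a weak-ksym compatibility wrapper along these lines should
work (a sketch only, not part of this series: the _compat macro name is
made up, the __weak __ksym extern style follows common libbpf practice,
and struct bpf_wq is assumed to come from vmlinux.h):

  /* Old kernels expose only the _impl kfunc with an explicit aux__prog
   * argument; new kernels expose bpf_wq_set_callback() and supply the
   * implicit struct bpf_prog_aux * themselves. __weak __ksym externs
   * resolve to NULL on kernels that lack the symbol.
   */
  extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
                  int (callback_fn)(void *map, int *key, void *value),
                  unsigned int flags, void *aux__prog) __weak __ksym;
  extern int bpf_wq_set_callback(struct bpf_wq *wq,
                  int (callback_fn)(void *map, int *key, void *value),
                  unsigned int flags) __weak __ksym;

  #define bpf_wq_set_callback_compat(wq, cb, flags)               \
          (bpf_wq_set_callback_impl ?                             \
           bpf_wq_set_callback_impl((wq), (cb), (flags), NULL) :  \
           bpf_wq_set_callback((wq), (cb), (flags)))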
The series consists of the following patches:

  - patches #1 and #2 are non-functional refactoring in kernel/bpf
  - patch #3 defines the KF_IMPLICIT_ARGS flag and teaches the verifier about it
  - patches #4-#5 implement the btf2btf transformation in resolve_btfids
  - patch #6 adds selftests specific to the KF_IMPLICIT_ARGS feature
  - patches #7-#11 migrate the current users of the __prog argument to KF_IMPLICIT_ARGS
  - patch #12 removes __prog arg suffix support from the kernel
  - patch #13 updates the docs

[1] https://lore.kernel.org/bpf/20251029190113.3323406-1-ihor.solodrai@linux.dev/
[2] https://lore.kernel.org/bpf/20250924211716.1287715-1-ihor.solodrai@linux.dev/
[3] https://docs.kernel.org/bpf/kfuncs.html#prog-annotation
[4] https://lore.kernel.org/bpf/CAEf4BzbgPfRm9BX=TsZm-TsHFAHcwhPY4vTt=9OT-uhWqf8tqw@mail.gmail.com/

---

v2->v3:
  - resolve_btfids: Use dynamic reallocation for btf2btf_context arrays (Andrii)
  - resolve_btfids: Add missing free() for btf2btf_context arrays (AI)
  - Other nits in resolve_btfids (Andrii, Eduard)

v2: https://lore.kernel.org/bpf/20260116201700.864797-1-ihor.solodrai@linux.dev/

v1->v2:
  - Replace the following kernel functions with KF_IMPLICIT_ARGS versions:
      bpf_stream_vprintk_impl           -> bpf_stream_vprintk
      bpf_task_work_schedule_resume_impl -> bpf_task_work_schedule_resume
      bpf_task_work_schedule_signal_impl -> bpf_task_work_schedule_signal
      bpf_wq_set_callback_impl          -> bpf_wq_set_callback
  - Remove __prog arg suffix support from the verifier
  - Rework the btf2btf implementation in resolve_btfids:
      - Distill base and sort before patching BTF ids
      - Collect kfuncs based on BTF decl tags, before BTF ids are patched
  - resolve_btfids: use dynamic memory for intermediate data (Andrii)
  - verifier: reset .subreg_def for caller-saved registers on kfunc call (Eduard)
  - selftests/hid: remove Makefile changes (Benjamin)
  - selftests/bpf: Add a patch (#11) migrating the struct_ops_assoc test to KF_IMPLICIT_ARGS
  - Various nits across the series (Alexei, Andrii, Eduard)

v1: https://lore.kernel.org/bpf/20260109184852.1089786-1-ihor.solodrai@linux.dev/
---
====================

Link: https://patch.msgid.link/20260120222638.3976562-1-ihor.solodrai@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
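For reference, the kernel-side shape of a migrated kfunc (a sketch
distilled from the kernel/bpf/helpers.c hunk below; the function body is
elided here):

  __bpf_kfunc int bpf_wq_set_callback(struct bpf_wq *wq,
                                      int (callback_fn)(void *map, int *key, void *value),
                                      unsigned int flags,
                                      struct bpf_prog_aux *aux)
  {
          /* aux is invisible to BPF callers; the verifier supplies it. */
          ...
  }

  /* Registration carries the new flag in place of the _impl name: */
  BTF_ID_FLAGS(func, bpf_wq_set_callback, KF_IMPLICIT_ARGS)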
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/btf.c       |  70
-rw-r--r--  kernel/bpf/helpers.c   |  43
-rw-r--r--  kernel/bpf/stream.c    |   5
-rw-r--r--  kernel/bpf/verifier.c  | 245
4 files changed, 232 insertions, 131 deletions
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 364dd84bfc5a..d10b3404260f 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -8757,24 +8757,17 @@ end:
return ret;
}
-static u32 *__btf_kfunc_id_set_contains(const struct btf *btf,
- enum btf_kfunc_hook hook,
- u32 kfunc_btf_id,
- const struct bpf_prog *prog)
+static u32 *btf_kfunc_id_set_contains(const struct btf *btf,
+ enum btf_kfunc_hook hook,
+ u32 kfunc_btf_id)
{
- struct btf_kfunc_hook_filter *hook_filter;
struct btf_id_set8 *set;
- u32 *id, i;
+ u32 *id;
if (hook >= BTF_KFUNC_HOOK_MAX)
return NULL;
if (!btf->kfunc_set_tab)
return NULL;
- hook_filter = &btf->kfunc_set_tab->hook_filters[hook];
- for (i = 0; i < hook_filter->nr_filters; i++) {
- if (hook_filter->filters[i](prog, kfunc_btf_id))
- return NULL;
- }
set = btf->kfunc_set_tab->sets[hook];
if (!set)
return NULL;
@@ -8785,6 +8778,28 @@ static u32 *__btf_kfunc_id_set_contains(const struct btf *btf,
return id + 1;
}
+static bool __btf_kfunc_is_allowed(const struct btf *btf,
+ enum btf_kfunc_hook hook,
+ u32 kfunc_btf_id,
+ const struct bpf_prog *prog)
+{
+ struct btf_kfunc_hook_filter *hook_filter;
+ int i;
+
+ if (hook >= BTF_KFUNC_HOOK_MAX)
+ return false;
+ if (!btf->kfunc_set_tab)
+ return false;
+
+ hook_filter = &btf->kfunc_set_tab->hook_filters[hook];
+ for (i = 0; i < hook_filter->nr_filters; i++) {
+ if (hook_filter->filters[i](prog, kfunc_btf_id))
+ return false;
+ }
+
+ return true;
+}
+
static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
{
switch (prog_type) {
@@ -8832,6 +8847,26 @@ static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
}
}
+bool btf_kfunc_is_allowed(const struct btf *btf,
+ u32 kfunc_btf_id,
+ const struct bpf_prog *prog)
+{
+ enum bpf_prog_type prog_type = resolve_prog_type(prog);
+ enum btf_kfunc_hook hook;
+ u32 *kfunc_flags;
+
+ kfunc_flags = btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id);
+ if (kfunc_flags && __btf_kfunc_is_allowed(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog))
+ return true;
+
+ hook = bpf_prog_type_to_kfunc_hook(prog_type);
+ kfunc_flags = btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id);
+ if (kfunc_flags && __btf_kfunc_is_allowed(btf, hook, kfunc_btf_id, prog))
+ return true;
+
+ return false;
+}
+
/* Caution:
* Reference to the module (obtained using btf_try_get_module) corresponding to
* the struct btf *MUST* be held when calling this function from verifier
@@ -8839,26 +8874,27 @@ static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
* keeping the reference for the duration of the call provides the necessary
* protection for looking up a well-formed btf->kfunc_set_tab.
*/
-u32 *btf_kfunc_id_set_contains(const struct btf *btf,
- u32 kfunc_btf_id,
- const struct bpf_prog *prog)
+u32 *btf_kfunc_flags(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog)
{
enum bpf_prog_type prog_type = resolve_prog_type(prog);
enum btf_kfunc_hook hook;
u32 *kfunc_flags;
- kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog);
+ kfunc_flags = btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id);
if (kfunc_flags)
return kfunc_flags;
hook = bpf_prog_type_to_kfunc_hook(prog_type);
- return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id, prog);
+ return btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id);
}
u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
const struct bpf_prog *prog)
{
- return __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog);
+ if (!__btf_kfunc_is_allowed(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog))
+ return NULL;
+
+ return btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id);
}
static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 9eaa4185e0a7..f8aa1320e2f7 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -3120,12 +3120,11 @@ __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
return 0;
}
-__bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
- int (callback_fn)(void *map, int *key, void *value),
- unsigned int flags,
- void *aux__prog)
+__bpf_kfunc int bpf_wq_set_callback(struct bpf_wq *wq,
+ int (callback_fn)(void *map, int *key, void *value),
+ unsigned int flags,
+ struct bpf_prog_aux *aux)
{
- struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog;
struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
if (flags)
@@ -4275,41 +4274,39 @@ release_prog:
}
/**
- * bpf_task_work_schedule_signal_impl - Schedule BPF callback using task_work_add with TWA_SIGNAL
+ * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL
* mode
* @task: Task struct for which callback should be scheduled
* @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
* @map__map: bpf_map that embeds struct bpf_task_work in the values
* @callback: pointer to BPF subprogram to call
- * @aux__prog: user should pass NULL
+ * @aux: pointer to bpf_prog_aux of the caller BPF program, implicitly set by the verifier
*
* Return: 0 if task work has been scheduled successfully, negative error code otherwise
*/
-__bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
- struct bpf_task_work *tw, void *map__map,
- bpf_task_work_callback_t callback,
- void *aux__prog)
+__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
+ void *map__map, bpf_task_work_callback_t callback,
+ struct bpf_prog_aux *aux)
{
- return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
+ return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_SIGNAL);
}
/**
- * bpf_task_work_schedule_resume_impl - Schedule BPF callback using task_work_add with TWA_RESUME
+ * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME
* mode
* @task: Task struct for which callback should be scheduled
* @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
* @map__map: bpf_map that embeds struct bpf_task_work in the values
* @callback: pointer to BPF subprogram to call
- * @aux__prog: user should pass NULL
+ * @aux: pointer to bpf_prog_aux of the caller BPF program, implicitly set by the verifier
*
* Return: 0 if task work has been scheduled successfully, negative error code otherwise
*/
-__bpf_kfunc int bpf_task_work_schedule_resume_impl(struct task_struct *task,
- struct bpf_task_work *tw, void *map__map,
- bpf_task_work_callback_t callback,
- void *aux__prog)
+__bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw,
+ void *map__map, bpf_task_work_callback_t callback,
+ struct bpf_prog_aux *aux)
{
- return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME);
+ return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_RESUME);
}
static int make_file_dynptr(struct file *file, u32 flags, bool may_sleep,
@@ -4488,7 +4485,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_memset)
BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
#endif
BTF_ID_FLAGS(func, bpf_wq_init)
-BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
+BTF_ID_FLAGS(func, bpf_wq_set_callback, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_wq_start)
BTF_ID_FLAGS(func, bpf_preempt_disable)
BTF_ID_FLAGS(func, bpf_preempt_enable)
@@ -4536,9 +4533,9 @@ BTF_ID_FLAGS(func, bpf_strncasestr);
#if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS)
BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU)
#endif
-BTF_ID_FLAGS(func, bpf_stream_vprintk_impl)
-BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl)
-BTF_ID_FLAGS(func, bpf_task_work_schedule_resume_impl)
+BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_IMPLICIT_ARGS)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_IMPLICIT_ARGS)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_dynptr_from_file)
BTF_ID_FLAGS(func, bpf_dynptr_file_discard)
BTF_KFUNCS_END(common_btf_ids)
diff --git a/kernel/bpf/stream.c b/kernel/bpf/stream.c
index 0b6bc3f30335..24730df55e69 100644
--- a/kernel/bpf/stream.c
+++ b/kernel/bpf/stream.c
@@ -212,14 +212,13 @@ __bpf_kfunc_start_defs();
* Avoid using enum bpf_stream_id so that kfunc users don't have to pull in the
* enum in headers.
*/
-__bpf_kfunc int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args,
- u32 len__sz, void *aux__prog)
+__bpf_kfunc int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args,
+ u32 len__sz, struct bpf_prog_aux *aux)
{
struct bpf_bprintf_data data = {
.get_bin_args = true,
.get_buf = true,
};
- struct bpf_prog_aux *aux = aux__prog;
u32 fmt_size = strlen(fmt__str) + 1;
struct bpf_stream *stream;
u32 data_len = len__sz;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b67d8981b058..919556614505 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -294,6 +294,14 @@ struct bpf_call_arg_meta {
s64 const_map_key;
};
+struct bpf_kfunc_meta {
+ struct btf *btf;
+ const struct btf_type *proto;
+ const char *name;
+ const u32 *flags;
+ s32 id;
+};
+
struct bpf_kfunc_call_arg_meta {
/* In parameters */
struct btf *btf;
@@ -512,7 +520,7 @@ static bool is_async_callback_calling_kfunc(u32 btf_id);
static bool is_callback_calling_kfunc(u32 btf_id);
static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
-static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
+static bool is_bpf_wq_set_callback_kfunc(u32 btf_id);
static bool is_task_work_add_kfunc(u32 func_id);
static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
@@ -554,7 +562,7 @@ static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn
/* bpf_wq and bpf_task_work callbacks are always sleepable. */
if (bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
- (is_bpf_wq_set_callback_impl_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm)))
+ (is_bpf_wq_set_callback_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm)))
return true;
verifier_bug(env, "unhandled async callback in is_async_cb_sleepable");
@@ -3263,16 +3271,105 @@ static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
return btf_vmlinux ?: ERR_PTR(-ENOENT);
}
-static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
+#define KF_IMPL_SUFFIX "_impl"
+
+static const struct btf_type *find_kfunc_impl_proto(struct bpf_verifier_env *env,
+ struct btf *btf,
+ const char *func_name)
+{
+ char *buf = env->tmp_str_buf;
+ const struct btf_type *func;
+ s32 impl_id;
+ int len;
+
+ len = snprintf(buf, TMP_STR_BUF_LEN, "%s%s", func_name, KF_IMPL_SUFFIX);
+ if (len < 0 || len >= TMP_STR_BUF_LEN) {
+ verbose(env, "function name %s%s is too long\n", func_name, KF_IMPL_SUFFIX);
+ return NULL;
+ }
+
+ impl_id = btf_find_by_name_kind(btf, buf, BTF_KIND_FUNC);
+ if (impl_id <= 0) {
+ verbose(env, "cannot find function %s in BTF\n", buf);
+ return NULL;
+ }
+
+ func = btf_type_by_id(btf, impl_id);
+
+ return btf_type_by_id(btf, func->type);
+}
+
+static int fetch_kfunc_meta(struct bpf_verifier_env *env,
+ s32 func_id,
+ s16 offset,
+ struct bpf_kfunc_meta *kfunc)
{
const struct btf_type *func, *func_proto;
+ const char *func_name;
+ u32 *kfunc_flags;
+ struct btf *btf;
+
+ if (func_id <= 0) {
+ verbose(env, "invalid kernel function btf_id %d\n", func_id);
+ return -EINVAL;
+ }
+
+ btf = find_kfunc_desc_btf(env, offset);
+ if (IS_ERR(btf)) {
+ verbose(env, "failed to find BTF for kernel function\n");
+ return PTR_ERR(btf);
+ }
+
+ /*
+ * Note that kfunc_flags may be NULL at this point, which
+ * means that we couldn't find func_id in any relevant
+ * kfunc_id_set. This most likely indicates an invalid kfunc
+ * call. However, we don't fail with an error here,
+ * and let the caller decide what to do with NULL kfunc->flags.
+ */
+ kfunc_flags = btf_kfunc_flags(btf, func_id, env->prog);
+
+ func = btf_type_by_id(btf, func_id);
+ if (!func || !btf_type_is_func(func)) {
+ verbose(env, "kernel btf_id %d is not a function\n", func_id);
+ return -EINVAL;
+ }
+
+ func_name = btf_name_by_offset(btf, func->name_off);
+
+ /*
+ * An actual prototype of a kfunc with KF_IMPLICIT_ARGS flag
+ * can be found through the counterpart _impl kfunc.
+ */
+ if (kfunc_flags && (*kfunc_flags & KF_IMPLICIT_ARGS))
+ func_proto = find_kfunc_impl_proto(env, btf, func_name);
+ else
+ func_proto = btf_type_by_id(btf, func->type);
+
+ if (!func_proto || !btf_type_is_func_proto(func_proto)) {
+ verbose(env, "kernel function btf_id %d does not have a valid func_proto\n",
+ func_id);
+ return -EINVAL;
+ }
+
+ memset(kfunc, 0, sizeof(*kfunc));
+ kfunc->btf = btf;
+ kfunc->id = func_id;
+ kfunc->name = func_name;
+ kfunc->proto = func_proto;
+ kfunc->flags = kfunc_flags;
+
+ return 0;
+}
+
+static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
+{
struct bpf_kfunc_btf_tab *btf_tab;
struct btf_func_model func_model;
struct bpf_kfunc_desc_tab *tab;
struct bpf_prog_aux *prog_aux;
+ struct bpf_kfunc_meta kfunc;
struct bpf_kfunc_desc *desc;
- const char *func_name;
- struct btf *desc_btf;
unsigned long addr;
int err;
@@ -3322,12 +3419,6 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
prog_aux->kfunc_btf_tab = btf_tab;
}
- desc_btf = find_kfunc_desc_btf(env, offset);
- if (IS_ERR(desc_btf)) {
- verbose(env, "failed to find BTF for kernel function\n");
- return PTR_ERR(desc_btf);
- }
-
if (find_kfunc_desc(env->prog, func_id, offset))
return 0;
@@ -3336,24 +3427,13 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
return -E2BIG;
}
- func = btf_type_by_id(desc_btf, func_id);
- if (!func || !btf_type_is_func(func)) {
- verbose(env, "kernel btf_id %u is not a function\n",
- func_id);
- return -EINVAL;
- }
- func_proto = btf_type_by_id(desc_btf, func->type);
- if (!func_proto || !btf_type_is_func_proto(func_proto)) {
- verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
- func_id);
- return -EINVAL;
- }
+ err = fetch_kfunc_meta(env, func_id, offset, &kfunc);
+ if (err)
+ return err;
- func_name = btf_name_by_offset(desc_btf, func->name_off);
- addr = kallsyms_lookup_name(func_name);
+ addr = kallsyms_lookup_name(kfunc.name);
if (!addr) {
- verbose(env, "cannot find address for kernel function %s\n",
- func_name);
+ verbose(env, "cannot find address for kernel function %s\n", kfunc.name);
return -EINVAL;
}
@@ -3363,9 +3443,7 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
return err;
}
- err = btf_distill_func_proto(&env->log, desc_btf,
- func_proto, func_name,
- &func_model);
+ err = btf_distill_func_proto(&env->log, kfunc.btf, kfunc.proto, kfunc.name, &func_model);
if (err)
return err;
@@ -12133,11 +12211,6 @@ static bool is_kfunc_arg_irq_flag(const struct btf *btf, const struct btf_param
return btf_param_match_suffix(btf, arg, "__irq_flag");
}
-static bool is_kfunc_arg_prog(const struct btf *btf, const struct btf_param *arg)
-{
- return btf_param_match_suffix(btf, arg, "__prog");
-}
-
static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
const struct btf_param *arg,
const char *name)
@@ -12166,6 +12239,7 @@ enum {
KF_ARG_WORKQUEUE_ID,
KF_ARG_RES_SPIN_LOCK_ID,
KF_ARG_TASK_WORK_ID,
+ KF_ARG_PROG_AUX_ID
};
BTF_ID_LIST(kf_arg_btf_ids)
@@ -12177,6 +12251,7 @@ BTF_ID(struct, bpf_rb_node)
BTF_ID(struct, bpf_wq)
BTF_ID(struct, bpf_res_spin_lock)
BTF_ID(struct, bpf_task_work)
+BTF_ID(struct, bpf_prog_aux)
static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
const struct btf_param *arg, int type)
@@ -12257,6 +12332,11 @@ static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf
return true;
}
+static bool is_kfunc_arg_prog_aux(const struct btf *btf, const struct btf_param *arg)
+{
+ return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_PROG_AUX_ID);
+}
+
/* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
const struct btf *btf,
@@ -12350,7 +12430,7 @@ enum special_kfunc_type {
KF_bpf_percpu_obj_new_impl,
KF_bpf_percpu_obj_drop_impl,
KF_bpf_throw,
- KF_bpf_wq_set_callback_impl,
+ KF_bpf_wq_set_callback,
KF_bpf_preempt_disable,
KF_bpf_preempt_enable,
KF_bpf_iter_css_task_new,
@@ -12370,8 +12450,8 @@ enum special_kfunc_type {
KF_bpf_dynptr_from_file,
KF_bpf_dynptr_file_discard,
KF___bpf_trap,
- KF_bpf_task_work_schedule_signal_impl,
- KF_bpf_task_work_schedule_resume_impl,
+ KF_bpf_task_work_schedule_signal,
+ KF_bpf_task_work_schedule_resume,
KF_bpf_arena_alloc_pages,
KF_bpf_arena_free_pages,
KF_bpf_arena_reserve_pages,
@@ -12414,7 +12494,7 @@ BTF_ID(func, bpf_dynptr_clone)
BTF_ID(func, bpf_percpu_obj_new_impl)
BTF_ID(func, bpf_percpu_obj_drop_impl)
BTF_ID(func, bpf_throw)
-BTF_ID(func, bpf_wq_set_callback_impl)
+BTF_ID(func, bpf_wq_set_callback)
BTF_ID(func, bpf_preempt_disable)
BTF_ID(func, bpf_preempt_enable)
#ifdef CONFIG_CGROUPS
@@ -12447,16 +12527,16 @@ BTF_ID(func, bpf_res_spin_unlock_irqrestore)
BTF_ID(func, bpf_dynptr_from_file)
BTF_ID(func, bpf_dynptr_file_discard)
BTF_ID(func, __bpf_trap)
-BTF_ID(func, bpf_task_work_schedule_signal_impl)
-BTF_ID(func, bpf_task_work_schedule_resume_impl)
+BTF_ID(func, bpf_task_work_schedule_signal)
+BTF_ID(func, bpf_task_work_schedule_resume)
BTF_ID(func, bpf_arena_alloc_pages)
BTF_ID(func, bpf_arena_free_pages)
BTF_ID(func, bpf_arena_reserve_pages)
static bool is_task_work_add_kfunc(u32 func_id)
{
- return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal_impl] ||
- func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume_impl];
+ return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
+ func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume];
}
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
@@ -12907,7 +12987,7 @@ static bool is_sync_callback_calling_kfunc(u32 btf_id)
static bool is_async_callback_calling_kfunc(u32 btf_id)
{
- return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl] ||
+ return is_bpf_wq_set_callback_kfunc(btf_id) ||
is_task_work_add_kfunc(btf_id);
}
@@ -12917,9 +12997,9 @@ static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
insn->imm == special_kfunc_list[KF_bpf_throw];
}
-static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id)
+static bool is_bpf_wq_set_callback_kfunc(u32 btf_id)
{
- return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
+ return btf_id == special_kfunc_list[KF_bpf_wq_set_callback];
}
static bool is_callback_calling_kfunc(u32 btf_id)
@@ -13193,8 +13273,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
if (is_kfunc_arg_ignore(btf, &args[i]))
continue;
- if (is_kfunc_arg_prog(btf, &args[i])) {
- /* Used to reject repeated use of __prog. */
+ if (is_kfunc_arg_prog_aux(btf, &args[i])) {
+ /* Reject repeated use of bpf_prog_aux */
if (meta->arg_prog) {
verifier_bug(env, "Only 1 prog->aux argument supported per-kfunc");
return -EFAULT;
@@ -13696,44 +13776,28 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return 0;
}
-static int fetch_kfunc_meta(struct bpf_verifier_env *env,
- struct bpf_insn *insn,
- struct bpf_kfunc_call_arg_meta *meta,
- const char **kfunc_name)
+static int fetch_kfunc_arg_meta(struct bpf_verifier_env *env,
+ s32 func_id,
+ s16 offset,
+ struct bpf_kfunc_call_arg_meta *meta)
{
- const struct btf_type *func, *func_proto;
- u32 func_id, *kfunc_flags;
- const char *func_name;
- struct btf *desc_btf;
-
- if (kfunc_name)
- *kfunc_name = NULL;
+ struct bpf_kfunc_meta kfunc;
+ int err;
- if (!insn->imm)
- return -EINVAL;
+ err = fetch_kfunc_meta(env, func_id, offset, &kfunc);
+ if (err)
+ return err;
- desc_btf = find_kfunc_desc_btf(env, insn->off);
- if (IS_ERR(desc_btf))
- return PTR_ERR(desc_btf);
+ memset(meta, 0, sizeof(*meta));
+ meta->btf = kfunc.btf;
+ meta->func_id = kfunc.id;
+ meta->func_proto = kfunc.proto;
+ meta->func_name = kfunc.name;
- func_id = insn->imm;
- func = btf_type_by_id(desc_btf, func_id);
- func_name = btf_name_by_offset(desc_btf, func->name_off);
- if (kfunc_name)
- *kfunc_name = func_name;
- func_proto = btf_type_by_id(desc_btf, func->type);
-
- kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog);
- if (!kfunc_flags) {
+ if (!kfunc.flags || !btf_kfunc_is_allowed(kfunc.btf, kfunc.id, env->prog))
return -EACCES;
- }
- memset(meta, 0, sizeof(*meta));
- meta->btf = desc_btf;
- meta->func_id = func_id;
- meta->kfunc_flags = *kfunc_flags;
- meta->func_proto = func_proto;
- meta->func_name = func_name;
+ meta->kfunc_flags = *kfunc.flags;
return 0;
}
@@ -13938,12 +14002,13 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (!insn->imm)
return 0;
- err = fetch_kfunc_meta(env, insn, &meta, &func_name);
- if (err == -EACCES && func_name)
- verbose(env, "calling kernel function %s is not allowed\n", func_name);
+ err = fetch_kfunc_arg_meta(env, insn->imm, insn->off, &meta);
+ if (err == -EACCES && meta.func_name)
+ verbose(env, "calling kernel function %s is not allowed\n", meta.func_name);
if (err)
return err;
desc_btf = meta.btf;
+ func_name = meta.func_name;
insn_aux = &env->insn_aux_data[insn_idx];
insn_aux->is_iter_next = is_iter_next_kfunc(&meta);
@@ -14013,7 +14078,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
meta.r0_rdonly = false;
}
- if (is_bpf_wq_set_callback_impl_kfunc(meta.func_id)) {
+ if (is_bpf_wq_set_callback_kfunc(meta.func_id)) {
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
set_timer_callback_state);
if (err) {
@@ -14151,8 +14216,12 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
}
}
- for (i = 0; i < CALLER_SAVED_REGS; i++)
- mark_reg_not_init(env, regs, caller_saved[i]);
+ for (i = 0; i < CALLER_SAVED_REGS; i++) {
+ u32 regno = caller_saved[i];
+
+ mark_reg_not_init(env, regs, regno);
+ regs[regno].subreg_def = DEF_NOT_SUBREG;
+ }
/* Check return type */
t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL);
@@ -17789,7 +17858,7 @@ static bool get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call
if (bpf_pseudo_kfunc_call(call)) {
int err;
- err = fetch_kfunc_meta(env, call, &meta, NULL);
+ err = fetch_kfunc_arg_meta(env, call->imm, call->off, &meta);
if (err < 0)
/* error would be reported later */
return false;
@@ -18297,7 +18366,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
struct bpf_kfunc_call_arg_meta meta;
- ret = fetch_kfunc_meta(env, insn, &meta, NULL);
+ ret = fetch_kfunc_arg_meta(env, insn->imm, insn->off, &meta);
if (ret == 0 && is_iter_next_kfunc(&meta)) {
mark_prune_point(env, t);
/* Checking and saving state checkpoints at iter_next() call