path: root/kernel/bpf/trampoline.c
Diffstat (limited to 'kernel/bpf/trampoline.c')
-rw-r--r--  kernel/bpf/trampoline.c  74
1 file changed, 35 insertions(+), 39 deletions(-)
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index c4b1a98ff726..f2cb0b097093 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -479,11 +479,6 @@ again:
* BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the
* trampoline again, and retry register.
*/
- /* reset fops->func and fops->trampoline for re-register */
- tr->fops->func = NULL;
- tr->fops->trampoline = 0;
-
- /* free im memory and reallocate later */
bpf_tramp_image_free(im);
goto again;
}
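[note] For readers without the surrounding context: the hunk above sits in the
-EAGAIN retry path of bpf_trampoline_update(). A condensed sketch of the
control flow, reconstructed from the visible context lines (the surrounding
details are assumed, not shown in this diff):

	again:
		im = bpf_tramp_image_alloc(...);	/* reallocated on each retry */
		...
		if (err == -EAGAIN) {
			/* another ftrace user appeared; regenerate the
			 * trampoline with BPF_TRAMP_F_SHARE_IPMODIFY set
			 * and retry the register step */
			bpf_tramp_image_free(im);
			goto again;
		}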
@@ -674,7 +669,8 @@ static const struct bpf_link_ops bpf_shim_tramp_link_lops = {
static struct bpf_shim_tramp_link *cgroup_shim_alloc(const struct bpf_prog *prog,
bpf_func_t bpf_func,
- int cgroup_atype)
+ int cgroup_atype,
+ enum bpf_attach_type attach_type)
{
struct bpf_shim_tramp_link *shim_link = NULL;
struct bpf_prog *p;
@@ -701,7 +697,7 @@ static struct bpf_shim_tramp_link *cgroup_shim_alloc(const struct bpf_prog *prog
p->expected_attach_type = BPF_LSM_MAC;
bpf_prog_inc(p);
bpf_link_init(&shim_link->link.link, BPF_LINK_TYPE_UNSPEC,
- &bpf_shim_tramp_link_lops, p);
+ &bpf_shim_tramp_link_lops, p, attach_type);
bpf_cgroup_atype_get(p->aux->attach_btf_id, cgroup_atype);
return shim_link;
@@ -726,7 +722,8 @@ static struct bpf_shim_tramp_link *cgroup_shim_find(struct bpf_trampoline *tr,
}
int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
- int cgroup_atype)
+ int cgroup_atype,
+ enum bpf_attach_type attach_type)
{
struct bpf_shim_tramp_link *shim_link = NULL;
struct bpf_attach_target_info tgt_info = {};
@@ -763,7 +760,7 @@ int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
/* Allocate and install new shim. */
- shim_link = cgroup_shim_alloc(prog, bpf_func, cgroup_atype);
+ shim_link = cgroup_shim_alloc(prog, bpf_func, cgroup_atype, attach_type);
if (!shim_link) {
err = -ENOMEM;
goto err;
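[note] The hunks above are pure plumbing: the new attach_type argument travels
from bpf_trampoline_link_cgroup_shim() through cgroup_shim_alloc() into
bpf_link_init(). Inferred from the updated call in cgroup_shim_alloc(), the
extended prototype is presumably (it is not shown in this diff, so treat this
as an inference):

	void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
			   const struct bpf_link_ops *ops, struct bpf_prog *prog,
			   enum bpf_attach_type attach_type);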
@@ -897,8 +894,7 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
__acquires(RCU)
{
- rcu_read_lock();
- migrate_disable();
+ rcu_read_lock_dont_migrate();
run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
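[note] rcu_read_lock_dont_migrate() replaces the back-to-back rcu_read_lock()
+ migrate_disable() pairs throughout this file, and rcu_read_unlock_migrate()
the matching unlock pairs. Assuming the helpers simply fold the two calls they
replace (a sketch, not the actual rcupdate.h definition, which may order the
calls differently depending on the preemption model):

	static inline void rcu_read_lock_dont_migrate(void)
	{
		rcu_read_lock();
		migrate_disable();
	}

	static inline void rcu_read_unlock_migrate(void)
	{
		migrate_enable();
		rcu_read_unlock();
	}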
@@ -911,27 +907,32 @@ static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tram
return bpf_prog_start_time();
}
-static void notrace update_prog_stats(struct bpf_prog *prog,
- u64 start)
+static void notrace __update_prog_stats(struct bpf_prog *prog, u64 start)
{
struct bpf_prog_stats *stats;
+ unsigned long flags;
+ u64 duration;
- if (static_branch_unlikely(&bpf_stats_enabled_key) &&
- /* static_key could be enabled in __bpf_prog_enter*
- * and disabled in __bpf_prog_exit*.
- * And vice versa.
- * Hence check that 'start' is valid.
- */
- start > NO_START_TIME) {
- u64 duration = sched_clock() - start;
- unsigned long flags;
-
- stats = this_cpu_ptr(prog->stats);
- flags = u64_stats_update_begin_irqsave(&stats->syncp);
- u64_stats_inc(&stats->cnt);
- u64_stats_add(&stats->nsecs, duration);
- u64_stats_update_end_irqrestore(&stats->syncp, flags);
- }
+ /*
+ * static_key could be enabled in __bpf_prog_enter* and disabled in
+ * __bpf_prog_exit*. And vice versa. Check that 'start' is valid.
+ */
+ if (start <= NO_START_TIME)
+ return;
+
+ duration = sched_clock() - start;
+ stats = this_cpu_ptr(prog->stats);
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->cnt);
+ u64_stats_add(&stats->nsecs, duration);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+}
+
+static __always_inline void notrace update_prog_stats(struct bpf_prog *prog,
+ u64 start)
+{
+ if (static_branch_unlikely(&bpf_stats_enabled_key))
+ __update_prog_stats(prog, start);
}
static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
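[note] The update_prog_stats() rewrite above is the classic static-branch
fast-path split: the __always_inline wrapper keeps only the
static_branch_unlikely() test on the hot path, and the cold body moves out of
line into __update_prog_stats(). A minimal standalone sketch of the pattern
(names hypothetical, not from this patch):

	DEFINE_STATIC_KEY_FALSE(stats_enabled_key);

	static void notrace __do_stats_slow(struct bpf_prog *prog, u64 start)
	{
		/* out-of-line slow path: only reached while the key is on */
	}

	static __always_inline void notrace do_stats(struct bpf_prog *prog,
						     u64 start)
	{
		/* compiles to a patched-in jump; near-zero cost when off */
		if (static_branch_unlikely(&stats_enabled_key))
			__do_stats_slow(prog, start);
	}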
@@ -942,8 +943,7 @@ static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
update_prog_stats(prog, start);
this_cpu_dec(*(prog->active));
- migrate_enable();
- rcu_read_unlock();
+ rcu_read_unlock_migrate();
}
static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
@@ -953,8 +953,7 @@ static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
/* Runtime stats are exported via actual BPF_LSM_CGROUP
* programs, not the shims.
*/
- rcu_read_lock();
- migrate_disable();
+ rcu_read_lock_dont_migrate();
run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
@@ -967,8 +966,7 @@ static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
{
bpf_reset_run_ctx(run_ctx->saved_run_ctx);
- migrate_enable();
- rcu_read_unlock();
+ rcu_read_unlock_migrate();
}
u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
@@ -1026,8 +1024,7 @@ static u64 notrace __bpf_prog_enter(struct bpf_prog *prog,
struct bpf_tramp_run_ctx *run_ctx)
__acquires(RCU)
{
- rcu_read_lock();
- migrate_disable();
+ rcu_read_lock_dont_migrate();
run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
@@ -1041,8 +1038,7 @@ static void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start,
bpf_reset_run_ctx(run_ctx->saved_run_ctx);
update_prog_stats(prog, start);
- migrate_enable();
- rcu_read_unlock();
+ rcu_read_unlock_migrate();
}
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
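[note] For completeness, the reader side that pairs with the
u64_stats_update_begin_irqsave()/u64_stats_update_end_irqrestore() writer
sequence in __update_prog_stats() looks roughly like this (a sketch of the
generic u64_stats seqcount pattern, not code from this patch):

	unsigned int seq;
	u64 cnt, nsecs;

	do {
		seq = u64_stats_fetch_begin(&stats->syncp);
		cnt = u64_stats_read(&stats->cnt);
		nsecs = u64_stats_read(&stats->nsecs);
	} while (u64_stats_fetch_retry(&stats->syncp, seq));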