-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c                                 6
-rw-r--r--  kernel/bpf/verifier.c                                           5
-rw-r--r--  net/bpf/test_run.c                                             25
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c  14
4 files changed, 35 insertions, 15 deletions
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 5f9457e910e8..37888abee70c 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -1133,10 +1133,6 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
store_args(nr_arg_slots, args_off, ctx);
- /* skip to actual body of traced function */
- if (flags & BPF_TRAMP_F_ORIG_STACK)
- orig_call += RV_FENTRY_NINSNS * 4;
-
if (flags & BPF_TRAMP_F_CALL_ORIG) {
emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx);
ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
@@ -1171,6 +1167,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ /* skip to actual body of traced function */
+ orig_call += RV_FENTRY_NINSNS * 4;
restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx);
restore_stack_args(nr_arg_slots - RV_MAX_REG_ARGS, args_off, stk_arg_off, ctx);
ret = emit_call((const u64)orig_call, true, ctx);
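
The hunks above move the fentry-skip adjustment of orig_call out of the
BPF_TRAMP_F_ORIG_STACK branch and into the BPF_TRAMP_F_CALL_ORIG one, so the
trampoline skips the patched fentry site whenever it calls the original
function at all. A minimal user-space sketch of that address arithmetic
follows; RV_FENTRY_NINSNS is assumed to be 2 as in the RV64 JIT, and
orig_call_target() is a hypothetical helper, not a kernel function.

#include <stdint.h>
#include <stdio.h>

#define RV_FENTRY_NINSNS	2		/* assumed: the RV64 fentry patch site */
#define BPF_TRAMP_F_CALL_ORIG	(1U << 1)	/* illustrative flag value */

/* Hypothetical helper: compute the trampoline's call target.  The
 * traced function begins with RV_FENTRY_NINSNS patchable instructions
 * (4 bytes each on RV64) that redirect into the trampoline; calling
 * the original must land past them to avoid recursing into the
 * trampoline itself.
 */
static uintptr_t orig_call_target(uintptr_t traced_func, unsigned int flags)
{
	if (flags & BPF_TRAMP_F_CALL_ORIG)
		return traced_func + RV_FENTRY_NINSNS * 4;
	return traced_func;
}

int main(void)
{
	uintptr_t func = 0x80001000ul;

	printf("body of traced function: %#lx\n",
	       (unsigned long)orig_call_target(func, BPF_TRAMP_F_CALL_ORIG));
	return 0;
}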
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f0ca69f888fa..3135643d5695 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -9609,6 +9609,11 @@ static int check_reg_const_str(struct bpf_verifier_env *env,
if (reg->type != PTR_TO_MAP_VALUE)
return -EINVAL;
+ if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY) {
+ verbose(env, "R%d points to insn_array map which cannot be used as const string\n", regno);
+ return -EACCES;
+ }
+
if (!bpf_map_is_rdonly(map)) {
verbose(env, "R%d does not point to a readonly map'\n", regno);
return -EACCES;
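
The verifier hunk adds an early bail-out in check_reg_const_str(): a pointer
into a BPF_MAP_TYPE_INSN_ARRAY map is rejected before the generic read-only
check runs, since an insn_array map cannot back a constant string even when
frozen. A stand-alone sketch of that guard ordering, using stand-in types
rather than the real verifier structures:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's map types and struct bpf_map; illustrative only. */
enum map_type { MAP_TYPE_ARRAY, MAP_TYPE_INSN_ARRAY };

struct map {
	enum map_type type;
	bool rdonly;
};

static int check_const_str(const struct map *map)
{
	/* Specific rejection first: insn_array contents are instruction
	 * offsets, not string bytes, so read-only status is irrelevant.
	 */
	if (map->type == MAP_TYPE_INSN_ARRAY)
		return -EACCES;
	if (!map->rdonly)
		return -EACCES;
	return 0;
}

int main(void)
{
	struct map frozen_insn_array = { MAP_TYPE_INSN_ARRAY, true };

	/* Rejected even though the map itself is read-only */
	printf("%d\n", check_const_str(&frozen_insn_array));
	return 0;
}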
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 655efac6f133..26cfcfdc45eb 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -1294,8 +1294,6 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
batch_size = NAPI_POLL_WEIGHT;
else if (batch_size > TEST_XDP_MAX_BATCH)
return -E2BIG;
-
- headroom += sizeof(struct xdp_page_head);
} else if (batch_size) {
return -EINVAL;
}
@@ -1308,16 +1306,26 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
/* There can't be user provided data before the meta data */
if (ctx->data_meta || ctx->data_end > kattr->test.data_size_in ||
ctx->data > ctx->data_end ||
- unlikely(xdp_metalen_invalid(ctx->data)) ||
(do_live && (kattr->test.data_out || kattr->test.ctx_out)))
goto free_ctx;
- /* Meta data is allocated from the headroom */
- headroom -= ctx->data;
meta_sz = ctx->data;
+ if (xdp_metalen_invalid(meta_sz) || meta_sz > headroom - sizeof(struct xdp_frame))
+ goto free_ctx;
+
+ /* Meta data is allocated from the headroom */
+ headroom -= meta_sz;
linear_sz = ctx->data_end;
}
+ /* The xdp_page_head structure takes up space in each page, limiting the
+ * size of the packet data; add the extra size to headroom here to make
+ * sure it's accounted in the length checks below, but not in the
+ * metadata size check above.
+ */
+ if (do_live)
+ headroom += sizeof(struct xdp_page_head);
+
max_linear_sz = PAGE_SIZE - headroom - tailroom;
linear_sz = min_t(u32, linear_sz, max_linear_sz);
@@ -1355,13 +1363,13 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (sinfo->nr_frags == MAX_SKB_FRAGS) {
ret = -ENOMEM;
- goto out;
+ goto out_put_dev;
}
page = alloc_page(GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
- goto out;
+ goto out_put_dev;
}
frag = &sinfo->frags[sinfo->nr_frags++];
@@ -1373,7 +1381,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (copy_from_user(page_address(page), data_in + size,
data_len)) {
ret = -EFAULT;
- goto out;
+ goto out_put_dev;
}
sinfo->xdp_frags_size += data_len;
size += data_len;
@@ -1388,6 +1396,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
else
ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
+out_put_dev:
/* We convert the xdp_buff back to an xdp_md before checking the return
* code so the reference count of any held netdevice will be decremented
* even if the test run failed.
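
In test_run.c the metadata length check now runs against the headroom
actually available in front of the frame: meta_sz must leave room for a
struct xdp_frame, and the per-page struct xdp_page_head reservation for live
mode is added to headroom only after that check, so it shrinks the packet
data budget without tightening the metadata limit. The error-path hunks
additionally route failures through the new out_put_dev label so a held
netdevice reference is dropped even when setup fails. Below is a user-space
sketch of the reordered size accounting (tailroom omitted for brevity); the
structure sizes are placeholders standing in for sizeof(struct xdp_frame)
and sizeof(struct xdp_page_head):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE		4096
#define XDP_PACKET_HEADROOM	256
#define XDP_FRAME_SZ		40	/* placeholder: sizeof(struct xdp_frame) */
#define XDP_PAGE_HEAD_SZ	72	/* placeholder: sizeof(struct xdp_page_head) */

/* Mirrors xdp_metalen_invalid(): metadata must be u32-aligned and fit
 * the u8 meta_len field of skb_shared_info.
 */
static bool metalen_invalid(unsigned int sz)
{
	return (sz % 4) || sz > 255;
}

static int check_sizes(unsigned int meta_sz, bool do_live)
{
	unsigned int headroom = XDP_PACKET_HEADROOM;

	/* Metadata is carved out of the headroom, and an xdp_frame must
	 * still fit in front of it, so the effective ceiling here is
	 * XDP_PACKET_HEADROOM - sizeof(struct xdp_frame) = 216.
	 */
	if (metalen_invalid(meta_sz) || meta_sz > headroom - XDP_FRAME_SZ)
		return -1;
	headroom -= meta_sz;

	/* Only after the metadata check: live mode reserves extra space
	 * per page, limiting packet data but not the metadata budget.
	 */
	if (do_live)
		headroom += XDP_PAGE_HEAD_SZ;

	printf("meta %u ok, max linear size %u\n",
	       meta_sz, PAGE_SIZE - headroom);
	return 0;
}

int main(void)
{
	check_sizes(216, false);	/* accepted: exactly at the ceiling */
	if (check_sizes(220, false))	/* 4-byte aligned, but past the ceiling */
		printf("meta 220 rejected\n");
	return 0;
}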
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
index ee94c281888a..26159e0499c7 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
@@ -47,6 +47,7 @@ void test_xdp_context_test_run(void)
struct test_xdp_context_test_run *skel = NULL;
char data[sizeof(pkt_v4) + sizeof(__u32)];
char bad_ctx[sizeof(struct xdp_md) + 1];
+ char large_data[256];
struct xdp_md ctx_in, ctx_out;
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &data,
@@ -94,9 +95,6 @@ void test_xdp_context_test_run(void)
test_xdp_context_error(prog_fd, opts, 4, sizeof(__u32), sizeof(data),
0, 0, 0);
- /* Meta data must be 255 bytes or smaller */
- test_xdp_context_error(prog_fd, opts, 0, 256, sizeof(data), 0, 0, 0);
-
/* Total size of data must be data_end - data_meta or larger */
test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
sizeof(data) + 1, 0, 0, 0);
@@ -116,6 +114,16 @@ void test_xdp_context_test_run(void)
test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
0, 0, 1);
+ /* Meta data must be 216 bytes or smaller (256 - sizeof(struct
+ * xdp_frame)). Test both nearest invalid size and nearest invalid
+ * 4-byte-aligned size, and make sure data_in is large enough that we
+ * actually hit the check on metadata length
+ */
+ opts.data_in = large_data;
+ opts.data_size_in = sizeof(large_data);
+ test_xdp_context_error(prog_fd, opts, 0, 217, sizeof(large_data), 0, 0, 0);
+ test_xdp_context_error(prog_fd, opts, 0, 220, sizeof(large_data), 0, 0, 0);
+
test_xdp_context_test_run__destroy(skel);
}
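
The selftest replaces the old 255-byte metadata bound with the new effective
one, probing 217 (nearest invalid size) and 220 (nearest invalid
4-byte-aligned size) against a 256-byte input buffer so the metadata check is
reached before the data-size checks. A sketch of exercising the same boundary
directly through libbpf, assuming prog_fd refers to a loaded XDP program;
probe_meta_limit() is a hypothetical helper mirroring what
test_xdp_context_error() does internally:

#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Hypothetical helper: run an XDP program with metadata sized just past
 * the new 216-byte ceiling; expected to fail with -EINVAL after this
 * change.
 */
static int probe_meta_limit(int prog_fd)
{
	char data[256] = { 0 };
	struct xdp_md ctx_in = {
		.data = 220,			/* meta size: aligned, > 216 */
		.data_end = sizeof(data),
	};
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			    .data_in = data,
			    .data_size_in = sizeof(data),
			    .ctx_in = &ctx_in,
			    .ctx_size_in = sizeof(ctx_in));

	return bpf_prog_test_run_opts(prog_fd, &opts);
}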