author	Alexei Starovoitov <ast@kernel.org>	2021-02-26 12:55:43 -0800
committer	Alexei Starovoitov <ast@kernel.org>	2021-02-26 13:23:53 -0800
commit	cc0f83530934dda0ce1dd01990d2f37f8c3f0d92 (patch)
tree	ab8a104aef4dfda8e7f437a936e385b36dba4975 /include/linux
parent	86fd166575c38c17ecd3a6b8fb9c64fa19871486 (diff)
parent	6b9e3331347ee9e84fe5c71d3eba7ec204f9bb25 (diff)
Merge branch 'bpf: add bpf_for_each_map_elem() helper'
Yonghong Song says:

====================

This patch set introduces the bpf_for_each_map_elem() helper. The helper permits a bpf program to iterate through all elements of a particular map.

The work was originally inspired by an internal discussion where firewall rules are kept in a map and the bpf program wants to check a packet's 5-tuple against all rules in the map. A bounded loop can be used, but it has a few drawbacks: as the number of loop iterations goes up, verification time goes up too, and for really large maps verification may fail. A helper which abstracts out the loop itself does not have this verification-time issue.

A recent discussion in [1] involves iterating over all hash map elements in a bpf program. Currently, iterating over all hashmap elements in a bpf program is not easy if the key space is really big, so having a helper to abstract out the loop itself is even more meaningful.

The proposed helper signature looks like:
  long bpf_for_each_map_elem(map, callback_fn, callback_ctx, flags)
where callback_fn is a static function and callback_ctx is a piece of data allocated on the caller's stack which can be accessed by the callback_fn. The callback_fn signature may differ between map types. For example, for hash/array maps, the signature is:
  long callback_fn(map, key, val, callback_ctx)

In the rest of the series, patches 1/2/3/4 do some refactoring. Patch 5 implements the core kernel support for the helper. Patches 6 and 7 add hashmap and arraymap support. Patches 8/9 add libbpf support. Patch 10 adds bpftool support. Patches 11 and 12 add selftests for hashmap and arraymap.

[1]: https://lore.kernel.org/bpf/20210122205415.113822-1-xiyou.wangcong@gmail.com/

Changelogs:
v4 -> v5:
  - rebase on top of bpf-next.
v3 -> v4:
  - better refactoring of check_func_call(): calculate subprogno outside of the __check_func_call() helper. (Andrii)
  - better documentation (like the list of supported maps and their callback signatures) in the uapi header. (Andrii)
  - implement and use ASSERT_LT in selftests. (Andrii)
  - a few other minor changes.
v2 -> v3:
  - add comments in retrieve_ptr_limit(), which is in sanitize_ptr_alu(), to clarify that the code is not executed for PTR_TO_MAP_KEY handling, but the code path is manually tested. (Alexei)
  - require BTF for the callback function. (Alexei)
  - simplify hashmap/arraymap callback return handling, as a return value in [0, 1] is already enforced by the verifier. (Alexei)
  - also mark a global subprog (if used in ld_imm64) as RELO_SUBPROG_ADDR. (Andrii)
  - handle the condition for marking RELO_SUBPROG_ADDR properly. (Andrii)
  - make bpftool subprog insn offset dumping consistent with pcrel calls. (Andrii)
v1 -> v2:
  - set up the callee frame in check_helper_call() and then proceed to verify the helper return value as normal. (Alexei)
  - use meta data to keep track of the map/func pointer, to avoid hard-coding the register number. (Alexei)
  - verify that the callback_fn return value is in the range [0, 1]. (Alexei)
  - add migrate_{disable, enable} to ensure the percpu value is the one the bpf program expects to see. (Alexei)
  - change the bpf_for_each_map_elem() return value to the number of iterated elements. (Andrii)
  - change the libbpf pseudo_func relo name to RELO_SUBPROG_ADDR and use more rigid checking for the relocation. (Andrii)
  - better format when printing the subprog address with bpftool. (Andrii)
  - use bpf_prog_test_run to trigger the bpf run, instead of bpf_iter. (Andrii)
  - other misc changes.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
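As a concrete illustration of the two signatures quoted in the cover letter, a minimal BPF-side sketch might look like the following. This is editorial illustration, not part of the merge: the map name "rules", the callback "check_rule", the program section, and the needle value are all invented for the example, and it assumes a clang/libbpf recent enough to know the new helper. The real usage examples live in the selftests added by patches 11 and 12.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Data shared between the program and the callback; it lives on the
 * caller's stack, which is what the new ARG_PTR_TO_STACK_OR_NULL
 * argument type in the diff below describes.
 */
struct callback_ctx {
	__u64 needle;
	__u32 matched;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} rules SEC(".maps");

/* For hash/array maps the callback signature is
 * long callback_fn(map, key, val, callback_ctx); the verifier enforces
 * a return value in [0, 1].
 */
static long check_rule(struct bpf_map *map, __u32 *key, __u64 *val,
		       struct callback_ctx *ctx)
{
	if (*val == ctx->needle) {
		ctx->matched = 1;
		return 1;	/* match found, stop the iteration */
	}
	return 0;		/* continue with the next element */
}

SEC("tc")
int scan_rules(struct __sk_buff *skb)
{
	struct callback_ctx ctx = { .needle = 42, .matched = 0 };

	/* Returns the number of elements iterated, or a negative error. */
	bpf_for_each_map_elem(&rules, check_rule, &ctx, 0);
	return ctx.matched;
}

char LICENSE[] SEC("license") = "GPL";

Note that check_rule must be a static function (and, since v3, one with BTF available), matching the ARG_PTR_TO_FUNC / PTR_TO_FUNC plumbing added in the headers below.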
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/bpf.h	17
-rw-r--r--	include/linux/bpf_verifier.h	3
2 files changed, 20 insertions, 0 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e1e4d2f60527..4c730863fa77 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -39,6 +39,7 @@ struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
+struct bpf_func_state;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
@@ -129,6 +130,13 @@ struct bpf_map_ops {
bool (*map_meta_equal)(const struct bpf_map *meta0,
const struct bpf_map *meta1);
+
+ int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee);
+ int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
+ void *callback_ctx, u64 flags);
+
/* BTF name and id of struct allocated by map_alloc */
const char * const map_btf_name;
int *map_btf_id;
@@ -295,6 +303,8 @@ enum bpf_arg_type {
ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
+ ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
+ ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */
__BPF_ARG_TYPE_MAX,
};
@@ -411,6 +421,8 @@ enum bpf_reg_type {
PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */
PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */
+ PTR_TO_FUNC, /* reg points to a bpf program function */
+ PTR_TO_MAP_KEY, /* reg points to a map element key */
};
/* The information passed from prog-specific *_is_valid_access
@@ -1385,6 +1397,10 @@ void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
struct bpf_link_info *info);
+int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee);
+
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
@@ -1887,6 +1903,7 @@ extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
+extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
const struct bpf_func_proto *bpf_tracing_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 971b33aca13d..51c2ffa3d901 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -68,6 +68,8 @@ struct bpf_reg_state {
unsigned long raw1;
unsigned long raw2;
} raw;
+
+ u32 subprogno; /* for PTR_TO_FUNC */
};
/* For PTR_TO_PACKET, used to find other pointers with the same variable
* offset, so they can share range knowledge.
@@ -204,6 +206,7 @@ struct bpf_func_state {
int acquired_refs;
struct bpf_reference_state *refs;
int allocated_stack;
+ bool in_callback_fn;
struct bpf_stack_state *stack;
};