From 28b93c64499ae09d9dc8c04123b15f8654a93c4c Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 18 Aug 2020 15:39:14 -0700 Subject: libbpf: Clean up and improve CO-RE reloc logging Add logging of local/target type kind (struct/union/typedef/etc). Preserve unresolved root type ID (for cases of typedef). Improve the format of CO-RE reloc spec output format to contain only relevant and succinct info. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200818223921.2911963-3-andriin@fb.com --- tools/lib/bpf/libbpf_internal.h | 78 +++++++++++++++++++++++++++++++---------- 1 file changed, 59 insertions(+), 19 deletions(-) (limited to 'tools/lib/bpf/libbpf_internal.h') diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 50d70e90d5f1..b776a7125c92 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -138,6 +138,44 @@ struct btf_ext_info { i < (sec)->num_info; \ i++, rec = (void *)rec + (seg)->rec_size) +/* + * The .BTF.ext ELF section layout defined as + * struct btf_ext_header + * func_info subsection + * + * The func_info subsection layout: + * record size for struct bpf_func_info in the func_info subsection + * struct btf_sec_func_info for section #1 + * a list of bpf_func_info records for section #1 + * where struct bpf_func_info mimics one in include/uapi/linux/bpf.h + * but may not be identical + * struct btf_sec_func_info for section #2 + * a list of bpf_func_info records for section #2 + * ...... + * + * Note that the bpf_func_info record size in .BTF.ext may not + * be the same as the one defined in include/uapi/linux/bpf.h. + * The loader should ensure that record_size meets minimum + * requirement and pass the record as is to the kernel. The + * kernel will handle the func_info properly based on its contents. + */ +struct btf_ext_header { + __u16 magic; + __u8 version; + __u8 flags; + __u32 hdr_len; + + /* All offsets are in bytes relative to the end of this header */ + __u32 func_info_off; + __u32 func_info_len; + __u32 line_info_off; + __u32 line_info_len; + + /* optional part of .BTF.ext header */ + __u32 core_relo_off; + __u32 core_relo_len; +}; + struct btf_ext { union { struct btf_ext_header *hdr; @@ -145,7 +183,7 @@ struct btf_ext { }; struct btf_ext_info func_info; struct btf_ext_info line_info; - struct btf_ext_info field_reloc_info; + struct btf_ext_info core_relo_info; __u32 data_size; }; @@ -170,32 +208,34 @@ struct bpf_line_info_min { __u32 line_col; }; -/* bpf_field_info_kind encodes which aspect of captured field has to be - * adjusted by relocations. Currently supported values are: - * - BPF_FIELD_BYTE_OFFSET: field offset (in bytes); - * - BPF_FIELD_EXISTS: field existence (1, if field exists; 0, otherwise); +/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value + * has to be adjusted by relocations. 
*/ -enum bpf_field_info_kind { +enum bpf_core_relo_kind { BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */ - BPF_FIELD_BYTE_SIZE = 1, + BPF_FIELD_BYTE_SIZE = 1, /* field size in bytes */ BPF_FIELD_EXISTS = 2, /* field existence in target kernel */ - BPF_FIELD_SIGNED = 3, - BPF_FIELD_LSHIFT_U64 = 4, - BPF_FIELD_RSHIFT_U64 = 5, + BPF_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */ + BPF_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */ + BPF_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */ }; -/* The minimum bpf_field_reloc checked by the loader +/* The minimum bpf_core_relo checked by the loader * - * Field relocation captures the following data: + * CO-RE relocation captures the following data: * - insn_off - instruction offset (in bytes) within a BPF program that needs * its insn->imm field to be relocated with actual field info; * - type_id - BTF type ID of the "root" (containing) entity of a relocatable - * field; + * type or field; * - access_str_off - offset into corresponding .BTF string section. String - * itself encodes an accessed field using a sequence of field and array - * indicies, separated by colon (:). It's conceptually very close to LLVM's - * getelementptr ([0]) instruction's arguments for identifying offset to - * a field. + * interpretation depends on specific relocation kind: + * - for field-based relocations, string encodes an accessed field using + * a sequence of field and array indices, separated by colon (:). It's + * conceptually very close to LLVM's getelementptr ([0]) instruction's + * arguments for identifying offset to a field. + * - for type-based relocations, strings is expected to be just "0"; + * - for enum value-based relocations, string contains an index of enum + * value within its enum type; * * Example to provide a better feel. * @@ -226,11 +266,11 @@ enum bpf_field_info_kind { * * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction */ -struct bpf_field_reloc { +struct bpf_core_relo { __u32 insn_off; __u32 type_id; __u32 access_str_off; - enum bpf_field_info_kind kind; + enum bpf_core_relo_kind kind; }; #endif /* __LIBBPF_LIBBPF_INTERNAL_H */ -- cgit v1.2.3 From 029258d7b22894fabcecb1626e1b87d18a6823f4 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 18 Aug 2020 18:36:04 -0700 Subject: libbpf: Remove any use of reallocarray() in libbpf Re-implement glibc's reallocarray() for libbpf internal-only use. reallocarray(), unfortunately, is not available in all versions of glibc, so requires extra feature detection and using reallocarray() stub from and COMPAT_NEED_REALLOCARRAY. All this complicates build of libbpf unnecessarily and is just a maintenance burden. Instead, it's trivial to implement libbpf-specific internal version and use it throughout libbpf. Which is what this patch does, along with converting some realloc() uses that should really have been reallocarray() in the first place. 
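A hedged usage sketch of the helper this patch introduces (its actual definition lands in the hunk that follows); the prog_slot type and ensure_prog_slots() wrapper here are made up purely for illustration:

#include <errno.h>
#include <stdlib.h>
#include "libbpf_internal.h"

/* Illustrative only: grow a hypothetical array of slots with the
 * overflow-checked libbpf_reallocarray() instead of raw realloc().
 */
struct prog_slot { int fd; };

static int ensure_prog_slots(struct prog_slot **slots, size_t *cap, size_t need)
{
	struct prog_slot *tmp;

	if (need <= *cap)
		return 0;
	/* returns NULL if need * sizeof(**slots) would overflow, so the
	 * multiplication can never silently wrap like it could with realloc()
	 */
	tmp = libbpf_reallocarray(*slots, need, sizeof(**slots));
	if (!tmp)
		return -ENOMEM;
	*slots = tmp;
	*cap = need;
	return 0;
}

As with realloc(), the original buffer stays valid on failure, which is why the result is assigned through a temporary pointer.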
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200819013607.3607269-2-andriin@fb.com --- tools/lib/bpf/libbpf_internal.h | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'tools/lib/bpf/libbpf_internal.h') diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index b776a7125c92..954bc2bd040c 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -9,6 +9,7 @@ #ifndef __LIBBPF_LIBBPF_INTERNAL_H #define __LIBBPF_LIBBPF_INTERNAL_H +#include #include "libbpf.h" #define BTF_INFO_ENC(kind, kind_flag, vlen) \ @@ -23,6 +24,12 @@ #define BTF_PARAM_ENC(name, type) (name), (type) #define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size) +#ifndef likely +#define likely(x) __builtin_expect(!!(x), 1) +#endif +#ifndef unlikely +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif #ifndef min # define min(x, y) ((x) < (y) ? (x) : (y)) #endif @@ -63,6 +70,24 @@ do { \ #define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__) #define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__) +/* + * Re-implement glibc's reallocarray() for libbpf internal-only use. + * reallocarray(), unfortunately, is not available in all versions of glibc, + * so requires extra feature detection and using reallocarray() stub from + * and COMPAT_NEED_REALLOCARRAY. All this complicates + * build of libbpf unnecessarily and is just a maintenance burden. Instead, + * it's trivial to implement libbpf-specific internal version and use it + * throughout libbpf. + */ +static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size) +{ + size_t total; + + if (unlikely(__builtin_mul_overflow(nmemb, size, &total))) + return NULL; + return realloc(ptr, total); +} + static inline bool libbpf_validate_opts(const char *opts, size_t opts_sz, size_t user_sz, const char *type_name) -- cgit v1.2.3 From 7084566a236fbc98beb11430d8d67dd08b2ac151 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 18 Aug 2020 18:36:05 -0700 Subject: tools/bpftool: Remove libbpf_internal.h usage in bpftool Most netlink-related functions were unique to bpftool usage, so I moved them into net.c. Few functions are still used by both bpftool and libbpf itself internally, so I've copy-pasted them (libbpf_nl_get_link, libbpf_netlink_open). It's a bit of duplication of code, but better separation of libbpf as a library with public API and bpftool, relying on unexposed functions in libbpf. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200819013607.3607269-3-andriin@fb.com --- tools/lib/bpf/libbpf_internal.h | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'tools/lib/bpf/libbpf_internal.h') diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 954bc2bd040c..65931e989eea 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -130,18 +130,6 @@ int bpf_object__section_size(const struct bpf_object *obj, const char *name, int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, __u32 *off); -struct nlattr; -typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb); -int libbpf_netlink_open(unsigned int *nl_pid); -int libbpf_nl_get_link(int sock, unsigned int nl_pid, - libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie); -int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex, - libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie); -int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex, - libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie); -int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle, - libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie); - struct btf_ext_info { /* * info points to the individual info section (e.g. func_info and -- cgit v1.2.3 From 85367030a6c7ef3373347cf816c698995074f6f0 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 18 Aug 2020 18:36:06 -0700 Subject: libbpf: Centralize poisoning and poison reallocarray() Most of libbpf source files already include libbpf_internal.h, so it's a good place to centralize identifier poisoning. So move kernel integer type poisoning there. And also add reallocarray to a poison list to prevent accidental use of it. libbpf_reallocarray() should be used universally instead. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200819013607.3607269-4-andriin@fb.com --- tools/lib/bpf/libbpf_internal.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'tools/lib/bpf/libbpf_internal.h') diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 65931e989eea..c8ed352671d5 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -10,6 +10,13 @@ #define __LIBBPF_LIBBPF_INTERNAL_H #include + +/* make sure libbpf doesn't use kernel-only integer typedefs */ +#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 + +/* prevent accidental re-addition of reallocarray() */ +#pragma GCC poison reallocarray + #include "libbpf.h" #define BTF_INFO_ENC(kind, kind_flag, vlen) \ -- cgit v1.2.3 From 3fc32f40c40207bf85ce1b007f18981c4673df96 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 19 Aug 2020 12:45:15 -0700 Subject: libbpf: Implement type-based CO-RE relocations support Implement support for TYPE_EXISTS/TYPE_SIZE/TYPE_ID_LOCAL/TYPE_ID_REMOTE relocations. These are examples of type-based relocations, as opposed to field-based relocations supported already. The difference is that they are calculating relocation values based on the type itself, not a field within a struct/union. Type-based relos have slightly different semantics when matching local types to kernel target types, see comments in bpf_core_types_are_compat() for details. Their behavior on failure to find target type in kernel BTF also differs. 
Instead of "poisoning" the relocatable instruction and subsequently failing the load in the kernel, they return 0 (which is rarely a valid return result, so user BPF code can use that to detect success/failure of the relocation and deal with it without extra "guarding" relocations). Also, it's always possible to check the existence of a type in the target kernel with a TYPE_EXISTS relocation, similarly to the field-based FIELD_EXISTS. The TYPE_ID_LOCAL relocation is a bit special in that it always succeeds (barring any libbpf/Clang bugs) and is resolved to a BTF ID using the **local** BTF info of the BPF program itself. Tests in subsequent patches demonstrate the usage and semantics of the new relocations. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200819194519.3375898-2-andriin@fb.com --- tools/lib/bpf/libbpf_internal.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'tools/lib/bpf/libbpf_internal.h') diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index c8ed352671d5..edd3511aa242 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -238,6 +238,10 @@ enum bpf_core_relo_kind { BPF_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */ BPF_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */ BPF_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */ + BPF_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */ + BPF_TYPE_ID_TARGET = 7, /* type ID in target kernel */ + BPF_TYPE_EXISTS = 8, /* type existence in target kernel */ + BPF_TYPE_SIZE = 9, /* type size in bytes */ }; /* The minimum bpf_core_relo checked by the loader -- cgit v1.2.3 From eacaaed784e2c9da69dea3030c81062c1fd66a37 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 19 Aug 2020 12:45:18 -0700 Subject: libbpf: Implement enum value-based CO-RE relocations Implement two relocations of a new enumerator value-based CO-RE relocation kind: ENUMVAL_EXISTS and ENUMVAL_VALUE. First, ENUMVAL_EXISTS allows detecting the presence of a named enumerator value in the target (kernel) BTF. This is useful for doing BPF helper/map/program type support detection from the BPF program side. The bpf_core_enum_value_exists() macro helper is provided to simplify built-in usage. Second, ENUMVAL_VALUE allows capturing an enumerator's integer value and relocating it according to the target BTF, if it changes. This is useful to have a guarantee against intentional or accidental re-ordering/re-numbering of some of the internal (non-UAPI) enumerations, where kernel developers don't care about UAPI backwards compatibility concerns. bpf_core_enum_value() allows capturing this succinctly and using correct enum values in code. LLVM uses the ldimm64 instruction to capture enumerator value-based relocations, so add support for ldimm64 instruction patching as well. 
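For a feel of how BPF program code consumes the type- and enum-value-based relocation kinds above, here is a hedged sketch. It assumes the bpf_core_enum_value_exists()/bpf_core_enum_value() helpers named in the commit message, plus bpf_core_type_exists()/bpf_core_type_size() macros from bpf_core_read.h in the same series; the section name and the bpf_ringbuf_map/bpf_func_id types are illustrative only.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

SEC("raw_tp/sys_enter")
int probe_kernel_features(void *ctx)
{
	/* TYPE_EXISTS/TYPE_SIZE: is struct bpf_ringbuf_map known to the
	 * running kernel, and how large is it there?
	 */
	if (bpf_core_type_exists(struct bpf_ringbuf_map))
		bpf_printk("ringbuf map struct is %d bytes",
			   bpf_core_type_size(struct bpf_ringbuf_map));

	/* ENUMVAL_EXISTS/ENUMVAL_VALUE: detect a helper and read the enum
	 * value as the target kernel defines it, not as local BTF says.
	 */
	if (bpf_core_enum_value_exists(enum bpf_func_id, BPF_FUNC_ringbuf_output))
		bpf_printk("ringbuf_output helper id: %d",
			   (int)bpf_core_enum_value(enum bpf_func_id, BPF_FUNC_ringbuf_output));

	return 0;
}

char LICENSE[] SEC("license") = "GPL";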
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200819194519.3375898-5-andriin@fb.com --- tools/lib/bpf/libbpf_internal.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools/lib/bpf/libbpf_internal.h') diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index edd3511aa242..61dff515a2f0 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -242,6 +242,8 @@ enum bpf_core_relo_kind { BPF_TYPE_ID_TARGET = 7, /* type ID in target kernel */ BPF_TYPE_EXISTS = 8, /* type existence in target kernel */ BPF_TYPE_SIZE = 9, /* type size in bytes */ + BPF_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */ + BPF_ENUMVAL_VALUE = 11, /* enum value integer value */ }; /* The minimum bpf_core_relo checked by the loader -- cgit v1.2.3 From dda1ec9fc7f8383cb469a82614dbce61f357f3f8 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 19 Aug 2020 23:14:09 -0700 Subject: libbpf: Fix libbpf build on compilers missing __builtin_mul_overflow GCC compilers older than version 5 don't support __builtin_mul_overflow yet. Given GCC 4.9 is the minimal supported compiler for building kernel and the fact that libbpf is a dependency of resolve_btfids, which is dependency of CONFIG_DEBUG_INFO_BTF=y, this needs to be handled. This patch fixes the issue by falling back to slower detection of integer overflow in such cases. Fixes: 029258d7b228 ("libbpf: Remove any use of reallocarray() in libbpf") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200820061411.1755905-2-andriin@fb.com --- tools/lib/bpf/libbpf_internal.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'tools/lib/bpf/libbpf_internal.h') diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 61dff515a2f0..4d1c366fca2c 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -10,6 +10,7 @@ #define __LIBBPF_LIBBPF_INTERNAL_H #include +#include /* make sure libbpf doesn't use kernel-only integer typedefs */ #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 @@ -77,6 +78,9 @@ do { \ #define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__) #define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__) +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif /* * Re-implement glibc's reallocarray() for libbpf internal-only use. * reallocarray(), unfortunately, is not available in all versions of glibc, @@ -90,8 +94,14 @@ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size) { size_t total; +#if __has_builtin(__builtin_mul_overflow) if (unlikely(__builtin_mul_overflow(nmemb, size, &total))) return NULL; +#else + if (size == 0 || nmemb > ULONG_MAX / size) + return NULL; + total = nmemb * size; +#endif return realloc(ptr, total); } -- cgit v1.2.3 From 88f7fe7233244101fa5b7786e2e298bf27fe1375 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 25 Sep 2020 13:54:30 -0700 Subject: libbpf: Support test run of raw tracepoint programs Add bpf_prog_test_run_opts() with support of new fields in bpf_attr.test, namely, flags and cpu. Also extend _opts operations to support outputs via opts. 
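As a hedged sketch of the user-space side described above (DECLARE_LIBBPF_OPTS(), the retval/repeat fields, and the BPF_F_TEST_RUN_ON_CPU flag name are assumed from the surrounding series rather than shown in this excerpt):

#include <bpf/bpf.h>

/* Run an already-loaded program once, pinned to a chosen CPU, and report
 * its return value. prog_fd comes from bpf_program__fd() or similar.
 */
static int test_run_on_cpu(int prog_fd, int cpu)
{
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.flags = BPF_F_TEST_RUN_ON_CPU,	/* new flags field */
		.cpu = cpu,			/* new cpu field */
		.repeat = 1,
	);
	int err;

	err = bpf_prog_test_run_opts(prog_fd, &opts);
	if (err)
		return err;
	/* outputs come back through the same opts struct */
	return opts.retval;
}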
Signed-off-by: Song Liu Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200925205432.1777-3-songliubraving@fb.com --- tools/lib/bpf/libbpf_internal.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'tools/lib/bpf/libbpf_internal.h') diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 4d1c366fca2c..d3f7a6a6bc98 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -136,6 +136,11 @@ static inline bool libbpf_validate_opts(const char *opts, ((opts) && opts->sz >= offsetofend(typeof(*(opts)), field)) #define OPTS_GET(opts, field, fallback_value) \ (OPTS_HAS(opts, field) ? (opts)->field : fallback_value) +#define OPTS_SET(opts, field, value) \ + do { \ + if (OPTS_HAS(opts, field)) \ + (opts)->field = value; \ + } while (0) int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz); int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz); -- cgit v1.2.3 From 192f5a1fe6894dca58d14dc883e6c7030e7267f7 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 25 Sep 2020 18:13:51 -0700 Subject: libbpf: Generalize common logic for managing dynamically-sized arrays Managing a dynamically-sized array is a common, but not trivial, piece of functionality, which requires a significant amount of logic and code to implement properly. So instead of re-implementing it all the time, extract it into a helper function and reuse it. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200926011357.2366158-4-andriin@fb.com --- tools/lib/bpf/libbpf_internal.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools/lib/bpf/libbpf_internal.h') diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index d3f7a6a6bc98..eed5b624a784 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -105,6 +105,9 @@ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size) return realloc(ptr, total); } +void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz, + size_t cur_cnt, size_t max_cnt, size_t add_cnt); + static inline bool libbpf_validate_opts(const char *opts, size_t opts_sz, size_t user_sz, const char *type_name) -- cgit v1.2.3 From 9c6c5c48d7e9c00c541e0f1a5cea3a96767e825c Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 29 Sep 2020 16:28:40 -0700 Subject: libbpf: Make btf_dump work with modifiable BTF Ensure that btf_dump can accommodate new BTF types being appended to a BTF instance after the struct btf_dump was created. This came up during an attempt to use btf_dump for raw type dumping in selftests, but given the changes are not excessive, it's good not to have any gotchas in API usage, so I decided to support such a use case in general. 
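Circling back to the OPTS_SET() macro added two patches up: paired with OPTS_GET()/OPTS_HAS(), it lets an internal function read optional inputs and write optional outputs without breaking callers compiled against an older, smaller opts struct. A minimal sketch with a made-up opts layout and function:

#include <errno.h>
#include "libbpf_internal.h"

struct fictional_run_opts {
	size_t sz;	/* size of this struct, filled in by the caller */
	int repeat;	/* optional input */
	int retval;	/* optional output */
};

static int fictional_prog_run(int prog_fd, struct fictional_run_opts *opts)
{
	/* fall back to one iteration if the caller's struct predates .repeat */
	int repeat = OPTS_GET(opts, repeat, 1);
	int ret = 0;

	if (prog_fd < 0 || repeat < 1)
		return -EINVAL;

	/* ... pretend the program was run `repeat` times, yielding `ret` ... */

	/* write the output only if the caller's struct is new enough for it */
	OPTS_SET(opts, retval, ret);
	return 0;
}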
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200929232843.1249318-2-andriin@fb.com --- tools/lib/bpf/libbpf_internal.h | 1 + 1 file changed, 1 insertion(+) (limited to 'tools/lib/bpf/libbpf_internal.h') diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index eed5b624a784..d99bc847bf84 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -107,6 +107,7 @@ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size) void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t cur_cnt, size_t max_cnt, size_t add_cnt); +int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt); static inline bool libbpf_validate_opts(const char *opts, size_t opts_sz, size_t user_sz, -- cgit v1.2.3
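To close the loop on the dynamically-sized array helpers above, here is a hedged usage sketch. The semantics assumed (btf_add_mem() returns a pointer to add_cnt freshly reserved elements, growing the backing array as needed up to max_cnt; btf_ensure_mem() returns 0 once at least need_cnt elements fit) are inferred from the declarations and commit messages; the id_list type is invented for illustration.

#include <errno.h>
#include <stdint.h>
#include <linux/types.h>
#include "libbpf_internal.h"

struct id_list {
	__u32 *ids;
	size_t cap;	/* allocated element count, managed by the helpers */
	size_t cnt;	/* used element count */
};

static int id_list_append(struct id_list *l, __u32 id)
{
	__u32 *slot;

	/* reserve room for exactly one more element, growing if needed */
	slot = btf_add_mem((void **)&l->ids, &l->cap, sizeof(*l->ids),
			   l->cnt, SIZE_MAX, 1);
	if (!slot)
		return -ENOMEM;
	*slot = id;
	l->cnt++;
	return 0;
}

static int id_list_reserve(struct id_list *l, size_t total)
{
	/* make sure at least `total` elements fit before bulk-filling them */
	return btf_ensure_mem((void **)&l->ids, &l->cap, sizeof(*l->ids), total);
}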