Compare commits
10 Commits: e6f1de923e ... fdd07a4d7f

| Author | SHA1 | Date |
|---|---|---|
| | fdd07a4d7f | |
| | 9d7b729575 | |
| | 161a1520aa | |
| | 85374adbbd | |
| | 163379aa12 | |
| | 1dcf95dee0 | |
| | c447b0050e | |
| | 9b5804cc2a | |
| | 4a25fa5b89 | |
| | 767848cdce | |
@@ -0,0 +1,53 @@
From 1867490d8fc635c552569d51c48debff588d2191 Mon Sep 17 00:00:00 2001
From: Andreas Ziegler <ziegler.andreas@siemens.com>
Date: Wed, 3 Jul 2024 10:34:36 +0200
Subject: [PATCH] libbpf: Add NULL checks to bpf_object__{prev_map,next_map}

In the current state, an erroneous call to
bpf_object__find_map_by_name(NULL, ...) leads to a segmentation
fault through the following call chain:

bpf_object__find_map_by_name(obj = NULL, ...)
-> bpf_object__for_each_map(pos, obj = NULL)
-> bpf_object__next_map((obj = NULL), NULL)
-> return (obj = NULL)->maps

While calling bpf_object__find_map_by_name with obj = NULL is
obviously incorrect, this should not lead to a segmentation
fault but rather be handled gracefully.

As __bpf_map__iter already handles this situation correctly, we
can delegate the check for the regular case there and only add
a check in case the prev or next parameter is NULL.

Signed-off-by: Andreas Ziegler <ziegler.andreas@siemens.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20240703083436.505124-1-ziegler.andreas@siemens.com
---
src/libbpf.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/libbpf.c b/src/libbpf.c
index 4a28fac49..30f121754 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -10375,7 +10375,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
- if (prev == NULL)
+ if (prev == NULL && obj != NULL)
 return obj->maps;

 return __bpf_map__iter(prev, obj, 1);
@@ -10384,7 +10384,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
{
- if (next == NULL) {
+ if (next == NULL && obj != NULL) {
 if (!obj->nr_maps)
 return NULL;
 return obj->maps + obj->nr_maps - 1;
--
2.33.0

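For context, a minimal userspace sketch (not part of the patch) of the call that used to crash: with the checks above, passing a NULL object simply yields no maps and the lookup returns NULL instead of dereferencing it.

```c
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* Erroneous call with obj == NULL: previously a segfault inside
	 * bpf_object__next_map(); with the added checks it is handled
	 * gracefully and the lookup simply fails. */
	struct bpf_map *map = bpf_object__find_map_by_name(NULL, "some_map");

	printf("map = %p\n", (void *)map); /* expected: (nil) */
	return 0;
}
```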
@@ -0,0 +1,49 @@
From 89ca11a79bb93824e82897bdb48727b5d75e469a Mon Sep 17 00:00:00 2001
From: Andrey Grafin <conquistador@yandex-team.ru>
Date: Wed, 17 Jan 2024 16:06:18 +0300
Subject: [PATCH] libbpf: Apply map_set_def_max_entries() for inner_maps on
creation

This patch allows to auto create BPF_MAP_TYPE_ARRAY_OF_MAPS and
BPF_MAP_TYPE_HASH_OF_MAPS with values of BPF_MAP_TYPE_PERF_EVENT_ARRAY
by bpf_object__load().

Previous behaviour created a zero filled btf_map_def for inner maps and
tried to use it for a map creation but the linux kernel forbids to create
a BPF_MAP_TYPE_PERF_EVENT_ARRAY map with max_entries=0.

Fixes: 646f02ffdd49 ("libbpf: Add BTF-defined map-in-map support")
Signed-off-by: Andrey Grafin <conquistador@yandex-team.ru>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20240117130619.9403-1-conquistador@yandex-team.ru
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
src/libbpf.c | 4 ++++
1 file changed, 4 insertions(+)

diff --git a/src/libbpf.c b/src/libbpf.c
index afd09571c..b8b00da62 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -70,6 +70,7 @@

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
+static int map_set_def_max_entries(struct bpf_map *map);

static int __base_pr(enum libbpf_print_level level, const char *format,
 va_list args)
@@ -5172,6 +5173,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b

 if (bpf_map_type__is_map_in_map(def->type)) {
 if (map->inner_map) {
+ err = map_set_def_max_entries(map->inner_map);
+ if (err)
+ return err;
 err = bpf_object__create_map(obj, map->inner_map, true);
 if (err) {
 pr_warn("map '%s': failed to create inner map: %d\n",
--
2.33.0

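For reference, a sketch of the kind of BTF-defined map-in-map declaration this enables (names are illustrative; the macros come from bpf_helpers.h). Before the fix, the zero-filled inner definition (max_entries == 0) was rejected by the kernel for BPF_MAP_TYPE_PERF_EVENT_ARRAY; with the fix, libbpf applies its usual default (based on the number of possible CPUs) before creating the inner map.

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Inner map definition: note there is no max_entries here. */
struct inner_map {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
};

/* Outer map whose values follow the inner definition above. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, 16);
	__uint(key_size, sizeof(int));
	__array(values, struct inner_map);
} outer SEC(".maps");

char LICENSE[] SEC("license") = "GPL";
```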
@@ -0,0 +1,56 @@
From 3827aa514cba7db16b81236712a46e8b70260fcd Mon Sep 17 00:00:00 2001
From: "Jose E. Marchesi" <jose.marchesi@oracle.com>
Date: Wed, 8 May 2024 12:13:13 +0200
Subject: [PATCH] bpf: Avoid uninitialized value in BPF_CORE_READ_BITFIELD

[Changes from V1:
- Use a default branch in the switch statement to initialize `val'.]

GCC warns that `val' may be used uninitialized in the
BPF_CRE_READ_BITFIELD macro, defined in bpf_core_read.h as:

[...]
unsigned long long val; \
[...] \
switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
case 1: val = *(const unsigned char *)p; break; \
case 2: val = *(const unsigned short *)p; break; \
case 4: val = *(const unsigned int *)p; break; \
case 8: val = *(const unsigned long long *)p; break; \
} \
[...]
val; \
} \

This patch adds a default entry in the switch statement that sets
`val' to zero in order to avoid the warning, and random values to be
used in case __builtin_preserve_field_info returns unexpected values
for BPF_FIELD_BYTE_SIZE.

Tested in bpf-next master.
No regressions.

Signed-off-by: Jose E. Marchesi <jose.marchesi@oracle.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240508101313.16662-1-jose.marchesi@oracle.com

Conflict: NA
Reference:https://github.com/libbpf/libbpf/commit/3827aa514cba7db16b81236712a46e8b70260fcd
---
src/bpf_core_read.h | 1 +
1 file changed, 1 insertion(+)

diff --git a/src/bpf_core_read.h b/src/bpf_core_read.h
index b5c7ce5c2..c0e13cdf9 100644
--- a/src/bpf_core_read.h
+++ b/src/bpf_core_read.h
@@ -104,6 +104,7 @@ enum bpf_enum_value_kind {
 case 2: val = *(const unsigned short *)p; break; \
 case 4: val = *(const unsigned int *)p; break; \
 case 8: val = *(const unsigned long long *)p; break; \
+ default: val = 0; break; \
 } \
 val <<= __CORE_RELO(s, field, LSHIFT_U64); \
 if (__CORE_RELO(s, field, SIGNED)) \

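The warning class in miniature (plain C, not the macro itself): without a default arm, a switch that matches no case leaves the variable unset on that path, which is exactly what the added `default: val = 0` rules out.

```c
unsigned long long read_sized(const void *p, unsigned int byte_size)
{
	unsigned long long val;

	switch (byte_size) {
	case 1: val = *(const unsigned char *)p; break;
	case 2: val = *(const unsigned short *)p; break;
	case 4: val = *(const unsigned int *)p; break;
	case 8: val = *(const unsigned long long *)p; break;
	default: val = 0; break; /* mirrors the fix: defined on every path */
	}
	return val;
}
```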
backport-libbpf-Do-not-resolve-size-on-duplicate-FUNCs.patch (new file, 33 lines)
@@ -0,0 +1,33 @@
From ecf998ed8ff51efd3887ff7caca0a0cc56a88082 Mon Sep 17 00:00:00 2001
From: Eric Long <i@hack3r.moe>
Date: Wed, 2 Oct 2024 14:25:06 +0800
Subject: [PATCH] libbpf: Do not resolve size on duplicate FUNCs

FUNCs do not have sizes, thus currently btf__resolve_size will fail
with -EINVAL. Add conditions so that we only update size when the BTF
object is not function or function prototype.

Signed-off-by: Eric Long <i@hack3r.moe>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20241002-libbpf-dup-extern-funcs-v4-1-560eb460ff90@hack3r.moe
---
src/linker.c | 4 ++++
1 file changed, 4 insertions(+)

diff --git a/src/linker.c b/src/linker.c
index 81dbbdd79..f83c1c299 100644
--- a/src/linker.c
+++ b/src/linker.c
@@ -2451,6 +2451,10 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
 if (glob_sym && glob_sym->var_idx >= 0) {
 __s64 sz;

+ /* FUNCs don't have size, nothing to update */
+ if (btf_is_func(t))
+ continue;
+
 dst_var = &dst_sec->sec_vars[glob_sym->var_idx];
 /* Because underlying BTF type might have
 * changed, so might its size have changed, so

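A standalone sketch of the rule the patch applies, using only public BTF APIs (this is illustrative, not the linker's internal code): size resolution is only meaningful for data-carrying kinds, so FUNC and FUNC_PROTO are skipped rather than treated as errors.

```c
#include <bpf/btf.h>

/* Return the byte size of a BTF type, or 0 for kinds that have none. */
long long type_size_or_zero(const struct btf *btf, __u32 type_id)
{
	const struct btf_type *t = btf__type_by_id(btf, type_id);

	if (!t)
		return 0;
	if (btf_is_func(t) || btf_is_func_proto(t))
		return 0; /* FUNCs have no size; btf__resolve_size() would give -EINVAL */
	return btf__resolve_size(btf, type_id);
}
```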
@@ -0,0 +1,109 @@
From 7b5237996a42c3b8a6fe8ccae656047de2831f58 Mon Sep 17 00:00:00 2001
From: David Vernet <void@manifault.com>
Date: Wed, 24 Jul 2024 12:14:58 -0500
Subject: [PATCH] libbpf: Don't take direct pointers into BTF data from st_ops

In struct bpf_struct_ops, we have take a pointer to a BTF type name, and
a struct btf_type. This was presumably done for convenience, but can
actually result in subtle and confusing bugs given that BTF data can be
invalidated before a program is loaded. For example, in sched_ext, we
may sometimes resize a data section after a skeleton has been opened,
but before the struct_ops scheduler map has been loaded. This may cause
the BTF data to be realloc'd, which can then cause a UAF when loading
the program because the struct_ops map has pointers directly into the
BTF data.

We're already storing the BTF type_id in struct bpf_struct_ops. Because
type_id is stable, we can therefore just update the places where we were
looking at those pointers to instead do the lookups we need from the
type_id.

Fixes: 590a00888250 ("bpf: libbpf: Add STRUCT_OPS support")
Signed-off-by: David Vernet <void@manifault.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240724171459.281234-1-void@manifault.com

Conflict:parts of functions do not exist in v0.8.1
Reference:https://github.com/libbpf/libbpf/commit/7b5237996a42c3b8a6fe8ccae656047de2831f58
---
src/libbpf.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/libbpf.c b/src/libbpf.c
index 5eb2452..25a91ba 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -327,8 +327,6 @@ struct bpf_program {
};

struct bpf_struct_ops {
- const char *tname;
- const struct btf_type *type;
 struct bpf_program **progs;
 __u32 *kern_func_off;
 /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
@@ -955,8 +953,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
 int err;

 st_ops = map->st_ops;
- type = st_ops->type;
- tname = st_ops->tname;
+ type = btf__type_by_id(btf, st_ops->type_id);
+ tname = btf__name_by_offset(btf, type->name_off);
 err = find_struct_ops_kern_types(kern_btf, tname,
 &kern_type, &kern_type_id,
 &kern_vtype, &kern_vtype_id,
@@ -1175,8 +1173,6 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
 memcpy(st_ops->data,
 obj->efile.st_ops_data->d_buf + vsi->offset,
 type->size);
- st_ops->tname = tname;
- st_ops->type = type;
 st_ops->type_id = type_id;

 pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
@@ -9361,6 +9357,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
 Elf64_Shdr *shdr, Elf_Data *data)
{
+ const struct btf_type *type;
 const struct btf_member *member;
 struct bpf_struct_ops *st_ops;
 struct bpf_program *prog;
@@ -9420,13 +9417,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
 }
 insn_idx = sym->st_value / BPF_INSN_SZ;

- member = find_member_by_offset(st_ops->type, moff * 8);
+ type = btf__type_by_id(btf, st_ops->type_id);
+ member = find_member_by_offset(type, moff * 8);
 if (!member) {
 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
 map->name, moff);
 return -EINVAL;
 }
- member_idx = member - btf_members(st_ops->type);
+ member_idx = member - btf_members(type);
 name = btf__name_by_offset(btf, member->name_off);

 if (!resolve_func_ptr(btf, member->type, NULL)) {
@@ -11967,6 +11965,7 @@ static int bpf_link__detach_struct_ops(struct bpf_link *link)

struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
{
+ const struct btf_type *type;
 struct bpf_struct_ops *st_ops;
 struct bpf_link *link;
 __u32 i, zero = 0;
@@ -11980,7 +11979,8 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
 return libbpf_err_ptr(-EINVAL);

 st_ops = map->st_ops;
- for (i = 0; i < btf_vlen(st_ops->type); i++) {
+ type = btf__type_by_id(map->obj->btf, st_ops->type_id);
+ for (i = 0; i < btf_vlen(type); i++) {
 struct bpf_program *prog = st_ops->progs[i];
 void *kern_data;
 int prog_fd;
--
2.33.0

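The underlying hazard, reduced to generic C (this is not libbpf code): caching a raw pointer into a buffer that may later be realloc()'d leaves a dangling pointer, while caching an index and re-deriving the pointer on use stays valid. That is exactly the pointer-to-type_id switch made above.

```c
#include <stdlib.h>
#include <string.h>

struct buf { char *data; size_t len; };

int main(void)
{
	struct buf b = { .data = malloc(16), .len = 16 };
	char *cached, *grown;
	size_t idx = 3;

	if (!b.data)
		return 1;
	memset(b.data, 'a', b.len);
	cached = &b.data[idx];            /* like st_ops->type: a raw pointer */

	grown = realloc(b.data, 1 << 20); /* like resizing a data section */
	if (!grown)
		return 1;
	b.data = grown;

	/* 'cached' may now dangle; 'b.data + idx' is re-derived and valid,
	 * just as btf__type_by_id(btf, type_id) is after BTF data moves. */
	(void)cached;
	return b.data[idx] == 'a' ? 0 : 1;
}
```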
@@ -0,0 +1,47 @@
From 0167a883554df812013ae1778724943ed0c8a069 Mon Sep 17 00:00:00 2001
From: Yonghong Song <yhs@fb.com>
Date: Mon, 6 Jun 2022 23:26:10 -0700
Subject: [PATCH] libbpf: Fix an error in 64bit relocation value computation

Currently, the 64bit relocation value in the instruction
is computed as follows:
__u64 imm = insn[0].imm + ((__u64)insn[1].imm << 32)

Suppose insn[0].imm = -1 (0xffffffff) and insn[1].imm = 1.
With the above computation, insn[0].imm will first sign-extend
to 64bit -1 (0xffffffffFFFFFFFF) and then add 0x1FFFFFFFF,
producing incorrect value 0xFFFFFFFF. The correct value
should be 0x1FFFFFFFF.

Changing insn[0].imm to __u32 first will prevent 64bit sign
extension and fix the issue. Merging high and low 32bit values
also changed from '+' to '|' to be consistent with other
similar occurences in kernel and libbpf.

Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Dave Marchevsky <davemarchevsky@fb.com>
Signed-off-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/r/20220607062610.3717378-1-yhs@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Conflict:NA
Reference:https://github.com/libbpf/libbpf/commit/0167a883554df812013ae1778724943ed0c8a069
---
src/relo_core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/relo_core.c b/src/relo_core.c
index ba4453d..a9216b6 100644
--- a/src/relo_core.c
+++ b/src/relo_core.c
@@ -1027,7 +1027,7 @@ int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
 return -EINVAL;
 }

- imm = insn[0].imm + ((__u64)insn[1].imm << 32);
+ imm = (__u32)insn[0].imm | ((__u64)insn[1].imm << 32);
 if (res->validate && imm != orig_val) {
 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
 prog_name, relo_idx,
--
2.33.0

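The arithmetic from the commit message as a runnable check, using the quoted values:

```c
#include <stdio.h>

int main(void)
{
	int lo = -1; /* insn[0].imm = 0xffffffff */
	int hi = 1;  /* insn[1].imm */

	/* old: lo sign-extends to 0xffffffffffffffff before the addition */
	unsigned long long old_val = lo + ((unsigned long long)hi << 32);
	/* new: cast the low half to an unsigned 32-bit value, then merge with '|' */
	unsigned long long new_val = (unsigned int)lo | ((unsigned long long)hi << 32);

	printf("old = 0x%llx\n", old_val); /* 0xffffffff  (wrong)   */
	printf("new = 0x%llx\n", new_val); /* 0x1ffffffff (correct) */
	return 0;
}
```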
@@ -0,0 +1,180 @@
From f6f24022d3054d2855612e642f8fe9f1148b4275 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko <andrii@kernel.org>
Date: Tue, 27 Aug 2024 13:37:21 -0700
Subject: [PATCH] libbpf: Fix bpf_object__open_skeleton()'s mishandling of
options
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We do an ugly copying of options in bpf_object__open_skeleton() just to
be able to set object name from skeleton's recorded name (while still
allowing user to override it through opts->object_name).

This is not just ugly, but it also is broken due to memcpy() that
doesn't take into account potential skel_opts' and user-provided opts'
sizes differences due to backward and forward compatibility. This leads
to copying over extra bytes and then failing to validate options
properly. It could, technically, lead also to SIGSEGV, if we are unlucky.

So just get rid of that memory copy completely and instead pass
default object name into bpf_object_open() directly, simplifying all
this significantly. The rule now is that obj_name should be non-NULL for
bpf_object_open() when called with in-memory buffer, so validate that
explicitly as well.

We adopt bpf_object__open_mem() to this as well and generate default
name (based on buffer memory address and size) outside of bpf_object_open().

Fixes: d66562fba1ce ("libbpf: Add BPF object skeleton support")
Reported-by: Daniel Müller <deso@posteo.net>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Daniel Müller <deso@posteo.net>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/bpf/20240827203721.1145494-1-andrii@kernel.org

Conflict:1:Context adaptation: Deleted token_path. No related patch is introduced in this version.
2:add bpf_object__open_xattr by bpf_object__open_file, add bpf_object__open_buffer by bpf_object__open_mem
Reference: https://github.com/libbpf/libbpf/commit/f6f24022d3054d2855612e642f8fe9f1148b4275
---
src/libbpf.c | 61 ++++++++++++++++++++++------------------------
1 file changed, 26 insertions(+), 35 deletions(-)

diff --git a/src/libbpf.c b/src/libbpf.c
index 25a91ba..11ccc70 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -7362,16 +7362,19 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
}

static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+ const char *obj_name,
 const struct bpf_object_open_opts *opts)
{
- const char *obj_name, *kconfig, *btf_tmp_path;
+ const char *kconfig, *btf_tmp_path;
 struct bpf_object *obj;
- char tmp_name[64];
 int err;
 char *log_buf;
 size_t log_size;
 __u32 log_level;

+ if (obj_buf && !obj_name)
+ return ERR_PTR(-EINVAL);
+
 if (elf_version(EV_CURRENT) == EV_NONE) {
 pr_warn("failed to init libelf for %s\n",
 path ? : "(mem buf)");
@@ -7381,16 +7384,12 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
 if (!OPTS_VALID(opts, bpf_object_open_opts))
 return ERR_PTR(-EINVAL);

- obj_name = OPTS_GET(opts, object_name, NULL);
+ obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name;
 if (obj_buf) {
- if (!obj_name) {
- snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
- (unsigned long)obj_buf,
- (unsigned long)obj_buf_sz);
- obj_name = tmp_name;
- }
 path = obj_name;
 pr_debug("loading object '%s' from buffer\n", obj_name);
+ } else {
+ pr_debug("loading object from %s\n", path);
 }

 log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
@@ -7462,7 +7461,7 @@ __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
 return NULL;

 pr_debug("loading %s\n", attr->file);
- return bpf_object_open(attr->file, NULL, 0, &opts);
+ return bpf_object_open(attr->file, NULL, 0, NULL, &opts);
}

struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
@@ -7486,25 +7485,30 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
 if (!path)
 return libbpf_err_ptr(-EINVAL);

- pr_debug("loading %s\n", path);
-
- return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
+ return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts));
}

struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
 const struct bpf_object_open_opts *opts)
{
+ char tmp_name[64];
+
 if (!obj_buf || obj_buf_sz == 0)
 return libbpf_err_ptr(-EINVAL);

- return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
+ /* create a (quite useless) default "name" for this memory buffer object */
+ snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz);
+
+ return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts));
}

struct bpf_object *
bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
 const char *name)
{
+ char tmp_name[64];
+
 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 .object_name = name,
 /* wrong default, but backwards-compatible */
@@ -7515,7 +7519,10 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
 if (!obj_buf || obj_buf_sz == 0)
 return errno = EINVAL, NULL;

- return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, &opts));
+ /* create a (quite useless) default "name" for this memory buffer object */
+ snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz);
+
+ return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, &opts));
}

static int bpf_object_unload(struct bpf_object *obj)
@@ -13013,29 +13020,13 @@ static int populate_skeleton_progs(const struct bpf_object *obj,
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
 const struct bpf_object_open_opts *opts)
{
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
- .object_name = s->name,
- );
 struct bpf_object *obj;
 int err;

- /* Attempt to preserve opts->object_name, unless overriden by user
- * explicitly. Overwriting object name for skeletons is discouraged,
- * as it breaks global data maps, because they contain object name
- * prefix as their own map name prefix. When skeleton is generated,
- * bpftool is making an assumption that this name will stay the same.
- */
- if (opts) {
- memcpy(&skel_opts, opts, sizeof(*opts));
- if (!opts->object_name)
- skel_opts.object_name = s->name;
- }
-
- obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
- err = libbpf_get_error(obj);
- if (err) {
- pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
- s->name, err);
+ obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err);
 return libbpf_err(err);
 }

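A generic illustration (not libbpf's actual structs) of why the removed memcpy() was unsafe: opts structs are versioned by a leading size field, so a caller built against an older definition may pass a smaller object than the library's current struct, and copying sizeof(current struct) bytes reads past the caller's buffer before validation even runs.

```c
#include <stddef.h>
#include <string.h>

struct opts_old { size_t sz; const char *object_name; };            /* caller's view  */
struct opts_new { size_t sz; const char *object_name; int extra; }; /* library's view */

void open_with_copy(const void *user_opts)
{
	struct opts_new local;

	/* Buggy pattern: always copies sizeof(struct opts_new) bytes, even
	 * when the caller only allocated a struct opts_old, so the copy
	 * reads past the end of the caller's object and 'extra' is junk. */
	memcpy(&local, user_opts, sizeof(local));
	(void)local;
}
```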
@@ -0,0 +1,135 @@
From 984dcc97ae50c566924277aedc4967e1222e38c2 Mon Sep 17 00:00:00 2001
From: Quentin Monnet <qmo@kernel.org>
Date: Thu, 5 Dec 2024 13:59:42 +0000
Subject: [PATCH] libbpf: Fix segfault due to libelf functions not setting
errno

Libelf functions do not set errno on failure. Instead, it relies on its
internal _elf_errno value, that can be retrieved via elf_errno (or the
corresponding message via elf_errmsg()). From "man libelf":

If a libelf function encounters an error it will set an internal
error code that can be retrieved with elf_errno. Each thread
maintains its own separate error code. The meaning of each error
code can be determined with elf_errmsg, which returns a string
describing the error.

As a consequence, libbpf should not return -errno when a function from
libelf fails, because an empty value will not be interpreted as an error
and won't prevent the program to stop. This is visible in
bpf_linker__add_file(), for example, where we call a succession of
functions that rely on libelf:

err = err ?: linker_load_obj_file(linker, filename, opts, &obj);
err = err ?: linker_append_sec_data(linker, &obj);
err = err ?: linker_append_elf_syms(linker, &obj);
err = err ?: linker_append_elf_relos(linker, &obj);
err = err ?: linker_append_btf(linker, &obj);
err = err ?: linker_append_btf_ext(linker, &obj);

If the object file that we try to process is not, in fact, a correct
object file, linker_load_obj_file() may fail with errno not being set,
and return 0. In this case we attempt to run linker_append_elf_sysms()
and may segfault.

This can happen (and was discovered) with bpftool:

$ bpftool gen object output.o sample_ret0.bpf.c
libbpf: failed to get ELF header for sample_ret0.bpf.c: invalid `Elf' handle
zsh: segmentation fault (core dumped) bpftool gen object output.o sample_ret0.bpf.c

Fix the issue by returning a non-null error code (-EINVAL) when libelf
functions fail.

Fixes: faf6ed321cf6 ("libbpf: Add BPF static linker APIs")
Signed-off-by: Quentin Monnet <qmo@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20241205135942.65262-1-qmo@kernel.org
---
src/linker.c | 22 ++++++++--------------
1 file changed, 8 insertions(+), 14 deletions(-)

diff --git a/src/linker.c b/src/linker.c
index cf71d149f..e56ba6e67 100644
--- a/src/linker.c
+++ b/src/linker.c
@@ -566,17 +566,15 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
 }
 obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL);
 if (!obj->elf) {
- err = -errno;
 pr_warn_elf("failed to parse ELF file '%s'", filename);
- return err;
+ return -EINVAL;
 }

 /* Sanity check ELF file high-level properties */
 ehdr = elf64_getehdr(obj->elf);
 if (!ehdr) {
- err = -errno;
 pr_warn_elf("failed to get ELF header for %s", filename);
- return err;
+ return -EINVAL;
 }
 if (ehdr->e_ident[EI_DATA] != host_endianness) {
 err = -EOPNOTSUPP;
@@ -606,9 +604,8 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
 }

 if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) {
- err = -errno;
 pr_warn_elf("failed to get SHSTRTAB section index for %s", filename);
- return err;
+ return -EINVAL;
 }

 scn = NULL;
@@ -618,26 +615,23 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,

 shdr = elf64_getshdr(scn);
 if (!shdr) {
- err = -errno;
 pr_warn_elf("failed to get section #%zu header for %s",
 sec_idx, filename);
- return err;
+ return -EINVAL;
 }

 sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name);
 if (!sec_name) {
- err = -errno;
 pr_warn_elf("failed to get section #%zu name for %s",
 sec_idx, filename);
- return err;
+ return -EINVAL;
 }

 data = elf_getdata(scn, 0);
 if (!data) {
- err = -errno;
 pr_warn_elf("failed to get section #%zu (%s) data from %s",
 sec_idx, sec_name, filename);
- return err;
+ return -EINVAL;
 }

 sec = add_src_sec(obj, sec_name);
@@ -2680,14 +2674,14 @@ int bpf_linker__finalize(struct bpf_linker *linker)

 /* Finalize ELF layout */
 if (elf_update(linker->elf, ELF_C_NULL) < 0) {
- err = -errno;
+ err = -EINVAL;
 pr_warn_elf("failed to finalize ELF layout");
 return libbpf_err(err);
 }

 /* Write out final ELF contents */
 if (elf_update(linker->elf, ELF_C_WRITE) < 0) {
- err = -errno;
+ err = -EINVAL;
 pr_warn_elf("failed to write ELF contents");
 return libbpf_err(err);
 }

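A small sketch of the failure mode and the chosen fix (an illustrative helper, not the linker's code): libelf reports failures through elf_errno()/elf_errmsg(), so "-errno" can be 0 and look like success; returning a definite code such as -EINVAL avoids that.

```c
#include <errno.h>
#include <libelf.h>
#include <stdio.h>

int open_elf(int fd, Elf **out)
{
	Elf *e;

	if (elf_version(EV_CURRENT) == EV_NONE)
		return -EINVAL;

	e = elf_begin(fd, ELF_C_READ, NULL);
	if (!e) {
		/* errno is usually untouched here; query libelf's own state */
		fprintf(stderr, "elf_begin: %s\n", elf_errmsg(elf_errno()));
		return -EINVAL; /* definite error code, as in the patch */
	}
	*out = e;
	return 0;
}
```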
@@ -0,0 +1,42 @@
From c975797ebecb07934d1399e1595db8e0d55bec04 Mon Sep 17 00:00:00 2001
From: David Michael <fedora.dm0@gmail.com>
Date: Sun, 13 Nov 2022 15:52:17 -0500
Subject: [PATCH] libbpf: Fix uninitialized warning in btf_dump_dump_type_data
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

GCC 11.3.0 fails to compile btf_dump.c due to the following error,
which seems to originate in btf_dump_struct_data where the returned
value would be uninitialized if btf_vlen returns zero.

btf_dump.c: In function ‘btf_dump_dump_type_data’:
btf_dump.c:2363:12: error: ‘err’ may be used uninitialized in this function [-Werror=maybe-uninitialized]
2363 | if (err < 0)
| ^

Fixes: 920d16af9b42 ("libbpf: BTF dumper support for typed data")
Signed-off-by: David Michael <fedora.dm0@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Stanislav Fomichev <sdf@google.com>
Acked-by: Alan Maguire <alan.maguire@oracle.com>
Link: https://lore.kernel.org/bpf/87zgcu60hq.fsf@gmail.com
---
src/btf_dump.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/btf_dump.c b/src/btf_dump.c
index 12f7039e0..e9f849d82 100644
--- a/src/btf_dump.c
+++ b/src/btf_dump.c
@@ -1989,7 +1989,7 @@ static int btf_dump_struct_data(struct btf_dump *d,
{
 const struct btf_member *m = btf_members(t);
 __u16 n = btf_vlen(t);
- int i, err;
+ int i, err = 0;

 /* note that we increment depth before calling btf_dump_print() below;
 * this is intentional. btf_dump_data_newline() will not print a
--
2.33.0

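The shape of the warning in a generic sketch: when btf_vlen() is zero the member loop never executes, so err would be returned uninitialized unless it starts at 0.

```c
/* Stand-in for btf_dump_struct_data(): with n == 0 the loop body never
 * runs, so 'err' must start initialized to be safely returned. */
int dump_members(int n)
{
	int i, err = 0; /* was: int i, err; */

	for (i = 0; i < n; i++) {
		err = (i % 2) ? -1 : 0; /* stand-in for per-member work */
		if (err < 0)
			break;
	}
	return err;
}
```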
@@ -0,0 +1,32 @@
From 81ac790dc831a5b753b310138f2201f87b55169b Mon Sep 17 00:00:00 2001
From: Shuyi Cheng <chengshuyi@linux.alibaba.com>
Date: Sun, 8 Sep 2024 17:23:53 +0800
Subject: [PATCH] libbpf: Fixed getting wrong return address on arm64
architecture

ARM64 has a separate lr register to store the return address, so here
you only need to read the lr register to get the return address, no need
to dereference it again.

Signed-off-by: Shuyi Cheng <chengshuyi@linux.alibaba.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/1725787433-77262-1-git-send-email-chengshuyi@linux.alibaba.com
---
src/bpf_tracing.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/bpf_tracing.h b/src/bpf_tracing.h
index 4eab132a9..aa3b04f55 100644
--- a/src/bpf_tracing.h
+++ b/src/bpf_tracing.h
@@ -522,7 +522,7 @@ struct pt_regs;
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP

-#elif defined(bpf_target_sparc)
+#elif defined(bpf_target_sparc) || defined(bpf_target_arm64)

#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP

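For context, a sketch of how these macros are typically consumed in a tracing program (program name and attach point are illustrative, and the target-arch define is a build-time assumption); on arm64 the fixed definition reads the return address straight from PT_REGS_RET(), i.e. the lr register, instead of dereferencing it.

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_unlinkat")
int trace_unlinkat(struct pt_regs *ctx)
{
	unsigned long ip;

	BPF_KPROBE_READ_RET_IP(ip, ctx); /* caller's return address */
	bpf_printk("do_unlinkat called from %lx", ip);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```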
@@ -0,0 +1,43 @@
From 0e3971339f06c23aa9402a33057ecb3aac7795aa Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko <andrii@kernel.org>
Date: Tue, 8 Oct 2024 18:15:54 -0700
Subject: [PATCH] libbpf: fix sym_is_subprog() logic for weak global subprogs

sym_is_subprog() is incorrectly rejecting relocations against *weak*
global subprogs. Fix that by realizing that STB_WEAK is also a global
function.

While it seems like verifier doesn't support taking an address of
non-static subprog right now, it's still best to fix support for it on
libbpf side, otherwise users will get a very confusing error during BPF
skeleton generation or static linking due to misinterpreted relocation:

libbpf: prog 'handle_tp': bad map relo against 'foo' in section '.text'
Error: failed to open BPF object file: Relocation failed

It's clearly not a map relocation, but is treated and reported as such
without this fix.

Fixes: 53eddb5e04ac ("libbpf: Support subprog address relocation")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20241009011554.880168-1-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
src/libbpf.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/libbpf.c b/src/libbpf.c
index 712b95e88..05ad264ff 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -4013,7 +4013,7 @@ static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
 return true;

 /* global function */
- return bind == STB_GLOBAL && type == STT_FUNC;
+ return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC;
}

static int find_extern_btf_id(const struct btf *btf, const char *ext_name)

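The predicate change in standalone form, using only the plain ELF helpers from <elf.h> (not libbpf internals): a weak function symbol is still a global-scope function for relocation purposes.

```c
#include <elf.h>
#include <stdbool.h>

bool is_global_func_sym(const Elf64_Sym *sym)
{
	int bind = ELF64_ST_BIND(sym->st_info);
	int type = ELF64_ST_TYPE(sym->st_info);

	/* STB_WEAK behaves like STB_GLOBAL here, which is the fix above */
	return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC;
}
```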
@@ -0,0 +1,165 @@
From 2dea4b86ee82a48912e54b49ac4c255eca592067 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko <andrii@kernel.org>
Date: Tue, 22 Oct 2024 21:39:07 -0700
Subject: [PATCH] libbpf: move global data mmap()'ing into bpf_object__load()

Since BPF skeleton inception libbpf has been doing mmap()'ing of global
data ARRAY maps in bpf_object__load_skeleton() API, which is used by
code generated .skel.h files (i.e., by BPF skeletons only).

This is wrong because if BPF object is loaded through generic
bpf_object__load() API, global data maps won't be re-mmap()'ed after
load step, and memory pointers returned from bpf_map__initial_value()
would be wrong and won't reflect the actual memory shared between BPF
program and user space.

bpf_map__initial_value() return result is rarely used after load, so
this went unnoticed for a really long time, until bpftrace project
attempted to load BPF object through generic bpf_object__load() API and
then used BPF subskeleton instantiated from such bpf_object. It turned
out that .data/.rodata/.bss data updates through such subskeleton was
"blackholed", all because libbpf wouldn't re-mmap() those maps during
bpf_object__load() phase.

Long story short, this step should be done by libbpf regardless of BPF
skeleton usage, right after BPF map is created in the kernel. This patch
moves this functionality into bpf_object__populate_internal_map() to
achieve this. And bpf_object__load_skeleton() is now simple and almost
trivial, only propagating these mmap()'ed pointers into user-supplied
skeleton structs.

We also do trivial adjustments to error reporting inside
bpf_object__populate_internal_map() for consistency with the rest of
libbpf's map-handling code.

Reported-by: Alastair Robertson <ajor@meta.com>
Reported-by: Jonathan Wiepert <jwiepert@meta.com>
Fixes: d66562fba1ce ("libbpf: Add BPF object skeleton support")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20241023043908.3834423-3-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Conflict:In the original patch, the function code is moved from the bpf object __load_skeleton to the bpf object __populate_internal_map. The implementation details of the function code are different due to version changes. Therefore, the function code is moved again according to this method.
Reference: https://github.com/libbpf/libbpf/commit/2dea4b86ee82a48912e54b49ac4c255eca592067
---
src/libbpf.c | 81 ++++++++++++++++++++++++++--------------------------
1 file changed, 41 insertions(+), 40 deletions(-)

diff --git a/src/libbpf.c b/src/libbpf.c
index 8d63238..cd8203f 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -4971,6 +4971,7 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
 enum libbpf_map_type map_type = map->libbpf_type;
 char *cp, errmsg[STRERR_BUFSIZE];
 int err, zero = 0;
+ size_t mmap_sz;

 if (obj->gen_loader) {
 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
@@ -4983,8 +4984,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
 if (err) {
 err = -errno;
 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("Error setting initial map(%s) contents: %s\n",
- map->name, cp);
+ pr_warn("map '%s': failed to set initial contents: %s\n",
+ bpf_map__name(map), cp);
 return err;
 }

@@ -4994,11 +4995,45 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
 if (err) {
 err = -errno;
 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("Error freezing map(%s) as read-only: %s\n",
- map->name, cp);
+ pr_warn("map '%s': failed to freeze as read-only: %s\n",
+ bpf_map__name(map), cp);
 return err;
 }
 }
+
+ /* Remap anonymous mmap()-ed "map initialization image" as
+ * a BPF map-backed mmap()-ed memory, but preserving the same
+ * memory address. This will cause kernel to change process'
+ * page table to point to a different piece of kernel memory,
+ * but from userspace point of view memory address (and its
+ * contents, being identical at this point) will stay the
+ * same. This mapping will be released by bpf_object__close()
+ * as per normal clean up procedure, so we don't need to worry
+ * about it from skeleton's clean up perspective.
+ */
+ mmap_sz = bpf_map_mmap_sz(map);
+ if (map->def.map_flags & BPF_F_MMAPABLE) {
+ void *mmaped;
+ int prot;
+
+ if (map->def.map_flags & BPF_F_RDONLY_PROG)
+ prot = PROT_READ;
+ else
+ prot = PROT_READ | PROT_WRITE;
+ mmaped = mmap(map->mmaped, mmap_sz, prot,
+ MAP_SHARED | MAP_FIXED, map->fd, 0);
+ if (mmaped == MAP_FAILED) {
+ err = -errno;
+ mmaped = NULL;
+ pr_warn("failed to re-mmap() map '%s': %d\n",
+ bpf_map__name(map), err);
+ return libbpf_err(err);
+ }
+ } else if (map->mmaped) {
+ munmap(map->mmaped, mmap_sz);
+ map->mmaped = NULL;
+ }
+
 return 0;
}

@@ -13128,44 +13163,10 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)

 for (i = 0; i < s->map_cnt; i++) {
 struct bpf_map *map = *s->maps[i].map;
- size_t mmap_sz = bpf_map_mmap_sz(map);
- int prot, map_fd = bpf_map__fd(map);
- void **mmaped = s->maps[i].mmaped;
-
- if (!mmaped)
+ if (!s->maps[i].mmaped)
 continue;
-
- if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
- *mmaped = NULL;
- continue;
- }
-
- if (map->def.map_flags & BPF_F_RDONLY_PROG)
- prot = PROT_READ;
- else
- prot = PROT_READ | PROT_WRITE;
-
- /* Remap anonymous mmap()-ed "map initialization image" as
- * a BPF map-backed mmap()-ed memory, but preserving the same
- * memory address. This will cause kernel to change process'
- * page table to point to a different piece of kernel memory,
- * but from userspace point of view memory address (and its
- * contents, being identical at this point) will stay the
- * same. This mapping will be released by bpf_object__close()
- * as per normal clean up procedure, so we don't need to worry
- * about it from skeleton's clean up perspective.
- */
- *mmaped = mmap(map->mmaped, mmap_sz, prot,
- MAP_SHARED | MAP_FIXED, map_fd, 0);
- if (*mmaped == MAP_FAILED) {
- err = -errno;
- *mmaped = NULL;
- pr_warn("failed to re-mmap() map '%s': %d\n",
- bpf_map__name(map), err);
- return libbpf_err(err);
- }
+ *s->maps[i].mmaped = map->mmaped;
 }
-
 return 0;
}

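For context, a sketch of the plain bpf_object__load() flow this fixes (the object path and map name are placeholders): after load, bpf_map__initial_value() is expected to return memory that is actually shared with the loaded program, even without a skeleton.

```c
#include <bpf/libbpf.h>
#include <string.h>

int poke_bss(const char *obj_path)
{
	struct bpf_object *obj;
	struct bpf_map *bss;
	size_t sz;
	void *mem;
	int err;

	obj = bpf_object__open_file(obj_path, NULL);
	if (!obj)
		return -1;

	err = bpf_object__load(obj);
	if (err)
		goto out;

	/* global-data map names are derived from the object name; placeholder here */
	bss = bpf_object__find_map_by_name(obj, "example.bss");
	if (!bss) {
		err = -1;
		goto out;
	}

	/* With this patch the pointer refers to the re-mmap()'ed, kernel-backed
	 * region even without a skeleton, so this write reaches the program. */
	mem = bpf_map__initial_value(bss, &sz);
	if (mem)
		memset(mem, 0, sz);
out:
	bpf_object__close(obj);
	return err;
}
```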
libbpf.spec (41 lines changed)

@@ -4,7 +4,7 @@

Name: %{githubname}
Version: %{githubver}
Release: 13
Release: 18
Summary: Libbpf library

License: LGPLv2 or BSD
@@ -42,6 +42,18 @@ Patch0025: backport-libbpf-Fix-NULL-pointer-dereference-in_bpf_object__c.pa
Patch0026: backport-libbpf-Fix-str_has_sfxs-return-value.patch
Patch0027: backport-libbpf-Initialize-err-in-probe_map_create.patch
Patch0028: backport-libbpf-Modify-the-function-name-in-libbpf.c-to-match.patch
Patch0029: backport-libbpf-Fix-an-error-in-64bit-relocation-value-comput.patch
Patch0030: backport-libbpf-Avoid-uninitialized-value-in-BPF_CORE_READ_BI.patch
Patch0031: backport-libbpf-Add-NULL-checks-to-bpf_object__prev_map,next_.patch
Patch0032: backport-libbpf-Apply-map_set_def_max_entries-for-inner_maps-.patch
Patch0033: backport-libbpf-Fix-uninitialized-warning-in-btf_dump_dump_ty.patch
Patch0034: backport-libbpf-Dont-take-direct-pointers-into-BTF-data-from-.patch
Patch0035: backport-libbpf-Do-not-resolve-size-on-duplicate-FUNCs.patch
Patch0036: backport-libbpf-Fix-bpf_object__open_skeleton-s-mishandling-o.patch
Patch0037: backport-libbpf-Fix-segfault-due-to-libelf-functions-not-sett.patch
Patch0038: backport-libbpf-Fixed-getting-wrong-return-address-on-arm64-a.patch
Patch0039: backport-libbpf-fix-sym_is_subprog-logic-for-weak-global-subp.patch
Patch0040: backport-libbpf-move-global-data-mmap-ing-into-bpf_object__lo.patch

# This package supersedes libbpf from kernel-tools,
# which has default Epoch: 0. By having Epoch: 1
@@ -94,6 +106,33 @@ developing applications that use %{name}
%{_libdir}/libbpf.a

%changelog
* Fri Mar 14 2025 zhangmingyi <zhangmingyi5@huawei.com> 2:0.8.1-18
- backport patch from upstream:
backport-libbpf-Do-not-resolve-size-on-duplicate-FUNCs.patch
backport-libbpf-Fix-bpf_object__open_skeleton-s-mishandling-o.patch
backport-libbpf-Fix-segfault-due-to-libelf-functions-not-sett.patch
backport-libbpf-Fixed-getting-wrong-return-address-on-arm64-a.patch
backport-libbpf-fix-sym_is_subprog-logic-for-weak-global-subp.patch
backport-libbpf-move-global-data-mmap-ing-into-bpf_object__lo.patch

* Mon Dec 30 2024 zhangmingyi <zhangmingyi5@huawei.com> 2:0.8.1-17
- backport patch from upstream:
backport-libbpf-Dont-take-direct-pointers-into-BTF-data-from-.patch

* Wed Oct 09 2024 zhangmingyi <zhangmingyi5@huawei.com> 2:0.8.1-16
- backport patch from upstream:
backport-libbpf-Add-NULL-checks-to-bpf_object__prev_map,next_.patch
backport-libbpf-Apply-map_set_def_max_entries-for-inner_maps-.patch
backport-libbpf-Fix-uninitialized-warning-in-btf_dump_dump_ty.patch

* Wed Sep 25 2024 zhangmingyi <zhangmingyi5@huawei.com> 2:0.8.1-15
- backport patch from upstream:
backport-libbpf-Avoid-uninitialized-value-in-BPF_CORE_READ_BI.patch

* Thu Jun 20 2024 xiesongyang <xiesongyang@huawei.com> - 2:0.8.1-14
- backport patch from upstream:
backport-libbpf-Fix-an-error-in-64bit-relocation-value-comput.patch

* Sun Apr 28 2024 jinzhiguang <jinzhiguang@kylinos.cn> - 2:0.8.1-13
- backport patch from upstream:
backport-libbpf-Modify-the-function-name-in-libbpf.c-to-match.patch