Merge branch 'selftests-bpf-test-btf-sanitization'

Alan Maguire says:

====================
selftests/bpf: Test BTF sanitization

Allow simulation of missing BPF features through provision of
a synthetic feature cache set, and use this to simulate the case
where FEAT_BTF_LAYOUT is missing.  Ensure sanitization leaves us
with the expected BTF (layout info removed, layout header fields
zeroed, strings data adjusted).

Specifying a feature cache with selected missing features will
allow testing of other missing feature codepaths, but for now
add BTF layout sanitization test only.

Changes since v2 [1]:

- change zfree() to free() since we immediately assign the
  feat_cache (Jiri, patch 1)
- "goto out" to avoid skeleton leak (Chengkaitao, patch 2)
- just use kfree_skb__open() since we do not need to load the
  skeleton

Changes since v1 [2]:

- renamed to bpf_object_set_feat_cache() (Andrii, patch 1)
- remove __packed, relocate skeleton open/load, fix formatting
  issues (Andrii, patch 2)

[1] https://lore.kernel.org/bpf/20260408105324.663280-1-alan.maguire@oracle.com/
[2] https://lore.kernel.org/bpf/20260401164302.3844142-1-alan.maguire@oracle.com/
====================

Link: https://patch.msgid.link/20260408165735.843763-1-alan.maguire@oracle.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
Alexei Starovoitov
2026-04-10 12:34:36 -07:00
3 changed files with 109 additions and 3 deletions

View File

@@ -3145,7 +3145,7 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
!has_layout;
}
static struct btf *bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *orig_btf)
struct btf *bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *orig_btf)
{
bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
@@ -5203,12 +5203,20 @@ bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
*/
return true;
if (obj->token_fd)
if (obj->feat_cache)
return feat_supported(obj->feat_cache, feat_id);
return feat_supported(NULL, feat_id);
}
/* Used in testing to simulate missing features.
 *
 * Replaces obj's kernel-feature cache with @cache; ownership of @cache
 * transfers to @obj, which frees it on destruction.  Any previously
 * installed cache is released first.
 */
void bpf_object_set_feat_cache(struct bpf_object *obj, struct kern_feature_cache *cache)
{
	/* free(NULL) is a no-op, so no need to guard the release */
	free(obj->feat_cache);
	obj->feat_cache = cache;
}
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
{
struct bpf_map_info map_info;

View File

@@ -414,6 +414,7 @@ struct kern_feature_cache {
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id);
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
void bpf_object_set_feat_cache(struct bpf_object *obj, struct kern_feature_cache *cache);
int probe_kern_syscall_wrapper(int token_fd);
int probe_memcg_account(int token_fd);
@@ -427,7 +428,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
int libbpf__load_raw_btf_hdr(const struct btf_header *hdr,
const char *raw_types, const char *str_sec,
const char *layout_sec, int token_fd);
struct btf *bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *orig_btf);
int btf_load_into_kernel(struct btf *btf,
char *log_buf, size_t log_sz, __u32 log_level,
int token_fd);

View File

@@ -0,0 +1,97 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2026, Oracle and/or its affiliates. */
#include <test_progs.h>
#include <linux/btf.h>
#include "bpf/libbpf_internal.h"
#include "../test_btf.h"
#include "kfree_skb.skel.h"
#define TYPE_LEN (sizeof(struct btf_type) + sizeof(__u32))
#define MAX_NR_LAYOUT 2
#define LAYOUT_LEN (sizeof(struct btf_layout) * MAX_NR_LAYOUT)
#define STR_LEN sizeof("\0int")
/* In-memory image of a raw BTF blob that carries a layout section:
 * header, then type data, then layout records, then the string table.
 * Field order must match the section offsets set in the initializer below.
 */
struct layout_btf {
	struct btf_header hdr;			/* raw BTF header, incl. layout_off/layout_len */
	__u32 types[TYPE_LEN/sizeof(__u32)];	/* a single encoded type (see TYPE_LEN) */
	struct btf_layout layout[MAX_NR_LAYOUT];/* layout records sanitization should strip */
	char strs[STR_LEN];			/* string section: "\0int" */
};
/* Minimal valid BTF blob with a non-empty layout section placed between
 * the type and string sections (type data, then layout, then strings).
 */
static const struct layout_btf layout_btf = {
	.hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_off = 0,				/* type data starts the payload */
		.type_len = TYPE_LEN,
		.str_off = TYPE_LEN + LAYOUT_LEN,	/* strings follow layout section */
		.str_len = STR_LEN,
		.layout_off = TYPE_LEN,			/* layout section follows type data */
		.layout_len = LAYOUT_LEN,
	},
	.types = {
		/* "int" (name_off 1 into strs), signed, 32 bits, 4 bytes */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	},
	.layout = {
		{ .info_sz = 0, .elem_sz = 0, .flags = 0 },
		{ .info_sz = sizeof(__u32), .elem_sz = 0, .flags = 0 },
	},
	.strs = "\0int",
};
/* Verify that when FEAT_BTF_LAYOUT is reported missing, BTF sanitization
 * removes the layout section: layout header fields are zeroed and the
 * string section shifts down to follow the type data directly.
 */
void test_btf_sanitize_layout(void)
{
	struct btf *orig = NULL, *sanitized = NULL;
	struct kern_feature_cache *cache = NULL;
	struct kfree_skb *skel = NULL;
	const struct btf_header *hdr;
	const void *raw;
	__u32 raw_sz;

	/* Open (but do not load) a skeleton; we only need its bpf_object
	 * to install a synthetic feature cache into.
	 */
	skel = kfree_skb__open();
	if (!ASSERT_OK_PTR(skel, "kfree_skb_skel"))
		return;
	orig = btf__new(&layout_btf, sizeof(layout_btf));
	if (!ASSERT_OK_PTR(orig, "btf_new_layout"))
		goto out;
	raw = btf__raw_data(orig, &raw_sz);
	if (!ASSERT_OK_PTR(raw, "btf__raw_data_orig"))
		goto out;
	/* keep the const qualifier rather than casting it away */
	hdr = (const struct btf_header *)raw;
	/* sanity-check the un-sanitized blob actually has layout info */
	ASSERT_EQ(hdr->layout_off, TYPE_LEN, "layout_off_nonzero");
	ASSERT_EQ(hdr->layout_len, LAYOUT_LEN, "layout_len_nonzero");

	/* build a feature cache where everything but BTF layout is supported;
	 * ownership transfers to skel->obj, freed by kfree_skb__destroy().
	 */
	cache = calloc(1, sizeof(*cache));
	if (!ASSERT_OK_PTR(cache, "alloc_feat_cache"))
		goto out;
	for (int i = 0; i < __FEAT_CNT; i++)
		cache->res[i] = FEAT_SUPPORTED;
	cache->res[FEAT_BTF_LAYOUT] = FEAT_MISSING;
	bpf_object_set_feat_cache(skel->obj, cache);
	if (!ASSERT_FALSE(kernel_supports(skel->obj, FEAT_BTF_LAYOUT), "layout_feature_missing"))
		goto out;
	if (!ASSERT_TRUE(kernel_supports(skel->obj, FEAT_BTF_FUNC), "other_feature_allowed"))
		goto out;

	sanitized = bpf_object__sanitize_btf(skel->obj, orig);
	if (!ASSERT_OK_PTR(sanitized, "bpf_object__sanitize_btf"))
		goto out;
	raw = btf__raw_data(sanitized, &raw_sz);
	if (!ASSERT_OK_PTR(raw, "btf__raw_data_sanitized"))
		goto out;
	hdr = (const struct btf_header *)raw;
	/* layout section gone; strings now directly follow type data */
	ASSERT_EQ(hdr->layout_off, 0, "layout_off_zero");
	ASSERT_EQ(hdr->layout_len, 0, "layout_len_zero");
	ASSERT_EQ(hdr->str_off, TYPE_LEN, "strs_after_types");
	ASSERT_EQ(hdr->str_len, STR_LEN, "strs_len_unchanged");
	ASSERT_EQ(raw_sz, hdr->hdr_len + hdr->type_len + hdr->str_len, "btf_raw_sz_reduced");
out:
	/* This will free the cache we allocated above */
	kfree_skb__destroy(skel);
	btf__free(sanitized);
	btf__free(orig);
}