Merge branch 'extend-struct_ops-support-for-operators'

Amery Hung says:

====================
This patchset supports struct_ops operators that acquire kptrs through
arguments and operators that return a kptr. A coming new struct_ops use
case, bpf qdisc [0], has two operators that are not yet supported by
current struct_ops infrastructure. Qdisc_ops::enqueue requires getting
referenced skb kptr from the argument; Qdisc_ops::dequeue needs to return
a referenced skb kptr. This patchset will allow bpf qdisc and other
potential struct_ops implementers to do so.

For struct_ops implementers:

- To get a kptr from an argument, a struct_ops implementer needs to
  annotate the argument name in the stub function with "__ref" suffix.

- The kptr return will automatically work as we now allow operators that
  return a struct pointer.

- The verifier allows returning a null pointer. More control can be
  added later if there is a future struct_ops implementer only expecting
  valid pointers.

For struct_ops users:

- The referenced kptr acquired through the argument needs to be released
  or xchged into maps just like ones acquired via kfuncs.

- To return a referenced kptr in struct_ops,
  1) The type of the pointer must match the return type
  2) The pointer must come from the kernel (not locally allocated), and
  3) The pointer must be in its unmodified form

[0] https://lore.kernel.org/bpf/20250210174336.2024258-1-ameryhung@gmail.com/
---
v2
- Replace kcalloc+memcpy with kmemdup_array in
  bpf_prog_ctx_arg_info_init()
- Remove unnecessary checks when kfree-ing ctx_arg_info
- Remove conditional assignment of ref_obj_id in btf_ctx_access()

v1
- Link: https://lore.kernel.org/bpf/20250214164520.1001211-1-ameryhung@gmail.com/
- Fix missing kfree for ctx_arg_info
====================

Link: https://patch.msgid.link/20250217190640.1748177-1-ameryhung@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
Alexei Starovoitov
2025-02-17 18:47:27 -08:00
18 changed files with 410 additions and 33 deletions

View File

@@ -968,6 +968,7 @@ struct bpf_insn_access_aux {
struct {
struct btf *btf;
u32 btf_id;
u32 ref_obj_id;
};
};
struct bpf_verifier_log *log; /* for verbose logs */
@@ -1481,6 +1482,8 @@ struct bpf_ctx_arg_aux {
enum bpf_reg_type reg_type;
struct btf *btf;
u32 btf_id;
u32 ref_obj_id;
bool refcounted;
};
struct btf_mod_pair {
@@ -1507,7 +1510,7 @@ struct bpf_prog_aux {
u32 max_rdonly_access;
u32 max_rdwr_access;
struct btf *attach_btf;
const struct bpf_ctx_arg_aux *ctx_arg_info;
struct bpf_ctx_arg_aux *ctx_arg_info;
void __percpu *priv_stack_ptr;
struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
struct bpf_prog *dst_prog;
@@ -1945,6 +1948,9 @@ static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_op
#endif
int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
const struct bpf_ctx_arg_aux *info, u32 cnt);
#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
int cgroup_atype);
@@ -2546,7 +2552,7 @@ struct bpf_iter__bpf_map_elem {
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
int bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);

View File

@@ -335,7 +335,7 @@ static void cache_btf_id(struct bpf_iter_target_info *tinfo,
tinfo->btf_id = prog->aux->attach_btf_id;
}
bool bpf_iter_prog_supported(struct bpf_prog *prog)
int bpf_iter_prog_supported(struct bpf_prog *prog)
{
const char *attach_fname = prog->aux->attach_func_name;
struct bpf_iter_target_info *tinfo = NULL, *iter;
@@ -344,7 +344,7 @@ bool bpf_iter_prog_supported(struct bpf_prog *prog)
int prefix_len = strlen(prefix);
if (strncmp(attach_fname, prefix, prefix_len))
return false;
return -EINVAL;
mutex_lock(&targets_mutex);
list_for_each_entry(iter, &targets, list) {
@@ -360,12 +360,11 @@ bool bpf_iter_prog_supported(struct bpf_prog *prog)
}
mutex_unlock(&targets_mutex);
if (tinfo) {
prog->aux->ctx_arg_info_size = tinfo->reg_info->ctx_arg_info_size;
prog->aux->ctx_arg_info = tinfo->reg_info->ctx_arg_info;
}
if (!tinfo)
return -EINVAL;
return tinfo != NULL;
return bpf_prog_ctx_arg_info_init(prog, tinfo->reg_info->ctx_arg_info,
tinfo->reg_info->ctx_arg_info_size);
}
const struct bpf_func_proto *

View File

@@ -146,6 +146,7 @@ void bpf_struct_ops_image_free(void *image)
}
#define MAYBE_NULL_SUFFIX "__nullable"
#define REFCOUNTED_SUFFIX "__ref"
/* Prepare argument info for every nullable argument of a member of a
* struct_ops type.
@@ -174,11 +175,13 @@ static int prepare_arg_info(struct btf *btf,
struct bpf_struct_ops_arg_info *arg_info)
{
const struct btf_type *stub_func_proto, *pointed_type;
bool is_nullable = false, is_refcounted = false;
const struct btf_param *stub_args, *args;
struct bpf_ctx_arg_aux *info, *info_buf;
u32 nargs, arg_no, info_cnt = 0;
char ksym[KSYM_SYMBOL_LEN];
const char *stub_fname;
const char *suffix;
s32 stub_func_id;
u32 arg_btf_id;
int offset;
@@ -223,10 +226,18 @@ static int prepare_arg_info(struct btf *btf,
info = info_buf;
for (arg_no = 0; arg_no < nargs; arg_no++) {
/* Skip arguments that is not suffixed with
* "__nullable".
* "__nullable or __ref".
*/
if (!btf_param_match_suffix(btf, &stub_args[arg_no],
MAYBE_NULL_SUFFIX))
is_nullable = btf_param_match_suffix(btf, &stub_args[arg_no],
MAYBE_NULL_SUFFIX);
is_refcounted = btf_param_match_suffix(btf, &stub_args[arg_no],
REFCOUNTED_SUFFIX);
if (is_nullable)
suffix = MAYBE_NULL_SUFFIX;
else if (is_refcounted)
suffix = REFCOUNTED_SUFFIX;
else
continue;
/* Should be a pointer to struct */
@@ -236,7 +247,7 @@ static int prepare_arg_info(struct btf *btf,
if (!pointed_type ||
!btf_type_is_struct(pointed_type)) {
pr_warn("stub function %s has %s tagging to an unsupported type\n",
stub_fname, MAYBE_NULL_SUFFIX);
stub_fname, suffix);
goto err_out;
}
@@ -254,11 +265,15 @@ static int prepare_arg_info(struct btf *btf,
}
/* Fill the information of the new argument */
info->reg_type =
PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
info->btf_id = arg_btf_id;
info->btf = btf;
info->offset = offset;
if (is_nullable) {
info->reg_type = PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
} else if (is_refcounted) {
info->reg_type = PTR_TRUSTED | PTR_TO_BTF_ID;
info->refcounted = true;
}
info++;
info_cnt++;
@@ -375,7 +390,7 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
st_ops_desc->value_type = btf_type_by_id(btf, value_id);
for_each_member(i, t, member) {
const struct btf_type *func_proto;
const struct btf_type *func_proto, *ret_type;
void **stub_func_addr;
u32 moff;
@@ -412,6 +427,16 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
if (!func_proto || bpf_struct_ops_supported(st_ops, moff))
continue;
if (func_proto->type) {
ret_type = btf_type_resolve_ptr(btf, func_proto->type, NULL);
if (ret_type && !__btf_type_is_struct(ret_type)) {
pr_warn("func ptr %s in struct %s returns non-struct pointer, which is not supported\n",
mname, st_ops->name);
err = -EOPNOTSUPP;
goto errout;
}
}
if (btf_distill_func_proto(log, btf,
func_proto, mname,
&st_ops->func_models[i])) {

View File

@@ -6774,6 +6774,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
info->reg_type = ctx_arg_info->reg_type;
info->btf = ctx_arg_info->btf ? : btf_vmlinux;
info->btf_id = ctx_arg_info->btf_id;
info->ref_obj_id = ctx_arg_info->ref_obj_id;
return true;
}
}

View File

@@ -2313,6 +2313,7 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
kvfree(prog->aux->jited_linfo);
kvfree(prog->aux->linfo);
kfree(prog->aux->kfunc_tab);
kfree(prog->aux->ctx_arg_info);
if (prog->aux->attach_btf)
btf_put(prog->aux->attach_btf);

View File

@@ -1543,6 +1543,17 @@ static void release_reference_state(struct bpf_verifier_state *state, int idx)
return;
}
static bool find_reference_state(struct bpf_verifier_state *state, int ptr_id)
{
int i;
for (i = 0; i < state->acquired_refs; i++)
if (state->refs[i].id == ptr_id)
return true;
return false;
}
static int release_lock_state(struct bpf_verifier_state *state, int type, int id, void *ptr)
{
int i;
@@ -5981,7 +5992,8 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type,
struct btf **btf, u32 *btf_id, bool *is_retval, bool is_ldsx)
struct btf **btf, u32 *btf_id, bool *is_retval, bool is_ldsx,
u32 *ref_obj_id)
{
struct bpf_insn_access_aux info = {
.reg_type = *reg_type,
@@ -6003,8 +6015,16 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
*is_retval = info.is_retval;
if (base_type(*reg_type) == PTR_TO_BTF_ID) {
if (info.ref_obj_id &&
!find_reference_state(env->cur_state, info.ref_obj_id)) {
verbose(env, "invalid bpf_context access off=%d. Reference may already be released\n",
off);
return -EACCES;
}
*btf = info.btf;
*btf_id = info.btf_id;
*ref_obj_id = info.ref_obj_id;
} else {
env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
}
@@ -7367,7 +7387,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
struct bpf_retval_range range;
enum bpf_reg_type reg_type = SCALAR_VALUE;
struct btf *btf = NULL;
u32 btf_id = 0;
u32 btf_id = 0, ref_obj_id = 0;
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
@@ -7380,7 +7400,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return err;
err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
&btf_id, &is_retval, is_ldsx);
&btf_id, &is_retval, is_ldsx, &ref_obj_id);
if (err)
verbose_linfo(env, insn_idx, "; ");
if (!err && t == BPF_READ && value_regno >= 0) {
@@ -7411,6 +7431,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (base_type(reg_type) == PTR_TO_BTF_ID) {
regs[value_regno].btf = btf;
regs[value_regno].btf_id = btf_id;
regs[value_regno].ref_obj_id = ref_obj_id;
}
}
regs[value_regno].type = reg_type;
@@ -10737,6 +10758,8 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exit)
{
struct bpf_verifier_state *state = env->cur_state;
enum bpf_prog_type type = resolve_prog_type(env->prog);
struct bpf_reg_state *reg = reg_state(env, BPF_REG_0);
bool refs_lingering = false;
int i;
@@ -10746,6 +10769,12 @@ static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exi
for (i = 0; i < state->acquired_refs; i++) {
if (state->refs[i].type != REF_TYPE_PTR)
continue;
/* Allow struct_ops programs to return a referenced kptr back to
* kernel. Type checks are performed later in check_return_code.
*/
if (type == BPF_PROG_TYPE_STRUCT_OPS && !exception_exit &&
reg->ref_obj_id == state->refs[i].id)
continue;
verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
state->refs[i].id, state->refs[i].insn_idx);
refs_lingering = true;
@@ -16397,13 +16426,14 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
const char *exit_ctx = "At program exit";
struct tnum enforce_attach_type_range = tnum_unknown;
const struct bpf_prog *prog = env->prog;
struct bpf_reg_state *reg;
struct bpf_reg_state *reg = reg_state(env, regno);
struct bpf_retval_range range = retval_range(0, 1);
enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
int err;
struct bpf_func_state *frame = env->cur_state->frame[0];
const bool is_subprog = frame->subprogno;
bool return_32bit = false;
const struct btf_type *reg_type, *ret_type = NULL;
/* LSM and struct_ops func-ptr's return type could be "void" */
if (!is_subprog || frame->in_exception_callback_fn) {
@@ -16412,10 +16442,26 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
if (prog->expected_attach_type == BPF_LSM_CGROUP)
/* See below, can be 0 or 0-1 depending on hook. */
break;
fallthrough;
if (!prog->aux->attach_func_proto->type)
return 0;
break;
case BPF_PROG_TYPE_STRUCT_OPS:
if (!prog->aux->attach_func_proto->type)
return 0;
if (frame->in_exception_callback_fn)
break;
/* Allow a struct_ops program to return a referenced kptr if it
* matches the operator's return type and is in its unmodified
* form. A scalar zero (i.e., a null pointer) is also allowed.
*/
reg_type = reg->btf ? btf_type_by_id(reg->btf, reg->btf_id) : NULL;
ret_type = btf_type_resolve_ptr(prog->aux->attach_btf,
prog->aux->attach_func_proto->type,
NULL);
if (ret_type && ret_type == reg_type && reg->ref_obj_id)
return __check_ptr_off_reg(env, reg, regno, false);
break;
default:
break;
@@ -16437,8 +16483,6 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
return -EACCES;
}
reg = cur_regs(env) + regno;
if (frame->in_async_callback_fn) {
/* enforce return zero from async callbacks like timer */
exit_ctx = "At async callback return";
@@ -16537,6 +16581,11 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
case BPF_PROG_TYPE_NETFILTER:
range = retval_range(NF_DROP, NF_ACCEPT);
break;
case BPF_PROG_TYPE_STRUCT_OPS:
if (!ret_type)
return 0;
range = retval_range(0, 0);
break;
case BPF_PROG_TYPE_EXT:
/* freplace program can return anything as its return value
* depends on the to-be-replaced kernel func or bpf program.
@@ -22176,6 +22225,7 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
{
bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
struct bpf_subprog_info *sub = subprog_info(env, subprog);
struct bpf_prog_aux *aux = env->prog->aux;
struct bpf_verifier_state *state;
struct bpf_reg_state *regs;
int ret, i;
@@ -22283,6 +22333,13 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
mark_reg_known_zero(env, regs, BPF_REG_1);
}
/* Acquire references for struct_ops program arguments tagged with "__ref" */
if (!subprog && env->prog->type == BPF_PROG_TYPE_STRUCT_OPS) {
for (i = 0; i < aux->ctx_arg_info_size; i++)
aux->ctx_arg_info[i].ref_obj_id = aux->ctx_arg_info[i].refcounted ?
acquire_reference(env, 0) : 0;
}
ret = do_check(env);
out:
/* check for NULL is necessary, since cur_state can be freed inside
@@ -22405,6 +22462,15 @@ static void print_verification_stats(struct bpf_verifier_env *env)
env->peak_states, env->longest_mark_read_walk);
}
int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
const struct bpf_ctx_arg_aux *info, u32 cnt)
{
prog->aux->ctx_arg_info = kmemdup_array(info, cnt, sizeof(*info), GFP_KERNEL);
prog->aux->ctx_arg_info_size = cnt;
return prog->aux->ctx_arg_info ? 0 : -ENOMEM;
}
static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
{
const struct btf_type *t, *func_proto;
@@ -22485,17 +22551,12 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
return -EACCES;
}
/* btf_ctx_access() used this to provide argument type info */
prog->aux->ctx_arg_info =
st_ops_desc->arg_info[member_idx].info;
prog->aux->ctx_arg_info_size =
st_ops_desc->arg_info[member_idx].cnt;
prog->aux->attach_func_proto = func_proto;
prog->aux->attach_func_name = mname;
env->ops = st_ops->verifier_ops;
return 0;
return bpf_prog_ctx_arg_info_init(prog, st_ops_desc->arg_info[member_idx].info,
st_ops_desc->arg_info[member_idx].cnt);
}
#define SECURITY_PREFIX "security_"
@@ -22945,9 +23006,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
prog->aux->attach_btf_trace = true;
return 0;
} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
if (!bpf_iter_prog_supported(prog))
return -EINVAL;
return 0;
return bpf_iter_prog_supported(prog);
}
if (prog->type == BPF_PROG_TYPE_LSM) {

View File

@@ -0,0 +1,16 @@
#include <test_progs.h>
#include "struct_ops_kptr_return.skel.h"
#include "struct_ops_kptr_return_fail__wrong_type.skel.h"
#include "struct_ops_kptr_return_fail__invalid_scalar.skel.h"
#include "struct_ops_kptr_return_fail__nonzero_offset.skel.h"
#include "struct_ops_kptr_return_fail__local_kptr.skel.h"
void test_struct_ops_kptr_return(void)
{
RUN_TESTS(struct_ops_kptr_return);
RUN_TESTS(struct_ops_kptr_return_fail__wrong_type);
RUN_TESTS(struct_ops_kptr_return_fail__invalid_scalar);
RUN_TESTS(struct_ops_kptr_return_fail__nonzero_offset);
RUN_TESTS(struct_ops_kptr_return_fail__local_kptr);
}

View File

@@ -0,0 +1,12 @@
#include <test_progs.h>
#include "struct_ops_refcounted.skel.h"
#include "struct_ops_refcounted_fail__ref_leak.skel.h"
#include "struct_ops_refcounted_fail__global_subprog.skel.h"
void test_struct_ops_refcounted(void)
{
RUN_TESTS(struct_ops_refcounted);
RUN_TESTS(struct_ops_refcounted_fail__ref_leak);
RUN_TESTS(struct_ops_refcounted_fail__global_subprog);
}

View File

@@ -0,0 +1,30 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "../test_kmods/bpf_testmod.h"
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
void bpf_task_release(struct task_struct *p) __ksym;
/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
 * allow a referenced kptr or a NULL pointer to be returned. A referenced kptr to task
 * here is acquired automatically as the task argument is tagged with "__ref".
*/
SEC("struct_ops/test_return_ref_kptr")
struct task_struct *BPF_PROG(kptr_return, int dummy,
struct task_struct *task, struct cgroup *cgrp)
{
if (dummy % 2) {
bpf_task_release(task);
return NULL;
}
return task;
}
SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_kptr_return = {
.test_return_ref_kptr = (void *)kptr_return,
};

View File

@@ -0,0 +1,26 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "../test_kmods/bpf_testmod.h"
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;
/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
* reject programs returning a non-zero scalar value.
*/
SEC("struct_ops/test_return_ref_kptr")
__failure __msg("At program exit the register R0 has smin=1 smax=1 should have been in [0, 0]")
struct task_struct *BPF_PROG(kptr_return_fail__invalid_scalar, int dummy,
struct task_struct *task, struct cgroup *cgrp)
{
bpf_task_release(task);
return (struct task_struct *)1;
}
SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_kptr_return = {
.test_return_ref_kptr = (void *)kptr_return_fail__invalid_scalar,
};

View File

@@ -0,0 +1,34 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "../test_kmods/bpf_testmod.h"
#include "bpf_experimental.h"
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;
/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
* reject programs returning a local kptr.
*/
SEC("struct_ops/test_return_ref_kptr")
__failure __msg("At program exit the register R0 is not a known value (ptr_or_null_)")
struct task_struct *BPF_PROG(kptr_return_fail__local_kptr, int dummy,
struct task_struct *task, struct cgroup *cgrp)
{
struct task_struct *t;
bpf_task_release(task);
t = bpf_obj_new(typeof(*task));
if (!t)
return NULL;
return t;
}
SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_kptr_return = {
.test_return_ref_kptr = (void *)kptr_return_fail__local_kptr,
};

View File

@@ -0,0 +1,25 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "../test_kmods/bpf_testmod.h"
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;
/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
* reject programs returning a modified referenced kptr.
*/
SEC("struct_ops/test_return_ref_kptr")
__failure __msg("dereference of modified trusted_ptr_ ptr R0 off={{[0-9]+}} disallowed")
struct task_struct *BPF_PROG(kptr_return_fail__nonzero_offset, int dummy,
struct task_struct *task, struct cgroup *cgrp)
{
return (struct task_struct *)&task->jobctl;
}
SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_kptr_return = {
.test_return_ref_kptr = (void *)kptr_return_fail__nonzero_offset,
};

View File

@@ -0,0 +1,30 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "../test_kmods/bpf_testmod.h"
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;
/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
* reject programs returning a referenced kptr of the wrong type.
*/
SEC("struct_ops/test_return_ref_kptr")
__failure __msg("At program exit the register R0 is not a known value (ptr_or_null_)")
struct task_struct *BPF_PROG(kptr_return_fail__wrong_type, int dummy,
struct task_struct *task, struct cgroup *cgrp)
{
struct task_struct *ret;
ret = (struct task_struct *)bpf_cgroup_acquire(cgrp);
bpf_task_release(task);
return ret;
}
SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_kptr_return = {
.test_return_ref_kptr = (void *)kptr_return_fail__wrong_type,
};

View File

@@ -0,0 +1,31 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "../test_kmods/bpf_testmod.h"
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
__attribute__((nomerge)) extern void bpf_task_release(struct task_struct *p) __ksym;
/* This is a test BPF program that uses struct_ops to access a referenced
* kptr argument. This is a test for the verifier to ensure that it
 * 1) recognizes the task as a referenced object (i.e., ref_obj_id > 0), and
* 2) the same reference can be acquired from multiple paths as long as it
* has not been released.
*/
SEC("struct_ops/test_refcounted")
int BPF_PROG(refcounted, int dummy, struct task_struct *task)
{
if (dummy == 1)
bpf_task_release(task);
else
bpf_task_release(task);
return 0;
}
SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_refcounted = {
.test_refcounted = (void *)refcounted,
};

View File

@@ -0,0 +1,39 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "../test_kmods/bpf_testmod.h"
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
extern void bpf_task_release(struct task_struct *p) __ksym;
__noinline int subprog_release(__u64 *ctx __arg_ctx)
{
struct task_struct *task = (struct task_struct *)ctx[1];
int dummy = (int)ctx[0];
bpf_task_release(task);
return dummy + 1;
}
/* Test that the verifier rejects a program that contains a global
* subprogram with referenced kptr arguments
*/
SEC("struct_ops/test_refcounted")
__failure __log_level(2)
__msg("Validating subprog_release() func#1...")
__msg("invalid bpf_context access off=8. Reference may already be released")
int refcounted_fail__global_subprog(unsigned long long *ctx)
{
struct task_struct *task = (struct task_struct *)ctx[1];
bpf_task_release(task);
return subprog_release(ctx);
}
SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_ref_acquire = {
.test_refcounted = (void *)refcounted_fail__global_subprog,
};

View File

@@ -0,0 +1,22 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "../test_kmods/bpf_testmod.h"
#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
/* Test that the verifier rejects a program that acquires a referenced
* kptr through context without releasing the reference
*/
SEC("struct_ops/test_refcounted")
__failure __msg("Unreleased reference id=1 alloc_insn=0")
int BPF_PROG(refcounted_fail__ref_leak, int dummy,
struct task_struct *task)
{
return 0;
}
SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_ref_acquire = {
.test_refcounted = (void *)refcounted_fail__ref_leak,
};

View File

@@ -1176,10 +1176,25 @@ static int bpf_testmod_ops__test_maybe_null(int dummy,
return 0;
}
static int bpf_testmod_ops__test_refcounted(int dummy,
struct task_struct *task__ref)
{
return 0;
}
static struct task_struct *
bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref,
struct cgroup *cgrp)
{
return NULL;
}
static struct bpf_testmod_ops __bpf_testmod_ops = {
.test_1 = bpf_testmod_test_1,
.test_2 = bpf_testmod_test_2,
.test_maybe_null = bpf_testmod_ops__test_maybe_null,
.test_refcounted = bpf_testmod_ops__test_refcounted,
.test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr,
};
struct bpf_struct_ops bpf_bpf_testmod_ops = {

View File

@@ -6,6 +6,7 @@
#include <linux/types.h>
struct task_struct;
struct cgroup;
struct bpf_testmod_test_read_ctx {
char *buf;
@@ -36,6 +37,11 @@ struct bpf_testmod_ops {
/* Used to test nullable arguments. */
int (*test_maybe_null)(int dummy, struct task_struct *task);
int (*unsupported_ops)(void);
/* Used to test ref_acquired arguments. */
int (*test_refcounted)(int dummy, struct task_struct *task);
/* Used to test returning referenced kptr. */
struct task_struct *(*test_return_ref_kptr)(int dummy, struct task_struct *task,
struct cgroup *cgrp);
/* The following fields are used to test shadow copies. */
char onebyte;