Skip to content

Commit

Permalink
selftests/bpf: Add selftests for cgroup1 local storage
Browse files Browse the repository at this point in the history
Expanding the test coverage from cgroup2 to include cgroup1. The results
are as follows:

Already existing test cases for cgroup2:
  torvalds#48/1    cgrp_local_storage/tp_btf:OK
  torvalds#48/2    cgrp_local_storage/attach_cgroup:OK
  torvalds#48/3    cgrp_local_storage/recursion:OK
  torvalds#48/4    cgrp_local_storage/negative:OK
  torvalds#48/5    cgrp_local_storage/cgroup_iter_sleepable:OK
  torvalds#48/6    cgrp_local_storage/yes_rcu_lock:OK
  torvalds#48/7    cgrp_local_storage/no_rcu_lock:OK

Expanded test cases for cgroup1:
  torvalds#48/8    cgrp_local_storage/cgrp1_tp_btf:OK
  torvalds#48/9    cgrp_local_storage/cgrp1_recursion:OK
  torvalds#48/10   cgrp_local_storage/cgrp1_negative:OK
  torvalds#48/11   cgrp_local_storage/cgrp1_iter_sleepable:OK
  torvalds#48/12   cgrp_local_storage/cgrp1_yes_rcu_lock:OK
  torvalds#48/13   cgrp_local_storage/cgrp1_no_rcu_lock:OK

Summary:
  torvalds#48      cgrp_local_storage:OK
  Summary: 1/13 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
  • Loading branch information
laoar authored and intel-lab-lkp committed Dec 5, 2023
1 parent c6b84af commit 451492f
Show file tree
Hide file tree
Showing 4 changed files with 278 additions and 47 deletions.
92 changes: 91 additions & 1 deletion tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@ struct socket_cookie {
__u64 cookie_value;
};

bool is_cgroup1;
int target_hid;

static void test_tp_btf(int cgroup_fd)
{
struct cgrp_ls_tp_btf *skel;
Expand All @@ -29,6 +32,9 @@ static void test_tp_btf(int cgroup_fd)
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;

skel->bss->is_cgroup1 = is_cgroup1;
skel->bss->target_hid = target_hid;

/* populate a value in map_b */
err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY);
if (!ASSERT_OK(err, "map_update_elem"))
Expand Down Expand Up @@ -130,6 +136,9 @@ static void test_recursion(int cgroup_fd)
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;

skel->bss->target_hid = target_hid;
skel->bss->is_cgroup1 = is_cgroup1;

err = cgrp_ls_recursion__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
Expand Down Expand Up @@ -165,6 +174,9 @@ static void test_cgroup_iter_sleepable(int cgroup_fd, __u64 cgroup_id)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;

skel->bss->target_hid = target_hid;
skel->bss->is_cgroup1 = is_cgroup1;

bpf_program__set_autoload(skel->progs.cgroup_iter, true);
err = cgrp_ls_sleepable__load(skel);
if (!ASSERT_OK(err, "skel_load"))
Expand Down Expand Up @@ -202,6 +214,8 @@ static void test_yes_rcu_lock(__u64 cgroup_id)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;

skel->bss->target_hid = target_hid;
skel->bss->is_cgroup1 = is_cgroup1;
skel->bss->target_pid = syscall(SYS_gettid);

bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
Expand Down Expand Up @@ -229,14 +243,36 @@ static void test_no_rcu_lock(void)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;

skel->bss->target_hid = target_hid;
skel->bss->is_cgroup1 = is_cgroup1;

bpf_program__set_autoload(skel->progs.no_rcu_lock, true);
err = cgrp_ls_sleepable__load(skel);
ASSERT_ERR(err, "skel_load");

cgrp_ls_sleepable__destroy(skel);
}

void test_cgrp_local_storage(void)
/* Load-only check for the cgrp1_no_rcu_lock program: with a cgroup1
 * hierarchy configured, the sleepable prog is expected to pass the
 * verifier, so loading the skeleton must succeed.
 */
static void test_cgrp1_no_rcu_lock(void)
{
	struct cgrp_ls_sleepable *skel;
	int ret;

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* Direct the prog at the cgroup1 hierarchy under test. */
	skel->bss->is_cgroup1 = is_cgroup1;
	skel->bss->target_hid = target_hid;

	bpf_program__set_autoload(skel->progs.cgrp1_no_rcu_lock, true);
	ret = cgrp_ls_sleepable__load(skel);
	ASSERT_OK(ret, "skel_load");

	cgrp_ls_sleepable__destroy(skel);
}

void cgrp2_local_storage(void)
{
__u64 cgroup_id;
int cgroup_fd;
Expand All @@ -245,6 +281,8 @@ void test_cgrp_local_storage(void)
if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /cgrp_local_storage"))
return;

is_cgroup1 = 0;
target_hid = -1;
cgroup_id = get_cgroup_id("/cgrp_local_storage");
if (test__start_subtest("tp_btf"))
test_tp_btf(cgroup_fd);
Expand All @@ -263,3 +301,55 @@ void test_cgrp_local_storage(void)

close(cgroup_fd);
}

/* Run every cgroup1 subtest against the net_cls hierarchy: set up the
 * classid environment, join it, resolve the cgroup fd/id/hierarchy id,
 * then dispatch the cgrp1_* subtests.  Cleanup runs in reverse order of
 * acquisition via the goto labels.
 */
void cgrp1_local_storage(void)
{
	int cgrp1_fd, cgrp1_hid, cgrp1_id, ret;

	/* Build the cgroup1 (net_cls) hierarchy and enter it. */
	ret = setup_classid_environment();
	if (!ASSERT_OK(ret, "setup_classid_environment"))
		return;

	ret = join_classid();
	if (!ASSERT_OK(ret, "join_cgroup1"))
		goto cleanup;

	cgrp1_fd = open_classid();
	if (!ASSERT_GE(cgrp1_fd, 0, "cgroup1 fd"))
		goto cleanup;

	/* NOTE(review): cgrp1_id is an int; if get_classid_cgroup_id() can
	 * return a 64-bit cgroup id this may truncate — confirm the helper's
	 * return type.
	 */
	cgrp1_id = get_classid_cgroup_id();
	if (!ASSERT_GE(cgrp1_id, 0, "cgroup1 id"))
		goto close_fd;

	cgrp1_hid = get_cgroup1_hierarchy_id("net_cls");
	if (!ASSERT_GE(cgrp1_hid, 0, "cgroup1 hid"))
		goto close_fd;

	/* Globals consumed by the shared test helpers and BPF progs. */
	is_cgroup1 = 1;
	target_hid = cgrp1_hid;

	if (test__start_subtest("cgrp1_tp_btf"))
		test_tp_btf(cgrp1_fd);
	if (test__start_subtest("cgrp1_recursion"))
		test_recursion(cgrp1_fd);
	if (test__start_subtest("cgrp1_negative"))
		test_negative();
	if (test__start_subtest("cgrp1_iter_sleepable"))
		test_cgroup_iter_sleepable(cgrp1_fd, cgrp1_id);
	if (test__start_subtest("cgrp1_yes_rcu_lock"))
		test_yes_rcu_lock(cgrp1_id);
	if (test__start_subtest("cgrp1_no_rcu_lock"))
		test_cgrp1_no_rcu_lock();

close_fd:
	close(cgrp1_fd);
cleanup:
	cleanup_classid_environment();
}

/* Test entry point: run the cgroup2 subtests first, then the cgroup1
 * (net_cls hierarchy) subtests.  Each helper sets the is_cgroup1 /
 * target_hid globals for the mode it exercises.
 */
void test_cgrp_local_storage(void)
{
	cgrp2_local_storage();
	cgrp1_local_storage();
}
84 changes: 67 additions & 17 deletions tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
Original file line number Diff line number Diff line change
Expand Up @@ -21,50 +21,100 @@ struct {
__type(value, long);
} map_b SEC(".maps");

int target_hid = 0;
bool is_cgroup1 = 0;

struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

/* Shared lookup-path body: delete this cgroup's entries from both
 * storage maps.  Attached via fentry on bpf_local_storage_lookup (see
 * on_lookup below), so these deletes re-enter the local storage code.
 * The delete order (map_a then map_b) is kept as-is; the recursion
 * subtest depends on the exact operation sequence.
 */
static void __on_lookup(struct cgroup *cgrp)
{
	bpf_cgrp_storage_delete(&map_a, cgrp);
	bpf_cgrp_storage_delete(&map_b, cgrp);
}

SEC("fentry/bpf_local_storage_lookup")
int BPF_PROG(on_lookup)
{
struct task_struct *task = bpf_get_current_task_btf();
struct cgroup *cgrp;

if (is_cgroup1) {
cgrp = bpf_task_get_cgroup1(task, target_hid);
if (!cgrp)
return 0;

bpf_cgrp_storage_delete(&map_a, task->cgroups->dfl_cgrp);
bpf_cgrp_storage_delete(&map_b, task->cgroups->dfl_cgrp);
__on_lookup(cgrp);
bpf_cgroup_release(cgrp);
return 0;
}

__on_lookup(task->cgroups->dfl_cgrp);
return 0;
}

SEC("fentry/bpf_local_storage_update")
int BPF_PROG(on_update)
/* Shared update-path body: bump the per-cgroup counter in both storage
 * maps, creating the entries on first use.
 *
 * Fix: the scraped diff left pre-patch lines interleaved — an unused
 * 'task' declaration and duplicate bpf_cgrp_storage_get() calls on
 * task->cgroups->dfl_cgrp whose results were immediately overwritten;
 * they are removed here.
 */
static void __on_update(struct cgroup *cgrp)
{
	long *ptr;

	ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (ptr)
		*ptr += 1;

	ptr = bpf_cgrp_storage_get(&map_b, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (ptr)
		*ptr += 1;
}

SEC("fentry/bpf_local_storage_update")
int BPF_PROG(on_update)
{
struct task_struct *task = bpf_get_current_task_btf();
struct cgroup *cgrp;

if (is_cgroup1) {
cgrp = bpf_task_get_cgroup1(task, target_hid);
if (!cgrp)
return 0;

__on_update(cgrp);
bpf_cgroup_release(cgrp);
return 0;
}

__on_update(task->cgroups->dfl_cgrp);
return 0;
}

SEC("tp_btf/sys_enter")
int BPF_PROG(on_enter, struct pt_regs *regs, long id)
/* Shared sys_enter body: write sentinel values into both storage maps
 * for @cgrp (map_a = 200, map_b = 100), creating entries on demand.
 *
 * Fix: the scraped diff left pre-patch lines interleaved — an unused
 * 'task' declaration/assignment and duplicate bpf_cgrp_storage_get()
 * calls on task->cgroups->dfl_cgrp; they are removed here.
 */
static void __on_enter(struct pt_regs *regs, long id, struct cgroup *cgrp)
{
	long *ptr;

	ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (ptr)
		*ptr = 200;

	ptr = bpf_cgrp_storage_get(&map_b, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (ptr)
		*ptr = 100;
}

SEC("tp_btf/sys_enter")
int BPF_PROG(on_enter, struct pt_regs *regs, long id)
{
struct task_struct *task = bpf_get_current_task_btf();
struct cgroup *cgrp;

if (is_cgroup1) {
cgrp = bpf_task_get_cgroup1(task, target_hid);
if (!cgrp)
return 0;

__on_enter(regs, id, cgrp);
bpf_cgroup_release(cgrp);
return 0;
}

__on_enter(regs, id, task->cgroups->dfl_cgrp);
return 0;
}
67 changes: 60 additions & 7 deletions tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,11 @@ struct {

__u32 target_pid;
__u64 cgroup_id;
int target_hid;
bool is_cgroup1;

struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

Expand All @@ -37,23 +41,56 @@ int cgroup_iter(struct bpf_iter__cgroup *ctx)
return 0;
}

/* Shared body for the no-RCU-lock progs: get-or-create the storage for
 * @cgrp in map_a and, on success, publish the cgroup's kernfs id via
 * the global 'cgroup_id' for the userspace side to check.
 */
static void __no_rcu_lock(struct cgroup *cgrp)
{
	long *value;

	/* Sleepable programs hold trace RCU, which is enough for
	 * bpf_cgrp_storage_get() to be callable here.
	 */
	value = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (value)
		cgroup_id = cgrp->kn->id;
}

SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int no_rcu_lock(void *ctx)
int cgrp1_no_rcu_lock(void *ctx)
{
struct task_struct *task;
struct cgroup *cgrp;
long *ptr;

if (!is_cgroup1)
return 0;

task = bpf_get_current_task_btf();
if (task->pid != target_pid)
return 0;

/* bpf_task_get_cgroup1 can work in sleepable prog */
cgrp = bpf_task_get_cgroup1(task, target_hid);
if (!cgrp)
return 0;

__no_rcu_lock(cgrp);
bpf_cgroup_release(cgrp);
return 0;
}

SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int no_rcu_lock(void *ctx)
{
struct task_struct *task;

if (is_cgroup1)
return 0;

task = bpf_get_current_task_btf();
if (task->pid != target_pid)
return 0;

/* task->cgroups is untrusted in sleepable prog outside of RCU CS */
cgrp = task->cgroups->dfl_cgrp;
ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr)
cgroup_id = cgrp->kn->id;
__no_rcu_lock(task->cgroups->dfl_cgrp);
return 0;
}

Expand All @@ -68,6 +105,22 @@ int yes_rcu_lock(void *ctx)
if (task->pid != target_pid)
return 0;

if (is_cgroup1) {
bpf_rcu_read_lock();
cgrp = bpf_task_get_cgroup1(task, target_hid);
if (!cgrp) {
bpf_rcu_read_unlock();
return 0;
}

ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
if (ptr)
cgroup_id = cgrp->kn->id;
bpf_cgroup_release(cgrp);
bpf_rcu_read_unlock();
return 0;
}

bpf_rcu_read_lock();
cgrp = task->cgroups->dfl_cgrp;
/* cgrp is trusted under RCU CS */
Expand Down
Loading

0 comments on commit 451492f

Please sign in to comment.