Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

selftests/bpf: Add selftests for cgroup1 local storage

Expand the test coverage from cgroup2 to include cgroup1. The results are
as follows:

Already existing test cases for cgroup2:
#48/1 cgrp_local_storage/tp_btf:OK
#48/2 cgrp_local_storage/attach_cgroup:OK
#48/3 cgrp_local_storage/recursion:OK
#48/4 cgrp_local_storage/negative:OK
#48/5 cgrp_local_storage/cgroup_iter_sleepable:OK
#48/6 cgrp_local_storage/yes_rcu_lock:OK
#48/7 cgrp_local_storage/no_rcu_lock:OK

Expanded test cases for cgroup1:
#48/8 cgrp_local_storage/cgrp1_tp_btf:OK
#48/9 cgrp_local_storage/cgrp1_recursion:OK
#48/10 cgrp_local_storage/cgrp1_negative:OK
#48/11 cgrp_local_storage/cgrp1_iter_sleepable:OK
#48/12 cgrp_local_storage/cgrp1_yes_rcu_lock:OK
#48/13 cgrp_local_storage/cgrp1_no_rcu_lock:OK

Summary:
#48 cgrp_local_storage:OK
Summary: 1/13 PASSED, 0 SKIPPED, 0 FAILED
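
These subtests can be exercised with the BPF selftest runner; a typical
invocation (assuming a built tools/testing/selftests/bpf tree and root
privileges) looks something like:

  cd tools/testing/selftests/bpf
  ./test_progs -t cgrp_local_storage

The cgrp1_* subtests additionally set up a cgroup1 net_cls hierarchy via
the classid helpers, so they need a kernel with cgroup1 net_cls support.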

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20231206115326.4295-4-laoar.shao@gmail.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>

Authored by Yafang Shao, committed by Martin KaFai Lau (a2c6380b f4199271)

4 files changed, +291 -60
tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c | +97 -1
···
         __u64 cookie_value;
 };

+static bool is_cgroup1;
+static int target_hid;
+
+#define CGROUP_MODE_SET(skel)                   \
+{                                               \
+        skel->bss->is_cgroup1 = is_cgroup1;     \
+        skel->bss->target_hid = target_hid;     \
+}
+
+static void cgroup_mode_value_init(bool cgroup, int hid)
+{
+        is_cgroup1 = cgroup;
+        target_hid = hid;
+}
+
 static void test_tp_btf(int cgroup_fd)
 {
         struct cgrp_ls_tp_btf *skel;
···
         skel = cgrp_ls_tp_btf__open_and_load();
         if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
                 return;
+
+        CGROUP_MODE_SET(skel);

         /* populate a value in map_b */
         err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY);
···
         if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
                 return;

+        CGROUP_MODE_SET(skel);
+
         err = cgrp_ls_recursion__attach(skel);
         if (!ASSERT_OK(err, "skel_attach"))
                 goto out;
···
         skel = cgrp_ls_sleepable__open();
         if (!ASSERT_OK_PTR(skel, "skel_open"))
                 return;
+
+        CGROUP_MODE_SET(skel);

         bpf_program__set_autoload(skel->progs.cgroup_iter, true);
         err = cgrp_ls_sleepable__load(skel);
···
         if (!ASSERT_OK_PTR(skel, "skel_open"))
                 return;

+        CGROUP_MODE_SET(skel);
         skel->bss->target_pid = syscall(SYS_gettid);

         bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
···
         if (!ASSERT_OK_PTR(skel, "skel_open"))
                 return;

+        CGROUP_MODE_SET(skel);
+
         bpf_program__set_autoload(skel->progs.no_rcu_lock, true);
         err = cgrp_ls_sleepable__load(skel);
         ASSERT_ERR(err, "skel_load");
···
         cgrp_ls_sleepable__destroy(skel);
 }

-void test_cgrp_local_storage(void)
+static void test_cgrp1_no_rcu_lock(void)
+{
+        struct cgrp_ls_sleepable *skel;
+        int err;
+
+        skel = cgrp_ls_sleepable__open();
+        if (!ASSERT_OK_PTR(skel, "skel_open"))
+                return;
+
+        CGROUP_MODE_SET(skel);
+
+        bpf_program__set_autoload(skel->progs.cgrp1_no_rcu_lock, true);
+        err = cgrp_ls_sleepable__load(skel);
+        ASSERT_OK(err, "skel_load");
+
+        cgrp_ls_sleepable__destroy(skel);
+}
+
+static void cgrp2_local_storage(void)
 {
         __u64 cgroup_id;
         int cgroup_fd;
···
         cgroup_fd = test__join_cgroup("/cgrp_local_storage");
         if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /cgrp_local_storage"))
                 return;
+
+        cgroup_mode_value_init(0, -1);

         cgroup_id = get_cgroup_id("/cgrp_local_storage");
         if (test__start_subtest("tp_btf"))
···
                 test_no_rcu_lock();

         close(cgroup_fd);
+}
+
+static void cgrp1_local_storage(void)
+{
+        int cgrp1_fd, cgrp1_hid, cgrp1_id, err;
+
+        /* Setup cgroup1 hierarchy */
+        err = setup_classid_environment();
+        if (!ASSERT_OK(err, "setup_classid_environment"))
+                return;
+
+        err = join_classid();
+        if (!ASSERT_OK(err, "join_cgroup1"))
+                goto cleanup;
+
+        cgrp1_fd = open_classid();
+        if (!ASSERT_GE(cgrp1_fd, 0, "cgroup1 fd"))
+                goto cleanup;
+
+        cgrp1_id = get_classid_cgroup_id();
+        if (!ASSERT_GE(cgrp1_id, 0, "cgroup1 id"))
+                goto close_fd;
+
+        cgrp1_hid = get_cgroup1_hierarchy_id("net_cls");
+        if (!ASSERT_GE(cgrp1_hid, 0, "cgroup1 hid"))
+                goto close_fd;
+
+        cgroup_mode_value_init(1, cgrp1_hid);
+
+        if (test__start_subtest("cgrp1_tp_btf"))
+                test_tp_btf(cgrp1_fd);
+        if (test__start_subtest("cgrp1_recursion"))
+                test_recursion(cgrp1_fd);
+        if (test__start_subtest("cgrp1_negative"))
+                test_negative();
+        if (test__start_subtest("cgrp1_iter_sleepable"))
+                test_cgroup_iter_sleepable(cgrp1_fd, cgrp1_id);
+        if (test__start_subtest("cgrp1_yes_rcu_lock"))
+                test_yes_rcu_lock(cgrp1_id);
+        if (test__start_subtest("cgrp1_no_rcu_lock"))
+                test_cgrp1_no_rcu_lock();
+
+close_fd:
+        close(cgrp1_fd);
+cleanup:
+        cleanup_classid_environment();
+}
+
+void test_cgrp_local_storage(void)
+{
+        cgrp2_local_storage();
+        cgrp1_local_storage();
 }
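
The cgroup1 setup above relies on the classid helpers in the selftests'
cgroup_helpers.c (setup_classid_environment(), join_classid(),
get_cgroup1_hierarchy_id(), ...), some of which are added earlier in this
series. As a rough, hypothetical sketch of what resolving the "net_cls"
hierarchy ID involves (not the actual helper implementation), user space can
parse /proc/self/cgroup, whose cgroup1 lines have the form
"<hierarchy-id>:<controller-list>:<path>":

/* Hypothetical sketch, not the selftest helper itself. */
#include <stdio.h>
#include <string.h>

static int find_cgroup1_hierarchy_id(const char *controller)
{
        FILE *f = fopen("/proc/self/cgroup", "r");
        char line[256];
        int hid = -1;

        if (!f)
                return -1;
        while (fgets(line, sizeof(line), f)) {
                char ctrls[128];
                int id;

                /* cgroup2 entries ("0::/path") have an empty controller list
                 * and are skipped because %[^:] does not match empty input.
                 */
                if (sscanf(line, "%d:%127[^:]:", &id, ctrls) != 2)
                        continue;
                if (strstr(ctrls, controller)) {
                        hid = id;
                        break;
                }
        }
        fclose(f);
        return hid;
}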
tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c | +71 -21
···
         __type(value, long);
 } map_b SEC(".maps");

+int target_hid = 0;
+bool is_cgroup1 = 0;
+
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+static void __on_lookup(struct cgroup *cgrp)
+{
+        bpf_cgrp_storage_delete(&map_a, cgrp);
+        bpf_cgrp_storage_delete(&map_b, cgrp);
+}
+
 SEC("fentry/bpf_local_storage_lookup")
 int BPF_PROG(on_lookup)
 {
         struct task_struct *task = bpf_get_current_task_btf();
+        struct cgroup *cgrp;

-        bpf_cgrp_storage_delete(&map_a, task->cgroups->dfl_cgrp);
-        bpf_cgrp_storage_delete(&map_b, task->cgroups->dfl_cgrp);
+        if (is_cgroup1) {
+                cgrp = bpf_task_get_cgroup1(task, target_hid);
+                if (!cgrp)
+                        return 0;
+
+                __on_lookup(cgrp);
+                bpf_cgroup_release(cgrp);
+                return 0;
+        }
+
+        __on_lookup(task->cgroups->dfl_cgrp);
         return 0;
+}
+
+static void __on_update(struct cgroup *cgrp)
+{
+        long *ptr;
+
+        ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+        if (ptr)
+                *ptr += 1;
+
+        ptr = bpf_cgrp_storage_get(&map_b, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+        if (ptr)
+                *ptr += 1;
 }

 SEC("fentry/bpf_local_storage_update")
 int BPF_PROG(on_update)
 {
         struct task_struct *task = bpf_get_current_task_btf();
+        struct cgroup *cgrp;
+
+        if (is_cgroup1) {
+                cgrp = bpf_task_get_cgroup1(task, target_hid);
+                if (!cgrp)
+                        return 0;
+
+                __on_update(cgrp);
+                bpf_cgroup_release(cgrp);
+                return 0;
+        }
+
+        __on_update(task->cgroups->dfl_cgrp);
+        return 0;
+}
+
+static void __on_enter(struct pt_regs *regs, long id, struct cgroup *cgrp)
+{
         long *ptr;

-        ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
+        ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
         if (ptr)
-                *ptr += 1;
+                *ptr = 200;

-        ptr = bpf_cgrp_storage_get(&map_b, task->cgroups->dfl_cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
+        ptr = bpf_cgrp_storage_get(&map_b, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
         if (ptr)
-                *ptr += 1;
-
-        return 0;
+                *ptr = 100;
 }

 SEC("tp_btf/sys_enter")
 int BPF_PROG(on_enter, struct pt_regs *regs, long id)
 {
-        struct task_struct *task;
-        long *ptr;
+        struct task_struct *task = bpf_get_current_task_btf();
+        struct cgroup *cgrp;

-        task = bpf_get_current_task_btf();
-        ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
-        if (ptr)
-                *ptr = 200;
+        if (is_cgroup1) {
+                cgrp = bpf_task_get_cgroup1(task, target_hid);
+                if (!cgrp)
+                        return 0;

-        ptr = bpf_cgrp_storage_get(&map_b, task->cgroups->dfl_cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
-        if (ptr)
-                *ptr = 100;
+                __on_enter(regs, id, cgrp);
+                bpf_cgroup_release(cgrp);
+                return 0;
+        }
+
+        __on_enter(regs, id, task->cgroups->dfl_cgrp);
         return 0;
 }
tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c | +54 -7
···

 __u32 target_pid;
 __u64 cgroup_id;
+int target_hid;
+bool is_cgroup1;

+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
 void bpf_rcu_read_lock(void) __ksym;
 void bpf_rcu_read_unlock(void) __ksym;

···
         return 0;
 }

+static void __no_rcu_lock(struct cgroup *cgrp)
+{
+        long *ptr;
+
+        /* Note that trace rcu is held in sleepable prog, so we can use
+         * bpf_cgrp_storage_get() in sleepable prog.
+         */
+        ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
+        if (ptr)
+                cgroup_id = cgrp->kn->id;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int cgrp1_no_rcu_lock(void *ctx)
+{
+        struct task_struct *task;
+        struct cgroup *cgrp;
+
+        task = bpf_get_current_task_btf();
+        if (task->pid != target_pid)
+                return 0;
+
+        /* bpf_task_get_cgroup1 can work in sleepable prog */
+        cgrp = bpf_task_get_cgroup1(task, target_hid);
+        if (!cgrp)
+                return 0;
+
+        __no_rcu_lock(cgrp);
+        bpf_cgroup_release(cgrp);
+        return 0;
+}
+
 SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
 int no_rcu_lock(void *ctx)
 {
         struct task_struct *task;
-        struct cgroup *cgrp;
-        long *ptr;

         task = bpf_get_current_task_btf();
         if (task->pid != target_pid)
                 return 0;

         /* task->cgroups is untrusted in sleepable prog outside of RCU CS */
-        cgrp = task->cgroups->dfl_cgrp;
-        ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
-        if (ptr)
-                cgroup_id = cgrp->kn->id;
+        __no_rcu_lock(task->cgroups->dfl_cgrp);
         return 0;
 }
···
         task = bpf_get_current_task_btf();
         if (task->pid != target_pid)
                 return 0;
+
+        if (is_cgroup1) {
+                bpf_rcu_read_lock();
+                cgrp = bpf_task_get_cgroup1(task, target_hid);
+                if (!cgrp) {
+                        bpf_rcu_read_unlock();
+                        return 0;
+                }
+
+                ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+                if (ptr)
+                        cgroup_id = cgrp->kn->id;
+                bpf_cgroup_release(cgrp);
+                bpf_rcu_read_unlock();
+                return 0;
+        }

         bpf_rcu_read_lock();
         cgrp = task->cgroups->dfl_cgrp;
tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c | +69 -31
···
 int mismatch_cnt = 0;
 int enter_cnt = 0;
 int exit_cnt = 0;
+int target_hid = 0;
+bool is_cgroup1 = 0;
+
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+static void __on_enter(struct pt_regs *regs, long id, struct cgroup *cgrp)
+{
+        long *ptr;
+        int err;
+
+        /* populate value 0 */
+        ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
+        if (!ptr)
+                return;
+
+        /* delete value 0 */
+        err = bpf_cgrp_storage_delete(&map_a, cgrp);
+        if (err)
+                return;
+
+        /* value is not available */
+        ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, 0);
+        if (ptr)
+                return;
+
+        /* re-populate the value */
+        ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
+        if (!ptr)
+                return;
+        __sync_fetch_and_add(&enter_cnt, 1);
+        *ptr = MAGIC_VALUE + enter_cnt;
+}

 SEC("tp_btf/sys_enter")
 int BPF_PROG(on_enter, struct pt_regs *regs, long id)
 {
         struct task_struct *task;
-        long *ptr;
-        int err;
+        struct cgroup *cgrp;

         task = bpf_get_current_task_btf();
         if (task->pid != target_pid)
                 return 0;

-        /* populate value 0 */
-        ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
-        if (!ptr)
-                return 0;
+        if (is_cgroup1) {
+                cgrp = bpf_task_get_cgroup1(task, target_hid);
+                if (!cgrp)
+                        return 0;

-        /* delete value 0 */
-        err = bpf_cgrp_storage_delete(&map_a, task->cgroups->dfl_cgrp);
-        if (err)
+                __on_enter(regs, id, cgrp);
+                bpf_cgroup_release(cgrp);
                 return 0;
+        }

-        /* value is not available */
-        ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0, 0);
-        if (ptr)
-                return 0;
-
-        /* re-populate the value */
-        ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
-        if (!ptr)
-                return 0;
-        __sync_fetch_and_add(&enter_cnt, 1);
-        *ptr = MAGIC_VALUE + enter_cnt;
-
+        __on_enter(regs, id, task->cgroups->dfl_cgrp);
         return 0;
+}
+
+static void __on_exit(struct pt_regs *regs, long id, struct cgroup *cgrp)
+{
+        long *ptr;
+
+        ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
+        if (!ptr)
+                return;
+
+        __sync_fetch_and_add(&exit_cnt, 1);
+        if (*ptr != MAGIC_VALUE + exit_cnt)
+                __sync_fetch_and_add(&mismatch_cnt, 1);
 }

 SEC("tp_btf/sys_exit")
 int BPF_PROG(on_exit, struct pt_regs *regs, long id)
 {
         struct task_struct *task;
-        long *ptr;
+        struct cgroup *cgrp;

         task = bpf_get_current_task_btf();
         if (task->pid != target_pid)
                 return 0;

-        ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
-        if (!ptr)
-                return 0;
+        if (is_cgroup1) {
+                cgrp = bpf_task_get_cgroup1(task, target_hid);
+                if (!cgrp)
+                        return 0;

-        __sync_fetch_and_add(&exit_cnt, 1);
-        if (*ptr != MAGIC_VALUE + exit_cnt)
-                __sync_fetch_and_add(&mismatch_cnt, 1);
+                __on_exit(regs, id, cgrp);
+                bpf_cgroup_release(cgrp);
+                return 0;
+        }
+
+        __on_exit(regs, id, task->cgroups->dfl_cgrp);
         return 0;
 }