Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Test global bpf_rb_root arrays and fields in nested struct types.

Make sure global arrays of bpf_rb_root and fields of bpf_rb_root in nested
struct types work correctly.

Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
Link: https://lore.kernel.org/r/20240523174202.461236-9-thinker.li@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Kui-Feng Lee and committed by Alexei Starovoitov.
d55c765a c4c6c3b7

+124
+47
tools/testing/selftests/bpf/prog_tests/rbtree.c
··· 31 31 rbtree__destroy(skel); 32 32 } 33 33 34 + static void test_rbtree_add_nodes_nested(void) 35 + { 36 + LIBBPF_OPTS(bpf_test_run_opts, opts, 37 + .data_in = &pkt_v4, 38 + .data_size_in = sizeof(pkt_v4), 39 + .repeat = 1, 40 + ); 41 + struct rbtree *skel; 42 + int ret; 43 + 44 + skel = rbtree__open_and_load(); 45 + if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) 46 + return; 47 + 48 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes_nested), &opts); 49 + ASSERT_OK(ret, "rbtree_add_nodes_nested run"); 50 + ASSERT_OK(opts.retval, "rbtree_add_nodes_nested retval"); 51 + ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes_nested less_callback_ran"); 52 + 53 + rbtree__destroy(skel); 54 + } 55 + 34 56 static void test_rbtree_add_and_remove(void) 35 57 { 36 58 LIBBPF_OPTS(bpf_test_run_opts, opts, ··· 71 49 ASSERT_OK(ret, "rbtree_add_and_remove"); 72 50 ASSERT_OK(opts.retval, "rbtree_add_and_remove retval"); 73 51 ASSERT_EQ(skel->data->removed_key, 5, "rbtree_add_and_remove first removed key"); 52 + 53 + rbtree__destroy(skel); 54 + } 55 + 56 + static void test_rbtree_add_and_remove_array(void) 57 + { 58 + LIBBPF_OPTS(bpf_test_run_opts, opts, 59 + .data_in = &pkt_v4, 60 + .data_size_in = sizeof(pkt_v4), 61 + .repeat = 1, 62 + ); 63 + struct rbtree *skel; 64 + int ret; 65 + 66 + skel = rbtree__open_and_load(); 67 + if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) 68 + return; 69 + 70 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove_array), &opts); 71 + ASSERT_OK(ret, "rbtree_add_and_remove_array"); 72 + ASSERT_OK(opts.retval, "rbtree_add_and_remove_array retval"); 74 73 75 74 rbtree__destroy(skel); 76 75 } ··· 147 104 { 148 105 if (test__start_subtest("rbtree_add_nodes")) 149 106 test_rbtree_add_nodes(); 107 + if (test__start_subtest("rbtree_add_nodes_nested")) 108 + test_rbtree_add_nodes_nested(); 150 109 if (test__start_subtest("rbtree_add_and_remove")) 151 110 test_rbtree_add_and_remove(); 111 + if (test__start_subtest("rbtree_add_and_remove_array")) 112 + test_rbtree_add_and_remove_array(); 152 113 if (test__start_subtest("rbtree_first_and_remove")) 153 114 test_rbtree_first_and_remove(); 154 115 if (test__start_subtest("rbtree_api_release_aliasing"))
+77
tools/testing/selftests/bpf/progs/rbtree.c
··· 13 13 struct bpf_rb_node node; 14 14 }; 15 15 16 + struct root_nested_inner { 17 + struct bpf_spin_lock glock; 18 + struct bpf_rb_root root __contains(node_data, node); 19 + }; 20 + 21 + struct root_nested { 22 + struct root_nested_inner inner; 23 + }; 24 + 16 25 long less_callback_ran = -1; 17 26 long removed_key = -1; 18 27 long first_data[2] = {-1, -1}; ··· 29 20 #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) 30 21 private(A) struct bpf_spin_lock glock; 31 22 private(A) struct bpf_rb_root groot __contains(node_data, node); 23 + private(A) struct bpf_rb_root groot_array[2] __contains(node_data, node); 24 + private(A) struct bpf_rb_root groot_array_one[1] __contains(node_data, node); 25 + private(B) struct root_nested groot_nested; 32 26 33 27 static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) 34 28 { ··· 84 72 } 85 73 86 74 SEC("tc") 75 + long rbtree_add_nodes_nested(void *ctx) 76 + { 77 + return __add_three(&groot_nested.inner.root, &groot_nested.inner.glock); 78 + } 79 + 80 + SEC("tc") 87 81 long rbtree_add_and_remove(void *ctx) 88 82 { 89 83 struct bpf_rb_node *res = NULL; ··· 124 106 bpf_obj_drop(n); 125 107 if (m) 126 108 bpf_obj_drop(m); 109 + return 1; 110 + } 111 + 112 + SEC("tc") 113 + long rbtree_add_and_remove_array(void *ctx) 114 + { 115 + struct bpf_rb_node *res1 = NULL, *res2 = NULL, *res3 = NULL; 116 + struct node_data *nodes[3][2] = {{NULL, NULL}, {NULL, NULL}, {NULL, NULL}}; 117 + struct node_data *n; 118 + long k1 = -1, k2 = -1, k3 = -1; 119 + int i, j; 120 + 121 + for (i = 0; i < 3; i++) { 122 + for (j = 0; j < 2; j++) { 123 + nodes[i][j] = bpf_obj_new(typeof(*nodes[i][j])); 124 + if (!nodes[i][j]) 125 + goto err_out; 126 + nodes[i][j]->key = i * 2 + j; 127 + } 128 + } 129 + 130 + bpf_spin_lock(&glock); 131 + for (i = 0; i < 2; i++) 132 + for (j = 0; j < 2; j++) 133 + bpf_rbtree_add(&groot_array[i], &nodes[i][j]->node, less); 134 + for (j = 0; j < 2; j++) 135 + bpf_rbtree_add(&groot_array_one[0], &nodes[2][j]->node, less); 136 + res1 = bpf_rbtree_remove(&groot_array[0], &nodes[0][0]->node); 137 + res2 = bpf_rbtree_remove(&groot_array[1], &nodes[1][0]->node); 138 + res3 = bpf_rbtree_remove(&groot_array_one[0], &nodes[2][0]->node); 139 + bpf_spin_unlock(&glock); 140 + 141 + if (res1) { 142 + n = container_of(res1, struct node_data, node); 143 + k1 = n->key; 144 + bpf_obj_drop(n); 145 + } 146 + if (res2) { 147 + n = container_of(res2, struct node_data, node); 148 + k2 = n->key; 149 + bpf_obj_drop(n); 150 + } 151 + if (res3) { 152 + n = container_of(res3, struct node_data, node); 153 + k3 = n->key; 154 + bpf_obj_drop(n); 155 + } 156 + if (k1 != 0 || k2 != 2 || k3 != 4) 157 + return 2; 158 + 159 + return 0; 160 + 161 + err_out: 162 + for (i = 0; i < 3; i++) { 163 + for (j = 0; j < 2; j++) { 164 + if (nodes[i][j]) 165 + bpf_obj_drop(nodes[i][j]); 166 + } 167 + } 127 168 return 1; 128 169 } 129 170