Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: add selftests for new insn_array map

Add the following selftests for new insn_array map:

* Incorrect instruction indexes are rejected
* Two programs can't use the same map
* BPF programs can't operate on the map
* no changes to code => map is the same
* expected changes when instructions are added
* expected changes when instructions are deleted
* expected changes when multiple functions are present

Signed-off-by: Anton Protopopov <a.s.protopopov@gmail.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20251105090410.1250500-5-a.s.protopopov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Anton Protopopov; committed by Alexei Starovoitov
218edd6d cbef91de

+409
+409
tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <bpf/bpf.h> 4 + #include <test_progs.h> 5 + 6 + #ifdef __x86_64__ 7 + static int map_create(__u32 map_type, __u32 max_entries) 8 + { 9 + const char *map_name = "insn_array"; 10 + __u32 key_size = 4; 11 + __u32 value_size = sizeof(struct bpf_insn_array_value); 12 + 13 + return bpf_map_create(map_type, map_name, key_size, value_size, max_entries, NULL); 14 + } 15 + 16 + static int prog_load(struct bpf_insn *insns, __u32 insn_cnt, int *fd_array, __u32 fd_array_cnt) 17 + { 18 + LIBBPF_OPTS(bpf_prog_load_opts, opts); 19 + 20 + opts.fd_array = fd_array; 21 + opts.fd_array_cnt = fd_array_cnt; 22 + 23 + return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, &opts); 24 + } 25 + 26 + static void __check_success(struct bpf_insn *insns, __u32 insn_cnt, __u32 *map_in, __u32 *map_out) 27 + { 28 + struct bpf_insn_array_value val = {}; 29 + int prog_fd = -1, map_fd, i; 30 + 31 + map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, insn_cnt); 32 + if (!ASSERT_GE(map_fd, 0, "map_create")) 33 + return; 34 + 35 + for (i = 0; i < insn_cnt; i++) { 36 + val.orig_off = map_in[i]; 37 + if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem")) 38 + goto cleanup; 39 + } 40 + 41 + if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze")) 42 + goto cleanup; 43 + 44 + prog_fd = prog_load(insns, insn_cnt, &map_fd, 1); 45 + if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)")) 46 + goto cleanup; 47 + 48 + for (i = 0; i < insn_cnt; i++) { 49 + char buf[64]; 50 + 51 + if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem")) 52 + goto cleanup; 53 + 54 + snprintf(buf, sizeof(buf), "val.xlated_off should be equal map_out[%d]", i); 55 + ASSERT_EQ(val.xlated_off, map_out[i], buf); 56 + } 57 + 58 + cleanup: 59 + close(prog_fd); 60 + close(map_fd); 61 + } 62 + 63 + /* 64 + * Load a program, which will not be anyhow mangled by the verifier. 
Add an 65 + * insn_array map pointing to every instruction. Check that it hasn't changed 66 + * after the program load. 67 + */ 68 + static void check_one_to_one_mapping(void) 69 + { 70 + struct bpf_insn insns[] = { 71 + BPF_MOV64_IMM(BPF_REG_0, 4), 72 + BPF_MOV64_IMM(BPF_REG_0, 3), 73 + BPF_MOV64_IMM(BPF_REG_0, 2), 74 + BPF_MOV64_IMM(BPF_REG_0, 1), 75 + BPF_MOV64_IMM(BPF_REG_0, 0), 76 + BPF_EXIT_INSN(), 77 + }; 78 + __u32 map_in[] = {0, 1, 2, 3, 4, 5}; 79 + __u32 map_out[] = {0, 1, 2, 3, 4, 5}; 80 + 81 + __check_success(insns, ARRAY_SIZE(insns), map_in, map_out); 82 + } 83 + 84 + /* 85 + * Load a program with two patches (get jiffies, for simplicity). Add an 86 + * insn_array map pointing to every instruction. Check how it was changed 87 + * after the program load. 88 + */ 89 + static void check_simple(void) 90 + { 91 + struct bpf_insn insns[] = { 92 + BPF_MOV64_IMM(BPF_REG_0, 2), 93 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64), 94 + BPF_MOV64_IMM(BPF_REG_0, 1), 95 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64), 96 + BPF_MOV64_IMM(BPF_REG_0, 0), 97 + BPF_EXIT_INSN(), 98 + }; 99 + __u32 map_in[] = {0, 1, 2, 3, 4, 5}; 100 + __u32 map_out[] = {0, 1, 4, 5, 8, 9}; 101 + 102 + __check_success(insns, ARRAY_SIZE(insns), map_in, map_out); 103 + } 104 + 105 + /* 106 + * Verifier can delete code in two cases: nops & dead code. 
From insn 107 + * array's point of view, the two cases are the same, so test using 108 + * the simplest method: by loading some nops 109 + */ 110 + static void check_deletions(void) 111 + { 112 + struct bpf_insn insns[] = { 113 + BPF_MOV64_IMM(BPF_REG_0, 2), 114 + BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */ 115 + BPF_MOV64_IMM(BPF_REG_0, 1), 116 + BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */ 117 + BPF_MOV64_IMM(BPF_REG_0, 0), 118 + BPF_EXIT_INSN(), 119 + }; 120 + __u32 map_in[] = {0, 1, 2, 3, 4, 5}; 121 + __u32 map_out[] = {0, -1, 1, -1, 2, 3}; 122 + 123 + __check_success(insns, ARRAY_SIZE(insns), map_in, map_out); 124 + } 125 + 126 + /* 127 + * Same test as check_deletions, but also add code which adds instructions 128 + */ 129 + static void check_deletions_with_functions(void) 130 + { 131 + struct bpf_insn insns[] = { 132 + BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */ 133 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64), 134 + BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */ 135 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 136 + BPF_MOV64_IMM(BPF_REG_0, 1), 137 + BPF_EXIT_INSN(), 138 + BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */ 139 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64), 140 + BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */ 141 + BPF_MOV64_IMM(BPF_REG_0, 2), 142 + BPF_EXIT_INSN(), 143 + }; 144 + __u32 map_in[] = { 0, 1, 2, 3, 4, 5, /* func */ 6, 7, 8, 9, 10}; 145 + __u32 map_out[] = {-1, 0, -1, 3, 4, 5, /* func */ -1, 6, -1, 9, 10}; 146 + 147 + __check_success(insns, ARRAY_SIZE(insns), map_in, map_out); 148 + } 149 + 150 + /* 151 + * Try to load a program with a map which points to outside of the program 152 + */ 153 + static void check_out_of_bounds_index(void) 154 + { 155 + struct bpf_insn insns[] = { 156 + BPF_MOV64_IMM(BPF_REG_0, 4), 157 + BPF_MOV64_IMM(BPF_REG_0, 3), 158 + BPF_MOV64_IMM(BPF_REG_0, 2), 159 + BPF_MOV64_IMM(BPF_REG_0, 1), 160 + BPF_MOV64_IMM(BPF_REG_0, 0), 161 + BPF_EXIT_INSN(), 162 + }; 163 + int prog_fd, map_fd; 164 + struct 
bpf_insn_array_value val = {}; 165 + int key; 166 + 167 + map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1); 168 + if (!ASSERT_GE(map_fd, 0, "map_create")) 169 + return; 170 + 171 + key = 0; 172 + val.orig_off = ARRAY_SIZE(insns); /* too big */ 173 + if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &key, &val, 0), 0, "bpf_map_update_elem")) 174 + goto cleanup; 175 + 176 + if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze")) 177 + goto cleanup; 178 + 179 + prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1); 180 + if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)")) { 181 + close(prog_fd); 182 + goto cleanup; 183 + } 184 + 185 + cleanup: 186 + close(map_fd); 187 + } 188 + 189 + /* 190 + * Try to load a program with a map which points to the middle of 16-bit insn 191 + */ 192 + static void check_mid_insn_index(void) 193 + { 194 + struct bpf_insn insns[] = { 195 + BPF_LD_IMM64(BPF_REG_0, 0), /* 2 x 8 */ 196 + BPF_EXIT_INSN(), 197 + }; 198 + int prog_fd, map_fd; 199 + struct bpf_insn_array_value val = {}; 200 + int key; 201 + 202 + map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1); 203 + if (!ASSERT_GE(map_fd, 0, "map_create")) 204 + return; 205 + 206 + key = 0; 207 + val.orig_off = 1; /* middle of 16-byte instruction */ 208 + if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &key, &val, 0), 0, "bpf_map_update_elem")) 209 + goto cleanup; 210 + 211 + if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze")) 212 + goto cleanup; 213 + 214 + prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1); 215 + if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)")) { 216 + close(prog_fd); 217 + goto cleanup; 218 + } 219 + 220 + cleanup: 221 + close(map_fd); 222 + } 223 + 224 + static void check_incorrect_index(void) 225 + { 226 + check_out_of_bounds_index(); 227 + check_mid_insn_index(); 228 + } 229 + 230 + /* Once map was initialized, it should be frozen */ 231 + static void 
check_load_unfrozen_map(void) 232 + { 233 + struct bpf_insn insns[] = { 234 + BPF_MOV64_IMM(BPF_REG_0, 0), 235 + BPF_EXIT_INSN(), 236 + }; 237 + int prog_fd = -1, map_fd; 238 + struct bpf_insn_array_value val = {}; 239 + int i; 240 + 241 + map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, ARRAY_SIZE(insns)); 242 + if (!ASSERT_GE(map_fd, 0, "map_create")) 243 + return; 244 + 245 + for (i = 0; i < ARRAY_SIZE(insns); i++) { 246 + val.orig_off = i; 247 + if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem")) 248 + goto cleanup; 249 + } 250 + 251 + prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1); 252 + if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)")) 253 + goto cleanup; 254 + 255 + /* correctness: now freeze the map, the program should load fine */ 256 + 257 + if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze")) 258 + goto cleanup; 259 + 260 + prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1); 261 + if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)")) 262 + goto cleanup; 263 + 264 + for (i = 0; i < ARRAY_SIZE(insns); i++) { 265 + if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem")) 266 + goto cleanup; 267 + 268 + ASSERT_EQ(val.xlated_off, i, "val should be equal i"); 269 + } 270 + 271 + cleanup: 272 + close(prog_fd); 273 + close(map_fd); 274 + } 275 + 276 + /* Map can be used only by one BPF program */ 277 + static void check_no_map_reuse(void) 278 + { 279 + struct bpf_insn insns[] = { 280 + BPF_MOV64_IMM(BPF_REG_0, 0), 281 + BPF_EXIT_INSN(), 282 + }; 283 + int prog_fd = -1, map_fd, extra_fd = -1; 284 + struct bpf_insn_array_value val = {}; 285 + int i; 286 + 287 + map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, ARRAY_SIZE(insns)); 288 + if (!ASSERT_GE(map_fd, 0, "map_create")) 289 + return; 290 + 291 + for (i = 0; i < ARRAY_SIZE(insns); i++) { 292 + val.orig_off = i; 293 + if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem")) 
294 + goto cleanup; 295 + } 296 + 297 + if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze")) 298 + goto cleanup; 299 + 300 + prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1); 301 + if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)")) 302 + goto cleanup; 303 + 304 + for (i = 0; i < ARRAY_SIZE(insns); i++) { 305 + if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem")) 306 + goto cleanup; 307 + 308 + ASSERT_EQ(val.xlated_off, i, "val should be equal i"); 309 + } 310 + 311 + extra_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1); 312 + if (!ASSERT_EQ(extra_fd, -EBUSY, "program should have been rejected (extra_fd != -EBUSY)")) 313 + goto cleanup; 314 + 315 + /* correctness: check that prog is still loadable without fd_array */ 316 + extra_fd = prog_load(insns, ARRAY_SIZE(insns), NULL, 0); 317 + if (!ASSERT_GE(extra_fd, 0, "bpf(BPF_PROG_LOAD): expected no error")) 318 + goto cleanup; 319 + 320 + cleanup: 321 + close(extra_fd); 322 + close(prog_fd); 323 + close(map_fd); 324 + } 325 + 326 + static void check_bpf_no_lookup(void) 327 + { 328 + struct bpf_insn insns[] = { 329 + BPF_LD_MAP_FD(BPF_REG_1, 0), 330 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 331 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 332 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 333 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 334 + BPF_EXIT_INSN(), 335 + }; 336 + int prog_fd = -1, map_fd; 337 + 338 + map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1); 339 + if (!ASSERT_GE(map_fd, 0, "map_create")) 340 + return; 341 + 342 + insns[0].imm = map_fd; 343 + 344 + if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze")) 345 + goto cleanup; 346 + 347 + prog_fd = prog_load(insns, ARRAY_SIZE(insns), NULL, 0); 348 + if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)")) 349 + goto cleanup; 350 + 351 + /* correctness: check that prog is still loadable with normal map */ 352 + close(map_fd); 353 + map_fd = 
map_create(BPF_MAP_TYPE_ARRAY, 1); 354 + insns[0].imm = map_fd; 355 + prog_fd = prog_load(insns, ARRAY_SIZE(insns), NULL, 0); 356 + if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)")) 357 + goto cleanup; 358 + 359 + cleanup: 360 + close(prog_fd); 361 + close(map_fd); 362 + } 363 + 364 + static void check_bpf_side(void) 365 + { 366 + check_bpf_no_lookup(); 367 + } 368 + 369 + static void __test_bpf_insn_array(void) 370 + { 371 + /* Test if offsets are adjusted properly */ 372 + 373 + if (test__start_subtest("one2one")) 374 + check_one_to_one_mapping(); 375 + 376 + if (test__start_subtest("simple")) 377 + check_simple(); 378 + 379 + if (test__start_subtest("deletions")) 380 + check_deletions(); 381 + 382 + if (test__start_subtest("deletions-with-functions")) 383 + check_deletions_with_functions(); 384 + 385 + /* Check all kinds of operations and related restrictions */ 386 + 387 + if (test__start_subtest("incorrect-index")) 388 + check_incorrect_index(); 389 + 390 + if (test__start_subtest("load-unfrozen-map")) 391 + check_load_unfrozen_map(); 392 + 393 + if (test__start_subtest("no-map-reuse")) 394 + check_no_map_reuse(); 395 + 396 + if (test__start_subtest("bpf-side-ops")) 397 + check_bpf_side(); 398 + } 399 + #else 400 + static void __test_bpf_insn_array(void) 401 + { 402 + test__skip(); 403 + } 404 + #endif 405 + 406 + void test_bpf_insn_array(void) 407 + { 408 + __test_bpf_insn_array(); 409 + }