Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/capability.h>
#include <stdlib.h>
#include <test_progs.h>
#include <bpf/btf.h>

#include "autoconf_helper.h"
#include "disasm_helpers.h"
#include "unpriv_helpers.h"
#include "cap_helpers.h"
#include "jit_disasm_helpers.h"

#define str_has_pfx(str, pfx) \
	(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)

#define TEST_LOADER_LOG_BUF_SZ 2097152

#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
#define TEST_TAG_EXPECT_NOT_MSG_PFX "comment:test_expect_not_msg="
#define TEST_TAG_EXPECT_XLATED_PFX "comment:test_expect_xlated="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
#define TEST_TAG_EXPECT_NOT_MSG_PFX_UNPRIV "comment:test_expect_not_msg_unpriv="
#define TEST_TAG_EXPECT_XLATED_PFX_UNPRIV "comment:test_expect_xlated_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
#define TEST_TAG_RETVAL_PFX "comment:test_retval="
#define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv="
#define TEST_TAG_AUXILIARY "comment:test_auxiliary"
#define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv"
#define TEST_BTF_PATH "comment:test_btf_path="
#define TEST_TAG_ARCH "comment:test_arch="
#define TEST_TAG_JITED_PFX "comment:test_jited="
#define TEST_TAG_JITED_PFX_UNPRIV "comment:test_jited_unpriv="
#define TEST_TAG_CAPS_UNPRIV "comment:test_caps_unpriv="
#define TEST_TAG_LOAD_MODE_PFX "comment:load_mode="
#define TEST_TAG_EXPECT_STDERR_PFX "comment:test_expect_stderr="
#define TEST_TAG_EXPECT_STDERR_PFX_UNPRIV "comment:test_expect_stderr_unpriv="
#define TEST_TAG_EXPECT_STDOUT_PFX "comment:test_expect_stdout="
#define TEST_TAG_EXPECT_STDOUT_PFX_UNPRIV "comment:test_expect_stdout_unpriv="

/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE 0xbadcafe
#define TEST_DATA_LEN 64

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFFICIENT_UNALIGNED_ACCESS 1
#else
#define EFFICIENT_UNALIGNED_ACCESS 0
#endif

static int sysctl_unpriv_disabled = -1;

enum mode {
	PRIV = 1,
	UNPRIV = 2
};

enum load_mode {
	JITED = 1 << 0,
	NO_JITED = 1 << 1,
};

struct test_subspec {
	char *name;
	bool expect_failure;
	struct expected_msgs expect_msgs;
	struct expected_msgs expect_xlated;
	struct expected_msgs jited;
	struct expected_msgs stderr;
	struct expected_msgs stdout;
	int retval;
	bool execute;
	__u64 caps;
};

struct test_spec {
	const char *prog_name;
	struct test_subspec priv;
	struct test_subspec unpriv;
	const char *btf_custom_path;
	int log_level;
	int prog_flags;
	int mode_mask;
	int arch_mask;
	int load_mask;
	bool auxiliary;
	bool valid;
};

static int tester_init(struct test_loader *tester)
{
	if (!tester->log_buf) {
		tester->log_buf_sz = TEST_LOADER_LOG_BUF_SZ;
		tester->log_buf = calloc(tester->log_buf_sz, 1);
		if (!ASSERT_OK_PTR(tester->log_buf, "tester_log_buf"))
			return -ENOMEM;
	}

	return 0;
}

void test_loader_fini(struct test_loader *tester)
{
	if (!tester)
		return;

	free(tester->log_buf);
}

static void free_msgs(struct expected_msgs *msgs)
{
	int i;

	for (i = 0; i < msgs->cnt; i++)
		if (msgs->patterns[i].is_regex)
			regfree(&msgs->patterns[i].regex);
	free(msgs->patterns);
	msgs->patterns = NULL;
	msgs->cnt = 0;
}

static void free_test_spec(struct test_spec *spec)
{
	/* Deallocate expect_msgs arrays. */
	free_msgs(&spec->priv.expect_msgs);
	free_msgs(&spec->unpriv.expect_msgs);
	free_msgs(&spec->priv.expect_xlated);
	free_msgs(&spec->unpriv.expect_xlated);
	free_msgs(&spec->priv.jited);
	free_msgs(&spec->unpriv.jited);
	free_msgs(&spec->unpriv.stderr);
	free_msgs(&spec->priv.stderr);
	free_msgs(&spec->unpriv.stdout);
	free_msgs(&spec->priv.stdout);

	free(spec->priv.name);
	free(spec->unpriv.name);
	spec->priv.name = NULL;
	spec->unpriv.name = NULL;
}

/* Compiles regular expression matching pattern.
 * Pattern has a special syntax:
 *
 *   pattern := (<verbatim text> | regex)*
 *   regex := "{{" <posix extended regular expression> "}}"
 *
 * In other words, pattern is a verbatim text with inclusion
 * of regular expressions enclosed in "{{" "}}" pairs.
 * For example, pattern "foo{{[0-9]+}}" matches strings like
 * "foo0", "foo007", etc.
 */
static int compile_regex(const char *pattern, regex_t *regex)
{
	char err_buf[256], buf[256] = {}, *ptr, *buf_end;
	const char *original_pattern = pattern;
	bool in_regex = false;
	int err;

	buf_end = buf + sizeof(buf);
	ptr = buf;
	while (*pattern && ptr < buf_end - 2) {
		if (!in_regex && str_has_pfx(pattern, "{{")) {
			in_regex = true;
			pattern += 2;
			continue;
		}
		if (in_regex && str_has_pfx(pattern, "}}")) {
			in_regex = false;
			pattern += 2;
			continue;
		}
		if (in_regex) {
			*ptr++ = *pattern++;
			continue;
		}
		/* list of characters that need escaping for extended posix regex */
		if (strchr(".[]\\()*+?{}|^$", *pattern)) {
			*ptr++ = '\\';
			*ptr++ = *pattern++;
			continue;
		}
		*ptr++ = *pattern++;
	}
	if (*pattern) {
		PRINT_FAIL("Regexp too long: '%s'\n", original_pattern);
		return -EINVAL;
	}
	if (in_regex) {
		PRINT_FAIL("Regexp has open '{{' but no closing '}}': '%s'\n", original_pattern);
		return -EINVAL;
	}
	err = regcomp(regex, buf, REG_EXTENDED | REG_NEWLINE);
	if (err != 0) {
		regerror(err, regex, err_buf, sizeof(err_buf));
		PRINT_FAIL("Regexp compilation error in '%s': '%s'\n", buf, err_buf);
		return -EINVAL;
	}
	return 0;
}
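
/*
 * Illustration (not part of the original file): how a mixed pattern is
 * compiled. Verbatim text is escaped and "{{...}}" bodies are copied as-is,
 * so "r0 = *(u32 *)(r1 +{{[0-9]+}})" becomes the POSIX ERE
 * "r0 = \*\(u32 \*\)\(r1 \+[0-9]+\)", compiled with REG_EXTENDED | REG_NEWLINE:
 *
 *	regex_t re;
 *
 *	if (compile_regex("r0 = *(u32 *)(r1 +{{[0-9]+}})", &re) == 0) {
 *		regexec(&re, "r0 = *(u32 *)(r1 +8)", 0, NULL, 0);  // matches
 *		regfree(&re);
 *	}
 */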

static int __push_msg(const char *pattern, bool on_next_line, bool negative,
		      struct expected_msgs *msgs)
{
	struct expect_msg *msg;
	void *tmp;
	int err;

	tmp = realloc(msgs->patterns,
		      (1 + msgs->cnt) * sizeof(struct expect_msg));
	if (!tmp) {
		ASSERT_FAIL("failed to realloc memory for messages\n");
		return -ENOMEM;
	}
	msgs->patterns = tmp;
	msg = &msgs->patterns[msgs->cnt];
	msg->on_next_line = on_next_line;
	msg->substr = pattern;
	msg->negative = negative;
	msg->is_regex = false;
	if (strstr(pattern, "{{")) {
		err = compile_regex(pattern, &msg->regex);
		if (err)
			return err;
		msg->is_regex = true;
	}
	msgs->cnt += 1;
	return 0;
}

static int clone_msgs(struct expected_msgs *from, struct expected_msgs *to)
{
	struct expect_msg *msg;
	int i, err;

	for (i = 0; i < from->cnt; i++) {
		msg = &from->patterns[i];
		err = __push_msg(msg->substr, msg->on_next_line, msg->negative, to);
		if (err)
			return err;
	}
	return 0;
}

static int push_msg(const char *substr, bool negative, struct expected_msgs *msgs)
{
	return __push_msg(substr, false, negative, msgs);
}

static int push_disasm_msg(const char *regex_str, bool *on_next_line, struct expected_msgs *msgs)
{
	int err;

	if (strcmp(regex_str, "...") == 0) {
		*on_next_line = false;
		return 0;
	}
	err = __push_msg(regex_str, *on_next_line, false, msgs);
	if (err)
		return err;
	*on_next_line = true;
	return 0;
}
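
/*
 * Note (illustration, not part of the original file): a literal "..." entry
 * in a __xlated()/__jited() pattern sequence is not stored as a pattern at
 * all; it only clears *on_next_line, so the pattern that follows it may match
 * anywhere further down in the disassembly. Consecutive patterns without a
 * "..." between them must match on adjacent lines (see the wrong_line check
 * in validate_msgs() below).
 */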

static int parse_int(const char *str, int *val, const char *name)
{
	char *end;
	long tmp;

	errno = 0;
	if (str_has_pfx(str, "0x"))
		tmp = strtol(str + 2, &end, 16);
	else
		tmp = strtol(str, &end, 10);
	if (errno || end[0] != '\0') {
		PRINT_FAIL("failed to parse %s from '%s'\n", name, str);
		return -EINVAL;
	}
	*val = tmp;
	return 0;
}

static int parse_caps(const char *str, __u64 *val, const char *name)
{
	int cap_flag = 0;
	char *token = NULL, *saveptr = NULL;

	char *str_cpy = strdup(str);
	if (str_cpy == NULL) {
		PRINT_FAIL("Memory allocation failed\n");
		return -EINVAL;
	}

	token = strtok_r(str_cpy, "|", &saveptr);
	while (token != NULL) {
		errno = 0;
		if (!strncmp("CAP_", token, sizeof("CAP_") - 1)) {
			PRINT_FAIL("define %s constant in bpf_misc.h, failed to parse caps\n", token);
			free(str_cpy);
			return -EINVAL;
		}
		cap_flag = strtol(token, NULL, 10);
		if (!cap_flag || errno) {
			PRINT_FAIL("failed to parse caps %s\n", name);
			free(str_cpy);
			return -EINVAL;
		}
		*val |= (1ULL << cap_flag);
		token = strtok_r(NULL, "|", &saveptr);
	}

	free(str_cpy);
	return 0;
}
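
/*
 * Illustration (not part of the original file): a capability list reaches
 * this function as a '|'-separated string of decimal capability numbers,
 * e.g. "38|39" sets bits 38 and 39 in *val. Symbolic CAP_* names are
 * rejected above; they are expected to be mapped to numbers by constants in
 * bpf_misc.h before they get here.
 */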

static int parse_retval(const char *str, int *val, const char *name)
{
	/*
	 * INT_MIN is defined as (-INT_MAX -1), i.e. it doesn't expand to a
	 * single int and cannot be parsed with strtol, so we handle it
	 * separately here. In addition, it expands to different expressions in
	 * different compilers so we use a prefixed _INT_MIN instead.
	 */
	if (strcmp(str, "_INT_MIN") == 0) {
		*val = INT_MIN;
		return 0;
	}

	return parse_int(str, val, name);
}

static void update_flags(int *flags, int flag, bool clear)
{
	if (clear)
		*flags &= ~flag;
	else
		*flags |= flag;
}

/* Matches a string of form '<pfx>[^=]=.*' and returns its suffix.
 * Used to parse btf_decl_tag values.
 * Such values require a unique prefix because the compiler does not add the
 * same __attribute__((btf_decl_tag(...))) twice.
 * Test suite uses two-component tags for such cases:
 *
 *   <pfx> __COUNTER__ '='
 *
 * For example, two consecutive __msg tags '__msg("foo") __msg("foo")'
 * would be encoded as:
 *
 *   [18] DECL_TAG 'comment:test_expect_msg=0=foo' type_id=15 component_idx=-1
 *   [19] DECL_TAG 'comment:test_expect_msg=1=foo' type_id=15 component_idx=-1
 *
 * And the purpose of this function is to extract 'foo' from the above.
 */
static const char *skip_dynamic_pfx(const char *s, const char *pfx)
{
	const char *msg;

	if (strncmp(s, pfx, strlen(pfx)) != 0)
		return NULL;
	msg = s + strlen(pfx);
	msg = strchr(msg, '=');
	if (!msg)
		return NULL;
	return msg + 1;
}

enum arch {
	ARCH_UNKNOWN = 0x1,
	ARCH_X86_64 = 0x2,
	ARCH_ARM64 = 0x4,
	ARCH_RISCV64 = 0x8,
	ARCH_S390X = 0x10,
};

static int get_current_arch(void)
{
#if defined(__x86_64__)
	return ARCH_X86_64;
#elif defined(__aarch64__)
	return ARCH_ARM64;
#elif defined(__riscv) && __riscv_xlen == 64
	return ARCH_RISCV64;
#elif defined(__s390x__)
	return ARCH_S390X;
#endif
	return ARCH_UNKNOWN;
}

/* Uses btf_decl_tag attributes to describe the expected test
 * behavior, see bpf_misc.h for detailed description of each attribute
 * and attribute combinations.
 */
static int parse_test_spec(struct test_loader *tester,
			   struct bpf_object *obj,
			   struct bpf_program *prog,
			   struct test_spec *spec)
{
	const char *description = NULL;
	bool has_unpriv_result = false;
	bool has_unpriv_retval = false;
	bool unpriv_xlated_on_next_line = true;
	bool xlated_on_next_line = true;
	bool unpriv_jit_on_next_line;
	bool jit_on_next_line;
	bool stderr_on_next_line = true;
	bool unpriv_stderr_on_next_line = true;
	bool stdout_on_next_line = true;
	bool unpriv_stdout_on_next_line = true;
	bool collect_jit = false;
	int func_id, i, err = 0;
	u32 arch_mask = 0;
	u32 load_mask = 0;
	struct btf *btf;
	enum arch arch;

	memset(spec, 0, sizeof(*spec));

	spec->prog_name = bpf_program__name(prog);
	spec->prog_flags = testing_prog_flags();

	btf = bpf_object__btf(obj);
	if (!btf) {
		ASSERT_FAIL("BPF object has no BTF");
		return -EINVAL;
	}

	func_id = btf__find_by_name_kind(btf, spec->prog_name, BTF_KIND_FUNC);
	if (func_id < 0) {
		ASSERT_FAIL("failed to find FUNC BTF type for '%s'", spec->prog_name);
		return -EINVAL;
	}

	for (i = 1; i < btf__type_cnt(btf); i++) {
		const char *s, *val, *msg;
		const struct btf_type *t;
		bool clear;
		int flags;

		t = btf__type_by_id(btf, i);
		if (!btf_is_decl_tag(t))
			continue;

		if (t->type != func_id || btf_decl_tag(t)->component_idx != -1)
			continue;

		s = btf__str_by_offset(btf, t->name_off);
		if (str_has_pfx(s, TEST_TAG_DESCRIPTION_PFX)) {
			description = s + sizeof(TEST_TAG_DESCRIPTION_PFX) - 1;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) {
			spec->priv.expect_failure = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS) == 0) {
			spec->priv.expect_failure = false;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE_UNPRIV) == 0) {
			spec->unpriv.expect_failure = true;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS_UNPRIV) == 0) {
			spec->unpriv.expect_failure = false;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_AUXILIARY) == 0) {
			spec->auxiliary = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) {
			spec->auxiliary = true;
			spec->mode_mask |= UNPRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX))) {
			err = push_msg(msg, false, &spec->priv.expect_msgs);
			if (err)
				goto cleanup;
			spec->mode_mask |= PRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_NOT_MSG_PFX))) {
			err = push_msg(msg, true, &spec->priv.expect_msgs);
			if (err)
				goto cleanup;
			spec->mode_mask |= PRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV))) {
			err = push_msg(msg, false, &spec->unpriv.expect_msgs);
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_NOT_MSG_PFX_UNPRIV))) {
			err = push_msg(msg, true, &spec->unpriv.expect_msgs);
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_JITED_PFX))) {
			if (arch_mask == 0) {
				PRINT_FAIL("__jited used before __arch_*");
				goto cleanup;
			}
			if (collect_jit) {
				err = push_disasm_msg(msg, &jit_on_next_line,
						      &spec->priv.jited);
				if (err)
					goto cleanup;
				spec->mode_mask |= PRIV;
			}
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_JITED_PFX_UNPRIV))) {
			if (arch_mask == 0) {
				PRINT_FAIL("__unpriv_jited used before __arch_*");
				goto cleanup;
			}
			if (collect_jit) {
				err = push_disasm_msg(msg, &unpriv_jit_on_next_line,
						      &spec->unpriv.jited);
				if (err)
					goto cleanup;
				spec->mode_mask |= UNPRIV;
			}
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX))) {
			err = push_disasm_msg(msg, &xlated_on_next_line,
					      &spec->priv.expect_xlated);
			if (err)
				goto cleanup;
			spec->mode_mask |= PRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX_UNPRIV))) {
			err = push_disasm_msg(msg, &unpriv_xlated_on_next_line,
					      &spec->unpriv.expect_xlated);
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX) - 1;
			err = parse_retval(val, &spec->priv.retval, "__retval");
			if (err)
				goto cleanup;
			spec->priv.execute = true;
			spec->mode_mask |= PRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX_UNPRIV)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX_UNPRIV) - 1;
			err = parse_retval(val, &spec->unpriv.retval, "__retval_unpriv");
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
			spec->unpriv.execute = true;
			has_unpriv_retval = true;
		} else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) {
			val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1;
			err = parse_int(val, &spec->log_level, "test log level");
			if (err)
				goto cleanup;
		} else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) {
			val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1;

			clear = val[0] == '!';
			if (clear)
				val++;

			if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) {
				update_flags(&spec->prog_flags, BPF_F_STRICT_ALIGNMENT, clear);
			} else if (strcmp(val, "BPF_F_ANY_ALIGNMENT") == 0) {
				update_flags(&spec->prog_flags, BPF_F_ANY_ALIGNMENT, clear);
			} else if (strcmp(val, "BPF_F_TEST_RND_HI32") == 0) {
				update_flags(&spec->prog_flags, BPF_F_TEST_RND_HI32, clear);
			} else if (strcmp(val, "BPF_F_TEST_STATE_FREQ") == 0) {
				update_flags(&spec->prog_flags, BPF_F_TEST_STATE_FREQ, clear);
			} else if (strcmp(val, "BPF_F_SLEEPABLE") == 0) {
				update_flags(&spec->prog_flags, BPF_F_SLEEPABLE, clear);
			} else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) {
				update_flags(&spec->prog_flags, BPF_F_XDP_HAS_FRAGS, clear);
			} else if (strcmp(val, "BPF_F_TEST_REG_INVARIANTS") == 0) {
				update_flags(&spec->prog_flags, BPF_F_TEST_REG_INVARIANTS, clear);
			} else /* assume numeric value */ {
				err = parse_int(val, &flags, "test prog flags");
				if (err)
					goto cleanup;
				update_flags(&spec->prog_flags, flags, clear);
			}
		} else if (str_has_pfx(s, TEST_TAG_ARCH)) {
			val = s + sizeof(TEST_TAG_ARCH) - 1;
			if (strcmp(val, "X86_64") == 0) {
				arch = ARCH_X86_64;
			} else if (strcmp(val, "ARM64") == 0) {
				arch = ARCH_ARM64;
			} else if (strcmp(val, "RISCV64") == 0) {
				arch = ARCH_RISCV64;
			} else if (strcmp(val, "s390x") == 0) {
				arch = ARCH_S390X;
			} else {
				PRINT_FAIL("bad arch spec: '%s'\n", val);
				err = -EINVAL;
				goto cleanup;
			}
			arch_mask |= arch;
			collect_jit = get_current_arch() == arch;
			unpriv_jit_on_next_line = true;
			jit_on_next_line = true;
		} else if (str_has_pfx(s, TEST_BTF_PATH)) {
			spec->btf_custom_path = s + sizeof(TEST_BTF_PATH) - 1;
		} else if (str_has_pfx(s, TEST_TAG_CAPS_UNPRIV)) {
			val = s + sizeof(TEST_TAG_CAPS_UNPRIV) - 1;
			err = parse_caps(val, &spec->unpriv.caps, "test caps");
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_LOAD_MODE_PFX)) {
			val = s + sizeof(TEST_TAG_LOAD_MODE_PFX) - 1;
			if (strcmp(val, "jited") == 0) {
				load_mask = JITED;
			} else if (strcmp(val, "no_jited") == 0) {
				load_mask = NO_JITED;
			} else {
				PRINT_FAIL("bad load spec: '%s'", val);
				err = -EINVAL;
				goto cleanup;
			}
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDERR_PFX))) {
			err = push_disasm_msg(msg, &stderr_on_next_line,
					      &spec->priv.stderr);
			if (err)
				goto cleanup;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDERR_PFX_UNPRIV))) {
			err = push_disasm_msg(msg, &unpriv_stderr_on_next_line,
					      &spec->unpriv.stderr);
			if (err)
				goto cleanup;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDOUT_PFX))) {
			err = push_disasm_msg(msg, &stdout_on_next_line,
					      &spec->priv.stdout);
			if (err)
				goto cleanup;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDOUT_PFX_UNPRIV))) {
			err = push_disasm_msg(msg, &unpriv_stdout_on_next_line,
					      &spec->unpriv.stdout);
			if (err)
				goto cleanup;
		}
	}

	spec->arch_mask = arch_mask ?: -1;
	spec->load_mask = load_mask ?: (JITED | NO_JITED);

	if (spec->mode_mask == 0)
		spec->mode_mask = PRIV;

	if (!description)
		description = spec->prog_name;

	if (spec->mode_mask & PRIV) {
		spec->priv.name = strdup(description);
		if (!spec->priv.name) {
			PRINT_FAIL("failed to allocate memory for priv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}
	}

	if (spec->mode_mask & UNPRIV) {
		int descr_len = strlen(description);
		const char *suffix = " @unpriv";
		char *name;

		name = malloc(descr_len + strlen(suffix) + 1);
		if (!name) {
			PRINT_FAIL("failed to allocate memory for unpriv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}

		strcpy(name, description);
		strcpy(&name[descr_len], suffix);
		spec->unpriv.name = name;
	}

	if (spec->mode_mask & (PRIV | UNPRIV)) {
		if (!has_unpriv_result)
			spec->unpriv.expect_failure = spec->priv.expect_failure;

		if (!has_unpriv_retval) {
			spec->unpriv.retval = spec->priv.retval;
			spec->unpriv.execute = spec->priv.execute;
		}

		if (spec->unpriv.expect_msgs.cnt == 0)
			clone_msgs(&spec->priv.expect_msgs, &spec->unpriv.expect_msgs);
		if (spec->unpriv.expect_xlated.cnt == 0)
			clone_msgs(&spec->priv.expect_xlated, &spec->unpriv.expect_xlated);
		if (spec->unpriv.jited.cnt == 0)
			clone_msgs(&spec->priv.jited, &spec->unpriv.jited);
		if (spec->unpriv.stderr.cnt == 0)
			clone_msgs(&spec->priv.stderr, &spec->unpriv.stderr);
		if (spec->unpriv.stdout.cnt == 0)
			clone_msgs(&spec->priv.stdout, &spec->unpriv.stdout);
	}

	spec->valid = true;

	return 0;

cleanup:
	free_test_spec(spec);
	return err;
}
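
/*
 * Illustration (not part of the original file): the decl tags parsed above
 * are normally generated by the helper macros in bpf_misc.h, e.g. a test
 * program annotated roughly like this (names are made up):
 *
 *	SEC("socket")
 *	__description("some test")
 *	__success __retval(42)
 *	__msg("r0 = 42")
 *	int some_test(void *ctx)
 *	{
 *		return 42;
 *	}
 *
 * carries btf_decl_tag strings that start with the TEST_TAG_* prefixes
 * defined at the top of this file, e.g. "comment:test_expect_msg=0=r0 = 42".
 */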

static void prepare_case(struct test_loader *tester,
			 struct test_spec *spec,
			 struct bpf_object *obj,
			 struct bpf_program *prog)
{
	int min_log_level = 0, prog_flags;

	if (env.verbosity > VERBOSE_NONE)
		min_log_level = 1;
	if (env.verbosity > VERBOSE_VERY)
		min_log_level = 2;

	bpf_program__set_log_buf(prog, tester->log_buf, tester->log_buf_sz);

	/* Make sure we set at least minimal log level, unless test requires
	 * even higher level already. Make sure to preserve independent log
	 * level 4 (verifier stats), though.
	 */
	if ((spec->log_level & 3) < min_log_level)
		bpf_program__set_log_level(prog, (spec->log_level & 4) | min_log_level);
	else
		bpf_program__set_log_level(prog, spec->log_level);

	prog_flags = bpf_program__flags(prog);
	bpf_program__set_flags(prog, prog_flags | spec->prog_flags);

	tester->log_buf[0] = '\0';
}

static void emit_verifier_log(const char *log_buf, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log_buf);
}

static void emit_xlated(const char *xlated, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "XLATED:\n=============\n%s=============\n", xlated);
}

static void emit_jited(const char *jited, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "JITED:\n=============\n%s=============\n", jited);
}

static void emit_stderr(const char *stderr, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "STDERR:\n=============\n%s=============\n", stderr);
}

static void emit_stdout(const char *bpf_stdout, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "STDOUT:\n=============\n%s=============\n", bpf_stdout);
}

static const char *match_msg(struct expect_msg *msg, const char **log)
{
	const char *match = NULL;
	regmatch_t reg_match[1];
	int err;

	if (!msg->is_regex) {
		match = strstr(*log, msg->substr);
		if (match)
			*log = match + strlen(msg->substr);
	} else {
		err = regexec(&msg->regex, *log, 1, reg_match, 0);
		if (err == 0) {
			match = *log + reg_match[0].rm_so;
			*log += reg_match[0].rm_eo;
		}
	}
	return match;
}

static int count_lines(const char *start, const char *end)
{
	const char *tmp;
	int n = 0;

	for (tmp = start; tmp < end; ++tmp)
		if (*tmp == '\n')
			n++;
	return n;
}

struct match {
	const char *start;
	const char *end;
	int line;
};

/*
 * Positive messages are matched sequentially; each subsequent message is
 * looked for starting from the end of the previously matched one.
 */
static void match_positive_msgs(const char *log, struct expected_msgs *msgs, struct match *matches)
{
	const char *prev_match;
	int i, line;

	prev_match = log;
	line = 0;
	for (i = 0; i < msgs->cnt; i++) {
		struct expect_msg *msg = &msgs->patterns[i];
		const char *match = NULL;

		if (msg->negative)
			continue;

		match = match_msg(msg, &log);
		if (match) {
			line += count_lines(prev_match, match);
			matches[i].start = match;
			matches[i].end = log;
			matches[i].line = line;
			prev_match = match;
		}
	}
}

/*
 * Each negative message N located between positive messages P1 and P2
 * is matched within the span P1.end .. P2.start. Consequently, negative
 * messages are unordered within the span.
 */
static void match_negative_msgs(const char *log, struct expected_msgs *msgs, struct match *matches)
{
	const char *start = log, *end, *next, *match;
	const char *log_end = log + strlen(log);
	int i, j, next_positive;

	for (i = 0; i < msgs->cnt; i++) {
		struct expect_msg *msg = &msgs->patterns[i];

		/* positive message bumps span start */
		if (!msg->negative) {
			start = matches[i].end ?: start;
			continue;
		}

		/* count stride of negative patterns and adjust span end */
		end = log_end;
		for (next_positive = i + 1; next_positive < msgs->cnt; next_positive++) {
			if (!msgs->patterns[next_positive].negative) {
				end = matches[next_positive].start;
				break;
			}
		}

		/* try matching negative messages within identified span */
		for (j = i; j < next_positive; j++) {
			msg = &msgs->patterns[j];
			next = start;
			match = match_msg(msg, &next);
			if (match && next <= end) {
				matches[j].start = match;
				matches[j].end = next;
			}
		}

		/* -1 to account for i++ */
		i = next_positive - 1;
	}
}
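
/*
 * Worked example (illustration, not part of the original file): for the
 * pattern sequence positive "A", negative "X", positive "B",
 * match_positive_msgs() locates "A" and then "B" in order, and
 * match_negative_msgs() then requires that "X" does not occur in the log
 * span between the end of the "A" match and the start of the "B" match.
 * A trailing negative pattern is checked against the span from the last
 * positive match to the end of the log.
 */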

void validate_msgs(const char *log_buf, struct expected_msgs *msgs,
		   void (*emit_fn)(const char *buf, bool force))
{
	struct match matches[msgs->cnt];
	struct match *prev_match = NULL;
	int i, j;

	memset(matches, 0, sizeof(*matches) * msgs->cnt);
	match_positive_msgs(log_buf, msgs, matches);
	match_negative_msgs(log_buf, msgs, matches);

	for (i = 0; i < msgs->cnt; i++) {
		struct expect_msg *msg = &msgs->patterns[i];
		struct match *match = &matches[i];
		const char *pat_status;
		bool unexpected;
		bool wrong_line;
		bool no_match;

		no_match = !msg->negative && !match->start;
		wrong_line = !msg->negative &&
			     msg->on_next_line &&
			     prev_match && prev_match->line + 1 != match->line;
		unexpected = msg->negative && match->start;
		if (no_match || wrong_line || unexpected) {
			PRINT_FAIL("expect_msg\n");
			if (env.verbosity == VERBOSE_NONE)
				emit_fn(log_buf, true /*force*/);
			for (j = 0; j <= i; j++) {
				msg = &msgs->patterns[j];
				if (j < i)
					pat_status = "MATCHED ";
				else if (wrong_line)
					pat_status = "WRONG LINE";
				else if (no_match)
					pat_status = "EXPECTED ";
				else
					pat_status = "UNEXPECTED";
				fprintf(stderr, "%s %s: '%s'\n",
					pat_status,
					msg->is_regex ? " REGEX" : "SUBSTR",
					msg->substr);
			}
			if (wrong_line) {
				fprintf(stderr,
					"expecting match at line %d, actual match is at line %d\n",
					prev_match->line + 1, match->line);
			}
			break;
		}

		if (!msg->negative)
			prev_match = match;
	}
}

struct cap_state {
	__u64 old_caps;
	bool initialized;
};

static int drop_capabilities(struct cap_state *caps)
{
	const __u64 caps_to_drop = (1ULL << CAP_SYS_ADMIN | 1ULL << CAP_NET_ADMIN |
				    1ULL << CAP_PERFMON | 1ULL << CAP_BPF);
	int err;

	err = cap_disable_effective(caps_to_drop, &caps->old_caps);
	if (err) {
		PRINT_FAIL("failed to drop capabilities: %i, %s\n", err, strerror(-err));
		return err;
	}

	caps->initialized = true;
	return 0;
}

static int restore_capabilities(struct cap_state *caps)
{
	int err;

	if (!caps->initialized)
		return 0;

	err = cap_enable_effective(caps->old_caps, NULL);
	if (err)
		PRINT_FAIL("failed to restore capabilities: %i, %s\n", err, strerror(-err));
	caps->initialized = false;
	return err;
}

static bool can_execute_unpriv(struct test_loader *tester, struct test_spec *spec)
{
	if (sysctl_unpriv_disabled < 0)
		sysctl_unpriv_disabled = get_unpriv_disabled() ? 1 : 0;
	if (sysctl_unpriv_disabled)
		return false;
	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS)
		return false;
	return true;
}

static bool is_unpriv_capable_map(struct bpf_map *map)
{
	enum bpf_map_type type;
	__u32 flags;

	type = bpf_map__type(map);

	switch (type) {
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		flags = bpf_map__map_flags(map);
		return !(flags & BPF_F_ZERO_SEED);
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
		return true;
	default:
		return false;
	}
}

static int do_prog_test_run(int fd_prog, int *retval, bool empty_opts)
{
	__u8 tmp_out[TEST_DATA_LEN << 2] = {};
	__u8 tmp_in[TEST_DATA_LEN] = {};
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = tmp_in,
		    .data_size_in = sizeof(tmp_in),
		    .data_out = tmp_out,
		    .data_size_out = sizeof(tmp_out),
		    .repeat = 1,
	);

	if (empty_opts) {
		memset(&topts, 0, sizeof(struct bpf_test_run_opts));
		topts.sz = sizeof(struct bpf_test_run_opts);
	}
	err = bpf_prog_test_run_opts(fd_prog, &topts);
	saved_errno = errno;

	if (err) {
		PRINT_FAIL("FAIL: Unexpected bpf_prog_test_run error: %d (%s) ",
			   saved_errno, strerror(saved_errno));
		return err;
	}

	ASSERT_OK(0, "bpf_prog_test_run");
	*retval = topts.retval;

	return 0;
}

static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subspec)
{
	if (!subspec->execute)
		return false;

	if (subspec->expect_failure)
		return false;

	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) {
		if (env.verbosity != VERBOSE_NONE)
			printf("alignment prevents execution\n");
		return false;
	}

	return true;
}

/* Get a disassembly of BPF program after verifier applies all rewrites */
static int get_xlated_program_text(int prog_fd, char *text, size_t text_sz)
{
	struct bpf_insn *insn_start = NULL, *insn, *insn_end;
	__u32 insns_cnt = 0, i;
	char buf[64];
	FILE *out = NULL;
	int err;

	err = get_xlated_program(prog_fd, &insn_start, &insns_cnt);
	if (!ASSERT_OK(err, "get_xlated_program"))
		goto out;
	out = fmemopen(text, text_sz, "w");
	if (!ASSERT_OK_PTR(out, "open_memstream"))
		goto out;
	insn_end = insn_start + insns_cnt;
	insn = insn_start;
	while (insn < insn_end) {
		i = insn - insn_start;
		insn = disasm_insn(insn, buf, sizeof(buf));
		fprintf(out, "%d: %s\n", i, buf);
	}
	fflush(out);

out:
	free(insn_start);
	if (out)
		fclose(out);
	return err;
}

/* Read the bpf stream corresponding to the stream_id */
static int get_stream(int stream_id, int prog_fd, char *text, size_t text_sz)
{
	LIBBPF_OPTS(bpf_prog_stream_read_opts, ropts);
	int ret;

	ret = bpf_prog_stream_read(prog_fd, stream_id, text, text_sz, &ropts);
	if (!ASSERT_GT(ret, 0, "stream read"))
		return ret;
	text[ret] = '\0';

	return ret;
}
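
/*
 * Note (illustration, not part of the original file): the callers below pass
 * stream id 2 to read the program's stderr stream and stream id 1 for its
 * stdout stream, then run the collected subspec->stderr / subspec->stdout
 * patterns through validate_msgs() just like the verifier log.
 */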

/* this function is forced noinline and has a short generic name to look better
 * in test_progs output (in case of a failure)
 */
static noinline
void run_subtest(struct test_loader *tester,
		 struct bpf_object_open_opts *open_opts,
		 const void *obj_bytes,
		 size_t obj_byte_cnt,
		 struct test_spec *specs,
		 struct test_spec *spec,
		 bool unpriv)
{
	struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
	int current_runtime = is_jit_enabled() ? JITED : NO_JITED;
	struct bpf_program *tprog = NULL, *tprog_iter;
	struct bpf_link *link, *links[32] = {};
	struct test_spec *spec_iter;
	struct cap_state caps = {};
	struct bpf_object *tobj;
	struct bpf_map *map;
	int retval, err, i;
	int links_cnt = 0;
	bool should_load;

	if (!test__start_subtest(subspec->name))
		return;

	if ((get_current_arch() & spec->arch_mask) == 0) {
		test__skip();
		return;
	}

	if ((current_runtime & spec->load_mask) == 0) {
		test__skip();
		return;
	}

	if (unpriv) {
		if (!can_execute_unpriv(tester, spec)) {
			test__skip();
			test__end_subtest();
			return;
		}
		if (drop_capabilities(&caps)) {
			test__end_subtest();
			return;
		}
		if (subspec->caps) {
			err = cap_enable_effective(subspec->caps, NULL);
			if (err) {
				PRINT_FAIL("failed to set capabilities: %i, %s\n", err, strerror(-err));
				goto subtest_cleanup;
			}
		}
	}

	/* Implicitly reset to NULL if next test case doesn't specify */
	open_opts->btf_custom_path = spec->btf_custom_path;

	tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, open_opts);
	if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */
		goto subtest_cleanup;

	i = 0;
	bpf_object__for_each_program(tprog_iter, tobj) {
		spec_iter = &specs[i++];
		should_load = false;

		if (spec_iter->valid) {
			if (strcmp(bpf_program__name(tprog_iter), spec->prog_name) == 0) {
				tprog = tprog_iter;
				should_load = true;
			}

			if (spec_iter->auxiliary &&
			    spec_iter->mode_mask & (unpriv ? UNPRIV : PRIV))
				should_load = true;
		}

		bpf_program__set_autoload(tprog_iter, should_load);
	}

	prepare_case(tester, spec, tobj, tprog);

	/* By default bpf_object__load() automatically creates all
	 * maps declared in the skeleton. Some map types are only
	 * allowed in priv mode. Disable autoload for such maps in
	 * unpriv mode.
	 */
	bpf_object__for_each_map(map, tobj)
		bpf_map__set_autocreate(map, !unpriv || is_unpriv_capable_map(map));

	err = bpf_object__load(tobj);
	if (subspec->expect_failure) {
		if (!ASSERT_ERR(err, "unexpected_load_success")) {
			emit_verifier_log(tester->log_buf, false /*force*/);
			goto tobj_cleanup;
		}
	} else {
		if (!ASSERT_OK(err, "unexpected_load_failure")) {
			emit_verifier_log(tester->log_buf, true /*force*/);
			goto tobj_cleanup;
		}
	}
	emit_verifier_log(tester->log_buf, false /*force*/);
	validate_msgs(tester->log_buf, &subspec->expect_msgs, emit_verifier_log);

	/* Restore capabilities because the kernel will silently ignore requests
	 * for program info (such as xlated program text) if we are not
	 * bpf-capable. Also, for some reason test_verifier executes programs
	 * with all capabilities restored. Do the same here.
	 */
	if (restore_capabilities(&caps))
		goto tobj_cleanup;

	if (subspec->expect_xlated.cnt) {
		err = get_xlated_program_text(bpf_program__fd(tprog),
					      tester->log_buf, tester->log_buf_sz);
		if (err)
			goto tobj_cleanup;
		emit_xlated(tester->log_buf, false /*force*/);
		validate_msgs(tester->log_buf, &subspec->expect_xlated, emit_xlated);
	}

	if (subspec->jited.cnt) {
		err = get_jited_program_text(bpf_program__fd(tprog),
					     tester->log_buf, tester->log_buf_sz);
		if (err == -EOPNOTSUPP) {
			printf("%s:SKIP: jited programs disassembly is not supported,\n", __func__);
			printf("%s:SKIP: tests are built w/o LLVM development libs\n", __func__);
			test__skip();
			goto tobj_cleanup;
		}
		if (!ASSERT_EQ(err, 0, "get_jited_program_text"))
			goto tobj_cleanup;
		emit_jited(tester->log_buf, false /*force*/);
		validate_msgs(tester->log_buf, &subspec->jited, emit_jited);
	}

	if (should_do_test_run(spec, subspec)) {
		/* Do bpf_map__attach_struct_ops() for each struct_ops map.
		 * This should trigger bpf_struct_ops->reg callback on kernel side.
		 */
		bpf_object__for_each_map(map, tobj) {
			if (!bpf_map__autocreate(map) ||
			    bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
				continue;
			if (links_cnt >= ARRAY_SIZE(links)) {
				PRINT_FAIL("too many struct_ops maps");
				goto tobj_cleanup;
			}
			link = bpf_map__attach_struct_ops(map);
			if (!link) {
				PRINT_FAIL("bpf_map__attach_struct_ops failed for map %s: err=%d\n",
					   bpf_map__name(map), -errno);
				goto tobj_cleanup;
			}
			links[links_cnt++] = link;
		}

		if (tester->pre_execution_cb) {
			err = tester->pre_execution_cb(tobj);
			if (err) {
				PRINT_FAIL("pre_execution_cb failed: %d\n", err);
				goto tobj_cleanup;
			}
		}

		err = do_prog_test_run(bpf_program__fd(tprog), &retval,
				       bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL ? true : false);
		if (!err && retval != subspec->retval && subspec->retval != POINTER_VALUE) {
			PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
			goto tobj_cleanup;
		}

		if (subspec->stderr.cnt) {
			err = get_stream(2, bpf_program__fd(tprog),
					 tester->log_buf, tester->log_buf_sz);
			if (err <= 0) {
				PRINT_FAIL("Unexpected retval from get_stream(): %d, errno = %d\n",
					   err, errno);
				goto tobj_cleanup;
			}
			emit_stderr(tester->log_buf, false /*force*/);
			validate_msgs(tester->log_buf, &subspec->stderr, emit_stderr);
		}

		if (subspec->stdout.cnt) {
			err = get_stream(1, bpf_program__fd(tprog),
					 tester->log_buf, tester->log_buf_sz);
			if (err <= 0) {
				PRINT_FAIL("Unexpected retval from get_stream(): %d, errno = %d\n",
					   err, errno);
				goto tobj_cleanup;
			}
			emit_stdout(tester->log_buf, false /*force*/);
			validate_msgs(tester->log_buf, &subspec->stdout, emit_stdout);
		}

		/* redo bpf_map__attach_struct_ops for each test */
		while (links_cnt > 0)
			bpf_link__destroy(links[--links_cnt]);
	}

tobj_cleanup:
	while (links_cnt > 0)
		bpf_link__destroy(links[--links_cnt]);
	bpf_object__close(tobj);
subtest_cleanup:
	test__end_subtest();
	restore_capabilities(&caps);
}

static void process_subtest(struct test_loader *tester,
			    const char *skel_name,
			    skel_elf_bytes_fn elf_bytes_factory)
{
	LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name);
	struct test_spec *specs = NULL;
	struct bpf_object *obj = NULL;
	struct bpf_program *prog;
	const void *obj_bytes;
	int err, i, nr_progs;
	size_t obj_byte_cnt;

	if (tester_init(tester) < 0)
		return; /* failed to initialize tester */

	obj_bytes = elf_bytes_factory(&obj_byte_cnt);
	obj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, &open_opts);
	if (!ASSERT_OK_PTR(obj, "obj_open_mem"))
		return;

	nr_progs = 0;
	bpf_object__for_each_program(prog, obj)
		++nr_progs;

	specs = calloc(nr_progs, sizeof(struct test_spec));
	if (!ASSERT_OK_PTR(specs, "specs_alloc"))
		return;

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		/* ignore tests for which we can't derive test specification */
		err = parse_test_spec(tester, obj, prog, &specs[i++]);
		if (err)
			PRINT_FAIL("Can't parse test spec for program '%s'\n",
				   bpf_program__name(prog));
	}

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		struct test_spec *spec = &specs[i++];

		if (!spec->valid || spec->auxiliary)
			continue;

		if (spec->mode_mask & PRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
				    specs, spec, false);
		if (spec->mode_mask & UNPRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
				    specs, spec, true);
	}

	for (i = 0; i < nr_progs; ++i)
		free_test_spec(&specs[i]);
	free(specs);
	bpf_object__close(obj);
}

void test_loader__run_subtests(struct test_loader *tester,
			       const char *skel_name,
			       skel_elf_bytes_fn elf_bytes_factory)
{
	/* see comment in run_subtest() for why we do this function nesting */
	process_subtest(tester, skel_name, elf_bytes_factory);
}