Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libbpf: Improve BPF_PROG2 macro code quality and description

Commit 34586d29f8df ("libbpf: Add new BPF_PROG2 macro") added BPF_PROG2
macro for trampoline based programs with struct arguments. Andrii
made a few suggestions to improve code quality and description.
This patch implements these suggestions, including a better internal
macro name, a consistent usage pattern for __builtin_choose_expr(),
a simpler macro definition for the always-inline function arguments and
a better macro description.

Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/bpf/20220910025214.1536510-1-yhs@fb.com

authored by

Yonghong Song and committed by
Andrii Nakryiko
9f2f5d78 c12a0376

+90 -62
+90 -62
tools/lib/bpf/bpf_tracing.h
··· 438 438 static __always_inline typeof(name(0)) \ 439 439 ____##name(unsigned long long *ctx, ##args) 440 440 441 - #ifndef ____bpf_nth 442 - #define ____bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N 441 + #ifndef ___bpf_nth2 442 + #define ___bpf_nth2(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, \ 443 + _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N 443 444 #endif 444 - #ifndef ____bpf_narg 445 - #define ____bpf_narg(...) ____bpf_nth(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0) 445 + #ifndef ___bpf_narg2 446 + #define ___bpf_narg2(...) \ 447 + ___bpf_nth2(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, \ 448 + 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0) 446 449 #endif 447 450 448 - #define BPF_REG_CNT(t) \ 449 - (__builtin_choose_expr(sizeof(t) == 1 || sizeof(t) == 2 || sizeof(t) == 4 || sizeof(t) == 8, 1, \ 450 - __builtin_choose_expr(sizeof(t) == 16, 2, \ 451 - (void)0))) 452 - 453 - #define ____bpf_reg_cnt0() (0) 454 - #define ____bpf_reg_cnt1(t, x) (____bpf_reg_cnt0() + BPF_REG_CNT(t)) 455 - #define ____bpf_reg_cnt2(t, x, args...) (____bpf_reg_cnt1(args) + BPF_REG_CNT(t)) 456 - #define ____bpf_reg_cnt3(t, x, args...) (____bpf_reg_cnt2(args) + BPF_REG_CNT(t)) 457 - #define ____bpf_reg_cnt4(t, x, args...) (____bpf_reg_cnt3(args) + BPF_REG_CNT(t)) 458 - #define ____bpf_reg_cnt5(t, x, args...) (____bpf_reg_cnt4(args) + BPF_REG_CNT(t)) 459 - #define ____bpf_reg_cnt6(t, x, args...) (____bpf_reg_cnt5(args) + BPF_REG_CNT(t)) 460 - #define ____bpf_reg_cnt7(t, x, args...) (____bpf_reg_cnt6(args) + BPF_REG_CNT(t)) 461 - #define ____bpf_reg_cnt8(t, x, args...) (____bpf_reg_cnt7(args) + BPF_REG_CNT(t)) 462 - #define ____bpf_reg_cnt9(t, x, args...) (____bpf_reg_cnt8(args) + BPF_REG_CNT(t)) 463 - #define ____bpf_reg_cnt10(t, x, args...) 
(____bpf_reg_cnt9(args) + BPF_REG_CNT(t)) 464 - #define ____bpf_reg_cnt11(t, x, args...) (____bpf_reg_cnt10(args) + BPF_REG_CNT(t)) 465 - #define ____bpf_reg_cnt12(t, x, args...) (____bpf_reg_cnt11(args) + BPF_REG_CNT(t)) 466 - #define ____bpf_reg_cnt(args...) ___bpf_apply(____bpf_reg_cnt, ____bpf_narg(args))(args) 467 - 468 - #define ____bpf_union_arg(t, x, n) \ 469 - __builtin_choose_expr(sizeof(t) == 1, ({ union { struct { __u8 x; } ___z; t x; } ___tmp = { .___z = {ctx[n]}}; ___tmp.x; }), \ 470 - __builtin_choose_expr(sizeof(t) == 2, ({ union { struct { __u16 x; } ___z; t x; } ___tmp = { .___z = {ctx[n]} }; ___tmp.x; }), \ 471 - __builtin_choose_expr(sizeof(t) == 4, ({ union { struct { __u32 x; } ___z; t x; } ___tmp = { .___z = {ctx[n]} }; ___tmp.x; }), \ 472 - __builtin_choose_expr(sizeof(t) == 8, ({ union { struct { __u64 x; } ___z; t x; } ___tmp = {.___z = {ctx[n]} }; ___tmp.x; }), \ 473 - __builtin_choose_expr(sizeof(t) == 16, ({ union { struct { __u64 x, y; } ___z; t x; } ___tmp = {.___z = {ctx[n], ctx[n + 1]} }; ___tmp.x; }), \ 451 + #define ___bpf_treg_cnt(t) \ 452 + __builtin_choose_expr(sizeof(t) == 1, 1, \ 453 + __builtin_choose_expr(sizeof(t) == 2, 1, \ 454 + __builtin_choose_expr(sizeof(t) == 4, 1, \ 455 + __builtin_choose_expr(sizeof(t) == 8, 1, \ 456 + __builtin_choose_expr(sizeof(t) == 16, 2, \ 474 457 (void)0))))) 475 458 476 - #define ____bpf_ctx_arg0(n, args...) 477 - #define ____bpf_ctx_arg1(n, t, x) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt1(t, x)) 478 - #define ____bpf_ctx_arg2(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt2(t, x, args)) ____bpf_ctx_arg1(n, args) 479 - #define ____bpf_ctx_arg3(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt3(t, x, args)) ____bpf_ctx_arg2(n, args) 480 - #define ____bpf_ctx_arg4(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt4(t, x, args)) ____bpf_ctx_arg3(n, args) 481 - #define ____bpf_ctx_arg5(n, t, x, args...) 
, ____bpf_union_arg(t, x, n - ____bpf_reg_cnt5(t, x, args)) ____bpf_ctx_arg4(n, args) 482 - #define ____bpf_ctx_arg6(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt6(t, x, args)) ____bpf_ctx_arg5(n, args) 483 - #define ____bpf_ctx_arg7(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt7(t, x, args)) ____bpf_ctx_arg6(n, args) 484 - #define ____bpf_ctx_arg8(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt8(t, x, args)) ____bpf_ctx_arg7(n, args) 485 - #define ____bpf_ctx_arg9(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt9(t, x, args)) ____bpf_ctx_arg8(n, args) 486 - #define ____bpf_ctx_arg10(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt10(t, x, args)) ____bpf_ctx_arg9(n, args) 487 - #define ____bpf_ctx_arg11(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt11(t, x, args)) ____bpf_ctx_arg10(n, args) 488 - #define ____bpf_ctx_arg12(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt12(t, x, args)) ____bpf_ctx_arg11(n, args) 489 - #define ____bpf_ctx_arg(n, args...) ___bpf_apply(____bpf_ctx_arg, ____bpf_narg(args))(n, args) 459 + #define ___bpf_reg_cnt0() (0) 460 + #define ___bpf_reg_cnt1(t, x) (___bpf_reg_cnt0() + ___bpf_treg_cnt(t)) 461 + #define ___bpf_reg_cnt2(t, x, args...) (___bpf_reg_cnt1(args) + ___bpf_treg_cnt(t)) 462 + #define ___bpf_reg_cnt3(t, x, args...) (___bpf_reg_cnt2(args) + ___bpf_treg_cnt(t)) 463 + #define ___bpf_reg_cnt4(t, x, args...) (___bpf_reg_cnt3(args) + ___bpf_treg_cnt(t)) 464 + #define ___bpf_reg_cnt5(t, x, args...) (___bpf_reg_cnt4(args) + ___bpf_treg_cnt(t)) 465 + #define ___bpf_reg_cnt6(t, x, args...) (___bpf_reg_cnt5(args) + ___bpf_treg_cnt(t)) 466 + #define ___bpf_reg_cnt7(t, x, args...) (___bpf_reg_cnt6(args) + ___bpf_treg_cnt(t)) 467 + #define ___bpf_reg_cnt8(t, x, args...) (___bpf_reg_cnt7(args) + ___bpf_treg_cnt(t)) 468 + #define ___bpf_reg_cnt9(t, x, args...) 
(___bpf_reg_cnt8(args) + ___bpf_treg_cnt(t)) 469 + #define ___bpf_reg_cnt10(t, x, args...) (___bpf_reg_cnt9(args) + ___bpf_treg_cnt(t)) 470 + #define ___bpf_reg_cnt11(t, x, args...) (___bpf_reg_cnt10(args) + ___bpf_treg_cnt(t)) 471 + #define ___bpf_reg_cnt12(t, x, args...) (___bpf_reg_cnt11(args) + ___bpf_treg_cnt(t)) 472 + #define ___bpf_reg_cnt(args...) ___bpf_apply(___bpf_reg_cnt, ___bpf_narg2(args))(args) 490 473 491 - #define ____bpf_ctx_decl0() 492 - #define ____bpf_ctx_decl1(t, x) , t x 493 - #define ____bpf_ctx_decl2(t, x, args...) , t x ____bpf_ctx_decl1(args) 494 - #define ____bpf_ctx_decl3(t, x, args...) , t x ____bpf_ctx_decl2(args) 495 - #define ____bpf_ctx_decl4(t, x, args...) , t x ____bpf_ctx_decl3(args) 496 - #define ____bpf_ctx_decl5(t, x, args...) , t x ____bpf_ctx_decl4(args) 497 - #define ____bpf_ctx_decl6(t, x, args...) , t x ____bpf_ctx_decl5(args) 498 - #define ____bpf_ctx_decl7(t, x, args...) , t x ____bpf_ctx_decl6(args) 499 - #define ____bpf_ctx_decl8(t, x, args...) , t x ____bpf_ctx_decl7(args) 500 - #define ____bpf_ctx_decl9(t, x, args...) , t x ____bpf_ctx_decl8(args) 501 - #define ____bpf_ctx_decl10(t, x, args...) , t x ____bpf_ctx_decl9(args) 502 - #define ____bpf_ctx_decl11(t, x, args...) , t x ____bpf_ctx_decl10(args) 503 - #define ____bpf_ctx_decl12(t, x, args...) , t x ____bpf_ctx_decl11(args) 504 - #define ____bpf_ctx_decl(args...) 
___bpf_apply(____bpf_ctx_decl, ____bpf_narg(args))(args) 474 + #define ___bpf_union_arg(t, x, n) \ 475 + __builtin_choose_expr(sizeof(t) == 1, ({ union { __u8 z[1]; t x; } ___t = { .z = {ctx[n]}}; ___t.x; }), \ 476 + __builtin_choose_expr(sizeof(t) == 2, ({ union { __u16 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \ 477 + __builtin_choose_expr(sizeof(t) == 4, ({ union { __u32 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \ 478 + __builtin_choose_expr(sizeof(t) == 8, ({ union { __u64 z[1]; t x; } ___t = {.z = {ctx[n]} }; ___t.x; }), \ 479 + __builtin_choose_expr(sizeof(t) == 16, ({ union { __u64 z[2]; t x; } ___t = {.z = {ctx[n], ctx[n + 1]} }; ___t.x; }), \ 480 + (void)0))))) 481 + 482 + #define ___bpf_ctx_arg0(n, args...) 483 + #define ___bpf_ctx_arg1(n, t, x) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt1(t, x)) 484 + #define ___bpf_ctx_arg2(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt2(t, x, args)) ___bpf_ctx_arg1(n, args) 485 + #define ___bpf_ctx_arg3(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt3(t, x, args)) ___bpf_ctx_arg2(n, args) 486 + #define ___bpf_ctx_arg4(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt4(t, x, args)) ___bpf_ctx_arg3(n, args) 487 + #define ___bpf_ctx_arg5(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt5(t, x, args)) ___bpf_ctx_arg4(n, args) 488 + #define ___bpf_ctx_arg6(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt6(t, x, args)) ___bpf_ctx_arg5(n, args) 489 + #define ___bpf_ctx_arg7(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt7(t, x, args)) ___bpf_ctx_arg6(n, args) 490 + #define ___bpf_ctx_arg8(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt8(t, x, args)) ___bpf_ctx_arg7(n, args) 491 + #define ___bpf_ctx_arg9(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt9(t, x, args)) ___bpf_ctx_arg8(n, args) 492 + #define ___bpf_ctx_arg10(n, t, x, args...) 
, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt10(t, x, args)) ___bpf_ctx_arg9(n, args) 493 + #define ___bpf_ctx_arg11(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt11(t, x, args)) ___bpf_ctx_arg10(n, args) 494 + #define ___bpf_ctx_arg12(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt12(t, x, args)) ___bpf_ctx_arg11(n, args) 495 + #define ___bpf_ctx_arg(args...) ___bpf_apply(___bpf_ctx_arg, ___bpf_narg2(args))(___bpf_reg_cnt(args), args) 496 + 497 + #define ___bpf_ctx_decl0() 498 + #define ___bpf_ctx_decl1(t, x) , t x 499 + #define ___bpf_ctx_decl2(t, x, args...) , t x ___bpf_ctx_decl1(args) 500 + #define ___bpf_ctx_decl3(t, x, args...) , t x ___bpf_ctx_decl2(args) 501 + #define ___bpf_ctx_decl4(t, x, args...) , t x ___bpf_ctx_decl3(args) 502 + #define ___bpf_ctx_decl5(t, x, args...) , t x ___bpf_ctx_decl4(args) 503 + #define ___bpf_ctx_decl6(t, x, args...) , t x ___bpf_ctx_decl5(args) 504 + #define ___bpf_ctx_decl7(t, x, args...) , t x ___bpf_ctx_decl6(args) 505 + #define ___bpf_ctx_decl8(t, x, args...) , t x ___bpf_ctx_decl7(args) 506 + #define ___bpf_ctx_decl9(t, x, args...) , t x ___bpf_ctx_decl8(args) 507 + #define ___bpf_ctx_decl10(t, x, args...) , t x ___bpf_ctx_decl9(args) 508 + #define ___bpf_ctx_decl11(t, x, args...) , t x ___bpf_ctx_decl10(args) 509 + #define ___bpf_ctx_decl12(t, x, args...) , t x ___bpf_ctx_decl11(args) 510 + #define ___bpf_ctx_decl(args...) ___bpf_apply(___bpf_ctx_decl, ___bpf_narg2(args))(args) 505 511 506 512 /* 507 - * BPF_PROG2 can handle struct arguments. 513 + * BPF_PROG2 is an enhanced version of BPF_PROG in order to handle struct 514 + * arguments. Since each struct argument might take one or two u64 values 515 + * in the trampoline stack, argument type size is needed to place proper number 516 + * of u64 values for each argument. Therefore, BPF_PROG2 has different 517 + * syntax from BPF_PROG. For example, for the following BPF_PROG syntax: 518 + * 519 + * int BPF_PROG(test2, int a, int b) { ... 
} 520 + * 521 + * the corresponding BPF_PROG2 syntax is: 522 + * 523 + * int BPF_PROG2(test2, int, a, int, b) { ... } 524 + * 525 + * where type and the corresponding argument name are separated by comma. 526 + * 527 + * Use BPF_PROG2 macro if one of the arguments might be a struct/union larger 528 + * than 8 bytes: 529 + * 530 + * int BPF_PROG2(test_struct_arg, struct bpf_testmod_struct_arg_1, a, int, b, 531 + * int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret) 532 + * { 533 + * // access a, b, c, d, e, and ret directly 534 + * ... 535 + * } 508 536 */ 509 537 #define BPF_PROG2(name, args...) \ 510 538 name(unsigned long long *ctx); \ 511 539 static __always_inline typeof(name(0)) \ 512 - ____##name(unsigned long long *ctx ____bpf_ctx_decl(args)); \ 540 + ____##name(unsigned long long *ctx ___bpf_ctx_decl(args)); \ 513 541 typeof(name(0)) name(unsigned long long *ctx) \ 514 542 { \ 515 - return ____##name(ctx ____bpf_ctx_arg(____bpf_reg_cnt(args), args)); \ 543 + return ____##name(ctx ___bpf_ctx_arg(args)); \ 516 544 } \ 517 545 static __always_inline typeof(name(0)) \ 518 - ____##name(unsigned long long *ctx ____bpf_ctx_decl(args)) 546 + ____##name(unsigned long long *ctx ___bpf_ctx_decl(args)) 519 547 520 548 struct pt_regs; 521 549