Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libbpf: Streamline bpf_attr and perf_event_attr initialization

Make sure that entire libbpf code base is initializing bpf_attr and
perf_event_attr with memset(0). Also for bpf_attr make sure we
clear and pass to kernel only relevant parts of bpf_attr. bpf_attr is
a huge union of independent sub-command attributes, so there is no need
to clear and pass entire union bpf_attr, which over time grows quite
a lot and for most commands this growth is completely irrelevant.

A few cases where we were relying on compiler initialization of BPF UAPI
structs (like bpf_prog_info, bpf_map_info, etc.) with `= {};` were
switched to the memset(0) pattern for future-proofing.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hao Luo <haoluo@google.com>
Link: https://lore.kernel.org/bpf/20220816001929.369487-3-andrii@kernel.org

Authored by Andrii Nakryiko and committed by Daniel Borkmann
813847a3 d4e6d684

+138 -91
+102 -71
tools/lib/bpf/bpf.c
··· 105 105 */ 106 106 int probe_memcg_account(void) 107 107 { 108 - const size_t prog_load_attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd); 108 + const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd); 109 109 struct bpf_insn insns[] = { 110 110 BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns), 111 111 BPF_EXIT_INSN(), ··· 115 115 int prog_fd; 116 116 117 117 /* attempt loading freplace trying to use custom BTF */ 118 - memset(&attr, 0, prog_load_attr_sz); 118 + memset(&attr, 0, attr_sz); 119 119 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 120 120 attr.insns = ptr_to_u64(insns); 121 121 attr.insn_cnt = insn_cnt; 122 122 attr.license = ptr_to_u64("GPL"); 123 123 124 - prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, prog_load_attr_sz); 124 + prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz); 125 125 if (prog_fd >= 0) { 126 126 close(prog_fd); 127 127 return 1; ··· 232 232 const struct bpf_insn *insns, size_t insn_cnt, 233 233 const struct bpf_prog_load_opts *opts) 234 234 { 235 + const size_t attr_sz = offsetofend(union bpf_attr, fd_array); 235 236 void *finfo = NULL, *linfo = NULL; 236 237 const char *func_info, *line_info; 237 238 __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd; ··· 252 251 if (attempts == 0) 253 252 attempts = PROG_LOAD_ATTEMPTS; 254 253 255 - memset(&attr, 0, sizeof(attr)); 254 + memset(&attr, 0, attr_sz); 256 255 257 256 attr.prog_type = prog_type; 258 257 attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0); ··· 315 314 attr.log_level = log_level; 316 315 } 317 316 318 - fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); 317 + fd = sys_bpf_prog_load(&attr, attr_sz, attempts); 319 318 if (fd >= 0) 320 319 return fd; 321 320 ··· 355 354 break; 356 355 } 357 356 358 - fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); 357 + fd = sys_bpf_prog_load(&attr, attr_sz, attempts); 359 358 if (fd >= 0) 360 359 goto done; 361 360 } ··· 369 368 attr.log_size = log_size; 370 369 attr.log_level = 1; 371 
370 372 - fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); 371 + fd = sys_bpf_prog_load(&attr, attr_sz, attempts); 373 372 } 374 373 done: 375 374 /* free() doesn't affect errno, so we don't need to restore it */ ··· 381 380 int bpf_map_update_elem(int fd, const void *key, const void *value, 382 381 __u64 flags) 383 382 { 383 + const size_t attr_sz = offsetofend(union bpf_attr, flags); 384 384 union bpf_attr attr; 385 385 int ret; 386 386 387 - memset(&attr, 0, sizeof(attr)); 387 + memset(&attr, 0, attr_sz); 388 388 attr.map_fd = fd; 389 389 attr.key = ptr_to_u64(key); 390 390 attr.value = ptr_to_u64(value); 391 391 attr.flags = flags; 392 392 393 - ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); 393 + ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz); 394 394 return libbpf_err_errno(ret); 395 395 } 396 396 397 397 int bpf_map_lookup_elem(int fd, const void *key, void *value) 398 398 { 399 + const size_t attr_sz = offsetofend(union bpf_attr, flags); 399 400 union bpf_attr attr; 400 401 int ret; 401 402 402 - memset(&attr, 0, sizeof(attr)); 403 + memset(&attr, 0, attr_sz); 403 404 attr.map_fd = fd; 404 405 attr.key = ptr_to_u64(key); 405 406 attr.value = ptr_to_u64(value); 406 407 407 - ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); 408 + ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz); 408 409 return libbpf_err_errno(ret); 409 410 } 410 411 411 412 int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags) 412 413 { 414 + const size_t attr_sz = offsetofend(union bpf_attr, flags); 413 415 union bpf_attr attr; 414 416 int ret; 415 417 416 - memset(&attr, 0, sizeof(attr)); 418 + memset(&attr, 0, attr_sz); 417 419 attr.map_fd = fd; 418 420 attr.key = ptr_to_u64(key); 419 421 attr.value = ptr_to_u64(value); 420 422 attr.flags = flags; 421 423 422 - ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); 424 + ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz); 423 425 return libbpf_err_errno(ret); 424 426 } 425 427 426 
428 int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value) 427 429 { 430 + const size_t attr_sz = offsetofend(union bpf_attr, flags); 428 431 union bpf_attr attr; 429 432 int ret; 430 433 431 - memset(&attr, 0, sizeof(attr)); 434 + memset(&attr, 0, attr_sz); 432 435 attr.map_fd = fd; 433 436 attr.key = ptr_to_u64(key); 434 437 attr.value = ptr_to_u64(value); 435 438 436 - ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr)); 439 + ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz); 437 440 return libbpf_err_errno(ret); 438 441 } 439 442 440 443 int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags) 441 444 { 445 + const size_t attr_sz = offsetofend(union bpf_attr, flags); 442 446 union bpf_attr attr; 443 447 int ret; 444 448 445 - memset(&attr, 0, sizeof(attr)); 449 + memset(&attr, 0, attr_sz); 446 450 attr.map_fd = fd; 447 451 attr.key = ptr_to_u64(key); 448 452 attr.value = ptr_to_u64(value); 449 453 attr.flags = flags; 450 454 451 - ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr)); 455 + ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz); 452 456 return libbpf_err_errno(ret); 453 457 } 454 458 455 459 int bpf_map_delete_elem(int fd, const void *key) 456 460 { 461 + const size_t attr_sz = offsetofend(union bpf_attr, flags); 457 462 union bpf_attr attr; 458 463 int ret; 459 464 460 - memset(&attr, 0, sizeof(attr)); 465 + memset(&attr, 0, attr_sz); 461 466 attr.map_fd = fd; 462 467 attr.key = ptr_to_u64(key); 463 468 464 - ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); 469 + ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz); 465 470 return libbpf_err_errno(ret); 466 471 } 467 472 468 473 int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags) 469 474 { 475 + const size_t attr_sz = offsetofend(union bpf_attr, flags); 470 476 union bpf_attr attr; 471 477 int ret; 472 478 473 - memset(&attr, 0, sizeof(attr)); 479 + memset(&attr, 0, 
attr_sz); 474 480 attr.map_fd = fd; 475 481 attr.key = ptr_to_u64(key); 476 482 attr.flags = flags; 477 483 478 - ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); 484 + ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz); 479 485 return libbpf_err_errno(ret); 480 486 } 481 487 482 488 int bpf_map_get_next_key(int fd, const void *key, void *next_key) 483 489 { 490 + const size_t attr_sz = offsetofend(union bpf_attr, next_key); 484 491 union bpf_attr attr; 485 492 int ret; 486 493 487 - memset(&attr, 0, sizeof(attr)); 494 + memset(&attr, 0, attr_sz); 488 495 attr.map_fd = fd; 489 496 attr.key = ptr_to_u64(key); 490 497 attr.next_key = ptr_to_u64(next_key); 491 498 492 - ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)); 499 + ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz); 493 500 return libbpf_err_errno(ret); 494 501 } 495 502 496 503 int bpf_map_freeze(int fd) 497 504 { 505 + const size_t attr_sz = offsetofend(union bpf_attr, map_fd); 498 506 union bpf_attr attr; 499 507 int ret; 500 508 501 - memset(&attr, 0, sizeof(attr)); 509 + memset(&attr, 0, attr_sz); 502 510 attr.map_fd = fd; 503 511 504 - ret = sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr)); 512 + ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz); 505 513 return libbpf_err_errno(ret); 506 514 } 507 515 ··· 519 509 __u32 *count, 520 510 const struct bpf_map_batch_opts *opts) 521 511 { 512 + const size_t attr_sz = offsetofend(union bpf_attr, batch); 522 513 union bpf_attr attr; 523 514 int ret; 524 515 525 516 if (!OPTS_VALID(opts, bpf_map_batch_opts)) 526 517 return libbpf_err(-EINVAL); 527 518 528 - memset(&attr, 0, sizeof(attr)); 519 + memset(&attr, 0, attr_sz); 529 520 attr.batch.map_fd = fd; 530 521 attr.batch.in_batch = ptr_to_u64(in_batch); 531 522 attr.batch.out_batch = ptr_to_u64(out_batch); ··· 536 525 attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0); 537 526 attr.batch.flags = OPTS_GET(opts, flags, 0); 538 527 539 - ret = sys_bpf(cmd, &attr, sizeof(attr)); 528 + ret = sys_bpf(cmd, 
&attr, attr_sz); 540 529 *count = attr.batch.count; 541 530 542 531 return libbpf_err_errno(ret); ··· 575 564 576 565 int bpf_obj_pin(int fd, const char *pathname) 577 566 { 567 + const size_t attr_sz = offsetofend(union bpf_attr, file_flags); 578 568 union bpf_attr attr; 579 569 int ret; 580 570 581 - memset(&attr, 0, sizeof(attr)); 571 + memset(&attr, 0, attr_sz); 582 572 attr.pathname = ptr_to_u64((void *)pathname); 583 573 attr.bpf_fd = fd; 584 574 585 - ret = sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr)); 575 + ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz); 586 576 return libbpf_err_errno(ret); 587 577 } 588 578 ··· 594 582 595 583 int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts) 596 584 { 585 + const size_t attr_sz = offsetofend(union bpf_attr, file_flags); 597 586 union bpf_attr attr; 598 587 int fd; 599 588 600 589 if (!OPTS_VALID(opts, bpf_obj_get_opts)) 601 590 return libbpf_err(-EINVAL); 602 591 603 - memset(&attr, 0, sizeof(attr)); 592 + memset(&attr, 0, attr_sz); 604 593 attr.pathname = ptr_to_u64((void *)pathname); 605 594 attr.file_flags = OPTS_GET(opts, file_flags, 0); 606 595 607 - fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr)); 596 + fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz); 608 597 return libbpf_err_errno(fd); 609 598 } 610 599 ··· 623 610 enum bpf_attach_type type, 624 611 const struct bpf_prog_attach_opts *opts) 625 612 { 613 + const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd); 626 614 union bpf_attr attr; 627 615 int ret; 628 616 629 617 if (!OPTS_VALID(opts, bpf_prog_attach_opts)) 630 618 return libbpf_err(-EINVAL); 631 619 632 - memset(&attr, 0, sizeof(attr)); 620 + memset(&attr, 0, attr_sz); 633 621 attr.target_fd = target_fd; 634 622 attr.attach_bpf_fd = prog_fd; 635 623 attr.attach_type = type; 636 624 attr.attach_flags = OPTS_GET(opts, flags, 0); 637 625 attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0); 638 626 639 - ret = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)); 627 + ret = 
sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz); 640 628 return libbpf_err_errno(ret); 641 629 } 642 630 ··· 648 634 649 635 int bpf_prog_detach(int target_fd, enum bpf_attach_type type) 650 636 { 637 + const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd); 651 638 union bpf_attr attr; 652 639 int ret; 653 640 654 - memset(&attr, 0, sizeof(attr)); 641 + memset(&attr, 0, attr_sz); 655 642 attr.target_fd = target_fd; 656 643 attr.attach_type = type; 657 644 658 - ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); 645 + ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz); 659 646 return libbpf_err_errno(ret); 660 647 } 661 648 662 649 int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type) 663 650 { 651 + const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd); 664 652 union bpf_attr attr; 665 653 int ret; 666 654 667 - memset(&attr, 0, sizeof(attr)); 655 + memset(&attr, 0, attr_sz); 668 656 attr.target_fd = target_fd; 669 657 attr.attach_bpf_fd = prog_fd; 670 658 attr.attach_type = type; 671 659 672 - ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); 660 + ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz); 673 661 return libbpf_err_errno(ret); 674 662 } 675 663 ··· 679 663 enum bpf_attach_type attach_type, 680 664 const struct bpf_link_create_opts *opts) 681 665 { 666 + const size_t attr_sz = offsetofend(union bpf_attr, link_create); 682 667 __u32 target_btf_id, iter_info_len; 683 668 union bpf_attr attr; 684 669 int fd, err; ··· 698 681 return libbpf_err(-EINVAL); 699 682 } 700 683 701 - memset(&attr, 0, sizeof(attr)); 684 + memset(&attr, 0, attr_sz); 702 685 attr.link_create.prog_fd = prog_fd; 703 686 attr.link_create.target_fd = target_fd; 704 687 attr.link_create.attach_type = attach_type; ··· 742 725 break; 743 726 } 744 727 proceed: 745 - fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr)); 728 + fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz); 746 729 if (fd >= 0) 747 730 return fd; 748 731 /* we'll get EINVAL if 
LINK_CREATE doesn't support attaching fentry ··· 778 761 779 762 int bpf_link_detach(int link_fd) 780 763 { 764 + const size_t attr_sz = offsetofend(union bpf_attr, link_detach); 781 765 union bpf_attr attr; 782 766 int ret; 783 767 784 - memset(&attr, 0, sizeof(attr)); 768 + memset(&attr, 0, attr_sz); 785 769 attr.link_detach.link_fd = link_fd; 786 770 787 - ret = sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr)); 771 + ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz); 788 772 return libbpf_err_errno(ret); 789 773 } 790 774 791 775 int bpf_link_update(int link_fd, int new_prog_fd, 792 776 const struct bpf_link_update_opts *opts) 793 777 { 778 + const size_t attr_sz = offsetofend(union bpf_attr, link_update); 794 779 union bpf_attr attr; 795 780 int ret; 796 781 797 782 if (!OPTS_VALID(opts, bpf_link_update_opts)) 798 783 return libbpf_err(-EINVAL); 799 784 800 - memset(&attr, 0, sizeof(attr)); 785 + memset(&attr, 0, attr_sz); 801 786 attr.link_update.link_fd = link_fd; 802 787 attr.link_update.new_prog_fd = new_prog_fd; 803 788 attr.link_update.flags = OPTS_GET(opts, flags, 0); 804 789 attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0); 805 790 806 - ret = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr)); 791 + ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz); 807 792 return libbpf_err_errno(ret); 808 793 } 809 794 810 795 int bpf_iter_create(int link_fd) 811 796 { 797 + const size_t attr_sz = offsetofend(union bpf_attr, iter_create); 812 798 union bpf_attr attr; 813 799 int fd; 814 800 815 - memset(&attr, 0, sizeof(attr)); 801 + memset(&attr, 0, attr_sz); 816 802 attr.iter_create.link_fd = link_fd; 817 803 818 - fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr)); 804 + fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz); 819 805 return libbpf_err_errno(fd); 820 806 } 821 807 ··· 826 806 enum bpf_attach_type type, 827 807 struct bpf_prog_query_opts *opts) 828 808 { 809 + const size_t attr_sz = offsetofend(union bpf_attr, query); 829 810 union bpf_attr attr; 830 
811 int ret; 831 812 832 813 if (!OPTS_VALID(opts, bpf_prog_query_opts)) 833 814 return libbpf_err(-EINVAL); 834 815 835 - memset(&attr, 0, sizeof(attr)); 816 + memset(&attr, 0, attr_sz); 836 817 837 818 attr.query.target_fd = target_fd; 838 819 attr.query.attach_type = type; ··· 842 821 attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL)); 843 822 attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL)); 844 823 845 - ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr)); 824 + ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz); 846 825 847 826 OPTS_SET(opts, attach_flags, attr.query.attach_flags); 848 827 OPTS_SET(opts, prog_cnt, attr.query.prog_cnt); ··· 871 850 872 851 int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts) 873 852 { 853 + const size_t attr_sz = offsetofend(union bpf_attr, test); 874 854 union bpf_attr attr; 875 855 int ret; 876 856 877 857 if (!OPTS_VALID(opts, bpf_test_run_opts)) 878 858 return libbpf_err(-EINVAL); 879 859 880 - memset(&attr, 0, sizeof(attr)); 860 + memset(&attr, 0, attr_sz); 881 861 attr.test.prog_fd = prog_fd; 882 862 attr.test.batch_size = OPTS_GET(opts, batch_size, 0); 883 863 attr.test.cpu = OPTS_GET(opts, cpu, 0); ··· 894 872 attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL)); 895 873 attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL)); 896 874 897 - ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)); 875 + ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz); 898 876 899 877 OPTS_SET(opts, data_size_out, attr.test.data_size_out); 900 878 OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out); ··· 906 884 907 885 static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd) 908 886 { 887 + const size_t attr_sz = offsetofend(union bpf_attr, open_flags); 909 888 union bpf_attr attr; 910 889 int err; 911 890 912 - memset(&attr, 0, sizeof(attr)); 891 + memset(&attr, 0, attr_sz); 913 892 attr.start_id = start_id; 914 893 915 - err = sys_bpf(cmd, 
&attr, sizeof(attr)); 894 + err = sys_bpf(cmd, &attr, attr_sz); 916 895 if (!err) 917 896 *next_id = attr.next_id; 918 897 ··· 942 919 943 920 int bpf_prog_get_fd_by_id(__u32 id) 944 921 { 922 + const size_t attr_sz = offsetofend(union bpf_attr, open_flags); 945 923 union bpf_attr attr; 946 924 int fd; 947 925 948 - memset(&attr, 0, sizeof(attr)); 926 + memset(&attr, 0, attr_sz); 949 927 attr.prog_id = id; 950 928 951 - fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr)); 929 + fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz); 952 930 return libbpf_err_errno(fd); 953 931 } 954 932 955 933 int bpf_map_get_fd_by_id(__u32 id) 956 934 { 935 + const size_t attr_sz = offsetofend(union bpf_attr, open_flags); 957 936 union bpf_attr attr; 958 937 int fd; 959 938 960 - memset(&attr, 0, sizeof(attr)); 939 + memset(&attr, 0, attr_sz); 961 940 attr.map_id = id; 962 941 963 - fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr)); 942 + fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz); 964 943 return libbpf_err_errno(fd); 965 944 } 966 945 967 946 int bpf_btf_get_fd_by_id(__u32 id) 968 947 { 948 + const size_t attr_sz = offsetofend(union bpf_attr, open_flags); 969 949 union bpf_attr attr; 970 950 int fd; 971 951 972 - memset(&attr, 0, sizeof(attr)); 952 + memset(&attr, 0, attr_sz); 973 953 attr.btf_id = id; 974 954 975 - fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr)); 955 + fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz); 976 956 return libbpf_err_errno(fd); 977 957 } 978 958 979 959 int bpf_link_get_fd_by_id(__u32 id) 980 960 { 961 + const size_t attr_sz = offsetofend(union bpf_attr, open_flags); 981 962 union bpf_attr attr; 982 963 int fd; 983 964 984 - memset(&attr, 0, sizeof(attr)); 965 + memset(&attr, 0, attr_sz); 985 966 attr.link_id = id; 986 967 987 - fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr)); 968 + fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz); 988 969 return libbpf_err_errno(fd); 989 970 } 990 971 991 
972 int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len) 992 973 { 974 + const size_t attr_sz = offsetofend(union bpf_attr, info); 993 975 union bpf_attr attr; 994 976 int err; 995 977 996 - memset(&attr, 0, sizeof(attr)); 978 + memset(&attr, 0, attr_sz); 997 979 attr.info.bpf_fd = bpf_fd; 998 980 attr.info.info_len = *info_len; 999 981 attr.info.info = ptr_to_u64(info); 1000 982 1001 - err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)); 1002 - 983 + err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz); 1003 984 if (!err) 1004 985 *info_len = attr.info.info_len; 1005 - 1006 986 return libbpf_err_errno(err); 1007 987 } 1008 988 1009 989 int bpf_raw_tracepoint_open(const char *name, int prog_fd) 1010 990 { 991 + const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint); 1011 992 union bpf_attr attr; 1012 993 int fd; 1013 994 1014 - memset(&attr, 0, sizeof(attr)); 995 + memset(&attr, 0, attr_sz); 1015 996 attr.raw_tracepoint.name = ptr_to_u64(name); 1016 997 attr.raw_tracepoint.prog_fd = prog_fd; 1017 998 1018 - fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr)); 999 + fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz); 1019 1000 return libbpf_err_errno(fd); 1020 1001 } 1021 1002 ··· 1075 1048 __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, 1076 1049 __u64 *probe_addr) 1077 1050 { 1078 - union bpf_attr attr = {}; 1051 + const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query); 1052 + union bpf_attr attr; 1079 1053 int err; 1080 1054 1055 + memset(&attr, 0, attr_sz); 1081 1056 attr.task_fd_query.pid = pid; 1082 1057 attr.task_fd_query.fd = fd; 1083 1058 attr.task_fd_query.flags = flags; 1084 1059 attr.task_fd_query.buf = ptr_to_u64(buf); 1085 1060 attr.task_fd_query.buf_len = *buf_len; 1086 1061 1087 - err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr)); 1062 + err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz); 1088 1063 1089 1064 *buf_len = attr.task_fd_query.buf_len; 1090 1065 *prog_id = 
attr.task_fd_query.prog_id; ··· 1099 1070 1100 1071 int bpf_enable_stats(enum bpf_stats_type type) 1101 1072 { 1073 + const size_t attr_sz = offsetofend(union bpf_attr, enable_stats); 1102 1074 union bpf_attr attr; 1103 1075 int fd; 1104 1076 1105 - memset(&attr, 0, sizeof(attr)); 1077 + memset(&attr, 0, attr_sz); 1106 1078 attr.enable_stats.type = type; 1107 1079 1108 - fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr)); 1080 + fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz); 1109 1081 return libbpf_err_errno(fd); 1110 1082 } 1111 1083 1112 1084 int bpf_prog_bind_map(int prog_fd, int map_fd, 1113 1085 const struct bpf_prog_bind_opts *opts) 1114 1086 { 1087 + const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map); 1115 1088 union bpf_attr attr; 1116 1089 int ret; 1117 1090 1118 1091 if (!OPTS_VALID(opts, bpf_prog_bind_opts)) 1119 1092 return libbpf_err(-EINVAL); 1120 1093 1121 - memset(&attr, 0, sizeof(attr)); 1094 + memset(&attr, 0, attr_sz); 1122 1095 attr.prog_bind_map.prog_fd = prog_fd; 1123 1096 attr.prog_bind_map.map_fd = map_fd; 1124 1097 attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0); 1125 1098 1126 - ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr)); 1099 + ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz); 1127 1100 return libbpf_err_errno(ret); 1128 1101 }
+28 -15
tools/lib/bpf/libbpf.c
··· 4287 4287 4288 4288 int bpf_map__reuse_fd(struct bpf_map *map, int fd) 4289 4289 { 4290 - struct bpf_map_info info = {}; 4290 + struct bpf_map_info info; 4291 4291 __u32 len = sizeof(info), name_len; 4292 4292 int new_fd, err; 4293 4293 char *new_name; 4294 4294 4295 + memset(&info, 0, len); 4295 4296 err = bpf_obj_get_info_by_fd(fd, &info, &len); 4296 4297 if (err && errno == EINVAL) 4297 4298 err = bpf_get_map_info_from_fdinfo(fd, &info); ··· 4834 4833 4835 4834 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) 4836 4835 { 4837 - struct bpf_map_info map_info = {}; 4836 + struct bpf_map_info map_info; 4838 4837 char msg[STRERR_BUFSIZE]; 4839 - __u32 map_info_len; 4838 + __u32 map_info_len = sizeof(map_info); 4840 4839 int err; 4841 4840 4842 - map_info_len = sizeof(map_info); 4843 - 4841 + memset(&map_info, 0, map_info_len); 4844 4842 err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len); 4845 4843 if (err && errno == EINVAL) 4846 4844 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info); ··· 9007 9007 9008 9008 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) 9009 9009 { 9010 - struct bpf_prog_info info = {}; 9010 + struct bpf_prog_info info; 9011 9011 __u32 info_len = sizeof(info); 9012 9012 struct btf *btf; 9013 9013 int err; 9014 9014 9015 + memset(&info, 0, info_len); 9015 9016 err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len); 9016 9017 if (err) { 9017 9018 pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n", ··· 9840 9839 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, 9841 9840 uint64_t offset, int pid, size_t ref_ctr_off) 9842 9841 { 9843 - struct perf_event_attr attr = {}; 9842 + const size_t attr_sz = sizeof(struct perf_event_attr); 9843 + struct perf_event_attr attr; 9844 9844 char errmsg[STRERR_BUFSIZE]; 9845 9845 int type, pfd; 9846 9846 9847 9847 if (ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS)) 9848 9848 return -EINVAL; 9849 + 
9850 + memset(&attr, 0, attr_sz); 9849 9851 9850 9852 type = uprobe ? determine_uprobe_perf_type() 9851 9853 : determine_kprobe_perf_type(); ··· 9870 9866 } 9871 9867 attr.config |= 1 << bit; 9872 9868 } 9873 - attr.size = sizeof(attr); 9869 + attr.size = attr_sz; 9874 9870 attr.type = type; 9875 9871 attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT; 9876 9872 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */ ··· 9969 9965 static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe, 9970 9966 const char *kfunc_name, size_t offset, int pid) 9971 9967 { 9972 - struct perf_event_attr attr = {}; 9968 + const size_t attr_sz = sizeof(struct perf_event_attr); 9969 + struct perf_event_attr attr; 9973 9970 char errmsg[STRERR_BUFSIZE]; 9974 9971 int type, pfd, err; 9975 9972 ··· 9989 9984 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 9990 9985 goto err_clean_legacy; 9991 9986 } 9992 - attr.size = sizeof(attr); 9987 + 9988 + memset(&attr, 0, attr_sz); 9989 + attr.size = attr_sz; 9993 9990 attr.config = type; 9994 9991 attr.type = PERF_TYPE_TRACEPOINT; 9995 9992 ··· 10448 10441 static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe, 10449 10442 const char *binary_path, size_t offset, int pid) 10450 10443 { 10444 + const size_t attr_sz = sizeof(struct perf_event_attr); 10451 10445 struct perf_event_attr attr; 10452 10446 int type, pfd, err; 10453 10447 ··· 10466 10458 goto err_clean_legacy; 10467 10459 } 10468 10460 10469 - memset(&attr, 0, sizeof(attr)); 10470 - attr.size = sizeof(attr); 10461 + memset(&attr, 0, attr_sz); 10462 + attr.size = attr_sz; 10471 10463 attr.config = type; 10472 10464 attr.type = PERF_TYPE_TRACEPOINT; 10473 10465 ··· 11006 10998 static int perf_event_open_tracepoint(const char *tp_category, 11007 10999 const char *tp_name) 11008 11000 { 11009 - struct perf_event_attr attr = {}; 11001 + const size_t attr_sz = sizeof(struct perf_event_attr); 11002 + struct perf_event_attr 
attr; 11010 11003 char errmsg[STRERR_BUFSIZE]; 11011 11004 int tp_id, pfd, err; 11012 11005 ··· 11019 11010 return tp_id; 11020 11011 } 11021 11012 11013 + memset(&attr, 0, attr_sz); 11022 11014 attr.type = PERF_TYPE_TRACEPOINT; 11023 - attr.size = sizeof(attr); 11015 + attr.size = attr_sz; 11024 11016 attr.config = tp_id; 11025 11017 11026 11018 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, ··· 11641 11631 void *ctx, 11642 11632 const struct perf_buffer_opts *opts) 11643 11633 { 11634 + const size_t attr_sz = sizeof(struct perf_event_attr); 11644 11635 struct perf_buffer_params p = {}; 11645 - struct perf_event_attr attr = {}; 11636 + struct perf_event_attr attr; 11646 11637 11647 11638 if (!OPTS_VALID(opts, perf_buffer_opts)) 11648 11639 return libbpf_err_ptr(-EINVAL); 11649 11640 11641 + memset(&attr, 0, attr_sz); 11642 + attr.size = attr_sz; 11650 11643 attr.config = PERF_COUNT_SW_BPF_OUTPUT; 11651 11644 attr.type = PERF_TYPE_SOFTWARE; 11652 11645 attr.sample_type = PERF_SAMPLE_RAW;
+2 -1
tools/lib/bpf/netlink.c
··· 587 587 588 588 static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd) 589 589 { 590 - struct bpf_prog_info info = {}; 590 + struct bpf_prog_info info; 591 591 __u32 info_len = sizeof(info); 592 592 char name[256]; 593 593 int len, ret; 594 594 595 + memset(&info, 0, info_len); 595 596 ret = bpf_obj_get_info_by_fd(fd, &info, &info_len); 596 597 if (ret < 0) 597 598 return ret;
+6 -4
tools/lib/bpf/skel_internal.h
··· 285 285 286 286 static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) 287 287 { 288 + const size_t prog_load_attr_sz = offsetofend(union bpf_attr, fd_array); 289 + const size_t test_run_attr_sz = offsetofend(union bpf_attr, test); 288 290 int map_fd = -1, prog_fd = -1, key = 0, err; 289 291 union bpf_attr attr; 290 292 ··· 304 302 goto out; 305 303 } 306 304 307 - memset(&attr, 0, sizeof(attr)); 305 + memset(&attr, 0, prog_load_attr_sz); 308 306 attr.prog_type = BPF_PROG_TYPE_SYSCALL; 309 307 attr.insns = (long) opts->insns; 310 308 attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn); ··· 315 313 attr.log_size = opts->ctx->log_size; 316 314 attr.log_buf = opts->ctx->log_buf; 317 315 attr.prog_flags = BPF_F_SLEEPABLE; 318 - err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 316 + err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, prog_load_attr_sz); 319 317 if (prog_fd < 0) { 320 318 opts->errstr = "failed to load loader prog"; 321 319 set_err; 322 320 goto out; 323 321 } 324 322 325 - memset(&attr, 0, sizeof(attr)); 323 + memset(&attr, 0, test_run_attr_sz); 326 324 attr.test.prog_fd = prog_fd; 327 325 attr.test.ctx_in = (long) opts->ctx; 328 326 attr.test.ctx_size_in = opts->ctx->sz; 329 - err = skel_sys_bpf(BPF_PROG_RUN, &attr, sizeof(attr)); 327 + err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz); 330 328 if (err < 0 || (int)attr.test.retval < 0) { 331 329 opts->errstr = "failed to execute loader prog"; 332 330 if (err < 0) {