Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add test for bpf array map iterators

Two subtests are added.
$ ./test_progs -n 4
...
#4/20 bpf_array_map:OK
#4/21 bpf_percpu_array_map:OK
...

The bpf_array_map subtest also tests the bpf program
changing array element values and sending key/value
pairs to user space through the bpf_seq_write() interface.

Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200723184121.591367-1-yhs@fb.com

authored by

Yonghong Song and committed by
Alexei Starovoitov
60dd49ea 2a7c2fff

+247
+161
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
··· 17 17 #include "bpf_iter_test_kern4.skel.h" 18 18 #include "bpf_iter_bpf_hash_map.skel.h" 19 19 #include "bpf_iter_bpf_percpu_hash_map.skel.h" 20 + #include "bpf_iter_bpf_array_map.skel.h" 21 + #include "bpf_iter_bpf_percpu_array_map.skel.h" 20 22 21 23 static int duration; 22 24 ··· 640 638 bpf_iter_bpf_percpu_hash_map__destroy(skel); 641 639 } 642 640 641 + static void test_bpf_array_map(void) 642 + { 643 + __u64 val, expected_val = 0, res_first_val, first_val = 0; 644 + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 645 + __u32 expected_key = 0, res_first_key; 646 + struct bpf_iter_bpf_array_map *skel; 647 + int err, i, map_fd, iter_fd; 648 + struct bpf_link *link; 649 + char buf[64] = {}; 650 + int len, start; 651 + 652 + skel = bpf_iter_bpf_array_map__open_and_load(); 653 + if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load", 654 + "skeleton open_and_load failed\n")) 655 + return; 656 + 657 + map_fd = bpf_map__fd(skel->maps.arraymap1); 658 + for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 659 + val = i + 4; 660 + expected_key += i; 661 + expected_val += val; 662 + 663 + if (i == 0) 664 + first_val = val; 665 + 666 + err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY); 667 + if (CHECK(err, "map_update", "map_update failed\n")) 668 + goto out; 669 + } 670 + 671 + opts.map_fd = map_fd; 672 + link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts); 673 + if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 674 + goto out; 675 + 676 + iter_fd = bpf_iter_create(bpf_link__fd(link)); 677 + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 678 + goto free_link; 679 + 680 + /* do some tests */ 681 + start = 0; 682 + while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0) 683 + start += len; 684 + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 685 + goto close_iter; 686 + 687 + /* test results */ 688 + res_first_key = *(__u32 *)buf; 689 + res_first_val = *(__u64 *)(buf + 
sizeof(__u32)); 690 + if (CHECK(res_first_key != 0 || res_first_val != first_val, 691 + "bpf_seq_write", 692 + "seq_write failure: first key %u vs expected 0, " 693 + " first value %llu vs expected %llu\n", 694 + res_first_key, res_first_val, first_val)) 695 + goto close_iter; 696 + 697 + if (CHECK(skel->bss->key_sum != expected_key, 698 + "key_sum", "got %u expected %u\n", 699 + skel->bss->key_sum, expected_key)) 700 + goto close_iter; 701 + if (CHECK(skel->bss->val_sum != expected_val, 702 + "val_sum", "got %llu expected %llu\n", 703 + skel->bss->val_sum, expected_val)) 704 + goto close_iter; 705 + 706 + for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 707 + err = bpf_map_lookup_elem(map_fd, &i, &val); 708 + if (CHECK(err, "map_lookup", "map_lookup failed\n")) 709 + goto out; 710 + if (CHECK(i != val, "invalid_val", 711 + "got value %llu expected %u\n", val, i)) 712 + goto out; 713 + } 714 + 715 + close_iter: 716 + close(iter_fd); 717 + free_link: 718 + bpf_link__destroy(link); 719 + out: 720 + bpf_iter_bpf_array_map__destroy(skel); 721 + } 722 + 723 + static void test_bpf_percpu_array_map(void) 724 + { 725 + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 726 + struct bpf_iter_bpf_percpu_array_map *skel; 727 + __u32 expected_key = 0, expected_val = 0; 728 + int err, i, j, map_fd, iter_fd; 729 + struct bpf_link *link; 730 + char buf[64]; 731 + void *val; 732 + int len; 733 + 734 + val = malloc(8 * bpf_num_possible_cpus()); 735 + 736 + skel = bpf_iter_bpf_percpu_array_map__open(); 737 + if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open", 738 + "skeleton open failed\n")) 739 + return; 740 + 741 + skel->rodata->num_cpus = bpf_num_possible_cpus(); 742 + 743 + err = bpf_iter_bpf_percpu_array_map__load(skel); 744 + if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load", 745 + "skeleton load failed\n")) 746 + goto out; 747 + 748 + /* update map values here */ 749 + map_fd = bpf_map__fd(skel->maps.arraymap1); 750 + for (i = 0; i < 
bpf_map__max_entries(skel->maps.arraymap1); i++) { 751 + expected_key += i; 752 + 753 + for (j = 0; j < bpf_num_possible_cpus(); j++) { 754 + *(__u32 *)(val + j * 8) = i + j; 755 + expected_val += i + j; 756 + } 757 + 758 + err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY); 759 + if (CHECK(err, "map_update", "map_update failed\n")) 760 + goto out; 761 + } 762 + 763 + opts.map_fd = map_fd; 764 + link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts); 765 + if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 766 + goto out; 767 + 768 + iter_fd = bpf_iter_create(bpf_link__fd(link)); 769 + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 770 + goto free_link; 771 + 772 + /* do some tests */ 773 + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 774 + ; 775 + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 776 + goto close_iter; 777 + 778 + /* test results */ 779 + if (CHECK(skel->bss->key_sum != expected_key, 780 + "key_sum", "got %u expected %u\n", 781 + skel->bss->key_sum, expected_key)) 782 + goto close_iter; 783 + if (CHECK(skel->bss->val_sum != expected_val, 784 + "val_sum", "got %u expected %u\n", 785 + skel->bss->val_sum, expected_val)) 786 + goto close_iter; 787 + 788 + close_iter: 789 + close(iter_fd); 790 + free_link: 791 + bpf_link__destroy(link); 792 + out: 793 + bpf_iter_bpf_percpu_array_map__destroy(skel); 794 + } 795 + 643 796 void test_bpf_iter(void) 644 797 { 645 798 if (test__start_subtest("btf_id_or_null")) ··· 835 678 test_bpf_hash_map(); 836 679 if (test__start_subtest("bpf_percpu_hash_map")) 837 680 test_bpf_percpu_hash_map(); 681 + if (test__start_subtest("bpf_array_map")) 682 + test_bpf_array_map(); 683 + if (test__start_subtest("bpf_percpu_array_map")) 684 + test_bpf_percpu_array_map(); 838 685 }
+40
tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* NOTE(review): key_t is not referenced by this program -- presumably
 * copied from the hash-map iterator test; confirm whether it can be
 * dropped.
 */
struct key_t {
	int a;
	int b;
	int c;
};

/* 3-element __u32 -> __u64 array; seeded and verified by the user-space
 * side of the selftest (test_bpf_array_map).
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 3);
	__type(key, __u32);
	__type(value, __u64);
} arraymap1 SEC(".maps");

/* Accumulated sums, read from user space via skel->bss. */
__u32 key_sum = 0;
__u64 val_sum = 0;

/* Called once per map element during bpf_iter traversal.
 * Streams the raw key and value bytes to the seq_file, accumulates both
 * sums, and overwrites the value with the key so user space can verify
 * that in-place element updates from the iterator took effect.
 */
SEC("iter/bpf_map_elem")
int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx)
{
	__u32 *key = ctx->key;
	__u64 *val = ctx->value;

	/* key/value are NULL on the post-iteration "end" callback */
	if (key == (void *)0 || val == (void *)0)
		return 0;

	bpf_seq_write(ctx->meta->seq, key, sizeof(__u32));
	bpf_seq_write(ctx->meta->seq, val, sizeof(__u64));
	key_sum += *key;
	val_sum += *val;
	*val = *key;
	return 0;
}
+46
tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* NOTE(review): key_t is not referenced by this program -- presumably
 * copied from the hash-map iterator test; confirm whether it can be
 * dropped.
 */
struct key_t {
	int a;
	int b;
	int c;
};

/* 3-element per-cpu __u32 array; seeded and verified by the user-space
 * side of the selftest (test_bpf_percpu_array_map).
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 3);
	__type(key, __u32);
	__type(value, __u32);
} arraymap1 SEC(".maps");

/* will set before prog run */
volatile const __u32 num_cpus = 0;

/* Accumulated sums, read from user space via skel->bss. */
__u32 key_sum = 0, val_sum = 0;

/* Called once per map element during bpf_iter traversal.
 * For a per-cpu map, ctx->value points at the per-cpu copies laid out
 * consecutively; each copy occupies an 8-byte slot (value size rounded
 * up by the kernel), hence the fixed step of 8 below.
 */
SEC("iter/bpf_map_elem")
int dump_bpf_percpu_array_map(struct bpf_iter__bpf_map_elem *ctx)
{
	__u32 *key = ctx->key;
	void *pptr = ctx->value;
	__u32 step;
	int i;

	/* key/value are NULL on the post-iteration "end" callback */
	if (key == (void *)0 || pptr == (void *)0)
		return 0;

	key_sum += *key;

	step = 8;
	for (i = 0; i < num_cpus; i++) {
		val_sum += *(__u32 *)pptr;
		pptr += step;
	}
	return 0;
}