Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: Set map_btf_{name, id} for all map types

Set map_btf_name and map_btf_id for all map types so that map fields can
be accessed by bpf programs.

Signed-off-by: Andrey Ignatov <rdna@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/a825f808f22af52b018dbe82f1c7d29dab5fc978.1592600985.git.rdna@fb.com

Authored by Andrey Ignatov and committed by Daniel Borkmann.
2872e9ac 41c48f3a

+72
+15
kernel/bpf/arraymap.c
··· 515 515 .map_btf_id = &array_map_btf_id, 516 516 }; 517 517 518 + static int percpu_array_map_btf_id; 518 519 const struct bpf_map_ops percpu_array_map_ops = { 519 520 .map_alloc_check = array_map_alloc_check, 520 521 .map_alloc = array_map_alloc, ··· 526 525 .map_delete_elem = array_map_delete_elem, 527 526 .map_seq_show_elem = percpu_array_map_seq_show_elem, 528 527 .map_check_btf = array_map_check_btf, 528 + .map_btf_name = "bpf_array", 529 + .map_btf_id = &percpu_array_map_btf_id, 529 530 }; 530 531 531 532 static int fd_array_map_alloc_check(union bpf_attr *attr) ··· 874 871 fd_array_map_free(map); 875 872 } 876 873 874 + static int prog_array_map_btf_id; 877 875 const struct bpf_map_ops prog_array_map_ops = { 878 876 .map_alloc_check = fd_array_map_alloc_check, 879 877 .map_alloc = prog_array_map_alloc, ··· 890 886 .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, 891 887 .map_release_uref = prog_array_map_clear, 892 888 .map_seq_show_elem = prog_array_map_seq_show_elem, 889 + .map_btf_name = "bpf_array", 890 + .map_btf_id = &prog_array_map_btf_id, 893 891 }; 894 892 895 893 static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, ··· 970 964 rcu_read_unlock(); 971 965 } 972 966 967 + static int perf_event_array_map_btf_id; 973 968 const struct bpf_map_ops perf_event_array_map_ops = { 974 969 .map_alloc_check = fd_array_map_alloc_check, 975 970 .map_alloc = array_map_alloc, ··· 982 975 .map_fd_put_ptr = perf_event_fd_array_put_ptr, 983 976 .map_release = perf_event_fd_array_release, 984 977 .map_check_btf = map_check_no_btf, 978 + .map_btf_name = "bpf_array", 979 + .map_btf_id = &perf_event_array_map_btf_id, 985 980 }; 986 981 987 982 #ifdef CONFIG_CGROUPS ··· 1006 997 fd_array_map_free(map); 1007 998 } 1008 999 1000 + static int cgroup_array_map_btf_id; 1009 1001 const struct bpf_map_ops cgroup_array_map_ops = { 1010 1002 .map_alloc_check = fd_array_map_alloc_check, 1011 1003 .map_alloc = array_map_alloc, ··· 1017 1007 
.map_fd_get_ptr = cgroup_fd_array_get_ptr, 1018 1008 .map_fd_put_ptr = cgroup_fd_array_put_ptr, 1019 1009 .map_check_btf = map_check_no_btf, 1010 + .map_btf_name = "bpf_array", 1011 + .map_btf_id = &cgroup_array_map_btf_id, 1020 1012 }; 1021 1013 #endif 1022 1014 ··· 1092 1080 return insn - insn_buf; 1093 1081 } 1094 1082 1083 + static int array_of_maps_map_btf_id; 1095 1084 const struct bpf_map_ops array_of_maps_map_ops = { 1096 1085 .map_alloc_check = fd_array_map_alloc_check, 1097 1086 .map_alloc = array_of_map_alloc, ··· 1105 1092 .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, 1106 1093 .map_gen_lookup = array_of_map_gen_lookup, 1107 1094 .map_check_btf = map_check_no_btf, 1095 + .map_btf_name = "bpf_array", 1096 + .map_btf_id = &array_of_maps_map_btf_id, 1108 1097 };
+3
kernel/bpf/bpf_struct_ops.c
··· 611 611 return map; 612 612 } 613 613 614 + static int bpf_struct_ops_map_btf_id; 614 615 const struct bpf_map_ops bpf_struct_ops_map_ops = { 615 616 .map_alloc_check = bpf_struct_ops_map_alloc_check, 616 617 .map_alloc = bpf_struct_ops_map_alloc, ··· 621 620 .map_delete_elem = bpf_struct_ops_map_delete_elem, 622 621 .map_update_elem = bpf_struct_ops_map_update_elem, 623 622 .map_seq_show_elem = bpf_struct_ops_map_seq_show_elem, 623 + .map_btf_name = "bpf_struct_ops_map", 624 + .map_btf_id = &bpf_struct_ops_map_btf_id, 624 625 }; 625 626 626 627 /* "const void *" because some subsystem is
+3
kernel/bpf/cpumap.c
··· 543 543 return 0; 544 544 } 545 545 546 + static int cpu_map_btf_id; 546 547 const struct bpf_map_ops cpu_map_ops = { 547 548 .map_alloc = cpu_map_alloc, 548 549 .map_free = cpu_map_free, ··· 552 551 .map_lookup_elem = cpu_map_lookup_elem, 553 552 .map_get_next_key = cpu_map_get_next_key, 554 553 .map_check_btf = map_check_no_btf, 554 + .map_btf_name = "bpf_cpu_map", 555 + .map_btf_id = &cpu_map_btf_id, 555 556 }; 556 557 557 558 static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
+6
kernel/bpf/devmap.c
··· 747 747 map, key, value, map_flags); 748 748 } 749 749 750 + static int dev_map_btf_id; 750 751 const struct bpf_map_ops dev_map_ops = { 751 752 .map_alloc = dev_map_alloc, 752 753 .map_free = dev_map_free, ··· 756 755 .map_update_elem = dev_map_update_elem, 757 756 .map_delete_elem = dev_map_delete_elem, 758 757 .map_check_btf = map_check_no_btf, 758 + .map_btf_name = "bpf_dtab", 759 + .map_btf_id = &dev_map_btf_id, 759 760 }; 760 761 762 + static int dev_map_hash_map_btf_id; 761 763 const struct bpf_map_ops dev_map_hash_ops = { 762 764 .map_alloc = dev_map_alloc, 763 765 .map_free = dev_map_free, ··· 769 765 .map_update_elem = dev_map_hash_update_elem, 770 766 .map_delete_elem = dev_map_hash_delete_elem, 771 767 .map_check_btf = map_check_no_btf, 768 + .map_btf_name = "bpf_dtab", 769 + .map_btf_id = &dev_map_hash_map_btf_id, 772 770 }; 773 771 774 772 static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
+12
kernel/bpf/hashtab.c
··· 1630 1630 .map_btf_id = &htab_map_btf_id, 1631 1631 }; 1632 1632 1633 + static int htab_lru_map_btf_id; 1633 1634 const struct bpf_map_ops htab_lru_map_ops = { 1634 1635 .map_alloc_check = htab_map_alloc_check, 1635 1636 .map_alloc = htab_map_alloc, ··· 1643 1642 .map_gen_lookup = htab_lru_map_gen_lookup, 1644 1643 .map_seq_show_elem = htab_map_seq_show_elem, 1645 1644 BATCH_OPS(htab_lru), 1645 + .map_btf_name = "bpf_htab", 1646 + .map_btf_id = &htab_lru_map_btf_id, 1646 1647 }; 1647 1648 1648 1649 /* Called from eBPF program */ ··· 1749 1746 rcu_read_unlock(); 1750 1747 } 1751 1748 1749 + static int htab_percpu_map_btf_id; 1752 1750 const struct bpf_map_ops htab_percpu_map_ops = { 1753 1751 .map_alloc_check = htab_map_alloc_check, 1754 1752 .map_alloc = htab_map_alloc, ··· 1760 1756 .map_delete_elem = htab_map_delete_elem, 1761 1757 .map_seq_show_elem = htab_percpu_map_seq_show_elem, 1762 1758 BATCH_OPS(htab_percpu), 1759 + .map_btf_name = "bpf_htab", 1760 + .map_btf_id = &htab_percpu_map_btf_id, 1763 1761 }; 1764 1762 1763 + static int htab_lru_percpu_map_btf_id; 1765 1764 const struct bpf_map_ops htab_lru_percpu_map_ops = { 1766 1765 .map_alloc_check = htab_map_alloc_check, 1767 1766 .map_alloc = htab_map_alloc, ··· 1775 1768 .map_delete_elem = htab_lru_map_delete_elem, 1776 1769 .map_seq_show_elem = htab_percpu_map_seq_show_elem, 1777 1770 BATCH_OPS(htab_lru_percpu), 1771 + .map_btf_name = "bpf_htab", 1772 + .map_btf_id = &htab_lru_percpu_map_btf_id, 1778 1773 }; 1779 1774 1780 1775 static int fd_htab_map_alloc_check(union bpf_attr *attr) ··· 1899 1890 fd_htab_map_free(map); 1900 1891 } 1901 1892 1893 + static int htab_of_maps_map_btf_id; 1902 1894 const struct bpf_map_ops htab_of_maps_map_ops = { 1903 1895 .map_alloc_check = fd_htab_map_alloc_check, 1904 1896 .map_alloc = htab_of_map_alloc, ··· 1912 1902 .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, 1913 1903 .map_gen_lookup = htab_of_map_gen_lookup, 1914 1904 .map_check_btf = map_check_no_btf, 
1905 + .map_btf_name = "bpf_htab", 1906 + .map_btf_id = &htab_of_maps_map_btf_id, 1915 1907 };
+3
kernel/bpf/local_storage.c
··· 409 409 rcu_read_unlock(); 410 410 } 411 411 412 + static int cgroup_storage_map_btf_id; 412 413 const struct bpf_map_ops cgroup_storage_map_ops = { 413 414 .map_alloc = cgroup_storage_map_alloc, 414 415 .map_free = cgroup_storage_map_free, ··· 419 418 .map_delete_elem = cgroup_storage_delete_elem, 420 419 .map_check_btf = cgroup_storage_check_btf, 421 420 .map_seq_show_elem = cgroup_storage_seq_show_elem, 421 + .map_btf_name = "bpf_cgroup_storage_map", 422 + .map_btf_id = &cgroup_storage_map_btf_id, 422 423 }; 423 424 424 425 int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
+3
kernel/bpf/lpm_trie.c
··· 735 735 -EINVAL : 0; 736 736 } 737 737 738 + static int trie_map_btf_id; 738 739 const struct bpf_map_ops trie_map_ops = { 739 740 .map_alloc = trie_alloc, 740 741 .map_free = trie_free, ··· 744 743 .map_update_elem = trie_update_elem, 745 744 .map_delete_elem = trie_delete_elem, 746 745 .map_check_btf = trie_check_btf, 746 + .map_btf_name = "lpm_trie", 747 + .map_btf_id = &trie_map_btf_id, 747 748 };
+6
kernel/bpf/queue_stack_maps.c
··· 262 262 return -EINVAL; 263 263 } 264 264 265 + static int queue_map_btf_id; 265 266 const struct bpf_map_ops queue_map_ops = { 266 267 .map_alloc_check = queue_stack_map_alloc_check, 267 268 .map_alloc = queue_stack_map_alloc, ··· 274 273 .map_pop_elem = queue_map_pop_elem, 275 274 .map_peek_elem = queue_map_peek_elem, 276 275 .map_get_next_key = queue_stack_map_get_next_key, 276 + .map_btf_name = "bpf_queue_stack", 277 + .map_btf_id = &queue_map_btf_id, 277 278 }; 278 279 280 + static int stack_map_btf_id; 279 281 const struct bpf_map_ops stack_map_ops = { 280 282 .map_alloc_check = queue_stack_map_alloc_check, 281 283 .map_alloc = queue_stack_map_alloc, ··· 290 286 .map_pop_elem = stack_map_pop_elem, 291 287 .map_peek_elem = stack_map_peek_elem, 292 288 .map_get_next_key = queue_stack_map_get_next_key, 289 + .map_btf_name = "bpf_queue_stack", 290 + .map_btf_id = &stack_map_btf_id, 293 291 };
+3
kernel/bpf/reuseport_array.c
··· 345 345 return 0; 346 346 } 347 347 348 + static int reuseport_array_map_btf_id; 348 349 const struct bpf_map_ops reuseport_array_ops = { 349 350 .map_alloc_check = reuseport_array_alloc_check, 350 351 .map_alloc = reuseport_array_alloc, ··· 353 352 .map_lookup_elem = reuseport_array_lookup_elem, 354 353 .map_get_next_key = reuseport_array_get_next_key, 355 354 .map_delete_elem = reuseport_array_delete_elem, 355 + .map_btf_name = "reuseport_array", 356 + .map_btf_id = &reuseport_array_map_btf_id, 356 357 };
+3
kernel/bpf/ringbuf.c
··· 294 294 return 0; 295 295 } 296 296 297 + static int ringbuf_map_btf_id; 297 298 const struct bpf_map_ops ringbuf_map_ops = { 298 299 .map_alloc = ringbuf_map_alloc, 299 300 .map_free = ringbuf_map_free, ··· 304 303 .map_update_elem = ringbuf_map_update_elem, 305 304 .map_delete_elem = ringbuf_map_delete_elem, 306 305 .map_get_next_key = ringbuf_map_get_next_key, 306 + .map_btf_name = "bpf_ringbuf_map", 307 + .map_btf_id = &ringbuf_map_btf_id, 307 308 }; 308 309 309 310 /* Given pointer to ring buffer record metadata and struct bpf_ringbuf itself,
+3
kernel/bpf/stackmap.c
··· 613 613 put_callchain_buffers(); 614 614 } 615 615 616 + static int stack_trace_map_btf_id; 616 617 const struct bpf_map_ops stack_trace_map_ops = { 617 618 .map_alloc = stack_map_alloc, 618 619 .map_free = stack_map_free, ··· 622 621 .map_update_elem = stack_map_update_elem, 623 622 .map_delete_elem = stack_map_delete_elem, 624 623 .map_check_btf = map_check_no_btf, 624 + .map_btf_name = "bpf_stack_map", 625 + .map_btf_id = &stack_trace_map_btf_id, 625 626 }; 626 627 627 628 static int __init stack_map_init(void)
+3
net/core/bpf_sk_storage.c
··· 919 919 return -ENOENT; 920 920 } 921 921 922 + static int sk_storage_map_btf_id; 922 923 const struct bpf_map_ops sk_storage_map_ops = { 923 924 .map_alloc_check = bpf_sk_storage_map_alloc_check, 924 925 .map_alloc = bpf_sk_storage_map_alloc, ··· 929 928 .map_update_elem = bpf_fd_sk_storage_update_elem, 930 929 .map_delete_elem = bpf_fd_sk_storage_delete_elem, 931 930 .map_check_btf = bpf_sk_storage_map_check_btf, 931 + .map_btf_name = "bpf_sk_storage_map", 932 + .map_btf_id = &sk_storage_map_btf_id, 932 933 }; 933 934 934 935 const struct bpf_func_proto bpf_sk_storage_get_proto = {
+6
net/core/sock_map.c
··· 643 643 .arg4_type = ARG_ANYTHING, 644 644 }; 645 645 646 + static int sock_map_btf_id; 646 647 const struct bpf_map_ops sock_map_ops = { 647 648 .map_alloc = sock_map_alloc, 648 649 .map_free = sock_map_free, ··· 654 653 .map_lookup_elem = sock_map_lookup, 655 654 .map_release_uref = sock_map_release_progs, 656 655 .map_check_btf = map_check_no_btf, 656 + .map_btf_name = "bpf_stab", 657 + .map_btf_id = &sock_map_btf_id, 657 658 }; 658 659 659 660 struct bpf_shtab_elem { ··· 1179 1176 .arg4_type = ARG_ANYTHING, 1180 1177 }; 1181 1178 1179 + static int sock_hash_map_btf_id; 1182 1180 const struct bpf_map_ops sock_hash_ops = { 1183 1181 .map_alloc = sock_hash_alloc, 1184 1182 .map_free = sock_hash_free, ··· 1190 1186 .map_lookup_elem_sys_only = sock_hash_lookup_sys, 1191 1187 .map_release_uref = sock_hash_release_progs, 1192 1188 .map_check_btf = map_check_no_btf, 1189 + .map_btf_name = "bpf_shtab", 1190 + .map_btf_id = &sock_hash_map_btf_id, 1193 1191 }; 1194 1192 1195 1193 static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
+3
net/xdp/xskmap.c
··· 254 254 spin_unlock_bh(&map->lock); 255 255 } 256 256 257 + static int xsk_map_btf_id; 257 258 const struct bpf_map_ops xsk_map_ops = { 258 259 .map_alloc = xsk_map_alloc, 259 260 .map_free = xsk_map_free, ··· 265 264 .map_update_elem = xsk_map_update_elem, 266 265 .map_delete_elem = xsk_map_delete_elem, 267 266 .map_check_btf = map_check_no_btf, 267 + .map_btf_name = "xsk_map", 268 + .map_btf_id = &xsk_map_btf_id, 268 269 };