Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: Reshuffle some parts of bpf/offload.c

To avoid adding forward declarations in the main patch, shuffle
some code around. No functional changes.

Cc: John Fastabend <john.fastabend@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Willem de Bruijn <willemb@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Anatoly Burakov <anatoly.burakov@intel.com>
Cc: Alexander Lobakin <alexandr.lobakin@intel.com>
Cc: Magnus Karlsson <magnus.karlsson@gmail.com>
Cc: Maryam Tahhan <mtahhan@redhat.com>
Cc: xdp-hints@xdp-project.net
Cc: netdev@vger.kernel.org
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/r/20230119221536.3349901-5-sdf@google.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>

Authored by Stanislav Fomichev; committed by Martin KaFai Lau.
89bbc53a f1fc43d0

+117 -105
kernel/bpf/offload.c
··· 74 74 return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params); 75 75 } 76 76 77 + static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, 78 + struct net_device *netdev) 79 + { 80 + struct bpf_offload_netdev *ondev; 81 + int err; 82 + 83 + ondev = kzalloc(sizeof(*ondev), GFP_KERNEL); 84 + if (!ondev) 85 + return -ENOMEM; 86 + 87 + ondev->netdev = netdev; 88 + ondev->offdev = offdev; 89 + INIT_LIST_HEAD(&ondev->progs); 90 + INIT_LIST_HEAD(&ondev->maps); 91 + 92 + down_write(&bpf_devs_lock); 93 + err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params); 94 + if (err) { 95 + netdev_warn(netdev, "failed to register for BPF offload\n"); 96 + goto err_unlock_free; 97 + } 98 + 99 + list_add(&ondev->offdev_netdevs, &offdev->netdevs); 100 + up_write(&bpf_devs_lock); 101 + return 0; 102 + 103 + err_unlock_free: 104 + up_write(&bpf_devs_lock); 105 + kfree(ondev); 106 + return err; 107 + } 108 + 109 + static void __bpf_prog_offload_destroy(struct bpf_prog *prog) 110 + { 111 + struct bpf_prog_offload *offload = prog->aux->offload; 112 + 113 + if (offload->dev_state) 114 + offload->offdev->ops->destroy(prog); 115 + 116 + /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */ 117 + bpf_prog_free_id(prog, true); 118 + 119 + list_del_init(&offload->offloads); 120 + kfree(offload); 121 + prog->aux->offload = NULL; 122 + } 123 + 124 + static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap, 125 + enum bpf_netdev_command cmd) 126 + { 127 + struct netdev_bpf data = {}; 128 + struct net_device *netdev; 129 + 130 + ASSERT_RTNL(); 131 + 132 + data.command = cmd; 133 + data.offmap = offmap; 134 + /* Caller must make sure netdev is valid */ 135 + netdev = offmap->netdev; 136 + 137 + return netdev->netdev_ops->ndo_bpf(netdev, &data); 138 + } 139 + 140 + static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap) 141 + { 142 + WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE)); 143 + /* Make sure 
BPF_MAP_GET_NEXT_ID can't find this dead map */ 144 + bpf_map_free_id(&offmap->map, true); 145 + list_del_init(&offmap->offloads); 146 + offmap->netdev = NULL; 147 + } 148 + 149 + static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, 150 + struct net_device *netdev) 151 + { 152 + struct bpf_offload_netdev *ondev, *altdev; 153 + struct bpf_offloaded_map *offmap, *mtmp; 154 + struct bpf_prog_offload *offload, *ptmp; 155 + 156 + ASSERT_RTNL(); 157 + 158 + down_write(&bpf_devs_lock); 159 + ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params); 160 + if (WARN_ON(!ondev)) 161 + goto unlock; 162 + 163 + WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params)); 164 + list_del(&ondev->offdev_netdevs); 165 + 166 + /* Try to move the objects to another netdev of the device */ 167 + altdev = list_first_entry_or_null(&offdev->netdevs, 168 + struct bpf_offload_netdev, 169 + offdev_netdevs); 170 + if (altdev) { 171 + list_for_each_entry(offload, &ondev->progs, offloads) 172 + offload->netdev = altdev->netdev; 173 + list_splice_init(&ondev->progs, &altdev->progs); 174 + 175 + list_for_each_entry(offmap, &ondev->maps, offloads) 176 + offmap->netdev = altdev->netdev; 177 + list_splice_init(&ondev->maps, &altdev->maps); 178 + } else { 179 + list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads) 180 + __bpf_prog_offload_destroy(offload->prog); 181 + list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads) 182 + __bpf_map_offload_destroy(offmap); 183 + } 184 + 185 + WARN_ON(!list_empty(&ondev->progs)); 186 + WARN_ON(!list_empty(&ondev->maps)); 187 + kfree(ondev); 188 + unlock: 189 + up_write(&bpf_devs_lock); 190 + } 191 + 77 192 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) 78 193 { 79 194 struct bpf_offload_netdev *ondev; ··· 321 206 up_read(&bpf_devs_lock); 322 207 } 323 208 324 - static void __bpf_prog_offload_destroy(struct bpf_prog *prog) 325 - { 326 - struct bpf_prog_offload *offload = 
prog->aux->offload; 327 - 328 - if (offload->dev_state) 329 - offload->offdev->ops->destroy(prog); 330 - 331 - /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */ 332 - bpf_prog_free_id(prog, true); 333 - 334 - list_del_init(&offload->offloads); 335 - kfree(offload); 336 - prog->aux->offload = NULL; 337 - } 338 - 339 209 void bpf_prog_offload_destroy(struct bpf_prog *prog) 340 210 { 341 211 down_write(&bpf_devs_lock); ··· 440 340 const struct bpf_prog_ops bpf_offload_prog_ops = { 441 341 }; 442 342 443 - static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap, 444 - enum bpf_netdev_command cmd) 445 - { 446 - struct netdev_bpf data = {}; 447 - struct net_device *netdev; 448 - 449 - ASSERT_RTNL(); 450 - 451 - data.command = cmd; 452 - data.offmap = offmap; 453 - /* Caller must make sure netdev is valid */ 454 - netdev = offmap->netdev; 455 - 456 - return netdev->netdev_ops->ndo_bpf(netdev, &data); 457 - } 458 - 459 343 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) 460 344 { 461 345 struct net *net = current->nsproxy->net_ns; ··· 487 403 rtnl_unlock(); 488 404 bpf_map_area_free(offmap); 489 405 return ERR_PTR(err); 490 - } 491 - 492 - static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap) 493 - { 494 - WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE)); 495 - /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */ 496 - bpf_map_free_id(&offmap->map, true); 497 - list_del_init(&offmap->offloads); 498 - offmap->netdev = NULL; 499 406 } 500 407 501 408 void bpf_map_offload_map_free(struct bpf_map *map) ··· 667 592 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, 668 593 struct net_device *netdev) 669 594 { 670 - struct bpf_offload_netdev *ondev; 671 - int err; 672 - 673 - ondev = kzalloc(sizeof(*ondev), GFP_KERNEL); 674 - if (!ondev) 675 - return -ENOMEM; 676 - 677 - ondev->netdev = netdev; 678 - ondev->offdev = offdev; 679 - INIT_LIST_HEAD(&ondev->progs); 680 - 
INIT_LIST_HEAD(&ondev->maps); 681 - 682 - down_write(&bpf_devs_lock); 683 - err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params); 684 - if (err) { 685 - netdev_warn(netdev, "failed to register for BPF offload\n"); 686 - goto err_unlock_free; 687 - } 688 - 689 - list_add(&ondev->offdev_netdevs, &offdev->netdevs); 690 - up_write(&bpf_devs_lock); 691 - return 0; 692 - 693 - err_unlock_free: 694 - up_write(&bpf_devs_lock); 695 - kfree(ondev); 696 - return err; 595 + return __bpf_offload_dev_netdev_register(offdev, netdev); 697 596 } 698 597 EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register); 699 598 700 599 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, 701 600 struct net_device *netdev) 702 601 { 703 - struct bpf_offload_netdev *ondev, *altdev; 704 - struct bpf_offloaded_map *offmap, *mtmp; 705 - struct bpf_prog_offload *offload, *ptmp; 706 - 707 - ASSERT_RTNL(); 708 - 709 - down_write(&bpf_devs_lock); 710 - ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params); 711 - if (WARN_ON(!ondev)) 712 - goto unlock; 713 - 714 - WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params)); 715 - list_del(&ondev->offdev_netdevs); 716 - 717 - /* Try to move the objects to another netdev of the device */ 718 - altdev = list_first_entry_or_null(&offdev->netdevs, 719 - struct bpf_offload_netdev, 720 - offdev_netdevs); 721 - if (altdev) { 722 - list_for_each_entry(offload, &ondev->progs, offloads) 723 - offload->netdev = altdev->netdev; 724 - list_splice_init(&ondev->progs, &altdev->progs); 725 - 726 - list_for_each_entry(offmap, &ondev->maps, offloads) 727 - offmap->netdev = altdev->netdev; 728 - list_splice_init(&ondev->maps, &altdev->maps); 729 - } else { 730 - list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads) 731 - __bpf_prog_offload_destroy(offload->prog); 732 - list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads) 733 - __bpf_map_offload_destroy(offmap); 734 - } 735 - 736 - 
WARN_ON(!list_empty(&ondev->progs)); 737 - WARN_ON(!list_empty(&ondev->maps)); 738 - kfree(ondev); 739 - unlock: 740 - up_write(&bpf_devs_lock); 602 + __bpf_offload_dev_netdev_unregister(offdev, netdev); 741 603 } 742 604 EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister); 743 605