Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-more-factorization-in-cleanup_net-paths'

Eric Dumazet says:

====================
net: more factorization in cleanup_net() paths

This series is inspired by recent syzbot reports hinting at RTNL and
workqueue abuses.

rtnl_lock() is unfair to (single threaded) cleanup_net(), because
many threads can cause contention on it.

This series adds a new (struct pernet_operations) method,
so that cleanup_net() can hold RTNL longer once it finally
acquires it.

It also factorizes unregister_netdevice_many(), to further
reduce stalls in cleanup_net().

Link: https://lore.kernel.org/netdev/CANn89iLJrrJs+6Vc==Un4rVKcpV0Eof4F_4w1_wQGxUCE2FWAg@mail.gmail.com/T/#u
https://lore.kernel.org/netdev/170688415193.5216.10499830272732622816@kwain/
====================

Link: https://lore.kernel.org/r/20240206144313.2050392-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+195 -162
+4 -9
drivers/net/bareudp.c
··· 760 760 unregister_netdevice_queue(bareudp->dev, head); 761 761 } 762 762 763 - static void __net_exit bareudp_exit_batch_net(struct list_head *net_list) 763 + static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list, 764 + struct list_head *dev_kill_list) 764 765 { 765 766 struct net *net; 766 - LIST_HEAD(list); 767 767 768 - rtnl_lock(); 769 768 list_for_each_entry(net, net_list, exit_list) 770 - bareudp_destroy_tunnels(net, &list); 771 - 772 - /* unregister the devices gathered above */ 773 - unregister_netdevice_many(&list); 774 - rtnl_unlock(); 769 + bareudp_destroy_tunnels(net, dev_kill_list); 775 770 } 776 771 777 772 static struct pernet_operations bareudp_net_ops = { 778 773 .init = bareudp_init_net, 779 - .exit_batch = bareudp_exit_batch_net, 774 + .exit_batch_rtnl = bareudp_exit_batch_rtnl, 780 775 .id = &bareudp_net_id, 781 776 .size = sizeof(struct bareudp_net), 782 777 };
+26 -11
drivers/net/bonding/bond_main.c
··· 6416 6416 return 0; 6417 6417 } 6418 6418 6419 - static void __net_exit bond_net_exit_batch(struct list_head *net_list) 6419 + /* According to commit 69b0216ac255 ("bonding: fix bonding_masters 6420 + * race condition in bond unloading") we need to remove sysfs files 6421 + * before we remove our devices (done later in bond_net_exit_batch_rtnl()) 6422 + */ 6423 + static void __net_exit bond_net_pre_exit(struct net *net) 6424 + { 6425 + struct bond_net *bn = net_generic(net, bond_net_id); 6426 + 6427 + bond_destroy_sysfs(bn); 6428 + } 6429 + 6430 + static void __net_exit bond_net_exit_batch_rtnl(struct list_head *net_list, 6431 + struct list_head *dev_kill_list) 6420 6432 { 6421 6433 struct bond_net *bn; 6422 6434 struct net *net; 6423 - LIST_HEAD(list); 6424 - 6425 - list_for_each_entry(net, net_list, exit_list) { 6426 - bn = net_generic(net, bond_net_id); 6427 - bond_destroy_sysfs(bn); 6428 - } 6429 6435 6430 6436 /* Kill off any bonds created after unregistering bond rtnl ops */ 6431 - rtnl_lock(); 6432 6437 list_for_each_entry(net, net_list, exit_list) { 6433 6438 struct bonding *bond, *tmp_bond; 6434 6439 6435 6440 bn = net_generic(net, bond_net_id); 6436 6441 list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) 6437 - unregister_netdevice_queue(bond->dev, &list); 6442 + unregister_netdevice_queue(bond->dev, dev_kill_list); 6438 6443 } 6439 - unregister_netdevice_many(&list); 6440 - rtnl_unlock(); 6444 + } 6445 + 6446 + /* According to commit 23fa5c2caae0 ("bonding: destroy proc directory 6447 + * only after all bonds are gone") bond_destroy_proc_dir() is called 6448 + * after bond_net_exit_batch_rtnl() has completed. 
6449 + */ 6450 + static void __net_exit bond_net_exit_batch(struct list_head *net_list) 6451 + { 6452 + struct bond_net *bn; 6453 + struct net *net; 6441 6454 6442 6455 list_for_each_entry(net, net_list, exit_list) { 6443 6456 bn = net_generic(net, bond_net_id); ··· 6460 6447 6461 6448 static struct pernet_operations bond_net_ops = { 6462 6449 .init = bond_net_init, 6450 + .pre_exit = bond_net_pre_exit, 6451 + .exit_batch_rtnl = bond_net_exit_batch_rtnl, 6463 6452 .exit_batch = bond_net_exit_batch, 6464 6453 .id = &bond_net_id, 6465 6454 .size = sizeof(struct bond_net),
+10 -13
drivers/net/geneve.c
··· 1900 1900 } 1901 1901 } 1902 1902 1903 - static void __net_exit geneve_exit_batch_net(struct list_head *net_list) 1903 + static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list, 1904 + struct list_head *dev_to_kill) 1904 1905 { 1905 1906 struct net *net; 1906 - LIST_HEAD(list); 1907 1907 1908 - rtnl_lock(); 1909 1908 list_for_each_entry(net, net_list, exit_list) 1910 - geneve_destroy_tunnels(net, &list); 1909 + geneve_destroy_tunnels(net, dev_to_kill); 1910 + } 1911 1911 1912 - /* unregister the devices gathered above */ 1913 - unregister_netdevice_many(&list); 1914 - rtnl_unlock(); 1912 + static void __net_exit geneve_exit_net(struct net *net) 1913 + { 1914 + const struct geneve_net *gn = net_generic(net, geneve_net_id); 1915 1915 1916 - list_for_each_entry(net, net_list, exit_list) { 1917 - const struct geneve_net *gn = net_generic(net, geneve_net_id); 1918 - 1919 - WARN_ON_ONCE(!list_empty(&gn->sock_list)); 1920 - } 1916 + WARN_ON_ONCE(!list_empty(&gn->sock_list)); 1921 1917 } 1922 1918 1923 1919 static struct pernet_operations geneve_net_ops = { 1924 1920 .init = geneve_init_net, 1925 - .exit_batch = geneve_exit_batch_net, 1921 + .exit_batch_rtnl = geneve_exit_batch_rtnl, 1922 + .exit = geneve_exit_net, 1926 1923 .id = &geneve_net_id, 1927 1924 .size = sizeof(struct geneve_net), 1928 1925 };
+10 -10
drivers/net/gtp.c
··· 1876 1876 return 0; 1877 1877 } 1878 1878 1879 - static void __net_exit gtp_net_exit(struct net *net) 1879 + static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list, 1880 + struct list_head *dev_to_kill) 1880 1881 { 1881 - struct gtp_net *gn = net_generic(net, gtp_net_id); 1882 - struct gtp_dev *gtp; 1883 - LIST_HEAD(list); 1882 + struct net *net; 1884 1883 1885 - rtnl_lock(); 1886 - list_for_each_entry(gtp, &gn->gtp_dev_list, list) 1887 - gtp_dellink(gtp->dev, &list); 1884 + list_for_each_entry(net, net_list, exit_list) { 1885 + struct gtp_net *gn = net_generic(net, gtp_net_id); 1886 + struct gtp_dev *gtp; 1888 1887 1889 - unregister_netdevice_many(&list); 1890 - rtnl_unlock(); 1888 + list_for_each_entry(gtp, &gn->gtp_dev_list, list) 1889 + gtp_dellink(gtp->dev, dev_to_kill); 1890 + } 1891 1891 } 1892 1892 1893 1893 static struct pernet_operations gtp_net_ops = { 1894 1894 .init = gtp_net_init, 1895 - .exit = gtp_net_exit, 1895 + .exit_batch_rtnl = gtp_net_exit_batch_rtnl, 1896 1896 .id = &gtp_net_id, 1897 1897 .size = sizeof(struct gtp_net), 1898 1898 };
+24 -36
drivers/net/vxlan/vxlan_core.c
··· 4826 4826 NULL); 4827 4827 } 4828 4828 4829 - static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) 4829 + static void __net_exit vxlan_destroy_tunnels(struct vxlan_net *vn, 4830 + struct list_head *dev_to_kill) 4830 4831 { 4831 - struct vxlan_net *vn = net_generic(net, vxlan_net_id); 4832 4832 struct vxlan_dev *vxlan, *next; 4833 - struct net_device *dev, *aux; 4834 4833 4835 - for_each_netdev_safe(net, dev, aux) 4836 - if (dev->rtnl_link_ops == &vxlan_link_ops) 4837 - unregister_netdevice_queue(dev, head); 4838 - 4839 - list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { 4840 - /* If vxlan->dev is in the same netns, it has already been added 4841 - * to the list by the previous loop. 4842 - */ 4843 - if (!net_eq(dev_net(vxlan->dev), net)) 4844 - unregister_netdevice_queue(vxlan->dev, head); 4845 - } 4846 - 4834 + list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) 4835 + vxlan_dellink(vxlan->dev, dev_to_kill); 4847 4836 } 4848 4837 4849 - static void __net_exit vxlan_exit_batch_net(struct list_head *net_list) 4838 + static void __net_exit vxlan_exit_batch_rtnl(struct list_head *net_list, 4839 + struct list_head *dev_to_kill) 4850 4840 { 4851 4841 struct net *net; 4852 - LIST_HEAD(list); 4842 + 4843 + ASSERT_RTNL(); 4844 + list_for_each_entry(net, net_list, exit_list) { 4845 + struct vxlan_net *vn = net_generic(net, vxlan_net_id); 4846 + 4847 + __unregister_nexthop_notifier(net, &vn->nexthop_notifier_block); 4848 + 4849 + vxlan_destroy_tunnels(vn, dev_to_kill); 4850 + } 4851 + } 4852 + 4853 + static void __net_exit vxlan_exit_net(struct net *net) 4854 + { 4855 + struct vxlan_net *vn = net_generic(net, vxlan_net_id); 4853 4856 unsigned int h; 4854 4857 4855 - list_for_each_entry(net, net_list, exit_list) { 4856 - struct vxlan_net *vn = net_generic(net, vxlan_net_id); 4857 - 4858 - unregister_nexthop_notifier(net, &vn->nexthop_notifier_block); 4859 - } 4860 - rtnl_lock(); 4861 - list_for_each_entry(net, net_list, 
exit_list) 4862 - vxlan_destroy_tunnels(net, &list); 4863 - 4864 - unregister_netdevice_many(&list); 4865 - rtnl_unlock(); 4866 - 4867 - list_for_each_entry(net, net_list, exit_list) { 4868 - struct vxlan_net *vn = net_generic(net, vxlan_net_id); 4869 - 4870 - for (h = 0; h < PORT_HASH_SIZE; ++h) 4871 - WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h])); 4872 - } 4858 + for (h = 0; h < PORT_HASH_SIZE; ++h) 4859 + WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h])); 4873 4860 } 4874 4861 4875 4862 static struct pernet_operations vxlan_net_ops = { 4876 4863 .init = vxlan_init_net, 4877 - .exit_batch = vxlan_exit_batch_net, 4864 + .exit_batch_rtnl = vxlan_exit_batch_rtnl, 4865 + .exit = vxlan_exit_net, 4878 4866 .id = &vxlan_net_id, 4879 4867 .size = sizeof(struct vxlan_net), 4880 4868 };
+2 -1
include/net/ip_tunnels.h
··· 284 284 struct rtnl_link_ops *ops, char *devname); 285 285 286 286 void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id, 287 - struct rtnl_link_ops *ops); 287 + struct rtnl_link_ops *ops, 288 + struct list_head *dev_to_kill); 288 289 289 290 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, 290 291 const struct iphdr *tnl_params, const u8 protocol);
+3
include/net/net_namespace.h
··· 448 448 void (*pre_exit)(struct net *net); 449 449 void (*exit)(struct net *net); 450 450 void (*exit_batch)(struct list_head *net_exit_list); 451 + /* Following method is called with RTNL held. */ 452 + void (*exit_batch_rtnl)(struct list_head *net_exit_list, 453 + struct list_head *dev_kill_list); 451 454 unsigned int *id; 452 455 size_t size; 453 456 };
+1
include/net/nexthop.h
··· 218 218 219 219 int register_nexthop_notifier(struct net *net, struct notifier_block *nb, 220 220 struct netlink_ext_ack *extack); 221 + int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb); 221 222 int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb); 222 223 void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap); 223 224 void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
+5 -10
net/bridge/br.c
··· 356 356 clear_bit(opt, &br->options); 357 357 } 358 358 359 - static void __net_exit br_net_exit_batch(struct list_head *net_list) 359 + static void __net_exit br_net_exit_batch_rtnl(struct list_head *net_list, 360 + struct list_head *dev_to_kill) 360 361 { 361 362 struct net_device *dev; 362 363 struct net *net; 363 - LIST_HEAD(list); 364 364 365 - rtnl_lock(); 366 - 365 + ASSERT_RTNL(); 367 366 list_for_each_entry(net, net_list, exit_list) 368 367 for_each_netdev(net, dev) 369 368 if (netif_is_bridge_master(dev)) 370 - br_dev_delete(dev, &list); 371 - 372 - unregister_netdevice_many(&list); 373 - 374 - rtnl_unlock(); 369 + br_dev_delete(dev, dev_to_kill); 375 370 } 376 371 377 372 static struct pernet_operations br_net_ops = { 378 - .exit_batch = br_net_exit_batch, 373 + .exit_batch_rtnl = br_net_exit_batch_rtnl, 379 374 }; 380 375 381 376 static const struct stp_proto br_stp_proto = {
+30 -1
net/core/net_namespace.c
··· 318 318 { 319 319 /* Must be called with pernet_ops_rwsem held */ 320 320 const struct pernet_operations *ops, *saved_ops; 321 - int error = 0; 322 321 LIST_HEAD(net_exit_list); 322 + LIST_HEAD(dev_kill_list); 323 + int error = 0; 323 324 324 325 refcount_set(&net->ns.count, 1); 325 326 ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt"); ··· 357 356 ops_pre_exit_list(ops, &net_exit_list); 358 357 359 358 synchronize_rcu(); 359 + 360 + ops = saved_ops; 361 + rtnl_lock(); 362 + list_for_each_entry_continue_reverse(ops, &pernet_list, list) { 363 + if (ops->exit_batch_rtnl) 364 + ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list); 365 + } 366 + unregister_netdevice_many(&dev_kill_list); 367 + rtnl_unlock(); 360 368 361 369 ops = saved_ops; 362 370 list_for_each_entry_continue_reverse(ops, &pernet_list, list) ··· 583 573 struct net *net, *tmp, *last; 584 574 struct llist_node *net_kill_list; 585 575 LIST_HEAD(net_exit_list); 576 + LIST_HEAD(dev_kill_list); 586 577 587 578 /* Atomically snapshot the list of namespaces to cleanup */ 588 579 net_kill_list = llist_del_all(&cleanup_list); ··· 623 612 * Also the pre_exit() and exit() methods need this barrier. 
624 613 */ 625 614 synchronize_rcu(); 615 + 616 + rtnl_lock(); 617 + list_for_each_entry_reverse(ops, &pernet_list, list) { 618 + if (ops->exit_batch_rtnl) 619 + ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list); 620 + } 621 + unregister_netdevice_many(&dev_kill_list); 622 + rtnl_unlock(); 626 623 627 624 /* Run all of the network namespace exit methods */ 628 625 list_for_each_entry_reverse(ops, &pernet_list, list) ··· 1212 1193 { 1213 1194 ops_pre_exit_list(ops, net_exit_list); 1214 1195 synchronize_rcu(); 1196 + 1197 + if (ops->exit_batch_rtnl) { 1198 + LIST_HEAD(dev_kill_list); 1199 + 1200 + rtnl_lock(); 1201 + ops->exit_batch_rtnl(net_exit_list, &dev_kill_list); 1202 + unregister_netdevice_many(&dev_kill_list); 1203 + rtnl_unlock(); 1204 + } 1215 1205 ops_exit_list(ops, net_exit_list); 1206 + 1216 1207 ops_free_list(ops, net_exit_list); 1217 1208 } 1218 1209
+15 -9
net/ipv4/ip_gre.c
··· 1025 1025 return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL); 1026 1026 } 1027 1027 1028 - static void __net_exit ipgre_exit_batch_net(struct list_head *list_net) 1028 + static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net, 1029 + struct list_head *dev_to_kill) 1029 1030 { 1030 - ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops); 1031 + ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops, 1032 + dev_to_kill); 1031 1033 } 1032 1034 1033 1035 static struct pernet_operations ipgre_net_ops = { 1034 1036 .init = ipgre_init_net, 1035 - .exit_batch = ipgre_exit_batch_net, 1037 + .exit_batch_rtnl = ipgre_exit_batch_rtnl, 1036 1038 .id = &ipgre_net_id, 1037 1039 .size = sizeof(struct ip_tunnel_net), 1038 1040 }; ··· 1699 1697 return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0"); 1700 1698 } 1701 1699 1702 - static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net) 1700 + static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net, 1701 + struct list_head *dev_to_kill) 1703 1702 { 1704 - ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops); 1703 + ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops, 1704 + dev_to_kill); 1705 1705 } 1706 1706 1707 1707 static struct pernet_operations ipgre_tap_net_ops = { 1708 1708 .init = ipgre_tap_init_net, 1709 - .exit_batch = ipgre_tap_exit_batch_net, 1709 + .exit_batch_rtnl = ipgre_tap_exit_batch_rtnl, 1710 1710 .id = &gre_tap_net_id, 1711 1711 .size = sizeof(struct ip_tunnel_net), 1712 1712 }; ··· 1719 1715 &erspan_link_ops, "erspan0"); 1720 1716 } 1721 1717 1722 - static void __net_exit erspan_exit_batch_net(struct list_head *net_list) 1718 + static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list, 1719 + struct list_head *dev_to_kill) 1723 1720 { 1724 - ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops); 1721 + ip_tunnel_delete_nets(net_list, erspan_net_id, 
&erspan_link_ops, 1722 + dev_to_kill); 1725 1723 } 1726 1724 1727 1725 static struct pernet_operations erspan_net_ops = { 1728 1726 .init = erspan_init_net, 1729 - .exit_batch = erspan_exit_batch_net, 1727 + .exit_batch_rtnl = erspan_exit_batch_rtnl, 1730 1728 .id = &erspan_net_id, 1731 1729 .size = sizeof(struct ip_tunnel_net), 1732 1730 };
+4 -6
net/ipv4/ip_tunnel.c
··· 1130 1130 } 1131 1131 1132 1132 void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id, 1133 - struct rtnl_link_ops *ops) 1133 + struct rtnl_link_ops *ops, 1134 + struct list_head *dev_to_kill) 1134 1135 { 1135 1136 struct ip_tunnel_net *itn; 1136 1137 struct net *net; 1137 - LIST_HEAD(list); 1138 1138 1139 - rtnl_lock(); 1139 + ASSERT_RTNL(); 1140 1140 list_for_each_entry(net, net_list, exit_list) { 1141 1141 itn = net_generic(net, id); 1142 - ip_tunnel_destroy(net, itn, &list, ops); 1142 + ip_tunnel_destroy(net, itn, dev_to_kill, ops); 1143 1143 } 1144 - unregister_netdevice_many(&list); 1145 - rtnl_unlock(); 1146 1144 } 1147 1145 EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets); 1148 1146
+5 -3
net/ipv4/ip_vti.c
··· 510 510 return 0; 511 511 } 512 512 513 - static void __net_exit vti_exit_batch_net(struct list_head *list_net) 513 + static void __net_exit vti_exit_batch_rtnl(struct list_head *list_net, 514 + struct list_head *dev_to_kill) 514 515 { 515 - ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops); 516 + ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops, 517 + dev_to_kill); 516 518 } 517 519 518 520 static struct pernet_operations vti_net_ops = { 519 521 .init = vti_init_net, 520 - .exit_batch = vti_exit_batch_net, 522 + .exit_batch_rtnl = vti_exit_batch_rtnl, 521 523 .id = &vti_net_id, 522 524 .size = sizeof(struct ip_tunnel_net), 523 525 };
+5 -3
net/ipv4/ipip.c
··· 592 592 return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0"); 593 593 } 594 594 595 - static void __net_exit ipip_exit_batch_net(struct list_head *list_net) 595 + static void __net_exit ipip_exit_batch_rtnl(struct list_head *list_net, 596 + struct list_head *dev_to_kill) 596 597 { 597 - ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops); 598 + ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops, 599 + dev_to_kill); 598 600 } 599 601 600 602 static struct pernet_operations ipip_net_ops = { 601 603 .init = ipip_init_net, 602 - .exit_batch = ipip_exit_batch_net, 604 + .exit_batch_rtnl = ipip_exit_batch_rtnl, 603 605 .id = &ipip_net_id, 604 606 .size = sizeof(struct ip_tunnel_net), 605 607 };
+25 -13
net/ipv4/nexthop.c
··· 3631 3631 } 3632 3632 EXPORT_SYMBOL(register_nexthop_notifier); 3633 3633 3634 + int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb) 3635 + { 3636 + int err; 3637 + 3638 + err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain, 3639 + nb); 3640 + if (!err) 3641 + nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL); 3642 + return err; 3643 + } 3644 + EXPORT_SYMBOL(__unregister_nexthop_notifier); 3645 + 3634 3646 int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb) 3635 3647 { 3636 3648 int err; 3637 3649 3638 3650 rtnl_lock(); 3639 - err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain, 3640 - nb); 3641 - if (err) 3642 - goto unlock; 3643 - nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL); 3644 - unlock: 3651 + err = __unregister_nexthop_notifier(net, nb); 3645 3652 rtnl_unlock(); 3646 3653 return err; 3647 3654 } ··· 3744 3737 } 3745 3738 EXPORT_SYMBOL(nexthop_res_grp_activity_update); 3746 3739 3747 - static void __net_exit nexthop_net_exit_batch(struct list_head *net_list) 3740 + static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list, 3741 + struct list_head *dev_to_kill) 3748 3742 { 3749 3743 struct net *net; 3750 3744 3751 - rtnl_lock(); 3752 - list_for_each_entry(net, net_list, exit_list) { 3745 + ASSERT_RTNL(); 3746 + list_for_each_entry(net, net_list, exit_list) 3753 3747 flush_all_nexthops(net); 3754 - kfree(net->nexthop.devhash); 3755 - } 3756 - rtnl_unlock(); 3748 + } 3749 + 3750 + static void __net_exit nexthop_net_exit(struct net *net) 3751 + { 3752 + kfree(net->nexthop.devhash); 3753 + net->nexthop.devhash = NULL; 3757 3754 } 3758 3755 3759 3756 static int __net_init nexthop_net_init(struct net *net) ··· 3775 3764 3776 3765 static struct pernet_operations nexthop_net_ops = { 3777 3766 .init = nexthop_net_init, 3778 - .exit_batch = nexthop_net_exit_batch, 3767 + .exit = nexthop_net_exit, 3768 + .exit_batch_rtnl = nexthop_net_exit_batch_rtnl, 3779 3769 
}; 3780 3770 3781 3771 static int __init nexthop_init(void)
+5 -7
net/ipv6/ip6_gre.c
··· 1632 1632 return err; 1633 1633 } 1634 1634 1635 - static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list) 1635 + static void __net_exit ip6gre_exit_batch_rtnl(struct list_head *net_list, 1636 + struct list_head *dev_to_kill) 1636 1637 { 1637 1638 struct net *net; 1638 - LIST_HEAD(list); 1639 1639 1640 - rtnl_lock(); 1640 + ASSERT_RTNL(); 1641 1641 list_for_each_entry(net, net_list, exit_list) 1642 - ip6gre_destroy_tunnels(net, &list); 1643 - unregister_netdevice_many(&list); 1644 - rtnl_unlock(); 1642 + ip6gre_destroy_tunnels(net, dev_to_kill); 1645 1643 } 1646 1644 1647 1645 static struct pernet_operations ip6gre_net_ops = { 1648 1646 .init = ip6gre_init_net, 1649 - .exit_batch = ip6gre_exit_batch_net, 1647 + .exit_batch_rtnl = ip6gre_exit_batch_rtnl, 1650 1648 .id = &ip6gre_net_id, 1651 1649 .size = sizeof(struct ip6gre_net), 1652 1650 };
+5 -7
net/ipv6/ip6_tunnel.c
··· 2282 2282 return err; 2283 2283 } 2284 2284 2285 - static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list) 2285 + static void __net_exit ip6_tnl_exit_batch_rtnl(struct list_head *net_list, 2286 + struct list_head *dev_to_kill) 2286 2287 { 2287 2288 struct net *net; 2288 - LIST_HEAD(list); 2289 2289 2290 - rtnl_lock(); 2290 + ASSERT_RTNL(); 2291 2291 list_for_each_entry(net, net_list, exit_list) 2292 - ip6_tnl_destroy_tunnels(net, &list); 2293 - unregister_netdevice_many(&list); 2294 - rtnl_unlock(); 2292 + ip6_tnl_destroy_tunnels(net, dev_to_kill); 2295 2293 } 2296 2294 2297 2295 static struct pernet_operations ip6_tnl_net_ops = { 2298 2296 .init = ip6_tnl_init_net, 2299 - .exit_batch = ip6_tnl_exit_batch_net, 2297 + .exit_batch_rtnl = ip6_tnl_exit_batch_rtnl, 2300 2298 .id = &ip6_tnl_net_id, 2301 2299 .size = sizeof(struct ip6_tnl_net), 2302 2300 };
+5 -7
net/ipv6/ip6_vti.c
··· 1174 1174 return err; 1175 1175 } 1176 1176 1177 - static void __net_exit vti6_exit_batch_net(struct list_head *net_list) 1177 + static void __net_exit vti6_exit_batch_rtnl(struct list_head *net_list, 1178 + struct list_head *dev_to_kill) 1178 1179 { 1179 1180 struct vti6_net *ip6n; 1180 1181 struct net *net; 1181 - LIST_HEAD(list); 1182 1182 1183 - rtnl_lock(); 1183 + ASSERT_RTNL(); 1184 1184 list_for_each_entry(net, net_list, exit_list) { 1185 1185 ip6n = net_generic(net, vti6_net_id); 1186 - vti6_destroy_tunnels(ip6n, &list); 1186 + vti6_destroy_tunnels(ip6n, dev_to_kill); 1187 1187 } 1188 - unregister_netdevice_many(&list); 1189 - rtnl_unlock(); 1190 1188 } 1191 1189 1192 1190 static struct pernet_operations vti6_net_ops = { 1193 1191 .init = vti6_init_net, 1194 - .exit_batch = vti6_exit_batch_net, 1192 + .exit_batch_rtnl = vti6_exit_batch_rtnl, 1195 1193 .id = &vti6_net_id, 1196 1194 .size = sizeof(struct vti6_net), 1197 1195 };
+5 -8
net/ipv6/sit.c
··· 1875 1875 return err; 1876 1876 } 1877 1877 1878 - static void __net_exit sit_exit_batch_net(struct list_head *net_list) 1878 + static void __net_exit sit_exit_batch_rtnl(struct list_head *net_list, 1879 + struct list_head *dev_to_kill) 1879 1880 { 1880 - LIST_HEAD(list); 1881 1881 struct net *net; 1882 1882 1883 - rtnl_lock(); 1883 + ASSERT_RTNL(); 1884 1884 list_for_each_entry(net, net_list, exit_list) 1885 - sit_destroy_tunnels(net, &list); 1886 - 1887 - unregister_netdevice_many(&list); 1888 - rtnl_unlock(); 1885 + sit_destroy_tunnels(net, dev_to_kill); 1889 1886 } 1890 1887 1891 1888 static struct pernet_operations sit_net_ops = { 1892 1889 .init = sit_init_net, 1893 - .exit_batch = sit_exit_batch_net, 1890 + .exit_batch_rtnl = sit_exit_batch_rtnl, 1894 1891 .id = &sit_net_id, 1895 1892 .size = sizeof(struct sit_net), 1896 1893 };
+6 -8
net/xfrm/xfrm_interface_core.c
··· 957 957 .get_link_net = xfrmi_get_link_net, 958 958 }; 959 959 960 - static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list) 960 + static void __net_exit xfrmi_exit_batch_rtnl(struct list_head *net_exit_list, 961 + struct list_head *dev_to_kill) 961 962 { 962 963 struct net *net; 963 - LIST_HEAD(list); 964 964 965 - rtnl_lock(); 965 + ASSERT_RTNL(); 966 966 list_for_each_entry(net, net_exit_list, exit_list) { 967 967 struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); 968 968 struct xfrm_if __rcu **xip; ··· 973 973 for (xip = &xfrmn->xfrmi[i]; 974 974 (xi = rtnl_dereference(*xip)) != NULL; 975 975 xip = &xi->next) 976 - unregister_netdevice_queue(xi->dev, &list); 976 + unregister_netdevice_queue(xi->dev, dev_to_kill); 977 977 } 978 978 xi = rtnl_dereference(xfrmn->collect_md_xfrmi); 979 979 if (xi) 980 - unregister_netdevice_queue(xi->dev, &list); 980 + unregister_netdevice_queue(xi->dev, dev_to_kill); 981 981 } 982 - unregister_netdevice_many(&list); 983 - rtnl_unlock(); 984 982 } 985 983 986 984 static struct pernet_operations xfrmi_net_ops = { 987 - .exit_batch = xfrmi_exit_batch_net, 985 + .exit_batch_rtnl = xfrmi_exit_batch_rtnl, 988 986 .id = &xfrmi_net_id, 989 987 .size = sizeof(struct xfrmi_net), 990 988 };