Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: core: add generic lockdep keys

Some interface types could be nested.
(VLAN, BONDING, TEAM, MACSEC, MACVLAN, IPVLAN, VIRT_WIFI, VXLAN, etc..)
These interface types should set a lockdep class because, without a lockdep
class key, lockdep always warns about nonexistent circular locking.

In the current code, these interfaces have their own lockdep class keys and
manage them themselves, so there is a lot of duplicated code around
/drivers/net and /net/.
This patch adds new generic lockdep keys and some helper functions for it.

This patch does below changes.
a) Add lockdep class keys in struct net_device
- qdisc_running, xmit, addr_list, qdisc_busylock
- these keys are used as dynamic lockdep key.
b) When net_device is being allocated, lockdep keys are registered.
- alloc_netdev_mqs()
c) When net_device is being freed, lockdep keys are unregistered.
- free_netdev()
d) Add generic lockdep key helper function
- netdev_register_lockdep_key()
- netdev_unregister_lockdep_key()
- netdev_update_lockdep_key()
e) Remove unnecessary generic lockdep macro and functions
f) Remove unnecessary lockdep code from each interface.

After this patch, each interface module no longer needs to maintain
its own lockdep keys.

Signed-off-by: Taehee Yoo <ap420073@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Taehee Yoo and committed by
David S. Miller
ab92d68f 5343da4c

+63 -356
-1
drivers/net/bonding/bond_main.c
··· 4769 4769 return -ENOMEM; 4770 4770 4771 4771 bond->nest_level = SINGLE_DEPTH_NESTING; 4772 - netdev_lockdep_set_classes(bond_dev); 4773 4772 4774 4773 list_add_tail(&bond->bond_list, &bn->dev_list); 4775 4774
-18
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
··· 299 299 nfp_port_free(repr->port); 300 300 } 301 301 302 - static struct lock_class_key nfp_repr_netdev_xmit_lock_key; 303 - static struct lock_class_key nfp_repr_netdev_addr_lock_key; 304 - 305 - static void nfp_repr_set_lockdep_class_one(struct net_device *dev, 306 - struct netdev_queue *txq, 307 - void *_unused) 308 - { 309 - lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key); 310 - } 311 - 312 - static void nfp_repr_set_lockdep_class(struct net_device *dev) 313 - { 314 - lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key); 315 - netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL); 316 - } 317 - 318 302 int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, 319 303 u32 cmsg_port_id, struct nfp_port *port, 320 304 struct net_device *pf_netdev) ··· 307 323 struct nfp_net *nn = netdev_priv(pf_netdev); 308 324 u32 repr_cap = nn->tlv_caps.repr_cap; 309 325 int err; 310 - 311 - nfp_repr_set_lockdep_class(netdev); 312 326 313 327 repr->port = port; 314 328 repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
-22
drivers/net/hamradio/bpqether.c
··· 107 107 108 108 static LIST_HEAD(bpq_devices); 109 109 110 - /* 111 - * bpqether network devices are paired with ethernet devices below them, so 112 - * form a special "super class" of normal ethernet devices; split their locks 113 - * off into a separate class since they always nest. 114 - */ 115 - static struct lock_class_key bpq_netdev_xmit_lock_key; 116 - static struct lock_class_key bpq_netdev_addr_lock_key; 117 - 118 - static void bpq_set_lockdep_class_one(struct net_device *dev, 119 - struct netdev_queue *txq, 120 - void *_unused) 121 - { 122 - lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key); 123 - } 124 - 125 - static void bpq_set_lockdep_class(struct net_device *dev) 126 - { 127 - lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key); 128 - netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL); 129 - } 130 - 131 110 /* ------------------------------------------------------------------------ */ 132 111 133 112 ··· 477 498 err = register_netdevice(ndev); 478 499 if (err) 479 500 goto error; 480 - bpq_set_lockdep_class(ndev); 481 501 482 502 /* List protected by RTNL */ 483 503 list_add_rcu(&bpq->bpq_list, &bpq_devices);
-2
drivers/net/hyperv/netvsc_drv.c
··· 2335 2335 NETIF_F_HW_VLAN_CTAG_RX; 2336 2336 net->vlan_features = net->features; 2337 2337 2338 - netdev_lockdep_set_classes(net); 2339 - 2340 2338 /* MTU range: 68 - 1500 or 65521 */ 2341 2339 net->min_mtu = NETVSC_MTU_MIN; 2342 2340 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
-2
drivers/net/ipvlan/ipvlan_main.c
··· 131 131 dev->gso_max_segs = phy_dev->gso_max_segs; 132 132 dev->hard_header_len = phy_dev->hard_header_len; 133 133 134 - netdev_lockdep_set_classes(dev); 135 - 136 134 ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats); 137 135 if (!ipvlan->pcpu_stats) 138 136 return -ENOMEM;
-5
drivers/net/macsec.c
··· 2750 2750 2751 2751 #define MACSEC_FEATURES \ 2752 2752 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 2753 - static struct lock_class_key macsec_netdev_addr_lock_key; 2754 2753 2755 2754 static int macsec_dev_init(struct net_device *dev) 2756 2755 { ··· 3263 3264 dev_hold(real_dev); 3264 3265 3265 3266 macsec->nest_level = dev_get_nest_level(real_dev) + 1; 3266 - netdev_lockdep_set_classes(dev); 3267 - lockdep_set_class_and_subclass(&dev->addr_list_lock, 3268 - &macsec_netdev_addr_lock_key, 3269 - macsec_get_nest_level(dev)); 3270 3267 3271 3268 err = netdev_upper_dev_link(real_dev, dev, extack); 3272 3269 if (err < 0)
-12
drivers/net/macvlan.c
··· 852 852 * "super class" of normal network devices; split their locks off into a 853 853 * separate class since they always nest. 854 854 */ 855 - static struct lock_class_key macvlan_netdev_addr_lock_key; 856 - 857 855 #define ALWAYS_ON_OFFLOADS \ 858 856 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \ 859 857 NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL) ··· 872 874 return ((struct macvlan_dev *)netdev_priv(dev))->nest_level; 873 875 } 874 876 875 - static void macvlan_set_lockdep_class(struct net_device *dev) 876 - { 877 - netdev_lockdep_set_classes(dev); 878 - lockdep_set_class_and_subclass(&dev->addr_list_lock, 879 - &macvlan_netdev_addr_lock_key, 880 - macvlan_get_nest_level(dev)); 881 - } 882 - 883 877 static int macvlan_init(struct net_device *dev) 884 878 { 885 879 struct macvlan_dev *vlan = netdev_priv(dev); ··· 889 899 dev->gso_max_size = lowerdev->gso_max_size; 890 900 dev->gso_max_segs = lowerdev->gso_max_segs; 891 901 dev->hard_header_len = lowerdev->hard_header_len; 892 - 893 - macvlan_set_lockdep_class(dev); 894 902 895 903 vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); 896 904 if (!vlan->pcpu_stats)
-2
drivers/net/ppp/ppp_generic.c
··· 1324 1324 { 1325 1325 struct ppp *ppp; 1326 1326 1327 - netdev_lockdep_set_classes(dev); 1328 - 1329 1327 ppp = netdev_priv(dev); 1330 1328 /* Let the netdevice take a reference on the ppp file. This ensures 1331 1329 * that ppp_destroy_interface() won't run before the device gets
-2
drivers/net/team/team.c
··· 1642 1642 goto err_options_register; 1643 1643 netif_carrier_off(dev); 1644 1644 1645 - netdev_lockdep_set_classes(dev); 1646 - 1647 1645 return 0; 1648 1646 1649 1647 err_options_register:
-1
drivers/net/vrf.c
··· 865 865 866 866 /* similarly, oper state is irrelevant; set to up to avoid confusion */ 867 867 dev->operstate = IF_OPER_UP; 868 - netdev_lockdep_set_classes(dev); 869 868 return 0; 870 869 871 870 out_rth:
-25
drivers/net/wireless/intersil/hostap/hostap_hw.c
··· 3041 3041 } 3042 3042 } 3043 3043 3044 - 3045 - /* 3046 - * HostAP uses two layers of net devices, where the inner 3047 - * layer gets called all the time from the outer layer. 3048 - * This is a natural nesting, which needs a split lock type. 3049 - */ 3050 - static struct lock_class_key hostap_netdev_xmit_lock_key; 3051 - static struct lock_class_key hostap_netdev_addr_lock_key; 3052 - 3053 - static void prism2_set_lockdep_class_one(struct net_device *dev, 3054 - struct netdev_queue *txq, 3055 - void *_unused) 3056 - { 3057 - lockdep_set_class(&txq->_xmit_lock, 3058 - &hostap_netdev_xmit_lock_key); 3059 - } 3060 - 3061 - static void prism2_set_lockdep_class(struct net_device *dev) 3062 - { 3063 - lockdep_set_class(&dev->addr_list_lock, 3064 - &hostap_netdev_addr_lock_key); 3065 - netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL); 3066 - } 3067 - 3068 3044 static struct net_device * 3069 3045 prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx, 3070 3046 struct device *sdev) ··· 3199 3223 if (ret >= 0) 3200 3224 ret = register_netdevice(dev); 3201 3225 3202 - prism2_set_lockdep_class(dev); 3203 3226 rtnl_unlock(); 3204 3227 if (ret < 0) { 3205 3228 printk(KERN_WARNING "%s: register netdevice failed!\n",
+13 -22
include/linux/netdevice.h
··· 925 925 struct devlink; 926 926 struct tlsdev_ops; 927 927 928 + 928 929 /* 929 930 * This structure defines the management hooks for network devices. 930 931 * The following hooks can be defined; unless noted otherwise, they are ··· 1761 1760 * @phydev: Physical device may attach itself 1762 1761 * for hardware timestamping 1763 1762 * @sfp_bus: attached &struct sfp_bus structure. 1764 - * 1765 - * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock 1766 - * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount 1763 + * @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock 1764 + spinlock 1765 + * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount 1766 + * @qdisc_xmit_lock_key: lockdep class annotating 1767 + * netdev_queue->_xmit_lock spinlock 1768 + * @addr_list_lock_key: lockdep class annotating 1769 + * net_device->addr_list_lock spinlock 1767 1770 * 1768 1771 * @proto_down: protocol port state information can be sent to the 1769 1772 * switch driver and used to set the phys state of the ··· 2054 2049 #endif 2055 2050 struct phy_device *phydev; 2056 2051 struct sfp_bus *sfp_bus; 2057 - struct lock_class_key *qdisc_tx_busylock; 2058 - struct lock_class_key *qdisc_running_key; 2052 + struct lock_class_key qdisc_tx_busylock_key; 2053 + struct lock_class_key qdisc_running_key; 2054 + struct lock_class_key qdisc_xmit_lock_key; 2055 + struct lock_class_key addr_list_lock_key; 2059 2056 bool proto_down; 2060 2057 unsigned wol_enabled:1; 2061 2058 }; ··· 2133 2126 2134 2127 for (i = 0; i < dev->num_tx_queues; i++) 2135 2128 f(dev, &dev->_tx[i], arg); 2136 - } 2137 - 2138 - #define netdev_lockdep_set_classes(dev) \ 2139 - { \ 2140 - static struct lock_class_key qdisc_tx_busylock_key; \ 2141 - static struct lock_class_key qdisc_running_key; \ 2142 - static struct lock_class_key qdisc_xmit_lock_key; \ 2143 - static struct lock_class_key dev_addr_list_lock_key; \ 2144 - unsigned int i; \ 2145 - \ 2146 - 
(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ 2147 - (dev)->qdisc_running_key = &qdisc_running_key; \ 2148 - lockdep_set_class(&(dev)->addr_list_lock, \ 2149 - &dev_addr_list_lock_key); \ 2150 - for (i = 0; i < (dev)->num_tx_queues; i++) \ 2151 - lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ 2152 - &qdisc_xmit_lock_key); \ 2153 2129 } 2154 2130 2155 2131 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, ··· 3133 3143 } 3134 3144 3135 3145 void netif_tx_stop_all_queues(struct net_device *dev); 3146 + void netdev_update_lockdep_key(struct net_device *dev); 3136 3147 3137 3148 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) 3138 3149 {
-27
net/8021q/vlan_dev.c
··· 489 489 dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); 490 490 } 491 491 492 - /* 493 - * vlan network devices have devices nesting below it, and are a special 494 - * "super class" of normal network devices; split their locks off into a 495 - * separate class since they always nest. 496 - */ 497 - static struct lock_class_key vlan_netdev_xmit_lock_key; 498 - static struct lock_class_key vlan_netdev_addr_lock_key; 499 - 500 - static void vlan_dev_set_lockdep_one(struct net_device *dev, 501 - struct netdev_queue *txq, 502 - void *_subclass) 503 - { 504 - lockdep_set_class_and_subclass(&txq->_xmit_lock, 505 - &vlan_netdev_xmit_lock_key, 506 - *(int *)_subclass); 507 - } 508 - 509 - static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass) 510 - { 511 - lockdep_set_class_and_subclass(&dev->addr_list_lock, 512 - &vlan_netdev_addr_lock_key, 513 - subclass); 514 - netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass); 515 - } 516 - 517 492 static int vlan_dev_get_lock_subclass(struct net_device *dev) 518 493 { 519 494 return vlan_dev_priv(dev)->nest_level; ··· 583 608 dev->netdev_ops = &vlan_netdev_ops; 584 609 585 610 SET_NETDEV_DEVTYPE(dev, &vlan_type); 586 - 587 - vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev)); 588 611 589 612 vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); 590 613 if (!vlan->vlan_pcpu_stats)
-32
net/batman-adv/soft-interface.c
··· 740 740 return 0; 741 741 } 742 742 743 - /* batman-adv network devices have devices nesting below it and are a special 744 - * "super class" of normal network devices; split their locks off into a 745 - * separate class since they always nest. 746 - */ 747 - static struct lock_class_key batadv_netdev_xmit_lock_key; 748 - static struct lock_class_key batadv_netdev_addr_lock_key; 749 - 750 - /** 751 - * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue 752 - * @dev: device which owns the tx queue 753 - * @txq: tx queue to modify 754 - * @_unused: always NULL 755 - */ 756 - static void batadv_set_lockdep_class_one(struct net_device *dev, 757 - struct netdev_queue *txq, 758 - void *_unused) 759 - { 760 - lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key); 761 - } 762 - 763 - /** 764 - * batadv_set_lockdep_class() - Set txq and addr_list lockdep class 765 - * @dev: network device to modify 766 - */ 767 - static void batadv_set_lockdep_class(struct net_device *dev) 768 - { 769 - lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key); 770 - netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL); 771 - } 772 - 773 743 /** 774 744 * batadv_softif_init_late() - late stage initialization of soft interface 775 745 * @dev: registered network device to modify ··· 752 782 u32 random_seqno; 753 783 int ret; 754 784 size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM; 755 - 756 - batadv_set_lockdep_class(dev); 757 785 758 786 bat_priv = netdev_priv(dev); 759 787 bat_priv->soft_iface = dev;
-8
net/bluetooth/6lowpan.c
··· 571 571 return err < 0 ? NET_XMIT_DROP : err; 572 572 } 573 573 574 - static int bt_dev_init(struct net_device *dev) 575 - { 576 - netdev_lockdep_set_classes(dev); 577 - 578 - return 0; 579 - } 580 - 581 574 static const struct net_device_ops netdev_ops = { 582 - .ndo_init = bt_dev_init, 583 575 .ndo_start_xmit = bt_xmit, 584 576 }; 585 577
-8
net/bridge/br_device.c
··· 24 24 const struct nf_br_ops __rcu *nf_br_ops __read_mostly; 25 25 EXPORT_SYMBOL_GPL(nf_br_ops); 26 26 27 - static struct lock_class_key bridge_netdev_addr_lock_key; 28 - 29 27 /* net device transmit always called with BH disabled */ 30 28 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) 31 29 { ··· 106 108 return NETDEV_TX_OK; 107 109 } 108 110 109 - static void br_set_lockdep_class(struct net_device *dev) 110 - { 111 - lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key); 112 - } 113 - 114 111 static int br_dev_init(struct net_device *dev) 115 112 { 116 113 struct net_bridge *br = netdev_priv(dev); ··· 143 150 br_mdb_hash_fini(br); 144 151 br_fdb_hash_fini(br); 145 152 } 146 - br_set_lockdep_class(dev); 147 153 148 154 return err; 149 155 }
+43 -84
net/core/dev.c
··· 277 277 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 278 278 EXPORT_PER_CPU_SYMBOL(softnet_data); 279 279 280 - #ifdef CONFIG_LOCKDEP 281 - /* 282 - * register_netdevice() inits txq->_xmit_lock and sets lockdep class 283 - * according to dev->type 284 - */ 285 - static const unsigned short netdev_lock_type[] = { 286 - ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, 287 - ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, 288 - ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, 289 - ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, 290 - ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, 291 - ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, 292 - ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, 293 - ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, 294 - ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, 295 - ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, 296 - ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, 297 - ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, 298 - ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, 299 - ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, 300 - ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; 301 - 302 - static const char *const netdev_lock_name[] = { 303 - "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", 304 - "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", 305 - "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", 306 - "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", 307 - "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", 308 - "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", 309 - "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", 310 - "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", 311 - "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", 312 - "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", 
"_xmit_IPGRE", 313 - "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", 314 - "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 315 - "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", 316 - "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", 317 - "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; 318 - 319 - static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 320 - static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 321 - 322 - static inline unsigned short netdev_lock_pos(unsigned short dev_type) 323 - { 324 - int i; 325 - 326 - for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) 327 - if (netdev_lock_type[i] == dev_type) 328 - return i; 329 - /* the last key is used by default */ 330 - return ARRAY_SIZE(netdev_lock_type) - 1; 331 - } 332 - 333 - static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 334 - unsigned short dev_type) 335 - { 336 - int i; 337 - 338 - i = netdev_lock_pos(dev_type); 339 - lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], 340 - netdev_lock_name[i]); 341 - } 342 - 343 - static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 344 - { 345 - int i; 346 - 347 - i = netdev_lock_pos(dev->type); 348 - lockdep_set_class_and_name(&dev->addr_list_lock, 349 - &netdev_addr_lock_key[i], 350 - netdev_lock_name[i]); 351 - } 352 - #else 353 - static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 354 - unsigned short dev_type) 355 - { 356 - } 357 - static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 358 - { 359 - } 360 - #endif 361 - 362 280 /******************************************************************************* 363 281 * 364 282 * Protocol management and registration routines ··· 8717 8799 { 8718 8800 /* Initialize queue lock */ 8719 8801 spin_lock_init(&queue->_xmit_lock); 8720 - netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 8802 + 
lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key); 8721 8803 queue->xmit_lock_owner = -1; 8722 8804 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 8723 8805 queue->dev = dev; ··· 8764 8846 } 8765 8847 EXPORT_SYMBOL(netif_tx_stop_all_queues); 8766 8848 8849 + static void netdev_register_lockdep_key(struct net_device *dev) 8850 + { 8851 + lockdep_register_key(&dev->qdisc_tx_busylock_key); 8852 + lockdep_register_key(&dev->qdisc_running_key); 8853 + lockdep_register_key(&dev->qdisc_xmit_lock_key); 8854 + lockdep_register_key(&dev->addr_list_lock_key); 8855 + } 8856 + 8857 + static void netdev_unregister_lockdep_key(struct net_device *dev) 8858 + { 8859 + lockdep_unregister_key(&dev->qdisc_tx_busylock_key); 8860 + lockdep_unregister_key(&dev->qdisc_running_key); 8861 + lockdep_unregister_key(&dev->qdisc_xmit_lock_key); 8862 + lockdep_unregister_key(&dev->addr_list_lock_key); 8863 + } 8864 + 8865 + void netdev_update_lockdep_key(struct net_device *dev) 8866 + { 8867 + struct netdev_queue *queue; 8868 + int i; 8869 + 8870 + lockdep_unregister_key(&dev->qdisc_xmit_lock_key); 8871 + lockdep_unregister_key(&dev->addr_list_lock_key); 8872 + 8873 + lockdep_register_key(&dev->qdisc_xmit_lock_key); 8874 + lockdep_register_key(&dev->addr_list_lock_key); 8875 + 8876 + lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key); 8877 + for (i = 0; i < dev->num_tx_queues; i++) { 8878 + queue = netdev_get_tx_queue(dev, i); 8879 + 8880 + lockdep_set_class(&queue->_xmit_lock, 8881 + &dev->qdisc_xmit_lock_key); 8882 + } 8883 + } 8884 + EXPORT_SYMBOL(netdev_update_lockdep_key); 8885 + 8767 8886 /** 8768 8887 * register_netdevice - register a network device 8769 8888 * @dev: device to register ··· 8835 8880 BUG_ON(!net); 8836 8881 8837 8882 spin_lock_init(&dev->addr_list_lock); 8838 - netdev_set_addr_lockdep_class(dev); 8883 + lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key); 8839 8884 8840 8885 ret = dev_get_valid_name(net, dev, dev->name); 8841 
8886 if (ret < 0) ··· 9345 9390 9346 9391 dev_net_set(dev, &init_net); 9347 9392 9393 + netdev_register_lockdep_key(dev); 9394 + 9348 9395 dev->gso_max_size = GSO_MAX_SIZE; 9349 9396 dev->gso_max_segs = GSO_MAX_SEGS; 9350 9397 dev->upper_level = 1; ··· 9430 9473 9431 9474 free_percpu(dev->pcpu_refcnt); 9432 9475 dev->pcpu_refcnt = NULL; 9476 + 9477 + netdev_unregister_lockdep_key(dev); 9433 9478 9434 9479 /* Compatibility with error handling in drivers */ 9435 9480 if (dev->reg_state == NETREG_UNINITIALIZED) {
+1
net/core/rtnetlink.c
··· 2355 2355 err = ops->ndo_del_slave(upper_dev, dev); 2356 2356 if (err) 2357 2357 return err; 2358 + netdev_update_lockdep_key(dev); 2358 2359 } else { 2359 2360 return -EOPNOTSUPP; 2360 2361 }
-5
net/dsa/master.c
··· 310 310 rtnl_unlock(); 311 311 } 312 312 313 - static struct lock_class_key dsa_master_addr_list_lock_key; 314 - 315 313 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) 316 314 { 317 315 int ret; ··· 323 325 wmb(); 324 326 325 327 dev->dsa_ptr = cpu_dp; 326 - lockdep_set_class(&dev->addr_list_lock, 327 - &dsa_master_addr_list_lock_key); 328 - 329 328 ret = dsa_master_ethtool_setup(dev); 330 329 if (ret) 331 330 return ret;
-12
net/dsa/slave.c
··· 1341 1341 return ret; 1342 1342 } 1343 1343 1344 - static struct lock_class_key dsa_slave_netdev_xmit_lock_key; 1345 - static void dsa_slave_set_lockdep_class_one(struct net_device *dev, 1346 - struct netdev_queue *txq, 1347 - void *_unused) 1348 - { 1349 - lockdep_set_class(&txq->_xmit_lock, 1350 - &dsa_slave_netdev_xmit_lock_key); 1351 - } 1352 - 1353 1344 int dsa_slave_suspend(struct net_device *slave_dev) 1354 1345 { 1355 1346 struct dsa_port *dp = dsa_slave_to_port(slave_dev); ··· 1423 1432 slave_dev->min_mtu = 0; 1424 1433 slave_dev->max_mtu = ETH_MAX_MTU; 1425 1434 SET_NETDEV_DEVTYPE(slave_dev, &dsa_type); 1426 - 1427 - netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one, 1428 - NULL); 1429 1435 1430 1436 SET_NETDEV_DEV(slave_dev, port->ds->dev); 1431 1437 slave_dev->dev.of_node = port->dn;
-8
net/ieee802154/6lowpan/core.c
··· 58 58 .create = lowpan_header_create, 59 59 }; 60 60 61 - static int lowpan_dev_init(struct net_device *ldev) 62 - { 63 - netdev_lockdep_set_classes(ldev); 64 - 65 - return 0; 66 - } 67 - 68 61 static int lowpan_open(struct net_device *dev) 69 62 { 70 63 if (!open_count) ··· 89 96 } 90 97 91 98 static const struct net_device_ops lowpan_netdev_ops = { 92 - .ndo_init = lowpan_dev_init, 93 99 .ndo_start_xmit = lowpan_xmit, 94 100 .ndo_open = lowpan_open, 95 101 .ndo_stop = lowpan_stop,
-1
net/l2tp/l2tp_eth.c
··· 56 56 { 57 57 eth_hw_addr_random(dev); 58 58 eth_broadcast_addr(dev->broadcast); 59 - netdev_lockdep_set_classes(dev); 60 59 61 60 return 0; 62 61 }
-23
net/netrom/af_netrom.c
··· 64 64 static const struct proto_ops nr_proto_ops; 65 65 66 66 /* 67 - * NETROM network devices are virtual network devices encapsulating NETROM 68 - * frames into AX.25 which will be sent through an AX.25 device, so form a 69 - * special "super class" of normal net devices; split their locks off into a 70 - * separate class since they always nest. 71 - */ 72 - static struct lock_class_key nr_netdev_xmit_lock_key; 73 - static struct lock_class_key nr_netdev_addr_lock_key; 74 - 75 - static void nr_set_lockdep_one(struct net_device *dev, 76 - struct netdev_queue *txq, 77 - void *_unused) 78 - { 79 - lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key); 80 - } 81 - 82 - static void nr_set_lockdep_key(struct net_device *dev) 83 - { 84 - lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key); 85 - netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL); 86 - } 87 - 88 - /* 89 67 * Socket removal during an interrupt is now safe. 90 68 */ 91 69 static void nr_remove_socket(struct sock *sk) ··· 1392 1414 free_netdev(dev); 1393 1415 goto fail; 1394 1416 } 1395 - nr_set_lockdep_key(dev); 1396 1417 dev_nr[i] = dev; 1397 1418 } 1398 1419
-23
net/rose/af_rose.c
··· 65 65 ax25_address rose_callsign; 66 66 67 67 /* 68 - * ROSE network devices are virtual network devices encapsulating ROSE 69 - * frames into AX.25 which will be sent through an AX.25 device, so form a 70 - * special "super class" of normal net devices; split their locks off into a 71 - * separate class since they always nest. 72 - */ 73 - static struct lock_class_key rose_netdev_xmit_lock_key; 74 - static struct lock_class_key rose_netdev_addr_lock_key; 75 - 76 - static void rose_set_lockdep_one(struct net_device *dev, 77 - struct netdev_queue *txq, 78 - void *_unused) 79 - { 80 - lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key); 81 - } 82 - 83 - static void rose_set_lockdep_key(struct net_device *dev) 84 - { 85 - lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key); 86 - netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL); 87 - } 88 - 89 - /* 90 68 * Convert a ROSE address into text. 91 69 */ 92 70 char *rose2asc(char *buf, const rose_address *addr) ··· 1511 1533 free_netdev(dev); 1512 1534 goto fail; 1513 1535 } 1514 - rose_set_lockdep_key(dev); 1515 1536 dev_rose[i] = dev; 1516 1537 } 1517 1538
+6 -11
net/sched/sch_generic.c
··· 799 799 }; 800 800 EXPORT_SYMBOL(pfifo_fast_ops); 801 801 802 - static struct lock_class_key qdisc_tx_busylock; 803 - static struct lock_class_key qdisc_running_key; 804 - 805 802 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, 806 803 const struct Qdisc_ops *ops, 807 804 struct netlink_ext_ack *extack) ··· 851 854 } 852 855 853 856 spin_lock_init(&sch->busylock); 854 - lockdep_set_class(&sch->busylock, 855 - dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); 856 - 857 857 /* seqlock has the same scope of busylock, for NOLOCK qdisc */ 858 858 spin_lock_init(&sch->seqlock); 859 - lockdep_set_class(&sch->busylock, 860 - dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); 861 - 862 859 seqcount_init(&sch->running); 863 - lockdep_set_class(&sch->running, 864 - dev->qdisc_running_key ?: &qdisc_running_key); 865 860 866 861 sch->ops = ops; 867 862 sch->flags = ops->static_flags; ··· 863 874 sch->empty = true; 864 875 dev_hold(dev); 865 876 refcount_set(&sch->refcnt, 1); 877 + 878 + if (sch != &noop_qdisc) { 879 + lockdep_set_class(&sch->busylock, &dev->qdisc_tx_busylock_key); 880 + lockdep_set_class(&sch->seqlock, &dev->qdisc_tx_busylock_key); 881 + lockdep_set_class(&sch->running, &dev->qdisc_running_key); 882 + } 866 883 867 884 return sch; 868 885 errout1: