Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: partially revert dynamic lockdep key changes

This patch reverts the following commits:

commit 064ff66e2bef84f1153087612032b5b9eab005bd
"bonding: add missing netdev_update_lockdep_key()"

commit 53d374979ef147ab51f5d632dfe20b14aebeccd0
"net: avoid updating qdisc_xmit_lock_key in netdev_update_lockdep_key()"

commit 1f26c0d3d24125992ab0026b0dab16c08df947c7
"net: fix kernel-doc warning in <linux/netdevice.h>"

commit ab92d68fc22f9afab480153bd82a20f6e2533769
"net: core: add generic lockdep keys"

but keeps the addr_list_lock_key because we still lock
addr_list_lock nestedly on stacked devices, unlike xmit_lock.
This is safe because we don't take addr_list_lock on any fast
path.

Reported-and-tested-by: syzbot+aaa6fa4949cc5d9b7b25@syzkaller.appspotmail.com
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Taehee Yoo <ap420073@gmail.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Acked-by: Taehee Yoo <ap420073@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Cong Wang and committed by
David S. Miller
1a33e10e ea84c842

+294 -33
+1
drivers/net/bonding/bond_main.c
··· 4898 4898 spin_lock_init(&bond->stats_lock); 4899 4899 lockdep_register_key(&bond->stats_lock_key); 4900 4900 lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key); 4901 + netdev_lockdep_set_classes(bond_dev); 4901 4902 4902 4903 list_add_tail(&bond->bond_list, &bn->dev_list); 4903 4904
+16
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
··· 299 299 nfp_port_free(repr->port); 300 300 } 301 301 302 + static struct lock_class_key nfp_repr_netdev_xmit_lock_key; 303 + 304 + static void nfp_repr_set_lockdep_class_one(struct net_device *dev, 305 + struct netdev_queue *txq, 306 + void *_unused) 307 + { 308 + lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key); 309 + } 310 + 311 + static void nfp_repr_set_lockdep_class(struct net_device *dev) 312 + { 313 + netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL); 314 + } 315 + 302 316 int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, 303 317 u32 cmsg_port_id, struct nfp_port *port, 304 318 struct net_device *pf_netdev) ··· 321 307 struct nfp_net *nn = netdev_priv(pf_netdev); 322 308 u32 repr_cap = nn->tlv_caps.repr_cap; 323 309 int err; 310 + 311 + nfp_repr_set_lockdep_class(netdev); 324 312 325 313 repr->port = port; 326 314 repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
+20
drivers/net/hamradio/bpqether.c
··· 107 107 108 108 static LIST_HEAD(bpq_devices); 109 109 110 + /* 111 + * bpqether network devices are paired with ethernet devices below them, so 112 + * form a special "super class" of normal ethernet devices; split their locks 113 + * off into a separate class since they always nest. 114 + */ 115 + static struct lock_class_key bpq_netdev_xmit_lock_key; 116 + 117 + static void bpq_set_lockdep_class_one(struct net_device *dev, 118 + struct netdev_queue *txq, 119 + void *_unused) 120 + { 121 + lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key); 122 + } 123 + 124 + static void bpq_set_lockdep_class(struct net_device *dev) 125 + { 126 + netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL); 127 + } 128 + 110 129 /* ------------------------------------------------------------------------ */ 111 130 112 131 ··· 496 477 err = register_netdevice(ndev); 497 478 if (err) 498 479 goto error; 480 + bpq_set_lockdep_class(ndev); 499 481 500 482 /* List protected by RTNL */ 501 483 list_add_rcu(&bpq->bpq_list, &bpq_devices);
+2
drivers/net/hyperv/netvsc_drv.c
··· 2456 2456 NETIF_F_HW_VLAN_CTAG_RX; 2457 2457 net->vlan_features = net->features; 2458 2458 2459 + netdev_lockdep_set_classes(net); 2460 + 2459 2461 /* MTU range: 68 - 1500 or 65521 */ 2460 2462 net->min_mtu = NETVSC_MTU_MIN; 2461 2463 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
+2
drivers/net/ipvlan/ipvlan_main.c
··· 131 131 dev->gso_max_segs = phy_dev->gso_max_segs; 132 132 dev->hard_header_len = phy_dev->hard_header_len; 133 133 134 + netdev_lockdep_set_classes(dev); 135 + 134 136 ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats); 135 137 if (!ipvlan->pcpu_stats) 136 138 return -ENOMEM;
+2
drivers/net/macsec.c
··· 4047 4047 if (err < 0) 4048 4048 return err; 4049 4049 4050 + netdev_lockdep_set_classes(dev); 4051 + 4050 4052 err = netdev_upper_dev_link(real_dev, dev, extack); 4051 4053 if (err < 0) 4052 4054 goto unregister;
+2
drivers/net/macvlan.c
··· 890 890 dev->gso_max_segs = lowerdev->gso_max_segs; 891 891 dev->hard_header_len = lowerdev->hard_header_len; 892 892 893 + netdev_lockdep_set_classes(dev); 894 + 893 895 vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); 894 896 if (!vlan->pcpu_stats) 895 897 return -ENOMEM;
+2
drivers/net/ppp/ppp_generic.c
··· 1410 1410 { 1411 1411 struct ppp *ppp; 1412 1412 1413 + netdev_lockdep_set_classes(dev); 1414 + 1413 1415 ppp = netdev_priv(dev); 1414 1416 /* Let the netdevice take a reference on the ppp file. This ensures 1415 1417 * that ppp_destroy_interface() won't run before the device gets
+1
drivers/net/team/team.c
··· 1647 1647 1648 1648 lockdep_register_key(&team->team_lock_key); 1649 1649 __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key); 1650 + netdev_lockdep_set_classes(dev); 1650 1651 1651 1652 return 0; 1652 1653
+1
drivers/net/vrf.c
··· 867 867 868 868 /* similarly, oper state is irrelevant; set to up to avoid confusion */ 869 869 dev->operstate = IF_OPER_UP; 870 + netdev_lockdep_set_classes(dev); 870 871 return 0; 871 872 872 873 out_rth:
+22
drivers/net/wireless/intersil/hostap/hostap_hw.c
··· 3041 3041 } 3042 3042 } 3043 3043 3044 + 3045 + /* 3046 + * HostAP uses two layers of net devices, where the inner 3047 + * layer gets called all the time from the outer layer. 3048 + * This is a natural nesting, which needs a split lock type. 3049 + */ 3050 + static struct lock_class_key hostap_netdev_xmit_lock_key; 3051 + 3052 + static void prism2_set_lockdep_class_one(struct net_device *dev, 3053 + struct netdev_queue *txq, 3054 + void *_unused) 3055 + { 3056 + lockdep_set_class(&txq->_xmit_lock, 3057 + &hostap_netdev_xmit_lock_key); 3058 + } 3059 + 3060 + static void prism2_set_lockdep_class(struct net_device *dev) 3061 + { 3062 + netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL); 3063 + } 3064 + 3044 3065 static struct net_device * 3045 3066 prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx, 3046 3067 struct device *sdev) ··· 3220 3199 if (ret >= 0) 3221 3200 ret = register_netdevice(dev); 3222 3201 3202 + prism2_set_lockdep_class(dev); 3223 3203 rtnl_unlock(); 3224 3204 if (ret < 0) { 3225 3205 printk(KERN_WARNING "%s: register netdevice failed!\n",
+19 -8
include/linux/netdevice.h
··· 1805 1805 * @phydev: Physical device may attach itself 1806 1806 * for hardware timestamping 1807 1807 * @sfp_bus: attached &struct sfp_bus structure. 1808 - * @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock 1809 - * spinlock 1810 - * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount 1811 - * @qdisc_xmit_lock_key: lockdep class annotating 1812 - * netdev_queue->_xmit_lock spinlock 1808 + * 1813 1809 * @addr_list_lock_key: lockdep class annotating 1814 1810 * net_device->addr_list_lock spinlock 1811 + * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock 1812 + * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount 1815 1813 * 1816 1814 * @proto_down: protocol port state information can be sent to the 1817 1815 * switch driver and used to set the phys state of the ··· 2110 2112 #endif 2111 2113 struct phy_device *phydev; 2112 2114 struct sfp_bus *sfp_bus; 2113 - struct lock_class_key qdisc_tx_busylock_key; 2114 - struct lock_class_key qdisc_running_key; 2115 - struct lock_class_key qdisc_xmit_lock_key; 2116 2115 struct lock_class_key addr_list_lock_key; 2116 + struct lock_class_key *qdisc_tx_busylock; 2117 + struct lock_class_key *qdisc_running_key; 2117 2118 bool proto_down; 2118 2119 unsigned wol_enabled:1; 2119 2120 ··· 2195 2198 2196 2199 for (i = 0; i < dev->num_tx_queues; i++) 2197 2200 f(dev, &dev->_tx[i], arg); 2201 + } 2202 + 2203 + #define netdev_lockdep_set_classes(dev) \ 2204 + { \ 2205 + static struct lock_class_key qdisc_tx_busylock_key; \ 2206 + static struct lock_class_key qdisc_running_key; \ 2207 + static struct lock_class_key qdisc_xmit_lock_key; \ 2208 + unsigned int i; \ 2209 + \ 2210 + (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ 2211 + (dev)->qdisc_running_key = &qdisc_running_key; \ 2212 + for (i = 0; i < (dev)->num_tx_queues; i++) \ 2213 + lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ 2214 + &qdisc_xmit_lock_key); \ 2198 2215 } 2199 2216 2200 2217 u16 
netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+21
net/8021q/vlan_dev.c
··· 489 489 dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); 490 490 } 491 491 492 + /* 493 + * vlan network devices have devices nesting below it, and are a special 494 + * "super class" of normal network devices; split their locks off into a 495 + * separate class since they always nest. 496 + */ 497 + static struct lock_class_key vlan_netdev_xmit_lock_key; 498 + 499 + static void vlan_dev_set_lockdep_one(struct net_device *dev, 500 + struct netdev_queue *txq, 501 + void *unused) 502 + { 503 + lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key); 504 + } 505 + 506 + static void vlan_dev_set_lockdep_class(struct net_device *dev) 507 + { 508 + netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL); 509 + } 510 + 492 511 static const struct header_ops vlan_header_ops = { 493 512 .create = vlan_dev_hard_header, 494 513 .parse = eth_header_parse, ··· 597 578 dev->netdev_ops = &vlan_netdev_ops; 598 579 599 580 SET_NETDEV_DEVTYPE(dev, &vlan_type); 581 + 582 + vlan_dev_set_lockdep_class(dev); 600 583 601 584 vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); 602 585 if (!vlan->vlan_pcpu_stats)
+30
net/batman-adv/soft-interface.c
··· 739 739 return 0; 740 740 } 741 741 742 + /* batman-adv network devices have devices nesting below it and are a special 743 + * "super class" of normal network devices; split their locks off into a 744 + * separate class since they always nest. 745 + */ 746 + static struct lock_class_key batadv_netdev_xmit_lock_key; 747 + 748 + /** 749 + * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue 750 + * @dev: device which owns the tx queue 751 + * @txq: tx queue to modify 752 + * @_unused: always NULL 753 + */ 754 + static void batadv_set_lockdep_class_one(struct net_device *dev, 755 + struct netdev_queue *txq, 756 + void *_unused) 757 + { 758 + lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key); 759 + } 760 + 761 + /** 762 + * batadv_set_lockdep_class() - Set txq and addr_list lockdep class 763 + * @dev: network device to modify 764 + */ 765 + static void batadv_set_lockdep_class(struct net_device *dev) 766 + { 767 + netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL); 768 + } 769 + 742 770 /** 743 771 * batadv_softif_init_late() - late stage initialization of soft interface 744 772 * @dev: registered network device to modify ··· 779 751 u32 random_seqno; 780 752 int ret; 781 753 size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM; 754 + 755 + batadv_set_lockdep_class(dev); 782 756 783 757 bat_priv = netdev_priv(dev); 784 758 bat_priv->soft_iface = dev;
+8
net/bluetooth/6lowpan.c
··· 571 571 return err < 0 ? NET_XMIT_DROP : err; 572 572 } 573 573 574 + static int bt_dev_init(struct net_device *dev) 575 + { 576 + netdev_lockdep_set_classes(dev); 577 + 578 + return 0; 579 + } 580 + 574 581 static const struct net_device_ops netdev_ops = { 582 + .ndo_init = bt_dev_init, 575 583 .ndo_start_xmit = bt_xmit, 576 584 }; 577 585
+71 -19
net/core/dev.c
··· 398 398 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 399 399 EXPORT_PER_CPU_SYMBOL(softnet_data); 400 400 401 + #ifdef CONFIG_LOCKDEP 402 + /* 403 + * register_netdevice() inits txq->_xmit_lock and sets lockdep class 404 + * according to dev->type 405 + */ 406 + static const unsigned short netdev_lock_type[] = { 407 + ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, 408 + ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, 409 + ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, 410 + ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, 411 + ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, 412 + ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, 413 + ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, 414 + ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, 415 + ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, 416 + ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, 417 + ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, 418 + ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, 419 + ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, 420 + ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, 421 + ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; 422 + 423 + static const char *const netdev_lock_name[] = { 424 + "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", 425 + "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", 426 + "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", 427 + "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", 428 + "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", 429 + "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", 430 + "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", 431 + "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", 432 + "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", 433 + "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", 
"_xmit_IPGRE", 434 + "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", 435 + "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 436 + "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", 437 + "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", 438 + "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; 439 + 440 + static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 441 + 442 + static inline unsigned short netdev_lock_pos(unsigned short dev_type) 443 + { 444 + int i; 445 + 446 + for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) 447 + if (netdev_lock_type[i] == dev_type) 448 + return i; 449 + /* the last key is used by default */ 450 + return ARRAY_SIZE(netdev_lock_type) - 1; 451 + } 452 + 453 + static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 454 + unsigned short dev_type) 455 + { 456 + int i; 457 + 458 + i = netdev_lock_pos(dev_type); 459 + lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], 460 + netdev_lock_name[i]); 461 + } 462 + #else 463 + static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 464 + unsigned short dev_type) 465 + { 466 + } 467 + #endif 468 + 401 469 /******************************************************************************* 402 470 * 403 471 * Protocol management and registration routines ··· 9276 9208 { 9277 9209 /* Initialize queue lock */ 9278 9210 spin_lock_init(&queue->_xmit_lock); 9279 - lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key); 9211 + netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 9280 9212 queue->xmit_lock_owner = -1; 9281 9213 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 9282 9214 queue->dev = dev; ··· 9322 9254 } 9323 9255 } 9324 9256 EXPORT_SYMBOL(netif_tx_stop_all_queues); 9325 - 9326 - static void netdev_register_lockdep_key(struct net_device *dev) 9327 - { 9328 - lockdep_register_key(&dev->qdisc_tx_busylock_key); 9329 - lockdep_register_key(&dev->qdisc_running_key); 9330 - 
lockdep_register_key(&dev->qdisc_xmit_lock_key); 9331 - lockdep_register_key(&dev->addr_list_lock_key); 9332 - } 9333 - 9334 - static void netdev_unregister_lockdep_key(struct net_device *dev) 9335 - { 9336 - lockdep_unregister_key(&dev->qdisc_tx_busylock_key); 9337 - lockdep_unregister_key(&dev->qdisc_running_key); 9338 - lockdep_unregister_key(&dev->qdisc_xmit_lock_key); 9339 - lockdep_unregister_key(&dev->addr_list_lock_key); 9340 - } 9341 9257 9342 9258 void netdev_update_lockdep_key(struct net_device *dev) 9343 9259 { ··· 9889 9837 9890 9838 dev_net_set(dev, &init_net); 9891 9839 9892 - netdev_register_lockdep_key(dev); 9840 + lockdep_register_key(&dev->addr_list_lock_key); 9893 9841 9894 9842 dev->gso_max_size = GSO_MAX_SIZE; 9895 9843 dev->gso_max_segs = GSO_MAX_SEGS; ··· 9978 9926 free_percpu(dev->xdp_bulkq); 9979 9927 dev->xdp_bulkq = NULL; 9980 9928 9981 - netdev_unregister_lockdep_key(dev); 9929 + lockdep_unregister_key(&dev->addr_list_lock_key); 9982 9930 9983 9931 /* Compatibility with error handling in drivers */ 9984 9932 if (dev->reg_state == NETREG_UNINITIALIZED) {
+12
net/dsa/slave.c
··· 1671 1671 return ret; 1672 1672 } 1673 1673 1674 + static struct lock_class_key dsa_slave_netdev_xmit_lock_key; 1675 + static void dsa_slave_set_lockdep_class_one(struct net_device *dev, 1676 + struct netdev_queue *txq, 1677 + void *_unused) 1678 + { 1679 + lockdep_set_class(&txq->_xmit_lock, 1680 + &dsa_slave_netdev_xmit_lock_key); 1681 + } 1682 + 1674 1683 int dsa_slave_suspend(struct net_device *slave_dev) 1675 1684 { 1676 1685 struct dsa_port *dp = dsa_slave_to_port(slave_dev); ··· 1762 1753 else 1763 1754 slave_dev->max_mtu = ETH_MAX_MTU; 1764 1755 SET_NETDEV_DEVTYPE(slave_dev, &dsa_type); 1756 + 1757 + netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one, 1758 + NULL); 1765 1759 1766 1760 SET_NETDEV_DEV(slave_dev, port->ds->dev); 1767 1761 slave_dev->dev.of_node = port->dn;
+8
net/ieee802154/6lowpan/core.c
··· 58 58 .create = lowpan_header_create, 59 59 }; 60 60 61 + static int lowpan_dev_init(struct net_device *ldev) 62 + { 63 + netdev_lockdep_set_classes(ldev); 64 + 65 + return 0; 66 + } 67 + 61 68 static int lowpan_open(struct net_device *dev) 62 69 { 63 70 if (!open_count) ··· 96 89 } 97 90 98 91 static const struct net_device_ops lowpan_netdev_ops = { 92 + .ndo_init = lowpan_dev_init, 99 93 .ndo_start_xmit = lowpan_xmit, 100 94 .ndo_open = lowpan_open, 101 95 .ndo_stop = lowpan_stop,
+1
net/l2tp/l2tp_eth.c
··· 56 56 { 57 57 eth_hw_addr_random(dev); 58 58 eth_broadcast_addr(dev->broadcast); 59 + netdev_lockdep_set_classes(dev); 59 60 60 61 return 0; 61 62 }
+21
net/netrom/af_netrom.c
··· 64 64 static const struct proto_ops nr_proto_ops; 65 65 66 66 /* 67 + * NETROM network devices are virtual network devices encapsulating NETROM 68 + * frames into AX.25 which will be sent through an AX.25 device, so form a 69 + * special "super class" of normal net devices; split their locks off into a 70 + * separate class since they always nest. 71 + */ 72 + static struct lock_class_key nr_netdev_xmit_lock_key; 73 + 74 + static void nr_set_lockdep_one(struct net_device *dev, 75 + struct netdev_queue *txq, 76 + void *_unused) 77 + { 78 + lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key); 79 + } 80 + 81 + static void nr_set_lockdep_key(struct net_device *dev) 82 + { 83 + netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL); 84 + } 85 + 86 + /* 67 87 * Socket removal during an interrupt is now safe. 68 88 */ 69 89 static void nr_remove_socket(struct sock *sk) ··· 1414 1394 free_netdev(dev); 1415 1395 goto fail; 1416 1396 } 1397 + nr_set_lockdep_key(dev); 1417 1398 dev_nr[i] = dev; 1418 1399 } 1419 1400
+21
net/rose/af_rose.c
··· 65 65 ax25_address rose_callsign; 66 66 67 67 /* 68 + * ROSE network devices are virtual network devices encapsulating ROSE 69 + * frames into AX.25 which will be sent through an AX.25 device, so form a 70 + * special "super class" of normal net devices; split their locks off into a 71 + * separate class since they always nest. 72 + */ 73 + static struct lock_class_key rose_netdev_xmit_lock_key; 74 + 75 + static void rose_set_lockdep_one(struct net_device *dev, 76 + struct netdev_queue *txq, 77 + void *_unused) 78 + { 79 + lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key); 80 + } 81 + 82 + static void rose_set_lockdep_key(struct net_device *dev) 83 + { 84 + netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL); 85 + } 86 + 87 + /* 68 88 * Convert a ROSE address into text. 69 89 */ 70 90 char *rose2asc(char *buf, const rose_address *addr) ··· 1531 1511 free_netdev(dev); 1532 1512 goto fail; 1533 1513 } 1514 + rose_set_lockdep_key(dev); 1534 1515 dev_rose[i] = dev; 1535 1516 } 1536 1517
+11 -6
net/sched/sch_generic.c
··· 794 794 }; 795 795 EXPORT_SYMBOL(pfifo_fast_ops); 796 796 797 + static struct lock_class_key qdisc_tx_busylock; 798 + static struct lock_class_key qdisc_running_key; 799 + 797 800 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, 798 801 const struct Qdisc_ops *ops, 799 802 struct netlink_ext_ack *extack) ··· 849 846 } 850 847 851 848 spin_lock_init(&sch->busylock); 849 + lockdep_set_class(&sch->busylock, 850 + dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); 851 + 852 852 /* seqlock has the same scope of busylock, for NOLOCK qdisc */ 853 853 spin_lock_init(&sch->seqlock); 854 + lockdep_set_class(&sch->busylock, 855 + dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); 856 + 854 857 seqcount_init(&sch->running); 858 + lockdep_set_class(&sch->running, 859 + dev->qdisc_running_key ?: &qdisc_running_key); 855 860 856 861 sch->ops = ops; 857 862 sch->flags = ops->static_flags; ··· 869 858 sch->empty = true; 870 859 dev_hold(dev); 871 860 refcount_set(&sch->refcnt, 1); 872 - 873 - if (sch != &noop_qdisc) { 874 - lockdep_set_class(&sch->busylock, &dev->qdisc_tx_busylock_key); 875 - lockdep_set_class(&sch->seqlock, &dev->qdisc_tx_busylock_key); 876 - lockdep_set_class(&sch->running, &dev->qdisc_running_key); 877 - } 878 861 879 862 return sch; 880 863 errout1: