Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: sched: add rcu annotations around qdisc->qdisc_sleeping

syzbot reported a race around qdisc->qdisc_sleeping [1]

It is time we add proper annotations to reads and writes to/from
qdisc->qdisc_sleeping.

[1]
BUG: KCSAN: data-race in dev_graft_qdisc / qdisc_lookup_rcu

read to 0xffff8881286fc618 of 8 bytes by task 6928 on cpu 1:
qdisc_lookup_rcu+0x192/0x2c0 net/sched/sch_api.c:331
__tcf_qdisc_find+0x74/0x3c0 net/sched/cls_api.c:1174
tc_get_tfilter+0x18f/0x990 net/sched/cls_api.c:2547
rtnetlink_rcv_msg+0x7af/0x8c0 net/core/rtnetlink.c:6386
netlink_rcv_skb+0x126/0x220 net/netlink/af_netlink.c:2546
rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:6413
netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline]
netlink_unicast+0x56f/0x640 net/netlink/af_netlink.c:1365
netlink_sendmsg+0x665/0x770 net/netlink/af_netlink.c:1913
sock_sendmsg_nosec net/socket.c:724 [inline]
sock_sendmsg net/socket.c:747 [inline]
____sys_sendmsg+0x375/0x4c0 net/socket.c:2503
___sys_sendmsg net/socket.c:2557 [inline]
__sys_sendmsg+0x1e3/0x270 net/socket.c:2586
__do_sys_sendmsg net/socket.c:2595 [inline]
__se_sys_sendmsg net/socket.c:2593 [inline]
__x64_sys_sendmsg+0x46/0x50 net/socket.c:2593
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x63/0xcd

write to 0xffff8881286fc618 of 8 bytes by task 6912 on cpu 0:
dev_graft_qdisc+0x4f/0x80 net/sched/sch_generic.c:1115
qdisc_graft+0x7d0/0xb60 net/sched/sch_api.c:1103
tc_modify_qdisc+0x712/0xf10 net/sched/sch_api.c:1693
rtnetlink_rcv_msg+0x807/0x8c0 net/core/rtnetlink.c:6395
netlink_rcv_skb+0x126/0x220 net/netlink/af_netlink.c:2546
rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:6413
netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline]
netlink_unicast+0x56f/0x640 net/netlink/af_netlink.c:1365
netlink_sendmsg+0x665/0x770 net/netlink/af_netlink.c:1913
sock_sendmsg_nosec net/socket.c:724 [inline]
sock_sendmsg net/socket.c:747 [inline]
____sys_sendmsg+0x375/0x4c0 net/socket.c:2503
___sys_sendmsg net/socket.c:2557 [inline]
__sys_sendmsg+0x1e3/0x270 net/socket.c:2586
__do_sys_sendmsg net/socket.c:2595 [inline]
__se_sys_sendmsg net/socket.c:2593 [inline]
__x64_sys_sendmsg+0x46/0x50 net/socket.c:2593
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x63/0xcd

Reported by Kernel Concurrency Sanitizer on:
CPU: 0 PID: 6912 Comm: syz-executor.5 Not tainted 6.4.0-rc3-syzkaller-00190-g0d85b27b0cc6 #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/16/2023

Fixes: 3a7d0d07a386 ("net: sched: extend Qdisc with rcu")
Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Vlad Buslov <vladbu@nvidia.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eric Dumazet and committed by David S. Miller
d636fc5d e3144ff5

+63 -44
+1 -1
include/linux/netdevice.h
··· 620 620 netdevice_tracker dev_tracker; 621 621 622 622 struct Qdisc __rcu *qdisc; 623 - struct Qdisc *qdisc_sleeping; 623 + struct Qdisc __rcu *qdisc_sleeping; 624 624 #ifdef CONFIG_SYSFS 625 625 struct kobject kobj; 626 626 #endif
+4 -2
include/net/sch_generic.h
··· 545 545 546 546 static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) 547 547 { 548 - return qdisc->dev_queue->qdisc_sleeping; 548 + return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping); 549 549 } 550 550 551 551 static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) ··· 754 754 755 755 for (i = 0; i < dev->num_tx_queues; i++) { 756 756 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 757 - if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping) 757 + 758 + if (rcu_access_pointer(txq->qdisc) != 759 + rcu_access_pointer(txq->qdisc_sleeping)) 758 760 return true; 759 761 } 760 762 return false;
+1 -1
net/core/dev.c
··· 10543 10543 return NULL; 10544 10544 netdev_init_one_queue(dev, queue, NULL); 10545 10545 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 10546 - queue->qdisc_sleeping = &noop_qdisc; 10546 + RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc); 10547 10547 rcu_assign_pointer(dev->ingress_queue, queue); 10548 10548 #endif 10549 10549 return queue;
+16 -10
net/sched/sch_api.c
··· 309 309 310 310 if (dev_ingress_queue(dev)) 311 311 q = qdisc_match_from_root( 312 - dev_ingress_queue(dev)->qdisc_sleeping, 312 + rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping), 313 313 handle); 314 314 out: 315 315 return q; ··· 328 328 329 329 nq = dev_ingress_queue_rcu(dev); 330 330 if (nq) 331 - q = qdisc_match_from_root(nq->qdisc_sleeping, handle); 331 + q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping), 332 + handle); 332 333 out: 333 334 return q; 334 335 } ··· 635 634 void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires, 636 635 u64 delta_ns) 637 636 { 638 - if (test_bit(__QDISC_STATE_DEACTIVATED, 639 - &qdisc_root_sleeping(wd->qdisc)->state)) 637 + bool deactivated; 638 + 639 + rcu_read_lock(); 640 + deactivated = test_bit(__QDISC_STATE_DEACTIVATED, 641 + &qdisc_root_sleeping(wd->qdisc)->state); 642 + rcu_read_unlock(); 643 + if (deactivated) 640 644 return; 641 645 642 646 if (hrtimer_is_queued(&wd->timer)) { ··· 1484 1478 } 1485 1479 q = qdisc_leaf(p, clid); 1486 1480 } else if (dev_ingress_queue(dev)) { 1487 - q = dev_ingress_queue(dev)->qdisc_sleeping; 1481 + q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); 1488 1482 } 1489 1483 } else { 1490 1484 q = rtnl_dereference(dev->qdisc); ··· 1570 1564 } 1571 1565 q = qdisc_leaf(p, clid); 1572 1566 } else if (dev_ingress_queue_create(dev)) { 1573 - q = dev_ingress_queue(dev)->qdisc_sleeping; 1567 + q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); 1574 1568 } 1575 1569 } else { 1576 1570 q = rtnl_dereference(dev->qdisc); ··· 1811 1805 1812 1806 dev_queue = dev_ingress_queue(dev); 1813 1807 if (dev_queue && 1814 - tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, 1815 - &q_idx, s_q_idx, false, 1808 + tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping), 1809 + skb, cb, &q_idx, s_q_idx, false, 1816 1810 tca[TCA_DUMP_INVISIBLE]) < 0) 1817 1811 goto done; 1818 1812 ··· 2255 2249 2256 2250 dev_queue = dev_ingress_queue(dev); 
2257 2251 if (dev_queue && 2258 - tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, 2259 - &t, s_t, false) < 0) 2252 + tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping), 2253 + skb, tcm, cb, &t, s_t, false) < 0) 2260 2254 goto done; 2261 2255 2262 2256 done:
+2
net/sched/sch_fq_pie.c
··· 379 379 spinlock_t *root_lock; /* to lock qdisc for probability calculations */ 380 380 u32 idx; 381 381 382 + rcu_read_lock(); 382 383 root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 383 384 spin_lock(root_lock); 384 385 ··· 392 391 mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate); 393 392 394 393 spin_unlock(root_lock); 394 + rcu_read_unlock(); 395 395 } 396 396 397 397 static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
+15 -15
net/sched/sch_generic.c
··· 648 648 649 649 static struct netdev_queue noop_netdev_queue = { 650 650 RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc), 651 - .qdisc_sleeping = &noop_qdisc, 651 + RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc), 652 652 }; 653 653 654 654 struct Qdisc noop_qdisc = { ··· 1103 1103 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, 1104 1104 struct Qdisc *qdisc) 1105 1105 { 1106 - struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; 1106 + struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 1107 1107 spinlock_t *root_lock; 1108 1108 1109 1109 root_lock = qdisc_lock(oqdisc); ··· 1112 1112 /* ... and graft new one */ 1113 1113 if (qdisc == NULL) 1114 1114 qdisc = &noop_qdisc; 1115 - dev_queue->qdisc_sleeping = qdisc; 1115 + rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); 1116 1116 rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); 1117 1117 1118 1118 spin_unlock_bh(root_lock); ··· 1125 1125 struct netdev_queue *dev_queue, 1126 1126 void *_qdisc_default) 1127 1127 { 1128 - struct Qdisc *qdisc = dev_queue->qdisc_sleeping; 1128 + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 1129 1129 struct Qdisc *qdisc_default = _qdisc_default; 1130 1130 1131 1131 if (qdisc) { 1132 1132 rcu_assign_pointer(dev_queue->qdisc, qdisc_default); 1133 - dev_queue->qdisc_sleeping = qdisc_default; 1133 + rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default); 1134 1134 1135 1135 qdisc_put(qdisc); 1136 1136 } ··· 1154 1154 1155 1155 if (!netif_is_multiqueue(dev)) 1156 1156 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; 1157 - dev_queue->qdisc_sleeping = qdisc; 1157 + rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); 1158 1158 } 1159 1159 1160 1160 static void attach_default_qdiscs(struct net_device *dev) ··· 1167 1167 if (!netif_is_multiqueue(dev) || 1168 1168 dev->priv_flags & IFF_NO_QUEUE) { 1169 1169 netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); 1170 - qdisc = txq->qdisc_sleeping; 1170 + qdisc = 
rtnl_dereference(txq->qdisc_sleeping); 1171 1171 rcu_assign_pointer(dev->qdisc, qdisc); 1172 1172 qdisc_refcount_inc(qdisc); 1173 1173 } else { ··· 1186 1186 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); 1187 1187 dev->priv_flags |= IFF_NO_QUEUE; 1188 1188 netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); 1189 - qdisc = txq->qdisc_sleeping; 1189 + qdisc = rtnl_dereference(txq->qdisc_sleeping); 1190 1190 rcu_assign_pointer(dev->qdisc, qdisc); 1191 1191 qdisc_refcount_inc(qdisc); 1192 1192 dev->priv_flags ^= IFF_NO_QUEUE; ··· 1202 1202 struct netdev_queue *dev_queue, 1203 1203 void *_need_watchdog) 1204 1204 { 1205 - struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping; 1205 + struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 1206 1206 int *need_watchdog_p = _need_watchdog; 1207 1207 1208 1208 if (!(new_qdisc->flags & TCQ_F_BUILTIN)) ··· 1272 1272 struct Qdisc *qdisc; 1273 1273 bool nolock; 1274 1274 1275 - qdisc = dev_queue->qdisc_sleeping; 1275 + qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 1276 1276 if (!qdisc) 1277 1277 return; 1278 1278 ··· 1303 1303 int val; 1304 1304 1305 1305 dev_queue = netdev_get_tx_queue(dev, i); 1306 - q = dev_queue->qdisc_sleeping; 1306 + q = rtnl_dereference(dev_queue->qdisc_sleeping); 1307 1307 1308 1308 root_lock = qdisc_lock(q); 1309 1309 spin_lock_bh(root_lock); ··· 1379 1379 static int qdisc_change_tx_queue_len(struct net_device *dev, 1380 1380 struct netdev_queue *dev_queue) 1381 1381 { 1382 - struct Qdisc *qdisc = dev_queue->qdisc_sleeping; 1382 + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 1383 1383 const struct Qdisc_ops *ops = qdisc->ops; 1384 1384 1385 1385 if (ops->change_tx_queue_len) ··· 1404 1404 unsigned int i; 1405 1405 1406 1406 for (i = new_real_tx; i < dev->real_num_tx_queues; i++) { 1407 - qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping; 1407 + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); 1408 1408 
/* Only update the default qdiscs we created, 1409 1409 * qdiscs with handles are always hashed. 1410 1410 */ ··· 1412 1412 qdisc_hash_del(qdisc); 1413 1413 } 1414 1414 for (i = dev->real_num_tx_queues; i < new_real_tx; i++) { 1415 - qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping; 1415 + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); 1416 1416 if (qdisc != &noop_qdisc && !qdisc->handle) 1417 1417 qdisc_hash_add(qdisc, false); 1418 1418 } ··· 1449 1449 struct Qdisc *qdisc = _qdisc; 1450 1450 1451 1451 rcu_assign_pointer(dev_queue->qdisc, qdisc); 1452 - dev_queue->qdisc_sleeping = qdisc; 1452 + rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); 1453 1453 } 1454 1454 1455 1455 void dev_init_scheduler(struct net_device *dev)
+4 -4
net/sched/sch_mq.c
··· 141 141 * qdisc totals are added at end. 142 142 */ 143 143 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { 144 - qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; 144 + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping); 145 145 spin_lock_bh(qdisc_lock(qdisc)); 146 146 147 147 gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, ··· 202 202 { 203 203 struct netdev_queue *dev_queue = mq_queue_get(sch, cl); 204 204 205 - return dev_queue->qdisc_sleeping; 205 + return rtnl_dereference(dev_queue->qdisc_sleeping); 206 206 } 207 207 208 208 static unsigned long mq_find(struct Qdisc *sch, u32 classid) ··· 221 221 222 222 tcm->tcm_parent = TC_H_ROOT; 223 223 tcm->tcm_handle |= TC_H_MIN(cl); 224 - tcm->tcm_info = dev_queue->qdisc_sleeping->handle; 224 + tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle; 225 225 return 0; 226 226 } 227 227 ··· 230 230 { 231 231 struct netdev_queue *dev_queue = mq_queue_get(sch, cl); 232 232 233 - sch = dev_queue->qdisc_sleeping; 233 + sch = rtnl_dereference(dev_queue->qdisc_sleeping); 234 234 if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 || 235 235 qdisc_qstats_copy(d, sch) < 0) 236 236 return -1;
+4 -4
net/sched/sch_mqprio.c
··· 557 557 * qdisc totals are added at end. 558 558 */ 559 559 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { 560 - qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; 560 + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping); 561 561 spin_lock_bh(qdisc_lock(qdisc)); 562 562 563 563 gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, ··· 604 604 if (!dev_queue) 605 605 return NULL; 606 606 607 - return dev_queue->qdisc_sleeping; 607 + return rtnl_dereference(dev_queue->qdisc_sleeping); 608 608 } 609 609 610 610 static unsigned long mqprio_find(struct Qdisc *sch, u32 classid) ··· 637 637 tcm->tcm_parent = (tc < 0) ? 0 : 638 638 TC_H_MAKE(TC_H_MAJ(sch->handle), 639 639 TC_H_MIN(tc + TC_H_MIN_PRIORITY)); 640 - tcm->tcm_info = dev_queue->qdisc_sleeping->handle; 640 + tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle; 641 641 } else { 642 642 tcm->tcm_parent = TC_H_ROOT; 643 643 tcm->tcm_info = 0; ··· 693 693 } else { 694 694 struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); 695 695 696 - sch = dev_queue->qdisc_sleeping; 696 + sch = rtnl_dereference(dev_queue->qdisc_sleeping); 697 697 if (gnet_stats_copy_basic(d, sch->cpu_bstats, 698 698 &sch->bstats, true) < 0 || 699 699 qdisc_qstats_copy(d, sch) < 0)
+4 -1
net/sched/sch_pie.c
··· 421 421 { 422 422 struct pie_sched_data *q = from_timer(q, t, adapt_timer); 423 423 struct Qdisc *sch = q->sch; 424 - spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 424 + spinlock_t *root_lock; 425 425 426 + rcu_read_lock(); 427 + root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 426 428 spin_lock(root_lock); 427 429 pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog); 428 430 ··· 432 430 if (q->params.tupdate) 433 431 mod_timer(&q->adapt_timer, jiffies + q->params.tupdate); 434 432 spin_unlock(root_lock); 433 + rcu_read_unlock(); 435 434 } 436 435 437 436 static int pie_init(struct Qdisc *sch, struct nlattr *opt,
+4 -1
net/sched/sch_red.c
··· 321 321 { 322 322 struct red_sched_data *q = from_timer(q, t, adapt_timer); 323 323 struct Qdisc *sch = q->sch; 324 - spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 324 + spinlock_t *root_lock; 325 325 326 + rcu_read_lock(); 327 + root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 326 328 spin_lock(root_lock); 327 329 red_adaptative_algo(&q->parms, &q->vars); 328 330 mod_timer(&q->adapt_timer, jiffies + HZ/2); 329 331 spin_unlock(root_lock); 332 + rcu_read_unlock(); 330 333 } 331 334 332 335 static int red_init(struct Qdisc *sch, struct nlattr *opt,
+4 -1
net/sched/sch_sfq.c
··· 606 606 { 607 607 struct sfq_sched_data *q = from_timer(q, t, perturb_timer); 608 608 struct Qdisc *sch = q->sch; 609 - spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 609 + spinlock_t *root_lock; 610 610 siphash_key_t nkey; 611 611 612 612 get_random_bytes(&nkey, sizeof(nkey)); 613 + rcu_read_lock(); 614 + root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 613 615 spin_lock(root_lock); 614 616 q->perturbation = nkey; 615 617 if (!q->filter_list && q->tail) ··· 620 618 621 619 if (q->perturb_period) 622 620 mod_timer(&q->perturb_timer, jiffies + q->perturb_period); 621 + rcu_read_unlock(); 623 622 } 624 623 625 624 static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+3 -3
net/sched/sch_taprio.c
··· 2358 2358 if (!dev_queue) 2359 2359 return NULL; 2360 2360 2361 - return dev_queue->qdisc_sleeping; 2361 + return rtnl_dereference(dev_queue->qdisc_sleeping); 2362 2362 } 2363 2363 2364 2364 static unsigned long taprio_find(struct Qdisc *sch, u32 classid) ··· 2377 2377 2378 2378 tcm->tcm_parent = TC_H_ROOT; 2379 2379 tcm->tcm_handle |= TC_H_MIN(cl); 2380 - tcm->tcm_info = dev_queue->qdisc_sleeping->handle; 2380 + tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle; 2381 2381 2382 2382 return 0; 2383 2383 } ··· 2389 2389 { 2390 2390 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); 2391 2391 2392 - sch = dev_queue->qdisc_sleeping; 2392 + sch = rtnl_dereference(dev_queue->qdisc_sleeping); 2393 2393 if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 || 2394 2394 qdisc_qstats_copy(d, sch) < 0) 2395 2395 return -1;
+1 -1
net/sched/sch_teql.c
··· 297 297 struct net_device *slave = qdisc_dev(q); 298 298 struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0); 299 299 300 - if (slave_txq->qdisc_sleeping != q) 300 + if (rcu_access_pointer(slave_txq->qdisc_sleeping) != q) 301 301 continue; 302 302 if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) || 303 303 !netif_running(slave)) {