Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: sched: Remove Qdisc::running sequence counter

The Qdisc::running sequence counter has two uses:

1. Reliably reading qdisc's tc statistics while the qdisc is running
(a seqcount read/retry loop at gnet_stats_add_basic()).

2. As a flag, indicating whether the qdisc in question is running
(without any retry loops).

For the first usage, the Qdisc::running sequence counter write section,
qdisc_run_begin() => qdisc_run_end(), covers a much wider area than what
is actually needed: the raw qdisc's bstats update. A u64_stats sync
point was thus introduced (in previous commits) inside the bstats
structure itself. A local u64_stats write section is then started and
stopped for the bstats updates.

Use that u64_stats sync point mechanism for the bstats read/retry loop
at gnet_stats_add_basic().

For the second qdisc->running usage, a __QDISC_STATE_RUNNING bit flag,
accessed with atomic bitops, is sufficient. Using a bit flag instead of
a sequence counter at qdisc_run_begin/end() and qdisc_is_running() leads
to the SMP barriers implicitly added through raw_read_seqcount() and
write_seqcount_begin/end() getting removed. All call sites have been
surveyed though, and no required ordering was identified.

Now that the qdisc->running sequence counter is no longer used, remove
it.

Note, using u64_stats implies no sequence counter protection for 64-bit
architectures. This can lead to the qdisc tc statistics "packets" vs.
"bytes" values getting out of sync on rare occasions. The individual
values will still be valid.

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Ahmed S. Darwish and committed by
David S. Miller
29cbcd85 50dc9a85

+102 -134
-4
include/linux/netdevice.h
··· 1916 1916 * @sfp_bus: attached &struct sfp_bus structure. 1917 1917 * 1918 1918 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock 1919 - * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount 1920 1919 * 1921 1920 * @proto_down: protocol port state information can be sent to the 1922 1921 * switch driver and used to set the phys state of the ··· 2249 2250 struct phy_device *phydev; 2250 2251 struct sfp_bus *sfp_bus; 2251 2252 struct lock_class_key *qdisc_tx_busylock; 2252 - struct lock_class_key *qdisc_running_key; 2253 2253 bool proto_down; 2254 2254 unsigned wol_enabled:1; 2255 2255 unsigned threaded:1; ··· 2358 2360 #define netdev_lockdep_set_classes(dev) \ 2359 2361 { \ 2360 2362 static struct lock_class_key qdisc_tx_busylock_key; \ 2361 - static struct lock_class_key qdisc_running_key; \ 2362 2363 static struct lock_class_key qdisc_xmit_lock_key; \ 2363 2364 static struct lock_class_key dev_addr_list_lock_key; \ 2364 2365 unsigned int i; \ 2365 2366 \ 2366 2367 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ 2367 - (dev)->qdisc_running_key = &qdisc_running_key; \ 2368 2368 lockdep_set_class(&(dev)->addr_list_lock, \ 2369 2369 &dev_addr_list_lock_key); \ 2370 2370 for (i = 0; i < (dev)->num_tx_queues; i++) \
+8 -11
include/net/gen_stats.h
··· 46 46 spinlock_t *lock, struct gnet_dump *d, 47 47 int padattr); 48 48 49 - int gnet_stats_copy_basic(const seqcount_t *running, 50 - struct gnet_dump *d, 49 + int gnet_stats_copy_basic(struct gnet_dump *d, 51 50 struct gnet_stats_basic_sync __percpu *cpu, 52 - struct gnet_stats_basic_sync *b); 53 - void gnet_stats_add_basic(const seqcount_t *running, 54 - struct gnet_stats_basic_sync *bstats, 51 + struct gnet_stats_basic_sync *b, bool running); 52 + void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats, 55 53 struct gnet_stats_basic_sync __percpu *cpu, 56 - struct gnet_stats_basic_sync *b); 57 - int gnet_stats_copy_basic_hw(const seqcount_t *running, 58 - struct gnet_dump *d, 54 + struct gnet_stats_basic_sync *b, bool running); 55 + int gnet_stats_copy_basic_hw(struct gnet_dump *d, 59 56 struct gnet_stats_basic_sync __percpu *cpu, 60 - struct gnet_stats_basic_sync *b); 57 + struct gnet_stats_basic_sync *b, bool running); 61 58 int gnet_stats_copy_rate_est(struct gnet_dump *d, 62 59 struct net_rate_estimator __rcu **ptr); 63 60 int gnet_stats_copy_queue(struct gnet_dump *d, ··· 71 74 struct gnet_stats_basic_sync __percpu *cpu_bstats, 72 75 struct net_rate_estimator __rcu **rate_est, 73 76 spinlock_t *lock, 74 - seqcount_t *running, struct nlattr *opt); 77 + bool running, struct nlattr *opt); 75 78 void gen_kill_estimator(struct net_rate_estimator __rcu **ptr); 76 79 int gen_replace_estimator(struct gnet_stats_basic_sync *bstats, 77 80 struct gnet_stats_basic_sync __percpu *cpu_bstats, 78 81 struct net_rate_estimator __rcu **ptr, 79 82 spinlock_t *lock, 80 - seqcount_t *running, struct nlattr *opt); 83 + bool running, struct nlattr *opt); 81 84 bool gen_estimator_active(struct net_rate_estimator __rcu **ptr); 82 85 bool gen_estimator_read(struct net_rate_estimator __rcu **ptr, 83 86 struct gnet_stats_rate_est64 *sample);
+14 -19
include/net/sch_generic.h
··· 38 38 __QDISC_STATE_DEACTIVATED, 39 39 __QDISC_STATE_MISSED, 40 40 __QDISC_STATE_DRAINING, 41 + /* Only for !TCQ_F_NOLOCK qdisc. Never access it directly. 42 + * Use qdisc_run_begin/end() or qdisc_is_running() instead. 43 + */ 44 + __QDISC_STATE_RUNNING, 41 45 }; 42 46 43 47 #define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED) ··· 112 108 struct sk_buff_head gso_skb ____cacheline_aligned_in_smp; 113 109 struct qdisc_skb_head q; 114 110 struct gnet_stats_basic_sync bstats; 115 - seqcount_t running; 116 111 struct gnet_stats_queue qstats; 117 112 unsigned long state; 118 113 struct Qdisc *next_sched; ··· 146 143 return NULL; 147 144 } 148 145 146 + /* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc 147 + * root_lock section, or provide their own memory barriers -- ordering 148 + * against qdisc_run_begin/end() atomic bit operations. 149 + */ 149 150 static inline bool qdisc_is_running(struct Qdisc *qdisc) 150 151 { 151 152 if (qdisc->flags & TCQ_F_NOLOCK) 152 153 return spin_is_locked(&qdisc->seqlock); 153 - return (raw_read_seqcount(&qdisc->running) & 1) ? true : false; 154 + return test_bit(__QDISC_STATE_RUNNING, &qdisc->state); 154 155 } 155 156 156 157 static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc) ··· 174 167 return !READ_ONCE(qdisc->q.qlen); 175 168 } 176 169 170 + /* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with 171 + * the qdisc root lock acquired. 172 + */ 177 173 static inline bool qdisc_run_begin(struct Qdisc *qdisc) 178 174 { 179 175 if (qdisc->flags & TCQ_F_NOLOCK) { ··· 216 206 * after it releases the lock at the end of qdisc_run_end(). 217 207 */ 218 208 return spin_trylock(&qdisc->seqlock); 219 - } else if (qdisc_is_running(qdisc)) { 220 - return false; 221 209 } 222 - /* Variant of write_seqcount_begin() telling lockdep a trylock 223 - * was attempted. 
224 - */ 225 - raw_write_seqcount_begin(&qdisc->running); 226 - seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_); 227 - return true; 210 + return !test_and_set_bit(__QDISC_STATE_RUNNING, &qdisc->state); 228 211 } 229 212 230 213 static inline void qdisc_run_end(struct Qdisc *qdisc) ··· 229 226 &qdisc->state))) 230 227 __netif_schedule(qdisc); 231 228 } else { 232 - write_seqcount_end(&qdisc->running); 229 + clear_bit(__QDISC_STATE_RUNNING, &qdisc->state); 233 230 } 234 231 } ··· 593 590 594 591 ASSERT_RTNL(); 595 592 return qdisc_lock(root); 596 - } 597 - 598 - static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) 599 - { 600 - struct Qdisc *root = qdisc_root_sleeping(qdisc); 601 - 602 - ASSERT_RTNL(); 603 - return &root->running; 604 593 } 605 594 606 595 static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
+10 -6
net/core/gen_estimator.c
··· 42 42 struct net_rate_estimator { 43 43 struct gnet_stats_basic_sync *bstats; 44 44 spinlock_t *stats_lock; 45 - seqcount_t *running; 45 + bool running; 46 46 struct gnet_stats_basic_sync __percpu *cpu_bstats; 47 47 u8 ewma_log; 48 48 u8 intvl_log; /* period : (250ms << intvl_log) */ ··· 66 66 if (e->stats_lock) 67 67 spin_lock(e->stats_lock); 68 68 69 - gnet_stats_add_basic(e->running, b, e->cpu_bstats, e->bstats); 69 + gnet_stats_add_basic(b, e->cpu_bstats, e->bstats, e->running); 70 70 71 71 if (e->stats_lock) 72 72 spin_unlock(e->stats_lock); ··· 113 113 * @cpu_bstats: bstats per cpu 114 114 * @rate_est: rate estimator statistics 115 115 * @lock: lock for statistics and control path 116 - * @running: qdisc running seqcount 116 + * @running: true if @bstats represents a running qdisc, thus @bstats' 117 + * internal values might change during basic reads. Only used 118 + * if @bstats_cpu is NULL 117 119 * @opt: rate estimator configuration TLV 118 120 * 119 121 * Creates a new rate estimator with &bstats as source and &rate_est ··· 131 129 struct gnet_stats_basic_sync __percpu *cpu_bstats, 132 130 struct net_rate_estimator __rcu **rate_est, 133 131 spinlock_t *lock, 134 - seqcount_t *running, 132 + bool running, 135 133 struct nlattr *opt) 136 134 { 137 135 struct gnet_estimator *parm = nla_data(opt); ··· 220 218 * @cpu_bstats: bstats per cpu 221 219 * @rate_est: rate estimator statistics 222 220 * @lock: lock for statistics and control path 223 - * @running: qdisc running seqcount (might be NULL) 221 + * @running: true if @bstats represents a running qdisc, thus @bstats' 222 + * internal values might change during basic reads. 
Only used 223 + * if @cpu_bstats is NULL 224 224 * @opt: rate estimator configuration TLV 225 225 * 226 226 * Replaces the configuration of a rate estimator by calling ··· 234 230 struct gnet_stats_basic_sync __percpu *cpu_bstats, 235 231 struct net_rate_estimator __rcu **rate_est, 236 232 spinlock_t *lock, 237 - seqcount_t *running, struct nlattr *opt) 233 + bool running, struct nlattr *opt) 238 234 { 239 235 return gen_new_estimator(bstats, cpu_bstats, rate_est, 240 236 lock, running, opt);
+28 -22
net/core/gen_stats.c
··· 146 146 _bstats_update(bstats, t_bytes, t_packets); 147 147 } 148 148 149 - void gnet_stats_add_basic(const seqcount_t *running, 150 - struct gnet_stats_basic_sync *bstats, 149 + void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats, 151 150 struct gnet_stats_basic_sync __percpu *cpu, 152 - struct gnet_stats_basic_sync *b) 151 + struct gnet_stats_basic_sync *b, bool running) 153 152 { 154 - unsigned int seq; 153 + unsigned int start; 155 154 u64 bytes = 0; 156 155 u64 packets = 0; 156 + 157 + WARN_ON_ONCE((cpu || running) && !in_task()); 157 158 158 159 if (cpu) { 159 160 gnet_stats_add_basic_cpu(bstats, cpu); ··· 162 161 } 163 162 do { 164 163 if (running) 165 - seq = read_seqcount_begin(running); 164 + start = u64_stats_fetch_begin_irq(&b->syncp); 166 165 bytes = u64_stats_read(&b->bytes); 167 166 packets = u64_stats_read(&b->packets); 168 - } while (running && read_seqcount_retry(running, seq)); 167 + } while (running && u64_stats_fetch_retry_irq(&b->syncp, start)); 169 168 170 169 _bstats_update(bstats, bytes, packets); 171 170 } 172 171 EXPORT_SYMBOL(gnet_stats_add_basic); 173 172 174 173 static int 175 - ___gnet_stats_copy_basic(const seqcount_t *running, 176 - struct gnet_dump *d, 174 + ___gnet_stats_copy_basic(struct gnet_dump *d, 177 175 struct gnet_stats_basic_sync __percpu *cpu, 178 176 struct gnet_stats_basic_sync *b, 179 - int type) 177 + int type, bool running) 180 178 { 181 179 struct gnet_stats_basic_sync bstats; 182 180 u64 bstats_bytes, bstats_packets; 183 181 184 182 gnet_stats_basic_sync_init(&bstats); 185 - gnet_stats_add_basic(running, &bstats, cpu, b); 183 + gnet_stats_add_basic(&bstats, cpu, b, running); 186 184 187 185 bstats_bytes = u64_stats_read(&bstats.bytes); 188 186 bstats_packets = u64_stats_read(&bstats.packets); ··· 210 210 211 211 /** 212 212 * gnet_stats_copy_basic - copy basic statistics into statistic TLV 213 - * @running: seqcount_t pointer 214 213 * @d: dumping handle 215 214 * @cpu: copy statistic per cpu 216 
215 * @b: basic statistics 216 + * @running: true if @b represents a running qdisc, thus @b's 217 + * internal values might change during basic reads. 218 + * Only used if @cpu is NULL 219 + * 220 + * Context: task; must not be run from IRQ or BH contexts 217 221 * 218 222 * Appends the basic statistics to the top level TLV created by 219 223 * gnet_stats_start_copy(). ··· 226 222 * if the room in the socket buffer was not sufficient. 227 223 */ 228 224 int 229 - gnet_stats_copy_basic(const seqcount_t *running, 230 - struct gnet_dump *d, 225 + gnet_stats_copy_basic(struct gnet_dump *d, 231 226 struct gnet_stats_basic_sync __percpu *cpu, 232 - struct gnet_stats_basic_sync *b) 227 + struct gnet_stats_basic_sync *b, 228 + bool running) 233 229 { 234 - return ___gnet_stats_copy_basic(running, d, cpu, b, 235 - TCA_STATS_BASIC); 230 + return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC, running); 236 231 } 237 232 EXPORT_SYMBOL(gnet_stats_copy_basic); 238 233 239 234 /** 240 235 * gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV 241 - * @running: seqcount_t pointer 242 236 * @d: dumping handle 243 237 * @cpu: copy statistic per cpu 244 238 * @b: basic statistics 239 + * @running: true if @b represents a running qdisc, thus @b's 240 + * internal values might change during basic reads. 241 + * Only used if @cpu is NULL 242 + * 243 + * Context: task; must not be run from IRQ or BH contexts 245 244 * 246 245 * Appends the basic statistics to the top level TLV created by 247 246 * gnet_stats_start_copy(). ··· 253 246 * if the room in the socket buffer was not sufficient. 
254 247 */ 255 248 int 256 - gnet_stats_copy_basic_hw(const seqcount_t *running, 257 - struct gnet_dump *d, 249 + gnet_stats_copy_basic_hw(struct gnet_dump *d, 258 250 struct gnet_stats_basic_sync __percpu *cpu, 259 - struct gnet_stats_basic_sync *b) 251 + struct gnet_stats_basic_sync *b, 252 + bool running) 260 253 { 261 - return ___gnet_stats_copy_basic(running, d, cpu, b, 262 - TCA_STATS_BASIC_HW); 254 + return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC_HW, running); 263 255 } 264 256 EXPORT_SYMBOL(gnet_stats_copy_basic_hw); 265 257
+5 -4
net/sched/act_api.c
··· 501 501 if (est) { 502 502 err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats, 503 503 &p->tcfa_rate_est, 504 - &p->tcfa_lock, NULL, est); 504 + &p->tcfa_lock, false, est); 505 505 if (err) 506 506 goto err4; 507 507 } ··· 1173 1173 if (err < 0) 1174 1174 goto errout; 1175 1175 1176 - if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 || 1177 - gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw, 1178 - &p->tcfa_bstats_hw) < 0 || 1176 + if (gnet_stats_copy_basic(&d, p->cpu_bstats, 1177 + &p->tcfa_bstats, false) < 0 || 1178 + gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw, 1179 + &p->tcfa_bstats_hw, false) < 0 || 1179 1180 gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 || 1180 1181 gnet_stats_copy_queue(&d, p->cpu_qstats, 1181 1182 &p->tcfa_qstats,
+1 -1
net/sched/act_police.c
··· 125 125 police->common.cpu_bstats, 126 126 &police->tcf_rate_est, 127 127 &police->tcf_lock, 128 - NULL, est); 128 + false, est); 129 129 if (err) 130 130 goto failure; 131 131 } else if (tb[TCA_POLICE_AVRATE] &&
+3 -13
net/sched/sch_api.c
··· 943 943 cpu_qstats = q->cpu_qstats; 944 944 } 945 945 946 - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q), 947 - &d, cpu_bstats, &q->bstats) < 0 || 946 + if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 || 948 947 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 || 949 948 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0) 950 949 goto nla_put_failure; ··· 1264 1265 rcu_assign_pointer(sch->stab, stab); 1265 1266 } 1266 1267 if (tca[TCA_RATE]) { 1267 - seqcount_t *running; 1268 - 1269 1268 err = -EOPNOTSUPP; 1270 1269 if (sch->flags & TCQ_F_MQROOT) { 1271 1270 NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc"); 1272 1271 goto err_out4; 1273 1272 } 1274 1273 1275 - if (sch->parent != TC_H_ROOT && 1276 - !(sch->flags & TCQ_F_INGRESS) && 1277 - (!p || !(p->flags & TCQ_F_MQROOT))) 1278 - running = qdisc_root_sleeping_running(sch); 1279 - else 1280 - running = &sch->running; 1281 - 1282 1274 err = gen_new_estimator(&sch->bstats, 1283 1275 sch->cpu_bstats, 1284 1276 &sch->rate_est, 1285 1277 NULL, 1286 - running, 1278 + true, 1287 1279 tca[TCA_RATE]); 1288 1280 if (err) { 1289 1281 NL_SET_ERR_MSG(extack, "Failed to generate new estimator"); ··· 1350 1360 sch->cpu_bstats, 1351 1361 &sch->rate_est, 1352 1362 NULL, 1353 - qdisc_root_sleeping_running(sch), 1363 + true, 1354 1364 tca[TCA_RATE]); 1355 1365 } 1356 1366 out:
+1 -2
net/sched/sch_atm.c
··· 653 653 { 654 654 struct atm_flow_data *flow = (struct atm_flow_data *)arg; 655 655 656 - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), 657 - d, NULL, &flow->bstats) < 0 || 656 + if (gnet_stats_copy_basic(d, NULL, &flow->bstats, true) < 0 || 658 657 gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0) 659 658 return -1; 660 659
+3 -6
net/sched/sch_cbq.c
··· 1383 1383 if (cl->undertime != PSCHED_PASTPERFECT) 1384 1384 cl->xstats.undertime = cl->undertime - q->now; 1385 1385 1386 - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), 1387 - d, NULL, &cl->bstats) < 0 || 1386 + if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || 1388 1387 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || 1389 1388 gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) 1390 1389 return -1; ··· 1517 1518 err = gen_replace_estimator(&cl->bstats, NULL, 1518 1519 &cl->rate_est, 1519 1520 NULL, 1520 - qdisc_root_sleeping_running(sch), 1521 + true, 1521 1522 tca[TCA_RATE]); 1522 1523 if (err) { 1523 1524 NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator"); ··· 1618 1619 1619 1620 if (tca[TCA_RATE]) { 1620 1621 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, 1621 - NULL, 1622 - qdisc_root_sleeping_running(sch), 1623 - tca[TCA_RATE]); 1622 + NULL, true, tca[TCA_RATE]); 1624 1623 if (err) { 1625 1624 NL_SET_ERR_MSG(extack, "Couldn't create new estimator"); 1626 1625 tcf_block_put(cl->block);
+3 -7
net/sched/sch_drr.c
··· 85 85 if (tca[TCA_RATE]) { 86 86 err = gen_replace_estimator(&cl->bstats, NULL, 87 87 &cl->rate_est, 88 - NULL, 89 - qdisc_root_sleeping_running(sch), 88 + NULL, true, 90 89 tca[TCA_RATE]); 91 90 if (err) { 92 91 NL_SET_ERR_MSG(extack, "Failed to replace estimator"); ··· 118 119 119 120 if (tca[TCA_RATE]) { 120 121 err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, 121 - NULL, 122 - qdisc_root_sleeping_running(sch), 123 - tca[TCA_RATE]); 122 + NULL, true, tca[TCA_RATE]); 124 123 if (err) { 125 124 NL_SET_ERR_MSG(extack, "Failed to replace estimator"); 126 125 qdisc_put(cl->qdisc); ··· 265 268 if (qlen) 266 269 xstats.deficit = cl->deficit; 267 270 268 - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), 269 - d, NULL, &cl->bstats) < 0 || 271 + if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || 270 272 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || 271 273 gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0) 272 274 return -1;
+1 -2
net/sched/sch_ets.c
··· 325 325 struct ets_class *cl = ets_class_from_arg(sch, arg); 326 326 struct Qdisc *cl_q = cl->qdisc; 327 327 328 - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), 329 - d, NULL, &cl_q->bstats) < 0 || 328 + if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats, true) < 0 || 330 329 qdisc_qstats_copy(d, cl_q) < 0) 331 330 return -1; 332 331
+2 -8
net/sched/sch_generic.c
··· 304 304 305 305 /* 306 306 * Transmit possibly several skbs, and handle the return status as 307 - * required. Owning running seqcount bit guarantees that 308 - * only one CPU can execute this function. 307 + * required. Owning qdisc running bit guarantees that only one CPU 308 + * can execute this function. 309 309 * 310 310 * Returns to the caller: 311 311 * false - hardware queue frozen backoff ··· 606 606 .ops = &noop_qdisc_ops, 607 607 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), 608 608 .dev_queue = &noop_netdev_queue, 609 - .running = SEQCNT_ZERO(noop_qdisc.running), 610 609 .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), 611 610 .gso_skb = { 612 611 .next = (struct sk_buff *)&noop_qdisc.gso_skb, ··· 866 867 EXPORT_SYMBOL(pfifo_fast_ops); 867 868 868 869 static struct lock_class_key qdisc_tx_busylock; 869 - static struct lock_class_key qdisc_running_key; 870 870 871 871 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, 872 872 const struct Qdisc_ops *ops, ··· 914 916 spin_lock_init(&sch->seqlock); 915 917 lockdep_set_class(&sch->seqlock, 916 918 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); 917 - 918 - seqcount_init(&sch->running); 919 - lockdep_set_class(&sch->running, 920 - dev->qdisc_running_key ?: &qdisc_running_key); 921 919 922 920 sch->ops = ops; 923 921 sch->flags = ops->static_flags;
+3 -5
net/sched/sch_hfsc.c
··· 965 965 err = gen_replace_estimator(&cl->bstats, NULL, 966 966 &cl->rate_est, 967 967 NULL, 968 - qdisc_root_sleeping_running(sch), 968 + true, 969 969 tca[TCA_RATE]); 970 970 if (err) 971 971 return err; ··· 1033 1033 1034 1034 if (tca[TCA_RATE]) { 1035 1035 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, 1036 - NULL, 1037 - qdisc_root_sleeping_running(sch), 1038 - tca[TCA_RATE]); 1036 + NULL, true, tca[TCA_RATE]); 1039 1037 if (err) { 1040 1038 tcf_block_put(cl->block); 1041 1039 kfree(cl); ··· 1326 1328 xstats.work = cl->cl_total; 1327 1329 xstats.rtwork = cl->cl_cumul; 1328 1330 1329 - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 || 1331 + if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || 1330 1332 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || 1331 1333 gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) 1332 1334 return -1;
+3 -4
net/sched/sch_htb.c
··· 1368 1368 } 1369 1369 } 1370 1370 1371 - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), 1372 - d, NULL, &cl->bstats) < 0 || 1371 + if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || 1373 1372 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || 1374 1373 gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0) 1375 1374 return -1; ··· 1864 1865 err = gen_new_estimator(&cl->bstats, NULL, 1865 1866 &cl->rate_est, 1866 1867 NULL, 1867 - qdisc_root_sleeping_running(sch), 1868 + true, 1868 1869 tca[TCA_RATE] ? : &est.nla); 1869 1870 if (err) 1870 1871 goto err_block_put; ··· 1990 1991 err = gen_replace_estimator(&cl->bstats, NULL, 1991 1992 &cl->rate_est, 1992 1993 NULL, 1993 - qdisc_root_sleeping_running(sch), 1994 + true, 1994 1995 tca[TCA_RATE]); 1995 1996 if (err) 1996 1997 return err;
+3 -4
net/sched/sch_mq.c
··· 144 144 qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; 145 145 spin_lock_bh(qdisc_lock(qdisc)); 146 146 147 - gnet_stats_add_basic(NULL, &sch->bstats, qdisc->cpu_bstats, 148 - &qdisc->bstats); 147 + gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, 148 + &qdisc->bstats, false); 149 149 gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats, 150 150 &qdisc->qstats); 151 151 sch->q.qlen += qdisc_qlen(qdisc); ··· 231 231 struct netdev_queue *dev_queue = mq_queue_get(sch, cl); 232 232 233 233 sch = dev_queue->qdisc_sleeping; 234 - if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats, 235 - &sch->bstats) < 0 || 234 + if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 || 236 235 qdisc_qstats_copy(d, sch) < 0) 237 236 return -1; 238 237 return 0;
+7 -7
net/sched/sch_mqprio.c
··· 402 402 qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; 403 403 spin_lock_bh(qdisc_lock(qdisc)); 404 404 405 - gnet_stats_add_basic(NULL, &sch->bstats, qdisc->cpu_bstats, 406 - &qdisc->bstats); 405 + gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, 406 + &qdisc->bstats, false); 407 407 gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats, 408 408 &qdisc->qstats); 409 409 sch->q.qlen += qdisc_qlen(qdisc); ··· 519 519 520 520 spin_lock_bh(qdisc_lock(qdisc)); 521 521 522 - gnet_stats_add_basic(NULL, &bstats, qdisc->cpu_bstats, 523 - &qdisc->bstats); 522 + gnet_stats_add_basic(&bstats, qdisc->cpu_bstats, 523 + &qdisc->bstats, false); 524 524 gnet_stats_add_queue(&qstats, qdisc->cpu_qstats, 525 525 &qdisc->qstats); 526 526 sch->q.qlen += qdisc_qlen(qdisc); ··· 532 532 /* Reclaim root sleeping lock before completing stats */ 533 533 if (d->lock) 534 534 spin_lock_bh(d->lock); 535 - if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 || 535 + if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 || 536 536 gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0) 537 537 return -1; 538 538 } else { 539 539 struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); 540 540 541 541 sch = dev_queue->qdisc_sleeping; 542 - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, 543 - sch->cpu_bstats, &sch->bstats) < 0 || 542 + if (gnet_stats_copy_basic(d, sch->cpu_bstats, 543 + &sch->bstats, true) < 0 || 544 544 qdisc_qstats_copy(d, sch) < 0) 545 545 return -1; 546 546 }
+1 -2
net/sched/sch_multiq.c
··· 338 338 struct Qdisc *cl_q; 339 339 340 340 cl_q = q->queues[cl - 1]; 341 - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), 342 - d, cl_q->cpu_bstats, &cl_q->bstats) < 0 || 341 + if (gnet_stats_copy_basic(d, cl_q->cpu_bstats, &cl_q->bstats, true) < 0 || 343 342 qdisc_qstats_copy(d, cl_q) < 0) 344 343 return -1; 345 344
+2 -2
net/sched/sch_prio.c
··· 361 361 struct Qdisc *cl_q; 362 362 363 363 cl_q = q->queues[cl - 1]; 364 - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), 365 - d, cl_q->cpu_bstats, &cl_q->bstats) < 0 || 364 + if (gnet_stats_copy_basic(d, cl_q->cpu_bstats, 365 + &cl_q->bstats, true) < 0 || 366 366 qdisc_qstats_copy(d, cl_q) < 0) 367 367 return -1; 368 368
+3 -4
net/sched/sch_qfq.c
··· 451 451 err = gen_replace_estimator(&cl->bstats, NULL, 452 452 &cl->rate_est, 453 453 NULL, 454 - qdisc_root_sleeping_running(sch), 454 + true, 455 455 tca[TCA_RATE]); 456 456 if (err) 457 457 return err; ··· 478 478 err = gen_new_estimator(&cl->bstats, NULL, 479 479 &cl->rate_est, 480 480 NULL, 481 - qdisc_root_sleeping_running(sch), 481 + true, 482 482 tca[TCA_RATE]); 483 483 if (err) 484 484 goto destroy_class; ··· 640 640 xstats.weight = cl->agg->class_weight; 641 641 xstats.lmax = cl->agg->lmax; 642 642 643 - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), 644 - d, NULL, &cl->bstats) < 0 || 643 + if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || 645 644 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || 646 645 qdisc_qstats_copy(d, cl->qdisc) < 0) 647 646 return -1;
+1 -1
net/sched/sch_taprio.c
··· 1977 1977 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); 1978 1978 1979 1979 sch = dev_queue->qdisc_sleeping; 1980 - if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || 1980 + if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 || 1981 1981 qdisc_qstats_copy(d, sch) < 0) 1982 1982 return -1; 1983 1983 return 0;