// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fair Queue CoDel discipline
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by the internal classifier or an external one)
 * into flows.
 * This is a stochastic model (as we use a hash, several flows
 * might be hashed onto the same slot).
 * Each flow has a CoDel-managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO),
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

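/* Illustrative user space configuration (not part of this file): the qdisc
 * is normally attached and tuned with the tc utility, for example
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		target 5ms interval 100ms quantum 1514 ecn
 *
 * The option names map to the TCA_FQ_CODEL_* netlink attributes handled in
 * fq_codel_change() below; the device name and values above are examples
 * only, not recommendations.
 */
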
struct fq_codel_flow {
	struct sk_buff *head;
	struct sk_buff *tail;
	struct list_head flowchain;
	int deficit;
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32 *backlogs;			/* backlog table [flows_cnt] */
	u32 flows_cnt;			/* number of flows */
	u32 quantum;			/* psched_mtu(qdisc_dev(sch)); */
	u32 drop_batch_size;
	u32 memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32 memory_usage;
	u32 drop_overmemory;
	u32 drop_overlimit;
	u32 new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

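/* Map an skb to a flow index in [1, flows_cnt]. A packet whose skb->priority
 * addresses this qdisc directly selects its flow; otherwise an attached
 * classifier (if any) is consulted, and finally the flow hash is used.
 * A return value of 0 means the packet must be dropped (or was consumed by a
 * classifier action); *qerr then carries the verdict for the caller.
 */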
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, NULL, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* helper functions: might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_OVERLIMIT);
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	/* Tell codel to increase its signal strength also */
	flow->cvars.count += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}

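/* Enqueue path: classify the packet to a flow, timestamp it for CoDel,
 * append it to that flow's FIFO and, if the packet or memory limit is now
 * exceeded, shed a batch of packets from the fattest flow via
 * fq_codel_drop().
 */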
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int ret;
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
	}
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog
	 * with a 64 packet limit so we do not add too big a cpu spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

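/* Drop callback passed to codel_dequeue(): handles a packet that CoDel
 * decided to drop on the dequeue path (congestion signal), as opposed to
 * the overlimit drops performed at enqueue time.
 */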
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	qdisc_dequeue_drop(sch, skb, SKB_DROP_REASON_QDISC_CONGESTED);
	qdisc_qstats_drop(sch);
}

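/* Dequeue path: deficit round robin over the new_flows and old_flows lists.
 * New flows are served first; a flow whose deficit is exhausted gets one
 * quantum added and is moved to the tail of old_flows. The per-flow packet
 * decision (deliver, ECN mark or drop) is delegated to codel_dequeue().
 */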
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);

	if (q->cstats.drop_count) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR] = { .type = NLA_U8 },
	[TCA_FQ_CODEL_CE_THRESHOLD_MASK] = { .type = NLA_U8 },
};

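/* Runtime (re)configuration via netlink. Attributes are parsed and validated
 * outside the qdisc tree lock, then applied under sch_tree_lock(); finally
 * the queue is trimmed until it fits the possibly reduced packet and memory
 * limits.
 */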
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	unsigned int dropped_pkts = 0, dropped_bytes = 0;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	u32 quantum = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
					  fq_codel_policy, NULL);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	if (tb[TCA_FQ_CODEL_QUANTUM]) {
		quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
		if (quantum > FQ_CODEL_QUANTUM_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid quantum");
			return -EINVAL;
		}
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		WRITE_ONCE(q->cparams.target,
			   (target * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		WRITE_ONCE(q->cparams.ce_threshold,
			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
		WRITE_ONCE(q->cparams.ce_threshold_selector,
			   nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]));
	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
		WRITE_ONCE(q->cparams.ce_threshold_mask,
			   nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]));

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		WRITE_ONCE(q->cparams.interval,
			   (interval * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]));

	if (tb[TCA_FQ_CODEL_ECN])
		WRITE_ONCE(q->cparams.ecn,
			   !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]));

	if (quantum)
		WRITE_ONCE(q->quantum, quantum);

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		WRITE_ONCE(q->drop_batch_size,
			   max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		WRITE_ONCE(q->memory_limit,
			   min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);

		if (!skb)
			break;

		dropped_pkts++;
		dropped_bytes += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);

	sch_tree_unlock(sch);
	return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	kvfree(q->backlogs);
	kvfree(q->flows);
}

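/* Defaults: 10240 packet limit, 1024 flows, 32 MByte memory limit, drop
 * batches of up to 64 packets, quantum and CoDel MTU set to the device MTU,
 * ECN marking enabled.
 */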
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		err = fq_codel_change(sch, opt, extack);
		if (err)
			goto init_failure;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	if (!q->flows) {
		q->flows = kvcalloc(q->flows_cnt,
				    sizeof(struct fq_codel_flow),
				    GFP_KERNEL);
		if (!q->flows) {
			err = -ENOMEM;
			goto init_failure;
		}
		q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
		if (!q->backlogs) {
			err = -ENOMEM;
			goto alloc_failure;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	sch->flags |= TCQ_F_DEQUEUE_DROPS;

	return 0;

alloc_failure:
	kvfree(q->flows);
	q->flows = NULL;
init_failure:
	q->flows_cnt = 0;
	return err;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	codel_time_t ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(READ_ONCE(q->cparams.target))) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(READ_ONCE(q->cparams.interval))) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			READ_ONCE(q->cparams.ecn)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			READ_ONCE(q->quantum)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			READ_ONCE(q->drop_batch_size)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			READ_ONCE(q->memory_limit)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			READ_ONCE(q->flows_cnt)))
		goto nla_put_failure;

	ce_threshold = READ_ONCE(q->cparams.ce_threshold);
	if (ce_threshold != CODEL_DISABLED_THRESHOLD) {
		if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
				codel_time_to_us(ce_threshold)))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR,
			       READ_ONCE(q->cparams.ce_threshold_selector)))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK,
			       READ_ONCE(q->cparams.ce_threshold_mask)))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type = TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

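/* fq_codel has no configurable classes; the per-flow queues are exposed as
 * read-only pseudo-classes (minor number = flow index + 1) so that filters
 * can be attached to the qdisc and per-flow statistics can be dumped.
 */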
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
					    struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = 0;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain)) {
			arg->count++;
			continue;
		}
		if (!tc_qdisc_stats_dump(sch, i + 1, arg))
			break;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		= fq_codel_leaf,
	.find		= fq_codel_find,
	.tcf_block	= fq_codel_tcf_block,
	.bind_tcf	= fq_codel_bind,
	.unbind_tcf	= fq_codel_unbind,
	.dump		= fq_codel_dump_class,
	.dump_stats	= fq_codel_dump_class_stats,
	.walk		= fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		= &fq_codel_class_ops,
	.id		= "fq_codel",
	.priv_size	= sizeof(struct fq_codel_sched_data),
	.enqueue	= fq_codel_enqueue,
	.dequeue	= fq_codel_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_codel_init,
	.reset		= fq_codel_reset,
	.destroy	= fq_codel_destroy,
	.change		= fq_codel_change,
	.dump		= fq_codel_dump,
	.dump_stats	= fq_codel_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("fq_codel");

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue CoDel discipline");