// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 * Implemented on Linux by:
 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */
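
/* Example configuration (eth0 and the parameter values are only an
 * illustration):
 *
 *	tc qdisc replace dev eth0 root codel limit 1000 \
 *		target 5ms interval 100ms ecn
 *
 * target, interval and ce_threshold travel over netlink in
 * microseconds; limit is a packet count, ecn a boolean.
 */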

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

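/* The default limit is a packet count, not bytes. It acts only as a
 * safety valve: in normal operation CoDel reacts to packet sojourn
 * time and drops or marks long before the queue reaches this length.
 */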
#define DEFAULT_CODEL_LIMIT 1000

struct codel_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
	u32			drop_overlimit; /* packets dropped at enqueue, queue full */
};

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}

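/* Drop callback for codel_dequeue(): hands the doomed packet back to
 * the qdisc layer with a "congested" drop reason, so drop monitoring
 * can tell CoDel's drops apart from plain overlimit tail drops.
 */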
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	qdisc_dequeue_drop(sch, skb, SKB_DROP_REASON_QDISC_CONGESTED);
	qdisc_qstats_drop(sch);
}

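/* codel_dequeue() does the real work (sojourn-time measurement and the
 * drop/mark control law); this wrapper propagates any resulting drops
 * to ancestor qdiscs and updates the byte/packet counters.
 */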
static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	if (q->stats.drop_count) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

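/* Enqueue timestamps the packet (in the qdisc cb area) so dequeue can
 * later compute its sojourn time; when the queue is already at its
 * limit the packet is dropped at the tail instead.
 */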
static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop_reason(skb, sch, to_free,
				 SKB_DROP_REASON_QDISC_OVERLIMIT);
}

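/* All time-valued attributes (target, interval, ce_threshold) are
 * u32 microsecond quantities on the netlink side.
 */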
static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
};

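/* Internally codel_time_t counts in units of ns >> CODEL_SHIFT, so a
 * user-supplied microsecond value is converted with
 * (usec * NSEC_PER_USEC) >> CODEL_SHIFT. With CODEL_SHIFT == 10, the
 * default 5 ms target becomes 5000000 >> 10 = 4882 internal units.
 */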
static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	unsigned int dropped_pkts = 0, dropped_bytes = 0;
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		WRITE_ONCE(q->params.target,
			   ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		WRITE_ONCE(q->params.ce_threshold,
			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		WRITE_ONCE(q->params.interval,
			   ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_LIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_CODEL_LIMIT]));

	if (tb[TCA_CODEL_ECN])
		WRITE_ONCE(q->params.ecn,
			   !!nla_get_u32(tb[TCA_CODEL_ECN]));

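	/* A lowered limit can leave the queue over length: trim the
	 * excess while the tree is still locked, then account the freed
	 * packets/bytes to ancestor qdiscs.
	 */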
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);

		if (!skb)
			break;

		dropped_pkts++;
		dropped_bytes += qdisc_pkt_len(skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);

	sch_tree_unlock(sch);
	return 0;
}

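/* codel_params_init() supplies the usual defaults (5 ms target, 100 ms
 * interval, ECN off, ce_threshold disabled); an optional netlink config
 * passed at creation time is applied through codel_change().
 */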
static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	sch->flags |= TCQ_F_DEQUEUE_DROPS;

	return 0;
}

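/* Dump the current configuration, converting the internal
 * ns >> CODEL_SHIFT times back to microseconds; ce_threshold is only
 * reported when it has been enabled.
 */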
static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	codel_time_t ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(READ_ONCE(q->params.target))) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(READ_ONCE(q->params.interval))) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			READ_ONCE(q->params.ecn)))
		goto nla_put_failure;
	ce_threshold = READ_ONCE(q->params.ce_threshold);
	if (ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

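/* drop_next is exported as a signed offset from "now" in microseconds:
 * positive while the next scheduled CoDel drop is still pending,
 * negative once it is overdue.
 */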
static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit = q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

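/* Flush all queued packets and restart the control law from scratch;
 * cumulative stats (marks, overlimit drops) are left intact.
 */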
static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		=	"codel",
	.priv_size	=	sizeof(struct codel_sched_data),

	.enqueue	=	codel_qdisc_enqueue,
	.dequeue	=	codel_qdisc_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	codel_init,
	.reset		=	codel_reset,
	.change		=	codel_change,
	.dump		=	codel_dump,
	.dump_stats	=	codel_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("codel");

static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");