Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

// SPDX-License-Identifier: GPL-2.0

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#define QDISC_OP_IDX(op)	(offsetof(struct Qdisc_ops, op) / sizeof(void (*)(void)))
#define QDISC_MOFF_IDX(moff)	(moff / sizeof(void (*)(void)))
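
/*
 * Both macros divide a byte offset within struct Qdisc_ops by the size of a
 * function pointer, turning an op member into a dense array index. This lets
 * qdisc_ops_context_flags[] below be indexed by op name at build time
 * (QDISC_OP_IDX) and by the attach member offset at verification time
 * (QDISC_MOFF_IDX), with both forms agreeing on the index.
 */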

static struct bpf_struct_ops bpf_Qdisc_ops;

struct bpf_sched_data {
	struct qdisc_watchdog watchdog;
};

struct bpf_sk_buff_ptr {
	struct sk_buff *skb;
};

static int bpf_qdisc_init(struct btf *btf)
{
	return 0;
}

BTF_ID_LIST_SINGLE(bpf_qdisc_ids, struct, Qdisc)
BTF_ID_LIST_SINGLE(bpf_sk_buff_ids, struct, sk_buff)
BTF_ID_LIST_SINGLE(bpf_sk_buff_ptr_ids, struct, bpf_sk_buff_ptr)

static bool bpf_qdisc_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      const struct bpf_prog *prog,
				      struct bpf_insn_access_aux *info)
{
	struct btf *btf = prog->aux->attach_btf;
	u32 arg;

	arg = btf_ctx_arg_idx(btf, prog->aux->attach_func_proto, off);
	if (prog->aux->attach_st_ops_member_off == offsetof(struct Qdisc_ops, enqueue)) {
		if (arg == 2 && type == BPF_READ) {
			info->reg_type = PTR_TO_BTF_ID | PTR_TRUSTED;
			info->btf = btf;
			info->btf_id = bpf_sk_buff_ptr_ids[0];
			return true;
		}
	}

	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}
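
/*
 * What this buys the program side, as a sketch (the "fifo" names are
 * hypothetical; the SEC() layout follows libbpf's struct_ops conventions):
 *
 *	SEC("struct_ops/bpf_fifo_enqueue")
 *	int BPF_PROG(bpf_fifo_enqueue, struct sk_buff *skb, struct Qdisc *sch,
 *		     struct bpf_sk_buff_ptr *to_free)
 *	{
 *		...
 *	}
 *
 * Reads of the third context argument (arg == 2) are rewritten above to yield
 * a trusted PTR_TO_BTF_ID of type struct bpf_sk_buff_ptr, so the program can
 * pass it straight to bpf_qdisc_skb_drop(), defined later in this file.
 */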

static int bpf_qdisc_qdisc_access(struct bpf_verifier_log *log,
				  const struct bpf_reg_state *reg,
				  int off, size_t *end)
{
	switch (off) {
	case offsetof(struct Qdisc, limit):
		*end = offsetofend(struct Qdisc, limit);
		break;
	case offsetof(struct Qdisc, q) + offsetof(struct qdisc_skb_head, qlen):
		*end = offsetof(struct Qdisc, q) + offsetofend(struct qdisc_skb_head, qlen);
		break;
	case offsetof(struct Qdisc, qstats) ... offsetofend(struct Qdisc, qstats) - 1:
		*end = offsetofend(struct Qdisc, qstats);
		break;
	default:
		return -EACCES;
	}

	return 0;
}
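
/*
 * Together with bpf_qdisc_btf_struct_access() below, this whitelists writes
 * to sch->limit, sch->q.qlen, and sch->qstats. A sketch of an .init that the
 * verifier would accept under these rules (the limit value is illustrative):
 *
 *	SEC("struct_ops/bpf_fifo_init")
 *	int BPF_PROG(bpf_fifo_init, struct Qdisc *sch, struct nlattr *opt,
 *		     struct netlink_ext_ack *extack)
 *	{
 *		sch->limit = 1000;	// permitted by the "limit" case above
 *		return 0;
 *	}
 */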

static int bpf_qdisc_sk_buff_access(struct bpf_verifier_log *log,
				    const struct bpf_reg_state *reg,
				    int off, size_t *end)
{
	switch (off) {
	case offsetof(struct sk_buff, tstamp):
		*end = offsetofend(struct sk_buff, tstamp);
		break;
	case offsetof(struct sk_buff, cb) + offsetof(struct qdisc_skb_cb, data[0]) ...
	     offsetof(struct sk_buff, cb) + offsetof(struct qdisc_skb_cb,
						     data[QDISC_CB_PRIV_LEN - 1]):
		*end = offsetof(struct sk_buff, cb) +
		       offsetofend(struct qdisc_skb_cb, data[QDISC_CB_PRIV_LEN - 1]);
		break;
	default:
		return -EACCES;
	}

	return 0;
}
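
/*
 * The two writable skb regions are the tstamp field and the private data[]
 * area of the qdisc cb. A sketch of an enqueue-side use (the helper and the
 * stored value are illustrative; note the cast targets qdisc_skb_cb, since
 * only its data[] window is writable, not the cb header in front of it):
 *
 *	static void record_ts(struct sk_buff *skb)
 *	{
 *		struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
 *
 *		*(u64 *)cb->data = bpf_ktime_get_ns();	// within data[]
 *		skb->tstamp = 0;			// clear the EDT stamp
 *	}
 */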

static int bpf_qdisc_btf_struct_access(struct bpf_verifier_log *log,
				       const struct bpf_reg_state *reg,
				       int off, int size)
{
	const struct btf_type *t, *skbt, *qdisct;
	size_t end;
	int err;

	skbt = btf_type_by_id(reg->btf, bpf_sk_buff_ids[0]);
	qdisct = btf_type_by_id(reg->btf, bpf_qdisc_ids[0]);
	t = btf_type_by_id(reg->btf, reg->btf_id);

	if (t == skbt) {
		err = bpf_qdisc_sk_buff_access(log, reg, off, &end);
	} else if (t == qdisct) {
		err = bpf_qdisc_qdisc_access(log, reg, off, &end);
	} else {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	if (err) {
		bpf_log(log, "no write support to %s at off %d\n",
			btf_name_by_offset(reg->btf, t->name_off), off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of %s ended at %zu\n",
			off, size, btf_name_by_offset(reg->btf, t->name_off), end);
		return -EACCES;
	}

	return 0;
}

BTF_ID_LIST_SINGLE(bpf_qdisc_init_prologue_ids, func, bpf_qdisc_init_prologue)

static int bpf_qdisc_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
				  const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	if (prog->aux->attach_st_ops_member_off != offsetof(struct Qdisc_ops, init))
		return 0;

	/* r6 = r1; // r6 will be "u64 *ctx". r1 is "u64 *ctx".
	 * r2 = r1[16]; // r2 will be "struct netlink_ext_ack *extack"
	 * r1 = r1[0]; // r1 will be "struct Qdisc *sch"
	 * r0 = bpf_qdisc_init_prologue(r1, r2);
	 * if r0 == 0 goto pc+1;
	 * BPF_EXIT;
	 * r1 = r6; // r1 will be "u64 *ctx".
	 */
	*insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 16);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_qdisc_init_prologue_ids[0]);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1);
	*insn++ = BPF_EXIT_INSN();
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}
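
/*
 * In C terms, the injected prologue makes every attached .init behave roughly
 * like the following sketch, with the kfunc's error (if any) becoming the
 * program's return value:
 *
 *	int init(struct Qdisc *sch, struct nlattr *opt,
 *		 struct netlink_ext_ack *extack)
 *	{
 *		int err = bpf_qdisc_init_prologue(sch, extack);
 *
 *		if (err)
 *			return err;
 *		// ... original BPF program body ...
 *	}
 */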

BTF_ID_LIST_SINGLE(bpf_qdisc_reset_destroy_epilogue_ids, func, bpf_qdisc_reset_destroy_epilogue)

static int bpf_qdisc_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
				  s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	if (prog->aux->attach_st_ops_member_off != offsetof(struct Qdisc_ops, reset) &&
	    prog->aux->attach_st_ops_member_off != offsetof(struct Qdisc_ops, destroy))
		return 0;

	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct Qdisc *sch"
	 * r0 = bpf_qdisc_reset_destroy_epilogue(r1);
	 * BPF_EXIT;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_qdisc_reset_destroy_epilogue_ids[0]);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}
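
/*
 * The epilogue is the mirror image: when a .reset or .destroy program exits,
 * control falls through to roughly this sketch, so the watchdog is always
 * cancelled no matter which return path the program took:
 *
 *	void reset_or_destroy(struct Qdisc *sch)
 *	{
 *		// ... original BPF program body ...
 *		bpf_qdisc_reset_destroy_epilogue(sch);
 *	}
 */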

__bpf_kfunc_start_defs();

/* bpf_skb_get_hash - Get the flow hash of an skb.
 * @skb: The skb to get the flow hash from.
 */
__bpf_kfunc u32 bpf_skb_get_hash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}

/* bpf_kfree_skb - Release an skb's reference and drop it immediately.
 * @skb: The skb whose reference is released before it is dropped.
 */
__bpf_kfunc void bpf_kfree_skb(struct sk_buff *skb)
{
	kfree_skb(skb);
}

/* bpf_qdisc_skb_drop - Drop an skb by adding it to a deferred free list.
 * @skb: The skb whose reference is released before it is dropped.
 * @to_free_list: The list of skbs to be dropped.
 */
__bpf_kfunc void bpf_qdisc_skb_drop(struct sk_buff *skb,
				    struct bpf_sk_buff_ptr *to_free_list)
{
	__qdisc_drop(skb, (struct sk_buff **)to_free_list);
}
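
/*
 * Typical use from an enqueue program, as a sketch (the extern declaration is
 * the usual way BPF programs import kfuncs; the limit check is illustrative):
 *
 *	extern void bpf_qdisc_skb_drop(struct sk_buff *skb,
 *				       struct bpf_sk_buff_ptr *to_free) __ksym;
 *
 *	// inside the enqueue program:
 *	if (sch->q.qlen >= sch->limit) {
 *		bpf_qdisc_skb_drop(skb, to_free);	// consumes the skb ref
 *		return NET_XMIT_DROP;
 *	}
 *
 * Because the kfunc is flagged KF_RELEASE below, the verifier treats the call
 * as consuming the program's reference on the skb.
 */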

/* bpf_qdisc_watchdog_schedule - Schedule a qdisc to a later time using a timer.
 * @sch: The qdisc to be scheduled.
 * @expire: The expiry time of the timer.
 * @delta_ns: The slack range of the timer.
 */
__bpf_kfunc void bpf_qdisc_watchdog_schedule(struct Qdisc *sch, u64 expire, u64 delta_ns)
{
	struct bpf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_schedule_range_ns(&q->watchdog, expire, delta_ns);
}
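
/*
 * A rate-limiting dequeue would use this to have the kernel poll it again
 * once the next packet's departure time arrives, e.g. this sketch, where
 * "next_tstamp" is a hypothetical per-qdisc value the program tracks:
 *
 *	if (next_tstamp > bpf_ktime_get_ns()) {
 *		bpf_qdisc_watchdog_schedule(sch, next_tstamp, 0);
 *		return NULL;	// nothing eligible to dequeue yet
 *	}
 */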

/* bpf_qdisc_init_prologue - Hidden kfunc called in prologue of .init. */
__bpf_kfunc int bpf_qdisc_init_prologue(struct Qdisc *sch,
					struct netlink_ext_ack *extack)
{
	struct bpf_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *p;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (sch->parent != TC_H_ROOT) {
		/* If qdisc_lookup() returns NULL, it means .init is called by
		 * qdisc_create_dflt() in mq/mqprio_init and the parent qdisc
		 * has not been added to qdisc_hash yet.
		 */
		p = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
		if (p && !(p->flags & TCQ_F_MQROOT)) {
			NL_SET_ERR_MSG(extack, "BPF qdisc only supported on root or mq");
			return -EINVAL;
		}
	}

	return 0;
}

/* bpf_qdisc_reset_destroy_epilogue - Hidden kfunc called in epilogue of .reset
 * and .destroy
 */
__bpf_kfunc void bpf_qdisc_reset_destroy_epilogue(struct Qdisc *sch)
{
	struct bpf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
}

/* bpf_qdisc_bstats_update - Update Qdisc basic statistics
 * @sch: The qdisc from which an skb is dequeued.
 * @skb: The skb to be dequeued.
 */
__bpf_kfunc void bpf_qdisc_bstats_update(struct Qdisc *sch, const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(qdisc_kfunc_ids)
BTF_ID_FLAGS(func, bpf_skb_get_hash, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfree_skb, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_qdisc_skb_drop, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_dynptr_from_skb, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_qdisc_watchdog_schedule, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_qdisc_init_prologue, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_qdisc_reset_destroy_epilogue, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_qdisc_bstats_update, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(qdisc_kfunc_ids)

BTF_SET_START(qdisc_common_kfunc_set)
BTF_ID(func, bpf_skb_get_hash)
BTF_ID(func, bpf_kfree_skb)
BTF_ID(func, bpf_dynptr_from_skb)
BTF_SET_END(qdisc_common_kfunc_set)

BTF_SET_START(qdisc_enqueue_kfunc_set)
BTF_ID(func, bpf_qdisc_skb_drop)
BTF_ID(func, bpf_qdisc_watchdog_schedule)
BTF_SET_END(qdisc_enqueue_kfunc_set)

BTF_SET_START(qdisc_dequeue_kfunc_set)
BTF_ID(func, bpf_qdisc_watchdog_schedule)
BTF_ID(func, bpf_qdisc_bstats_update)
BTF_SET_END(qdisc_dequeue_kfunc_set)
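
/*
 * The per-op scoping above means a dequeue program may combine the two
 * dequeue-scoped kfuncs, roughly as in this sketch (the skb source is elided;
 * a real program would pop it from its own queue structure):
 *
 *	SEC("struct_ops/bpf_fifo_dequeue")
 *	struct sk_buff *BPF_PROG(bpf_fifo_dequeue, struct Qdisc *sch)
 *	{
 *		struct sk_buff *skb = NULL;	// pop from the program's queue
 *
 *		if (skb)
 *			bpf_qdisc_bstats_update(sch, skb);
 *		return skb;
 *	}
 */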

enum qdisc_ops_kf_flags {
	QDISC_OPS_KF_COMMON = 0,
	QDISC_OPS_KF_ENQUEUE = 1 << 0,
	QDISC_OPS_KF_DEQUEUE = 1 << 1,
};

static const u32 qdisc_ops_context_flags[] = {
	[QDISC_OP_IDX(enqueue)] = QDISC_OPS_KF_ENQUEUE,
	[QDISC_OP_IDX(dequeue)] = QDISC_OPS_KF_DEQUEUE,
	[QDISC_OP_IDX(init)] = QDISC_OPS_KF_COMMON,
	[QDISC_OP_IDX(reset)] = QDISC_OPS_KF_COMMON,
	[QDISC_OP_IDX(destroy)] = QDISC_OPS_KF_COMMON,
};

static int bpf_qdisc_kfunc_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
	u32 moff, flags;

	if (!btf_id_set8_contains(&qdisc_kfunc_ids, kfunc_id))
		return 0;

	if (prog->aux->st_ops != &bpf_Qdisc_ops)
		return -EACCES;

	moff = prog->aux->attach_st_ops_member_off;
	flags = qdisc_ops_context_flags[QDISC_MOFF_IDX(moff)];

	if ((flags & QDISC_OPS_KF_ENQUEUE) &&
	    btf_id_set_contains(&qdisc_enqueue_kfunc_set, kfunc_id))
		return 0;

	if ((flags & QDISC_OPS_KF_DEQUEUE) &&
	    btf_id_set_contains(&qdisc_dequeue_kfunc_set, kfunc_id))
		return 0;

	if (btf_id_set_contains(&qdisc_common_kfunc_set, kfunc_id))
		return 0;

	return -EACCES;
}

static const struct btf_kfunc_id_set bpf_qdisc_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &qdisc_kfunc_ids,
	.filter = bpf_qdisc_kfunc_filter,
};

static const struct bpf_verifier_ops bpf_qdisc_verifier_ops = {
	.get_func_proto = bpf_base_func_proto,
	.is_valid_access = bpf_qdisc_is_valid_access,
	.btf_struct_access = bpf_qdisc_btf_struct_access,
	.gen_prologue = bpf_qdisc_gen_prologue,
	.gen_epilogue = bpf_qdisc_gen_epilogue,
};

static int bpf_qdisc_init_member(const struct btf_type *t,
				 const struct btf_member *member,
				 void *kdata, const void *udata)
{
	const struct Qdisc_ops *uqdisc_ops;
	struct Qdisc_ops *qdisc_ops;
	u32 moff;

	uqdisc_ops = (const struct Qdisc_ops *)udata;
	qdisc_ops = (struct Qdisc_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct Qdisc_ops, priv_size):
		if (uqdisc_ops->priv_size)
			return -EINVAL;
		qdisc_ops->priv_size = sizeof(struct bpf_sched_data);
		return 1;
	case offsetof(struct Qdisc_ops, peek):
		qdisc_ops->peek = qdisc_peek_dequeued;
		return 0;
	case offsetof(struct Qdisc_ops, id):
		if (bpf_obj_name_cpy(qdisc_ops->id, uqdisc_ops->id,
				     sizeof(qdisc_ops->id)) <= 0)
			return -EINVAL;
		return 1;
	}

	return 0;
}

static int bpf_qdisc_reg(void *kdata, struct bpf_link *link)
{
	return register_qdisc(kdata);
}

static void bpf_qdisc_unreg(void *kdata, struct bpf_link *link)
{
	return unregister_qdisc(kdata);
}

static int bpf_qdisc_validate(void *kdata)
{
	struct Qdisc_ops *ops = (struct Qdisc_ops *)kdata;

	if (!ops->enqueue || !ops->dequeue || !ops->init ||
	    !ops->reset || !ops->destroy)
		return -EINVAL;

	return 0;
}

static int Qdisc_ops__enqueue(struct sk_buff *skb__ref, struct Qdisc *sch,
			      struct sk_buff **to_free)
{
	return 0;
}

static struct sk_buff *Qdisc_ops__dequeue(struct Qdisc *sch)
{
	return NULL;
}

static int Qdisc_ops__init(struct Qdisc *sch, struct nlattr *arg,
			   struct netlink_ext_ack *extack)
{
	return 0;
}

static void Qdisc_ops__reset(struct Qdisc *sch)
{
}

static void Qdisc_ops__destroy(struct Qdisc *sch)
{
}

static struct Qdisc_ops __bpf_ops_qdisc_ops = {
	.enqueue = Qdisc_ops__enqueue,
	.dequeue = Qdisc_ops__dequeue,
	.init = Qdisc_ops__init,
	.reset = Qdisc_ops__reset,
	.destroy = Qdisc_ops__destroy,
};

static struct bpf_struct_ops bpf_Qdisc_ops = {
	.verifier_ops = &bpf_qdisc_verifier_ops,
	.reg = bpf_qdisc_reg,
	.unreg = bpf_qdisc_unreg,
	.validate = bpf_qdisc_validate,
	.init_member = bpf_qdisc_init_member,
	.init = bpf_qdisc_init,
	.name = "Qdisc_ops",
	.cfi_stubs = &__bpf_ops_qdisc_ops,
	.owner = THIS_MODULE,
};
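
/*
 * On the BPF side, a scheduler supplies the ops that bpf_qdisc_validate()
 * requires through a struct_ops map, roughly as in this sketch (names
 * hypothetical; SEC(".struct_ops.link") is libbpf's convention):
 *
 *	SEC(".struct_ops.link")
 *	struct Qdisc_ops fifo = {
 *		.enqueue = (void *)bpf_fifo_enqueue,
 *		.dequeue = (void *)bpf_fifo_dequeue,
 *		.init = (void *)bpf_fifo_init,
 *		.reset = (void *)bpf_fifo_reset,
 *		.destroy = (void *)bpf_fifo_destroy,
 *		.id = "bpf_fifo",
 *	};
 */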

BTF_ID_LIST_SINGLE(bpf_sk_buff_dtor_ids, func, bpf_kfree_skb)

static int __init bpf_qdisc_kfunc_init(void)
{
	int ret;
	const struct btf_id_dtor_kfunc skb_kfunc_dtors[] = {
		{
			.btf_id = bpf_sk_buff_ids[0],
			.kfunc_btf_id = bpf_sk_buff_dtor_ids[0]
		},
	};

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_qdisc_kfunc_set);
	ret = ret ?: register_btf_id_dtor_kfuncs(skb_kfunc_dtors,
						 ARRAY_SIZE(skb_kfunc_dtors),
						 THIS_MODULE);
	ret = ret ?: register_bpf_struct_ops(&bpf_Qdisc_ops, Qdisc_ops);

	return ret;
}
late_initcall(bpf_qdisc_kfunc_init);
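
/*
 * Once this initcall has run, userspace can load a concrete scheduler and
 * instantiate it, roughly (sketch; the skeleton name "bpf_fifo" and the tc
 * invocation are illustrative):
 *
 *	struct bpf_fifo *skel = bpf_fifo__open_and_load();
 *	struct bpf_link *link = bpf_map__attach_struct_ops(skel->maps.fifo);
 *
 *	// the qdisc is now registered under its .id, e.g.:
 *	//   tc qdisc add dev eth0 root handle 8000: bpf_fifo
 */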