// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/page_pool.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

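/* Helper for timing repeated test runs: tracks completed iterations and
 * accumulated runtime so the average per-run duration can be reported back
 * to user space. NO_PREEMPT disables preemption around the measured section
 * (used by the flow dissector and sk_lookup runners below), while NO_MIGRATE
 * only pins the task to the current CPU.
 */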
struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
				    u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i += iterations;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}
92
93/* We put this struct at the head of each page with a context and frame
94 * initialised when the page is allocated, so we don't have to do this on each
95 * repetition of the test run.
96 */
97struct xdp_page_head {
98 struct xdp_buff orig_ctx;
99 struct xdp_buff ctx;
100 struct xdp_frame frm;
101 u8 data[];
102};
103
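/* Per-run state for the BPF_F_TEST_XDP_LIVE_FRAMES mode: the original
 * context supplied by user space, a fake RX queue, the page_pool used to
 * recycle test pages, and scratch arrays for the frames and skbs produced
 * by XDP_PASS verdicts.
 */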
struct xdp_test_data {
	struct xdp_buff *orig_ctx;
	struct xdp_rxq_info rxq;
	struct net_device *dev;
	struct page_pool *pp;
	struct xdp_frame **frames;
	struct sk_buff **skbs;
	struct xdp_mem_info mem;
	u32 batch_size;
	u32 frame_cnt;
};

#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256

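/* page_pool init callback: stamp each freshly allocated page with a copy of
 * the original packet data and a pre-initialised xdp_buff/xdp_frame pair, so
 * individual test iterations only need to reset the context if the program
 * modified it.
 */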
static void xdp_test_run_init_page(struct page *page, void *arg)
{
	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
	struct xdp_buff *new_ctx, *orig_ctx;
	u32 headroom = XDP_PACKET_HEADROOM;
	struct xdp_test_data *xdp = arg;
	size_t frm_len, meta_len;
	struct xdp_frame *frm;
	void *data;

	orig_ctx = xdp->orig_ctx;
	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
	meta_len = orig_ctx->data - orig_ctx->data_meta;
	headroom -= meta_len;

	new_ctx = &head->ctx;
	frm = &head->frm;
	data = &head->data;
	memcpy(data + headroom, orig_ctx->data_meta, frm_len);

	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
	new_ctx->data = new_ctx->data_meta + meta_len;

	xdp_update_frame_from_buff(new_ctx, frm);
	frm->mem = new_ctx->rxq->mem;

	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}

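/* Allocate the frame/skb scratch arrays and create a page_pool whose pages
 * are pre-initialised via xdp_test_run_init_page(), then register it as an
 * XDP memory model and wire up a fake RX queue pointing at it.
 */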
static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
	struct page_pool *pp;
	int err = -ENOMEM;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = xdp->batch_size,
		.nid = NUMA_NO_NODE,
		.init_callback = xdp_test_run_init_page,
		.init_arg = xdp,
	};

	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->frames)
		return -ENOMEM;

	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->skbs)
		goto err_skbs;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp)) {
		err = PTR_ERR(pp);
		goto err_pp;
	}

	/* will copy 'mem.id' into pp->xdp_mem_id */
	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_mmodel;

	xdp->pp = pp;

	/* We create a 'fake' RXQ referencing the original dev, but with an
	 * xdp_mem_info pointing to our page_pool
	 */
	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
	xdp->rxq.mem.id = pp->xdp_mem_id;
	xdp->dev = orig_ctx->rxq->dev;
	xdp->orig_ctx = orig_ctx;

	return 0;

err_mmodel:
	page_pool_destroy(pp);
err_pp:
	kvfree(xdp->skbs);
err_skbs:
	kvfree(xdp->frames);
	return err;
}

static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
	xdp_unreg_mem_model(&xdp->mem);
	page_pool_destroy(xdp->pp);
	/* frames and skbs were allocated with kvmalloc_array(), so they must
	 * be freed with kvfree(), not kfree()
	 */
	kvfree(xdp->frames);
	kvfree(xdp->skbs);
}

static bool ctx_was_changed(struct xdp_page_head *head)
{
	return head->orig_ctx.data != head->ctx.data ||
	       head->orig_ctx.data_meta != head->ctx.data_meta ||
	       head->orig_ctx.data_end != head->ctx.data_end;
}

static void reset_ctx(struct xdp_page_head *head)
{
	if (likely(!ctx_was_changed(head)))
		return;

	head->ctx.data = head->orig_ctx.data;
	head->ctx.data_meta = head->orig_ctx.data_meta;
	head->ctx.data_end = head->orig_ctx.data_end;
	xdp_update_frame_from_buff(&head->ctx, &head->frm);
}

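/* Turn the xdp_frames that a batch passed (XDP_PASS) into skbs and feed them
 * into the local network stack via netif_receive_skb_list(). skb heads are
 * bulk-allocated to amortise the allocation cost across the batch.
 */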
static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
			   struct sk_buff **skbs,
			   struct net_device *dev)
{
	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
	int i, n;
	LIST_HEAD(list);

	n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
	if (unlikely(n == 0)) {
		for (i = 0; i < nframes; i++)
			xdp_return_frame(frames[i]);
		return -ENOMEM;
	}

	for (i = 0; i < nframes; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
		if (!skb) {
			xdp_return_frame(xdpf);
			continue;
		}

		list_add_tail(&skb->list, &list);
	}
	netif_receive_skb_list(&list);

	return 0;
}

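/* Run one batch of live-frame test iterations: allocate up to batch_size
 * pre-initialised pages from the pool, run the program on each, and act on
 * the verdict. XDP_TX is emulated as a redirect back to the same interface,
 * since there is no real driver TX path here.
 */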
static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
			      u32 repeat)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	int err = 0, act, ret, i, nframes = 0, batch_sz;
	struct xdp_frame **frames = xdp->frames;
	struct xdp_page_head *head;
	struct xdp_frame *frm;
	bool redirect = false;
	struct xdp_buff *ctx;
	struct page *page;

	batch_sz = min_t(u32, repeat, xdp->batch_size);

	local_bh_disable();
	xdp_set_return_frame_no_direct();

	for (i = 0; i < batch_sz; i++) {
		page = page_pool_dev_alloc_pages(xdp->pp);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}

		head = phys_to_virt(page_to_phys(page));
		reset_ctx(head);
		ctx = &head->ctx;
		frm = &head->frm;
		xdp->frame_cnt++;

		act = bpf_prog_run_xdp(prog, ctx);

		/* if program changed pkt bounds we need to update the xdp_frame */
		if (unlikely(ctx_was_changed(head))) {
			ret = xdp_update_frame_from_buff(ctx, frm);
			if (ret) {
				xdp_return_buff(ctx);
				continue;
			}
		}

		switch (act) {
		case XDP_TX:
			/* we can't do a real XDP_TX since we're not in the
			 * driver, so turn it into a REDIRECT back to the same
			 * index
			 */
			ri->tgt_index = xdp->dev->ifindex;
			ri->map_id = INT_MAX;
			ri->map_type = BPF_MAP_TYPE_UNSPEC;
			fallthrough;
		case XDP_REDIRECT:
			redirect = true;
			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
			if (ret)
				xdp_return_buff(ctx);
			break;
		case XDP_PASS:
			frames[nframes++] = frm;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_buff(ctx);
			break;
		}
	}

out:
	if (redirect)
		xdp_do_flush();
	if (nframes) {
		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
		if (ret)
			err = ret;
	}

	xdp_clear_return_frame_no_direct();
	local_bh_enable();
	return err;
}

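/* Entry point for the live-frames mode: set up the page_pool-backed test
 * state, then run batches under the test timer until 'repeat' iterations
 * have completed or an error/signal stops the loop.
 */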
static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
				 u32 repeat, u32 batch_size, u32 *time)
{
	struct xdp_test_data xdp = { .batch_size = batch_size };
	struct bpf_test_timer t = { .mode = NO_MIGRATE };
	int ret;

	if (!repeat)
		repeat = 1;

	ret = xdp_test_run_setup(&xdp, ctx);
	if (ret)
		return ret;

	bpf_test_timer_enter(&t);
	do {
		xdp.frame_cnt = 0;
		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
		if (unlikely(ret < 0))
			break;
	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	xdp_test_run_teardown(&xdp);
	return ret;
}

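/* Generic repeated-run helper for skb and (non-live) XDP test runs: runs the
 * program 'repeat' times under the test timer, with per-program cgroup
 * storage allocated for the duration of the test.
 */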
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

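/* Copy test results (packet data, optional frags, return value and duration)
 * back to user space. -ENOSPC indicates the user-supplied output buffer was
 * too small and the data was truncated.
 */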
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   struct skb_shared_info *sinfo, u32 size,
			   u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}

	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}


/* Integer types of various sizes and pointer combinations cover a variety of
 * architecture-dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);
ALLOW_ERROR_INJECTION(bpf_fentry_test1, ERRNO);

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

struct prog_test_member {
	u64 c;
};

struct prog_test_ref_kfunc {
	int a;
	int b;
	struct prog_test_member memb;
	struct prog_test_ref_kfunc *next;
};

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
};

noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	/* randomly return NULL */
	if (get_jiffies_64() % 2)
		return NULL;
	return &prog_test_struct;
}

noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
}

noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}

struct prog_test_pass1 {
	int x0;
	struct {
		int x1;
		struct {
			int x2;
			struct {
				int x3;
			};
		};
	};
};

struct prog_test_pass2 {
	int len;
	short arr1[4];
	struct {
		char arr2[4];
		unsigned long arr3[8];
	} x;
};

struct prog_test_fail1 {
	void *p;
	int x;
};

struct prog_test_fail2 {
	int x8;
	struct prog_test_pass1 x;
};

struct prog_test_fail3 {
	int len;
	char arr1[2];
	char arr2[];
};

noinline void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

noinline void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

noinline void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

noinline void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

noinline void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

noinline void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

noinline void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

noinline void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

noinline void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

BTF_SET_START(test_sk_check_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(func, bpf_kfunc_call_memb_release)
BTF_ID(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID(func, bpf_kfunc_call_test_pass1)
BTF_ID(func, bpf_kfunc_call_test_pass2)
BTF_ID(func, bpf_kfunc_call_test_fail1)
BTF_ID(func, bpf_kfunc_call_test_fail2)
BTF_ID(func, bpf_kfunc_call_test_fail3)
BTF_ID(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_SET_END(test_sk_check_kfunc_ids)

BTF_SET_START(test_sk_acquire_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_SET_END(test_sk_acquire_kfunc_ids)

BTF_SET_START(test_sk_release_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(func, bpf_kfunc_call_memb_release)
BTF_SET_END(test_sk_release_kfunc_ids)

BTF_SET_START(test_sk_ret_null_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_SET_END(test_sk_ret_null_kfunc_ids)

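/* Allocate and populate the test packet data buffer: 'user_size' bytes are
 * copied in from user space into a kzalloc'ed buffer of 'size' bytes plus
 * the requested head- and tailroom.
 */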
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
			   u32 size, u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

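/* Test runner for fentry/fexit and fmod_ret programs: instead of running the
 * program on packet data, this calls the bpf_fentry_test*() functions above
 * (to which the program under test is attached) and checks their return
 * values. The result reported to user space is (side_effect << 16) | ret.
 */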
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}

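/* Copy in an optional user-supplied context object. bpf_check_uarg_tail_zero()
 * lets user space pass a larger struct than the kernel knows about, as long
 * as the extra trailing bytes are zero.
 */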
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

static struct proto bpf_dummy_proto = {
	.name = "bpf_dummy",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

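/* Test runner for skb-based program types (TC, LWT, etc.): builds a real
 * sk_buff around the user-supplied data, attaches a dummy socket, populates
 * skb fields from the optional __sk_buff context, runs the program, and
 * copies the (possibly modified) packet and context back to user space.
 */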
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

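/* Test runner for XDP programs: builds an xdp_buff (with optional frags for
 * inputs larger than one page) around the user data and either runs the
 * program repeatedly on it (the default) or, with BPF_F_TEST_XDP_LIVE_FRAMES,
 * pushes the resulting frames through the real redirect/receive paths.
 */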
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 batch_size = kattr->test.batch_size;
	u32 retval = 0, duration, max_data_sz;
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct skb_shared_info *sinfo;
	struct xdp_buff xdp = {};
	int i, ret = -EINVAL;
	struct xdp_md *ctx;
	void *data;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
		return -EINVAL;

	if (do_live) {
		if (!batch_size)
			batch_size = NAPI_POLL_WEIGHT;
		else if (batch_size > TEST_XDP_MAX_BATCH)
			return -E2BIG;

		headroom += sizeof(struct xdp_page_head);
	} else if (batch_size) {
		return -EINVAL;
	}

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = 4096 - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
			goto free_ctx;
		size = max_data_sz;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	if (unlikely(kattr->test.data_size_in > size)) {
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

		while (size < kattr->test.data_size_in) {
			struct page *page;
			skb_frag_t *frag;
			u32 data_len;

			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
				ret = -ENOMEM;
				goto out;
			}

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto out;
			}

			frag = &sinfo->frags[sinfo->nr_frags++];
			__skb_frag_set_page(frag, page);

			data_len = min_t(u32, kattr->test.data_size_in - size,
					 PAGE_SIZE);
			skb_frag_size_set(frag, data_len);

			if (copy_from_user(page_address(page), data_in + size,
					   data_len)) {
				ret = -EFAULT;
				goto out;
			}
			sinfo->xdp_frags_size += data_len;
			size += data_len;
		}
		xdp_buff_set_frags_flag(&xdp);
	}

	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);

	if (do_live)
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
			      sizeof(flow_keys), retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat, or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags ||
	    kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}

static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner = THIS_MODULE,
	.check_set = &test_sk_check_kfunc_ids,
	.acquire_set = &test_sk_acquire_kfunc_ids,
	.release_set = &test_sk_release_kfunc_ids,
	.ret_null_set = &test_sk_ret_null_kfunc_ids,
};

static int __init bpf_prog_test_run_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
}
late_initcall(bpf_prog_test_run_init);
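
/* Usage sketch (not part of this file): from user space these runners are
 * reached via the BPF_PROG_TEST_RUN command of the bpf() syscall, e.g.
 * through libbpf's bpf_prog_test_run_opts(). A minimal example, assuming
 * 'prog_fd' holds a loaded XDP program and 'pkt' a test Ethernet frame:
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .data_in = pkt,
 *		    .data_size_in = sizeof(pkt),
 *		    .repeat = 1,
 *	);
 *	int err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// on success, opts.retval holds the XDP verdict (e.g. XDP_PASS)
 *	// and opts.duration the average runtime in nanoseconds
 */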