Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Implement socket kfuncs for bpf_testmod

This patch adds a set of kfuncs to bpf_testmod that can be used to
manipulate a socket from kernel space.

Signed-off-by: Jordan Rife <jrife@google.com>
Link: https://lore.kernel.org/r/20240429214529.2644801-3-jrife@google.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>

Authored by Jordan Rife and committed by Martin KaFai Lau.
bbb1cfdd 8e667a06

+282
+255
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
··· 10 10 #include <linux/percpu-defs.h> 11 11 #include <linux/sysfs.h> 12 12 #include <linux/tracepoint.h> 13 + #include <linux/net.h> 14 + #include <linux/socket.h> 15 + #include <linux/nsproxy.h> 16 + #include <linux/inet.h> 17 + #include <linux/in.h> 18 + #include <linux/in6.h> 19 + #include <linux/un.h> 20 + #include <net/sock.h> 13 21 #include "bpf_testmod.h" 14 22 #include "bpf_testmod_kfunc.h" 15 23 16 24 #define CREATE_TRACE_POINTS 17 25 #include "bpf_testmod-events.h" 26 + 27 + #define CONNECT_TIMEOUT_SEC 1 18 28 19 29 typedef int (*func_proto_typedef)(long); 20 30 typedef int (*func_proto_typedef_nested1)(func_proto_typedef); ··· 32 22 33 23 DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123; 34 24 long bpf_testmod_test_struct_arg_result; 25 + static DEFINE_MUTEX(sock_lock); 26 + static struct socket *sock; 35 27 36 28 struct bpf_testmod_struct_arg_1 { 37 29 int a; ··· 510 498 { 511 499 } 512 500 501 + __bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args) 502 + { 503 + int proto; 504 + int err; 505 + 506 + mutex_lock(&sock_lock); 507 + 508 + if (sock) { 509 + pr_err("%s called without releasing old sock", __func__); 510 + err = -EPERM; 511 + goto out; 512 + } 513 + 514 + switch (args->af) { 515 + case AF_INET: 516 + case AF_INET6: 517 + proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP; 518 + break; 519 + case AF_UNIX: 520 + proto = PF_UNIX; 521 + break; 522 + default: 523 + pr_err("invalid address family %d\n", args->af); 524 + err = -EINVAL; 525 + goto out; 526 + } 527 + 528 + err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type, 529 + proto, &sock); 530 + 531 + if (!err) 532 + /* Set timeout for call to kernel_connect() to prevent it from hanging, 533 + * and consider the connection attempt failed if it returns 534 + * -EINPROGRESS. 
535 + */ 536 + sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ; 537 + out: 538 + mutex_unlock(&sock_lock); 539 + 540 + return err; 541 + } 542 + 543 + __bpf_kfunc void bpf_kfunc_close_sock(void) 544 + { 545 + mutex_lock(&sock_lock); 546 + 547 + if (sock) { 548 + sock_release(sock); 549 + sock = NULL; 550 + } 551 + 552 + mutex_unlock(&sock_lock); 553 + } 554 + 555 + __bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args) 556 + { 557 + int err; 558 + 559 + if (args->addrlen > sizeof(args->addr)) 560 + return -EINVAL; 561 + 562 + mutex_lock(&sock_lock); 563 + 564 + if (!sock) { 565 + pr_err("%s called without initializing sock", __func__); 566 + err = -EPERM; 567 + goto out; 568 + } 569 + 570 + err = kernel_connect(sock, (struct sockaddr *)&args->addr, 571 + args->addrlen, 0); 572 + out: 573 + mutex_unlock(&sock_lock); 574 + 575 + return err; 576 + } 577 + 578 + __bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args) 579 + { 580 + int err; 581 + 582 + if (args->addrlen > sizeof(args->addr)) 583 + return -EINVAL; 584 + 585 + mutex_lock(&sock_lock); 586 + 587 + if (!sock) { 588 + pr_err("%s called without initializing sock", __func__); 589 + err = -EPERM; 590 + goto out; 591 + } 592 + 593 + err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen); 594 + out: 595 + mutex_unlock(&sock_lock); 596 + 597 + return err; 598 + } 599 + 600 + __bpf_kfunc int bpf_kfunc_call_kernel_listen(void) 601 + { 602 + int err; 603 + 604 + mutex_lock(&sock_lock); 605 + 606 + if (!sock) { 607 + pr_err("%s called without initializing sock", __func__); 608 + err = -EPERM; 609 + goto out; 610 + } 611 + 612 + err = kernel_listen(sock, 128); 613 + out: 614 + mutex_unlock(&sock_lock); 615 + 616 + return err; 617 + } 618 + 619 + __bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args) 620 + { 621 + struct msghdr msg = { 622 + .msg_name = &args->addr.addr, 623 + .msg_namelen = args->addr.addrlen, 624 + }; 625 + struct kvec iov; 626 + int err; 627 
+ 628 + if (args->addr.addrlen > sizeof(args->addr.addr) || 629 + args->msglen > sizeof(args->msg)) 630 + return -EINVAL; 631 + 632 + iov.iov_base = args->msg; 633 + iov.iov_len = args->msglen; 634 + 635 + mutex_lock(&sock_lock); 636 + 637 + if (!sock) { 638 + pr_err("%s called without initializing sock", __func__); 639 + err = -EPERM; 640 + goto out; 641 + } 642 + 643 + err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen); 644 + args->addr.addrlen = msg.msg_namelen; 645 + out: 646 + mutex_unlock(&sock_lock); 647 + 648 + return err; 649 + } 650 + 651 + __bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args) 652 + { 653 + struct msghdr msg = { 654 + .msg_name = &args->addr.addr, 655 + .msg_namelen = args->addr.addrlen, 656 + }; 657 + struct kvec iov; 658 + int err; 659 + 660 + if (args->addr.addrlen > sizeof(args->addr.addr) || 661 + args->msglen > sizeof(args->msg)) 662 + return -EINVAL; 663 + 664 + iov.iov_base = args->msg; 665 + iov.iov_len = args->msglen; 666 + 667 + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen); 668 + mutex_lock(&sock_lock); 669 + 670 + if (!sock) { 671 + pr_err("%s called without initializing sock", __func__); 672 + err = -EPERM; 673 + goto out; 674 + } 675 + 676 + err = sock_sendmsg(sock, &msg); 677 + args->addr.addrlen = msg.msg_namelen; 678 + out: 679 + mutex_unlock(&sock_lock); 680 + 681 + return err; 682 + } 683 + 684 + __bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args) 685 + { 686 + int err; 687 + 688 + mutex_lock(&sock_lock); 689 + 690 + if (!sock) { 691 + pr_err("%s called without initializing sock", __func__); 692 + err = -EPERM; 693 + goto out; 694 + } 695 + 696 + err = kernel_getsockname(sock, (struct sockaddr *)&args->addr); 697 + if (err < 0) 698 + goto out; 699 + 700 + args->addrlen = err; 701 + err = 0; 702 + out: 703 + mutex_unlock(&sock_lock); 704 + 705 + return err; 706 + } 707 + 708 + __bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) 709 + { 
710 + int err; 711 + 712 + mutex_lock(&sock_lock); 713 + 714 + if (!sock) { 715 + pr_err("%s called without initializing sock", __func__); 716 + err = -EPERM; 717 + goto out; 718 + } 719 + 720 + err = kernel_getpeername(sock, (struct sockaddr *)&args->addr); 721 + if (err < 0) 722 + goto out; 723 + 724 + args->addrlen = err; 725 + err = 0; 726 + out: 727 + mutex_unlock(&sock_lock); 728 + 729 + return err; 730 + } 731 + 513 732 BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids) 514 733 BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) 515 734 BTF_ID_FLAGS(func, bpf_kfunc_call_test1) ··· 768 525 BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg) 769 526 BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset) 770 527 BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE) 528 + BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE) 529 + BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE) 530 + BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE) 531 + BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE) 532 + BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE) 533 + BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE) 534 + BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE) 535 + BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE) 536 + BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE) 771 537 BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids) 772 538 773 539 static int bpf_testmod_ops_init(struct btf *btf) ··· 907 655 return ret; 908 656 if (bpf_fentry_test1(0) < 0) 909 657 return -EINVAL; 658 + sock = NULL; 659 + mutex_init(&sock_lock); 910 660 return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); 911 661 } 912 662 ··· 922 668 while (refcount_read(&prog_test_struct.cnt) > 1) 923 669 msleep(20); 924 670 671 + bpf_kfunc_close_sock(); 925 672 sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); 926 673 } 927 674
+27
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
··· 64 64 char arr2[]; 65 65 }; 66 66 67 + struct init_sock_args { 68 + int af; 69 + int type; 70 + }; 71 + 72 + struct addr_args { 73 + char addr[sizeof(struct __kernel_sockaddr_storage)]; 74 + int addrlen; 75 + }; 76 + 77 + struct sendmsg_args { 78 + struct addr_args addr; 79 + char msg[10]; 80 + int msglen; 81 + }; 82 + 67 83 struct prog_test_ref_kfunc * 68 84 bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym; 69 85 void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; ··· 123 107 void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len); 124 108 125 109 void bpf_kfunc_common_test(void) __ksym; 110 + 111 + int bpf_kfunc_init_sock(struct init_sock_args *args) __ksym; 112 + void bpf_kfunc_close_sock(void) __ksym; 113 + int bpf_kfunc_call_kernel_connect(struct addr_args *args) __ksym; 114 + int bpf_kfunc_call_kernel_bind(struct addr_args *args) __ksym; 115 + int bpf_kfunc_call_kernel_listen(void) __ksym; 116 + int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args) __ksym; 117 + int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args) __ksym; 118 + int bpf_kfunc_call_kernel_getsockname(struct addr_args *args) __ksym; 119 + int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) __ksym; 120 + 126 121 #endif /* _BPF_TESTMOD_KFUNC_H */