Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: af_alg - consolidation of duplicate code

Consolidate following data structures:

skcipher_async_req, aead_async_req -> af_alg_async_req
skcipher_rsgl, aead_rsgl -> af_alg_rsgl
skcipher_tsgl, aead_tsgl -> af_alg_tsgl
skcipher_ctx, aead_ctx -> af_alg_ctx

Consolidate following functions:

skcipher_sndbuf, aead_sndbuf -> af_alg_sndbuf
skcipher_writable, aead_writable -> af_alg_writable
skcipher_rcvbuf, aead_rcvbuf -> af_alg_rcvbuf
skcipher_readable, aead_readable -> af_alg_readable
aead_alloc_tsgl, skcipher_alloc_tsgl -> af_alg_alloc_tsgl
aead_count_tsgl, skcipher_count_tsgl -> af_alg_count_tsgl
aead_pull_tsgl, skcipher_pull_tsgl -> af_alg_pull_tsgl
aead_free_areq_sgls, skcipher_free_areq_sgls -> af_alg_free_areq_sgls
aead_wait_for_wmem, skcipher_wait_for_wmem -> af_alg_wait_for_wmem
aead_wmem_wakeup, skcipher_wmem_wakeup -> af_alg_wmem_wakeup
aead_wait_for_data, skcipher_wait_for_data -> af_alg_wait_for_data
aead_data_wakeup, skcipher_data_wakeup -> af_alg_data_wakeup
aead_sendmsg, skcipher_sendmsg -> af_alg_sendmsg
aead_sendpage, skcipher_sendpage -> af_alg_sendpage
aead_async_cb, skcipher_async_cb -> af_alg_async_cb
aead_poll, skcipher_poll -> af_alg_poll

Split out the following common code from recvmsg:

af_alg_alloc_areq: allocation of the request data structure for the
cipher operation

af_alg_get_rsgl: creation of the RX SGL anchored in the request data
structure

The following changes to the implementation without affecting the
functionality have been applied to synchronize slightly different code
bases in algif_skcipher and algif_aead:

The wakeup in af_alg_wait_for_data is triggered when either more data
is received or the indicator that more data is to be expected is
released. The first is triggered by user space, the second is
triggered by the kernel upon finishing the processing of data
(i.e. the kernel is ready for more).

af_alg_sendmsg uses size_t in min_t calculation for obtaining len.
Return code determination is consistent with algif_skcipher. The
scope of the variable i is reduced to match algif_aead. The type of the
variable i is switched from int to unsigned int to match algif_aead.

af_alg_sendpage does not contain the superfluous err = 0 from
aead_sendpage.

af_alg_async_cb requires to store the number of output bytes in
areq->outlen before the AIO callback is triggered.

The POLLIN / POLLRDNORM flags are now set when either no more data is
expected or the kernel has been supplied with data. This is consistent
with the wakeup from sleep when the kernel waits for data.

The request data structure is extended by the field last_rsgl which
points to the last RX SGL list entry. This shall help recvmsg
implementation to chain the RX SGL to other SG(L)s if needed. It is
currently used by algif_aead which chains the tag SGL to the RX SGL
during decryption.

Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Stephan Mueller and committed by
Herbert Xu
2d97591e a92f7af3

+940 -1262
+693
crypto/af_alg.c
··· 21 21 #include <linux/module.h> 22 22 #include <linux/net.h> 23 23 #include <linux/rwsem.h> 24 + #include <linux/sched/signal.h> 24 25 #include <linux/security.h> 25 26 26 27 struct alg_type_list { ··· 507 506 complete(&completion->completion); 508 507 } 509 508 EXPORT_SYMBOL_GPL(af_alg_complete); 509 + 510 + /** 511 + * af_alg_alloc_tsgl - allocate the TX SGL 512 + * 513 + * @sk socket of connection to user space 514 + * @return: 0 upon success, < 0 upon error 515 + */ 516 + int af_alg_alloc_tsgl(struct sock *sk) 517 + { 518 + struct alg_sock *ask = alg_sk(sk); 519 + struct af_alg_ctx *ctx = ask->private; 520 + struct af_alg_tsgl *sgl; 521 + struct scatterlist *sg = NULL; 522 + 523 + sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); 524 + if (!list_empty(&ctx->tsgl_list)) 525 + sg = sgl->sg; 526 + 527 + if (!sg || sgl->cur >= MAX_SGL_ENTS) { 528 + sgl = sock_kmalloc(sk, sizeof(*sgl) + 529 + sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), 530 + GFP_KERNEL); 531 + if (!sgl) 532 + return -ENOMEM; 533 + 534 + sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); 535 + sgl->cur = 0; 536 + 537 + if (sg) 538 + sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); 539 + 540 + list_add_tail(&sgl->list, &ctx->tsgl_list); 541 + } 542 + 543 + return 0; 544 + } 545 + EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl); 546 + 547 + /** 548 + * aead_count_tsgl - Count number of TX SG entries 549 + * 550 + * The counting starts from the beginning of the SGL to @bytes. If 551 + * an offset is provided, the counting of the SG entries starts at the offset. 552 + * 553 + * @sk socket of connection to user space 554 + * @bytes Count the number of SG entries holding given number of bytes. 555 + * @offset Start the counting of SG entries from the given offset. 
556 + * @return Number of TX SG entries found given the constraints 557 + */ 558 + unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) 559 + { 560 + struct alg_sock *ask = alg_sk(sk); 561 + struct af_alg_ctx *ctx = ask->private; 562 + struct af_alg_tsgl *sgl, *tmp; 563 + unsigned int i; 564 + unsigned int sgl_count = 0; 565 + 566 + if (!bytes) 567 + return 0; 568 + 569 + list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) { 570 + struct scatterlist *sg = sgl->sg; 571 + 572 + for (i = 0; i < sgl->cur; i++) { 573 + size_t bytes_count; 574 + 575 + /* Skip offset */ 576 + if (offset >= sg[i].length) { 577 + offset -= sg[i].length; 578 + bytes -= sg[i].length; 579 + continue; 580 + } 581 + 582 + bytes_count = sg[i].length - offset; 583 + 584 + offset = 0; 585 + sgl_count++; 586 + 587 + /* If we have seen requested number of bytes, stop */ 588 + if (bytes_count >= bytes) 589 + return sgl_count; 590 + 591 + bytes -= bytes_count; 592 + } 593 + } 594 + 595 + return sgl_count; 596 + } 597 + EXPORT_SYMBOL_GPL(af_alg_count_tsgl); 598 + 599 + /** 600 + * aead_pull_tsgl - Release the specified buffers from TX SGL 601 + * 602 + * If @dst is non-null, reassign the pages to dst. The caller must release 603 + * the pages. If @dst_offset is given only reassign the pages to @dst starting 604 + * at the @dst_offset (byte). The caller must ensure that @dst is large 605 + * enough (e.g. by using af_alg_count_tsgl with the same offset). 606 + * 607 + * @sk socket of connection to user space 608 + * @used Number of bytes to pull from TX SGL 609 + * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The 610 + * caller must release the buffers in dst. 611 + * @dst_offset Reassign the TX SGL from given offset. All buffers before 612 + * reaching the offset is released. 
613 + */ 614 + void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, 615 + size_t dst_offset) 616 + { 617 + struct alg_sock *ask = alg_sk(sk); 618 + struct af_alg_ctx *ctx = ask->private; 619 + struct af_alg_tsgl *sgl; 620 + struct scatterlist *sg; 621 + unsigned int i, j; 622 + 623 + while (!list_empty(&ctx->tsgl_list)) { 624 + sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, 625 + list); 626 + sg = sgl->sg; 627 + 628 + for (i = 0, j = 0; i < sgl->cur; i++) { 629 + size_t plen = min_t(size_t, used, sg[i].length); 630 + struct page *page = sg_page(sg + i); 631 + 632 + if (!page) 633 + continue; 634 + 635 + /* 636 + * Assumption: caller created af_alg_count_tsgl(len) 637 + * SG entries in dst. 638 + */ 639 + if (dst) { 640 + if (dst_offset >= plen) { 641 + /* discard page before offset */ 642 + dst_offset -= plen; 643 + put_page(page); 644 + } else { 645 + /* reassign page to dst after offset */ 646 + sg_set_page(dst + j, page, 647 + plen - dst_offset, 648 + sg[i].offset + dst_offset); 649 + dst_offset = 0; 650 + j++; 651 + } 652 + } 653 + 654 + sg[i].length -= plen; 655 + sg[i].offset += plen; 656 + 657 + used -= plen; 658 + ctx->used -= plen; 659 + 660 + if (sg[i].length) 661 + return; 662 + 663 + if (!dst) 664 + put_page(page); 665 + 666 + sg_assign_page(sg + i, NULL); 667 + } 668 + 669 + list_del(&sgl->list); 670 + sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) * 671 + (MAX_SGL_ENTS + 1)); 672 + } 673 + 674 + if (!ctx->used) 675 + ctx->merge = 0; 676 + } 677 + EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); 678 + 679 + /** 680 + * af_alg_free_areq_sgls - Release TX and RX SGLs of the request 681 + * 682 + * @areq Request holding the TX and RX SGL 683 + */ 684 + void af_alg_free_areq_sgls(struct af_alg_async_req *areq) 685 + { 686 + struct sock *sk = areq->sk; 687 + struct alg_sock *ask = alg_sk(sk); 688 + struct af_alg_ctx *ctx = ask->private; 689 + struct af_alg_rsgl *rsgl, *tmp; 690 + struct scatterlist *tsgl; 691 + struct 
scatterlist *sg; 692 + unsigned int i; 693 + 694 + list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { 695 + ctx->rcvused -= rsgl->sg_num_bytes; 696 + af_alg_free_sg(&rsgl->sgl); 697 + list_del(&rsgl->list); 698 + if (rsgl != &areq->first_rsgl) 699 + sock_kfree_s(sk, rsgl, sizeof(*rsgl)); 700 + } 701 + 702 + tsgl = areq->tsgl; 703 + for_each_sg(tsgl, sg, areq->tsgl_entries, i) { 704 + if (!sg_page(sg)) 705 + continue; 706 + put_page(sg_page(sg)); 707 + } 708 + 709 + if (areq->tsgl && areq->tsgl_entries) 710 + sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); 711 + } 712 + EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls); 713 + 714 + /** 715 + * af_alg_wait_for_wmem - wait for availability of writable memory 716 + * 717 + * @sk socket of connection to user space 718 + * @flags If MSG_DONTWAIT is set, then only report if function would sleep 719 + * @return 0 when writable memory is available, < 0 upon error 720 + */ 721 + int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags) 722 + { 723 + DEFINE_WAIT_FUNC(wait, woken_wake_function); 724 + int err = -ERESTARTSYS; 725 + long timeout; 726 + 727 + if (flags & MSG_DONTWAIT) 728 + return -EAGAIN; 729 + 730 + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 731 + 732 + add_wait_queue(sk_sleep(sk), &wait); 733 + for (;;) { 734 + if (signal_pending(current)) 735 + break; 736 + timeout = MAX_SCHEDULE_TIMEOUT; 737 + if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) { 738 + err = 0; 739 + break; 740 + } 741 + } 742 + remove_wait_queue(sk_sleep(sk), &wait); 743 + 744 + return err; 745 + } 746 + EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem); 747 + 748 + /** 749 + * af_alg_wmem_wakeup - wakeup caller when writable memory is available 750 + * 751 + * @sk socket of connection to user space 752 + */ 753 + void af_alg_wmem_wakeup(struct sock *sk) 754 + { 755 + struct socket_wq *wq; 756 + 757 + if (!af_alg_writable(sk)) 758 + return; 759 + 760 + rcu_read_lock(); 761 + wq = rcu_dereference(sk->sk_wq); 762 + if 
(skwq_has_sleeper(wq)) 763 + wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 764 + POLLRDNORM | 765 + POLLRDBAND); 766 + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 767 + rcu_read_unlock(); 768 + } 769 + EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); 770 + 771 + /** 772 + * af_alg_wait_for_data - wait for availability of TX data 773 + * 774 + * @sk socket of connection to user space 775 + * @flags If MSG_DONTWAIT is set, then only report if function would sleep 776 + * @return 0 when writable memory is available, < 0 upon error 777 + */ 778 + int af_alg_wait_for_data(struct sock *sk, unsigned flags) 779 + { 780 + DEFINE_WAIT_FUNC(wait, woken_wake_function); 781 + struct alg_sock *ask = alg_sk(sk); 782 + struct af_alg_ctx *ctx = ask->private; 783 + long timeout; 784 + int err = -ERESTARTSYS; 785 + 786 + if (flags & MSG_DONTWAIT) 787 + return -EAGAIN; 788 + 789 + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 790 + 791 + add_wait_queue(sk_sleep(sk), &wait); 792 + for (;;) { 793 + if (signal_pending(current)) 794 + break; 795 + timeout = MAX_SCHEDULE_TIMEOUT; 796 + if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), 797 + &wait)) { 798 + err = 0; 799 + break; 800 + } 801 + } 802 + remove_wait_queue(sk_sleep(sk), &wait); 803 + 804 + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 805 + 806 + return err; 807 + } 808 + EXPORT_SYMBOL_GPL(af_alg_wait_for_data); 809 + 810 + /** 811 + * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel 812 + * 813 + * @sk socket of connection to user space 814 + */ 815 + 816 + void af_alg_data_wakeup(struct sock *sk) 817 + { 818 + struct alg_sock *ask = alg_sk(sk); 819 + struct af_alg_ctx *ctx = ask->private; 820 + struct socket_wq *wq; 821 + 822 + if (!ctx->used) 823 + return; 824 + 825 + rcu_read_lock(); 826 + wq = rcu_dereference(sk->sk_wq); 827 + if (skwq_has_sleeper(wq)) 828 + wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | 829 + POLLRDNORM | 830 + POLLRDBAND); 831 + sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 832 + 
rcu_read_unlock(); 833 + } 834 + EXPORT_SYMBOL_GPL(af_alg_data_wakeup); 835 + 836 + /** 837 + * af_alg_sendmsg - implementation of sendmsg system call handler 838 + * 839 + * The sendmsg system call handler obtains the user data and stores it 840 + * in ctx->tsgl_list. This implies allocation of the required numbers of 841 + * struct af_alg_tsgl. 842 + * 843 + * In addition, the ctx is filled with the information sent via CMSG. 844 + * 845 + * @sock socket of connection to user space 846 + * @msg message from user space 847 + * @size size of message from user space 848 + * @ivsize the size of the IV for the cipher operation to verify that the 849 + * user-space-provided IV has the right size 850 + * @return the number of copied data upon success, < 0 upon error 851 + */ 852 + int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, 853 + unsigned int ivsize) 854 + { 855 + struct sock *sk = sock->sk; 856 + struct alg_sock *ask = alg_sk(sk); 857 + struct af_alg_ctx *ctx = ask->private; 858 + struct af_alg_tsgl *sgl; 859 + struct af_alg_control con = {}; 860 + long copied = 0; 861 + bool enc = 0; 862 + bool init = 0; 863 + int err = 0; 864 + 865 + if (msg->msg_controllen) { 866 + err = af_alg_cmsg_send(msg, &con); 867 + if (err) 868 + return err; 869 + 870 + init = 1; 871 + switch (con.op) { 872 + case ALG_OP_ENCRYPT: 873 + enc = 1; 874 + break; 875 + case ALG_OP_DECRYPT: 876 + enc = 0; 877 + break; 878 + default: 879 + return -EINVAL; 880 + } 881 + 882 + if (con.iv && con.iv->ivlen != ivsize) 883 + return -EINVAL; 884 + } 885 + 886 + lock_sock(sk); 887 + if (!ctx->more && ctx->used) { 888 + err = -EINVAL; 889 + goto unlock; 890 + } 891 + 892 + if (init) { 893 + ctx->enc = enc; 894 + if (con.iv) 895 + memcpy(ctx->iv, con.iv->iv, ivsize); 896 + 897 + ctx->aead_assoclen = con.aead_assoclen; 898 + } 899 + 900 + while (size) { 901 + struct scatterlist *sg; 902 + size_t len = size; 903 + size_t plen; 904 + 905 + /* use the existing memory in an allocated 
page */ 906 + if (ctx->merge) { 907 + sgl = list_entry(ctx->tsgl_list.prev, 908 + struct af_alg_tsgl, list); 909 + sg = sgl->sg + sgl->cur - 1; 910 + len = min_t(size_t, len, 911 + PAGE_SIZE - sg->offset - sg->length); 912 + 913 + err = memcpy_from_msg(page_address(sg_page(sg)) + 914 + sg->offset + sg->length, 915 + msg, len); 916 + if (err) 917 + goto unlock; 918 + 919 + sg->length += len; 920 + ctx->merge = (sg->offset + sg->length) & 921 + (PAGE_SIZE - 1); 922 + 923 + ctx->used += len; 924 + copied += len; 925 + size -= len; 926 + continue; 927 + } 928 + 929 + if (!af_alg_writable(sk)) { 930 + err = af_alg_wait_for_wmem(sk, msg->msg_flags); 931 + if (err) 932 + goto unlock; 933 + } 934 + 935 + /* allocate a new page */ 936 + len = min_t(unsigned long, len, af_alg_sndbuf(sk)); 937 + 938 + err = af_alg_alloc_tsgl(sk); 939 + if (err) 940 + goto unlock; 941 + 942 + sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, 943 + list); 944 + sg = sgl->sg; 945 + if (sgl->cur) 946 + sg_unmark_end(sg + sgl->cur - 1); 947 + 948 + do { 949 + unsigned int i = sgl->cur; 950 + 951 + plen = min_t(size_t, len, PAGE_SIZE); 952 + 953 + sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); 954 + if (!sg_page(sg + i)) { 955 + err = -ENOMEM; 956 + goto unlock; 957 + } 958 + 959 + err = memcpy_from_msg(page_address(sg_page(sg + i)), 960 + msg, plen); 961 + if (err) { 962 + __free_page(sg_page(sg + i)); 963 + sg_assign_page(sg + i, NULL); 964 + goto unlock; 965 + } 966 + 967 + sg[i].length = plen; 968 + len -= plen; 969 + ctx->used += plen; 970 + copied += plen; 971 + size -= plen; 972 + sgl->cur++; 973 + } while (len && sgl->cur < MAX_SGL_ENTS); 974 + 975 + if (!size) 976 + sg_mark_end(sg + sgl->cur - 1); 977 + 978 + ctx->merge = plen & (PAGE_SIZE - 1); 979 + } 980 + 981 + err = 0; 982 + 983 + ctx->more = msg->msg_flags & MSG_MORE; 984 + 985 + unlock: 986 + af_alg_data_wakeup(sk); 987 + release_sock(sk); 988 + 989 + return copied ?: err; 990 + } 991 + EXPORT_SYMBOL_GPL(af_alg_sendmsg); 
992 + 993 + /** 994 + * af_alg_sendpage - sendpage system call handler 995 + * 996 + * This is a generic implementation of sendpage to fill ctx->tsgl_list. 997 + */ 998 + ssize_t af_alg_sendpage(struct socket *sock, struct page *page, 999 + int offset, size_t size, int flags) 1000 + { 1001 + struct sock *sk = sock->sk; 1002 + struct alg_sock *ask = alg_sk(sk); 1003 + struct af_alg_ctx *ctx = ask->private; 1004 + struct af_alg_tsgl *sgl; 1005 + int err = -EINVAL; 1006 + 1007 + if (flags & MSG_SENDPAGE_NOTLAST) 1008 + flags |= MSG_MORE; 1009 + 1010 + lock_sock(sk); 1011 + if (!ctx->more && ctx->used) 1012 + goto unlock; 1013 + 1014 + if (!size) 1015 + goto done; 1016 + 1017 + if (!af_alg_writable(sk)) { 1018 + err = af_alg_wait_for_wmem(sk, flags); 1019 + if (err) 1020 + goto unlock; 1021 + } 1022 + 1023 + err = af_alg_alloc_tsgl(sk); 1024 + if (err) 1025 + goto unlock; 1026 + 1027 + ctx->merge = 0; 1028 + sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); 1029 + 1030 + if (sgl->cur) 1031 + sg_unmark_end(sgl->sg + sgl->cur - 1); 1032 + 1033 + sg_mark_end(sgl->sg + sgl->cur); 1034 + 1035 + get_page(page); 1036 + sg_set_page(sgl->sg + sgl->cur, page, size, offset); 1037 + sgl->cur++; 1038 + ctx->used += size; 1039 + 1040 + done: 1041 + ctx->more = flags & MSG_MORE; 1042 + 1043 + unlock: 1044 + af_alg_data_wakeup(sk); 1045 + release_sock(sk); 1046 + 1047 + return err ?: size; 1048 + } 1049 + EXPORT_SYMBOL_GPL(af_alg_sendpage); 1050 + 1051 + /** 1052 + * af_alg_async_cb - AIO callback handler 1053 + * 1054 + * This handler cleans up the struct af_alg_async_req upon completion of the 1055 + * AIO operation. 1056 + * 1057 + * The number of bytes to be generated with the AIO operation must be set 1058 + * in areq->outlen before the AIO callback handler is invoked. 
1059 + */ 1060 + void af_alg_async_cb(struct crypto_async_request *_req, int err) 1061 + { 1062 + struct af_alg_async_req *areq = _req->data; 1063 + struct sock *sk = areq->sk; 1064 + struct kiocb *iocb = areq->iocb; 1065 + unsigned int resultlen; 1066 + 1067 + lock_sock(sk); 1068 + 1069 + /* Buffer size written by crypto operation. */ 1070 + resultlen = areq->outlen; 1071 + 1072 + af_alg_free_areq_sgls(areq); 1073 + sock_kfree_s(sk, areq, areq->areqlen); 1074 + __sock_put(sk); 1075 + 1076 + iocb->ki_complete(iocb, err ? err : resultlen, 0); 1077 + 1078 + release_sock(sk); 1079 + } 1080 + EXPORT_SYMBOL_GPL(af_alg_async_cb); 1081 + 1082 + /** 1083 + * af_alg_poll - poll system call handler 1084 + */ 1085 + unsigned int af_alg_poll(struct file *file, struct socket *sock, 1086 + poll_table *wait) 1087 + { 1088 + struct sock *sk = sock->sk; 1089 + struct alg_sock *ask = alg_sk(sk); 1090 + struct af_alg_ctx *ctx = ask->private; 1091 + unsigned int mask; 1092 + 1093 + sock_poll_wait(file, sk_sleep(sk), wait); 1094 + mask = 0; 1095 + 1096 + if (!ctx->more || ctx->used) 1097 + mask |= POLLIN | POLLRDNORM; 1098 + 1099 + if (af_alg_writable(sk)) 1100 + mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 1101 + 1102 + return mask; 1103 + } 1104 + EXPORT_SYMBOL_GPL(af_alg_poll); 1105 + 1106 + /** 1107 + * af_alg_alloc_areq - allocate struct af_alg_async_req 1108 + * 1109 + * @sk socket of connection to user space 1110 + * @areqlen size of struct af_alg_async_req + crypto_*_reqsize 1111 + * @return allocated data structure or ERR_PTR upon error 1112 + */ 1113 + struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, 1114 + unsigned int areqlen) 1115 + { 1116 + struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); 1117 + 1118 + if (unlikely(!areq)) 1119 + return ERR_PTR(-ENOMEM); 1120 + 1121 + areq->areqlen = areqlen; 1122 + areq->sk = sk; 1123 + areq->last_rsgl = NULL; 1124 + INIT_LIST_HEAD(&areq->rsgl_list); 1125 + areq->tsgl = NULL; 1126 + areq->tsgl_entries = 0; 
1127 + 1128 + return areq; 1129 + } 1130 + EXPORT_SYMBOL_GPL(af_alg_alloc_areq); 1131 + 1132 + /** 1133 + * af_alg_get_rsgl - create the RX SGL for the output data from the crypto 1134 + * operation 1135 + * 1136 + * @sk socket of connection to user space 1137 + * @msg user space message 1138 + * @flags flags used to invoke recvmsg with 1139 + * @areq instance of the cryptographic request that will hold the RX SGL 1140 + * @maxsize maximum number of bytes to be pulled from user space 1141 + * @outlen number of bytes in the RX SGL 1142 + * @return 0 on success, < 0 upon error 1143 + */ 1144 + int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, 1145 + struct af_alg_async_req *areq, size_t maxsize, 1146 + size_t *outlen) 1147 + { 1148 + struct alg_sock *ask = alg_sk(sk); 1149 + struct af_alg_ctx *ctx = ask->private; 1150 + size_t len = 0; 1151 + 1152 + while (maxsize > len && msg_data_left(msg)) { 1153 + struct af_alg_rsgl *rsgl; 1154 + size_t seglen; 1155 + int err; 1156 + 1157 + /* limit the amount of readable buffers */ 1158 + if (!af_alg_readable(sk)) 1159 + break; 1160 + 1161 + if (!ctx->used) { 1162 + err = af_alg_wait_for_data(sk, flags); 1163 + if (err) 1164 + return err; 1165 + } 1166 + 1167 + seglen = min_t(size_t, (maxsize - len), 1168 + msg_data_left(msg)); 1169 + 1170 + if (list_empty(&areq->rsgl_list)) { 1171 + rsgl = &areq->first_rsgl; 1172 + } else { 1173 + rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); 1174 + if (unlikely(!rsgl)) 1175 + return -ENOMEM; 1176 + } 1177 + 1178 + rsgl->sgl.npages = 0; 1179 + list_add_tail(&rsgl->list, &areq->rsgl_list); 1180 + 1181 + /* make one iovec available as scatterlist */ 1182 + err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); 1183 + if (err < 0) 1184 + return err; 1185 + 1186 + /* chain the new scatterlist with previous one */ 1187 + if (areq->last_rsgl) 1188 + af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl); 1189 + 1190 + areq->last_rsgl = rsgl; 1191 + len += err; 1192 + 
ctx->rcvused += err; 1193 + rsgl->sg_num_bytes = err; 1194 + iov_iter_advance(&msg->msg_iter, err); 1195 + } 1196 + 1197 + *outlen = len; 1198 + return 0; 1199 + } 1200 + EXPORT_SYMBOL_GPL(af_alg_get_rsgl); 510 1201 511 1202 static int __init af_alg_init(void) 512 1203 {
+40 -661
crypto/algif_aead.c
··· 35 35 #include <linux/init.h> 36 36 #include <linux/list.h> 37 37 #include <linux/kernel.h> 38 - #include <linux/sched/signal.h> 39 38 #include <linux/mm.h> 40 39 #include <linux/module.h> 41 40 #include <linux/net.h> 42 41 #include <net/sock.h> 43 - 44 - struct aead_tsgl { 45 - struct list_head list; 46 - unsigned int cur; /* Last processed SG entry */ 47 - struct scatterlist sg[0]; /* Array of SGs forming the SGL */ 48 - }; 49 - 50 - struct aead_rsgl { 51 - struct af_alg_sgl sgl; 52 - struct list_head list; 53 - size_t sg_num_bytes; /* Bytes of data in that SGL */ 54 - }; 55 - 56 - struct aead_async_req { 57 - struct kiocb *iocb; 58 - struct sock *sk; 59 - 60 - struct aead_rsgl first_rsgl; /* First RX SG */ 61 - struct list_head rsgl_list; /* Track RX SGs */ 62 - 63 - struct scatterlist *tsgl; /* priv. TX SGL of buffers to process */ 64 - unsigned int tsgl_entries; /* number of entries in priv. TX SGL */ 65 - 66 - unsigned int outlen; /* Filled output buf length */ 67 - 68 - unsigned int areqlen; /* Length of this data struct */ 69 - struct aead_request aead_req; /* req ctx trails this struct */ 70 - }; 71 42 72 43 struct aead_tfm { 73 44 struct crypto_aead *aead; ··· 46 75 struct crypto_skcipher *null_tfm; 47 76 }; 48 77 49 - struct aead_ctx { 50 - struct list_head tsgl_list; /* Link to TX SGL */ 51 - 52 - void *iv; 53 - size_t aead_assoclen; 54 - 55 - struct af_alg_completion completion; /* sync work queue */ 56 - 57 - size_t used; /* TX bytes sent to kernel */ 58 - size_t rcvused; /* total RX bytes to be processed by kernel */ 59 - 60 - bool more; /* More data to be expected? 
*/ 61 - bool merge; /* Merge new data into existing SG */ 62 - bool enc; /* Crypto operation: enc, dec */ 63 - 64 - unsigned int len; /* Length of allocated memory for this struct */ 65 - }; 66 - 67 - #define MAX_SGL_ENTS ((4096 - sizeof(struct aead_tsgl)) / \ 68 - sizeof(struct scatterlist) - 1) 69 - 70 - static inline int aead_sndbuf(struct sock *sk) 71 - { 72 - struct alg_sock *ask = alg_sk(sk); 73 - struct aead_ctx *ctx = ask->private; 74 - 75 - return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - 76 - ctx->used, 0); 77 - } 78 - 79 - static inline bool aead_writable(struct sock *sk) 80 - { 81 - return PAGE_SIZE <= aead_sndbuf(sk); 82 - } 83 - 84 - static inline int aead_rcvbuf(struct sock *sk) 85 - { 86 - struct alg_sock *ask = alg_sk(sk); 87 - struct aead_ctx *ctx = ask->private; 88 - 89 - return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - 90 - ctx->rcvused, 0); 91 - } 92 - 93 - static inline bool aead_readable(struct sock *sk) 94 - { 95 - return PAGE_SIZE <= aead_rcvbuf(sk); 96 - } 97 - 98 78 static inline bool aead_sufficient_data(struct sock *sk) 99 79 { 100 80 struct alg_sock *ask = alg_sk(sk); 101 81 struct sock *psk = ask->parent; 102 82 struct alg_sock *pask = alg_sk(psk); 103 - struct aead_ctx *ctx = ask->private; 83 + struct af_alg_ctx *ctx = ask->private; 104 84 struct aead_tfm *aeadc = pask->private; 105 85 struct crypto_aead *tfm = aeadc->aead; 106 86 unsigned int as = crypto_aead_authsize(tfm); ··· 63 141 return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 
0 : as); 64 142 } 65 143 66 - static int aead_alloc_tsgl(struct sock *sk) 67 - { 68 - struct alg_sock *ask = alg_sk(sk); 69 - struct aead_ctx *ctx = ask->private; 70 - struct aead_tsgl *sgl; 71 - struct scatterlist *sg = NULL; 72 - 73 - sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list); 74 - if (!list_empty(&ctx->tsgl_list)) 75 - sg = sgl->sg; 76 - 77 - if (!sg || sgl->cur >= MAX_SGL_ENTS) { 78 - sgl = sock_kmalloc(sk, sizeof(*sgl) + 79 - sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), 80 - GFP_KERNEL); 81 - if (!sgl) 82 - return -ENOMEM; 83 - 84 - sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); 85 - sgl->cur = 0; 86 - 87 - if (sg) 88 - sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); 89 - 90 - list_add_tail(&sgl->list, &ctx->tsgl_list); 91 - } 92 - 93 - return 0; 94 - } 95 - 96 - /** 97 - * Count number of SG entries from the beginning of the SGL to @bytes. If 98 - * an offset is provided, the counting of the SG entries starts at the offset. 99 - */ 100 - static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes, 101 - size_t offset) 102 - { 103 - struct alg_sock *ask = alg_sk(sk); 104 - struct aead_ctx *ctx = ask->private; 105 - struct aead_tsgl *sgl, *tmp; 106 - unsigned int i; 107 - unsigned int sgl_count = 0; 108 - 109 - if (!bytes) 110 - return 0; 111 - 112 - list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) { 113 - struct scatterlist *sg = sgl->sg; 114 - 115 - for (i = 0; i < sgl->cur; i++) { 116 - size_t bytes_count; 117 - 118 - /* Skip offset */ 119 - if (offset >= sg[i].length) { 120 - offset -= sg[i].length; 121 - bytes -= sg[i].length; 122 - continue; 123 - } 124 - 125 - bytes_count = sg[i].length - offset; 126 - 127 - offset = 0; 128 - sgl_count++; 129 - 130 - /* If we have seen requested number of bytes, stop */ 131 - if (bytes_count >= bytes) 132 - return sgl_count; 133 - 134 - bytes -= bytes_count; 135 - } 136 - } 137 - 138 - return sgl_count; 139 - } 140 - 141 - /** 142 - * Release the specified buffers from TX SGL pointed to by 
ctx->tsgl_list for 143 - * @used bytes. 144 - * 145 - * If @dst is non-null, reassign the pages to dst. The caller must release 146 - * the pages. If @dst_offset is given only reassign the pages to @dst starting 147 - * at the @dst_offset (byte). The caller must ensure that @dst is large 148 - * enough (e.g. by using aead_count_tsgl with the same offset). 149 - */ 150 - static void aead_pull_tsgl(struct sock *sk, size_t used, 151 - struct scatterlist *dst, size_t dst_offset) 152 - { 153 - struct alg_sock *ask = alg_sk(sk); 154 - struct aead_ctx *ctx = ask->private; 155 - struct aead_tsgl *sgl; 156 - struct scatterlist *sg; 157 - unsigned int i, j; 158 - 159 - while (!list_empty(&ctx->tsgl_list)) { 160 - sgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl, 161 - list); 162 - sg = sgl->sg; 163 - 164 - for (i = 0, j = 0; i < sgl->cur; i++) { 165 - size_t plen = min_t(size_t, used, sg[i].length); 166 - struct page *page = sg_page(sg + i); 167 - 168 - if (!page) 169 - continue; 170 - 171 - /* 172 - * Assumption: caller created aead_count_tsgl(len) 173 - * SG entries in dst. 
174 - */ 175 - if (dst) { 176 - if (dst_offset >= plen) { 177 - /* discard page before offset */ 178 - dst_offset -= plen; 179 - put_page(page); 180 - } else { 181 - /* reassign page to dst after offset */ 182 - sg_set_page(dst + j, page, 183 - plen - dst_offset, 184 - sg[i].offset + dst_offset); 185 - dst_offset = 0; 186 - j++; 187 - } 188 - } 189 - 190 - sg[i].length -= plen; 191 - sg[i].offset += plen; 192 - 193 - used -= plen; 194 - ctx->used -= plen; 195 - 196 - if (sg[i].length) 197 - return; 198 - 199 - if (!dst) 200 - put_page(page); 201 - 202 - sg_assign_page(sg + i, NULL); 203 - } 204 - 205 - list_del(&sgl->list); 206 - sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) * 207 - (MAX_SGL_ENTS + 1)); 208 - } 209 - 210 - if (!ctx->used) 211 - ctx->merge = 0; 212 - } 213 - 214 - static void aead_free_areq_sgls(struct aead_async_req *areq) 215 - { 216 - struct sock *sk = areq->sk; 217 - struct alg_sock *ask = alg_sk(sk); 218 - struct aead_ctx *ctx = ask->private; 219 - struct aead_rsgl *rsgl, *tmp; 220 - struct scatterlist *tsgl; 221 - struct scatterlist *sg; 222 - unsigned int i; 223 - 224 - list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { 225 - ctx->rcvused -= rsgl->sg_num_bytes; 226 - af_alg_free_sg(&rsgl->sgl); 227 - list_del(&rsgl->list); 228 - if (rsgl != &areq->first_rsgl) 229 - sock_kfree_s(sk, rsgl, sizeof(*rsgl)); 230 - } 231 - 232 - tsgl = areq->tsgl; 233 - for_each_sg(tsgl, sg, areq->tsgl_entries, i) { 234 - if (!sg_page(sg)) 235 - continue; 236 - put_page(sg_page(sg)); 237 - } 238 - 239 - if (areq->tsgl && areq->tsgl_entries) 240 - sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); 241 - } 242 - 243 - static int aead_wait_for_wmem(struct sock *sk, unsigned int flags) 244 - { 245 - DEFINE_WAIT_FUNC(wait, woken_wake_function); 246 - int err = -ERESTARTSYS; 247 - long timeout; 248 - 249 - if (flags & MSG_DONTWAIT) 250 - return -EAGAIN; 251 - 252 - sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 253 - 254 - 
add_wait_queue(sk_sleep(sk), &wait); 255 - for (;;) { 256 - if (signal_pending(current)) 257 - break; 258 - timeout = MAX_SCHEDULE_TIMEOUT; 259 - if (sk_wait_event(sk, &timeout, aead_writable(sk), &wait)) { 260 - err = 0; 261 - break; 262 - } 263 - } 264 - remove_wait_queue(sk_sleep(sk), &wait); 265 - 266 - return err; 267 - } 268 - 269 - static void aead_wmem_wakeup(struct sock *sk) 270 - { 271 - struct socket_wq *wq; 272 - 273 - if (!aead_writable(sk)) 274 - return; 275 - 276 - rcu_read_lock(); 277 - wq = rcu_dereference(sk->sk_wq); 278 - if (skwq_has_sleeper(wq)) 279 - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 280 - POLLRDNORM | 281 - POLLRDBAND); 282 - sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 283 - rcu_read_unlock(); 284 - } 285 - 286 - static int aead_wait_for_data(struct sock *sk, unsigned flags) 287 - { 288 - DEFINE_WAIT_FUNC(wait, woken_wake_function); 289 - struct alg_sock *ask = alg_sk(sk); 290 - struct aead_ctx *ctx = ask->private; 291 - long timeout; 292 - int err = -ERESTARTSYS; 293 - 294 - if (flags & MSG_DONTWAIT) 295 - return -EAGAIN; 296 - 297 - sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 298 - 299 - add_wait_queue(sk_sleep(sk), &wait); 300 - for (;;) { 301 - if (signal_pending(current)) 302 - break; 303 - timeout = MAX_SCHEDULE_TIMEOUT; 304 - if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) { 305 - err = 0; 306 - break; 307 - } 308 - } 309 - remove_wait_queue(sk_sleep(sk), &wait); 310 - 311 - sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 312 - 313 - return err; 314 - } 315 - 316 - static void aead_data_wakeup(struct sock *sk) 317 - { 318 - struct alg_sock *ask = alg_sk(sk); 319 - struct aead_ctx *ctx = ask->private; 320 - struct socket_wq *wq; 321 - 322 - if (!ctx->used) 323 - return; 324 - 325 - rcu_read_lock(); 326 - wq = rcu_dereference(sk->sk_wq); 327 - if (skwq_has_sleeper(wq)) 328 - wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | 329 - POLLRDNORM | 330 - POLLRDBAND); 331 - sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 332 - 
rcu_read_unlock(); 333 - } 334 - 335 144 static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) 336 145 { 337 146 struct sock *sk = sock->sk; 338 147 struct alg_sock *ask = alg_sk(sk); 339 148 struct sock *psk = ask->parent; 340 149 struct alg_sock *pask = alg_sk(psk); 341 - struct aead_ctx *ctx = ask->private; 342 150 struct aead_tfm *aeadc = pask->private; 343 151 struct crypto_aead *tfm = aeadc->aead; 344 152 unsigned int ivsize = crypto_aead_ivsize(tfm); 345 - struct aead_tsgl *sgl; 346 - struct af_alg_control con = {}; 347 - long copied = 0; 348 - bool enc = 0; 349 - bool init = 0; 350 - int err = 0; 351 153 352 - if (msg->msg_controllen) { 353 - err = af_alg_cmsg_send(msg, &con); 354 - if (err) 355 - return err; 356 - 357 - init = 1; 358 - switch (con.op) { 359 - case ALG_OP_ENCRYPT: 360 - enc = 1; 361 - break; 362 - case ALG_OP_DECRYPT: 363 - enc = 0; 364 - break; 365 - default: 366 - return -EINVAL; 367 - } 368 - 369 - if (con.iv && con.iv->ivlen != ivsize) 370 - return -EINVAL; 371 - } 372 - 373 - lock_sock(sk); 374 - if (!ctx->more && ctx->used) { 375 - err = -EINVAL; 376 - goto unlock; 377 - } 378 - 379 - if (init) { 380 - ctx->enc = enc; 381 - if (con.iv) 382 - memcpy(ctx->iv, con.iv->iv, ivsize); 383 - 384 - ctx->aead_assoclen = con.aead_assoclen; 385 - } 386 - 387 - while (size) { 388 - struct scatterlist *sg; 389 - size_t len = size; 390 - size_t plen; 391 - 392 - /* use the existing memory in an allocated page */ 393 - if (ctx->merge) { 394 - sgl = list_entry(ctx->tsgl_list.prev, 395 - struct aead_tsgl, list); 396 - sg = sgl->sg + sgl->cur - 1; 397 - len = min_t(unsigned long, len, 398 - PAGE_SIZE - sg->offset - sg->length); 399 - err = memcpy_from_msg(page_address(sg_page(sg)) + 400 - sg->offset + sg->length, 401 - msg, len); 402 - if (err) 403 - goto unlock; 404 - 405 - sg->length += len; 406 - ctx->merge = (sg->offset + sg->length) & 407 - (PAGE_SIZE - 1); 408 - 409 - ctx->used += len; 410 - copied += len; 411 - size -= 
len; 412 - continue; 413 - } 414 - 415 - if (!aead_writable(sk)) { 416 - err = aead_wait_for_wmem(sk, msg->msg_flags); 417 - if (err) 418 - goto unlock; 419 - } 420 - 421 - /* allocate a new page */ 422 - len = min_t(unsigned long, size, aead_sndbuf(sk)); 423 - 424 - err = aead_alloc_tsgl(sk); 425 - if (err) 426 - goto unlock; 427 - 428 - sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, 429 - list); 430 - sg = sgl->sg; 431 - if (sgl->cur) 432 - sg_unmark_end(sg + sgl->cur - 1); 433 - 434 - do { 435 - unsigned int i = sgl->cur; 436 - 437 - plen = min_t(size_t, len, PAGE_SIZE); 438 - 439 - sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); 440 - if (!sg_page(sg + i)) { 441 - err = -ENOMEM; 442 - goto unlock; 443 - } 444 - 445 - err = memcpy_from_msg(page_address(sg_page(sg + i)), 446 - msg, plen); 447 - if (err) { 448 - __free_page(sg_page(sg + i)); 449 - sg_assign_page(sg + i, NULL); 450 - goto unlock; 451 - } 452 - 453 - sg[i].length = plen; 454 - len -= plen; 455 - ctx->used += plen; 456 - copied += plen; 457 - size -= plen; 458 - sgl->cur++; 459 - } while (len && sgl->cur < MAX_SGL_ENTS); 460 - 461 - if (!size) 462 - sg_mark_end(sg + sgl->cur - 1); 463 - 464 - ctx->merge = plen & (PAGE_SIZE - 1); 465 - } 466 - 467 - err = 0; 468 - 469 - ctx->more = msg->msg_flags & MSG_MORE; 470 - 471 - unlock: 472 - aead_data_wakeup(sk); 473 - release_sock(sk); 474 - 475 - return err ?: copied; 476 - } 477 - 478 - static ssize_t aead_sendpage(struct socket *sock, struct page *page, 479 - int offset, size_t size, int flags) 480 - { 481 - struct sock *sk = sock->sk; 482 - struct alg_sock *ask = alg_sk(sk); 483 - struct aead_ctx *ctx = ask->private; 484 - struct aead_tsgl *sgl; 485 - int err = -EINVAL; 486 - 487 - if (flags & MSG_SENDPAGE_NOTLAST) 488 - flags |= MSG_MORE; 489 - 490 - lock_sock(sk); 491 - if (!ctx->more && ctx->used) 492 - goto unlock; 493 - 494 - if (!size) 495 - goto done; 496 - 497 - if (!aead_writable(sk)) { 498 - err = aead_wait_for_wmem(sk, flags); 499 - 
if (err) 500 - goto unlock; 501 - } 502 - 503 - err = aead_alloc_tsgl(sk); 504 - if (err) 505 - goto unlock; 506 - 507 - ctx->merge = 0; 508 - sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list); 509 - 510 - if (sgl->cur) 511 - sg_unmark_end(sgl->sg + sgl->cur - 1); 512 - 513 - sg_mark_end(sgl->sg + sgl->cur); 514 - 515 - get_page(page); 516 - sg_set_page(sgl->sg + sgl->cur, page, size, offset); 517 - sgl->cur++; 518 - ctx->used += size; 519 - 520 - err = 0; 521 - 522 - done: 523 - ctx->more = flags & MSG_MORE; 524 - unlock: 525 - aead_data_wakeup(sk); 526 - release_sock(sk); 527 - 528 - return err ?: size; 529 - } 530 - 531 - static void aead_async_cb(struct crypto_async_request *_req, int err) 532 - { 533 - struct aead_async_req *areq = _req->data; 534 - struct sock *sk = areq->sk; 535 - struct kiocb *iocb = areq->iocb; 536 - unsigned int resultlen; 537 - 538 - lock_sock(sk); 539 - 540 - /* Buffer size written by crypto operation. */ 541 - resultlen = areq->outlen; 542 - 543 - aead_free_areq_sgls(areq); 544 - sock_kfree_s(sk, areq, areq->areqlen); 545 - __sock_put(sk); 546 - 547 - iocb->ki_complete(iocb, err ? 
err : resultlen, 0); 548 - 549 - release_sock(sk); 154 + return af_alg_sendmsg(sock, msg, size, ivsize); 550 155 } 551 156 552 157 static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm, ··· 97 648 struct alg_sock *ask = alg_sk(sk); 98 649 struct sock *psk = ask->parent; 99 650 struct alg_sock *pask = alg_sk(psk); 100 - struct aead_ctx *ctx = ask->private; 651 + struct af_alg_ctx *ctx = ask->private; 101 652 struct aead_tfm *aeadc = pask->private; 102 653 struct crypto_aead *tfm = aeadc->aead; 103 654 struct crypto_skcipher *null_tfm = aeadc->null_tfm; 104 655 unsigned int as = crypto_aead_authsize(tfm); 105 - unsigned int areqlen = 106 - sizeof(struct aead_async_req) + crypto_aead_reqsize(tfm); 107 - struct aead_async_req *areq; 108 - struct aead_rsgl *last_rsgl = NULL; 109 - struct aead_tsgl *tsgl; 656 + struct af_alg_async_req *areq; 657 + struct af_alg_tsgl *tsgl; 110 658 struct scatterlist *src; 111 659 int err = 0; 112 660 size_t used = 0; /* [in] TX bufs to be en/decrypted */ ··· 149 703 used -= ctx->aead_assoclen; 150 704 151 705 /* Allocate cipher request for current operation. 
*/ 152 - areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); 153 - if (unlikely(!areq)) 154 - return -ENOMEM; 155 - areq->areqlen = areqlen; 156 - areq->sk = sk; 157 - INIT_LIST_HEAD(&areq->rsgl_list); 158 - areq->tsgl = NULL; 159 - areq->tsgl_entries = 0; 706 + areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + 707 + crypto_aead_reqsize(tfm)); 708 + if (IS_ERR(areq)) 709 + return PTR_ERR(areq); 160 710 161 711 /* convert iovecs of output buffers into RX SGL */ 162 - while (outlen > usedpages && msg_data_left(msg)) { 163 - struct aead_rsgl *rsgl; 164 - size_t seglen; 165 - 166 - /* limit the amount of readable buffers */ 167 - if (!aead_readable(sk)) 168 - break; 169 - 170 - if (!ctx->used) { 171 - err = aead_wait_for_data(sk, flags); 172 - if (err) 173 - goto free; 174 - } 175 - 176 - seglen = min_t(size_t, (outlen - usedpages), 177 - msg_data_left(msg)); 178 - 179 - if (list_empty(&areq->rsgl_list)) { 180 - rsgl = &areq->first_rsgl; 181 - } else { 182 - rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); 183 - if (unlikely(!rsgl)) { 184 - err = -ENOMEM; 185 - goto free; 186 - } 187 - } 188 - 189 - rsgl->sgl.npages = 0; 190 - list_add_tail(&rsgl->list, &areq->rsgl_list); 191 - 192 - /* make one iovec available as scatterlist */ 193 - err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); 194 - if (err < 0) 195 - goto free; 196 - 197 - /* chain the new scatterlist with previous one */ 198 - if (last_rsgl) 199 - af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); 200 - 201 - last_rsgl = rsgl; 202 - usedpages += err; 203 - ctx->rcvused += err; 204 - rsgl->sg_num_bytes = err; 205 - iov_iter_advance(&msg->msg_iter, err); 206 - } 712 + err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages); 713 + if (err) 714 + goto free; 207 715 208 716 /* 209 717 * Ensure output buffer is sufficiently large. 
If the caller provides ··· 178 778 } 179 779 180 780 processed = used + ctx->aead_assoclen; 181 - tsgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl, list); 781 + tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list); 182 782 183 783 /* 184 784 * Copy of AAD from source to destination ··· 211 811 areq->first_rsgl.sgl.sg, processed); 212 812 if (err) 213 813 goto free; 214 - aead_pull_tsgl(sk, processed, NULL, 0); 814 + af_alg_pull_tsgl(sk, processed, NULL, 0); 215 815 } else { 216 816 /* 217 817 * Decryption operation - To achieve an in-place cipher ··· 231 831 goto free; 232 832 233 833 /* Create TX SGL for tag and chain it to RX SGL. */ 234 - areq->tsgl_entries = aead_count_tsgl(sk, processed, 235 - processed - as); 834 + areq->tsgl_entries = af_alg_count_tsgl(sk, processed, 835 + processed - as); 236 836 if (!areq->tsgl_entries) 237 837 areq->tsgl_entries = 1; 238 838 areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * ··· 245 845 sg_init_table(areq->tsgl, areq->tsgl_entries); 246 846 247 847 /* Release TX SGL, except for tag data and reassign tag data. 
*/ 248 - aead_pull_tsgl(sk, processed, areq->tsgl, processed - as); 848 + af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as); 249 849 250 850 /* chain the areq TX SGL holding the tag with RX SGL */ 251 - if (last_rsgl) { 851 + if (usedpages) { 252 852 /* RX SGL present */ 253 - struct af_alg_sgl *sgl_prev = &last_rsgl->sgl; 853 + struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl; 254 854 255 855 sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); 256 856 sg_chain(sgl_prev->sg, sgl_prev->npages + 1, ··· 261 861 } 262 862 263 863 /* Initialize the crypto operation */ 264 - aead_request_set_crypt(&areq->aead_req, src, 864 + aead_request_set_crypt(&areq->cra_u.aead_req, src, 265 865 areq->first_rsgl.sgl.sg, used, ctx->iv); 266 - aead_request_set_ad(&areq->aead_req, ctx->aead_assoclen); 267 - aead_request_set_tfm(&areq->aead_req, tfm); 866 + aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen); 867 + aead_request_set_tfm(&areq->cra_u.aead_req, tfm); 268 868 269 869 if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { 270 870 /* AIO operation */ 271 871 areq->iocb = msg->msg_iocb; 272 - aead_request_set_callback(&areq->aead_req, 872 + aead_request_set_callback(&areq->cra_u.aead_req, 273 873 CRYPTO_TFM_REQ_MAY_BACKLOG, 274 - aead_async_cb, areq); 275 - err = ctx->enc ? crypto_aead_encrypt(&areq->aead_req) : 276 - crypto_aead_decrypt(&areq->aead_req); 874 + af_alg_async_cb, areq); 875 + err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) : 876 + crypto_aead_decrypt(&areq->cra_u.aead_req); 277 877 } else { 278 878 /* Synchronous operation */ 279 - aead_request_set_callback(&areq->aead_req, 879 + aead_request_set_callback(&areq->cra_u.aead_req, 280 880 CRYPTO_TFM_REQ_MAY_BACKLOG, 281 881 af_alg_complete, &ctx->completion); 282 882 err = af_alg_wait_for_completion(ctx->enc ? 
283 - crypto_aead_encrypt(&areq->aead_req) : 284 - crypto_aead_decrypt(&areq->aead_req), 285 - &ctx->completion); 883 + crypto_aead_encrypt(&areq->cra_u.aead_req) : 884 + crypto_aead_decrypt(&areq->cra_u.aead_req), 885 + &ctx->completion); 286 886 } 287 887 288 888 /* AIO operation in progress */ ··· 296 896 } 297 897 298 898 free: 299 - aead_free_areq_sgls(areq); 300 - if (areq) 301 - sock_kfree_s(sk, areq, areqlen); 899 + af_alg_free_areq_sgls(areq); 900 + sock_kfree_s(sk, areq, areq->areqlen); 302 901 303 902 return err ? err : outlen; 304 903 } ··· 330 931 } 331 932 332 933 out: 333 - aead_wmem_wakeup(sk); 934 + af_alg_wmem_wakeup(sk); 334 935 release_sock(sk); 335 936 return ret; 336 - } 337 - 338 - static unsigned int aead_poll(struct file *file, struct socket *sock, 339 - poll_table *wait) 340 - { 341 - struct sock *sk = sock->sk; 342 - struct alg_sock *ask = alg_sk(sk); 343 - struct aead_ctx *ctx = ask->private; 344 - unsigned int mask; 345 - 346 - sock_poll_wait(file, sk_sleep(sk), wait); 347 - mask = 0; 348 - 349 - if (!ctx->more) 350 - mask |= POLLIN | POLLRDNORM; 351 - 352 - if (aead_writable(sk)) 353 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 354 - 355 - return mask; 356 937 } 357 938 358 939 static struct proto_ops algif_aead_ops = { ··· 352 973 353 974 .release = af_alg_release, 354 975 .sendmsg = aead_sendmsg, 355 - .sendpage = aead_sendpage, 976 + .sendpage = af_alg_sendpage, 356 977 .recvmsg = aead_recvmsg, 357 - .poll = aead_poll, 978 + .poll = af_alg_poll, 358 979 }; 359 980 360 981 static int aead_check_key(struct socket *sock) ··· 416 1037 if (err) 417 1038 return err; 418 1039 419 - return aead_sendpage(sock, page, offset, size, flags); 1040 + return af_alg_sendpage(sock, page, offset, size, flags); 420 1041 } 421 1042 422 1043 static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg, ··· 450 1071 .sendmsg = aead_sendmsg_nokey, 451 1072 .sendpage = aead_sendpage_nokey, 452 1073 .recvmsg = aead_recvmsg_nokey, 453 - .poll = 
aead_poll, 1074 + .poll = af_alg_poll, 454 1075 }; 455 1076 456 1077 static void *aead_bind(const char *name, u32 type, u32 mask) ··· 511 1132 static void aead_sock_destruct(struct sock *sk) 512 1133 { 513 1134 struct alg_sock *ask = alg_sk(sk); 514 - struct aead_ctx *ctx = ask->private; 1135 + struct af_alg_ctx *ctx = ask->private; 515 1136 struct sock *psk = ask->parent; 516 1137 struct alg_sock *pask = alg_sk(psk); 517 1138 struct aead_tfm *aeadc = pask->private; 518 1139 struct crypto_aead *tfm = aeadc->aead; 519 1140 unsigned int ivlen = crypto_aead_ivsize(tfm); 520 1141 521 - aead_pull_tsgl(sk, ctx->used, NULL, 0); 1142 + af_alg_pull_tsgl(sk, ctx->used, NULL, 0); 522 1143 crypto_put_default_null_skcipher2(); 523 1144 sock_kzfree_s(sk, ctx->iv, ivlen); 524 1145 sock_kfree_s(sk, ctx, ctx->len); ··· 527 1148 528 1149 static int aead_accept_parent_nokey(void *private, struct sock *sk) 529 1150 { 530 - struct aead_ctx *ctx; 1151 + struct af_alg_ctx *ctx; 531 1152 struct alg_sock *ask = alg_sk(sk); 532 1153 struct aead_tfm *tfm = private; 533 1154 struct crypto_aead *aead = tfm->aead;
+37 -601
crypto/algif_skcipher.c
··· 33 33 #include <linux/init.h> 34 34 #include <linux/list.h> 35 35 #include <linux/kernel.h> 36 - #include <linux/sched/signal.h> 37 36 #include <linux/mm.h> 38 37 #include <linux/module.h> 39 38 #include <linux/net.h> 40 39 #include <net/sock.h> 41 40 42 - struct skcipher_tsgl { 43 - struct list_head list; 44 - int cur; 45 - struct scatterlist sg[0]; 46 - }; 47 - 48 - struct skcipher_rsgl { 49 - struct af_alg_sgl sgl; 50 - struct list_head list; 51 - size_t sg_num_bytes; 52 - }; 53 - 54 - struct skcipher_async_req { 55 - struct kiocb *iocb; 56 - struct sock *sk; 57 - 58 - struct skcipher_rsgl first_sgl; 59 - struct list_head rsgl_list; 60 - 61 - struct scatterlist *tsgl; 62 - unsigned int tsgl_entries; 63 - 64 - unsigned int areqlen; 65 - struct skcipher_request req; 66 - }; 67 - 68 41 struct skcipher_tfm { 69 42 struct crypto_skcipher *skcipher; 70 43 bool has_key; 71 44 }; 72 - 73 - struct skcipher_ctx { 74 - struct list_head tsgl_list; 75 - 76 - void *iv; 77 - 78 - struct af_alg_completion completion; 79 - 80 - size_t used; 81 - size_t rcvused; 82 - 83 - bool more; 84 - bool merge; 85 - bool enc; 86 - 87 - unsigned int len; 88 - }; 89 - 90 - #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_tsgl)) / \ 91 - sizeof(struct scatterlist) - 1) 92 - 93 - static inline int skcipher_sndbuf(struct sock *sk) 94 - { 95 - struct alg_sock *ask = alg_sk(sk); 96 - struct skcipher_ctx *ctx = ask->private; 97 - 98 - return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - 99 - ctx->used, 0); 100 - } 101 - 102 - static inline bool skcipher_writable(struct sock *sk) 103 - { 104 - return PAGE_SIZE <= skcipher_sndbuf(sk); 105 - } 106 - 107 - static inline int skcipher_rcvbuf(struct sock *sk) 108 - { 109 - struct alg_sock *ask = alg_sk(sk); 110 - struct skcipher_ctx *ctx = ask->private; 111 - 112 - return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - 113 - ctx->rcvused, 0); 114 - } 115 - 116 - static inline bool skcipher_readable(struct sock *sk) 117 
- { 118 - return PAGE_SIZE <= skcipher_rcvbuf(sk); 119 - } 120 - 121 - static int skcipher_alloc_tsgl(struct sock *sk) 122 - { 123 - struct alg_sock *ask = alg_sk(sk); 124 - struct skcipher_ctx *ctx = ask->private; 125 - struct skcipher_tsgl *sgl; 126 - struct scatterlist *sg = NULL; 127 - 128 - sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list); 129 - if (!list_empty(&ctx->tsgl_list)) 130 - sg = sgl->sg; 131 - 132 - if (!sg || sgl->cur >= MAX_SGL_ENTS) { 133 - sgl = sock_kmalloc(sk, sizeof(*sgl) + 134 - sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), 135 - GFP_KERNEL); 136 - if (!sgl) 137 - return -ENOMEM; 138 - 139 - sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); 140 - sgl->cur = 0; 141 - 142 - if (sg) 143 - sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); 144 - 145 - list_add_tail(&sgl->list, &ctx->tsgl_list); 146 - } 147 - 148 - return 0; 149 - } 150 - 151 - static unsigned int skcipher_count_tsgl(struct sock *sk, size_t bytes) 152 - { 153 - struct alg_sock *ask = alg_sk(sk); 154 - struct skcipher_ctx *ctx = ask->private; 155 - struct skcipher_tsgl *sgl, *tmp; 156 - unsigned int i; 157 - unsigned int sgl_count = 0; 158 - 159 - if (!bytes) 160 - return 0; 161 - 162 - list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) { 163 - struct scatterlist *sg = sgl->sg; 164 - 165 - for (i = 0; i < sgl->cur; i++) { 166 - sgl_count++; 167 - if (sg[i].length >= bytes) 168 - return sgl_count; 169 - 170 - bytes -= sg[i].length; 171 - } 172 - } 173 - 174 - return sgl_count; 175 - } 176 - 177 - static void skcipher_pull_tsgl(struct sock *sk, size_t used, 178 - struct scatterlist *dst) 179 - { 180 - struct alg_sock *ask = alg_sk(sk); 181 - struct skcipher_ctx *ctx = ask->private; 182 - struct skcipher_tsgl *sgl; 183 - struct scatterlist *sg; 184 - unsigned int i; 185 - 186 - while (!list_empty(&ctx->tsgl_list)) { 187 - sgl = list_first_entry(&ctx->tsgl_list, struct skcipher_tsgl, 188 - list); 189 - sg = sgl->sg; 190 - 191 - for (i = 0; i < sgl->cur; i++) { 192 - size_t plen = 
min_t(size_t, used, sg[i].length); 193 - struct page *page = sg_page(sg + i); 194 - 195 - if (!page) 196 - continue; 197 - 198 - /* 199 - * Assumption: caller created skcipher_count_tsgl(len) 200 - * SG entries in dst. 201 - */ 202 - if (dst) 203 - sg_set_page(dst + i, page, plen, sg[i].offset); 204 - 205 - sg[i].length -= plen; 206 - sg[i].offset += plen; 207 - 208 - used -= plen; 209 - ctx->used -= plen; 210 - 211 - if (sg[i].length) 212 - return; 213 - 214 - if (!dst) 215 - put_page(page); 216 - sg_assign_page(sg + i, NULL); 217 - } 218 - 219 - list_del(&sgl->list); 220 - sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) * 221 - (MAX_SGL_ENTS + 1)); 222 - } 223 - 224 - if (!ctx->used) 225 - ctx->merge = 0; 226 - } 227 - 228 - static void skcipher_free_areq_sgls(struct skcipher_async_req *areq) 229 - { 230 - struct sock *sk = areq->sk; 231 - struct alg_sock *ask = alg_sk(sk); 232 - struct skcipher_ctx *ctx = ask->private; 233 - struct skcipher_rsgl *rsgl, *tmp; 234 - struct scatterlist *tsgl; 235 - struct scatterlist *sg; 236 - unsigned int i; 237 - 238 - list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { 239 - ctx->rcvused -= rsgl->sg_num_bytes; 240 - af_alg_free_sg(&rsgl->sgl); 241 - list_del(&rsgl->list); 242 - if (rsgl != &areq->first_sgl) 243 - sock_kfree_s(sk, rsgl, sizeof(*rsgl)); 244 - } 245 - 246 - tsgl = areq->tsgl; 247 - for_each_sg(tsgl, sg, areq->tsgl_entries, i) { 248 - if (!sg_page(sg)) 249 - continue; 250 - put_page(sg_page(sg)); 251 - } 252 - 253 - if (areq->tsgl && areq->tsgl_entries) 254 - sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); 255 - } 256 - 257 - static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) 258 - { 259 - DEFINE_WAIT_FUNC(wait, woken_wake_function); 260 - int err = -ERESTARTSYS; 261 - long timeout; 262 - 263 - if (flags & MSG_DONTWAIT) 264 - return -EAGAIN; 265 - 266 - sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 267 - 268 - add_wait_queue(sk_sleep(sk), &wait); 269 - for (;;) { 270 - if 
(signal_pending(current)) 271 - break; 272 - timeout = MAX_SCHEDULE_TIMEOUT; 273 - if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) { 274 - err = 0; 275 - break; 276 - } 277 - } 278 - remove_wait_queue(sk_sleep(sk), &wait); 279 - 280 - return err; 281 - } 282 - 283 - static void skcipher_wmem_wakeup(struct sock *sk) 284 - { 285 - struct socket_wq *wq; 286 - 287 - if (!skcipher_writable(sk)) 288 - return; 289 - 290 - rcu_read_lock(); 291 - wq = rcu_dereference(sk->sk_wq); 292 - if (skwq_has_sleeper(wq)) 293 - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 294 - POLLRDNORM | 295 - POLLRDBAND); 296 - sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 297 - rcu_read_unlock(); 298 - } 299 - 300 - static int skcipher_wait_for_data(struct sock *sk, unsigned flags) 301 - { 302 - DEFINE_WAIT_FUNC(wait, woken_wake_function); 303 - struct alg_sock *ask = alg_sk(sk); 304 - struct skcipher_ctx *ctx = ask->private; 305 - long timeout; 306 - int err = -ERESTARTSYS; 307 - 308 - if (flags & MSG_DONTWAIT) { 309 - return -EAGAIN; 310 - } 311 - 312 - sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 313 - 314 - add_wait_queue(sk_sleep(sk), &wait); 315 - for (;;) { 316 - if (signal_pending(current)) 317 - break; 318 - timeout = MAX_SCHEDULE_TIMEOUT; 319 - if (sk_wait_event(sk, &timeout, ctx->used, &wait)) { 320 - err = 0; 321 - break; 322 - } 323 - } 324 - remove_wait_queue(sk_sleep(sk), &wait); 325 - 326 - sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 327 - 328 - return err; 329 - } 330 - 331 - static void skcipher_data_wakeup(struct sock *sk) 332 - { 333 - struct alg_sock *ask = alg_sk(sk); 334 - struct skcipher_ctx *ctx = ask->private; 335 - struct socket_wq *wq; 336 - 337 - if (!ctx->used) 338 - return; 339 - 340 - rcu_read_lock(); 341 - wq = rcu_dereference(sk->sk_wq); 342 - if (skwq_has_sleeper(wq)) 343 - wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | 344 - POLLRDNORM | 345 - POLLRDBAND); 346 - sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 347 - rcu_read_unlock(); 348 - } 349 
45 350 46 static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, 351 47 size_t size) ··· 50 354 struct alg_sock *ask = alg_sk(sk); 51 355 struct sock *psk = ask->parent; 52 356 struct alg_sock *pask = alg_sk(psk); 53 - struct skcipher_ctx *ctx = ask->private; 54 357 struct skcipher_tfm *skc = pask->private; 55 358 struct crypto_skcipher *tfm = skc->skcipher; 56 359 unsigned ivsize = crypto_skcipher_ivsize(tfm); 57 - struct skcipher_tsgl *sgl; 58 - struct af_alg_control con = {}; 59 - long copied = 0; 60 - bool enc = 0; 61 - bool init = 0; 62 - int err; 63 - int i; 64 360 65 - if (msg->msg_controllen) { 66 - err = af_alg_cmsg_send(msg, &con); 67 - if (err) 68 - return err; 69 - 70 - init = 1; 71 - switch (con.op) { 72 - case ALG_OP_ENCRYPT: 73 - enc = 1; 74 - break; 75 - case ALG_OP_DECRYPT: 76 - enc = 0; 77 - break; 78 - default: 79 - return -EINVAL; 80 - } 81 - 82 - if (con.iv && con.iv->ivlen != ivsize) 83 - return -EINVAL; 84 - } 85 - 86 - err = -EINVAL; 87 - 88 - lock_sock(sk); 89 - if (!ctx->more && ctx->used) 90 - goto unlock; 91 - 92 - if (init) { 93 - ctx->enc = enc; 94 - if (con.iv) 95 - memcpy(ctx->iv, con.iv->iv, ivsize); 96 - } 97 - 98 - while (size) { 99 - struct scatterlist *sg; 100 - unsigned long len = size; 101 - size_t plen; 102 - 103 - if (ctx->merge) { 104 - sgl = list_entry(ctx->tsgl_list.prev, 105 - struct skcipher_tsgl, list); 106 - sg = sgl->sg + sgl->cur - 1; 107 - len = min_t(unsigned long, len, 108 - PAGE_SIZE - sg->offset - sg->length); 109 - 110 - err = memcpy_from_msg(page_address(sg_page(sg)) + 111 - sg->offset + sg->length, 112 - msg, len); 113 - if (err) 114 - goto unlock; 115 - 116 - sg->length += len; 117 - ctx->merge = (sg->offset + sg->length) & 118 - (PAGE_SIZE - 1); 119 - 120 - ctx->used += len; 121 - copied += len; 122 - size -= len; 123 - continue; 124 - } 125 - 126 - if (!skcipher_writable(sk)) { 127 - err = skcipher_wait_for_wmem(sk, msg->msg_flags); 128 - if (err) 129 - goto unlock; 130 - } 131 - 132 - len = 
min_t(unsigned long, len, skcipher_sndbuf(sk)); 133 - 134 - err = skcipher_alloc_tsgl(sk); 135 - if (err) 136 - goto unlock; 137 - 138 - sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, 139 - list); 140 - sg = sgl->sg; 141 - if (sgl->cur) 142 - sg_unmark_end(sg + sgl->cur - 1); 143 - do { 144 - i = sgl->cur; 145 - plen = min_t(size_t, len, PAGE_SIZE); 146 - 147 - sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); 148 - err = -ENOMEM; 149 - if (!sg_page(sg + i)) 150 - goto unlock; 151 - 152 - err = memcpy_from_msg(page_address(sg_page(sg + i)), 153 - msg, plen); 154 - if (err) { 155 - __free_page(sg_page(sg + i)); 156 - sg_assign_page(sg + i, NULL); 157 - goto unlock; 158 - } 159 - 160 - sg[i].length = plen; 161 - len -= plen; 162 - ctx->used += plen; 163 - copied += plen; 164 - size -= plen; 165 - sgl->cur++; 166 - } while (len && sgl->cur < MAX_SGL_ENTS); 167 - 168 - if (!size) 169 - sg_mark_end(sg + sgl->cur - 1); 170 - 171 - ctx->merge = plen & (PAGE_SIZE - 1); 172 - } 173 - 174 - err = 0; 175 - 176 - ctx->more = msg->msg_flags & MSG_MORE; 177 - 178 - unlock: 179 - skcipher_data_wakeup(sk); 180 - release_sock(sk); 181 - 182 - return copied ?: err; 183 - } 184 - 185 - static ssize_t skcipher_sendpage(struct socket *sock, struct page *page, 186 - int offset, size_t size, int flags) 187 - { 188 - struct sock *sk = sock->sk; 189 - struct alg_sock *ask = alg_sk(sk); 190 - struct skcipher_ctx *ctx = ask->private; 191 - struct skcipher_tsgl *sgl; 192 - int err = -EINVAL; 193 - 194 - if (flags & MSG_SENDPAGE_NOTLAST) 195 - flags |= MSG_MORE; 196 - 197 - lock_sock(sk); 198 - if (!ctx->more && ctx->used) 199 - goto unlock; 200 - 201 - if (!size) 202 - goto done; 203 - 204 - if (!skcipher_writable(sk)) { 205 - err = skcipher_wait_for_wmem(sk, flags); 206 - if (err) 207 - goto unlock; 208 - } 209 - 210 - err = skcipher_alloc_tsgl(sk); 211 - if (err) 212 - goto unlock; 213 - 214 - ctx->merge = 0; 215 - sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, 
list); 216 - 217 - if (sgl->cur) 218 - sg_unmark_end(sgl->sg + sgl->cur - 1); 219 - 220 - sg_mark_end(sgl->sg + sgl->cur); 221 - get_page(page); 222 - sg_set_page(sgl->sg + sgl->cur, page, size, offset); 223 - sgl->cur++; 224 - ctx->used += size; 225 - 226 - done: 227 - ctx->more = flags & MSG_MORE; 228 - 229 - unlock: 230 - skcipher_data_wakeup(sk); 231 - release_sock(sk); 232 - 233 - return err ?: size; 234 - } 235 - 236 - static void skcipher_async_cb(struct crypto_async_request *req, int err) 237 - { 238 - struct skcipher_async_req *areq = req->data; 239 - struct sock *sk = areq->sk; 240 - struct kiocb *iocb = areq->iocb; 241 - unsigned int resultlen; 242 - 243 - lock_sock(sk); 244 - 245 - /* Buffer size written by crypto operation. */ 246 - resultlen = areq->req.cryptlen; 247 - 248 - skcipher_free_areq_sgls(areq); 249 - sock_kfree_s(sk, areq, areq->areqlen); 250 - __sock_put(sk); 251 - 252 - iocb->ki_complete(iocb, err ? err : resultlen, 0); 253 - 254 - release_sock(sk); 361 + return af_alg_sendmsg(sock, msg, size, ivsize); 255 362 } 256 363 257 364 static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, ··· 64 565 struct alg_sock *ask = alg_sk(sk); 65 566 struct sock *psk = ask->parent; 66 567 struct alg_sock *pask = alg_sk(psk); 67 - struct skcipher_ctx *ctx = ask->private; 568 + struct af_alg_ctx *ctx = ask->private; 68 569 struct skcipher_tfm *skc = pask->private; 69 570 struct crypto_skcipher *tfm = skc->skcipher; 70 571 unsigned int bs = crypto_skcipher_blocksize(tfm); 71 - unsigned int areqlen = sizeof(struct skcipher_async_req) + 72 - crypto_skcipher_reqsize(tfm); 73 - struct skcipher_async_req *areq; 74 - struct skcipher_rsgl *last_rsgl = NULL; 572 + struct af_alg_async_req *areq; 75 573 int err = 0; 76 574 size_t len = 0; 77 575 78 576 /* Allocate cipher request for current operation. 
*/ 79 - areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); 80 - if (unlikely(!areq)) 81 - return -ENOMEM; 82 - areq->areqlen = areqlen; 83 - areq->sk = sk; 84 - INIT_LIST_HEAD(&areq->rsgl_list); 85 - areq->tsgl = NULL; 86 - areq->tsgl_entries = 0; 577 + areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + 578 + crypto_skcipher_reqsize(tfm)); 579 + if (IS_ERR(areq)) 580 + return PTR_ERR(areq); 87 581 88 582 /* convert iovecs of output buffers into RX SGL */ 89 - while (msg_data_left(msg)) { 90 - struct skcipher_rsgl *rsgl; 91 - size_t seglen; 92 - 93 - /* limit the amount of readable buffers */ 94 - if (!skcipher_readable(sk)) 95 - break; 96 - 97 - if (!ctx->used) { 98 - err = skcipher_wait_for_data(sk, flags); 99 - if (err) 100 - goto free; 101 - } 102 - 103 - seglen = min_t(size_t, ctx->used, msg_data_left(msg)); 104 - 105 - if (list_empty(&areq->rsgl_list)) { 106 - rsgl = &areq->first_sgl; 107 - } else { 108 - rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); 109 - if (!rsgl) { 110 - err = -ENOMEM; 111 - goto free; 112 - } 113 - } 114 - 115 - rsgl->sgl.npages = 0; 116 - list_add_tail(&rsgl->list, &areq->rsgl_list); 117 - 118 - /* make one iovec available as scatterlist */ 119 - err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); 120 - if (err < 0) 121 - goto free; 122 - 123 - /* chain the new scatterlist with previous one */ 124 - if (last_rsgl) 125 - af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); 126 - 127 - last_rsgl = rsgl; 128 - len += err; 129 - ctx->rcvused += err; 130 - rsgl->sg_num_bytes = err; 131 - iov_iter_advance(&msg->msg_iter, err); 132 - } 583 + err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len); 584 + if (err) 585 + goto free; 133 586 134 587 /* Process only as much RX buffers for which we have TX data */ 135 588 if (len > ctx->used) ··· 98 647 * Create a per request TX SGL for this request which tracks the 99 648 * SG entries from the global TX SGL. 
100 649 */ 101 - areq->tsgl_entries = skcipher_count_tsgl(sk, len); 650 + areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0); 102 651 if (!areq->tsgl_entries) 103 652 areq->tsgl_entries = 1; 104 653 areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries, ··· 108 657 goto free; 109 658 } 110 659 sg_init_table(areq->tsgl, areq->tsgl_entries); 111 - skcipher_pull_tsgl(sk, len, areq->tsgl); 660 + af_alg_pull_tsgl(sk, len, areq->tsgl, 0); 112 661 113 662 /* Initialize the crypto operation */ 114 - skcipher_request_set_tfm(&areq->req, tfm); 115 - skcipher_request_set_crypt(&areq->req, areq->tsgl, 116 - areq->first_sgl.sgl.sg, len, ctx->iv); 663 + skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm); 664 + skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl, 665 + areq->first_rsgl.sgl.sg, len, ctx->iv); 117 666 118 667 if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { 119 668 /* AIO operation */ 120 669 areq->iocb = msg->msg_iocb; 121 - skcipher_request_set_callback(&areq->req, 670 + skcipher_request_set_callback(&areq->cra_u.skcipher_req, 122 671 CRYPTO_TFM_REQ_MAY_SLEEP, 123 - skcipher_async_cb, areq); 124 - err = ctx->enc ? crypto_skcipher_encrypt(&areq->req) : 125 - crypto_skcipher_decrypt(&areq->req); 672 + af_alg_async_cb, areq); 673 + err = ctx->enc ? 674 + crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : 675 + crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); 126 676 } else { 127 677 /* Synchronous operation */ 128 - skcipher_request_set_callback(&areq->req, 678 + skcipher_request_set_callback(&areq->cra_u.skcipher_req, 129 679 CRYPTO_TFM_REQ_MAY_SLEEP | 130 680 CRYPTO_TFM_REQ_MAY_BACKLOG, 131 681 af_alg_complete, 132 682 &ctx->completion); 133 683 err = af_alg_wait_for_completion(ctx->enc ? 
134 - crypto_skcipher_encrypt(&areq->req) : 135 - crypto_skcipher_decrypt(&areq->req), 684 + crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : 685 + crypto_skcipher_decrypt(&areq->cra_u.skcipher_req), 136 686 &ctx->completion); 137 687 } 138 688 139 689 /* AIO operation in progress */ 140 690 if (err == -EINPROGRESS) { 141 691 sock_hold(sk); 692 + 693 + /* Remember output size that will be generated. */ 694 + areq->outlen = len; 695 + 142 696 return -EIOCBQUEUED; 143 697 } 144 698 145 699 free: 146 - skcipher_free_areq_sgls(areq); 147 - if (areq) 148 - sock_kfree_s(sk, areq, areqlen); 700 + af_alg_free_areq_sgls(areq); 701 + sock_kfree_s(sk, areq, areq->areqlen); 149 702 150 703 return err ? err : len; 151 704 } ··· 182 727 } 183 728 184 729 out: 185 - skcipher_wmem_wakeup(sk); 730 + af_alg_wmem_wakeup(sk); 186 731 release_sock(sk); 187 732 return ret; 188 733 } 189 734 190 - static unsigned int skcipher_poll(struct file *file, struct socket *sock, 191 - poll_table *wait) 192 - { 193 - struct sock *sk = sock->sk; 194 - struct alg_sock *ask = alg_sk(sk); 195 - struct skcipher_ctx *ctx = ask->private; 196 - unsigned int mask; 197 - 198 - sock_poll_wait(file, sk_sleep(sk), wait); 199 - mask = 0; 200 - 201 - if (ctx->used) 202 - mask |= POLLIN | POLLRDNORM; 203 - 204 - if (skcipher_writable(sk)) 205 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 206 - 207 - return mask; 208 - } 209 735 210 736 static struct proto_ops algif_skcipher_ops = { 211 737 .family = PF_ALG, ··· 205 769 206 770 .release = af_alg_release, 207 771 .sendmsg = skcipher_sendmsg, 208 - .sendpage = skcipher_sendpage, 772 + .sendpage = af_alg_sendpage, 209 773 .recvmsg = skcipher_recvmsg, 210 - .poll = skcipher_poll, 774 + .poll = af_alg_poll, 211 775 }; 212 776 213 777 static int skcipher_check_key(struct socket *sock) ··· 269 833 if (err) 270 834 return err; 271 835 272 - return skcipher_sendpage(sock, page, offset, size, flags); 836 + return af_alg_sendpage(sock, page, offset, size, flags); 273 837 
} 274 838 275 839 static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg, ··· 303 867 .sendmsg = skcipher_sendmsg_nokey, 304 868 .sendpage = skcipher_sendpage_nokey, 305 869 .recvmsg = skcipher_recvmsg_nokey, 306 - .poll = skcipher_poll, 870 + .poll = af_alg_poll, 307 871 }; 308 872 309 873 static void *skcipher_bind(const char *name, u32 type, u32 mask) ··· 348 912 static void skcipher_sock_destruct(struct sock *sk) 349 913 { 350 914 struct alg_sock *ask = alg_sk(sk); 351 - struct skcipher_ctx *ctx = ask->private; 915 + struct af_alg_ctx *ctx = ask->private; 352 916 struct sock *psk = ask->parent; 353 917 struct alg_sock *pask = alg_sk(psk); 354 918 struct skcipher_tfm *skc = pask->private; 355 919 struct crypto_skcipher *tfm = skc->skcipher; 356 920 357 - skcipher_pull_tsgl(sk, ctx->used, NULL); 921 + af_alg_pull_tsgl(sk, ctx->used, NULL, 0); 358 922 sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); 359 923 sock_kfree_s(sk, ctx, ctx->len); 360 924 af_alg_release_parent(sk); ··· 362 926 363 927 static int skcipher_accept_parent_nokey(void *private, struct sock *sk) 364 928 { 365 - struct skcipher_ctx *ctx; 929 + struct af_alg_ctx *ctx; 366 930 struct alg_sock *ask = alg_sk(sk); 367 931 struct skcipher_tfm *tfm = private; 368 932 struct crypto_skcipher *skcipher = tfm->skcipher;
+170
include/crypto/if_alg.h
··· 20 20 #include <linux/types.h> 21 21 #include <net/sock.h> 22 22 23 + #include <crypto/aead.h> 24 + #include <crypto/skcipher.h> 25 + 23 26 #define ALG_MAX_PAGES 16 24 27 25 28 struct crypto_async_request; ··· 71 68 unsigned int npages; 72 69 }; 73 70 71 + /* TX SGL entry */ 72 + struct af_alg_tsgl { 73 + struct list_head list; 74 + unsigned int cur; /* Last processed SG entry */ 75 + struct scatterlist sg[0]; /* Array of SGs forming the SGL */ 76 + }; 77 + 78 + #define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \ 79 + sizeof(struct scatterlist) - 1) 80 + 81 + /* RX SGL entry */ 82 + struct af_alg_rsgl { 83 + struct af_alg_sgl sgl; 84 + struct list_head list; 85 + size_t sg_num_bytes; /* Bytes of data in that SGL */ 86 + }; 87 + 88 + /** 89 + * struct af_alg_async_req - definition of crypto request 90 + * @iocb: IOCB for AIO operations 91 + * @sk: Socket the request is associated with 92 + * @first_rsgl: First RX SG 93 + * @last_rsgl: Pointer to last RX SG 94 + * @rsgl_list: Track RX SGs 95 + * @tsgl: Private, per request TX SGL of buffers to process 96 + * @tsgl_entries: Number of entries in priv. TX SGL 97 + * @outlen: Number of output bytes generated by crypto op 98 + * @areqlen: Length of this data structure 99 + * @cra_u: Cipher request 100 + */ 101 + struct af_alg_async_req { 102 + struct kiocb *iocb; 103 + struct sock *sk; 104 + 105 + struct af_alg_rsgl first_rsgl; 106 + struct af_alg_rsgl *last_rsgl; 107 + struct list_head rsgl_list; 108 + 109 + struct scatterlist *tsgl; 110 + unsigned int tsgl_entries; 111 + 112 + unsigned int outlen; 113 + unsigned int areqlen; 114 + 115 + union { 116 + struct aead_request aead_req; 117 + struct skcipher_request skcipher_req; 118 + } cra_u; 119 + 120 + /* req ctx trails this struct */ 121 + }; 122 + 123 + /** 124 + * struct af_alg_ctx - definition of the crypto context 125 + * 126 + * The crypto context tracks the input data during the lifetime of an AF_ALG 127 + * socket. 
128 + * 129 + * @tsgl_list: Link to TX SGL 130 + * @iv: IV for cipher operation 131 + * @aead_assoclen: Length of AAD for AEAD cipher operations 132 + * @completion: Work queue for synchronous operation 133 + * @used: TX bytes sent to kernel. This variable is used to 134 + * ensure that user space cannot cause the kernel 135 + * to allocate too much memory in sendmsg operation. 136 + * @rcvused: Total RX bytes to be filled by kernel. This variable 137 + * is used to ensure user space cannot cause the kernel 138 + * to allocate too much memory in a recvmsg operation. 139 + * @more: More data to be expected from user space? 140 + * @merge: Shall new data from user space be merged into existing 141 + * SG? 142 + * @enc: Cryptographic operation to be performed when 143 + * recvmsg is invoked. 144 + * @len: Length of memory allocated for this data structure. 145 + */ 146 + struct af_alg_ctx { 147 + struct list_head tsgl_list; 148 + 149 + void *iv; 150 + size_t aead_assoclen; 151 + 152 + struct af_alg_completion completion; 153 + 154 + size_t used; 155 + size_t rcvused; 156 + 157 + bool more; 158 + bool merge; 159 + bool enc; 160 + 161 + unsigned int len; 162 + }; 163 + 74 164 int af_alg_register_type(const struct af_alg_type *type); 75 165 int af_alg_unregister_type(const struct af_alg_type *type); 76 166 ··· 189 93 { 190 94 init_completion(&completion->completion); 191 95 } 96 + 97 + /** 98 + * Size of available buffer for sending data from user space to kernel. 99 + * 100 + * @sk socket of connection to user space 101 + * @return number of bytes still available 102 + */ 103 + static inline int af_alg_sndbuf(struct sock *sk) 104 + { 105 + struct alg_sock *ask = alg_sk(sk); 106 + struct af_alg_ctx *ctx = ask->private; 107 + 108 + return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - 109 + ctx->used, 0); 110 + } 111 + 112 + /** 113 + * Can the send buffer still be written to? 
114 + * 115 + * @sk socket of connection to user space 116 + * @return true => writable, false => not writable 117 + */ 118 + static inline bool af_alg_writable(struct sock *sk) 119 + { 120 + return PAGE_SIZE <= af_alg_sndbuf(sk); 121 + } 122 + 123 + /** 124 + * Size of available buffer used by kernel for the RX user space operation. 125 + * 126 + * @sk socket of connection to user space 127 + * @return number of bytes still available 128 + */ 129 + static inline int af_alg_rcvbuf(struct sock *sk) 130 + { 131 + struct alg_sock *ask = alg_sk(sk); 132 + struct af_alg_ctx *ctx = ask->private; 133 + 134 + return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - 135 + ctx->rcvused, 0); 136 + } 137 + 138 + /** 139 + * Can the RX buffer still be written to? 140 + * 141 + * @sk socket of connection to user space 142 + * @return true => writable, false => not writable 143 + */ 144 + static inline bool af_alg_readable(struct sock *sk) 145 + { 146 + return PAGE_SIZE <= af_alg_rcvbuf(sk); 147 + } 148 + 149 + int af_alg_alloc_tsgl(struct sock *sk); 150 + unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); 151 + void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, 152 + size_t dst_offset); 153 + void af_alg_free_areq_sgls(struct af_alg_async_req *areq); 154 + int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags); 155 + void af_alg_wmem_wakeup(struct sock *sk); 156 + int af_alg_wait_for_data(struct sock *sk, unsigned flags); 157 + void af_alg_data_wakeup(struct sock *sk); 158 + int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, 159 + unsigned int ivsize); 160 + ssize_t af_alg_sendpage(struct socket *sock, struct page *page, 161 + int offset, size_t size, int flags); 162 + void af_alg_async_cb(struct crypto_async_request *_req, int err); 163 + unsigned int af_alg_poll(struct file *file, struct socket *sock, 164 + poll_table *wait); 165 + struct af_alg_async_req *af_alg_alloc_areq(struct 
sock *sk, 166 + unsigned int areqlen); 167 + int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, 168 + struct af_alg_async_req *areq, size_t maxsize, 169 + size_t *outlen); 192 170 193 171 #endif /* _CRYPTO_IF_ALG_H */