Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cifs: smbd: Properly process errors on ib_post_send

When processing errors from ib_post_send(), the transport state needs to be
rolled back to the condition before the error.

Refactor the old code to make it easy to roll back on IB errors, and fix this.

Signed-off-by: Long Li <longli@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>

Authored by Long Li and committed by Steve French.

f1b7b862 eda1c54f

+116 -142
fs/cifs/smbdirect.c
··· 800 800 return 0; 801 801 } 802 802 803 - /* 804 - * Build and prepare the SMBD packet header 805 - * This function waits for avaialbe send credits and build a SMBD packet 806 - * header. The caller then optional append payload to the packet after 807 - * the header 808 - * intput values 809 - * size: the size of the payload 810 - * remaining_data_length: remaining data to send if this is part of a 811 - * fragmented packet 812 - * output values 813 - * request_out: the request allocated from this function 814 - * return values: 0 on success, otherwise actual error code returned 815 - */ 816 - static int smbd_create_header(struct smbd_connection *info, 817 - int size, int remaining_data_length, 818 - struct smbd_request **request_out) 819 - { 820 - struct smbd_request *request; 821 - struct smbd_data_transfer *packet; 822 - int header_length; 823 - int new_credits; 824 - int rc; 825 - 826 - /* Wait for send credits. A SMBD packet needs one credit */ 827 - rc = wait_event_interruptible(info->wait_send_queue, 828 - atomic_read(&info->send_credits) > 0 || 829 - info->transport_status != SMBD_CONNECTED); 830 - if (rc) 831 - return rc; 832 - 833 - if (info->transport_status != SMBD_CONNECTED) { 834 - log_outgoing(ERR, "disconnected not sending\n"); 835 - return -EAGAIN; 836 - } 837 - atomic_dec(&info->send_credits); 838 - 839 - request = mempool_alloc(info->request_mempool, GFP_KERNEL); 840 - if (!request) { 841 - rc = -ENOMEM; 842 - goto err_alloc; 843 - } 844 - 845 - request->info = info; 846 - 847 - /* Fill in the packet header */ 848 - packet = smbd_request_payload(request); 849 - packet->credits_requested = cpu_to_le16(info->send_credit_target); 850 - 851 - new_credits = manage_credits_prior_sending(info); 852 - atomic_add(new_credits, &info->receive_credits); 853 - packet->credits_granted = cpu_to_le16(new_credits); 854 - 855 - info->send_immediate = false; 856 - 857 - packet->flags = 0; 858 - if (manage_keep_alive_before_sending(info)) 859 - packet->flags |= 
cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED); 860 - 861 - packet->reserved = 0; 862 - if (!size) 863 - packet->data_offset = 0; 864 - else 865 - packet->data_offset = cpu_to_le32(24); 866 - packet->data_length = cpu_to_le32(size); 867 - packet->remaining_data_length = cpu_to_le32(remaining_data_length); 868 - packet->padding = 0; 869 - 870 - log_outgoing(INFO, "credits_requested=%d credits_granted=%d " 871 - "data_offset=%d data_length=%d remaining_data_length=%d\n", 872 - le16_to_cpu(packet->credits_requested), 873 - le16_to_cpu(packet->credits_granted), 874 - le32_to_cpu(packet->data_offset), 875 - le32_to_cpu(packet->data_length), 876 - le32_to_cpu(packet->remaining_data_length)); 877 - 878 - /* Map the packet to DMA */ 879 - header_length = sizeof(struct smbd_data_transfer); 880 - /* If this is a packet without payload, don't send padding */ 881 - if (!size) 882 - header_length = offsetof(struct smbd_data_transfer, padding); 883 - 884 - request->num_sge = 1; 885 - request->sge[0].addr = ib_dma_map_single(info->id->device, 886 - (void *)packet, 887 - header_length, 888 - DMA_TO_DEVICE); 889 - if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { 890 - mempool_free(request, info->request_mempool); 891 - rc = -EIO; 892 - goto err_dma; 893 - } 894 - 895 - request->sge[0].length = header_length; 896 - request->sge[0].lkey = info->pd->local_dma_lkey; 897 - 898 - *request_out = request; 899 - return 0; 900 - 901 - err_dma: 902 - /* roll back receive credits */ 903 - spin_lock(&info->lock_new_credits_offered); 904 - info->new_credits_offered += new_credits; 905 - spin_unlock(&info->lock_new_credits_offered); 906 - atomic_sub(new_credits, &info->receive_credits); 907 - 908 - err_alloc: 909 - /* roll back send credits */ 910 - atomic_inc(&info->send_credits); 911 - 912 - return rc; 913 - } 914 - 915 - static void smbd_destroy_header(struct smbd_connection *info, 916 - struct smbd_request *request) 917 - { 918 - 919 - ib_dma_unmap_single(info->id->device, 920 
- request->sge[0].addr, 921 - request->sge[0].length, 922 - DMA_TO_DEVICE); 923 - mempool_free(request, info->request_mempool); 924 - atomic_inc(&info->send_credits); 925 - } 926 - 927 803 /* Post the send request */ 928 804 static int smbd_post_send(struct smbd_connection *info, 929 805 struct smbd_request *request) ··· 827 951 send_wr.opcode = IB_WR_SEND; 828 952 send_wr.send_flags = IB_SEND_SIGNALED; 829 953 830 - wait_sq: 831 - wait_event(info->wait_post_send, 832 - atomic_read(&info->send_pending) < info->send_credit_target); 833 - if (unlikely(atomic_inc_return(&info->send_pending) > 834 - info->send_credit_target)) { 835 - atomic_dec(&info->send_pending); 836 - goto wait_sq; 837 - } 838 - 839 954 rc = ib_post_send(info->id->qp, &send_wr, NULL); 840 955 if (rc) { 841 956 log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc); 842 - if (atomic_dec_and_test(&info->send_pending)) 843 - wake_up(&info->wait_send_pending); 844 957 smbd_disconnect_rdma_connection(info); 845 958 rc = -EAGAIN; 846 959 } else ··· 845 980 { 846 981 int num_sgs; 847 982 int i, rc; 983 + int header_length; 848 984 struct smbd_request *request; 985 + struct smbd_data_transfer *packet; 986 + int new_credits; 849 987 struct scatterlist *sg; 850 988 851 - rc = smbd_create_header( 852 - info, data_length, remaining_data_length, &request); 989 + wait_credit: 990 + /* Wait for send credits. 
A SMBD packet needs one credit */ 991 + rc = wait_event_interruptible(info->wait_send_queue, 992 + atomic_read(&info->send_credits) > 0 || 993 + info->transport_status != SMBD_CONNECTED); 853 994 if (rc) 854 - return rc; 995 + goto err_wait_credit; 855 996 997 + if (info->transport_status != SMBD_CONNECTED) { 998 + log_outgoing(ERR, "disconnected not sending on wait_credit\n"); 999 + rc = -EAGAIN; 1000 + goto err_wait_credit; 1001 + } 1002 + if (unlikely(atomic_dec_return(&info->send_credits) < 0)) { 1003 + atomic_inc(&info->send_credits); 1004 + goto wait_credit; 1005 + } 1006 + 1007 + wait_send_queue: 1008 + wait_event(info->wait_post_send, 1009 + atomic_read(&info->send_pending) < info->send_credit_target || 1010 + info->transport_status != SMBD_CONNECTED); 1011 + 1012 + if (info->transport_status != SMBD_CONNECTED) { 1013 + log_outgoing(ERR, "disconnected not sending on wait_send_queue\n"); 1014 + rc = -EAGAIN; 1015 + goto err_wait_send_queue; 1016 + } 1017 + 1018 + if (unlikely(atomic_inc_return(&info->send_pending) > 1019 + info->send_credit_target)) { 1020 + atomic_dec(&info->send_pending); 1021 + goto wait_send_queue; 1022 + } 1023 + 1024 + request = mempool_alloc(info->request_mempool, GFP_KERNEL); 1025 + if (!request) { 1026 + rc = -ENOMEM; 1027 + goto err_alloc; 1028 + } 1029 + 1030 + request->info = info; 1031 + 1032 + /* Fill in the packet header */ 1033 + packet = smbd_request_payload(request); 1034 + packet->credits_requested = cpu_to_le16(info->send_credit_target); 1035 + 1036 + new_credits = manage_credits_prior_sending(info); 1037 + atomic_add(new_credits, &info->receive_credits); 1038 + packet->credits_granted = cpu_to_le16(new_credits); 1039 + 1040 + info->send_immediate = false; 1041 + 1042 + packet->flags = 0; 1043 + if (manage_keep_alive_before_sending(info)) 1044 + packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED); 1045 + 1046 + packet->reserved = 0; 1047 + if (!data_length) 1048 + packet->data_offset = 0; 1049 + else 1050 + 
packet->data_offset = cpu_to_le32(24); 1051 + packet->data_length = cpu_to_le32(data_length); 1052 + packet->remaining_data_length = cpu_to_le32(remaining_data_length); 1053 + packet->padding = 0; 1054 + 1055 + log_outgoing(INFO, "credits_requested=%d credits_granted=%d " 1056 + "data_offset=%d data_length=%d remaining_data_length=%d\n", 1057 + le16_to_cpu(packet->credits_requested), 1058 + le16_to_cpu(packet->credits_granted), 1059 + le32_to_cpu(packet->data_offset), 1060 + le32_to_cpu(packet->data_length), 1061 + le32_to_cpu(packet->remaining_data_length)); 1062 + 1063 + /* Map the packet to DMA */ 1064 + header_length = sizeof(struct smbd_data_transfer); 1065 + /* If this is a packet without payload, don't send padding */ 1066 + if (!data_length) 1067 + header_length = offsetof(struct smbd_data_transfer, padding); 1068 + 1069 + request->num_sge = 1; 1070 + request->sge[0].addr = ib_dma_map_single(info->id->device, 1071 + (void *)packet, 1072 + header_length, 1073 + DMA_TO_DEVICE); 1074 + if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { 1075 + rc = -EIO; 1076 + request->sge[0].addr = 0; 1077 + goto err_dma; 1078 + } 1079 + 1080 + request->sge[0].length = header_length; 1081 + request->sge[0].lkey = info->pd->local_dma_lkey; 1082 + 1083 + /* Fill in the packet data payload */ 856 1084 num_sgs = sgl ? 
sg_nents(sgl) : 0; 857 1085 for_each_sg(sgl, sg, num_sgs, i) { 858 1086 request->sge[i+1].addr = ··· 955 997 info->id->device, request->sge[i+1].addr)) { 956 998 rc = -EIO; 957 999 request->sge[i+1].addr = 0; 958 - goto dma_mapping_failure; 1000 + goto err_dma; 959 1001 } 960 1002 request->sge[i+1].length = sg->length; 961 1003 request->sge[i+1].lkey = info->pd->local_dma_lkey; ··· 966 1008 if (!rc) 967 1009 return 0; 968 1010 969 - dma_mapping_failure: 970 - for (i = 1; i < request->num_sge; i++) 1011 + err_dma: 1012 + for (i = 0; i < request->num_sge; i++) 971 1013 if (request->sge[i].addr) 972 1014 ib_dma_unmap_single(info->id->device, 973 1015 request->sge[i].addr, 974 1016 request->sge[i].length, 975 1017 DMA_TO_DEVICE); 976 - smbd_destroy_header(info, request); 1018 + mempool_free(request, info->request_mempool); 1019 + 1020 + /* roll back receive credits and credits to be offered */ 1021 + spin_lock(&info->lock_new_credits_offered); 1022 + info->new_credits_offered += new_credits; 1023 + spin_unlock(&info->lock_new_credits_offered); 1024 + atomic_sub(new_credits, &info->receive_credits); 1025 + 1026 + err_alloc: 1027 + if (atomic_dec_and_test(&info->send_pending)) 1028 + wake_up(&info->wait_send_pending); 1029 + 1030 + err_wait_send_queue: 1031 + /* roll back send credits and pending */ 1032 + atomic_inc(&info->send_credits); 1033 + 1034 + err_wait_credit: 977 1035 return rc; 978 1036 } 979 1037