Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: mpi3mr: Support for preallocation of SGL BSG data buffers part-2

The driver acquires the required SGLs from the pre-allocated pool.

Co-developed-by: Sathya Prakash <sathya.prakash@broadcom.com>
Signed-off-by: Sathya Prakash <sathya.prakash@broadcom.com>
Signed-off-by: Chandrakanth patil <chandrakanth.patil@broadcom.com>
Link: https://lore.kernel.org/r/20231205191630.12201-3-chandrakanth.patil@broadcom.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

Authored by Chandrakanth patil and committed by Martin K. Petersen.
fb231d7d c432e167

+313 -85
+2
drivers/scsi/mpi3mr/mpi3mr.h
··· 247 247 u32 kern_buf_len; 248 248 dma_addr_t kern_buf_dma; 249 249 u8 data_dir; 250 + u16 num_dma_desc; 251 + struct dma_memory_desc *dma_desc; 250 252 }; 251 253 252 254 /* IOC State definitions */
+311 -85
drivers/scsi/mpi3mr/mpi3mr_app.c
··· 564 564 } 565 565 566 566 /** 567 + * mpi3mr_total_num_ioctl_sges - Count number of SGEs required 568 + * @drv_bufs: DMA address of the buffers to be placed in sgl 569 + * @bufcnt: Number of DMA buffers 570 + * 571 + * This function returns total number of data SGEs required 572 + * including zero length SGEs and excluding management request 573 + * and response buffer for the given list of data buffer 574 + * descriptors 575 + * 576 + * Return: Number of SGE elements needed 577 + */ 578 + static inline u16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_buf_map *drv_bufs, 579 + u8 bufcnt) 580 + { 581 + u16 i, sge_count = 0; 582 + 583 + for (i = 0; i < bufcnt; i++, drv_bufs++) { 584 + if (drv_bufs->data_dir == DMA_NONE || 585 + drv_bufs->kern_buf) 586 + continue; 587 + sge_count += drv_bufs->num_dma_desc; 588 + if (!drv_bufs->num_dma_desc) 589 + sge_count++; 590 + } 591 + return sge_count; 592 + } 593 + 594 + /** 567 595 * mpi3mr_bsg_build_sgl - SGL construction for MPI commands 596 + * @mrioc: Adapter instance reference 568 597 * @mpi_req: MPI request 569 598 * @sgl_offset: offset to start sgl in the MPI request 570 599 * @drv_bufs: DMA address of the buffers to be placed in sgl ··· 605 576 * This function places the DMA address of the given buffers in 606 577 * proper format as SGEs in the given MPI request. 
607 578 * 608 - * Return: Nothing 579 + * Return: 0 on success,-1 on failure 609 580 */ 610 - static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset, 611 - struct mpi3mr_buf_map *drv_bufs, u8 bufcnt, u8 is_rmc, 612 - u8 is_rmr, u8 num_datasges) 581 + static int mpi3mr_bsg_build_sgl(struct mpi3mr_ioc *mrioc, u8 *mpi_req, 582 + u32 sgl_offset, struct mpi3mr_buf_map *drv_bufs, 583 + u8 bufcnt, u8 is_rmc, u8 is_rmr, u8 num_datasges) 613 584 { 585 + struct mpi3_request_header *mpi_header = 586 + (struct mpi3_request_header *)mpi_req; 614 587 u8 *sgl = (mpi_req + sgl_offset), count = 0; 615 588 struct mpi3_mgmt_passthrough_request *rmgmt_req = 616 589 (struct mpi3_mgmt_passthrough_request *)mpi_req; 617 590 struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; 618 - u8 sgl_flags, sgl_flags_last; 591 + u8 flag, sgl_flags, sgl_flag_eob, sgl_flags_last, last_chain_sgl_flag; 592 + u16 available_sges, i, sges_needed; 593 + u32 sge_element_size = sizeof(struct mpi3_sge_common); 594 + bool chain_used = false; 619 595 620 596 sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 621 - MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_BUFFER; 622 - sgl_flags_last = sgl_flags | MPI3_SGE_FLAGS_END_OF_LIST; 597 + MPI3_SGE_FLAGS_DLAS_SYSTEM; 598 + sgl_flag_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER; 599 + sgl_flags_last = sgl_flag_eob | MPI3_SGE_FLAGS_END_OF_LIST; 600 + last_chain_sgl_flag = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 601 + MPI3_SGE_FLAGS_DLAS_SYSTEM; 602 + 603 + sges_needed = mpi3mr_total_num_ioctl_sges(drv_bufs, bufcnt); 623 604 624 605 if (is_rmc) { 625 606 mpi3mr_add_sg_single(&rmgmt_req->command_sgl, 626 607 sgl_flags_last, drv_buf_iter->kern_buf_len, 627 608 drv_buf_iter->kern_buf_dma); 628 - sgl = (u8 *)drv_buf_iter->kern_buf + drv_buf_iter->bsg_buf_len; 609 + sgl = (u8 *)drv_buf_iter->kern_buf + 610 + drv_buf_iter->bsg_buf_len; 611 + available_sges = (drv_buf_iter->kern_buf_len - 612 + drv_buf_iter->bsg_buf_len) / sge_element_size; 613 + 614 + if 
(sges_needed > available_sges) 615 + return -1; 616 + 617 + chain_used = true; 629 618 drv_buf_iter++; 630 619 count++; 631 620 if (is_rmr) { ··· 655 608 } else 656 609 mpi3mr_build_zero_len_sge( 657 610 &rmgmt_req->response_sgl); 611 + if (num_datasges) { 612 + i = 0; 613 + goto build_sges; 614 + } 615 + } else { 616 + if (sgl_offset >= MPI3MR_ADMIN_REQ_FRAME_SZ) 617 + return -1; 618 + available_sges = (MPI3MR_ADMIN_REQ_FRAME_SZ - sgl_offset) / 619 + sge_element_size; 620 + if (!available_sges) 621 + return -1; 658 622 } 659 623 if (!num_datasges) { 660 624 mpi3mr_build_zero_len_sge(sgl); 661 - return; 625 + return 0; 662 626 } 627 + if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) { 628 + if ((sges_needed > 2) || (sges_needed > available_sges)) 629 + return -1; 630 + for (; count < bufcnt; count++, drv_buf_iter++) { 631 + if (drv_buf_iter->data_dir == DMA_NONE || 632 + !drv_buf_iter->num_dma_desc) 633 + continue; 634 + mpi3mr_add_sg_single(sgl, sgl_flags_last, 635 + drv_buf_iter->dma_desc[0].size, 636 + drv_buf_iter->dma_desc[0].dma_addr); 637 + sgl += sge_element_size; 638 + } 639 + return 0; 640 + } 641 + i = 0; 642 + 643 + build_sges: 663 644 for (; count < bufcnt; count++, drv_buf_iter++) { 664 645 if (drv_buf_iter->data_dir == DMA_NONE) 665 646 continue; 666 - if (num_datasges == 1 || !is_rmc) 667 - mpi3mr_add_sg_single(sgl, sgl_flags_last, 668 - drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma); 669 - else 670 - mpi3mr_add_sg_single(sgl, sgl_flags, 671 - drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma); 672 - sgl += sizeof(struct mpi3_sge_common); 647 + if (!drv_buf_iter->num_dma_desc) { 648 + if (chain_used && !available_sges) 649 + return -1; 650 + if (!chain_used && (available_sges == 1) && 651 + (sges_needed > 1)) 652 + goto setup_chain; 653 + flag = sgl_flag_eob; 654 + if (num_datasges == 1) 655 + flag = sgl_flags_last; 656 + mpi3mr_add_sg_single(sgl, flag, 0, 0); 657 + sgl += sge_element_size; 658 + sges_needed--; 659 + 
available_sges--; 660 + num_datasges--; 661 + continue; 662 + } 663 + for (; i < drv_buf_iter->num_dma_desc; i++) { 664 + if (chain_used && !available_sges) 665 + return -1; 666 + if (!chain_used && (available_sges == 1) && 667 + (sges_needed > 1)) 668 + goto setup_chain; 669 + flag = sgl_flags; 670 + if (i == (drv_buf_iter->num_dma_desc - 1)) { 671 + if (num_datasges == 1) 672 + flag = sgl_flags_last; 673 + else 674 + flag = sgl_flag_eob; 675 + } 676 + 677 + mpi3mr_add_sg_single(sgl, flag, 678 + drv_buf_iter->dma_desc[i].size, 679 + drv_buf_iter->dma_desc[i].dma_addr); 680 + sgl += sge_element_size; 681 + available_sges--; 682 + sges_needed--; 683 + } 673 684 num_datasges--; 685 + i = 0; 674 686 } 687 + return 0; 688 + 689 + setup_chain: 690 + available_sges = mrioc->ioctl_chain_sge.size / sge_element_size; 691 + if (sges_needed > available_sges) 692 + return -1; 693 + mpi3mr_add_sg_single(sgl, last_chain_sgl_flag, 694 + (sges_needed * sge_element_size), 695 + mrioc->ioctl_chain_sge.dma_addr); 696 + memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size); 697 + sgl = (u8 *)mrioc->ioctl_chain_sge.addr; 698 + chain_used = true; 699 + goto build_sges; 675 700 } 676 701 677 702 /** ··· 1054 935 } 1055 936 return -1; 1056 937 } 938 + 939 + /** 940 + * mpi3mr_map_data_buffer_dma - build dma descriptors for data 941 + * buffers 942 + * @mrioc: Adapter instance reference 943 + * @drv_buf: buffer map descriptor 944 + * @desc_count: Number of already consumed dma descriptors 945 + * 946 + * This function computes how many pre-allocated DMA descriptors 947 + * are required for the given data buffer and if those number of 948 + * descriptors are free, then setup the mapping of the scattered 949 + * DMA address to the given data buffer, if the data direction 950 + * of the buffer is DMA_TO_DEVICE then the actual data is copied to 951 + * the DMA buffers 952 + * 953 + * Return: 0 on success, -1 on failure 954 + */ 955 + static int mpi3mr_map_data_buffer_dma(struct 
mpi3mr_ioc *mrioc, 956 + struct mpi3mr_buf_map *drv_buf, 957 + u16 desc_count) 958 + { 959 + u16 i, needed_desc = drv_buf->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE; 960 + u32 buf_len = drv_buf->kern_buf_len, copied_len = 0; 961 + 962 + if (drv_buf->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE) 963 + needed_desc++; 964 + if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) { 965 + dprint_bsg_err(mrioc, "%s: DMA descriptor mapping error %d:%d:%d\n", 966 + __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE); 967 + return -1; 968 + } 969 + drv_buf->dma_desc = kzalloc(sizeof(*drv_buf->dma_desc) * needed_desc, 970 + GFP_KERNEL); 971 + if (!drv_buf->dma_desc) 972 + return -1; 973 + for (i = 0; i < needed_desc; i++, desc_count++) { 974 + drv_buf->dma_desc[i].addr = mrioc->ioctl_sge[desc_count].addr; 975 + drv_buf->dma_desc[i].dma_addr = 976 + mrioc->ioctl_sge[desc_count].dma_addr; 977 + if (buf_len < mrioc->ioctl_sge[desc_count].size) 978 + drv_buf->dma_desc[i].size = buf_len; 979 + else 980 + drv_buf->dma_desc[i].size = 981 + mrioc->ioctl_sge[desc_count].size; 982 + buf_len -= drv_buf->dma_desc[i].size; 983 + memset(drv_buf->dma_desc[i].addr, 0, 984 + mrioc->ioctl_sge[desc_count].size); 985 + if (drv_buf->data_dir == DMA_TO_DEVICE) { 986 + memcpy(drv_buf->dma_desc[i].addr, 987 + drv_buf->bsg_buf + copied_len, 988 + drv_buf->dma_desc[i].size); 989 + copied_len += drv_buf->dma_desc[i].size; 990 + } 991 + } 992 + drv_buf->num_dma_desc = needed_desc; 993 + return 0; 994 + } 1057 995 /** 1058 996 * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler 1059 997 * @job: BSG job reference 1060 - * @reply_payload_rcv_len: length of payload recvd 1061 998 * 1062 999 * This function is the top level handler for MPI Pass through 1063 1000 * command, this does basic validation of the input data buffers, ··· 1129 954 * Return: 0 on success and proper error codes on failure 1130 955 */ 1131 956 1132 - static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int 
*reply_payload_rcv_len) 957 + static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) 1133 958 { 1134 959 long rval = -EINVAL; 1135 - 1136 960 struct mpi3mr_ioc *mrioc = NULL; 1137 961 u8 *mpi_req = NULL, *sense_buff_k = NULL; 1138 962 u8 mpi_msg_size = 0; ··· 1139 965 struct mpi3mr_bsg_mptcmd *karg; 1140 966 struct mpi3mr_buf_entry *buf_entries = NULL; 1141 967 struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL; 1142 - u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0; 1143 - u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF, sg_entries = 0; 1144 - u8 block_io = 0, resp_code = 0, nvme_fmt = 0; 968 + u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0; 969 + u8 din_cnt = 0, dout_cnt = 0; 970 + u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF; 971 + u8 block_io = 0, nvme_fmt = 0, resp_code = 0; 1145 972 struct mpi3_request_header *mpi_header = NULL; 1146 973 struct mpi3_status_reply_descriptor *status_desc; 1147 974 struct mpi3_scsi_task_mgmt_request *tm_req; ··· 1154 979 u32 din_size = 0, dout_size = 0; 1155 980 u8 *din_buf = NULL, *dout_buf = NULL; 1156 981 u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL; 982 + u16 rmc_size = 0, desc_count = 0; 1157 983 1158 984 bsg_req = job->request; 1159 985 karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd; ··· 1162 986 mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id); 1163 987 if (!mrioc) 1164 988 return -ENODEV; 989 + 990 + if (!mrioc->ioctl_sges_allocated) { 991 + dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n", 992 + __func__); 993 + return -ENOMEM; 994 + } 1165 995 1166 996 if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT) 1167 997 karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT; ··· 1209 1027 1210 1028 for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) { 1211 1029 1212 - if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) { 1213 - dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n", 1214 - 
__func__); 1215 - rval = -EINVAL; 1216 - goto out; 1217 - } 1218 - if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) { 1219 - dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n", 1220 - __func__); 1221 - rval = -EINVAL; 1222 - goto out; 1223 - } 1224 - 1225 1030 switch (buf_entries->buf_type) { 1226 1031 case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD: 1227 1032 sgl_iter = sgl_dout_iter; 1228 1033 sgl_dout_iter += buf_entries->buf_len; 1229 1034 drv_buf_iter->data_dir = DMA_TO_DEVICE; 1230 1035 is_rmcb = 1; 1231 - if (count != 0) 1036 + if ((count != 0) || !buf_entries->buf_len) 1232 1037 invalid_be = 1; 1233 1038 break; 1234 1039 case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP: ··· 1223 1054 sgl_din_iter += buf_entries->buf_len; 1224 1055 drv_buf_iter->data_dir = DMA_FROM_DEVICE; 1225 1056 is_rmrb = 1; 1226 - if (count != 1 || !is_rmcb) 1057 + if (count != 1 || !is_rmcb || !buf_entries->buf_len) 1227 1058 invalid_be = 1; 1228 1059 break; 1229 1060 case MPI3MR_BSG_BUFTYPE_DATA_IN: ··· 1231 1062 sgl_din_iter += buf_entries->buf_len; 1232 1063 drv_buf_iter->data_dir = DMA_FROM_DEVICE; 1233 1064 din_cnt++; 1234 - din_size += drv_buf_iter->bsg_buf_len; 1065 + din_size += buf_entries->buf_len; 1235 1066 if ((din_cnt > 1) && !is_rmcb) 1236 1067 invalid_be = 1; 1237 1068 break; ··· 1240 1071 sgl_dout_iter += buf_entries->buf_len; 1241 1072 drv_buf_iter->data_dir = DMA_TO_DEVICE; 1242 1073 dout_cnt++; 1243 - dout_size += drv_buf_iter->bsg_buf_len; 1074 + dout_size += buf_entries->buf_len; 1244 1075 if ((dout_cnt > 1) && !is_rmcb) 1245 1076 invalid_be = 1; 1246 1077 break; ··· 1249 1080 sgl_din_iter += buf_entries->buf_len; 1250 1081 drv_buf_iter->data_dir = DMA_NONE; 1251 1082 mpirep_offset = count; 1083 + if (!buf_entries->buf_len) 1084 + invalid_be = 1; 1252 1085 break; 1253 1086 case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE: 1254 1087 sgl_iter = sgl_din_iter; 1255 1088 sgl_din_iter += buf_entries->buf_len; 1256 1089 drv_buf_iter->data_dir = DMA_NONE; 1257 1090 erb_offset = 
count; 1091 + if (!buf_entries->buf_len) 1092 + invalid_be = 1; 1258 1093 break; 1259 1094 case MPI3MR_BSG_BUFTYPE_MPI_REQUEST: 1260 1095 sgl_iter = sgl_dout_iter; ··· 1285 1112 goto out; 1286 1113 } 1287 1114 1288 - drv_buf_iter->bsg_buf = sgl_iter; 1289 - drv_buf_iter->bsg_buf_len = buf_entries->buf_len; 1290 - 1291 - } 1292 - if (!is_rmcb && (dout_cnt || din_cnt)) { 1293 - sg_entries = dout_cnt + din_cnt; 1294 - if (((mpi_msg_size) + (sg_entries * 1295 - sizeof(struct mpi3_sge_common))) > MPI3MR_ADMIN_REQ_FRAME_SZ) { 1296 - dprint_bsg_err(mrioc, 1297 - "%s:%d: invalid message size passed\n", 1298 - __func__, __LINE__); 1115 + if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) { 1116 + dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n", 1117 + __func__); 1299 1118 rval = -EINVAL; 1300 1119 goto out; 1301 1120 } 1121 + if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) { 1122 + dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n", 1123 + __func__); 1124 + rval = -EINVAL; 1125 + goto out; 1126 + } 1127 + 1128 + drv_buf_iter->bsg_buf = sgl_iter; 1129 + drv_buf_iter->bsg_buf_len = buf_entries->buf_len; 1302 1130 } 1131 + 1132 + if (is_rmcb && ((din_size + dout_size) > MPI3MR_MAX_APP_XFER_SIZE)) { 1133 + dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n", 1134 + __func__, __LINE__, mpi_header->function, din_size, 1135 + dout_size); 1136 + rval = -EINVAL; 1137 + goto out; 1138 + } 1139 + 1303 1140 if (din_size > MPI3MR_MAX_APP_XFER_SIZE) { 1304 1141 dprint_bsg_err(mrioc, 1305 1142 "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n", ··· 1325 1142 goto out; 1326 1143 } 1327 1144 1145 + if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) { 1146 + if (din_size > MPI3MR_IOCTL_SGE_SIZE || 1147 + dout_size > MPI3MR_IOCTL_SGE_SIZE) { 1148 + dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n", 1149 + __func__, 
__LINE__, din_cnt, dout_cnt, din_size, 1150 + dout_size); 1151 + rval = -EINVAL; 1152 + goto out; 1153 + } 1154 + } 1155 + 1328 1156 drv_buf_iter = drv_bufs; 1329 1157 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 1330 1158 if (drv_buf_iter->data_dir == DMA_NONE) 1331 1159 continue; 1332 1160 1333 1161 drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len; 1334 - if (is_rmcb && !count) 1335 - drv_buf_iter->kern_buf_len += ((dout_cnt + din_cnt) * 1336 - sizeof(struct mpi3_sge_common)); 1337 - 1338 - if (!drv_buf_iter->kern_buf_len) 1339 - continue; 1340 - 1341 - drv_buf_iter->kern_buf = dma_alloc_coherent(&mrioc->pdev->dev, 1342 - drv_buf_iter->kern_buf_len, &drv_buf_iter->kern_buf_dma, 1343 - GFP_KERNEL); 1344 - if (!drv_buf_iter->kern_buf) { 1345 - rval = -ENOMEM; 1162 + if (is_rmcb && !count) { 1163 + drv_buf_iter->kern_buf_len = 1164 + mrioc->ioctl_chain_sge.size; 1165 + drv_buf_iter->kern_buf = 1166 + mrioc->ioctl_chain_sge.addr; 1167 + drv_buf_iter->kern_buf_dma = 1168 + mrioc->ioctl_chain_sge.dma_addr; 1169 + drv_buf_iter->dma_desc = NULL; 1170 + drv_buf_iter->num_dma_desc = 0; 1171 + memset(drv_buf_iter->kern_buf, 0, 1172 + drv_buf_iter->kern_buf_len); 1173 + tmplen = min(drv_buf_iter->kern_buf_len, 1174 + drv_buf_iter->bsg_buf_len); 1175 + rmc_size = tmplen; 1176 + memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen); 1177 + } else if (is_rmrb && (count == 1)) { 1178 + drv_buf_iter->kern_buf_len = 1179 + mrioc->ioctl_resp_sge.size; 1180 + drv_buf_iter->kern_buf = 1181 + mrioc->ioctl_resp_sge.addr; 1182 + drv_buf_iter->kern_buf_dma = 1183 + mrioc->ioctl_resp_sge.dma_addr; 1184 + drv_buf_iter->dma_desc = NULL; 1185 + drv_buf_iter->num_dma_desc = 0; 1186 + memset(drv_buf_iter->kern_buf, 0, 1187 + drv_buf_iter->kern_buf_len); 1188 + tmplen = min(drv_buf_iter->kern_buf_len, 1189 + drv_buf_iter->bsg_buf_len); 1190 + drv_buf_iter->kern_buf_len = tmplen; 1191 + memset(drv_buf_iter->bsg_buf, 0, 1192 + drv_buf_iter->bsg_buf_len); 1193 + } else 
{ 1194 + if (!drv_buf_iter->kern_buf_len) 1195 + continue; 1196 + if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) { 1197 + rval = -ENOMEM; 1198 + dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n", 1199 + __func__, __LINE__); 1346 1200 goto out; 1347 1201 } 1348 - if (drv_buf_iter->data_dir == DMA_TO_DEVICE) { 1349 - tmplen = min(drv_buf_iter->kern_buf_len, 1350 - drv_buf_iter->bsg_buf_len); 1351 - memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen); 1202 + desc_count += drv_buf_iter->num_dma_desc; 1352 1203 } 1353 1204 } 1354 1205 ··· 1452 1235 goto out; 1453 1236 } 1454 1237 } else { 1455 - mpi3mr_bsg_build_sgl(mpi_req, (mpi_msg_size), 1456 - drv_bufs, bufcnt, is_rmcb, is_rmrb, 1457 - (dout_cnt + din_cnt)); 1238 + if (mpi3mr_bsg_build_sgl(mrioc, mpi_req, mpi_msg_size, 1239 + drv_bufs, bufcnt, is_rmcb, is_rmrb, 1240 + (dout_cnt + din_cnt))) { 1241 + dprint_bsg_err(mrioc, "%s: sgl build failed\n", __func__); 1242 + rval = -EAGAIN; 1243 + mutex_unlock(&mrioc->bsg_cmds.mutex); 1244 + goto out; 1245 + } 1458 1246 } 1459 1247 1460 1248 if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) { ··· 1495 1273 if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) { 1496 1274 drv_buf_iter = &drv_bufs[0]; 1497 1275 dprint_dump(drv_buf_iter->kern_buf, 1498 - drv_buf_iter->kern_buf_len, "mpi3_mgmt_req"); 1276 + rmc_size, "mpi3_mgmt_req"); 1499 1277 } 1500 1278 } 1501 1279 ··· 1530 1308 MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) { 1531 1309 drv_buf_iter = &drv_bufs[0]; 1532 1310 dprint_dump(drv_buf_iter->kern_buf, 1533 - drv_buf_iter->kern_buf_len, "mpi3_mgmt_req"); 1311 + rmc_size, "mpi3_mgmt_req"); 1534 1312 } 1535 1313 } 1536 - 1537 1314 if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) || 1538 1315 (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO)) 1539 1316 mpi3mr_issue_tm(mrioc, ··· 1603 1382 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 1604 1383 if (drv_buf_iter->data_dir == DMA_NONE) 1605 1384 
continue; 1606 - if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) { 1607 - tmplen = min(drv_buf_iter->kern_buf_len, 1608 - drv_buf_iter->bsg_buf_len); 1385 + if ((count == 1) && is_rmrb) { 1609 1386 memcpy(drv_buf_iter->bsg_buf, 1610 - drv_buf_iter->kern_buf, tmplen); 1387 + drv_buf_iter->kern_buf, 1388 + drv_buf_iter->kern_buf_len); 1389 + } else if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) { 1390 + tmplen = 0; 1391 + for (desc_count = 0; 1392 + desc_count < drv_buf_iter->num_dma_desc; 1393 + desc_count++) { 1394 + memcpy(((u8 *)drv_buf_iter->bsg_buf + tmplen), 1395 + drv_buf_iter->dma_desc[desc_count].addr, 1396 + drv_buf_iter->dma_desc[desc_count].size); 1397 + tmplen += 1398 + drv_buf_iter->dma_desc[desc_count].size; 1611 1399 } 1400 + } 1612 1401 } 1613 1402 1614 1403 out_unlock: 1615 1404 if (din_buf) { 1616 - *reply_payload_rcv_len = 1405 + job->reply_payload_rcv_len = 1617 1406 sg_copy_from_buffer(job->reply_payload.sg_list, 1618 1407 job->reply_payload.sg_cnt, 1619 1408 din_buf, job->reply_payload.payload_len); ··· 1639 1408 kfree(mpi_req); 1640 1409 if (drv_bufs) { 1641 1410 drv_buf_iter = drv_bufs; 1642 - for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 1643 - if (drv_buf_iter->kern_buf && drv_buf_iter->kern_buf_dma) 1644 - dma_free_coherent(&mrioc->pdev->dev, 1645 - drv_buf_iter->kern_buf_len, 1646 - drv_buf_iter->kern_buf, 1647 - drv_buf_iter->kern_buf_dma); 1648 - } 1411 + for (count = 0; count < bufcnt; count++, drv_buf_iter++) 1412 + kfree(drv_buf_iter->dma_desc); 1649 1413 kfree(drv_bufs); 1650 1414 } 1651 1415 kfree(bsg_reply_buf); ··· 1699 1473 rval = mpi3mr_bsg_process_drv_cmds(job); 1700 1474 break; 1701 1475 case MPI3MR_MPT_CMD: 1702 - rval = mpi3mr_bsg_process_mpt_cmds(job, &reply_payload_rcv_len); 1476 + rval = mpi3mr_bsg_process_mpt_cmds(job); 1703 1477 break; 1704 1478 default: 1705 1479 pr_err("%s: unsupported BSG command(0x%08x)\n",