drivers/nvme/host/rdma.c | +5 -3
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -850,9 +850,11 @@
 	if (new)
 		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
 out_free_async_qe:
-	nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
-		sizeof(struct nvme_command), DMA_TO_DEVICE);
-	ctrl->async_event_sqe.data = NULL;
+	if (ctrl->async_event_sqe.data) {
+		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+			sizeof(struct nvme_command), DMA_TO_DEVICE);
+		ctrl->async_event_sqe.data = NULL;
+	}
 out_free_queue:
 	nvme_rdma_free_queue(&ctrl->queues[0]);
 	return error;
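
Note: the hunk above makes the admin-queue error path idempotent by freeing the async event SQE only while its data pointer is still set. A minimal standalone sketch of the same check-free-NULL idiom, with a hypothetical struct and helper (not names from the driver):

#include <linux/slab.h>

struct async_buf {
	void *data;	/* NULL once released */
};

/* Free the buffer at most once: a later pass through the same error
 * path sees data == NULL and returns without touching freed memory. */
static void async_buf_free_once(struct async_buf *b)
{
	if (!b->data)
		return;
	kfree(b->data);
	b->data = NULL;
}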
drivers/nvme/target/tcp.c | +9 -3
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -515,7 +515,7 @@
 	return 1;
 }
 
-static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
 	struct nvmet_tcp_queue *queue = cmd->queue;
 	int ret;
@@ -523,9 +523,15 @@
 	while (cmd->cur_sg) {
 		struct page *page = sg_page(cmd->cur_sg);
 		u32 left = cmd->cur_sg->length - cmd->offset;
+		int flags = MSG_DONTWAIT;
+
+		if ((!last_in_batch && cmd->queue->send_list_len) ||
+		    cmd->wbytes_done + left < cmd->req.transfer_len ||
+		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
+			flags |= MSG_MORE;
 
 		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
-					left, MSG_DONTWAIT | MSG_MORE);
+					left, flags);
 		if (ret <= 0)
 			return ret;
 
@@ -666,7 +660,7 @@
 	}
 
 	if (cmd->state == NVMET_TCP_SEND_DATA) {
-		ret = nvmet_try_send_data(cmd);
+		ret = nvmet_try_send_data(cmd, last_in_batch);
 		if (ret <= 0)
 			goto done_send;
 	}
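
Note: the tcp.c hunks request MSG_MORE only when another socket write for this command or batch is known to follow, so the last write of a response is pushed to the wire immediately. A hedged sketch of that decision in isolation; the condition mirrors the hunk, while the helper itself and its parameter list are assumed for illustration:

#include <linux/socket.h>

/* Decide the sendpage() flags for one data segment.  MSG_MORE is set
 * only when more output is guaranteed: further queued commands in the
 * batch, remaining payload bytes, a trailing data digest, or a response
 * PDU that still has to be sent. */
static int nvmet_tcp_send_flags(bool last_in_batch, int send_list_len,
				u32 wbytes_done, u32 left, u32 transfer_len,
				bool data_digest, bool sqhd_disabled)
{
	int flags = MSG_DONTWAIT;

	if ((!last_in_batch && send_list_len) ||
	    wbytes_done + left < transfer_len ||
	    data_digest || !sqhd_disabled)
		flags |= MSG_MORE;

	return flags;
}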