Merge branch 'nvme-4.10-fixes' of git://git.infradead.org/nvme into for-linus

Pull nvme target fixes from Sagi:

Given that it's -rc6, I removed anything that is not a
bug fix.

- nvmet-fc discard fix from Christoph
- queue disconnect fix from James
- nvmet-rdma dma sync fix from Parav
- Some more nvmet fixes

+58 -18
+3 -3
drivers/nvme/host/fc.c
··· 1663 1663 return 0; 1664 1664 1665 1665 freq->sg_table.sgl = freq->first_sgl; 1666 - ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments, 1667 - freq->sg_table.sgl); 1666 + ret = sg_alloc_table_chained(&freq->sg_table, 1667 + blk_rq_nr_phys_segments(rq), freq->sg_table.sgl); 1668 1668 if (ret) 1669 1669 return -ENOMEM; 1670 1670 1671 1671 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); 1672 - WARN_ON(op->nents > rq->nr_phys_segments); 1672 + WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); 1673 1673 dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 1674 1674 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, 1675 1675 op->nents, dir);
+1
drivers/nvme/target/configfs.c
··· 631 631 { 632 632 struct nvmet_subsys *subsys = to_subsys(item); 633 633 634 + nvmet_subsys_del_ctrls(subsys); 634 635 nvmet_subsys_put(subsys); 635 636 } 636 637
+14 -1
drivers/nvme/target/core.c
··· 200 200 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n", 201 201 ctrl->cntlid, ctrl->kato); 202 202 203 - ctrl->ops->delete_ctrl(ctrl); 203 + nvmet_ctrl_fatal_error(ctrl); 204 204 } 205 205 206 206 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) ··· 816 816 list_del(&ctrl->subsys_entry); 817 817 mutex_unlock(&subsys->lock); 818 818 819 + flush_work(&ctrl->async_event_work); 820 + cancel_work_sync(&ctrl->fatal_err_work); 821 + 819 822 ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid); 820 823 nvmet_subsys_put(subsys); 821 824 ··· 936 933 ida_destroy(&subsys->cntlid_ida); 937 934 kfree(subsys->subsysnqn); 938 935 kfree(subsys); 936 + } 937 + 938 + void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys) 939 + { 940 + struct nvmet_ctrl *ctrl; 941 + 942 + mutex_lock(&subsys->lock); 943 + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) 944 + ctrl->ops->delete_ctrl(ctrl); 945 + mutex_unlock(&subsys->lock); 939 946 } 940 947 941 948 void nvmet_subsys_put(struct nvmet_subsys *subsys)
+22 -14
drivers/nvme/target/fc.c
··· 1314 1314 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; 1315 1315 struct fcnvme_ls_disconnect_acc *acc = 1316 1316 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; 1317 - struct nvmet_fc_tgt_queue *queue; 1317 + struct nvmet_fc_tgt_queue *queue = NULL; 1318 1318 struct nvmet_fc_tgt_assoc *assoc; 1319 1319 int ret = 0; 1320 1320 bool del_assoc = false; ··· 1348 1348 assoc = nvmet_fc_find_target_assoc(tgtport, 1349 1349 be64_to_cpu(rqst->associd.association_id)); 1350 1350 iod->assoc = assoc; 1351 - if (!assoc) 1351 + if (assoc) { 1352 + if (rqst->discon_cmd.scope == 1353 + FCNVME_DISCONN_CONNECTION) { 1354 + queue = nvmet_fc_find_target_queue(tgtport, 1355 + be64_to_cpu( 1356 + rqst->discon_cmd.id)); 1357 + if (!queue) { 1358 + nvmet_fc_tgt_a_put(assoc); 1359 + ret = VERR_NO_CONN; 1360 + } 1361 + } 1362 + } else 1352 1363 ret = VERR_NO_ASSOC; 1353 1364 } 1354 1365 ··· 1384 1373 FCNVME_LS_DISCONNECT); 1385 1374 1386 1375 1387 - if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) { 1388 - queue = nvmet_fc_find_target_queue(tgtport, 1389 - be64_to_cpu(rqst->discon_cmd.id)); 1390 - if (queue) { 1391 - int qid = queue->qid; 1376 + /* are we to delete a Connection ID (queue) */ 1377 + if (queue) { 1378 + int qid = queue->qid; 1392 1379 1393 - nvmet_fc_delete_target_queue(queue); 1380 + nvmet_fc_delete_target_queue(queue); 1394 1381 1395 - /* release the get taken by find_target_queue */ 1396 - nvmet_fc_tgt_q_put(queue); 1382 + /* release the get taken by find_target_queue */ 1383 + nvmet_fc_tgt_q_put(queue); 1397 1384 1398 - /* tear association down if io queue terminated */ 1399 - if (!qid) 1400 - del_assoc = true; 1401 - } 1385 + /* tear association down if io queue terminated */ 1386 + if (!qid) 1387 + del_assoc = true; 1402 1388 } 1403 1389 1404 1390 /* release get taken in nvmet_fc_find_target_assoc */
+1
drivers/nvme/target/nvmet.h
··· 282 282 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, 283 283 enum nvme_subsys_type type); 284 284 void nvmet_subsys_put(struct nvmet_subsys *subsys); 285 + void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys); 285 286 286 287 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid); 287 288 void nvmet_put_namespace(struct nvmet_ns *ns);
+17
drivers/nvme/target/rdma.c
··· 438 438 { 439 439 struct ib_recv_wr *bad_wr; 440 440 441 + ib_dma_sync_single_for_device(ndev->device, 442 + cmd->sge[0].addr, cmd->sge[0].length, 443 + DMA_FROM_DEVICE); 444 + 441 445 if (ndev->srq) 442 446 return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr); 443 447 return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr); ··· 542 538 first_wr = &rsp->send_wr; 543 539 544 540 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); 541 + 542 + ib_dma_sync_single_for_device(rsp->queue->dev->device, 543 + rsp->send_sge.addr, rsp->send_sge.length, 544 + DMA_TO_DEVICE); 545 + 545 546 if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) { 546 547 pr_err("sending cmd response failed\n"); 547 548 nvmet_rdma_release_rsp(rsp); ··· 706 697 cmd->queue = queue; 707 698 cmd->n_rdma = 0; 708 699 cmd->req.port = queue->port; 700 + 701 + 702 + ib_dma_sync_single_for_cpu(queue->dev->device, 703 + cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, 704 + DMA_FROM_DEVICE); 705 + ib_dma_sync_single_for_cpu(queue->dev->device, 706 + cmd->send_sge.addr, cmd->send_sge.length, 707 + DMA_TO_DEVICE); 709 708 710 709 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, 711 710 &queue->nvme_sq, &nvmet_rdma_ops))