Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

NVMe: Retry failed commands with non-fatal errors

For commands returned with failed status, queue these for resubmission
and continue retrying them until success or for a limited amount of
time. The final timeout was arbitrarily chosen so requests can't be
retried indefinitely.

Since these are requeued on the nvmeq that submitted the command, the
callbacks have to take an nvmeq instead of an nvme_dev as a parameter
so that we can use the locked queue to append the iod to retry later.

The nvme_iod conveniently can be used to track how long we've been trying
to successfully complete an iod request. The nvme_iod also provides the
nvme prp dma mappings, so I had to move a few things around so we can
keep those mappings.

Signed-off-by: Keith Busch <keith.busch@intel.com>
[fixed checkpatch issue with long line]
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>

authored by

Keith Busch and committed by
Matthew Wilcox
edd10d33 4cc09e2d

+154 -96
+145 -90
drivers/block/nvme-core.c
··· 50 50 #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) 51 51 #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) 52 52 #define ADMIN_TIMEOUT (60 * HZ) 53 + #define IOD_TIMEOUT (4 * NVME_IO_TIMEOUT) 53 54 54 55 unsigned char io_timeout = 30; 55 56 module_param(io_timeout, byte, 0644); ··· 95 94 wait_queue_head_t sq_full; 96 95 wait_queue_t sq_cong_wait; 97 96 struct bio_list sq_cong; 97 + struct list_head iod_bio; 98 98 u32 __iomem *q_db; 99 99 u16 q_depth; 100 100 u16 cq_vector; ··· 130 128 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 131 129 } 132 130 133 - typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, 131 + typedef void (*nvme_completion_fn)(struct nvme_queue *, void *, 134 132 struct nvme_completion *); 135 133 136 134 struct nvme_cmd_info { ··· 202 200 #define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE) 203 201 #define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE) 204 202 205 - static void special_completion(struct nvme_dev *dev, void *ctx, 203 + static void special_completion(struct nvme_queue *nvmeq, void *ctx, 206 204 struct nvme_completion *cqe) 207 205 { 208 206 if (ctx == CMD_CTX_CANCELLED) ··· 210 208 if (ctx == CMD_CTX_FLUSH) 211 209 return; 212 210 if (ctx == CMD_CTX_ABORT) { 213 - ++dev->abort_limit; 211 + ++nvmeq->dev->abort_limit; 214 212 return; 215 213 } 216 214 if (ctx == CMD_CTX_COMPLETED) { 217 - dev_warn(&dev->pci_dev->dev, 215 + dev_warn(nvmeq->q_dmadev, 218 216 "completed id %d twice on queue %d\n", 219 217 cqe->command_id, le16_to_cpup(&cqe->sq_id)); 220 218 return; 221 219 } 222 220 if (ctx == CMD_CTX_INVALID) { 223 - dev_warn(&dev->pci_dev->dev, 221 + dev_warn(nvmeq->q_dmadev, 224 222 "invalid id %d completed on queue %d\n", 225 223 cqe->command_id, le16_to_cpup(&cqe->sq_id)); 226 224 return; 227 225 } 228 226 229 - dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx); 227 + dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx); 230 228 } 231 229 232 - static void 
async_completion(struct nvme_dev *dev, void *ctx, 230 + static void async_completion(struct nvme_queue *nvmeq, void *ctx, 233 231 struct nvme_completion *cqe) 234 232 { 235 233 struct async_cmd_info *cmdinfo = ctx; ··· 359 357 iod->npages = -1; 360 358 iod->length = nbytes; 361 359 iod->nents = 0; 360 + iod->first_dma = 0ULL; 362 361 iod->start_time = jiffies; 363 362 } 364 363 ··· 408 405 part_stat_unlock(); 409 406 } 410 407 411 - static void bio_completion(struct nvme_dev *dev, void *ctx, 408 + static void bio_completion(struct nvme_queue *nvmeq, void *ctx, 412 409 struct nvme_completion *cqe) 413 410 { 414 411 struct nvme_iod *iod = ctx; 415 412 struct bio *bio = iod->private; 416 413 u16 status = le16_to_cpup(&cqe->status) >> 1; 417 414 415 + if (unlikely(status)) { 416 + if (!(status & NVME_SC_DNR || 417 + bio->bi_rw & REQ_FAILFAST_MASK) && 418 + (jiffies - iod->start_time) < IOD_TIMEOUT) { 419 + if (!waitqueue_active(&nvmeq->sq_full)) 420 + add_wait_queue(&nvmeq->sq_full, 421 + &nvmeq->sq_cong_wait); 422 + list_add_tail(&iod->node, &nvmeq->iod_bio); 423 + wake_up(&nvmeq->sq_full); 424 + return; 425 + } 426 + } 418 427 if (iod->nents) { 419 - dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, 428 + dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents, 420 429 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 421 430 nvme_end_io_acct(bio, iod->start_time); 422 431 } 423 - nvme_free_iod(dev, iod); 432 + nvme_free_iod(nvmeq->dev, iod); 424 433 if (status) 425 434 bio_endio(bio, -EIO); 426 435 else ··· 440 425 } 441 426 442 427 /* length is in bytes. gfp flags indicates whether we may sleep. 
*/ 443 - int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd, 444 - struct nvme_iod *iod, int total_len, gfp_t gfp) 428 + int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len, 429 + gfp_t gfp) 445 430 { 446 431 struct dma_pool *pool; 447 432 int length = total_len; ··· 454 439 dma_addr_t prp_dma; 455 440 int nprps, i; 456 441 457 - cmd->prp1 = cpu_to_le64(dma_addr); 458 442 length -= (PAGE_SIZE - offset); 459 443 if (length <= 0) 460 444 return total_len; ··· 468 454 } 469 455 470 456 if (length <= PAGE_SIZE) { 471 - cmd->prp2 = cpu_to_le64(dma_addr); 457 + iod->first_dma = dma_addr; 472 458 return total_len; 473 459 } 474 460 ··· 483 469 484 470 prp_list = dma_pool_alloc(pool, gfp, &prp_dma); 485 471 if (!prp_list) { 486 - cmd->prp2 = cpu_to_le64(dma_addr); 472 + iod->first_dma = dma_addr; 487 473 iod->npages = -1; 488 474 return (total_len - length) + PAGE_SIZE; 489 475 } 490 476 list[0] = prp_list; 491 477 iod->first_dma = prp_dma; 492 - cmd->prp2 = cpu_to_le64(prp_dma); 493 478 i = 0; 494 479 for (;;) { 495 480 if (i == PAGE_SIZE / 8) { ··· 527 514 528 515 bio_chain(split, bio); 529 516 530 - if (bio_list_empty(&nvmeq->sq_cong)) 517 + if (!waitqueue_active(&nvmeq->sq_full)) 531 518 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 532 519 bio_list_add(&nvmeq->sq_cong, split); 533 520 bio_list_add(&nvmeq->sq_cong, bio); 521 + wake_up(&nvmeq->sq_full); 534 522 535 523 return 0; 536 524 } ··· 584 570 return length; 585 571 } 586 572 587 - /* 588 - * We reuse the small pool to allocate the 16-byte range here as it is not 589 - * worth having a special pool for these or additional cases to handle freeing 590 - * the iod. 
591 - */ 592 573 static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, 593 574 struct bio *bio, struct nvme_iod *iod, int cmdid) 594 575 { 595 - struct nvme_dsm_range *range; 576 + struct nvme_dsm_range *range = 577 + (struct nvme_dsm_range *)iod_list(iod)[0]; 596 578 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; 597 - 598 - range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC, 599 - &iod->first_dma); 600 - if (!range) 601 - return -ENOMEM; 602 - 603 - iod_list(iod)[0] = (__le64 *)range; 604 - iod->npages = 0; 605 579 606 580 range->cattr = cpu_to_le32(0); 607 581 range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift); ··· 637 635 return nvme_submit_flush(nvmeq, ns, cmdid); 638 636 } 639 637 640 - /* 641 - * Called with local interrupts disabled and the q_lock held. May not sleep. 642 - */ 643 - static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, 644 - struct bio *bio) 638 + static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod) 645 639 { 640 + struct bio *bio = iod->private; 641 + struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data; 646 642 struct nvme_command *cmnd; 647 - struct nvme_iod *iod; 648 - enum dma_data_direction dma_dir; 649 - int cmdid, length, result; 643 + int cmdid; 650 644 u16 control; 651 645 u32 dsmgmt; 652 - int psegs = bio_phys_segments(ns->queue, bio); 653 646 654 - if ((bio->bi_rw & REQ_FLUSH) && psegs) { 655 - result = nvme_submit_flush_data(nvmeq, ns); 656 - if (result) 657 - return result; 658 - } 659 - 660 - result = -ENOMEM; 661 - iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC); 662 - if (!iod) 663 - goto nomem; 664 - iod->private = bio; 665 - 666 - result = -EBUSY; 667 647 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT); 668 648 if (unlikely(cmdid < 0)) 669 - goto free_iod; 649 + return cmdid; 670 650 671 - if (bio->bi_rw & REQ_DISCARD) { 672 - result = nvme_submit_discard(nvmeq, ns, bio, iod, 
cmdid); 673 - if (result) 674 - goto free_cmdid; 675 - return result; 676 - } 677 - if ((bio->bi_rw & REQ_FLUSH) && !psegs) 651 + if (bio->bi_rw & REQ_DISCARD) 652 + return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid); 653 + if ((bio->bi_rw & REQ_FLUSH) && !iod->nents) 678 654 return nvme_submit_flush(nvmeq, ns, cmdid); 679 655 680 656 control = 0; ··· 666 686 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; 667 687 668 688 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; 669 - 670 689 memset(cmnd, 0, sizeof(*cmnd)); 671 - if (bio_data_dir(bio)) { 672 - cmnd->rw.opcode = nvme_cmd_write; 673 - dma_dir = DMA_TO_DEVICE; 674 - } else { 675 - cmnd->rw.opcode = nvme_cmd_read; 676 - dma_dir = DMA_FROM_DEVICE; 677 - } 678 690 679 - result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs); 680 - if (result <= 0) 681 - goto free_cmdid; 682 - length = result; 683 - 691 + cmnd->rw.opcode = bio_data_dir(bio) ? nvme_cmd_write : nvme_cmd_read; 684 692 cmnd->rw.command_id = cmdid; 685 693 cmnd->rw.nsid = cpu_to_le32(ns->ns_id); 686 - length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, 687 - GFP_ATOMIC); 694 + cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); 695 + cmnd->rw.prp2 = cpu_to_le64(iod->first_dma); 688 696 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector)); 689 - cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); 697 + cmnd->rw.length = 698 + cpu_to_le16((bio->bi_iter.bi_size >> ns->lba_shift) - 1); 690 699 cmnd->rw.control = cpu_to_le16(control); 691 700 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 692 701 693 - nvme_start_io_acct(bio); 694 702 if (++nvmeq->sq_tail == nvmeq->q_depth) 695 703 nvmeq->sq_tail = 0; 696 704 writel(nvmeq->sq_tail, nvmeq->q_db); 697 705 698 706 return 0; 707 + } 699 708 700 - free_cmdid: 701 - free_cmdid(nvmeq, cmdid, NULL); 709 + /* 710 + * Called with local interrupts disabled and the q_lock held. May not sleep. 
711 + */ 712 + static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, 713 + struct bio *bio) 714 + { 715 + struct nvme_iod *iod; 716 + int psegs = bio_phys_segments(ns->queue, bio); 717 + int result; 718 + 719 + if ((bio->bi_rw & REQ_FLUSH) && psegs) { 720 + result = nvme_submit_flush_data(nvmeq, ns); 721 + if (result) 722 + return result; 723 + } 724 + 725 + iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC); 726 + if (!iod) 727 + return -ENOMEM; 728 + 729 + iod->private = bio; 730 + if (bio->bi_rw & REQ_DISCARD) { 731 + void *range; 732 + /* 733 + * We reuse the small pool to allocate the 16-byte range here 734 + * as it is not worth having a special pool for these or 735 + * additional cases to handle freeing the iod. 736 + */ 737 + range = dma_pool_alloc(nvmeq->dev->prp_small_pool, 738 + GFP_ATOMIC, 739 + &iod->first_dma); 740 + if (!range) { 741 + result = -ENOMEM; 742 + goto free_iod; 743 + } 744 + iod_list(iod)[0] = (__le64 *)range; 745 + iod->npages = 0; 746 + } else if (psegs) { 747 + result = nvme_map_bio(nvmeq, iod, bio, 748 + bio_data_dir(bio) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE, 749 + psegs); 750 + if (result <= 0) 751 + goto free_iod; 752 + if (nvme_setup_prps(nvmeq->dev, iod, result, GFP_ATOMIC) != 753 + result) { 754 + result = -ENOMEM; 755 + goto free_iod; 756 + } 757 + nvme_start_io_acct(bio); 758 + } 759 + if (unlikely(nvme_submit_iod(nvmeq, iod))) { 760 + if (!waitqueue_active(&nvmeq->sq_full)) 761 + add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 762 + list_add_tail(&iod->node, &nvmeq->iod_bio); 763 + } 764 + return 0; 765 + 702 766 free_iod: 703 767 nvme_free_iod(nvmeq->dev, iod); 704 - nomem: 705 768 return result; 706 769 } 707 770 ··· 768 745 } 769 746 770 747 ctx = free_cmdid(nvmeq, cqe.command_id, &fn); 771 - fn(nvmeq->dev, ctx, &cqe); 748 + fn(nvmeq, ctx, &cqe); 772 749 } 773 750 774 751 /* If the controller ignores the cq head doorbell and continuously ··· 804 781 if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong)) 805 782 result = nvme_submit_bio_queue(nvmeq, ns, bio); 806 783 if (unlikely(result)) { 807 - if (bio_list_empty(&nvmeq->sq_cong)) 784 + if (!waitqueue_active(&nvmeq->sq_full)) 808 785 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 809 786 bio_list_add(&nvmeq->sq_cong, bio); 810 787 } ··· 848 825 int status; 849 826 }; 850 827 851 - static void sync_completion(struct nvme_dev *dev, void *ctx, 828 + static void sync_completion(struct nvme_queue *nvmeq, void *ctx, 852 829 struct nvme_completion *cqe) 853 830 { 854 831 struct sync_cmd_info *cmdinfo = ctx; ··· 1135 1112 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid, 1136 1113 nvmeq->qid); 1137 1114 ctx = cancel_cmdid(nvmeq, cmdid, &fn); 1138 - fn(nvmeq->dev, ctx, &cqe); 1115 + fn(nvmeq, ctx, &cqe); 1139 1116 } 1140 1117 } 1141 1118 ··· 1147 1124 while (bio_list_peek(&nvmeq->sq_cong)) { 1148 1125 struct bio *bio = bio_list_pop(&nvmeq->sq_cong); 1149 1126 bio_endio(bio, -EIO); 1127 + } 1128 + while (!list_empty(&nvmeq->iod_bio)) { 1129 + static struct nvme_completion cqe = { 1130 + .status = 
cpu_to_le16( 1131 + (NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1), 1132 + }; 1133 + struct nvme_iod *iod = list_first_entry(&nvmeq->iod_bio, 1134 + struct nvme_iod, 1135 + node); 1136 + list_del(&iod->node); 1137 + bio_completion(nvmeq, iod, &cqe); 1150 1138 } 1151 1139 spin_unlock_irq(&nvmeq->q_lock); 1152 1140 ··· 1266 1232 init_waitqueue_head(&nvmeq->sq_full); 1267 1233 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread); 1268 1234 bio_list_init(&nvmeq->sq_cong); 1235 + INIT_LIST_HEAD(&nvmeq->iod_bio); 1269 1236 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 1270 1237 nvmeq->q_depth = depth; 1271 1238 nvmeq->cq_vector = vector; ··· 1600 1565 c.rw.metadata = cpu_to_le64(meta_dma_addr); 1601 1566 } 1602 1567 1603 - length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL); 1568 + length = nvme_setup_prps(dev, iod, length, GFP_KERNEL); 1569 + c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); 1570 + c.rw.prp2 = cpu_to_le64(iod->first_dma); 1604 1571 1605 1572 if (length != (io.nblocks + 1) << ns->lba_shift) 1606 1573 status = -ENOMEM; ··· 1672 1635 length); 1673 1636 if (IS_ERR(iod)) 1674 1637 return PTR_ERR(iod); 1675 - length = nvme_setup_prps(dev, &c.common, iod, length, 1676 - GFP_KERNEL); 1638 + length = nvme_setup_prps(dev, iod, length, GFP_KERNEL); 1639 + c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); 1640 + c.common.prp2 = cpu_to_le64(iod->first_dma); 1677 1641 } 1678 1642 1679 1643 timeout = cmd.timeout_ms ? 
msecs_to_jiffies(cmd.timeout_ms) : ··· 1771 1733 .getgeo = nvme_getgeo, 1772 1734 }; 1773 1735 1736 + static void nvme_resubmit_iods(struct nvme_queue *nvmeq) 1737 + { 1738 + struct nvme_iod *iod, *next; 1739 + 1740 + list_for_each_entry_safe(iod, next, &nvmeq->iod_bio, node) { 1741 + if (unlikely(nvme_submit_iod(nvmeq, iod))) 1742 + break; 1743 + list_del(&iod->node); 1744 + if (bio_list_empty(&nvmeq->sq_cong) && 1745 + list_empty(&nvmeq->iod_bio)) 1746 + remove_wait_queue(&nvmeq->sq_full, 1747 + &nvmeq->sq_cong_wait); 1748 + } 1749 + } 1750 + 1774 1751 static void nvme_resubmit_bios(struct nvme_queue *nvmeq) 1775 1752 { 1776 1753 while (bio_list_peek(&nvmeq->sq_cong)) { 1777 1754 struct bio *bio = bio_list_pop(&nvmeq->sq_cong); 1778 1755 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data; 1779 1756 1780 - if (bio_list_empty(&nvmeq->sq_cong)) 1757 + if (bio_list_empty(&nvmeq->sq_cong) && 1758 + list_empty(&nvmeq->iod_bio)) 1781 1759 remove_wait_queue(&nvmeq->sq_full, 1782 1760 &nvmeq->sq_cong_wait); 1783 1761 if (nvme_submit_bio_queue(nvmeq, ns, bio)) { 1784 - if (bio_list_empty(&nvmeq->sq_cong)) 1762 + if (!waitqueue_active(&nvmeq->sq_full)) 1785 1763 add_wait_queue(&nvmeq->sq_full, 1786 1764 &nvmeq->sq_cong_wait); 1787 1765 bio_list_add_head(&nvmeq->sq_cong, bio); ··· 1839 1785 nvme_process_cq(nvmeq); 1840 1786 nvme_cancel_ios(nvmeq, true); 1841 1787 nvme_resubmit_bios(nvmeq); 1788 + nvme_resubmit_iods(nvmeq); 1842 1789 unlock: 1843 1790 spin_unlock_irq(&nvmeq->q_lock); 1844 1791 }
+6 -4
drivers/block/nvme-scsi.c
··· 1562 1562 res = PTR_ERR(iod); 1563 1563 goto out; 1564 1564 } 1565 - length = nvme_setup_prps(dev, &c.common, iod, tot_len, 1566 - GFP_KERNEL); 1565 + length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL); 1567 1566 if (length != tot_len) { 1568 1567 res = -ENOMEM; 1569 1568 goto out_unmap; 1570 1569 } 1571 1570 1571 + c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); 1572 + c.dlfw.prp2 = cpu_to_le64(iod->first_dma); 1572 1573 c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1); 1573 1574 c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS); 1574 1575 } else if (opcode == nvme_admin_activate_fw) { ··· 2093 2092 res = PTR_ERR(iod); 2094 2093 goto out; 2095 2094 } 2096 - retcode = nvme_setup_prps(dev, &c.common, iod, unit_len, 2097 - GFP_KERNEL); 2095 + retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL); 2098 2096 if (retcode != unit_len) { 2099 2097 nvme_unmap_user_pages(dev, 2100 2098 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, ··· 2102 2102 res = -ENOMEM; 2103 2103 goto out; 2104 2104 } 2105 + c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); 2106 + c.rw.prp2 = cpu_to_le64(iod->first_dma); 2105 2107 2106 2108 nvme_offset += unit_num_blocks; 2107 2109
+2 -2
include/linux/nvme.h
··· 136 136 int length; /* Of data, in bytes */ 137 137 unsigned long start_time; 138 138 dma_addr_t first_dma; 139 + struct list_head node; 139 140 struct scatterlist sg[0]; 140 141 }; 141 142 ··· 152 151 */ 153 152 void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod); 154 153 155 - int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd, 156 - struct nvme_iod *iod, int total_len, gfp_t gfp); 154 + int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , gfp_t); 157 155 struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, 158 156 unsigned long addr, unsigned length); 159 157 void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+1
include/uapi/linux/nvme.h
··· 434 434 NVME_SC_REFTAG_CHECK = 0x284, 435 435 NVME_SC_COMPARE_FAILED = 0x285, 436 436 NVME_SC_ACCESS_DENIED = 0x286, 437 + NVME_SC_DNR = 0x4000, 437 438 }; 438 439 439 440 struct nvme_completion {