Merge branch 'nvme-4.14' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes from Christoph:

"A trivial one-liner from Martin to fix the visibility of the uuid attr,
and another one (originally from Abhishek Shah, rewritten by me) to fix
the CMB addresses passed back to the controller in case of a system that
remaps BAR addresses between host and device."

Changed files
+8 -8
drivers
nvme
host
+1 -1
drivers/nvme/host/core.c
··· 2136 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); 2137 2138 if (a == &dev_attr_uuid.attr) { 2139 - if (uuid_is_null(&ns->uuid) || 2140 !memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) 2141 return 0; 2142 }
··· 2136 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); 2137 2138 if (a == &dev_attr_uuid.attr) { 2139 + if (uuid_is_null(&ns->uuid) && 2140 !memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) 2141 return 0; 2142 }
+7 -7
drivers/nvme/host/pci.c
··· 94 struct mutex shutdown_lock; 95 bool subsystem; 96 void __iomem *cmb; 97 - dma_addr_t cmb_dma_addr; 98 u64 cmb_size; 99 u32 cmbsz; 100 u32 cmbloc; ··· 1226 if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { 1227 unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), 1228 dev->ctrl.page_size); 1229 - nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset; 1230 nvmeq->sq_cmds_io = dev->cmb + offset; 1231 } else { 1232 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), ··· 1527 resource_size_t bar_size; 1528 struct pci_dev *pdev = to_pci_dev(dev->dev); 1529 void __iomem *cmb; 1530 - dma_addr_t dma_addr; 1531 1532 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 1533 if (!(NVME_CMB_SZ(dev->cmbsz))) ··· 1540 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); 1541 size = szu * NVME_CMB_SZ(dev->cmbsz); 1542 offset = szu * NVME_CMB_OFST(dev->cmbloc); 1543 - bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc)); 1544 1545 if (offset > bar_size) 1546 return NULL; ··· 1554 if (size > bar_size - offset) 1555 size = bar_size - offset; 1556 1557 - dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset; 1558 - cmb = ioremap_wc(dma_addr, size); 1559 if (!cmb) 1560 return NULL; 1561 1562 - dev->cmb_dma_addr = dma_addr; 1563 dev->cmb_size = size; 1564 return cmb; 1565 }
··· 94 struct mutex shutdown_lock; 95 bool subsystem; 96 void __iomem *cmb; 97 + pci_bus_addr_t cmb_bus_addr; 98 u64 cmb_size; 99 u32 cmbsz; 100 u32 cmbloc; ··· 1226 if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { 1227 unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), 1228 dev->ctrl.page_size); 1229 + nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset; 1230 nvmeq->sq_cmds_io = dev->cmb + offset; 1231 } else { 1232 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), ··· 1527 resource_size_t bar_size; 1528 struct pci_dev *pdev = to_pci_dev(dev->dev); 1529 void __iomem *cmb; 1530 + int bar; 1531 1532 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 1533 if (!(NVME_CMB_SZ(dev->cmbsz))) ··· 1540 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); 1541 size = szu * NVME_CMB_SZ(dev->cmbsz); 1542 offset = szu * NVME_CMB_OFST(dev->cmbloc); 1543 + bar = NVME_CMB_BIR(dev->cmbloc); 1544 + bar_size = pci_resource_len(pdev, bar); 1545 1546 if (offset > bar_size) 1547 return NULL; ··· 1553 if (size > bar_size - offset) 1554 size = bar_size - offset; 1555 1556 + cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size); 1557 if (!cmb) 1558 return NULL; 1559 1560 + dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset; 1561 dev->cmb_size = size; 1562 return cmb; 1563 }