Merge tag 'nvme-5.16-2021-12-10' of git://git.infradead.org/nvme into block-5.16

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 5.16

- set ana_log_size to 0 after freeing ana_log_buf (Hou Tao)
- show subsys nqn for duplicate cntlids (Keith Busch)
- disable namespace access for unsupported metadata (Keith Busch)
- report write pointer for a full zone as zone start + zone len
(Niklas Cassel)
- fix use after free when disconnecting a reconnecting ctrl
(Ruozhu Li)
- fix a list corruption in nvmet-tcp (Sagi Grimberg)"

* tag 'nvme-5.16-2021-12-10' of git://git.infradead.org/nvme:
nvmet-tcp: fix possible list corruption for unexpected command failure
nvme: fix use after free when disconnecting a reconnecting ctrl
nvme-multipath: set ana_log_size to 0 after free ana_log_buf
nvme: report write pointer for a full zone as zone start + zone len
nvme: disable namespace access for unsupported metadata
nvme: show subsys nqn for duplicate cntlids

5 files changed, 33 insertions(+), 9 deletions(-)
drivers/nvme/host/core.c | +18 -5
@@ -666,6 +666,7 @@
                 struct request *rq)
 {
         if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
+            ctrl->state != NVME_CTRL_DELETING &&
             ctrl->state != NVME_CTRL_DEAD &&
             !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
             !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
@@ -1750,9 +1749,20 @@
                  */
                 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
                         return -EINVAL;
-                if (ctrl->max_integrity_segments)
-                        ns->features |=
-                                (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+
+                ns->features |= NVME_NS_EXT_LBAS;
+
+                /*
+                 * The current fabrics transport drivers support namespace
+                 * metadata formats only if nvme_ns_has_pi() returns true.
+                 * Suppress support for all other formats so the namespace will
+                 * have a 0 capacity and not be usable through the block stack.
+                 *
+                 * Note, this check will need to be modified if any drivers
+                 * gain the ability to use other metadata formats.
+                 */
+                if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
+                        ns->features |= NVME_NS_METADATA_SUPPORTED;
         } else {
                 /*
                  * For PCIe controllers, we can't easily remap the separate
@@ -2708,8 +2696,9 @@
 
         if (tmp->cntlid == ctrl->cntlid) {
                 dev_err(ctrl->device,
-                        "Duplicate cntlid %u with %s, rejecting\n",
-                        ctrl->cntlid, dev_name(tmp->device));
+                        "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
+                        ctrl->cntlid, dev_name(tmp->device),
+                        subsys->subnqn);
                 return false;
         }
 
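For context on the nvme_ns_has_pi() check introduced above: in this kernel the helper lives in drivers/nvme/host/nvme.h and keys off T10 protection information, roughly as sketched below (a paraphrase for illustration, not a verbatim copy from the tree):

    /* Rough sketch of the helper the new comment refers to; the exact
     * condition in nvme.h may differ slightly. */
    static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
    {
            /* metadata is only usable when it is a T10 PI tuple */
            return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
    }

So on fabrics controllers a namespace formatted with any other metadata layout now keeps NVME_NS_EXT_LBAS but not NVME_NS_METADATA_SUPPORTED, which is what the "have a 0 capacity" note in the new comment refers to.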
drivers/nvme/host/multipath.c | +2 -1
@@ -866,7 +866,7 @@
         }
         if (ana_log_size > ctrl->ana_log_size) {
                 nvme_mpath_stop(ctrl);
-                kfree(ctrl->ana_log_buf);
+                nvme_mpath_uninit(ctrl);
                 ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
                 if (!ctrl->ana_log_buf)
                         return -ENOMEM;
@@ -886,4 +886,5 @@
 {
         kfree(ctrl->ana_log_buf);
         ctrl->ana_log_buf = NULL;
+        ctrl->ana_log_size = 0;
 }
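Why zeroing ana_log_size matters: the init path above only reallocates the ANA log buffer when the required size grows, so a stale non-zero size left behind after the buffer was freed let a later reconnect skip the allocation and dereference the now-NULL buffer. A sketch of the pre-fix sequence, reconstructed from the hunk above (helper names as in drivers/nvme/host/multipath.c; the flow is an illustration, not a verbatim trace):

    nvme_mpath_uninit(ctrl);                  /* old code: buffer freed and
                                               * NULLed, ana_log_size kept   */

    /* ... controller tears down and later reconnects ... */

    if (ana_log_size > ctrl->ana_log_size)    /* false with the stale size,  */
            ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
                                              /* so reallocation is skipped  */
    nvme_read_ana_log(ctrl);                  /* reads through the NULL buf  */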
drivers/nvme/host/nvme.h | +1 -1
@@ -709,7 +709,7 @@
                 return true;
         if (ctrl->ops->flags & NVME_F_FABRICS &&
             ctrl->state == NVME_CTRL_DELETING)
-                return true;
+                return queue_live;
         return __nvme_check_ready(ctrl, rq, queue_live);
 }
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
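This inline helper (nvme_check_ready() in nvme.h) is the gate the fabrics transports run before starting a command. Returning queue_live instead of an unconditional true for a DELETING controller means that once a queue has been torn down during disconnect of a reconnecting controller, the request goes to nvme_fail_nonready_command() instead of being issued against freed queue resources; the core.c hunk above adds DELETING to the states for which that function fails the request outright rather than returning BLK_STS_RESOURCE. A rough sketch of the caller side, modeled on the nvme-tcp queue_rq path (illustrative, not part of this diff):

    /* Transport-side sketch: queue_ready reflects whether the queue is
     * still live; a non-ready command is failed instead of started. */
    bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);

    if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
            return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);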
drivers/nvme/host/zns.c | +4 -1
@@ -166,7 +166,10 @@
         zone.len = ns->zsze;
         zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
         zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
-        zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+        if (zone.cond == BLK_ZONE_COND_FULL)
+                zone.wp = zone.start + zone.len;
+        else
+                zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
 
         return cb(&zone, idx, data);
 }
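With this change a FULL zone's write pointer always equals zone start + zone len, regardless of the value the device returned for a condition in which the write pointer is not meaningful. A worked example plus a minimal report-zones callback sketch (the zone numbers are made up for illustration; show_zone() is a hypothetical consumer using the block layer's report_zones_cb signature):

    /* Example: a FULL zone with start = 0x80000 and len = 0x80000 sectors
     * is now reported with wp = 0x100000 (start + len). */
    static int show_zone(struct blk_zone *zone, unsigned int idx, void *data)
    {
            if (zone->cond == BLK_ZONE_COND_FULL)
                    /* after this fix, wp points to the zone end */
                    WARN_ON(zone->wp != zone->start + zone->len);
            pr_info("zone %u: start %llu len %llu wp %llu\n",
                    idx, zone->start, zone->len, zone->wp);
            return 0;
    }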
drivers/nvme/target/tcp.c | +8 -1
@@ -922,7 +922,14 @@
         size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
         int ret;
 
-        if (!nvme_is_write(cmd->req.cmd) ||
+        /*
+         * This command has not been processed yet, hence we are trying to
+         * figure out if there is still pending data left to receive. If
+         * we don't, we can simply prepare for the next pdu and bail out,
+         * otherwise we will need to prepare a buffer and receive the
+         * stale data before continuing forward.
+         */
+        if (!nvme_is_write(cmd->req.cmd) || !data_len ||
             data_len > cmd->req.port->inline_data_size) {
                 nvmet_prepare_receive_pdu(queue);
                 return;