Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'block-6.15-20250411' of git://git.kernel.dk/linux

Pull more block fixes from Jens Axboe:
"Apparently my internal clock was off, or perhaps it was just wishful
thinking, but I sent out block fixes yesterday as my brain assumed it
was Friday. Subsequently, that missed the NVMe fixes that should go
into this week's release as well. Hence, here's a followup with those,
and another simple fix.

- NVMe pull request via Christoph:
- nvmet fc/fcloop refcounting fixes (Daniel Wagner)
- fix missed namespace/ANA scans (Hannes Reinecke)
- fix a use after free in the new TCP netns support (Kuniyuki
Iwashima)
- fix a NULL instead of false return in multipath (Uday Shankar)

- Use strscpy() for null_blk disk name copy"

* tag 'block-6.15-20250411' of git://git.kernel.dk/linux:
null_blk: Use strscpy() instead of strscpy_pad() in null_add_dev()
nvmet-fc: put ref when assoc->del_work is already scheduled
nvmet-fc: take tgtport reference only once
nvmet-fc: update tgtport ref per assoc
nvmet-fc: inline nvmet_fc_free_hostport
nvmet-fc: inline nvmet_fc_delete_assoc
nvmet-fcloop: add ref counting to lport
nvmet-fcloop: replace kref with refcount
nvmet-fcloop: swap list_add_tail arguments
nvme-tcp: fix use-after-free of netns by kernel TCP socket.
nvme: multipath: fix return value of nvme_available_path
nvme: re-read ANA log page after ns scan completes
nvme: requeue namespace scan on missed AENs

+74 -77
+1 -1
drivers/block/null_blk/main.c
··· 2031 2031 nullb->disk->minors = 1; 2032 2032 nullb->disk->fops = &null_ops; 2033 2033 nullb->disk->private_data = nullb; 2034 - strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN); 2034 + strscpy(nullb->disk->disk_name, nullb->disk_name); 2035 2035 2036 2036 if (nullb->dev->zoned) { 2037 2037 rv = null_register_zoned_dev(nullb);
+9
drivers/nvme/host/core.c
··· 4295 4295 nvme_scan_ns_sequential(ctrl); 4296 4296 } 4297 4297 mutex_unlock(&ctrl->scan_lock); 4298 + 4299 + /* Requeue if we have missed AENs */ 4300 + if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) 4301 + nvme_queue_scan(ctrl); 4302 + #ifdef CONFIG_NVME_MULTIPATH 4303 + else 4304 + /* Re-read the ANA log page to not miss updates */ 4305 + queue_work(nvme_wq, &ctrl->ana_work); 4306 + #endif 4298 4307 } 4299 4308 4300 4309 /*
+1 -1
drivers/nvme/host/multipath.c
··· 427 427 struct nvme_ns *ns; 428 428 429 429 if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) 430 - return NULL; 430 + return false; 431 431 432 432 list_for_each_entry_srcu(ns, &head->list, siblings, 433 433 srcu_read_lock_held(&head->srcu)) {
+2
drivers/nvme/host/tcp.c
··· 1803 1803 ret = PTR_ERR(sock_file); 1804 1804 goto err_destroy_mutex; 1805 1805 } 1806 + 1807 + sk_net_refcnt_upgrade(queue->sock->sk); 1806 1808 nvme_tcp_reclassify_socket(queue->sock); 1807 1809 1808 1810 /* Single syn retry */
+20 -40
drivers/nvme/target/fc.c
··· 995 995 return kref_get_unless_zero(&hostport->ref); 996 996 } 997 997 998 - static void 999 - nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) 1000 - { 1001 - /* if LLDD not implemented, leave as NULL */ 1002 - if (!hostport || !hostport->hosthandle) 1003 - return; 1004 - 1005 - nvmet_fc_hostport_put(hostport); 1006 - } 1007 - 1008 998 static struct nvmet_fc_hostport * 1009 999 nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1010 1000 { ··· 1018 1028 struct nvmet_fc_hostport *newhost, *match = NULL; 1019 1029 unsigned long flags; 1020 1030 1031 + /* 1032 + * Caller holds a reference on tgtport. 1033 + */ 1034 + 1021 1035 /* if LLDD not implemented, leave as NULL */ 1022 1036 if (!hosthandle) 1023 1037 return NULL; 1024 - 1025 - /* 1026 - * take reference for what will be the newly allocated hostport if 1027 - * we end up using a new allocation 1028 - */ 1029 - if (!nvmet_fc_tgtport_get(tgtport)) 1030 - return ERR_PTR(-EINVAL); 1031 1038 1032 1039 spin_lock_irqsave(&tgtport->lock, flags); 1033 1040 match = nvmet_fc_match_hostport(tgtport, hosthandle); 1034 1041 spin_unlock_irqrestore(&tgtport->lock, flags); 1035 1042 1036 - if (match) { 1037 - /* no new allocation - release reference */ 1038 - nvmet_fc_tgtport_put(tgtport); 1043 + if (match) 1039 1044 return match; 1040 - } 1041 1045 1042 1046 newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); 1043 - if (!newhost) { 1044 - /* no new allocation - release reference */ 1045 - nvmet_fc_tgtport_put(tgtport); 1047 + if (!newhost) 1046 1048 return ERR_PTR(-ENOMEM); 1047 - } 1048 1049 1049 1050 spin_lock_irqsave(&tgtport->lock, flags); 1050 1051 match = nvmet_fc_match_hostport(tgtport, hosthandle); ··· 1044 1063 kfree(newhost); 1045 1064 newhost = match; 1046 1065 } else { 1066 + nvmet_fc_tgtport_get(tgtport); 1047 1067 newhost->tgtport = tgtport; 1048 1068 newhost->hosthandle = hosthandle; 1049 1069 INIT_LIST_HEAD(&newhost->host_list); ··· 1058 1076 } 1059 1077 1060 1078 static void 
1061 - nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) 1062 - { 1063 - nvmet_fc_delete_target_assoc(assoc); 1064 - nvmet_fc_tgt_a_put(assoc); 1065 - } 1066 - 1067 - static void 1068 1079 nvmet_fc_delete_assoc_work(struct work_struct *work) 1069 1080 { 1070 1081 struct nvmet_fc_tgt_assoc *assoc = 1071 1082 container_of(work, struct nvmet_fc_tgt_assoc, del_work); 1072 1083 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1073 1084 1074 - nvmet_fc_delete_assoc(assoc); 1085 + nvmet_fc_delete_target_assoc(assoc); 1086 + nvmet_fc_tgt_a_put(assoc); 1075 1087 nvmet_fc_tgtport_put(tgtport); 1076 1088 } 1077 1089 ··· 1073 1097 nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) 1074 1098 { 1075 1099 nvmet_fc_tgtport_get(assoc->tgtport); 1076 - queue_work(nvmet_wq, &assoc->del_work); 1100 + if (!queue_work(nvmet_wq, &assoc->del_work)) 1101 + nvmet_fc_tgtport_put(assoc->tgtport); 1077 1102 } 1078 1103 1079 1104 static bool ··· 1120 1143 goto out_ida; 1121 1144 1122 1145 assoc->tgtport = tgtport; 1146 + nvmet_fc_tgtport_get(tgtport); 1123 1147 assoc->a_id = idx; 1124 1148 INIT_LIST_HEAD(&assoc->a_list); 1125 1149 kref_init(&assoc->ref); ··· 1168 1190 /* Send Disconnect now that all i/o has completed */ 1169 1191 nvmet_fc_xmt_disconnect_assoc(assoc); 1170 1192 1171 - nvmet_fc_free_hostport(assoc->hostport); 1193 + nvmet_fc_hostport_put(assoc->hostport); 1172 1194 spin_lock_irqsave(&tgtport->lock, flags); 1173 1195 oldls = assoc->rcv_disconn; 1174 1196 spin_unlock_irqrestore(&tgtport->lock, flags); ··· 1222 1244 dev_info(tgtport->dev, 1223 1245 "{%d:%d} Association deleted\n", 1224 1246 tgtport->fc_target_port.port_num, assoc->a_id); 1247 + 1248 + nvmet_fc_tgtport_put(tgtport); 1225 1249 } 1226 1250 1227 1251 static struct nvmet_fc_tgt_assoc * ··· 1435 1455 struct nvmet_fc_tgtport *tgtport = 1436 1456 container_of(ref, struct nvmet_fc_tgtport, ref); 1437 1457 struct device *dev = tgtport->dev; 1438 - unsigned long flags; 1439 - 1440 - 
spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1441 - list_del(&tgtport->tgt_list); 1442 - spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1443 1458 1444 1459 nvmet_fc_free_ls_iodlist(tgtport); 1445 1460 ··· 1595 1620 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) 1596 1621 { 1597 1622 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 1623 + unsigned long flags; 1624 + 1625 + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1626 + list_del(&tgtport->tgt_list); 1627 + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1598 1628 1599 1629 nvmet_fc_portentry_unbind_tgt(tgtport); 1600 1630
+41 -35
drivers/nvme/target/fcloop.c
··· 208 208 struct nvme_fc_local_port *localport; 209 209 struct list_head lport_list; 210 210 struct completion unreg_done; 211 + refcount_t ref; 211 212 }; 212 213 213 214 struct fcloop_lport_priv { ··· 240 239 struct fcloop_tport *tport; 241 240 struct fcloop_lport *lport; 242 241 struct list_head nport_list; 243 - struct kref ref; 242 + refcount_t ref; 244 243 u64 node_name; 245 244 u64 port_name; 246 245 u32 port_role; ··· 275 274 u32 inistate; 276 275 bool active; 277 276 bool aborted; 278 - struct kref ref; 277 + refcount_t ref; 279 278 struct work_struct fcp_rcv_work; 280 279 struct work_struct abort_rcv_work; 281 280 struct work_struct tio_done_work; ··· 479 478 if (targetport) { 480 479 tport = targetport->private; 481 480 spin_lock(&tport->lock); 482 - list_add_tail(&tport->ls_list, &tls_req->ls_list); 481 + list_add_tail(&tls_req->ls_list, &tport->ls_list); 483 482 spin_unlock(&tport->lock); 484 483 queue_work(nvmet_wq, &tport->ls_work); 485 484 } ··· 535 534 } 536 535 537 536 static void 538 - fcloop_tfcp_req_free(struct kref *ref) 539 - { 540 - struct fcloop_fcpreq *tfcp_req = 541 - container_of(ref, struct fcloop_fcpreq, ref); 542 - 543 - kfree(tfcp_req); 544 - } 545 - 546 - static void 547 537 fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req) 548 538 { 549 - kref_put(&tfcp_req->ref, fcloop_tfcp_req_free); 539 + if (!refcount_dec_and_test(&tfcp_req->ref)) 540 + return; 541 + 542 + kfree(tfcp_req); 550 543 } 551 544 552 545 static int 553 546 fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req) 554 547 { 555 - return kref_get_unless_zero(&tfcp_req->ref); 548 + return refcount_inc_not_zero(&tfcp_req->ref); 556 549 } 557 550 558 551 static void ··· 743 748 INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work); 744 749 INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work); 745 750 INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work); 746 - kref_init(&tfcp_req->ref); 751 + refcount_set(&tfcp_req->ref, 1); 747 752 748 753 
queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work); 749 754 ··· 996 1001 } 997 1002 998 1003 static void 999 - fcloop_nport_free(struct kref *ref) 1004 + fcloop_lport_put(struct fcloop_lport *lport) 1000 1005 { 1001 - struct fcloop_nport *nport = 1002 - container_of(ref, struct fcloop_nport, ref); 1006 + unsigned long flags; 1003 1007 1004 - kfree(nport); 1008 + if (!refcount_dec_and_test(&lport->ref)) 1009 + return; 1010 + 1011 + spin_lock_irqsave(&fcloop_lock, flags); 1012 + list_del(&lport->lport_list); 1013 + spin_unlock_irqrestore(&fcloop_lock, flags); 1014 + 1015 + kfree(lport); 1016 + } 1017 + 1018 + static int 1019 + fcloop_lport_get(struct fcloop_lport *lport) 1020 + { 1021 + return refcount_inc_not_zero(&lport->ref); 1005 1022 } 1006 1023 1007 1024 static void 1008 1025 fcloop_nport_put(struct fcloop_nport *nport) 1009 1026 { 1010 - kref_put(&nport->ref, fcloop_nport_free); 1027 + if (!refcount_dec_and_test(&nport->ref)) 1028 + return; 1029 + 1030 + kfree(nport); 1011 1031 } 1012 1032 1013 1033 static int 1014 1034 fcloop_nport_get(struct fcloop_nport *nport) 1015 1035 { 1016 - return kref_get_unless_zero(&nport->ref); 1036 + return refcount_inc_not_zero(&nport->ref); 1017 1037 } 1018 1038 1019 1039 static void ··· 1039 1029 1040 1030 /* release any threads waiting for the unreg to complete */ 1041 1031 complete(&lport->unreg_done); 1032 + 1033 + fcloop_lport_put(lport); 1042 1034 } 1043 1035 1044 1036 static void ··· 1152 1140 1153 1141 lport->localport = localport; 1154 1142 INIT_LIST_HEAD(&lport->lport_list); 1143 + refcount_set(&lport->ref, 1); 1155 1144 1156 1145 spin_lock_irqsave(&fcloop_lock, flags); 1157 1146 list_add_tail(&lport->lport_list, &fcloop_lports); ··· 1169 1156 return ret ? 
ret : count; 1170 1157 } 1171 1158 1172 - 1173 - static void 1174 - __unlink_local_port(struct fcloop_lport *lport) 1175 - { 1176 - list_del(&lport->lport_list); 1177 - } 1178 - 1179 1159 static int 1180 1160 __wait_localport_unreg(struct fcloop_lport *lport) 1181 1161 { ··· 1180 1174 1181 1175 if (!ret) 1182 1176 wait_for_completion(&lport->unreg_done); 1183 - 1184 - kfree(lport); 1185 1177 1186 1178 return ret; 1187 1179 } ··· 1203 1199 list_for_each_entry(tlport, &fcloop_lports, lport_list) { 1204 1200 if (tlport->localport->node_name == nodename && 1205 1201 tlport->localport->port_name == portname) { 1202 + if (!fcloop_lport_get(tlport)) 1203 + break; 1206 1204 lport = tlport; 1207 - __unlink_local_port(lport); 1208 1205 break; 1209 1206 } 1210 1207 } ··· 1215 1210 return -ENOENT; 1216 1211 1217 1212 ret = __wait_localport_unreg(lport); 1213 + fcloop_lport_put(lport); 1218 1214 1219 1215 return ret ? ret : count; 1220 1216 } ··· 1255 1249 newnport->port_role = opts->roles; 1256 1250 if (opts->mask & NVMF_OPT_FCADDR) 1257 1251 newnport->port_id = opts->fcaddr; 1258 - kref_init(&newnport->ref); 1252 + refcount_set(&newnport->ref, 1); 1259 1253 1260 1254 spin_lock_irqsave(&fcloop_lock, flags); 1261 1255 ··· 1643 1637 for (;;) { 1644 1638 lport = list_first_entry_or_null(&fcloop_lports, 1645 1639 typeof(*lport), lport_list); 1646 - if (!lport) 1640 + if (!lport || !fcloop_lport_get(lport)) 1647 1641 break; 1648 - 1649 - __unlink_local_port(lport); 1650 1642 1651 1643 spin_unlock_irqrestore(&fcloop_lock, flags); 1652 1644 1653 1645 ret = __wait_localport_unreg(lport); 1654 1646 if (ret) 1655 1647 pr_warn("%s: Failed deleting local port\n", __func__); 1648 + 1649 + fcloop_lport_put(lport); 1656 1650 1657 1651 spin_lock_irqsave(&fcloop_lock, flags); 1658 1652 }