Merge tag 'block-6.19-20260205' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

- Revert a loop change that caused regressions for some users
  (actually a revert of two commits, where one is just an existing
  fix for the offending commit)

- NVMe pull via Keith:
    - Fix NULL pointer access when setting up DMA mappings
    - Fix invalid memory access from a malformed TCP PDU

* tag 'block-6.19-20260205' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
loop: revert exclusive opener loop status change
nvmet-tcp: add bounds checks in nvmet_tcp_build_pdu_iovec
nvme-pci: handle changing device dma map requirements

3 files changed, 59 insertions(+), 48 deletions(-)

drivers/block/loop.c (+12 -33)
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1225,28 +1225,16 @@
 }
 
 static int
-loop_set_status(struct loop_device *lo, blk_mode_t mode,
-		struct block_device *bdev, const struct loop_info64 *info)
+loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 {
 	int err;
 	bool partscan = false;
 	bool size_changed = false;
 	unsigned int memflags;
 
-	/*
-	 * If we don't hold exclusive handle for the device, upgrade to it
-	 * here to avoid changing device under exclusive owner.
-	 */
-	if (!(mode & BLK_OPEN_EXCL)) {
-		err = bd_prepare_to_claim(bdev, loop_set_status, NULL);
-		if (err)
-			goto out_reread_partitions;
-	}
-
 	err = mutex_lock_killable(&lo->lo_mutex);
 	if (err)
-		goto out_abort_claiming;
-
+		return err;
 	if (lo->lo_state != Lo_bound) {
 		err = -ENXIO;
 		goto out_unlock;
@@ -1285,10 +1273,6 @@
 	}
 out_unlock:
 	mutex_unlock(&lo->lo_mutex);
-out_abort_claiming:
-	if (!(mode & BLK_OPEN_EXCL))
-		bd_abort_claiming(bdev, loop_set_status);
-out_reread_partitions:
 	if (partscan)
 		loop_reread_partitions(lo);
 
@@ -1368,9 +1352,7 @@
 }
 
 static int
-loop_set_status_old(struct loop_device *lo, blk_mode_t mode,
-		struct block_device *bdev,
-		const struct loop_info __user *arg)
+loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
 {
 	struct loop_info info;
 	struct loop_info64 info64;
@@ -1378,19 +1360,17 @@
 	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
 		return -EFAULT;
 	loop_info64_from_old(&info, &info64);
-	return loop_set_status(lo, mode, bdev, &info64);
+	return loop_set_status(lo, &info64);
 }
 
 static int
-loop_set_status64(struct loop_device *lo, blk_mode_t mode,
-		struct block_device *bdev,
-		const struct loop_info64 __user *arg)
+loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
 {
 	struct loop_info64 info64;
 
 	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
 		return -EFAULT;
-	return loop_set_status(lo, mode, bdev, &info64);
+	return loop_set_status(lo, &info64);
 }
 
 static int
@@ -1569,14 +1549,14 @@
 	case LOOP_SET_STATUS:
 		err = -EPERM;
 		if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_status_old(lo, mode, bdev, argp);
+			err = loop_set_status_old(lo, argp);
 		break;
 	case LOOP_GET_STATUS:
 		return loop_get_status_old(lo, argp);
 	case LOOP_SET_STATUS64:
 		err = -EPERM;
 		if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_status64(lo, mode, bdev, argp);
+			err = loop_set_status64(lo, argp);
 		break;
 	case LOOP_GET_STATUS64:
 		return loop_get_status64(lo, argp);
@@ -1670,9 +1650,8 @@
 }
 
 static int
-loop_set_status_compat(struct loop_device *lo, blk_mode_t mode,
-		       struct block_device *bdev,
-		       const struct compat_loop_info __user *arg)
+loop_set_status_compat(struct loop_device *lo,
+		       const struct compat_loop_info __user *arg)
 {
 	struct loop_info64 info64;
 	int ret;
@@ -1680,7 +1659,7 @@
 	ret = loop_info64_from_compat(arg, &info64);
 	if (ret < 0)
 		return ret;
-	return loop_set_status(lo, mode, bdev, &info64);
+	return loop_set_status(lo, &info64);
 }
 
 static int
@@ -1706,7 +1685,7 @@
 
 	switch(cmd) {
 	case LOOP_SET_STATUS:
-		err = loop_set_status_compat(lo, mode, bdev,
+		err = loop_set_status_compat(lo,
 			(const struct compat_loop_info __user *)arg);
 		break;
 	case LOOP_GET_STATUS:
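
For reference, the restored behavior is visible from userspace: LOOP_SET_STATUS64
once again works through a plain open, with no hidden exclusive claim taken on the
caller's behalf. A minimal sketch, assuming /dev/loop0 is already bound to a
backing file (device path and offset are illustrative, error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

int main(void)
{
	struct loop_info64 info;
	int fd = open("/dev/loop0", O_RDWR);	/* plain open, no O_EXCL */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&info, 0, sizeof(info));
	if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0) {
		perror("LOOP_GET_STATUS64");
		close(fd);
		return 1;
	}

	/* Change one status field and write it back; before the revert,
	 * this path could take a hidden exclusive claim on the device. */
	info.lo_offset = 4096;
	if (ioctl(fd, LOOP_SET_STATUS64, &info) < 0)
		perror("LOOP_SET_STATUS64");

	close(fd);
	return 0;
}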
drivers/nvme/host/pci.c (+30 -15)
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -816,6 +816,32 @@
 	nvme_free_descriptors(req);
 }
 
+static bool nvme_pci_prp_save_mapping(struct request *req,
+				      struct device *dma_dev,
+				      struct blk_dma_iter *iter)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+	if (dma_use_iova(&iod->dma_state) || !dma_need_unmap(dma_dev))
+		return true;
+
+	if (!iod->nr_dma_vecs) {
+		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+		iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
+					      GFP_ATOMIC);
+		if (!iod->dma_vecs) {
+			iter->status = BLK_STS_RESOURCE;
+			return false;
+		}
+	}
+
+	iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
+	iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
+	iod->nr_dma_vecs++;
+	return true;
+}
+
 static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
 				   struct blk_dma_iter *iter)
 {
@@ -825,12 +851,7 @@
 		return true;
 	if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
 		return false;
-	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
-		iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
-		iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
-		iod->nr_dma_vecs++;
-	}
-	return true;
+	return nvme_pci_prp_save_mapping(req, dma_dev, iter);
 }
 
 static blk_status_t nvme_pci_setup_data_prp(struct request *req,
@@ -843,15 +864,8 @@
 	unsigned int prp_len, i;
 	__le64 *prp_list;
 
-	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) {
-		iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
-				GFP_ATOMIC);
-		if (!iod->dma_vecs)
-			return BLK_STS_RESOURCE;
-		iod->dma_vecs[0].addr = iter->addr;
-		iod->dma_vecs[0].len = iter->len;
-		iod->nr_dma_vecs = 1;
-	}
+	if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter))
+		return iter->status;
 
 	/*
 	 * PRP1 always points to the start of the DMA transfers.
@@ -1219,6 +1233,7 @@
 	iod->nr_descriptors = 0;
 	iod->total_len = 0;
 	iod->meta_total_len = 0;
+	iod->nr_dma_vecs = 0;
 
 	ret = nvme_setup_cmd(req->q->queuedata, req);
 	if (ret)
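
Judging from the diff, the old code allocated dma_vecs only once, up front in
nvme_pci_setup_data_prp(), and later iterations appended to the array
unconditionally; if the need for unmap vectors only showed up mid-request, that
append went through a NULL pointer. The new helper re-checks at every mapping
and allocates on first need. A standalone sketch of that lazy-allocation
pattern, with all names hypothetical and plain malloc() standing in for the
driver's mempool:

#include <stdbool.h>
#include <stdlib.h>

#define MAX_VECS 128	/* sketch-only bound; the driver sizes a mempool */

struct dma_vec {
	unsigned long long addr;
	unsigned int len;
};

struct iod {
	struct dma_vec *dma_vecs;	/* stays NULL until first needed */
	unsigned int nr_dma_vecs;
};

/* Record one mapping, allocating the tracking array on first use. */
static bool save_mapping(struct iod *iod, unsigned long long addr,
			 unsigned int len, bool need_unmap)
{
	if (!need_unmap)
		return true;	/* nothing to remember for teardown */

	if (!iod->dma_vecs) {
		iod->dma_vecs = malloc(MAX_VECS * sizeof(*iod->dma_vecs));
		if (!iod->dma_vecs)
			return false;	/* maps to BLK_STS_RESOURCE */
	}
	if (iod->nr_dma_vecs >= MAX_VECS)
		return false;

	iod->dma_vecs[iod->nr_dma_vecs].addr = addr;
	iod->dma_vecs[iod->nr_dma_vecs].len = len;
	iod->nr_dma_vecs++;
	return true;
}

int main(void)
{
	struct iod iod = { 0 };

	/* Mappings that need no unmap are skipped; the first one that
	 * does need it triggers the allocation. */
	save_mapping(&iod, 0x1000, 512, false);
	if (!save_mapping(&iod, 0x2000, 512, true))
		return 1;

	free(iod.dma_vecs);
	return iod.nr_dma_vecs == 1 ? 0 : 1;
}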
drivers/nvme/target/tcp.c (+17 -0)
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -349,10 +349,13 @@
 	cmd->req.sg = NULL;
 }
 
+static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue);
+
 static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 {
 	struct bio_vec *iov = cmd->iov;
 	struct scatterlist *sg;
 	u32 length, offset, sg_offset;
+	unsigned int sg_remaining;
 	int nr_pages;
 
 	length = cmd->pdu_len;
@@ -361,9 +364,22 @@
 	offset = cmd->rbytes_done;
 	cmd->sg_idx = offset / PAGE_SIZE;
 	sg_offset = offset % PAGE_SIZE;
+	if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) {
+		nvmet_tcp_fatal_error(cmd->queue);
+		return;
+	}
 	sg = &cmd->req.sg[cmd->sg_idx];
+	sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
 
 	while (length) {
+		if (!sg_remaining) {
+			nvmet_tcp_fatal_error(cmd->queue);
+			return;
+		}
+		if (!sg->length || sg->length <= sg_offset) {
+			nvmet_tcp_fatal_error(cmd->queue);
+			return;
+		}
 		u32 iov_len = min_t(u32, length, sg->length - sg_offset);
 
 		bvec_set_page(iov, sg_page(sg), iov_len,
@@ -371,6 +387,7 @@
 
 		length -= iov_len;
 		sg = sg_next(sg);
+		sg_remaining--;
 		iov++;
 		sg_offset = 0;
 	}
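
The pattern in this fix: cmd->pdu_len and cmd->rbytes_done come off the wire,
so the walk is now bounded by what the command's scatterlist actually holds,
and any mismatch tears the queue down via nvmet_tcp_fatal_error(). The same
validation in a self-contained sketch, with illustrative names and a plain
length field standing in for struct scatterlist:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct seg {
	uint32_t len;	/* bytes available in this local segment */
};

/*
 * Check an untrusted (start segment, offset, length) triple against a
 * local segment table before building any iovec from it. Returning
 * false corresponds to the nvmet_tcp_fatal_error() teardown above.
 */
static bool pdu_fits_segments(const struct seg *segs, unsigned int nr_segs,
			      uint32_t start_idx, uint32_t seg_offset,
			      uint32_t length)
{
	unsigned int remaining;

	/* The claimed starting point must land inside the table. */
	if (!nr_segs || start_idx >= nr_segs)
		return false;
	remaining = nr_segs - start_idx;

	while (length) {
		const struct seg *s;
		uint32_t chunk;

		/* The wire claims more data than we have segments for. */
		if (!remaining)
			return false;
		s = &segs[start_idx];

		/* The offset must leave room inside this segment. */
		if (!s->len || s->len <= seg_offset)
			return false;

		chunk = s->len - seg_offset;
		if (chunk > length)
			chunk = length;
		length -= chunk;

		start_idx++;
		remaining--;
		seg_offset = 0;
	}
	return true;
}

int main(void)
{
	struct seg segs[2] = { { 4096 }, { 4096 } };

	printf("in-bounds: %d\n", pdu_fits_segments(segs, 2, 0, 0, 8192));
	printf("too long:  %d\n", pdu_fits_segments(segs, 2, 0, 0, 8193));
	return 0;
}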