Merge tag 'block-6.19-20260205' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

- Revert of a loop change which caused regressions for some users
  (actually a revert of two commits, where one is just an existing fix
  for the offending commit)

- NVMe pull via Keith:
    - Fix a NULL pointer access when setting up DMA mappings
    - Fix an invalid memory access from a malformed TCP PDU

* tag 'block-6.19-20260205' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  loop: revert exclusive opener loop status change
  nvmet-tcp: add bounds checks in nvmet_tcp_build_pdu_iovec
  nvme-pci: handle changing device dma map requirements

3 files changed: +59 -48

drivers/block/loop.c | +12 -33
···
 }
 
 static int
-loop_set_status(struct loop_device *lo, blk_mode_t mode,
-		struct block_device *bdev, const struct loop_info64 *info)
+loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 {
 	int err;
 	bool partscan = false;
 	bool size_changed = false;
 	unsigned int memflags;
 
-	/*
-	 * If we don't hold exclusive handle for the device, upgrade to it
-	 * here to avoid changing device under exclusive owner.
-	 */
-	if (!(mode & BLK_OPEN_EXCL)) {
-		err = bd_prepare_to_claim(bdev, loop_set_status, NULL);
-		if (err)
-			goto out_reread_partitions;
-	}
-
 	err = mutex_lock_killable(&lo->lo_mutex);
 	if (err)
-		goto out_abort_claiming;
-
+		return err;
 	if (lo->lo_state != Lo_bound) {
 		err = -ENXIO;
 		goto out_unlock;
···
 	}
 out_unlock:
 	mutex_unlock(&lo->lo_mutex);
-out_abort_claiming:
-	if (!(mode & BLK_OPEN_EXCL))
-		bd_abort_claiming(bdev, loop_set_status);
-out_reread_partitions:
 	if (partscan)
 		loop_reread_partitions(lo);
 
···
 }
 
 static int
-loop_set_status_old(struct loop_device *lo, blk_mode_t mode,
-		struct block_device *bdev,
-		const struct loop_info __user *arg)
+loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
 {
 	struct loop_info info;
 	struct loop_info64 info64;
···
 	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
 		return -EFAULT;
 	loop_info64_from_old(&info, &info64);
-	return loop_set_status(lo, mode, bdev, &info64);
+	return loop_set_status(lo, &info64);
 }
 
 static int
-loop_set_status64(struct loop_device *lo, blk_mode_t mode,
-		struct block_device *bdev,
-		const struct loop_info64 __user *arg)
+loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
 {
 	struct loop_info64 info64;
 
 	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
 		return -EFAULT;
-	return loop_set_status(lo, mode, bdev, &info64);
+	return loop_set_status(lo, &info64);
 }
 
 static int
···
 	case LOOP_SET_STATUS:
 		err = -EPERM;
 		if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_status_old(lo, mode, bdev, argp);
+			err = loop_set_status_old(lo, argp);
 		break;
 	case LOOP_GET_STATUS:
 		return loop_get_status_old(lo, argp);
 	case LOOP_SET_STATUS64:
 		err = -EPERM;
 		if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_status64(lo, mode, bdev, argp);
+			err = loop_set_status64(lo, argp);
 		break;
 	case LOOP_GET_STATUS64:
 		return loop_get_status64(lo, argp);
···
 }
 
 static int
-loop_set_status_compat(struct loop_device *lo, blk_mode_t mode,
-		struct block_device *bdev,
-		const struct compat_loop_info __user *arg)
+loop_set_status_compat(struct loop_device *lo,
+		const struct compat_loop_info __user *arg)
 {
 	struct loop_info64 info64;
 	int ret;
···
 	ret = loop_info64_from_compat(arg, &info64);
 	if (ret < 0)
 		return ret;
-	return loop_set_status(lo, mode, bdev, &info64);
+	return loop_set_status(lo, &info64);
 }
 
 static int
···
 
 	switch(cmd) {
 	case LOOP_SET_STATUS:
-		err = loop_set_status_compat(lo, mode, bdev,
+		err = loop_set_status_compat(lo,
 			(const struct compat_loop_info __user *)arg);
 		break;
 	case LOOP_GET_STATUS:
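
For context, loop_set_status() is reached from the LOOP_SET_STATUS / LOOP_SET_STATUS64
ioctl cases shown above; with the revert it no longer tries to claim the block device
exclusively and only serializes on lo->lo_mutex. A minimal userspace sketch of that
ioctl path follows (the device path and the 4096-byte offset are illustrative, and the
loop device is assumed to already be bound to a backing file):

/* Userspace sketch: tweak an already-bound loop device via LOOP_SET_STATUS64. */
#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct loop_info64 info;
	int fd;

	/* A plain (non-exclusive) read-write open is enough to issue the ioctl. */
	fd = open("/dev/loop0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/loop0");
		return 1;
	}

	memset(&info, 0, sizeof(info));
	info.lo_offset = 4096;	/* illustrative: skip a 4 KiB header in the backing file */

	if (ioctl(fd, LOOP_SET_STATUS64, &info) < 0) {
		perror("LOOP_SET_STATUS64");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}
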
drivers/nvme/host/pci.c | +30 -15
···
 	nvme_free_descriptors(req);
 }
 
+static bool nvme_pci_prp_save_mapping(struct request *req,
+				      struct device *dma_dev,
+				      struct blk_dma_iter *iter)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+	if (dma_use_iova(&iod->dma_state) || !dma_need_unmap(dma_dev))
+		return true;
+
+	if (!iod->nr_dma_vecs) {
+		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+		iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
+					      GFP_ATOMIC);
+		if (!iod->dma_vecs) {
+			iter->status = BLK_STS_RESOURCE;
+			return false;
+		}
+	}
+
+	iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
+	iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
+	iod->nr_dma_vecs++;
+	return true;
+}
+
 static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
 		struct blk_dma_iter *iter)
 {
···
 		return true;
 	if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
 		return false;
-	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
-		iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
-		iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
-		iod->nr_dma_vecs++;
-	}
-	return true;
+	return nvme_pci_prp_save_mapping(req, dma_dev, iter);
 }
 
 static blk_status_t nvme_pci_setup_data_prp(struct request *req,
···
 	unsigned int prp_len, i;
 	__le64 *prp_list;
 
-	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) {
-		iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
-				GFP_ATOMIC);
-		if (!iod->dma_vecs)
-			return BLK_STS_RESOURCE;
-		iod->dma_vecs[0].addr = iter->addr;
-		iod->dma_vecs[0].len = iter->len;
-		iod->nr_dma_vecs = 1;
-	}
+	if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter))
+		return iter->status;
 
 	/*
 	 * PRP1 always points to the start of the DMA transfers.
···
 	iod->nr_descriptors = 0;
 	iod->total_len = 0;
 	iod->meta_total_len = 0;
+	iod->nr_dma_vecs = 0;
 
 	ret = nvme_setup_cmd(req->q->queuedata, req);
 	if (ret)
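
The interplay that matters here: the dma_vecs array is now allocated lazily by
nvme_pci_prp_save_mapping() on the first mapping that actually needs to be recorded,
which is why iod->nr_dma_vecs has to be zeroed at request setup time. Below is a
standalone sketch of that pattern, not the driver code; malloc() stands in for
mempool_alloc(), and the types and the 128-entry bound are made up for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dma_vec { unsigned long long addr; unsigned int len; };

struct io_desc {
	struct dma_vec *vecs;	/* allocated on first use */
	unsigned int nr_vecs;	/* must start at 0 for every request */
};

static bool save_mapping(struct io_desc *iod, unsigned long long addr,
			 unsigned int len, bool need_unmap)
{
	if (!need_unmap)
		return true;		/* nothing to record for this device */

	if (!iod->nr_vecs) {		/* first recorded mapping: allocate */
		iod->vecs = malloc(128 * sizeof(*iod->vecs));
		if (!iod->vecs)
			return false;
	}

	iod->vecs[iod->nr_vecs].addr = addr;
	iod->vecs[iod->nr_vecs].len = len;
	iod->nr_vecs++;
	return true;
}

int main(void)
{
	/* Per-request init: a stale nr_vecs would skip the allocation above
	 * and dereference a NULL vecs pointer. */
	struct io_desc iod = { .vecs = NULL, .nr_vecs = 0 };

	if (!save_mapping(&iod, 0x1000, 512, true) ||
	    !save_mapping(&iod, 0x2000, 512, true))
		return 1;

	printf("recorded %u mappings\n", iod.nr_vecs);
	free(iod.vecs);
	return 0;
}
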
drivers/nvme/target/tcp.c | +17
···
 	cmd->req.sg = NULL;
 }
 
+static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue);
+
 static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 {
 	struct bio_vec *iov = cmd->iov;
 	struct scatterlist *sg;
 	u32 length, offset, sg_offset;
+	unsigned int sg_remaining;
 	int nr_pages;
 
 	length = cmd->pdu_len;
···
 	offset = cmd->rbytes_done;
 	cmd->sg_idx = offset / PAGE_SIZE;
 	sg_offset = offset % PAGE_SIZE;
+	if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) {
+		nvmet_tcp_fatal_error(cmd->queue);
+		return;
+	}
 	sg = &cmd->req.sg[cmd->sg_idx];
+	sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
 
 	while (length) {
+		if (!sg_remaining) {
+			nvmet_tcp_fatal_error(cmd->queue);
+			return;
+		}
+		if (!sg->length || sg->length <= sg_offset) {
+			nvmet_tcp_fatal_error(cmd->queue);
+			return;
+		}
 		u32 iov_len = min_t(u32, length, sg->length - sg_offset);
 
 		bvec_set_page(iov, sg_page(sg), iov_len,
···
 
 		length -= iov_len;
 		sg = sg_next(sg);
+		sg_remaining--;
 		iov++;
 		sg_offset = 0;
 	}
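
A simplified standalone model of the invariant the new checks enforce: the window claimed
by the PDU (rbytes_done bytes already received, pdu_len bytes still to come) must stay
inside the command's scatterlist. The function name is made up, and fixed PAGE_SIZE-sized
segments are an assumption for illustration; the real scatterlist entries can vary in
length, which is why the driver above also re-checks sg->length against sg_offset on
every loop iteration:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static bool pdu_window_fits(unsigned int sg_cnt, unsigned int rbytes_done,
			    unsigned int pdu_len)
{
	unsigned long long capacity = (unsigned long long)sg_cnt * PAGE_SIZE;

	if (!sg_cnt)
		return false;	/* no data buffer was set up for this command */

	/* the byte range the PDU wants to fill must fit in the buffer */
	return (unsigned long long)rbytes_done + pdu_len <= capacity;
}

int main(void)
{
	/* A 2-entry SGL: an 8192-byte PDU at offset 0 fits ... */
	printf("%d\n", pdu_window_fits(2, 0, 8192));
	/* ... but a malformed PDU claiming 4 KiB past the end does not. */
	printf("%d\n", pdu_window_fits(2, 8192, 4096));
	return 0;
}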