Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'block-6.18-20251031' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

- Fix blk-crypto reporting EIO when EINVAL is the correct error code

- Two bug fixes for the block zone support

- NVMe pull request via Keith:
- Target side authentication fixup
- Peer-to-peer metadata fixup

- null_blk DMA alignment fix

* tag 'block-6.18-20251031' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
null_blk: set dma alignment to logical block size
blk-crypto: use BLK_STS_INVAL for alignment errors
block: make REQ_OP_ZONE_OPEN a write operation
block: fix op_is_zone_mgmt() to handle REQ_OP_ZONE_RESET_ALL
nvme-pci: use blk_map_iter for p2p metadata
nvmet-auth: update sc_c in host response

+21 -11
+1 -1
block/blk-crypto.c
@@ -292,7 +292,7 @@
 	}
 
 	if (!bio_crypt_check_alignment(bio)) {
-		bio->bi_status = BLK_STS_IOERR;
+		bio->bi_status = BLK_STS_INVAL;
 		goto fail;
 	}
 
+1
drivers/block/null_blk/main.c
@@ -1949,6 +1949,7 @@
 		.logical_block_size	= dev->blocksize,
 		.physical_block_size	= dev->blocksize,
 		.max_hw_sectors		= dev->max_sectors,
+		.dma_alignment		= dev->blocksize - 1,
 	};
 
 	struct nullb *nullb;
+10 -3
drivers/nvme/host/pci.c
@@ -1042,7 +1042,7 @@
 	return nvme_pci_setup_data_prp(req, &iter);
 }
 
-static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
+static blk_status_t nvme_pci_setup_meta_iter(struct request *req)
 {
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	unsigned int entries = req->nr_integrity_segments;
@@ -1072,10 +1072,14 @@
 	 * descriptor provides an explicit length, so we're relying on that
 	 * mechanism to catch any misunderstandings between the application and
 	 * device.
+	 *
+	 * P2P DMA also needs to use the blk_dma_iter method, so mptr setup
+	 * leverages this routine when that happens.
 	 */
-	if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) {
+	if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) ||
+	    (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) {
 		iod->cmd.common.metadata = cpu_to_le64(iter.addr);
 		iod->meta_total_len = iter.len;
 		iod->meta_dma = iter.addr;
@@ -1118,6 +1114,9 @@
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	struct bio_vec bv = rq_integrity_vec(req);
 
+	if (is_pci_p2pdma_page(bv.bv_page))
+		return nvme_pci_setup_meta_iter(req);
+
 	iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);
 	if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))
 		return BLK_STS_IOERR;
@@ -1135,7 +1128,7 @@
 
 	if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) &&
 	    nvme_pci_metadata_use_sgls(req))
-		return nvme_pci_setup_meta_sgls(req);
+		return nvme_pci_setup_meta_iter(req);
 	return nvme_pci_setup_meta_mptr(req);
 }
+3 -2
drivers/nvme/target/auth.c
@@ -298,7 +298,7 @@
 	const char *hash_name;
 	u8 *challenge = req->sq->dhchap_c1;
 	struct nvme_dhchap_key *transformed_key;
-	u8 buf[4];
+	u8 buf[4], sc_c = ctrl->concat ? 1 : 0;
 	int ret;
 
 	hash_name = nvme_auth_hmac_name(ctrl->shash_id);
@@ -367,13 +367,14 @@
 	ret = crypto_shash_update(shash, buf, 2);
 	if (ret)
 		goto out;
-	memset(buf, 0, 4);
+	*buf = sc_c;
 	ret = crypto_shash_update(shash, buf, 1);
 	if (ret)
 		goto out;
 	ret = crypto_shash_update(shash, "HostHost", 8);
 	if (ret)
 		goto out;
+	memset(buf, 0, 4);
 	ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
 	if (ret)
 		goto out;
+6 -5
include/linux/blk_types.h
@@ -341,16 +341,16 @@
 	/* write the zero filled sector many times */
 	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
 	/* Open a zone */
-	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
+	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)11,
 	/* Close a zone */
-	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
+	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)13,
 	/* Transition a zone to full */
-	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)13,
+	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)15,
 	/* reset a zone write pointer */
-	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15,
+	REQ_OP_ZONE_RESET	= (__force blk_opf_t)17,
 	/* reset all the zone present on the device */
-	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17,
+	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)19,
 
 	/* Driver private requests */
 	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
@@ -478,6 +478,7 @@
 {
 	switch (op & REQ_OP_MASK) {
 	case REQ_OP_ZONE_RESET:
+	case REQ_OP_ZONE_RESET_ALL:
 	case REQ_OP_ZONE_OPEN:
 	case REQ_OP_ZONE_CLOSE:
 	case REQ_OP_ZONE_FINISH: