Merge tag 'block-5.15-2021-10-01' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
"A few block fixes for this release:

- Revert a BFQ commit that causes breakage for people. Unfortunately it
  was also auto-selected for stable, so 5.14.7 now suffers from it too.
  Hopefully stable will pick up this revert quickly so we can clear the
  issue on that end as well.

- Add a quirk for Apple NVMe controllers, which, due to their
  non-compliance, broke with the introduction of command sequence
  numbers (Keith)

- Use shifts in nbd, fixing a __divdi3 issue (Nick)"

* tag 'block-5.15-2021-10-01' of git://git.kernel.dk/linux-block:
nbd: use shifts rather than multiplies
Revert "block, bfq: honor already-setup queue merges"
nvme: add command id quirk for apple controllers

+31 -27
+3 -13
block/bfq-iosched.c
···
 	 * are likely to increase the throughput.
 	 */
 	bfqq->new_bfqq = new_bfqq;
-	/*
-	 * The above assignment schedules the following redirections:
-	 * each time some I/O for bfqq arrives, the process that
-	 * generated that I/O is disassociated from bfqq and
-	 * associated with new_bfqq. Here we increases new_bfqq->ref
-	 * in advance, adding the number of processes that are
-	 * expected to be associated with new_bfqq as they happen to
-	 * issue I/O.
-	 */
 	new_bfqq->ref += process_refs;
 	return new_bfqq;
 }
···
 		void *io_struct, bool request, struct bfq_io_cq *bic)
 {
 	struct bfq_queue *in_service_bfqq, *new_bfqq;
-
-	/* if a merge has already been setup, then proceed with that first */
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
 
 	/*
 	 * Check delayed stable merge for rotational or non-queueing
···
 	 */
 	if (bfq_too_late_for_merging(bfqq))
 		return NULL;
+
+	if (bfqq->new_bfqq)
+		return bfqq->new_bfqq;
 
 	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
 		return NULL;
+17 -12
drivers/block/nbd.c
···
 
 	atomic_t recv_threads;
 	wait_queue_head_t recv_wq;
-	loff_t blksize;
+	unsigned int blksize_bits;
 	loff_t bytesize;
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	struct dentry *dbg_dir;
 #endif
 };
+
+static inline unsigned int nbd_blksize(struct nbd_config *config)
+{
+	return 1u << config->blksize_bits;
+}
 
 struct nbd_device {
 	struct blk_mq_tag_set tag_set;
···
 
 #define NBD_MAGIC 0x68797548
 
-#define NBD_DEF_BLKSIZE 1024
+#define NBD_DEF_BLKSIZE_BITS 10
 
 static unsigned int nbds_max = 16;
 static int max_part = 16;
···
 		loff_t blksize)
 {
 	if (!blksize)
-		blksize = NBD_DEF_BLKSIZE;
+		blksize = 1u << NBD_DEF_BLKSIZE_BITS;
 	if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
 		return -EINVAL;
 
 	nbd->config->bytesize = bytesize;
-	nbd->config->blksize = blksize;
+	nbd->config->blksize_bits = __ffs(blksize);
 
 	if (!nbd->task_recv)
 		return 0;
···
 		args->index = i;
 		queue_work(nbd->recv_workq, &args->work);
 	}
-	return nbd_set_size(nbd, config->bytesize, config->blksize);
+	return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
 }
 
 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
···
 	case NBD_SET_BLKSIZE:
 		return nbd_set_size(nbd, config->bytesize, arg);
 	case NBD_SET_SIZE:
-		return nbd_set_size(nbd, arg, config->blksize);
+		return nbd_set_size(nbd, arg, nbd_blksize(config));
 	case NBD_SET_SIZE_BLOCKS:
-		if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
+		if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
 			return -EINVAL;
-		return nbd_set_size(nbd, bytesize, config->blksize);
+		return nbd_set_size(nbd, bytesize, nbd_blksize(config));
 	case NBD_SET_TIMEOUT:
 		nbd_set_cmd_timeout(nbd, arg);
 		return 0;
···
 	atomic_set(&config->recv_threads, 0);
 	init_waitqueue_head(&config->recv_wq);
 	init_waitqueue_head(&config->conn_wait);
-	config->blksize = NBD_DEF_BLKSIZE;
+	config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
 	atomic_set(&config->live_connections, 0);
 	try_module_get(THIS_MODULE);
 	return config;
···
 	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
 	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
 	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
-	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
+	debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
 	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
 
 	return 0;
···
 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
 {
 	struct nbd_config *config = nbd->config;
-	u64 bsize = config->blksize;
+	u64 bsize = nbd_blksize(config);
 	u64 bytes = config->bytesize;
 
 	if (info->attrs[NBD_ATTR_SIZE_BYTES])
···
 	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
 		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
 
-	if (bytes != config->bytesize || bsize != config->blksize)
+	if (bytes != config->bytesize || bsize != nbd_blksize(config))
 		return nbd_set_size(nbd, bytes, bsize);
 	return 0;
 }
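The point of the conversion above: on 32-bit builds, dividing or multiplying a
64-bit loff_t by the block size can be lowered by the compiler into libgcc
helper calls such as __divdi3, which the kernel generally does not link
against. Storing the block size as a power-of-two exponent (blksize_bits)
turns those operations into shifts, and check_shl_overflow() takes over from
check_mul_overflow() for the byte-count calculation. A minimal userspace
sketch of the idea follows; the struct and field names are stand-ins for
illustration, not the driver's own:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for nbd's bytesize/block-size bookkeeping. */
	struct cfg {
		unsigned int blksize_bits;	/* e.g. 10 for a 1024-byte block size */
		int64_t bytesize;		/* loff_t-like 64-bit byte count */
	};

	/* Block size recovered from the exponent, as nbd_blksize() does. */
	static unsigned int blksize(const struct cfg *c)
	{
		return 1u << c->blksize_bits;
	}

	int main(void)
	{
		struct cfg c = { .blksize_bits = 10, .bytesize = 1 << 20 };

		/*
		 * On a 32-bit target, a 64-bit divide such as
		 *     int64_t blocks = c.bytesize / blksize(&c);
		 * may be emitted as a libgcc call (__divdi3).  A right shift by
		 * the exponent computes the same quotient with no helper call.
		 */
		int64_t blocks = c.bytesize >> c.blksize_bits;

		printf("%lld blocks of %u bytes\n", (long long)blocks, blksize(&c));
		return 0;
	}

Built with -m32, the shift variant needs no runtime division helper, which is
what the patch relies on.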
+3 -1
drivers/nvme/host/core.c
···
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
 {
 	struct nvme_command *cmd = nvme_req(req)->cmd;
+	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
 	blk_status_t ret = BLK_STS_OK;
 
 	if (!(req->rq_flags & RQF_DONTPREP)) {
···
 		return BLK_STS_IOERR;
 	}
 
-	nvme_req(req)->genctr++;
+	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
+		nvme_req(req)->genctr++;
 	cmd->common.command_id = nvme_cid(req);
 	trace_nvme_setup_cmd(req, cmd);
 	return ret;
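For context on the quirk: the driver folds a small per-request generation
counter into the upper bits of the 16-bit command_id, keeping the blk-mq tag
in the low bits, so that completions carrying a stale or bogus ID can be
detected. Apple's controllers apparently only handle command_id values in the
plain tag range, so NVME_QUIRK_SKIP_CID_GEN stops the counter from advancing
and the ID degenerates to just the tag. A rough illustration of the packing;
the shift and mask values below are assumptions for the sketch, not the
driver's actual definitions:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Illustrative only: a small generation counter in the upper bits of
	 * the 16-bit command identifier, the queue tag in the lower bits.
	 */
	#define CID_GENCTR_SHIFT	12
	#define CID_GENCTR_MASK		0xf
	#define CID_TAG_MASK		0xfff

	static uint16_t make_cid(uint8_t genctr, uint16_t tag, int skip_gen)
	{
		uint16_t gen = skip_gen ? 0 : (genctr & CID_GENCTR_MASK);

		return (uint16_t)((gen << CID_GENCTR_SHIFT) | (tag & CID_TAG_MASK));
	}

	int main(void)
	{
		/* A compliant controller echoes whatever command_id it was given. */
		printf("cid with genctr:    0x%04x\n", make_cid(3, 42, 0));

		/*
		 * With the quirk the generation bits never get set, so the
		 * command_id stays equal to the tag -- the only values a
		 * non-compliant controller handles correctly.
		 */
		printf("cid with quirk set: 0x%04x\n", make_cid(3, 42, 1));
		return 0;
	}

In the real driver nvme_cid() performs the equivalent packing; with the quirk
set the generation bits simply stay at their initial value.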
+6
drivers/nvme/host/nvme.h
···
 	 * 48 bits.
 	 */
 	NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
+
+	/*
+	 * The controller requires the command_id value be limited, so skip
+	 * encoding the generation sequence number.
+	 */
+	NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
 };
 
 /*
+2 -1
drivers/nvme/host/pci.c
···
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
 		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
 				NVME_QUIRK_128_BYTES_SQES |
-				NVME_QUIRK_SHARED_TAGS },
+				NVME_QUIRK_SHARED_TAGS |
+				NVME_QUIRK_SKIP_CID_GEN },
 
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }