Merge tag 'block-5.15-2021-10-01' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
"A few block fixes for this release:

- Revert a BFQ commit that causes breakage for people. Unfortunately
it was auto-selected for stable as well, so 5.14.7 now suffers from
it too. Hopefully stable will pick up this revert quickly, so we
can remove the issue on that end as well.

- Add a quirk for Apple NVMe controllers, which, being non-compliant,
broke with the introduction of command sequence numbers (Keith)

- Use shifts in nbd, fixing a __divdi3 issue on 32-bit builds (Nick)"

* tag 'block-5.15-2021-10-01' of git://git.kernel.dk/linux-block:
nbd: use shifts rather than multiplies
Revert "block, bfq: honor already-setup queue merges"
nvme: add command id quirk for apple controllers

5 files changed, 31 insertions(+), 27 deletions(-)
block/bfq-iosched.c  +3 -13
···
 	 * are likely to increase the throughput.
 	 */
 	bfqq->new_bfqq = new_bfqq;
-	/*
-	 * The above assignment schedules the following redirections:
-	 * each time some I/O for bfqq arrives, the process that
-	 * generated that I/O is disassociated from bfqq and
-	 * associated with new_bfqq. Here we increases new_bfqq->ref
-	 * in advance, adding the number of processes that are
-	 * expected to be associated with new_bfqq as they happen to
-	 * issue I/O.
-	 */
 	new_bfqq->ref += process_refs;
 	return new_bfqq;
 }
···
 		     void *io_struct, bool request, struct bfq_io_cq *bic)
 {
 	struct bfq_queue *in_service_bfqq, *new_bfqq;
-
-	/* if a merge has already been setup, then proceed with that first */
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;

 	/*
 	 * Check delayed stable merge for rotational or non-queueing
···
 	 */
 	if (bfq_too_late_for_merging(bfqq))
 		return NULL;
+
+	if (bfqq->new_bfqq)
+		return bfqq->new_bfqq;

 	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
 		return NULL;
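For context, the comment removed above documented the reference accounting
done when a queue merge is scheduled. A minimal standalone sketch of that
idea (plain C with illustrative types; struct queue and setup_merge() are
hypothetical stand-ins, not the kernel's bfq structures):

	/*
	 * Sketch: scheduling a merge points q at its merge target and
	 * takes one reference per process expected to migrate there.
	 */
	#include <stddef.h>

	struct queue {
		int ref;			/* reference count */
		struct queue *new_queue;	/* merge target, NULL if none */
	};

	static struct queue *setup_merge(struct queue *q, struct queue *target,
					 int process_refs)
	{
		q->new_queue = target;		/* future I/O is redirected here */
		target->ref += process_refs;	/* refs taken up-front for migrators */
		return target;
	}

	int main(void)
	{
		struct queue q = { .ref = 2, .new_queue = NULL };
		struct queue target = { .ref = 1, .new_queue = NULL };

		setup_merge(&q, &target, 2);	/* two processes will migrate */
		return target.ref == 3 ? 0 : 1;
	}

The revert moves the "merge already set up" early return back below the
bfq_too_late_for_merging() check rather than taking it first.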
drivers/block/nbd.c  +17 -12
···
 	atomic_t recv_threads;
 	wait_queue_head_t recv_wq;
-	loff_t blksize;
+	unsigned int blksize_bits;
 	loff_t bytesize;
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	struct dentry *dbg_dir;
 #endif
 };
+
+static inline unsigned int nbd_blksize(struct nbd_config *config)
+{
+	return 1u << config->blksize_bits;
+}

 struct nbd_device {
 	struct blk_mq_tag_set tag_set;
···
 #define NBD_MAGIC 0x68797548

-#define NBD_DEF_BLKSIZE 1024
+#define NBD_DEF_BLKSIZE_BITS 10

 static unsigned int nbds_max = 16;
 static int max_part = 16;
···
 		loff_t blksize)
 {
 	if (!blksize)
-		blksize = NBD_DEF_BLKSIZE;
+		blksize = 1u << NBD_DEF_BLKSIZE_BITS;
 	if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
 		return -EINVAL;

 	nbd->config->bytesize = bytesize;
-	nbd->config->blksize = blksize;
+	nbd->config->blksize_bits = __ffs(blksize);

 	if (!nbd->task_recv)
 		return 0;
···
 		args->index = i;
 		queue_work(nbd->recv_workq, &args->work);
 	}
-	return nbd_set_size(nbd, config->bytesize, config->blksize);
+	return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
 }

 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
···
 	case NBD_SET_BLKSIZE:
 		return nbd_set_size(nbd, config->bytesize, arg);
 	case NBD_SET_SIZE:
-		return nbd_set_size(nbd, arg, config->blksize);
+		return nbd_set_size(nbd, arg, nbd_blksize(config));
 	case NBD_SET_SIZE_BLOCKS:
-		if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
+		if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
 			return -EINVAL;
-		return nbd_set_size(nbd, bytesize, config->blksize);
+		return nbd_set_size(nbd, bytesize, nbd_blksize(config));
 	case NBD_SET_TIMEOUT:
 		nbd_set_cmd_timeout(nbd, arg);
 		return 0;
···
 	atomic_set(&config->recv_threads, 0);
 	init_waitqueue_head(&config->recv_wq);
 	init_waitqueue_head(&config->conn_wait);
-	config->blksize = NBD_DEF_BLKSIZE;
+	config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
 	atomic_set(&config->live_connections, 0);
 	try_module_get(THIS_MODULE);
 	return config;
···
 	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
 	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
 	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
-	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
+	debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
 	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);

 	return 0;
···
 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
 {
 	struct nbd_config *config = nbd->config;
-	u64 bsize = config->blksize;
+	u64 bsize = nbd_blksize(config);
 	u64 bytes = config->bytesize;

 	if (info->attrs[NBD_ATTR_SIZE_BYTES])
···
 	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
 		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);

-	if (bytes != config->bytesize || bsize != config->blksize)
+	if (bytes != config->bytesize || bsize != nbd_blksize(config))
 		return nbd_set_size(nbd, bytes, bsize);
 	return 0;
 }
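The reason for the shift rework: on 32-bit architectures, 64-bit divides
and multiplies on loff_t can compile into libgcc helpers such as __divdi3,
which the kernel does not provide. Since nbd's block size is validated to
be a power of two, storing it as a bit count makes both conversions plain
shifts. A small userspace sketch of the equivalence (illustrative only;
blksize_to_bits() is a stand-in for the kernel's __ffs()):

	#include <assert.h>
	#include <stdint.h>

	static unsigned int blksize_to_bits(uint64_t blksize)
	{
		unsigned int bits = 0;

		/* assumes a power-of-two blksize, as nbd_set_size() enforces */
		while ((UINT64_C(1) << bits) < blksize)
			bits++;
		return bits;
	}

	int main(void)
	{
		uint64_t bytesize = UINT64_C(1) << 30;	/* a 1 GiB device */
		uint64_t blksize = 4096;
		unsigned int bits = blksize_to_bits(blksize);

		/* block count: a right shift replaces bytesize / blksize */
		assert(bytesize >> bits == bytesize / blksize);
		/* byte size: a left shift replaces nr_blocks * blksize */
		assert((bytesize >> bits) << bits == bytesize);
		return 0;
	}

check_shl_overflow() then guards the left shift the same way
check_mul_overflow() guarded the old multiply.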
drivers/nvme/host/core.c  +3 -1
···
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
 {
 	struct nvme_command *cmd = nvme_req(req)->cmd;
+	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
 	blk_status_t ret = BLK_STS_OK;

 	if (!(req->rq_flags & RQF_DONTPREP)) {
···
 		return BLK_STS_IOERR;
 	}

-	nvme_req(req)->genctr++;
+	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
+		nvme_req(req)->genctr++;
 	cmd->common.command_id = nvme_cid(req);
 	trace_nvme_setup_cmd(req, cmd);
 	return ret;
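For reference, the command_id in this scheme packs a small generation
counter next to the blk-mq tag, which is what the non-compliant Apple
controllers trip over. A hedged userspace sketch of that layout (the
field widths assume the 5.15 genctr encoding of 4 generation bits over a
12-bit tag, and cid_encode() is a hypothetical helper, not a kernel
function):

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t cid_encode(uint8_t genctr, uint16_t tag)
	{
		/* upper 4 bits: generation counter; lower 12 bits: tag */
		return (uint16_t)(((genctr & 0xf) << 12) | (tag & 0xfff));
	}

	int main(void)
	{
		/* with the quirk, genctr stays 0: command_id is the bare tag */
		printf("with genctr: 0x%04x, quirked: 0x%04x\n",
		       cid_encode(3, 42), cid_encode(0, 42));
		return 0;
	}

A controller that only honors the low tag bits would fail the generation
check on completion, so the quirk simply never advances the counter.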
drivers/nvme/host/nvme.h  +6
···
 	 * 48 bits.
 	 */
 	NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
+
+	/*
+	 * The controller requires the command_id value be limited, so skip
+	 * encoding the generation sequence number.
+	 */
+	NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
 };

 /*
drivers/nvme/host/pci.c  +2 -1
···
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
 		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
 				NVME_QUIRK_128_BYTES_SQES |
-				NVME_QUIRK_SHARED_TAGS },
+				NVME_QUIRK_SHARED_TAGS |
+				NVME_QUIRK_SKIP_CID_GEN },

 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }