Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-5.18/drivers-2022-04-01' of git://git.kernel.dk/linux-block

Pull block driver fixes from Jens Axboe:
"Followup block driver updates and fixes for the 5.18-rc1 merge window.
In detail:

- NVMe pull request
- Fix multipath hang when disk goes live over reconnect (Anton
Eidelman)
- fix RCU hole that allowed for endless looping in multipath
round robin (Chris Leech)
- remove redundant assignment after left shift (Colin Ian King)
- add quirks for Samsung X5 SSDs (Monish Kumar R)
- fix the read-only state for zoned namespaces with unsupported
features (Pankaj Raghav)
- use a private workqueue instead of the system workqueue in
nvmet (Sagi Grimberg)
- allow duplicate NSIDs for private namespaces (Sungup Moon)
- expose use_threaded_interrupts read-only in sysfs (Xin Hao)

- nbd minor allocation fix (Zhang)

- drbd fixes and maintainer addition (Lars, Jakob, Christoph)

- n64cart build fix (Jackie)

- loop compat ioctl fix (Carlos)

- misc fixes (Colin, Dongli)"

* tag 'for-5.18/drivers-2022-04-01' of git://git.kernel.dk/linux-block:
drbd: remove check of list iterator against head past the loop body
drbd: remove usage of list iterator variable after loop
nbd: fix possible overflow on 'first_minor' in nbd_dev_add()
MAINTAINERS: add drbd co-maintainer
drbd: fix potential silent data corruption
loop: fix ioctl calls using compat_loop_info
nvme-multipath: fix hang when disk goes live over reconnect
nvme: fix RCU hole that allowed for endless looping in multipath round robin
nvme: allow duplicate NSIDs for private namespaces
nvmet: remove redundant assignment after left shift
nvmet: use a private workqueue instead of the system workqueue
nvme-pci: add quirks for Samsung X5 SSDs
nvme-pci: expose use_threaded_interrupts read-only in sysfs
nvme: fix the read-only state for zoned namespaces with unsupposed features
n64cart: convert bi_disk to bi_bdev->bd_disk fix build
xen/blkfront: fix comment for need_copy
xen-blkback: remove redundant assignment to variable i

+186 -92
+1
MAINTAINERS
··· 6052 6052 DRBD DRIVER 6053 6053 M: Philipp Reisner <philipp.reisner@linbit.com> 6054 6054 M: Lars Ellenberg <lars.ellenberg@linbit.com> 6055 + M: Christoph Böhmwalder <christoph.boehmwalder@linbit.com> 6055 6056 L: drbd-dev@lists.linbit.com 6056 6057 S: Supported 6057 6058 W: http://www.drbd.org
+5 -2
drivers/block/drbd/drbd_main.c
··· 171 171 unsigned int set_size) 172 172 { 173 173 struct drbd_request *r; 174 - struct drbd_request *req = NULL; 174 + struct drbd_request *req = NULL, *tmp = NULL; 175 175 int expect_epoch = 0; 176 176 int expect_size = 0; 177 177 ··· 225 225 * to catch requests being barrier-acked "unexpectedly". 226 226 * It usually should find the same req again, or some READ preceding it. */ 227 227 list_for_each_entry(req, &connection->transfer_log, tl_requests) 228 - if (req->epoch == expect_epoch) 228 + if (req->epoch == expect_epoch) { 229 + tmp = req; 229 230 break; 231 + } 232 + req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests); 230 233 list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) { 231 234 if (req->epoch != expect_epoch) 232 235 break;
+29 -16
drivers/block/drbd/drbd_req.c
··· 180 180 void complete_master_bio(struct drbd_device *device, 181 181 struct bio_and_error *m) 182 182 { 183 - m->bio->bi_status = errno_to_blk_status(m->error); 183 + if (unlikely(m->error)) 184 + m->bio->bi_status = errno_to_blk_status(m->error); 184 185 bio_endio(m->bio); 185 186 dec_ap_bio(device); 186 187 } ··· 333 332 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) 334 333 { 335 334 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; 335 + struct drbd_request *iter = req; 336 336 if (!connection) 337 337 return; 338 338 if (connection->req_next != req) 339 339 return; 340 - list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { 341 - const unsigned s = req->rq_state; 342 - if (s & RQ_NET_QUEUED) 340 + 341 + req = NULL; 342 + list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) { 343 + const unsigned int s = iter->rq_state; 344 + 345 + if (s & RQ_NET_QUEUED) { 346 + req = iter; 343 347 break; 348 + } 344 349 } 345 - if (&req->tl_requests == &connection->transfer_log) 346 - req = NULL; 347 350 connection->req_next = req; 348 351 } 349 352 ··· 363 358 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req) 364 359 { 365 360 struct drbd_connection *connection = peer_device ? 
peer_device->connection : NULL; 361 + struct drbd_request *iter = req; 366 362 if (!connection) 367 363 return; 368 364 if (connection->req_ack_pending != req) 369 365 return; 370 - list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { 371 - const unsigned s = req->rq_state; 372 - if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING)) 366 + 367 + req = NULL; 368 + list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) { 369 + const unsigned int s = iter->rq_state; 370 + 371 + if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING)) { 372 + req = iter; 373 373 break; 374 + } 374 375 } 375 - if (&req->tl_requests == &connection->transfer_log) 376 - req = NULL; 377 376 connection->req_ack_pending = req; 378 377 } 379 378 ··· 393 384 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req) 394 385 { 395 386 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; 387 + struct drbd_request *iter = req; 396 388 if (!connection) 397 389 return; 398 390 if (connection->req_not_net_done != req) 399 391 return; 400 - list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { 401 - const unsigned s = req->rq_state; 402 - if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE)) 392 + 393 + req = NULL; 394 + list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) { 395 + const unsigned int s = iter->rq_state; 396 + 397 + if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE)) { 398 + req = iter; 403 399 break; 400 + } 404 401 } 405 - if (&req->tl_requests == &connection->transfer_log) 406 - req = NULL; 407 402 connection->req_not_net_done = req; 408 403 } 409 404
+1
drivers/block/loop.c
··· 1591 1591 compat_ulong_t lo_inode; /* ioctl r/o */ 1592 1592 compat_dev_t lo_rdevice; /* ioctl r/o */ 1593 1593 compat_int_t lo_offset; 1594 + compat_int_t lo_encrypt_type; /* obsolete, ignored */ 1594 1595 compat_int_t lo_encrypt_key_size; /* ioctl w/o */ 1595 1596 compat_int_t lo_flags; /* ioctl r/o */ 1596 1597 char lo_name[LO_NAME_SIZE];
+1 -1
drivers/block/n64cart.c
··· 88 88 { 89 89 struct bio_vec bvec; 90 90 struct bvec_iter iter; 91 - struct device *dev = bio->bi_disk->private_data; 91 + struct device *dev = bio->bi_bdev->bd_disk->private_data; 92 92 u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT; 93 93 94 94 bio_for_each_segment(bvec, bio, iter) {
+12 -12
drivers/block/nbd.c
··· 1800 1800 refcount_set(&nbd->refs, 0); 1801 1801 INIT_LIST_HEAD(&nbd->list); 1802 1802 disk->major = NBD_MAJOR; 1803 - 1804 - /* Too big first_minor can cause duplicate creation of 1805 - * sysfs files/links, since index << part_shift might overflow, or 1806 - * MKDEV() expect that the max bits of first_minor is 20. 1807 - */ 1808 - disk->first_minor = index << part_shift; 1809 - if (disk->first_minor < index || disk->first_minor > MINORMASK) { 1810 - err = -EINVAL; 1811 - goto out_free_work; 1812 - } 1813 - 1814 1803 disk->minors = 1 << part_shift; 1815 1804 disk->fops = &nbd_fops; 1816 1805 disk->private_data = nbd; ··· 1904 1915 if (!netlink_capable(skb, CAP_SYS_ADMIN)) 1905 1916 return -EPERM; 1906 1917 1907 - if (info->attrs[NBD_ATTR_INDEX]) 1918 + if (info->attrs[NBD_ATTR_INDEX]) { 1908 1919 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); 1920 + 1921 + /* 1922 + * Too big first_minor can cause duplicate creation of 1923 + * sysfs files/links, since index << part_shift might overflow, or 1924 + * MKDEV() expect that the max bits of first_minor is 20. 1925 + */ 1926 + if (index < 0 || index > MINORMASK >> part_shift) { 1927 + printk(KERN_ERR "nbd: illegal input index %d\n", index); 1928 + return -EINVAL; 1929 + } 1930 + } 1909 1931 if (!info->attrs[NBD_ATTR_SOCKETS]) { 1910 1932 printk(KERN_ERR "nbd: must specify at least one socket\n"); 1911 1933 return -EINVAL;
+1 -1
drivers/block/xen-blkback/blkback.c
··· 931 931 if (rc) 932 932 goto unmap; 933 933 934 - for (n = 0, i = 0; n < nseg; n++) { 934 + for (n = 0; n < nseg; n++) { 935 935 uint8_t first_sect, last_sect; 936 936 937 937 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
+1 -1
drivers/block/xen-blkfront.c
··· 576 576 struct blkif_request *ring_req; 577 577 grant_ref_t gref_head; 578 578 unsigned int id; 579 - /* Only used when persistent grant is used and it's a read request */ 579 + /* Only used when persistent grant is used and it's a write request */ 580 580 bool need_copy; 581 581 unsigned int bvec_off; 582 582 char *bvec_data;
+26 -12
drivers/nvme/host/core.c
··· 1830 1830 nvme_config_discard(disk, ns); 1831 1831 blk_queue_max_write_zeroes_sectors(disk->queue, 1832 1832 ns->ctrl->max_zeroes_sectors); 1833 - 1834 - set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) || 1835 - test_bit(NVME_NS_FORCE_RO, &ns->flags)); 1836 1833 } 1837 1834 1838 1835 static inline bool nvme_first_scan(struct gendisk *disk) ··· 1888 1891 goto out_unfreeze; 1889 1892 } 1890 1893 1894 + set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) || 1895 + test_bit(NVME_NS_FORCE_RO, &ns->flags)); 1891 1896 set_bit(NVME_NS_READY, &ns->flags); 1892 1897 blk_mq_unfreeze_queue(ns->disk->queue); 1893 1898 ··· 1902 1903 if (nvme_ns_head_multipath(ns->head)) { 1903 1904 blk_mq_freeze_queue(ns->head->disk->queue); 1904 1905 nvme_update_disk_info(ns->head->disk, ns, id); 1906 + set_disk_ro(ns->head->disk, 1907 + (id->nsattr & NVME_NS_ATTR_RO) || 1908 + test_bit(NVME_NS_FORCE_RO, &ns->flags)); 1905 1909 nvme_mpath_revalidate_paths(ns); 1906 1910 blk_stack_limits(&ns->head->disk->queue->limits, 1907 1911 &ns->queue->limits, 0); ··· 3591 3589 NULL, 3592 3590 }; 3593 3591 3594 - static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, 3592 + static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, 3595 3593 unsigned nsid) 3596 3594 { 3597 3595 struct nvme_ns_head *h; 3598 3596 3599 - lockdep_assert_held(&subsys->lock); 3597 + lockdep_assert_held(&ctrl->subsys->lock); 3600 3598 3601 - list_for_each_entry(h, &subsys->nsheads, entry) { 3602 - if (h->ns_id != nsid) 3599 + list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { 3600 + /* 3601 + * Private namespaces can share NSIDs under some conditions. 3602 + * In that case we can't use the same ns_head for namespaces 3603 + * with the same NSID. 
3604 + */ 3605 + if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) 3603 3606 continue; 3604 3607 if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) 3605 3608 return h; ··· 3798 3791 } 3799 3792 3800 3793 mutex_lock(&ctrl->subsys->lock); 3801 - head = nvme_find_ns_head(ctrl->subsys, nsid); 3794 + head = nvme_find_ns_head(ctrl, nsid); 3802 3795 if (!head) { 3803 3796 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids); 3804 3797 if (ret) { ··· 3995 3988 set_capacity(ns->disk, 0); 3996 3989 nvme_fault_inject_fini(&ns->fault_inject); 3997 3990 3991 + /* 3992 + * Ensure that !NVME_NS_READY is seen by other threads to prevent 3993 + * this ns going back into current_path. 3994 + */ 3995 + synchronize_srcu(&ns->head->srcu); 3996 + 3997 + /* wait for concurrent submissions */ 3998 + if (nvme_mpath_clear_current_path(ns)) 3999 + synchronize_srcu(&ns->head->srcu); 4000 + 3998 4001 mutex_lock(&ns->ctrl->subsys->lock); 3999 4002 list_del_rcu(&ns->siblings); 4000 4003 if (list_empty(&ns->head->list)) { ··· 4015 3998 4016 3999 /* guarantee not available in head->list */ 4017 4000 synchronize_rcu(); 4018 - 4019 - /* wait for concurrent submissions */ 4020 - if (nvme_mpath_clear_current_path(ns)) 4021 - synchronize_srcu(&ns->head->srcu); 4022 4001 4023 4002 if (!nvme_ns_head_multipath(ns->head)) 4024 4003 nvme_cdev_del(&ns->cdev, &ns->cdev_device); ··· 4493 4480 if (ctrl->queue_count > 1) { 4494 4481 nvme_queue_scan(ctrl); 4495 4482 nvme_start_queues(ctrl); 4483 + nvme_mpath_update(ctrl); 4496 4484 } 4497 4485 4498 4486 nvme_change_uevent(ctrl, "NVME_EVENT=connected");
+27 -5
drivers/nvme/host/multipath.c
··· 482 482 483 483 /* 484 484 * Add a multipath node if the subsystems supports multiple controllers. 485 - * We also do this for private namespaces as the namespace sharing data could 486 - * change after a rescan. 485 + * We also do this for private namespaces as the namespace sharing flag 486 + * could change after a rescan. 487 487 */ 488 - if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath) 488 + if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || 489 + !nvme_is_unique_nsid(ctrl, head) || !multipath) 489 490 return 0; 490 491 491 492 head->disk = blk_alloc_disk(ctrl->numa_node); ··· 613 612 ns->ana_grpid = le32_to_cpu(desc->grpid); 614 613 ns->ana_state = desc->state; 615 614 clear_bit(NVME_NS_ANA_PENDING, &ns->flags); 616 - 617 - if (nvme_state_is_live(ns->ana_state)) 615 + /* 616 + * nvme_mpath_set_live() will trigger I/O to the multipath path device 617 + * and in turn to this path device. However we cannot accept this I/O 618 + * if the controller is not live. This may deadlock if called from 619 + * nvme_mpath_init_identify() and the ctrl will never complete 620 + * initialization, preventing I/O from completing. For this case we 621 + * will reprocess the ANA log page in nvme_mpath_update() once the 622 + * controller is ready. 623 + */ 624 + if (nvme_state_is_live(ns->ana_state) && 625 + ns->ctrl->state == NVME_CTRL_LIVE) 618 626 nvme_mpath_set_live(ns); 619 627 } 620 628 ··· 708 698 return; 709 699 710 700 nvme_read_ana_log(ctrl); 701 + } 702 + 703 + void nvme_mpath_update(struct nvme_ctrl *ctrl) 704 + { 705 + u32 nr_change_groups = 0; 706 + 707 + if (!ctrl->ana_log_buf) 708 + return; 709 + 710 + mutex_lock(&ctrl->ana_lock); 711 + nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state); 712 + mutex_unlock(&ctrl->ana_lock); 711 713 } 712 714 713 715 static void nvme_anatt_timeout(struct timer_list *t)
+23
drivers/nvme/host/nvme.h
··· 723 723 return queue_live; 724 724 return __nvme_check_ready(ctrl, rq, queue_live); 725 725 } 726 + 727 + /* 728 + * NSID shall be unique for all shared namespaces, or if at least one of the 729 + * following conditions is met: 730 + * 1. Namespace Management is supported by the controller 731 + * 2. ANA is supported by the controller 732 + * 3. NVM Set are supported by the controller 733 + * 734 + * In other case, private namespace are not required to report a unique NSID. 735 + */ 736 + static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl, 737 + struct nvme_ns_head *head) 738 + { 739 + return head->shared || 740 + (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) || 741 + (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) || 742 + (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS); 743 + } 744 + 726 745 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 727 746 void *buf, unsigned bufflen); 728 747 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, ··· 801 782 void nvme_mpath_remove_disk(struct nvme_ns_head *head); 802 783 int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); 803 784 void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl); 785 + void nvme_mpath_update(struct nvme_ctrl *ctrl); 804 786 void nvme_mpath_uninit(struct nvme_ctrl *ctrl); 805 787 void nvme_mpath_stop(struct nvme_ctrl *ctrl); 806 788 bool nvme_mpath_clear_current_path(struct nvme_ns *ns); ··· 872 852 dev_warn(ctrl->device, 873 853 "Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n"); 874 854 return 0; 855 + } 856 + static inline void nvme_mpath_update(struct nvme_ctrl *ctrl) 857 + { 875 858 } 876 859 static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) 877 860 {
+5 -2
drivers/nvme/host/pci.c
··· 45 45 #define NVME_MAX_SEGS 127 46 46 47 47 static int use_threaded_interrupts; 48 - module_param(use_threaded_interrupts, int, 0); 48 + module_param(use_threaded_interrupts, int, 0444); 49 49 50 50 static bool use_cmb_sqes = true; 51 51 module_param(use_cmb_sqes, bool, 0444); ··· 3467 3467 NVME_QUIRK_128_BYTES_SQES | 3468 3468 NVME_QUIRK_SHARED_TAGS | 3469 3469 NVME_QUIRK_SKIP_CID_GEN }, 3470 - 3470 + { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */ 3471 + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY| 3472 + NVME_QUIRK_NO_DEEPEST_PS | 3473 + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3471 3474 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 3472 3475 { 0, } 3473 3476 };
+1 -1
drivers/nvme/target/admin-cmd.c
··· 988 988 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; 989 989 mutex_unlock(&ctrl->lock); 990 990 991 - schedule_work(&ctrl->async_event_work); 991 + queue_work(nvmet_wq, &ctrl->async_event_work); 992 992 } 993 993 994 994 void nvmet_execute_keep_alive(struct nvmet_req *req)
+1 -1
drivers/nvme/target/configfs.c
··· 1555 1555 struct nvmet_port *port = to_nvmet_port(item); 1556 1556 1557 1557 /* Let inflight controllers teardown complete */ 1558 - flush_scheduled_work(); 1558 + flush_workqueue(nvmet_wq); 1559 1559 list_del(&port->global_entry); 1560 1560 1561 1561 kfree(port->ana_state);
+19 -7
drivers/nvme/target/core.c
··· 20 20 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; 21 21 static DEFINE_IDA(cntlid_ida); 22 22 23 + struct workqueue_struct *nvmet_wq; 24 + EXPORT_SYMBOL_GPL(nvmet_wq); 25 + 23 26 /* 24 27 * This read/write semaphore is used to synchronize access to configuration 25 28 * information on a target system that will result in discovery log page ··· 208 205 list_add_tail(&aen->entry, &ctrl->async_events); 209 206 mutex_unlock(&ctrl->lock); 210 207 211 - schedule_work(&ctrl->async_event_work); 208 + queue_work(nvmet_wq, &ctrl->async_event_work); 212 209 } 213 210 214 211 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid) ··· 388 385 if (reset_tbkas) { 389 386 pr_debug("ctrl %d reschedule traffic based keep-alive timer\n", 390 387 ctrl->cntlid); 391 - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); 388 + queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); 392 389 return; 393 390 } 394 391 ··· 406 403 pr_debug("ctrl %d start keep-alive timer for %d secs\n", 407 404 ctrl->cntlid, ctrl->kato); 408 405 409 - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); 406 + queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); 410 407 } 411 408 412 409 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) ··· 1123 1120 1124 1121 static inline bool nvmet_css_supported(u8 cc_css) 1125 1122 { 1126 - switch (cc_css <<= NVME_CC_CSS_SHIFT) { 1123 + switch (cc_css << NVME_CC_CSS_SHIFT) { 1127 1124 case NVME_CC_CSS_NVM: 1128 1125 case NVME_CC_CSS_CSI: 1129 1126 return true; ··· 1481 1478 mutex_lock(&ctrl->lock); 1482 1479 if (!(ctrl->csts & NVME_CSTS_CFS)) { 1483 1480 ctrl->csts |= NVME_CSTS_CFS; 1484 - schedule_work(&ctrl->fatal_err_work); 1481 + queue_work(nvmet_wq, &ctrl->fatal_err_work); 1485 1482 } 1486 1483 mutex_unlock(&ctrl->lock); 1487 1484 } ··· 1622 1619 goto out_free_zbd_work_queue; 1623 1620 } 1624 1621 1622 + nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0); 1623 + if 
(!nvmet_wq) { 1624 + error = -ENOMEM; 1625 + goto out_free_buffered_work_queue; 1626 + } 1627 + 1625 1628 error = nvmet_init_discovery(); 1626 1629 if (error) 1627 - goto out_free_work_queue; 1630 + goto out_free_nvmet_work_queue; 1628 1631 1629 1632 error = nvmet_init_configfs(); 1630 1633 if (error) ··· 1639 1630 1640 1631 out_exit_discovery: 1641 1632 nvmet_exit_discovery(); 1642 - out_free_work_queue: 1633 + out_free_nvmet_work_queue: 1634 + destroy_workqueue(nvmet_wq); 1635 + out_free_buffered_work_queue: 1643 1636 destroy_workqueue(buffered_io_wq); 1644 1637 out_free_zbd_work_queue: 1645 1638 destroy_workqueue(zbd_wq); ··· 1653 1642 nvmet_exit_configfs(); 1654 1643 nvmet_exit_discovery(); 1655 1644 ida_destroy(&cntlid_ida); 1645 + destroy_workqueue(nvmet_wq); 1656 1646 destroy_workqueue(buffered_io_wq); 1657 1647 destroy_workqueue(zbd_wq); 1658 1648
+4 -4
drivers/nvme/target/fc.c
··· 1491 1491 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1492 1492 if (!nvmet_fc_tgt_a_get(assoc)) 1493 1493 continue; 1494 - if (!schedule_work(&assoc->del_work)) 1494 + if (!queue_work(nvmet_wq, &assoc->del_work)) 1495 1495 /* already deleting - release local reference */ 1496 1496 nvmet_fc_tgt_a_put(assoc); 1497 1497 } ··· 1546 1546 continue; 1547 1547 assoc->hostport->invalid = 1; 1548 1548 noassoc = false; 1549 - if (!schedule_work(&assoc->del_work)) 1549 + if (!queue_work(nvmet_wq, &assoc->del_work)) 1550 1550 /* already deleting - release local reference */ 1551 1551 nvmet_fc_tgt_a_put(assoc); 1552 1552 } ··· 1592 1592 nvmet_fc_tgtport_put(tgtport); 1593 1593 1594 1594 if (found_ctrl) { 1595 - if (!schedule_work(&assoc->del_work)) 1595 + if (!queue_work(nvmet_wq, &assoc->del_work)) 1596 1596 /* already deleting - release local reference */ 1597 1597 nvmet_fc_tgt_a_put(assoc); 1598 1598 return; ··· 2060 2060 iod->rqstdatalen = lsreqbuf_len; 2061 2061 iod->hosthandle = hosthandle; 2062 2062 2063 - schedule_work(&iod->work); 2063 + queue_work(nvmet_wq, &iod->work); 2064 2064 2065 2065 return 0; 2066 2066 }
+8 -8
drivers/nvme/target/fcloop.c
··· 360 360 spin_lock(&rport->lock); 361 361 list_add_tail(&rport->ls_list, &tls_req->ls_list); 362 362 spin_unlock(&rport->lock); 363 - schedule_work(&rport->ls_work); 363 + queue_work(nvmet_wq, &rport->ls_work); 364 364 return ret; 365 365 } 366 366 ··· 393 393 spin_lock(&rport->lock); 394 394 list_add_tail(&rport->ls_list, &tls_req->ls_list); 395 395 spin_unlock(&rport->lock); 396 - schedule_work(&rport->ls_work); 396 + queue_work(nvmet_wq, &rport->ls_work); 397 397 } 398 398 399 399 return 0; ··· 448 448 spin_lock(&tport->lock); 449 449 list_add_tail(&tport->ls_list, &tls_req->ls_list); 450 450 spin_unlock(&tport->lock); 451 - schedule_work(&tport->ls_work); 451 + queue_work(nvmet_wq, &tport->ls_work); 452 452 return ret; 453 453 } 454 454 ··· 480 480 spin_lock(&tport->lock); 481 481 list_add_tail(&tport->ls_list, &tls_req->ls_list); 482 482 spin_unlock(&tport->lock); 483 - schedule_work(&tport->ls_work); 483 + queue_work(nvmet_wq, &tport->ls_work); 484 484 } 485 485 486 486 return 0; ··· 520 520 tgt_rscn->tport = tgtport->private; 521 521 INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work); 522 522 523 - schedule_work(&tgt_rscn->work); 523 + queue_work(nvmet_wq, &tgt_rscn->work); 524 524 } 525 525 526 526 static void ··· 739 739 INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work); 740 740 kref_init(&tfcp_req->ref); 741 741 742 - schedule_work(&tfcp_req->fcp_rcv_work); 742 + queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work); 743 743 744 744 return 0; 745 745 } ··· 921 921 { 922 922 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); 923 923 924 - schedule_work(&tfcp_req->tio_done_work); 924 + queue_work(nvmet_wq, &tfcp_req->tio_done_work); 925 925 } 926 926 927 927 static void ··· 976 976 977 977 if (abortio) 978 978 /* leave the reference while the work item is scheduled */ 979 - WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work)); 979 + WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work)); 980 980 else { 981 981 /* 982 982 * as the 
io has already had the done callback made,
+3 -3
drivers/nvme/target/io-cmd-file.c
··· 283 283 if (!nvmet_check_transfer_len(req, 0)) 284 284 return; 285 285 INIT_WORK(&req->f.work, nvmet_file_flush_work); 286 - schedule_work(&req->f.work); 286 + queue_work(nvmet_wq, &req->f.work); 287 287 } 288 288 289 289 static void nvmet_file_execute_discard(struct nvmet_req *req) ··· 343 343 if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req))) 344 344 return; 345 345 INIT_WORK(&req->f.work, nvmet_file_dsm_work); 346 - schedule_work(&req->f.work); 346 + queue_work(nvmet_wq, &req->f.work); 347 347 } 348 348 349 349 static void nvmet_file_write_zeroes_work(struct work_struct *w) ··· 373 373 if (!nvmet_check_transfer_len(req, 0)) 374 374 return; 375 375 INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work); 376 - schedule_work(&req->f.work); 376 + queue_work(nvmet_wq, &req->f.work); 377 377 } 378 378 379 379 u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
+2 -2
drivers/nvme/target/loop.c
··· 166 166 iod->req.transfer_len = blk_rq_payload_bytes(req); 167 167 } 168 168 169 - schedule_work(&iod->work); 169 + queue_work(nvmet_wq, &iod->work); 170 170 return BLK_STS_OK; 171 171 } 172 172 ··· 187 187 return; 188 188 } 189 189 190 - schedule_work(&iod->work); 190 + queue_work(nvmet_wq, &iod->work); 191 191 } 192 192 193 193 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
+1
drivers/nvme/target/nvmet.h
··· 366 366 367 367 extern struct workqueue_struct *buffered_io_wq; 368 368 extern struct workqueue_struct *zbd_wq; 369 + extern struct workqueue_struct *nvmet_wq; 369 370 370 371 static inline void nvmet_set_result(struct nvmet_req *req, u32 result) 371 372 {
+1 -1
drivers/nvme/target/passthru.c
··· 283 283 if (req->p.use_workqueue || effects) { 284 284 INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work); 285 285 req->p.rq = rq; 286 - schedule_work(&req->p.work); 286 + queue_work(nvmet_wq, &req->p.work); 287 287 } else { 288 288 rq->end_io_data = req; 289 289 blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
+6 -6
drivers/nvme/target/rdma.c
··· 1584 1584 1585 1585 if (queue->host_qid == 0) { 1586 1586 /* Let inflight controller teardown complete */ 1587 - flush_scheduled_work(); 1587 + flush_workqueue(nvmet_wq); 1588 1588 } 1589 1589 1590 1590 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); ··· 1669 1669 1670 1670 if (disconnect) { 1671 1671 rdma_disconnect(queue->cm_id); 1672 - schedule_work(&queue->release_work); 1672 + queue_work(nvmet_wq, &queue->release_work); 1673 1673 } 1674 1674 } 1675 1675 ··· 1699 1699 mutex_unlock(&nvmet_rdma_queue_mutex); 1700 1700 1701 1701 pr_err("failed to connect queue %d\n", queue->idx); 1702 - schedule_work(&queue->release_work); 1702 + queue_work(nvmet_wq, &queue->release_work); 1703 1703 } 1704 1704 1705 1705 /** ··· 1773 1773 if (!queue) { 1774 1774 struct nvmet_rdma_port *port = cm_id->context; 1775 1775 1776 - schedule_delayed_work(&port->repair_work, 0); 1776 + queue_delayed_work(nvmet_wq, &port->repair_work, 0); 1777 1777 break; 1778 1778 } 1779 1779 fallthrough; ··· 1903 1903 nvmet_rdma_disable_port(port); 1904 1904 ret = nvmet_rdma_enable_port(port); 1905 1905 if (ret) 1906 - schedule_delayed_work(&port->repair_work, 5 * HZ); 1906 + queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ); 1907 1907 } 1908 1908 1909 1909 static int nvmet_rdma_add_port(struct nvmet_port *nport) ··· 2053 2053 } 2054 2054 mutex_unlock(&nvmet_rdma_queue_mutex); 2055 2055 2056 - flush_scheduled_work(); 2056 + flush_workqueue(nvmet_wq); 2057 2057 } 2058 2058 2059 2059 static struct ib_client nvmet_rdma_ib_client = {
+5 -5
drivers/nvme/target/tcp.c
··· 1269 1269 spin_lock(&queue->state_lock); 1270 1270 if (queue->state != NVMET_TCP_Q_DISCONNECTING) { 1271 1271 queue->state = NVMET_TCP_Q_DISCONNECTING; 1272 - schedule_work(&queue->release_work); 1272 + queue_work(nvmet_wq, &queue->release_work); 1273 1273 } 1274 1274 spin_unlock(&queue->state_lock); 1275 1275 } ··· 1684 1684 goto out; 1685 1685 1686 1686 if (sk->sk_state == TCP_LISTEN) 1687 - schedule_work(&port->accept_work); 1687 + queue_work(nvmet_wq, &port->accept_work); 1688 1688 out: 1689 1689 read_unlock_bh(&sk->sk_callback_lock); 1690 1690 } ··· 1815 1815 1816 1816 if (sq->qid == 0) { 1817 1817 /* Let inflight controller teardown complete */ 1818 - flush_scheduled_work(); 1818 + flush_workqueue(nvmet_wq); 1819 1819 } 1820 1820 1821 1821 queue->nr_cmds = sq->size * 2; ··· 1876 1876 1877 1877 nvmet_unregister_transport(&nvmet_tcp_ops); 1878 1878 1879 - flush_scheduled_work(); 1879 + flush_workqueue(nvmet_wq); 1880 1880 mutex_lock(&nvmet_tcp_queue_mutex); 1881 1881 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) 1882 1882 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 1883 1883 mutex_unlock(&nvmet_tcp_queue_mutex); 1884 - flush_scheduled_work(); 1884 + flush_workqueue(nvmet_wq); 1885 1885 1886 1886 destroy_workqueue(nvmet_tcp_wq); 1887 1887 }
+1
include/linux/nvme.h
··· 346 346 NVME_CTRL_ONCS_TIMESTAMP = 1 << 6, 347 347 NVME_CTRL_VWC_PRESENT = 1 << 0, 348 348 NVME_CTRL_OACS_SEC_SUPP = 1 << 0, 349 + NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3, 349 350 NVME_CTRL_OACS_DIRECTIVES = 1 << 5, 350 351 NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8, 351 352 NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
+2 -2
include/uapi/linux/loop.h
··· 45 45 unsigned long lo_inode; /* ioctl r/o */ 46 46 __kernel_old_dev_t lo_rdevice; /* ioctl r/o */ 47 47 int lo_offset; 48 - int lo_encrypt_type; 48 + int lo_encrypt_type; /* obsolete, ignored */ 49 49 int lo_encrypt_key_size; /* ioctl w/o */ 50 50 int lo_flags; 51 51 char lo_name[LO_NAME_SIZE]; ··· 61 61 __u64 lo_offset; 62 62 __u64 lo_sizelimit;/* bytes, 0 == max available */ 63 63 __u32 lo_number; /* ioctl r/o */ 64 - __u32 lo_encrypt_type; 64 + __u32 lo_encrypt_type; /* obsolete, ignored */ 65 65 __u32 lo_encrypt_key_size; /* ioctl w/o */ 66 66 __u32 lo_flags; 67 67 __u8 lo_file_name[LO_NAME_SIZE];