Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'block-6.5-2023-07-28' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
"A few fixes that should go into the current kernel release, mainly:

- Set of fixes for dasd (Stefan)

- Handle interruptible waits returning because of a signal for ublk
(Ming)"

* tag 'block-6.5-2023-07-28' of git://git.kernel.dk/linux:
ublk: return -EINTR if breaking from waiting for existed users in DEL_DEV
ublk: fail to recover device if queue setup is interrupted
ublk: fail to start device if queue setup is interrupted
block: Fix a source code comment in include/uapi/linux/blkzoned.h
s390/dasd: print copy pair message only for the correct error
s390/dasd: fix hanging device after request requeue
s390/dasd: use correct number of retries for ERP requests
s390/dasd: fix hanging device after quiesce/resume

+64 -89
+7 -4
drivers/block/ublk_drv.c
··· (around line 1847)
 	if (ublksrv_pid <= 0)
 		return -EINVAL;
 
-	wait_for_completion_interruptible(&ub->completion);
+	if (wait_for_completion_interruptible(&ub->completion) != 0)
+		return -EINTR;
 
 	schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
 
··· (around line 2126)
 	 * - the device number is freed already, we will not find this
 	 *   device via ublk_get_device_from_id()
 	 */
-	wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx));
-
+	if (wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
+		return -EINTR;
 	return 0;
 }
 
··· (around line 2324)
 	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
 			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
 	/* wait until new ubq_daemon sending all FETCH_REQ */
-	wait_for_completion_interruptible(&ub->completion);
+	if (wait_for_completion_interruptible(&ub->completion))
+		return -EINTR;
+
 	pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
 			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
 
+49 -78
drivers/s390/block/dasd.c
··· (around line 2943)
  * Requeue a request back to the block request queue
  * only works for block requests
  */
-static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
 {
-	struct dasd_block *block = cqr->block;
 	struct request *req;
 
-	if (!block)
-		return -EINVAL;
 	/*
 	 * If the request is an ERP request there is nothing to requeue.
 	 * This will be done with the remaining original request.
 	 */
 	if (cqr->refers)
-		return 0;
+		return;
 	spin_lock_irq(&cqr->dq->lock);
 	req = (struct request *) cqr->callback_data;
 	blk_mq_requeue_request(req, true);
 	spin_unlock_irq(&cqr->dq->lock);
 
-	return 0;
+	return;
 }
 
-/*
- * Go through all request on the dasd_block request queue, cancel them
- * on the respective dasd_device, and return them to the generic
- * block layer.
- */
-static int dasd_flush_block_queue(struct dasd_block *block)
+static int _dasd_requests_to_flushqueue(struct dasd_block *block,
+					struct list_head *flush_queue)
 {
 	struct dasd_ccw_req *cqr, *n;
-	int rc, i;
-	struct list_head flush_queue;
 	unsigned long flags;
+	int rc, i;
 
-	INIT_LIST_HEAD(&flush_queue);
-	spin_lock_bh(&block->queue_lock);
+	spin_lock_irqsave(&block->queue_lock, flags);
 	rc = 0;
 restart:
 	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
··· (around line 2983)
 		 * is returned from the dasd_device layer.
 		 */
 		cqr->callback = _dasd_wake_block_flush_cb;
-		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
-			list_move_tail(&cqr->blocklist, &flush_queue);
+		for (i = 0; cqr; cqr = cqr->refers, i++)
+			list_move_tail(&cqr->blocklist, flush_queue);
 		if (i > 1)
 			/* moved more than one request - need to restart */
 			goto restart;
 	}
-	spin_unlock_bh(&block->queue_lock);
+	spin_unlock_irqrestore(&block->queue_lock, flags);
+
+	return rc;
+}
+
+/*
+ * Go through all request on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+	struct dasd_ccw_req *cqr, *n;
+	struct list_head flush_queue;
+	unsigned long flags;
+	int rc;
+
+	INIT_LIST_HEAD(&flush_queue);
+	rc = _dasd_requests_to_flushqueue(block, &flush_queue);
+
 	/* Now call the callback function of flushed requests */
 restart_cb:
 	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
··· (around line 3891)
  */
 int dasd_generic_requeue_all_requests(struct dasd_device *device)
 {
+	struct dasd_block *block = device->block;
 	struct list_head requeue_queue;
 	struct dasd_ccw_req *cqr, *n;
-	struct dasd_ccw_req *refers;
 	int rc;
 
+	if (!block)
+		return 0;
+
 	INIT_LIST_HEAD(&requeue_queue);
-	spin_lock_irq(get_ccwdev_lock(device->cdev));
-	rc = 0;
-	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
-		/* Check status and move request to flush_queue */
-		if (cqr->status == DASD_CQR_IN_IO) {
-			rc = device->discipline->term_IO(cqr);
-			if (rc) {
-				/* unable to terminate requeust */
-				dev_err(&device->cdev->dev,
-					"Unable to terminate request %p "
-					"on suspend\n", cqr);
-				spin_unlock_irq(get_ccwdev_lock(device->cdev));
-				dasd_put_device(device);
-				return rc;
-			}
+	rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
+
+	/* Now call the callback function of flushed requests */
+restart_cb:
+	list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
+		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			spin_lock_bh(&block->queue_lock);
+			__dasd_process_erp(block->base, cqr);
+			spin_unlock_bh(&block->queue_lock);
+			/* restart list_for_xx loop since dasd_process_erp
+			 * might remove multiple elements
+			 */
+			goto restart_cb;
 		}
-		list_move_tail(&cqr->devlist, &requeue_queue);
-	}
-	spin_unlock_irq(get_ccwdev_lock(device->cdev));
-
-	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
-		wait_event(dasd_flush_wq,
-			   (cqr->status != DASD_CQR_CLEAR_PENDING));
-
-		/*
-		 * requeue requests to blocklayer will only work
-		 * for block device requests
-		 */
-		if (_dasd_requeue_request(cqr))
-			continue;
-
-		/* remove requests from device and block queue */
-		list_del_init(&cqr->devlist);
-		while (cqr->refers != NULL) {
-			refers = cqr->refers;
-			/* remove the request from the block queue */
-			list_del(&cqr->blocklist);
-			/* free the finished erp request */
-			dasd_free_erp_request(cqr, cqr->memdev);
-			cqr = refers;
-		}
-
-		/*
-		 * _dasd_requeue_request already checked for a valid
-		 * blockdevice, no need to check again
-		 * all erp requests (cqr->refers) have a cqr->block
-		 * pointer copy from the original cqr
-		 */
+		_dasd_requeue_request(cqr);
 		list_del_init(&cqr->blocklist);
 		cqr->block->base->discipline->free_cp(
 			cqr, (struct request *) cqr->callback_data);
-	}
-
-	/*
-	 * if requests remain then they are internal request
-	 * and go back to the device queue
-	 */
-	if (!list_empty(&requeue_queue)) {
-		/* move freeze_queue to start of the ccw_queue */
-		spin_lock_irq(get_ccwdev_lock(device->cdev));
-		list_splice_tail(&requeue_queue, &device->ccw_queue);
-		spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	}
 	dasd_schedule_device_bh(device);
 	return rc;
+2 -2
drivers/s390/block/dasd_3990_erp.c
··· (around line 1050)
 		dev_err(&device->cdev->dev, "An I/O request was rejected"
 			" because writing is inhibited\n");
 		erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
-	} else if (sense[7] & SNS7_INVALID_ON_SEC) {
+	} else if (sense[7] == SNS7_INVALID_ON_SEC) {
 		dev_err(&device->cdev->dev, "An I/O request was rejected on a copy pair secondary device\n");
 		/* suppress dump of sense data for this error */
 		set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags);
··· (around line 2441)
 	erp->block = cqr->block;
 	erp->magic = cqr->magic;
 	erp->expires = cqr->expires;
-	erp->retries = 256;
+	erp->retries = device->default_retries;
 	erp->buildclk = get_tod_clock();
 	erp->status = DASD_CQR_FILLED;
 
+1
drivers/s390/block/dasd_ioctl.c
··· (around line 131)
 	spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
 
 	dasd_schedule_block_bh(block);
+	dasd_schedule_device_bh(base);
 	return 0;
 }
 
+5 -5
include/uapi/linux/blkzoned.h
··· (around line 51)
  *
  * The Zone Condition state machine in the ZBC/ZAC standards maps the above
  * deinitions as:
- *   - ZC1: Empty | BLK_ZONE_EMPTY
+ *   - ZC1: Empty | BLK_ZONE_COND_EMPTY
  *   - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
  *   - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
- *   - ZC4: Closed | BLK_ZONE_CLOSED
- *   - ZC5: Full | BLK_ZONE_FULL
- *   - ZC6: Read Only | BLK_ZONE_READONLY
- *   - ZC7: Offline | BLK_ZONE_OFFLINE
+ *   - ZC4: Closed | BLK_ZONE_COND_CLOSED
+ *   - ZC5: Full | BLK_ZONE_COND_FULL
+ *   - ZC6: Read Only | BLK_ZONE_COND_READONLY
+ *   - ZC7: Offline | BLK_ZONE_COND_OFFLINE
 *
 * Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
 * be considered invalid.