Merge tag 'block-5.5-20191221' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
"Let's try this one again, this time without the compat_ioctl changes.
We've got those fixed up, but that can go out next week.

This contains:

- block queue flush lockdep annotation (Bart)

- Type fix for bsg_queue_rq() (Bart)

- Three dasd fixes (Stefan, Jan)

- nbd deadlock fix (Mike)

- Error handling bio user map fix (Yang)

- iocost fix (Tejun)

- sbitmap waitqueue addition fix that affects the kyber IO scheduler
(David)"

* tag 'block-5.5-20191221' of git://git.kernel.dk/linux-block:
sbitmap: only queue kyber's wait callback if not already active
block: fix memleak when __blk_rq_map_user_iov() is failed
s390/dasd: fix typo in copyright statement
s390/dasd: fix memleak in path handling error case
s390/dasd/cio: Interpret ccw_device_get_mdc return value correctly
block: Fix a lockdep complaint triggered by request queue flushing
block: Fix the type of 'sts' in bsg_queue_rq()
block: end bio with BLK_STS_AGAIN in case of non-mq devs and REQ_NOWAIT
nbd: fix shutdown and recv work deadlock v2
iocost: over-budget forced IOs should schedule async delay

+7 -4
block/blk-core.c
···
 	}
 
 	/*
-	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
-	 * if queue is not a request based queue.
+	 * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
+	 * with BLK_STS_AGAIN status in order to catch -EAGAIN and
+	 * to give a chance to the caller to repeat request gracefully.
 	 */
-	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
-		goto not_supported;
+	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
+		status = BLK_STS_AGAIN;
+		goto end_io;
+	}
 
 	if (should_fail_bio(bio))
 		goto end_io;
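For submitters, the visible effect is that a REQ_NOWAIT bio sent to a
bio-based (non-mq) queue now completes with BLK_STS_AGAIN instead of
BLK_STS_NOTSUPP, so the caller can retry without the flag rather than
treat the device as incapable. A minimal completion-handler sketch;
my_end_io and the deferred-resubmit step are illustrative, not part of
this series:

  #include <linux/bio.h>
  #include <linux/blk_types.h>

  /* Hypothetical bi_end_io callback: on BLK_STS_AGAIN, drop REQ_NOWAIT
   * and resubmit from process context instead of failing the I/O. */
  static void my_end_io(struct bio *bio)
  {
  	if (bio->bi_status == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT)) {
  		bio->bi_opf &= ~REQ_NOWAIT;
  		/* defer the resubmission, e.g. to a workqueue; end_io
  		 * callbacks may run in interrupt context */
  		return;
  	}
  	bio_put(bio);
  }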
+5
block/blk-flush.c
···
 #include <linux/blkdev.h>
 #include <linux/gfp.h>
 #include <linux/blk-mq.h>
+#include <linux/lockdep.h>
 
 #include "blk.h"
 #include "blk-mq.h"
···
 	INIT_LIST_HEAD(&fq->flush_queue[1]);
 	INIT_LIST_HEAD(&fq->flush_data_in_flight);
 
+	lockdep_register_key(&fq->key);
+	lockdep_set_class(&fq->mq_flush_lock, &fq->key);
+
 	return fq;
 
 fail_rq:
···
 	if (!fq)
 		return;
 
+	lockdep_unregister_key(&fq->key);
 	kfree(fq->flush_rq);
 	kfree(fq);
 }
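Background: every blk_flush_queue previously shared the single static
lock class of mq_flush_lock, so lockdep could conflate the locks of
different queues (e.g. in stacked setups) and report a bogus deadlock.
Registering a per-instance key gives each flush queue its own class.
The same pattern in isolation, with made-up "foo" names (a sketch, not
code from this series):

  #include <linux/lockdep.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>

  struct foo {
  	spinlock_t lock;
  	struct lock_class_key key;	/* one lockdep class per instance */
  };

  static struct foo *foo_alloc(gfp_t gfp)
  {
  	struct foo *f = kzalloc(sizeof(*f), gfp);

  	if (!f)
  		return NULL;
  	spin_lock_init(&f->lock);
  	lockdep_register_key(&f->key);		/* dynamic keys must be registered */
  	lockdep_set_class(&f->lock, &f->key);	/* reclass this lock instance */
  	return f;
  }

  static void foo_free(struct foo *f)
  {
  	lockdep_unregister_key(&f->key);	/* before the key memory goes away */
  	kfree(f);
  }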
+8 -5
block/blk-iocost.c
···
 	return HRTIMER_NORESTART;
 }
 
-static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 {
 	struct ioc *ioc = iocg->ioc;
 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
···
 	/* clear or maintain depending on the overage */
 	if (time_before_eq64(vtime, now->vnow)) {
 		blkcg_clear_delay(blkg);
-		return;
+		return false;
 	}
 	if (!atomic_read(&blkg->use_delay) &&
 	    time_before_eq64(vtime, now->vnow + vmargin))
-		return;
+		return false;
 
 	/* use delay */
 	if (cost) {
···
 	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
 	if (hrtimer_is_queued(&iocg->delay_timer) &&
 	    abs(oexpires - expires) <= margin_ns / 4)
-		return;
+		return true;
 
 	hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
 			       margin_ns / 4, HRTIMER_MODE_ABS);
+	return true;
 }
 
 static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
···
 	 */
 	if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
 		atomic64_add(abs_cost, &iocg->abs_vdebt);
-		iocg_kick_delay(iocg, &now, cost);
+		if (iocg_kick_delay(iocg, &now, cost))
+			blkcg_schedule_throttle(rqos->q,
+					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
 		return;
 	}
+1 -1
block/blk-map.c
···
 		return 0;
 
 unmap_rq:
-	__blk_rq_unmap_user(bio);
+	blk_rq_unmap_user(bio);
 fail:
 	rq->bio = NULL;
 	return ret;
+1
block/blk.h
···
 	 * at the same time
 	 */
 	struct request *orig_rq;
+	struct lock_class_key key;
 	spinlock_t mq_flush_lock;
 };
+1 -1
block/bsg-lib.c
···
 	struct request *req = bd->rq;
 	struct bsg_set *bset =
 		container_of(q->tag_set, struct bsg_set, tag_set);
-	int sts = BLK_STS_IOERR;
+	blk_status_t sts = BLK_STS_IOERR;
 	int ret;
 
 	blk_mq_start_request(req);
+3 -3
drivers/block/nbd.c
···
 	mutex_unlock(&nbd->config_lock);
 	ret = wait_event_interruptible(config->recv_wq,
 				       atomic_read(&config->recv_threads) == 0);
-	if (ret) {
+	if (ret)
 		sock_shutdown(nbd);
-		flush_workqueue(nbd->recv_workq);
-	}
+	flush_workqueue(nbd->recv_workq);
+
 	mutex_lock(&nbd->config_lock);
 	nbd_bdev_reset(bdev);
 	/* user requested, ignore socket errors */
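The hunk narrows the error branch to the socket shutdown and makes the
flush_workqueue() unconditional, so receive work is always drained
before config_lock is retaken; the recv work itself takes that lock
during its cleanup, which is where the deadlock came from. A generic
sketch of the ordering, with made-up names:

  #include <linux/atomic.h>
  #include <linux/mutex.h>
  #include <linux/wait.h>
  #include <linux/workqueue.h>

  struct my_dev {			/* illustrative device, not nbd's */
  	struct mutex lock;		/* also taken by the workers */
  	wait_queue_head_t idle_wq;	/* woken as workers exit */
  	atomic_t nr_workers;
  	struct workqueue_struct *work_wq;
  };

  static void my_dev_force_shutdown(struct my_dev *dev)
  {
  	/* e.g. shut down the sockets the workers are blocked on */
  }

  static void my_dev_wind_down(struct my_dev *dev)
  {
  	mutex_unlock(&dev->lock);	/* workers may need this lock */
  	if (wait_event_interruptible(dev->idle_wq,
  				     atomic_read(&dev->nr_workers) == 0))
  		my_dev_force_shutdown(dev);	/* interrupted: kick workers out */
  	flush_workqueue(dev->work_wq);	/* always drain before relocking */
  	mutex_lock(&dev->lock);
  	/* ... cleanup: no worker can race with us here ... */
  }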
+7 -21
drivers/s390/block/dasd_eckd.c
···
 {
 	struct dasd_eckd_private *private = device->private;
 	int fcx_in_css, fcx_in_gneq, fcx_in_features;
-	int tpm, mdc;
+	unsigned int mdc;
+	int tpm;
 
 	if (dasd_nofcx)
 		return 0;
···
 		return 0;
 
 	mdc = ccw_device_get_mdc(device->cdev, 0);
-	if (mdc < 0) {
+	if (mdc == 0) {
 		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
 		return 0;
 	} else {
···
 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
 {
 	struct dasd_eckd_private *private = device->private;
-	int mdc;
+	unsigned int mdc;
 	u32 fcx_max_data;
 
 	if (private->fcx_max_data) {
 		mdc = ccw_device_get_mdc(device->cdev, lpm);
-		if ((mdc < 0)) {
+		if (mdc == 0) {
 			dev_warn(&device->cdev->dev,
 				 "Detecting the maximum data size for zHPF "
 				 "requests failed (rc=%d) for a new path %x\n",
···
 	dasd_free_block(device->block);
 	device->block = NULL;
 out_err1:
-	kfree(private->conf_data);
+	dasd_eckd_clear_conf_data(device);
 	kfree(device->private);
 	device->private = NULL;
 	return rc;
···
 static void dasd_eckd_uncheck_device(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private = device->private;
-	int i;
 
 	if (!private)
 		return;
···
 	private->sneq = NULL;
 	private->vdsneq = NULL;
 	private->gneq = NULL;
-	private->conf_len = 0;
-	for (i = 0; i < 8; i++) {
-		kfree(device->path[i].conf_data);
-		if ((__u8 *)device->path[i].conf_data ==
-		    private->conf_data) {
-			private->conf_data = NULL;
-			private->conf_len = 0;
-		}
-		device->path[i].conf_data = NULL;
-		device->path[i].cssid = 0;
-		device->path[i].ssid = 0;
-		device->path[i].chpid = 0;
-	}
-	kfree(private->conf_data);
-	private->conf_data = NULL;
+	dasd_eckd_clear_conf_data(device);
 }
 
 static struct dasd_ccw_req *
+1 -1
drivers/s390/block/dasd_fba.h
···
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Coypright IBM Corp. 1999, 2000
+ * Copyright IBM Corp. 1999, 2000
  *
  */
+1 -1
drivers/s390/block/dasd_proc.c
···
  *		    Carsten Otte <Cotte@de.ibm.com>
  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Coypright IBM Corp. 1999, 2002
+ * Copyright IBM Corp. 1999, 2002
  *
  * /proc interface for the dasd driver.
  *
+1 -1
drivers/s390/cio/device_ops.c
···
  * @mask: mask of paths to use
  *
  * Return the number of 64K-bytes blocks all paths at least support
- * for a transport command. Return values <= 0 indicate failures.
+ * for a transport command. Return value 0 indicates failure.
  */
 int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
 {
+1 -1
lib/sbitmap.c
···
 	if (!sbq_wait->sbq) {
 		sbq_wait->sbq = sbq;
 		atomic_inc(&sbq->ws_active);
+		add_wait_queue(&ws->wait, &sbq_wait->wait);
 	}
-	add_wait_queue(&ws->wait, &sbq_wait->wait);
 }
 EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
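The bug: kyber could call sbitmap_add_wait_queue() for a sbq_wait that
was already queued, and add_wait_queue() on an entry that is already on
a wait list corrupts the list. Moving the add under the !sbq_wait->sbq
check makes repeated calls harmless. A hypothetical caller showing the
resulting contract (my_touch_wait is illustrative only):

  #include <linux/sbitmap.h>

  static void my_touch_wait(struct sbitmap_queue *sbq,
  			  struct sbq_wait_state *ws,
  			  struct sbq_wait *wait)
  {
  	sbitmap_add_wait_queue(sbq, ws, wait);
  	/* Second call: wait->sbq is already set, so the entry is not
  	 * queued (or counted in ws_active) a second time. */
  	sbitmap_add_wait_queue(sbq, ws, wait);
  	sbitmap_del_wait_queue(wait);
  }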