Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-20190802' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
"Here's a small collection of fixes that should go into this series.
This contains:

- io_uring potential use-after-free fix (Jackie)

- loop regression fix (Jan)

- O_DIRECT fragmented bio regression fix (Damien)

- Mark Denis as the new floppy maintainer (Denis)

- ataflop switch fall-through annotation (Gustavo)

- libata zpodd overflow fix (Kees)

- libata ahci deferred probe fix (Miquel)

- nbd invalidation BUG_ON() fix (Munehisa)

- dasd endless loop fix (Stefan)"

* tag 'for-linus-20190802' of git://git.kernel.dk/linux-block:
s390/dasd: fix endless loop after read unit address configuration
block: Fix __blkdev_direct_IO() for bio fragments
MAINTAINERS: floppy: take over maintainership
nbd: replace kill_bdev() with __invalidate_device() again
ata: libahci: do not complain in case of deferred probe
io_uring: fix KASAN use after free in io_sq_wq_submit_work
loop: Fix mount(2) failure due to race with LOOP_SET_FD
libata: zpodd: Fix small read overflow in zpodd_get_mech_type()
ataflop: Mark expected switch fall-through

+101 -43
+2 -1
MAINTAINERS
··· 6322 6322 F: drivers/counter/ftm-quaddec.c 6323 6323 6324 6324 FLOPPY DRIVER 6325 - S: Orphan 6325 + M: Denis Efremov <efremov@linux.com> 6326 + S: Odd Fixes 6326 6327 L: linux-block@vger.kernel.org 6327 6328 F: drivers/block/floppy.c 6328 6329
+3
drivers/ata/libahci_platform.c
··· 338 338 hpriv->phys[port] = NULL; 339 339 rc = 0; 340 340 break; 341 + case -EPROBE_DEFER: 342 + /* Do not complain yet */ 343 + break; 341 344 342 345 default: 343 346 dev_err(dev,
+1 -1
drivers/ata/libata-zpodd.c
··· 56 56 unsigned int ret; 57 57 struct rm_feature_desc *desc; 58 58 struct ata_taskfile tf; 59 - static const char cdb[] = { GPCMD_GET_CONFIGURATION, 59 + static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION, 60 60 2, /* only 1 feature descriptor requested */ 61 61 0, 3, /* 3, removable medium feature */ 62 62 0, 0, 0,/* reserved */
+1
drivers/block/ataflop.c
··· 1726 1726 /* MSch: invalidate default_params */ 1727 1727 default_params[drive].blocks = 0; 1728 1728 set_capacity(floppy->disk, MAX_DISK_SIZE * 2); 1729 + /* Fall through */ 1729 1730 case FDFMTEND: 1730 1731 case FDFLUSH: 1731 1732 /* invalidate the buffer track to force a reread */
+9 -7
drivers/block/loop.c
··· 924 924 struct file *file; 925 925 struct inode *inode; 926 926 struct address_space *mapping; 927 + struct block_device *claimed_bdev = NULL; 927 928 int lo_flags = 0; 928 929 int error; 929 930 loff_t size; ··· 943 942 * here to avoid changing device under exclusive owner. 944 943 */ 945 944 if (!(mode & FMODE_EXCL)) { 946 - bdgrab(bdev); 947 - error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd); 948 - if (error) 945 + claimed_bdev = bd_start_claiming(bdev, loop_set_fd); 946 + if (IS_ERR(claimed_bdev)) { 947 + error = PTR_ERR(claimed_bdev); 949 948 goto out_putf; 949 + } 950 950 } 951 951 952 952 error = mutex_lock_killable(&loop_ctl_mutex); ··· 1017 1015 mutex_unlock(&loop_ctl_mutex); 1018 1016 if (partscan) 1019 1017 loop_reread_partitions(lo, bdev); 1020 - if (!(mode & FMODE_EXCL)) 1021 - blkdev_put(bdev, mode | FMODE_EXCL); 1018 + if (claimed_bdev) 1019 + bd_abort_claiming(bdev, claimed_bdev, loop_set_fd); 1022 1020 return 0; 1023 1021 1024 1022 out_unlock: 1025 1023 mutex_unlock(&loop_ctl_mutex); 1026 1024 out_bdev: 1027 - if (!(mode & FMODE_EXCL)) 1028 - blkdev_put(bdev, mode | FMODE_EXCL); 1025 + if (claimed_bdev) 1026 + bd_abort_claiming(bdev, claimed_bdev, loop_set_fd); 1029 1027 out_putf: 1030 1028 fput(file); 1031 1029 out:
+1 -1
drivers/block/nbd.c
··· 1231 1231 struct block_device *bdev) 1232 1232 { 1233 1233 sock_shutdown(nbd); 1234 - kill_bdev(bdev); 1234 + __invalidate_device(bdev, true); 1235 1235 nbd_bdev_reset(bdev); 1236 1236 if (test_and_clear_bit(NBD_HAS_CONFIG_REF, 1237 1237 &nbd->config->runtime_flags))
+16 -6
drivers/s390/block/dasd_alias.c
··· 383 383 char msg_format; 384 384 char msg_no; 385 385 386 + /* 387 + * intrc values ENODEV, ENOLINK and EPERM 388 + * will be obtained from sleep_on to indicate that no 389 + * IO operation can be started 390 + */ 391 + if (cqr->intrc == -ENODEV) 392 + return 1; 393 + 394 + if (cqr->intrc == -ENOLINK) 395 + return 1; 396 + 397 + if (cqr->intrc == -EPERM) 398 + return 1; 399 + 386 400 sense = dasd_get_sense(&cqr->irb); 387 401 if (!sense) 388 402 return 0; ··· 461 447 lcu->flags &= ~NEED_UAC_UPDATE; 462 448 spin_unlock_irqrestore(&lcu->lock, flags); 463 449 464 - do { 465 - rc = dasd_sleep_on(cqr); 466 - if (rc && suborder_not_supported(cqr)) 467 - return -EOPNOTSUPP; 468 - } while (rc && (cqr->retries > 0)); 469 - if (rc) { 450 + rc = dasd_sleep_on(cqr); 451 + if (rc && !suborder_not_supported(cqr)) { 470 452 spin_lock_irqsave(&lcu->lock, flags); 471 453 lcu->flags |= NEED_UAC_UPDATE; 472 454 spin_unlock_irqrestore(&lcu->lock, flags);
+60 -26
fs/block_dev.c
··· 439 439 ret = -EAGAIN; 440 440 goto error; 441 441 } 442 + ret = dio->size; 442 443 443 444 if (polled) 444 445 WRITE_ONCE(iocb->ki_cookie, qc); ··· 466 465 ret = -EAGAIN; 467 466 goto error; 468 467 } 469 - ret += bio->bi_iter.bi_size; 468 + ret = dio->size; 470 469 471 470 bio = bio_alloc(gfp, nr_pages); 472 471 if (!bio) { ··· 1182 1181 * Pointer to the block device containing @bdev on success, ERR_PTR() 1183 1182 * value on failure. 1184 1183 */ 1185 - static struct block_device *bd_start_claiming(struct block_device *bdev, 1186 - void *holder) 1184 + struct block_device *bd_start_claiming(struct block_device *bdev, void *holder) 1187 1185 { 1188 1186 struct gendisk *disk; 1189 1187 struct block_device *whole; ··· 1229 1229 return ERR_PTR(err); 1230 1230 } 1231 1231 } 1232 + EXPORT_SYMBOL(bd_start_claiming); 1233 + 1234 + static void bd_clear_claiming(struct block_device *whole, void *holder) 1235 + { 1236 + lockdep_assert_held(&bdev_lock); 1237 + /* tell others that we're done */ 1238 + BUG_ON(whole->bd_claiming != holder); 1239 + whole->bd_claiming = NULL; 1240 + wake_up_bit(&whole->bd_claiming, 0); 1241 + } 1242 + 1243 + /** 1244 + * bd_finish_claiming - finish claiming of a block device 1245 + * @bdev: block device of interest 1246 + * @whole: whole block device (returned from bd_start_claiming()) 1247 + * @holder: holder that has claimed @bdev 1248 + * 1249 + * Finish exclusive open of a block device. Mark the device as exclusively 1250 + * open by the holder and wake up all waiters for exclusive open to finish. 
1251 + */ 1252 + void bd_finish_claiming(struct block_device *bdev, struct block_device *whole, 1253 + void *holder) 1254 + { 1255 + spin_lock(&bdev_lock); 1256 + BUG_ON(!bd_may_claim(bdev, whole, holder)); 1257 + /* 1258 + * Note that for a whole device bd_holders will be incremented twice, 1259 + * and bd_holder will be set to bd_may_claim before being set to holder 1260 + */ 1261 + whole->bd_holders++; 1262 + whole->bd_holder = bd_may_claim; 1263 + bdev->bd_holders++; 1264 + bdev->bd_holder = holder; 1265 + bd_clear_claiming(whole, holder); 1266 + spin_unlock(&bdev_lock); 1267 + } 1268 + EXPORT_SYMBOL(bd_finish_claiming); 1269 + 1270 + /** 1271 + * bd_abort_claiming - abort claiming of a block device 1272 + * @bdev: block device of interest 1273 + * @whole: whole block device (returned from bd_start_claiming()) 1274 + * @holder: holder that has claimed @bdev 1275 + * 1276 + * Abort claiming of a block device when the exclusive open failed. This can be 1277 + * also used when exclusive open is not actually desired and we just needed 1278 + * to block other exclusive openers for a while. 
1279 + */ 1280 + void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, 1281 + void *holder) 1282 + { 1283 + spin_lock(&bdev_lock); 1284 + bd_clear_claiming(whole, holder); 1285 + spin_unlock(&bdev_lock); 1286 + } 1287 + EXPORT_SYMBOL(bd_abort_claiming); 1232 1288 1233 1289 #ifdef CONFIG_SYSFS 1234 1290 struct bd_holder_disk { ··· 1754 1698 1755 1699 /* finish claiming */ 1756 1700 mutex_lock(&bdev->bd_mutex); 1757 - spin_lock(&bdev_lock); 1758 - 1759 - if (!res) { 1760 - BUG_ON(!bd_may_claim(bdev, whole, holder)); 1761 - /* 1762 - * Note that for a whole device bd_holders 1763 - * will be incremented twice, and bd_holder 1764 - * will be set to bd_may_claim before being 1765 - * set to holder 1766 - */ 1767 - whole->bd_holders++; 1768 - whole->bd_holder = bd_may_claim; 1769 - bdev->bd_holders++; 1770 - bdev->bd_holder = holder; 1771 - } 1772 - 1773 - /* tell others that we're done */ 1774 - BUG_ON(whole->bd_claiming != holder); 1775 - whole->bd_claiming = NULL; 1776 - wake_up_bit(&whole->bd_claiming, 0); 1777 - 1778 - spin_unlock(&bdev_lock); 1779 - 1701 + bd_finish_claiming(bdev, whole, holder); 1780 1702 /* 1781 1703 * Block event polling for write claims if requested. Any 1782 1704 * write holder makes the write_holder state stick until
+2 -1
fs/io_uring.c
··· 1838 1838 do { 1839 1839 struct sqe_submit *s = &req->submit; 1840 1840 const struct io_uring_sqe *sqe = s->sqe; 1841 + unsigned int flags = req->flags; 1841 1842 1842 1843 /* Ensure we clear previously set non-block flag */ 1843 1844 req->rw.ki_flags &= ~IOCB_NOWAIT; ··· 1884 1883 kfree(sqe); 1885 1884 1886 1885 /* req from defer and link list needn't decrease async cnt */ 1887 - if (req->flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE)) 1886 + if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE)) 1888 1887 goto out; 1889 1888 1890 1889 if (!async_list)
+6
include/linux/fs.h
··· 2598 2598 void *holder); 2599 2599 extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, 2600 2600 void *holder); 2601 + extern struct block_device *bd_start_claiming(struct block_device *bdev, 2602 + void *holder); 2603 + extern void bd_finish_claiming(struct block_device *bdev, 2604 + struct block_device *whole, void *holder); 2605 + extern void bd_abort_claiming(struct block_device *bdev, 2606 + struct block_device *whole, void *holder); 2601 2607 extern void blkdev_put(struct block_device *bdev, fmode_t mode); 2602 2608 extern int __blkdev_reread_part(struct block_device *bdev); 2603 2609 extern int blkdev_reread_part(struct block_device *bdev);