Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: remove i_bdev

Switch the block device lookup interfaces to directly work with a dev_t
so that struct block_device references are only acquired by the
blkdev_get variants (and the blk-cgroup special case). This means that
we now don't need an extra reference in the inode and can generally
simplify handling of struct block_device to keep the lookups contained
in the core block layer code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Coly Li <colyli@suse.de> [bcache]
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig; committed by Jens Axboe.
4e7b5671 7918f0f6

+121 -252
+1 -2
block/ioctl.c
··· 590 590 { 591 591 int ret; 592 592 void __user *argp = compat_ptr(arg); 593 - struct inode *inode = file->f_mapping->host; 594 - struct block_device *bdev = inode->i_bdev; 593 + struct block_device *bdev = I_BDEV(file->f_mapping->host); 595 594 struct gendisk *disk = bdev->bd_disk; 596 595 fmode_t mode = file->f_mode; 597 596 loff_t size;
+3 -5
drivers/block/loop.c
··· 675 675 while (is_loop_device(f)) { 676 676 struct loop_device *l; 677 677 678 - if (f->f_mapping->host->i_bdev == bdev) 678 + if (f->f_mapping->host->i_rdev == bdev->bd_dev) 679 679 return -EBADF; 680 680 681 - l = f->f_mapping->host->i_bdev->bd_disk->private_data; 681 + l = I_BDEV(f->f_mapping->host)->bd_disk->private_data; 682 682 if (l->lo_state != Lo_bound) { 683 683 return -EINVAL; 684 684 } ··· 885 885 * file-backed loop devices: discarded regions read back as zero. 886 886 */ 887 887 if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) { 888 - struct request_queue *backingq; 889 - 890 - backingq = bdev_get_queue(inode->i_bdev); 888 + struct request_queue *backingq = bdev_get_queue(I_BDEV(inode)); 891 889 892 890 max_discard_sectors = backingq->limits.max_write_zeroes_sectors; 893 891 granularity = backingq->limits.discard_granularity ?:
+11 -9
drivers/md/bcache/super.c
··· 2380 2380 kobj_attribute_write(register_quiet, register_bcache); 2381 2381 kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup); 2382 2382 2383 - static bool bch_is_open_backing(struct block_device *bdev) 2383 + static bool bch_is_open_backing(dev_t dev) 2384 2384 { 2385 2385 struct cache_set *c, *tc; 2386 2386 struct cached_dev *dc, *t; 2387 2387 2388 2388 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) 2389 2389 list_for_each_entry_safe(dc, t, &c->cached_devs, list) 2390 - if (dc->bdev == bdev) 2390 + if (dc->bdev->bd_dev == dev) 2391 2391 return true; 2392 2392 list_for_each_entry_safe(dc, t, &uncached_devices, list) 2393 - if (dc->bdev == bdev) 2393 + if (dc->bdev->bd_dev == dev) 2394 2394 return true; 2395 2395 return false; 2396 2396 } 2397 2397 2398 - static bool bch_is_open_cache(struct block_device *bdev) 2398 + static bool bch_is_open_cache(dev_t dev) 2399 2399 { 2400 2400 struct cache_set *c, *tc; 2401 2401 2402 2402 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { 2403 2403 struct cache *ca = c->cache; 2404 2404 2405 - if (ca->bdev == bdev) 2405 + if (ca->bdev->bd_dev == dev) 2406 2406 return true; 2407 2407 } 2408 2408 2409 2409 return false; 2410 2410 } 2411 2411 2412 - static bool bch_is_open(struct block_device *bdev) 2412 + static bool bch_is_open(dev_t dev) 2413 2413 { 2414 - return bch_is_open_cache(bdev) || bch_is_open_backing(bdev); 2414 + return bch_is_open_cache(dev) || bch_is_open_backing(dev); 2415 2415 } 2416 2416 2417 2417 struct async_reg_args { ··· 2535 2535 sb); 2536 2536 if (IS_ERR(bdev)) { 2537 2537 if (bdev == ERR_PTR(-EBUSY)) { 2538 - bdev = lookup_bdev(strim(path)); 2538 + dev_t dev; 2539 + 2539 2540 mutex_lock(&bch_register_lock); 2540 - if (!IS_ERR(bdev) && bch_is_open(bdev)) 2541 + if (lookup_bdev(strim(path), &dev) == 0 && 2542 + bch_is_open(dev)) 2541 2543 err = "device already registered"; 2542 2544 else 2543 2545 err = "device busy";
+1 -8
drivers/md/dm-table.c
··· 348 348 dev_t dm_get_dev_t(const char *path) 349 349 { 350 350 dev_t dev; 351 - struct block_device *bdev; 352 351 353 - bdev = lookup_bdev(path); 354 - if (IS_ERR(bdev)) 352 + if (lookup_bdev(path, &dev)) 355 353 dev = name_to_dev_t(path); 356 - else { 357 - dev = bdev->bd_dev; 358 - bdput(bdev); 359 - } 360 - 361 354 return dev; 362 355 } 363 356 EXPORT_SYMBOL_GPL(dm_get_dev_t);
+6 -11
drivers/mtd/mtdsuper.c
··· 120 120 struct fs_context *fc)) 121 121 { 122 122 #ifdef CONFIG_BLOCK 123 - struct block_device *bdev; 124 - int ret, major; 123 + dev_t dev; 124 + int ret; 125 125 #endif 126 126 int mtdnr; 127 127 ··· 169 169 /* try the old way - the hack where we allowed users to mount 170 170 * /dev/mtdblock$(n) but didn't actually _use_ the blockdev 171 171 */ 172 - bdev = lookup_bdev(fc->source); 173 - if (IS_ERR(bdev)) { 174 - ret = PTR_ERR(bdev); 172 + ret = lookup_bdev(fc->source, &dev); 173 + if (ret) { 175 174 errorf(fc, "MTD: Couldn't look up '%s': %d", fc->source, ret); 176 175 return ret; 177 176 } 178 177 pr_debug("MTDSB: lookup_bdev() returned 0\n"); 179 178 180 - major = MAJOR(bdev->bd_dev); 181 - mtdnr = MINOR(bdev->bd_dev); 182 - bdput(bdev); 183 - 184 - if (major == MTD_BLOCK_MAJOR) 185 - return mtd_get_sb_by_nr(fc, mtdnr, fill_super); 179 + if (MAJOR(dev) == MTD_BLOCK_MAJOR) 180 + return mtd_get_sb_by_nr(fc, MINOR(dev), fill_super); 186 181 187 182 #endif /* CONFIG_BLOCK */ 188 183
+3 -3
drivers/target/target_core_file.c
··· 133 133 */ 134 134 inode = file->f_mapping->host; 135 135 if (S_ISBLK(inode->i_mode)) { 136 - struct request_queue *q = bdev_get_queue(inode->i_bdev); 136 + struct request_queue *q = bdev_get_queue(I_BDEV(inode)); 137 137 unsigned long long dev_size; 138 138 139 - fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); 139 + fd_dev->fd_block_size = bdev_logical_block_size(I_BDEV(inode)); 140 140 /* 141 141 * Determine the number of bytes from i_size_read() minus 142 142 * one (1) logical sector from underlying struct block_device ··· 559 559 560 560 if (S_ISBLK(inode->i_mode)) { 561 561 /* The backend is block device, use discard */ 562 - struct block_device *bdev = inode->i_bdev; 562 + struct block_device *bdev = I_BDEV(inode); 563 563 struct se_device *dev = cmd->se_dev; 564 564 565 565 ret = blkdev_issue_discard(bdev,
+4 -4
drivers/usb/gadget/function/storage_common.c
··· 204 204 if (!(filp->f_mode & FMODE_WRITE)) 205 205 ro = 1; 206 206 207 - inode = file_inode(filp); 207 + inode = filp->f_mapping->host; 208 208 if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { 209 209 LINFO(curlun, "invalid file type: %s\n", filename); 210 210 goto out; ··· 221 221 if (!(filp->f_mode & FMODE_CAN_WRITE)) 222 222 ro = 1; 223 223 224 - size = i_size_read(inode->i_mapping->host); 224 + size = i_size_read(inode); 225 225 if (size < 0) { 226 226 LINFO(curlun, "unable to find file size: %s\n", filename); 227 227 rc = (int) size; ··· 231 231 if (curlun->cdrom) { 232 232 blksize = 2048; 233 233 blkbits = 11; 234 - } else if (inode->i_bdev) { 235 - blksize = bdev_logical_block_size(inode->i_bdev); 234 + } else if (S_ISBLK(inode->i_mode)) { 235 + blksize = bdev_logical_block_size(I_BDEV(inode)); 236 236 blkbits = blksize_bits(blksize); 237 237 } else { 238 238 blksize = 512;
+49 -147
fs/block_dev.c
··· 883 883 bdev->bd_dev = dev; 884 884 inode->i_mode = S_IFBLK; 885 885 inode->i_rdev = dev; 886 - inode->i_bdev = bdev; 887 886 inode->i_data.a_ops = &def_blk_aops; 888 887 mapping_set_gfp_mask(&inode->i_data, GFP_USER); 889 888 unlock_new_inode(inode); ··· 927 928 { 928 929 iput(bdev->bd_inode); 929 930 } 930 - 931 931 EXPORT_SYMBOL(bdput); 932 932 933 - static struct block_device *bd_acquire(struct inode *inode) 934 - { 935 - struct block_device *bdev; 936 - 937 - spin_lock(&bdev_lock); 938 - bdev = inode->i_bdev; 939 - if (bdev && !inode_unhashed(bdev->bd_inode)) { 940 - bdgrab(bdev); 941 - spin_unlock(&bdev_lock); 942 - return bdev; 943 - } 944 - spin_unlock(&bdev_lock); 945 - 946 - /* 947 - * i_bdev references block device inode that was already shut down 948 - * (corresponding device got removed). Remove the reference and look 949 - * up block device inode again just in case new device got 950 - * reestablished under the same device number. 951 - */ 952 - if (bdev) 953 - bd_forget(inode); 954 - 955 - bdev = bdget(inode->i_rdev); 956 - if (bdev) { 957 - spin_lock(&bdev_lock); 958 - if (!inode->i_bdev) { 959 - /* 960 - * We take an additional reference to bd_inode, 961 - * and it's released in clear_inode() of inode. 962 - * So, we can access it via ->i_mapping always 963 - * without igrab(). 
964 - */ 965 - bdgrab(bdev); 966 - inode->i_bdev = bdev; 967 - inode->i_mapping = bdev->bd_inode->i_mapping; 968 - } 969 - spin_unlock(&bdev_lock); 970 - } 971 - return bdev; 972 - } 973 - 974 - /* Call when you free inode */ 975 - 976 - void bd_forget(struct inode *inode) 977 - { 978 - struct block_device *bdev = NULL; 979 - 980 - spin_lock(&bdev_lock); 981 - if (!sb_is_blkdev_sb(inode->i_sb)) 982 - bdev = inode->i_bdev; 983 - inode->i_bdev = NULL; 984 - inode->i_mapping = &inode->i_data; 985 - spin_unlock(&bdev_lock); 986 - 987 - if (bdev) 988 - bdput(bdev); 989 - } 990 - 991 933 /** 992 934 * bd_may_claim - test whether a block device can be claimed 993 935 * @bdev: block device of interest ··· 1437 1497 } 1438 1498 1439 1499 /** 1440 - * blkdev_get - open a block device 1441 - * @bdev: block_device to open 1500 + * blkdev_get_by_dev - open a block device by device number 1501 + * @dev: device number of block device to open 1442 1502 * @mode: FMODE_* mask 1443 1503 * @holder: exclusive holder identifier 1444 1504 * 1445 - * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is 1446 - * open with exclusive access. Specifying %FMODE_EXCL with %NULL 1447 - * @holder is invalid. Exclusive opens may nest for the same @holder. 1505 + * Open the block device described by device number @dev. If @mode includes 1506 + * %FMODE_EXCL, the block device is opened with exclusive access. Specifying 1507 + * %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may nest for 1508 + * the same @holder. 1448 1509 * 1449 - * On success, the reference count of @bdev is unchanged. On failure, 1450 - * @bdev is put. 1510 + * Use this interface ONLY if you really do not have anything better - i.e. when 1511 + * you are behind a truly sucky interface and all you are given is a device 1512 + * number. Everything else should use blkdev_get_by_path(). 1451 1513 * 1452 1514 * CONTEXT: 1453 1515 * Might sleep. 
1454 1516 * 1455 1517 * RETURNS: 1456 - * 0 on success, -errno on failure. 1518 + * Reference to the block_device on success, ERR_PTR(-errno) on failure. 1457 1519 */ 1458 - static int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) 1520 + struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) 1459 1521 { 1460 1522 struct block_device *claiming; 1461 1523 bool unblock_events = true; 1524 + struct block_device *bdev; 1462 1525 struct gendisk *disk; 1463 1526 int partno; 1464 1527 int ret; 1465 1528 1466 1529 ret = devcgroup_check_permission(DEVCG_DEV_BLOCK, 1467 - imajor(bdev->bd_inode), iminor(bdev->bd_inode), 1530 + MAJOR(dev), MINOR(dev), 1468 1531 ((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) | 1469 1532 ((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0)); 1470 1533 if (ret) 1471 - goto bdput; 1534 + return ERR_PTR(ret); 1535 + 1536 + bdev = bdget(dev); 1537 + if (!bdev) 1538 + return ERR_PTR(-ENOMEM); 1472 1539 1473 1540 /* 1474 1541 * If we lost a race with 'disk' being deleted, try again. See md.c. ··· 1536 1589 if (ret == -ERESTARTSYS) 1537 1590 goto retry; 1538 1591 bdput: 1539 - if (ret) 1592 + if (ret) { 1540 1593 bdput(bdev); 1541 - return ret; 1594 + return ERR_PTR(ret); 1595 + } 1596 + return bdev; 1542 1597 } 1598 + EXPORT_SYMBOL(blkdev_get_by_dev); 1543 1599 1544 1600 /** 1545 1601 * blkdev_get_by_path - open a block device by name ··· 1550 1600 * @mode: FMODE_* mask 1551 1601 * @holder: exclusive holder identifier 1552 1602 * 1553 - * Open the blockdevice described by the device file at @path. @mode 1554 - * and @holder are identical to blkdev_get(). 1555 - * 1556 - * On success, the returned block_device has reference count of one. 1603 + * Open the block device described by the device file at @path. If @mode 1604 + * includes %FMODE_EXCL, the block device is opened with exclusive access. 1605 + * Specifying %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may 1606 + * nest for the same @holder. 
1557 1607 * 1558 1608 * CONTEXT: 1559 1609 * Might sleep. 1560 1610 * 1561 1611 * RETURNS: 1562 - * Pointer to block_device on success, ERR_PTR(-errno) on failure. 1612 + * Reference to the block_device on success, ERR_PTR(-errno) on failure. 1563 1613 */ 1564 1614 struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, 1565 1615 void *holder) 1566 1616 { 1567 1617 struct block_device *bdev; 1568 - int err; 1618 + dev_t dev; 1619 + int error; 1569 1620 1570 - bdev = lookup_bdev(path); 1571 - if (IS_ERR(bdev)) 1572 - return bdev; 1621 + error = lookup_bdev(path, &dev); 1622 + if (error) 1623 + return ERR_PTR(error); 1573 1624 1574 - err = blkdev_get(bdev, mode, holder); 1575 - if (err) 1576 - return ERR_PTR(err); 1577 - 1578 - if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { 1625 + bdev = blkdev_get_by_dev(dev, mode, holder); 1626 + if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { 1579 1627 blkdev_put(bdev, mode); 1580 1628 return ERR_PTR(-EACCES); 1581 1629 } ··· 1581 1633 return bdev; 1582 1634 } 1583 1635 EXPORT_SYMBOL(blkdev_get_by_path); 1584 - 1585 - /** 1586 - * blkdev_get_by_dev - open a block device by device number 1587 - * @dev: device number of block device to open 1588 - * @mode: FMODE_* mask 1589 - * @holder: exclusive holder identifier 1590 - * 1591 - * Open the blockdevice described by device number @dev. @mode and 1592 - * @holder are identical to blkdev_get(). 1593 - * 1594 - * Use it ONLY if you really do not have anything better - i.e. when 1595 - * you are behind a truly sucky interface and all you are given is a 1596 - * device number. _Never_ to be used for internal purposes. If you 1597 - * ever need it - reconsider your API. 1598 - * 1599 - * On success, the returned block_device has reference count of one. 1600 - * 1601 - * CONTEXT: 1602 - * Might sleep. 1603 - * 1604 - * RETURNS: 1605 - * Pointer to block_device on success, ERR_PTR(-errno) on failure. 
1606 - */ 1607 - struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) 1608 - { 1609 - struct block_device *bdev; 1610 - int err; 1611 - 1612 - bdev = bdget(dev); 1613 - if (!bdev) 1614 - return ERR_PTR(-ENOMEM); 1615 - 1616 - err = blkdev_get(bdev, mode, holder); 1617 - if (err) 1618 - return ERR_PTR(err); 1619 - 1620 - return bdev; 1621 - } 1622 - EXPORT_SYMBOL(blkdev_get_by_dev); 1623 1636 1624 1637 static int blkdev_open(struct inode * inode, struct file * filp) 1625 1638 { ··· 1603 1694 if ((filp->f_flags & O_ACCMODE) == 3) 1604 1695 filp->f_mode |= FMODE_WRITE_IOCTL; 1605 1696 1606 - bdev = bd_acquire(inode); 1607 - if (bdev == NULL) 1608 - return -ENOMEM; 1609 - 1697 + bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp); 1698 + if (IS_ERR(bdev)) 1699 + return PTR_ERR(bdev); 1610 1700 filp->f_mapping = bdev->bd_inode->i_mapping; 1611 1701 filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping); 1612 - 1613 - return blkdev_get(bdev, filp->f_mode, filp); 1702 + return 0; 1614 1703 } 1615 1704 1616 1705 static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) ··· 1917 2010 * namespace if possible and return it. Return ERR_PTR(error) 1918 2011 * otherwise. 
1919 2012 */ 1920 - struct block_device *lookup_bdev(const char *pathname) 2013 + int lookup_bdev(const char *pathname, dev_t *dev) 1921 2014 { 1922 - struct block_device *bdev; 1923 2015 struct inode *inode; 1924 2016 struct path path; 1925 2017 int error; 1926 2018 1927 2019 if (!pathname || !*pathname) 1928 - return ERR_PTR(-EINVAL); 2020 + return -EINVAL; 1929 2021 1930 2022 error = kern_path(pathname, LOOKUP_FOLLOW, &path); 1931 2023 if (error) 1932 - return ERR_PTR(error); 2024 + return error; 1933 2025 1934 2026 inode = d_backing_inode(path.dentry); 1935 2027 error = -ENOTBLK; 1936 2028 if (!S_ISBLK(inode->i_mode)) 1937 - goto fail; 2029 + goto out_path_put; 1938 2030 error = -EACCES; 1939 2031 if (!may_open_dev(&path)) 1940 - goto fail; 1941 - error = -ENOMEM; 1942 - bdev = bd_acquire(inode); 1943 - if (!bdev) 1944 - goto fail; 1945 - out: 2032 + goto out_path_put; 2033 + 2034 + *dev = inode->i_rdev; 2035 + error = 0; 2036 + out_path_put: 1946 2037 path_put(&path); 1947 - return bdev; 1948 - fail: 1949 - bdev = ERR_PTR(error); 1950 - goto out; 2038 + return error; 1951 2039 } 1952 2040 EXPORT_SYMBOL(lookup_bdev); 1953 2041
+6 -7
fs/btrfs/volumes.c
··· 929 929 * make sure it's the same device if the device is mounted 930 930 */ 931 931 if (device->bdev) { 932 - struct block_device *path_bdev; 932 + int error; 933 + dev_t path_dev; 933 934 934 - path_bdev = lookup_bdev(path); 935 - if (IS_ERR(path_bdev)) { 935 + error = lookup_bdev(path, &path_dev); 936 + if (error) { 936 937 mutex_unlock(&fs_devices->device_list_mutex); 937 - return ERR_CAST(path_bdev); 938 + return ERR_PTR(error); 938 939 } 939 940 940 - if (device->bdev != path_bdev) { 941 - bdput(path_bdev); 941 + if (device->bdev->bd_dev != path_dev) { 942 942 mutex_unlock(&fs_devices->device_list_mutex); 943 943 btrfs_warn_in_rcu(device->fs_info, 944 944 "duplicate device %s devid %llu generation %llu scanned by %s (%d)", ··· 947 947 task_pid_nr(current)); 948 948 return ERR_PTR(-EEXIST); 949 949 } 950 - bdput(path_bdev); 951 950 btrfs_info_in_rcu(device->fs_info, 952 951 "devid %llu device path %s changed to %s scanned by %s (%d)", 953 952 devid, rcu_str_deref(device->name),
-3
fs/inode.c
··· 155 155 inode->i_bytes = 0; 156 156 inode->i_generation = 0; 157 157 inode->i_pipe = NULL; 158 - inode->i_bdev = NULL; 159 158 inode->i_cdev = NULL; 160 159 inode->i_link = NULL; 161 160 inode->i_dir_seq = 0; ··· 579 580 truncate_inode_pages_final(&inode->i_data); 580 581 clear_inode(inode); 581 582 } 582 - if (S_ISBLK(inode->i_mode) && inode->i_bdev) 583 - bd_forget(inode); 584 583 if (S_ISCHR(inode->i_mode) && inode->i_cdev) 585 584 cd_forget(inode); 586 585
+1 -6
fs/internal.h
··· 25 25 extern int __sync_blockdev(struct block_device *bdev, int wait); 26 26 void iterate_bdevs(void (*)(struct block_device *, void *), void *); 27 27 void emergency_thaw_bdev(struct super_block *sb); 28 - void bd_forget(struct inode *inode); 29 28 #else 30 29 static inline void bdev_cache_init(void) 31 30 { ··· 41 42 static inline int emergency_thaw_bdev(struct super_block *sb) 42 43 { 43 44 return 0; 44 - } 45 - static inline void bd_forget(struct inode *inode) 46 - { 47 45 } 48 46 #endif /* CONFIG_BLOCK */ 49 47 ··· 110 114 */ 111 115 extern int reconfigure_super(struct fs_context *); 112 116 extern bool trylock_super(struct super_block *sb); 113 - struct super_block *__get_super(struct block_device *bdev, bool excl); 114 - extern struct super_block *user_get_super(dev_t); 117 + struct super_block *user_get_super(dev_t, bool excl); 115 118 void put_super(struct super_block *sb); 116 119 extern bool mount_capable(struct fs_context *); 117 120
+4 -6
fs/io_uring.c
··· 2716 2716 2717 2717 static bool io_bdev_nowait(struct block_device *bdev) 2718 2718 { 2719 - #ifdef CONFIG_BLOCK 2720 2719 return !bdev || blk_queue_nowait(bdev_get_queue(bdev)); 2721 - #else 2722 - return true; 2723 - #endif 2724 2720 } 2725 2721 2726 2722 /* ··· 2729 2733 umode_t mode = file_inode(file)->i_mode; 2730 2734 2731 2735 if (S_ISBLK(mode)) { 2732 - if (io_bdev_nowait(file->f_inode->i_bdev)) 2736 + if (IS_ENABLED(CONFIG_BLOCK) && 2737 + io_bdev_nowait(I_BDEV(file->f_mapping->host))) 2733 2738 return true; 2734 2739 return false; 2735 2740 } 2736 2741 if (S_ISCHR(mode) || S_ISSOCK(mode)) 2737 2742 return true; 2738 2743 if (S_ISREG(mode)) { 2739 - if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) && 2744 + if (IS_ENABLED(CONFIG_BLOCK) && 2745 + io_bdev_nowait(file->f_inode->i_sb->s_bdev) && 2740 2746 file->f_op != &io_uring_fops) 2741 2747 return true; 2742 2748 return false;
+2 -3
fs/pipe.c
··· 1342 1342 } 1343 1343 1344 1344 /* 1345 - * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same 1346 - * location, so checking ->i_pipe is not enough to verify that this is a 1347 - * pipe. 1345 + * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is 1346 + * not enough to verify that this is a pipe. 1348 1347 */ 1349 1348 struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice) 1350 1349 {
+9 -10
fs/quota/quota.c
··· 866 866 static struct super_block *quotactl_block(const char __user *special, int cmd) 867 867 { 868 868 #ifdef CONFIG_BLOCK 869 - struct block_device *bdev; 870 869 struct super_block *sb; 871 870 struct filename *tmp = getname(special); 872 871 bool excl = false, thawed = false; 872 + int error; 873 + dev_t dev; 873 874 874 875 if (IS_ERR(tmp)) 875 876 return ERR_CAST(tmp); 876 - bdev = lookup_bdev(tmp->name); 877 + error = lookup_bdev(tmp->name, &dev); 877 878 putname(tmp); 878 - if (IS_ERR(bdev)) 879 - return ERR_CAST(bdev); 879 + if (error) 880 + return ERR_PTR(error); 880 881 881 882 if (quotactl_cmd_onoff(cmd)) { 882 883 excl = true; ··· 887 886 } 888 887 889 888 retry: 890 - sb = __get_super(bdev, excl); 891 - if (thawed && sb && sb->s_writers.frozen != SB_UNFROZEN) { 889 + sb = user_get_super(dev, excl); 890 + if (!sb) 891 + return ERR_PTR(-ENODEV); 892 + if (thawed && sb->s_writers.frozen != SB_UNFROZEN) { 892 893 if (excl) 893 894 up_write(&sb->s_umount); 894 895 else ··· 900 897 put_super(sb); 901 898 goto retry; 902 899 } 903 - 904 - bdput(bdev); 905 - if (!sb) 906 - return ERR_PTR(-ENODEV); 907 900 return sb; 908 901 909 902 #else
+1 -1
fs/statfs.c
··· 235 235 236 236 static int vfs_ustat(dev_t dev, struct kstatfs *sbuf) 237 237 { 238 - struct super_block *s = user_get_super(dev); 238 + struct super_block *s = user_get_super(dev, false); 239 239 int err; 240 240 if (!s) 241 241 return -EINVAL;
+19 -25
fs/super.c
··· 740 740 741 741 EXPORT_SYMBOL(iterate_supers_type); 742 742 743 - struct super_block *__get_super(struct block_device *bdev, bool excl) 743 + /** 744 + * get_super - get the superblock of a device 745 + * @bdev: device to get the superblock for 746 + * 747 + * Scans the superblock list and finds the superblock of the file system 748 + * mounted on the device given. %NULL is returned if no match is found. 749 + */ 750 + struct super_block *get_super(struct block_device *bdev) 744 751 { 745 752 struct super_block *sb; 746 753 ··· 762 755 if (sb->s_bdev == bdev) { 763 756 sb->s_count++; 764 757 spin_unlock(&sb_lock); 765 - if (!excl) 766 - down_read(&sb->s_umount); 767 - else 768 - down_write(&sb->s_umount); 758 + down_read(&sb->s_umount); 769 759 /* still alive? */ 770 760 if (sb->s_root && (sb->s_flags & SB_BORN)) 771 761 return sb; 772 - if (!excl) 773 - up_read(&sb->s_umount); 774 - else 775 - up_write(&sb->s_umount); 762 + up_read(&sb->s_umount); 776 763 /* nope, got unmounted */ 777 764 spin_lock(&sb_lock); 778 765 __put_super(sb); ··· 776 775 spin_unlock(&sb_lock); 777 776 return NULL; 778 777 } 779 - 780 - /** 781 - * get_super - get the superblock of a device 782 - * @bdev: device to get the superblock for 783 - * 784 - * Scans the superblock list and finds the superblock of the file system 785 - * mounted on the device given. %NULL is returned if no match is found. 
786 - */ 787 - struct super_block *get_super(struct block_device *bdev) 788 - { 789 - return __get_super(bdev, false); 790 - } 791 - EXPORT_SYMBOL(get_super); 792 778 793 779 /** 794 780 * get_active_super - get an active reference to the superblock of a device ··· 808 820 return NULL; 809 821 } 810 822 811 - struct super_block *user_get_super(dev_t dev) 823 + struct super_block *user_get_super(dev_t dev, bool excl) 812 824 { 813 825 struct super_block *sb; 814 826 ··· 820 832 if (sb->s_dev == dev) { 821 833 sb->s_count++; 822 834 spin_unlock(&sb_lock); 823 - down_read(&sb->s_umount); 835 + if (excl) 836 + down_write(&sb->s_umount); 837 + else 838 + down_read(&sb->s_umount); 824 839 /* still alive? */ 825 840 if (sb->s_root && (sb->s_flags & SB_BORN)) 826 841 return sb; 827 - up_read(&sb->s_umount); 842 + if (excl) 843 + up_write(&sb->s_umount); 844 + else 845 + up_read(&sb->s_umount); 828 846 /* nope, got unmounted */ 829 847 spin_lock(&sb_lock); 830 848 __put_super(sb);
+1 -1
include/linux/blkdev.h
··· 1973 1973 int set_blocksize(struct block_device *bdev, int size); 1974 1974 1975 1975 const char *bdevname(struct block_device *bdev, char *buffer); 1976 - struct block_device *lookup_bdev(const char *); 1976 + int lookup_bdev(const char *pathname, dev_t *dev); 1977 1977 1978 1978 void blkdev_show(struct seq_file *seqf, off_t offset); 1979 1979
-1
include/linux/fs.h
··· 696 696 struct list_head i_devices; 697 697 union { 698 698 struct pipe_inode_info *i_pipe; 699 - struct block_device *i_bdev; 700 699 struct cdev *i_cdev; 701 700 char *i_link; 702 701 unsigned i_dir_seq;