Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'pull-bd_inode-1' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull bdev bd_inode updates from Al Viro:
"Replacement of bdev->bd_inode with sane(r) set of primitives by me and
Yu Kuai"

* tag 'pull-bd_inode-1' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
RIP ->bd_inode
dasd_format(): killing the last remaining user of ->bd_inode
nilfs_attach_log_writer(): use ->bd_mapping->host instead of ->bd_inode
block/bdev.c: use the knowledge of inode/bdev coallocation
gfs2: more obvious initializations of mapping->host
fs/buffer.c: massage the remaining users of ->bd_inode to ->bd_mapping
blk_ioctl_{discard,zeroout}(): we only want ->bd_inode->i_mapping here...
grow_dev_folio(): we only want ->bd_inode->i_mapping there
use ->bd_mapping instead of ->bd_inode->i_mapping
block_device: add a pointer to struct address_space (page cache of bdev)
missing helpers: bdev_unhash(), bdev_drop()
block: move two helpers into bdev.c
block2mtd: prevent direct access of bd_inode
dm-vdo: use bdev_nr_bytes(bdev) instead of i_size_read(bdev->bd_inode)
blkdev_write_iter(): saner way to get inode and bdev
bcachefs: remove dead function bdev_sectors()
ext4: remove block_device_ejected()
erofs_buf: store address_space instead of inode
erofs: switch erofs_bread() to passing offset instead of block number

+145 -157
+47 -19
block/bdev.c
··· 43 43 return container_of(inode, struct bdev_inode, vfs_inode); 44 44 } 45 45 46 + static inline struct inode *BD_INODE(struct block_device *bdev) 47 + { 48 + return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode; 49 + } 50 + 46 51 struct block_device *I_BDEV(struct inode *inode) 47 52 { 48 53 return &BDEV_I(inode)->bdev; ··· 62 57 63 58 static void bdev_write_inode(struct block_device *bdev) 64 59 { 65 - struct inode *inode = bdev->bd_inode; 60 + struct inode *inode = BD_INODE(bdev); 66 61 int ret; 67 62 68 63 spin_lock(&inode->i_lock); ··· 81 76 /* Kill _all_ buffers and pagecache , dirty or not.. */ 82 77 static void kill_bdev(struct block_device *bdev) 83 78 { 84 - struct address_space *mapping = bdev->bd_inode->i_mapping; 79 + struct address_space *mapping = bdev->bd_mapping; 85 80 86 81 if (mapping_empty(mapping)) 87 82 return; ··· 93 88 /* Invalidate clean unused buffers and pagecache. */ 94 89 void invalidate_bdev(struct block_device *bdev) 95 90 { 96 - struct address_space *mapping = bdev->bd_inode->i_mapping; 91 + struct address_space *mapping = bdev->bd_mapping; 97 92 98 93 if (mapping->nrpages) { 99 94 invalidate_bh_lrus(); ··· 121 116 goto invalidate; 122 117 } 123 118 124 - truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend); 119 + truncate_inode_pages_range(bdev->bd_mapping, lstart, lend); 125 120 if (!(mode & BLK_OPEN_EXCL)) 126 121 bd_abort_claiming(bdev, truncate_bdev_range); 127 122 return 0; ··· 131 126 * Someone else has handle exclusively open. Try invalidating instead. 132 127 * The 'end' argument is inclusive so the rounding is safe. 
133 128 */ 134 - return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping, 129 + return invalidate_inode_pages2_range(bdev->bd_mapping, 135 130 lstart >> PAGE_SHIFT, 136 131 lend >> PAGE_SHIFT); 137 132 } ··· 139 134 static void set_init_blocksize(struct block_device *bdev) 140 135 { 141 136 unsigned int bsize = bdev_logical_block_size(bdev); 142 - loff_t size = i_size_read(bdev->bd_inode); 137 + loff_t size = i_size_read(BD_INODE(bdev)); 143 138 144 139 while (bsize < PAGE_SIZE) { 145 140 if (size & bsize) 146 141 break; 147 142 bsize <<= 1; 148 143 } 149 - bdev->bd_inode->i_blkbits = blksize_bits(bsize); 144 + BD_INODE(bdev)->i_blkbits = blksize_bits(bsize); 150 145 } 151 146 152 147 int set_blocksize(struct file *file, int size) ··· 203 198 { 204 199 if (!bdev) 205 200 return 0; 206 - return filemap_flush(bdev->bd_inode->i_mapping); 201 + return filemap_flush(bdev->bd_mapping); 207 202 } 208 203 EXPORT_SYMBOL_GPL(sync_blockdev_nowait); 209 204 ··· 215 210 { 216 211 if (!bdev) 217 212 return 0; 218 - return filemap_write_and_wait(bdev->bd_inode->i_mapping); 213 + return filemap_write_and_wait(bdev->bd_mapping); 219 214 } 220 215 EXPORT_SYMBOL(sync_blockdev); 221 216 222 217 int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend) 223 218 { 224 - return filemap_write_and_wait_range(bdev->bd_inode->i_mapping, 219 + return filemap_write_and_wait_range(bdev->bd_mapping, 225 220 lstart, lend); 226 221 } 227 222 EXPORT_SYMBOL(sync_blockdev_range); ··· 423 418 spin_lock_init(&bdev->bd_size_lock); 424 419 mutex_init(&bdev->bd_holder_lock); 425 420 bdev->bd_partno = partno; 426 - bdev->bd_inode = inode; 421 + bdev->bd_mapping = &inode->i_data; 427 422 bdev->bd_queue = disk->queue; 428 423 if (partno) 429 424 bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio; ··· 441 436 void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors) 442 437 { 443 438 spin_lock(&bdev->bd_size_lock); 444 - i_size_write(bdev->bd_inode, (loff_t)sectors 
<< SECTOR_SHIFT); 439 + i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT); 445 440 bdev->bd_nr_sectors = sectors; 446 441 spin_unlock(&bdev->bd_size_lock); 447 442 } 448 443 449 444 void bdev_add(struct block_device *bdev, dev_t dev) 450 445 { 446 + struct inode *inode = BD_INODE(bdev); 451 447 if (bdev_stable_writes(bdev)) 452 - mapping_set_stable_writes(bdev->bd_inode->i_mapping); 448 + mapping_set_stable_writes(bdev->bd_mapping); 453 449 bdev->bd_dev = dev; 454 - bdev->bd_inode->i_rdev = dev; 455 - bdev->bd_inode->i_ino = dev; 456 - insert_inode_hash(bdev->bd_inode); 450 + inode->i_rdev = dev; 451 + inode->i_ino = dev; 452 + insert_inode_hash(inode); 453 + } 454 + 455 + void bdev_unhash(struct block_device *bdev) 456 + { 457 + remove_inode_hash(BD_INODE(bdev)); 458 + } 459 + 460 + void bdev_drop(struct block_device *bdev) 461 + { 462 + iput(BD_INODE(bdev)); 457 463 } 458 464 459 465 long nr_blockdev_pages(void) ··· 939 923 bdev_file->f_mode |= FMODE_NOWAIT; 940 924 if (mode & BLK_OPEN_RESTRICT_WRITES) 941 925 bdev_file->f_mode |= FMODE_WRITE_RESTRICTED; 942 - bdev_file->f_mapping = bdev->bd_inode->i_mapping; 926 + bdev_file->f_mapping = bdev->bd_mapping; 943 927 bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping); 944 928 bdev_file->private_data = holder; 945 929 ··· 1001 985 return ERR_PTR(-ENXIO); 1002 986 1003 987 flags = blk_to_file_flags(mode); 1004 - bdev_file = alloc_file_pseudo_noaccount(bdev->bd_inode, 988 + bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev), 1005 989 blockdev_mnt, "", flags | O_LARGEFILE, &def_blk_fops); 1006 990 if (IS_ERR(bdev_file)) { 1007 991 blkdev_put_no_open(bdev); 1008 992 return bdev_file; 1009 993 } 1010 - ihold(bdev->bd_inode); 994 + ihold(BD_INODE(bdev)); 1011 995 1012 996 ret = bdev_open(bdev, mode, holder, hops, bdev_file); 1013 997 if (ret) { ··· 1281 1265 1282 1266 blkdev_put_no_open(bdev); 1283 1267 } 1268 + 1269 + bool disk_live(struct gendisk *disk) 1270 + { 1271 + return 
!inode_unhashed(BD_INODE(disk->part0)); 1272 + } 1273 + EXPORT_SYMBOL_GPL(disk_live); 1274 + 1275 + unsigned int block_size(struct block_device *bdev) 1276 + { 1277 + return 1 << BD_INODE(bdev)->i_blkbits; 1278 + } 1279 + EXPORT_SYMBOL_GPL(block_size); 1284 1280 1285 1281 static int __init setup_bdev_allow_write_mounted(char *str) 1286 1282 {
+2 -2
block/blk-zoned.c
··· 416 416 op = REQ_OP_ZONE_RESET; 417 417 418 418 /* Invalidate the page cache, including dirty pages. */ 419 - filemap_invalidate_lock(bdev->bd_inode->i_mapping); 419 + filemap_invalidate_lock(bdev->bd_mapping); 420 420 ret = blkdev_truncate_zone_range(bdev, mode, &zrange); 421 421 if (ret) 422 422 goto fail; ··· 438 438 439 439 fail: 440 440 if (cmd == BLKRESETZONE) 441 - filemap_invalidate_unlock(bdev->bd_inode->i_mapping); 441 + filemap_invalidate_unlock(bdev->bd_mapping); 442 442 443 443 return ret; 444 444 }
+2
block/blk.h
··· 499 499 500 500 struct block_device *bdev_alloc(struct gendisk *disk, u8 partno); 501 501 void bdev_add(struct block_device *bdev, dev_t dev); 502 + void bdev_unhash(struct block_device *bdev); 503 + void bdev_drop(struct block_device *bdev); 502 504 503 505 int blk_alloc_ext_minor(void); 504 506 void blk_free_ext_minor(unsigned int minor);
+2 -2
block/fops.c
··· 663 663 static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) 664 664 { 665 665 struct file *file = iocb->ki_filp; 666 - struct block_device *bdev = I_BDEV(file->f_mapping->host); 667 - struct inode *bd_inode = bdev->bd_inode; 666 + struct inode *bd_inode = bdev_file_inode(file); 667 + struct block_device *bdev = I_BDEV(bd_inode); 668 668 loff_t size = bdev_nr_bytes(bdev); 669 669 size_t shorted = 0; 670 670 ssize_t ret;
+4 -4
block/genhd.c
··· 653 653 */ 654 654 mutex_lock(&disk->open_mutex); 655 655 xa_for_each(&disk->part_tbl, idx, part) 656 - remove_inode_hash(part->bd_inode); 656 + bdev_unhash(part); 657 657 mutex_unlock(&disk->open_mutex); 658 658 659 659 /* ··· 742 742 struct block_device *bdev = disk->part0; 743 743 744 744 invalidate_bdev(bdev); 745 - bdev->bd_inode->i_mapping->wb_err = 0; 745 + bdev->bd_mapping->wb_err = 0; 746 746 set_capacity(disk, 0); 747 747 } 748 748 EXPORT_SYMBOL(invalidate_disk); ··· 1191 1191 if (test_bit(GD_ADDED, &disk->state) && disk->fops->free_disk) 1192 1192 disk->fops->free_disk(disk); 1193 1193 1194 - iput(disk->part0->bd_inode); /* frees the disk */ 1194 + bdev_drop(disk->part0); /* frees the disk */ 1195 1195 } 1196 1196 1197 1197 static int block_uevent(const struct device *dev, struct kobj_uevent_env *env) ··· 1379 1379 out_destroy_part_tbl: 1380 1380 xa_destroy(&disk->part_tbl); 1381 1381 disk->part0->bd_disk = NULL; 1382 - iput(disk->part0->bd_inode); 1382 + bdev_drop(disk->part0); 1383 1383 out_free_bdi: 1384 1384 bdi_put(disk->bdi); 1385 1385 out_free_bioset:
+6 -8
block/ioctl.c
··· 96 96 unsigned long arg) 97 97 { 98 98 unsigned int bs_mask = bdev_logical_block_size(bdev) - 1; 99 - struct inode *inode = bdev->bd_inode; 100 99 uint64_t range[2], start, len, end; 101 100 struct bio *prev = NULL, *bio; 102 101 sector_t sector, nr_sects; ··· 125 126 end > bdev_nr_bytes(bdev)) 126 127 return -EINVAL; 127 128 128 - filemap_invalidate_lock(inode->i_mapping); 129 + filemap_invalidate_lock(bdev->bd_mapping); 129 130 err = truncate_bdev_range(bdev, mode, start, start + len - 1); 130 131 if (err) 131 132 goto fail; ··· 156 157 out_unplug: 157 158 blk_finish_plug(&plug); 158 159 fail: 159 - filemap_invalidate_unlock(inode->i_mapping); 160 + filemap_invalidate_unlock(bdev->bd_mapping); 160 161 return err; 161 162 } 162 163 ··· 181 182 if (start + len > bdev_nr_bytes(bdev)) 182 183 return -EINVAL; 183 184 184 - filemap_invalidate_lock(bdev->bd_inode->i_mapping); 185 + filemap_invalidate_lock(bdev->bd_mapping); 185 186 err = truncate_bdev_range(bdev, mode, start, start + len - 1); 186 187 if (!err) 187 188 err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9, 188 189 GFP_KERNEL); 189 - filemap_invalidate_unlock(bdev->bd_inode->i_mapping); 190 + filemap_invalidate_unlock(bdev->bd_mapping); 190 191 return err; 191 192 } 192 193 ··· 196 197 { 197 198 uint64_t range[2]; 198 199 uint64_t start, end, len; 199 - struct inode *inode = bdev->bd_inode; 200 200 int err; 201 201 202 202 if (!(mode & BLK_OPEN_WRITE)) ··· 218 220 return -EINVAL; 219 221 220 222 /* Invalidate the page cache, including dirty pages */ 221 - filemap_invalidate_lock(inode->i_mapping); 223 + filemap_invalidate_lock(bdev->bd_mapping); 222 224 err = truncate_bdev_range(bdev, mode, start, end); 223 225 if (err) 224 226 goto fail; ··· 227 229 BLKDEV_ZERO_NOUNMAP); 228 230 229 231 fail: 230 - filemap_invalidate_unlock(inode->i_mapping); 232 + filemap_invalidate_unlock(bdev->bd_mapping); 231 233 return err; 232 234 } 233 235
+4 -4
block/partitions/core.c
··· 243 243 static void part_release(struct device *dev) 244 244 { 245 245 put_disk(dev_to_bdev(dev)->bd_disk); 246 - iput(dev_to_bdev(dev)->bd_inode); 246 + bdev_drop(dev_to_bdev(dev)); 247 247 } 248 248 249 249 static int part_uevent(const struct device *dev, struct kobj_uevent_env *env) ··· 469 469 * Just delete the partition and invalidate it. 470 470 */ 471 471 472 - remove_inode_hash(part->bd_inode); 472 + bdev_unhash(part); 473 473 invalidate_bdev(part); 474 474 drop_partition(part); 475 475 ret = 0; ··· 652 652 * it cannot be looked up any more even when openers 653 653 * still hold references. 654 654 */ 655 - remove_inode_hash(part->bd_inode); 655 + bdev_unhash(part); 656 656 657 657 /* 658 658 * If @disk->open_partitions isn't elevated but there's ··· 701 701 702 702 void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p) 703 703 { 704 - struct address_space *mapping = state->disk->part0->bd_inode->i_mapping; 704 + struct address_space *mapping = state->disk->part0->bd_mapping; 705 705 struct folio *folio; 706 706 707 707 if (n >= get_capacity(state->disk)) {
+1 -1
drivers/md/bcache/super.c
··· 171 171 struct page *page; 172 172 unsigned int i; 173 173 174 - page = read_cache_page_gfp(bdev->bd_inode->i_mapping, 174 + page = read_cache_page_gfp(bdev->bd_mapping, 175 175 SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL); 176 176 if (IS_ERR(page)) 177 177 return "IO error";
+2 -2
drivers/md/dm-vdo/dm-vdo-target.c
··· 878 878 } 879 879 880 880 if (config->version == 0) { 881 - u64 device_size = i_size_read(config->owned_device->bdev->bd_inode); 881 + u64 device_size = bdev_nr_bytes(config->owned_device->bdev); 882 882 883 883 config->physical_blocks = device_size / VDO_BLOCK_SIZE; 884 884 } ··· 1011 1011 1012 1012 static block_count_t __must_check get_underlying_device_block_count(const struct vdo *vdo) 1013 1013 { 1014 - return i_size_read(vdo_get_backing_device(vdo)->bd_inode) / VDO_BLOCK_SIZE; 1014 + return bdev_nr_bytes(vdo_get_backing_device(vdo)) / VDO_BLOCK_SIZE; 1015 1015 } 1016 1016 1017 1017 static int __must_check process_vdo_message_locked(struct vdo *vdo, unsigned int argc,
+1 -1
drivers/md/dm-vdo/indexer/io-factory.c
··· 90 90 91 91 size_t uds_get_writable_size(struct io_factory *factory) 92 92 { 93 - return i_size_read(factory->bdev->bd_inode); 93 + return bdev_nr_bytes(factory->bdev); 94 94 } 95 95 96 96 /* Create a struct dm_bufio_client for an index region starting at offset. */
+4 -2
drivers/mtd/devices/block2mtd.c
··· 265 265 struct file *bdev_file; 266 266 struct block_device *bdev; 267 267 struct block2mtd_dev *dev; 268 + loff_t size; 268 269 char *name; 269 270 270 271 if (!devname) ··· 292 291 goto err_free_block2mtd; 293 292 } 294 293 295 - if ((long)bdev->bd_inode->i_size % erase_size) { 294 + size = bdev_nr_bytes(bdev); 295 + if ((long)size % erase_size) { 296 296 pr_err("erasesize must be a divisor of device size\n"); 297 297 goto err_free_block2mtd; 298 298 } ··· 311 309 312 310 dev->mtd.name = name; 313 311 314 - dev->mtd.size = bdev->bd_inode->i_size & PAGE_MASK; 312 + dev->mtd.size = size & PAGE_MASK; 315 313 dev->mtd.erasesize = erase_size; 316 314 dev->mtd.writesize = 1; 317 315 dev->mtd.writebufsize = PAGE_SIZE;
+1 -1
drivers/s390/block/dasd_ioctl.c
··· 215 215 * enabling the device later. 216 216 */ 217 217 if (fdata->start_unit == 0) { 218 - block->gdp->part0->bd_inode->i_blkbits = 218 + block->gdp->part0->bd_mapping->host->i_blkbits = 219 219 blksize_bits(fdata->blksize); 220 220 } 221 221
+1 -1
drivers/scsi/scsicam.c
··· 32 32 */ 33 33 unsigned char *scsi_bios_ptable(struct block_device *dev) 34 34 { 35 - struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping; 35 + struct address_space *mapping = bdev_whole(dev)->bd_mapping; 36 36 unsigned char *res = NULL; 37 37 struct folio *folio; 38 38
-5
fs/bcachefs/util.h
··· 445 445 void bch2_bio_map(struct bio *bio, void *base, size_t); 446 446 int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t); 447 447 448 - static inline sector_t bdev_sectors(struct block_device *bdev) 449 - { 450 - return bdev->bd_inode->i_size >> 9; 451 - } 452 - 453 448 #define closure_bio_submit(bio, cl) \ 454 449 do { \ 455 450 closure_get(cl); \
+3 -3
fs/btrfs/disk-io.c
··· 3656 3656 struct btrfs_super_block *super; 3657 3657 struct page *page; 3658 3658 u64 bytenr, bytenr_orig; 3659 - struct address_space *mapping = bdev->bd_inode->i_mapping; 3659 + struct address_space *mapping = bdev->bd_mapping; 3660 3660 int ret; 3661 3661 3662 3662 bytenr_orig = btrfs_sb_offset(copy_num); ··· 3743 3743 struct btrfs_super_block *sb, int max_mirrors) 3744 3744 { 3745 3745 struct btrfs_fs_info *fs_info = device->fs_info; 3746 - struct address_space *mapping = device->bdev->bd_inode->i_mapping; 3746 + struct address_space *mapping = device->bdev->bd_mapping; 3747 3747 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 3748 3748 int i; 3749 3749 int ret; ··· 3861 3861 device->commit_total_bytes) 3862 3862 break; 3863 3863 3864 - folio = filemap_get_folio(device->bdev->bd_inode->i_mapping, 3864 + folio = filemap_get_folio(device->bdev->bd_mapping, 3865 3865 bytenr >> PAGE_SHIFT); 3866 3866 /* If the folio has been removed, then we know it completed. */ 3867 3867 if (IS_ERR(folio))
+1 -1
fs/btrfs/volumes.c
··· 1290 1290 return ERR_PTR(-EINVAL); 1291 1291 1292 1292 /* pull in the page with our super */ 1293 - page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); 1293 + page = read_cache_page_gfp(bdev->bd_mapping, index, GFP_KERNEL); 1294 1294 1295 1295 if (IS_ERR(page)) 1296 1296 return ERR_CAST(page);
+1 -1
fs/btrfs/zoned.c
··· 118 118 return -ENOENT; 119 119 } else if (full[0] && full[1]) { 120 120 /* Compare two super blocks */ 121 - struct address_space *mapping = bdev->bd_inode->i_mapping; 121 + struct address_space *mapping = bdev->bd_mapping; 122 122 struct page *page[BTRFS_NR_SB_LOG_ZONES]; 123 123 struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES]; 124 124 int i;
+13 -13
fs/buffer.c
··· 189 189 static struct buffer_head * 190 190 __find_get_block_slow(struct block_device *bdev, sector_t block) 191 191 { 192 - struct inode *bd_inode = bdev->bd_inode; 193 - struct address_space *bd_mapping = bd_inode->i_mapping; 192 + struct address_space *bd_mapping = bdev->bd_mapping; 193 + const int blkbits = bd_mapping->host->i_blkbits; 194 194 struct buffer_head *ret = NULL; 195 195 pgoff_t index; 196 196 struct buffer_head *bh; ··· 199 199 int all_mapped = 1; 200 200 static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1); 201 201 202 - index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE; 202 + index = ((loff_t)block << blkbits) / PAGE_SIZE; 203 203 folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0); 204 204 if (IS_ERR(folio)) 205 205 goto out; ··· 233 233 (unsigned long long)block, 234 234 (unsigned long long)bh->b_blocknr, 235 235 bh->b_state, bh->b_size, bdev, 236 - 1 << bd_inode->i_blkbits); 236 + 1 << blkbits); 237 237 } 238 238 out_unlock: 239 239 spin_unlock(&bd_mapping->i_private_lock); ··· 1041 1041 static bool grow_dev_folio(struct block_device *bdev, sector_t block, 1042 1042 pgoff_t index, unsigned size, gfp_t gfp) 1043 1043 { 1044 - struct inode *inode = bdev->bd_inode; 1044 + struct address_space *mapping = bdev->bd_mapping; 1045 1045 struct folio *folio; 1046 1046 struct buffer_head *bh; 1047 1047 sector_t end_block = 0; 1048 1048 1049 - folio = __filemap_get_folio(inode->i_mapping, index, 1049 + folio = __filemap_get_folio(mapping, index, 1050 1050 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 1051 1051 if (IS_ERR(folio)) 1052 1052 return false; ··· 1080 1080 * lock to be atomic wrt __find_get_block(), which does not 1081 1081 * run under the folio lock. 
1082 1082 */ 1083 - spin_lock(&inode->i_mapping->i_private_lock); 1083 + spin_lock(&mapping->i_private_lock); 1084 1084 link_dev_buffers(folio, bh); 1085 1085 end_block = folio_init_buffers(folio, bdev, size); 1086 - spin_unlock(&inode->i_mapping->i_private_lock); 1086 + spin_unlock(&mapping->i_private_lock); 1087 1087 unlock: 1088 1088 folio_unlock(folio); 1089 1089 folio_put(folio); ··· 1486 1486 { 1487 1487 struct buffer_head *bh; 1488 1488 1489 - gfp |= mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); 1489 + gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS); 1490 1490 1491 1491 /* 1492 1492 * Prefer looping in the allocator rather than here, at least that ··· 1719 1719 */ 1720 1720 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len) 1721 1721 { 1722 - struct inode *bd_inode = bdev->bd_inode; 1723 - struct address_space *bd_mapping = bd_inode->i_mapping; 1722 + struct address_space *bd_mapping = bdev->bd_mapping; 1723 + const int blkbits = bd_mapping->host->i_blkbits; 1724 1724 struct folio_batch fbatch; 1725 - pgoff_t index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE; 1725 + pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE; 1726 1726 pgoff_t end; 1727 1727 int i, count; 1728 1728 struct buffer_head *bh; 1729 1729 struct buffer_head *head; 1730 1730 1731 - end = ((loff_t)(block + len - 1) << bd_inode->i_blkbits) / PAGE_SIZE; 1731 + end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE; 1732 1732 folio_batch_init(&fbatch); 1733 1733 while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) { 1734 1734 count = folio_batch_count(&fbatch);
+1 -1
fs/cramfs/inode.c
··· 183 183 static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset, 184 184 unsigned int len) 185 185 { 186 - struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; 186 + struct address_space *mapping = sb->s_bdev->bd_mapping; 187 187 struct file_ra_state ra = {}; 188 188 struct page *pages[BLKS_PER_BUF]; 189 189 unsigned i, blocknr, buffer;
+5 -7
fs/erofs/data.c
··· 29 29 * Derive the block size from inode->i_blkbits to make compatible with 30 30 * anonymous inode in fscache mode. 31 31 */ 32 - void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr, 32 + void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, 33 33 enum erofs_kmap_type type) 34 34 { 35 - struct inode *inode = buf->inode; 36 - erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits; 37 35 pgoff_t index = offset >> PAGE_SHIFT; 38 36 struct page *page = buf->page; 39 37 struct folio *folio; ··· 41 43 erofs_put_metabuf(buf); 42 44 43 45 nofs_flag = memalloc_nofs_save(); 44 - folio = read_cache_folio(inode->i_mapping, index, NULL, NULL); 46 + folio = read_cache_folio(buf->mapping, index, NULL, NULL); 45 47 memalloc_nofs_restore(nofs_flag); 46 48 if (IS_ERR(folio)) 47 49 return folio; ··· 66 68 void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb) 67 69 { 68 70 if (erofs_is_fscache_mode(sb)) 69 - buf->inode = EROFS_SB(sb)->s_fscache->inode; 71 + buf->mapping = EROFS_SB(sb)->s_fscache->inode->i_mapping; 70 72 else 71 - buf->inode = sb->s_bdev->bd_inode; 73 + buf->mapping = sb->s_bdev->bd_mapping; 72 74 } 73 75 74 76 void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, 75 77 erofs_blk_t blkaddr, enum erofs_kmap_type type) 76 78 { 77 79 erofs_init_metabuf(buf, sb); 78 - return erofs_bread(buf, blkaddr, type); 80 + return erofs_bread(buf, erofs_pos(sb, blkaddr), type); 79 81 } 80 82 81 83 static int erofs_map_blocks_flatmode(struct inode *inode,
+2 -2
fs/erofs/dir.c
··· 58 58 int err = 0; 59 59 bool initial = true; 60 60 61 - buf.inode = dir; 61 + buf.mapping = dir->i_mapping; 62 62 while (ctx->pos < dirsize) { 63 63 struct erofs_dirent *de; 64 64 unsigned int nameoff, maxsize; 65 65 66 - de = erofs_bread(&buf, i, EROFS_KMAP); 66 + de = erofs_bread(&buf, erofs_pos(sb, i), EROFS_KMAP); 67 67 if (IS_ERR(de)) { 68 68 erofs_err(sb, "fail to readdir of logical block %u of nid %llu", 69 69 i, EROFS_I(dir)->nid);
+2 -2
fs/erofs/internal.h
··· 216 216 }; 217 217 218 218 struct erofs_buf { 219 - struct inode *inode; 219 + struct address_space *mapping; 220 220 struct page *page; 221 221 void *base; 222 222 enum erofs_kmap_type kmap_type; ··· 402 402 erofs_off_t *offset, int *lengthp); 403 403 void erofs_unmap_metabuf(struct erofs_buf *buf); 404 404 void erofs_put_metabuf(struct erofs_buf *buf); 405 - void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr, 405 + void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, 406 406 enum erofs_kmap_type type); 407 407 void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb); 408 408 void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
+3 -3
fs/erofs/namei.c
··· 99 99 struct erofs_buf buf = __EROFS_BUF_INITIALIZER; 100 100 struct erofs_dirent *de; 101 101 102 - buf.inode = dir; 103 - de = erofs_bread(&buf, mid, EROFS_KMAP); 102 + buf.mapping = dir->i_mapping; 103 + de = erofs_bread(&buf, erofs_pos(dir->i_sb, mid), EROFS_KMAP); 104 104 if (!IS_ERR(de)) { 105 105 const int nameoff = nameoff_from_disk(de->nameoff, bsz); 106 106 const int ndirents = nameoff / sizeof(*de); ··· 171 171 172 172 qn.name = name->name; 173 173 qn.end = name->name + name->len; 174 - buf.inode = dir; 174 + buf.mapping = dir->i_mapping; 175 175 176 176 ndirents = 0; 177 177 de = erofs_find_target_block(&buf, dir, &qn, &ndirents);
+4 -4
fs/erofs/super.c
··· 132 132 int len, i, cnt; 133 133 134 134 *offset = round_up(*offset, 4); 135 - ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP); 135 + ptr = erofs_bread(buf, *offset, EROFS_KMAP); 136 136 if (IS_ERR(ptr)) 137 137 return ptr; 138 138 139 - len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]); 139 + len = le16_to_cpu(*(__le16 *)ptr); 140 140 if (!len) 141 141 len = U16_MAX + 1; 142 142 buffer = kmalloc(len, GFP_KERNEL); ··· 148 148 for (i = 0; i < len; i += cnt) { 149 149 cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset), 150 150 len - i); 151 - ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP); 151 + ptr = erofs_bread(buf, *offset, EROFS_KMAP); 152 152 if (IS_ERR(ptr)) { 153 153 kfree(buffer); 154 154 return ptr; 155 155 } 156 - memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt); 156 + memcpy(buffer + i, ptr, cnt); 157 157 *offset += cnt; 158 158 } 159 159 return buffer;
+14 -23
fs/erofs/xattr.c
··· 81 81 it.pos = erofs_iloc(inode) + vi->inode_isize; 82 82 83 83 /* read in shared xattr array (non-atomic, see kmalloc below) */ 84 - it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos), EROFS_KMAP); 84 + it.kaddr = erofs_bread(&it.buf, it.pos, EROFS_KMAP); 85 85 if (IS_ERR(it.kaddr)) { 86 86 ret = PTR_ERR(it.kaddr); 87 87 goto out_unlock; 88 88 } 89 89 90 - ih = it.kaddr + erofs_blkoff(sb, it.pos); 90 + ih = it.kaddr; 91 91 vi->xattr_name_filter = le32_to_cpu(ih->h_name_filter); 92 92 vi->xattr_shared_count = ih->h_shared_count; 93 93 vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count, ··· 102 102 it.pos += sizeof(struct erofs_xattr_ibody_header); 103 103 104 104 for (i = 0; i < vi->xattr_shared_count; ++i) { 105 - it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos), 106 - EROFS_KMAP); 105 + it.kaddr = erofs_bread(&it.buf, it.pos, EROFS_KMAP); 107 106 if (IS_ERR(it.kaddr)) { 108 107 kfree(vi->xattr_shared_xattrs); 109 108 vi->xattr_shared_xattrs = NULL; 110 109 ret = PTR_ERR(it.kaddr); 111 110 goto out_unlock; 112 111 } 113 - vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *) 114 - (it.kaddr + erofs_blkoff(sb, it.pos))); 112 + vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *)it.kaddr); 115 113 it.pos += sizeof(__le32); 116 114 } 117 115 erofs_put_metabuf(&it.buf); ··· 183 185 void *src; 184 186 185 187 for (processed = 0; processed < len; processed += slice) { 186 - it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos), 187 - EROFS_KMAP); 188 + it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP); 188 189 if (IS_ERR(it->kaddr)) 189 190 return PTR_ERR(it->kaddr); 190 191 191 - src = it->kaddr + erofs_blkoff(sb, it->pos); 192 + src = it->kaddr; 192 193 slice = min_t(unsigned int, sb->s_blocksize - 193 194 erofs_blkoff(sb, it->pos), len - processed); 194 195 memcpy(it->buffer + it->buffer_ofs, src, slice); ··· 205 208 int err; 206 209 207 210 /* 1. 
handle xattr entry */ 208 - entry = *(struct erofs_xattr_entry *) 209 - (it->kaddr + erofs_blkoff(it->sb, it->pos)); 211 + entry = *(struct erofs_xattr_entry *)it->kaddr; 210 212 it->pos += sizeof(struct erofs_xattr_entry); 211 213 212 214 base_index = entry.e_name_index; ··· 255 259 unsigned int slice, processed, value_sz; 256 260 257 261 /* 1. handle xattr entry */ 258 - entry = *(struct erofs_xattr_entry *) 259 - (it->kaddr + erofs_blkoff(sb, it->pos)); 262 + entry = *(struct erofs_xattr_entry *)it->kaddr; 260 263 it->pos += sizeof(struct erofs_xattr_entry); 261 264 value_sz = le16_to_cpu(entry.e_value_size); 262 265 ··· 286 291 287 292 /* 2. handle xattr name */ 288 293 for (processed = 0; processed < entry.e_name_len; processed += slice) { 289 - it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos), 290 - EROFS_KMAP); 294 + it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP); 291 295 if (IS_ERR(it->kaddr)) 292 296 return PTR_ERR(it->kaddr); 293 297 ··· 294 300 sb->s_blocksize - erofs_blkoff(sb, it->pos), 295 301 entry.e_name_len - processed); 296 302 if (memcmp(it->name.name + it->infix_len + processed, 297 - it->kaddr + erofs_blkoff(sb, it->pos), slice)) 303 + it->kaddr, slice)) 298 304 return -ENOATTR; 299 305 it->pos += slice; 300 306 } ··· 330 336 it->pos = erofs_iloc(inode) + vi->inode_isize + xattr_header_sz; 331 337 332 338 while (remaining) { 333 - it->kaddr = erofs_bread(&it->buf, erofs_blknr(it->sb, it->pos), 334 - EROFS_KMAP); 339 + it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP); 335 340 if (IS_ERR(it->kaddr)) 336 341 return PTR_ERR(it->kaddr); 337 342 338 - entry_sz = erofs_xattr_entry_size(it->kaddr + 339 - erofs_blkoff(it->sb, it->pos)); 343 + entry_sz = erofs_xattr_entry_size(it->kaddr); 340 344 /* xattr on-disk corruption: xattr entry beyond xattr_isize */ 341 345 if (remaining < entry_sz) { 342 346 DBG_BUGON(1); ··· 367 375 for (i = 0; i < vi->xattr_shared_count; ++i) { 368 376 it->pos = erofs_pos(sb, sbi->xattr_blkaddr) + 369 
377 vi->xattr_shared_xattrs[i] * sizeof(__le32); 370 - it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos), 371 - EROFS_KMAP); 378 + it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP); 372 379 if (IS_ERR(it->kaddr)) 373 380 return PTR_ERR(it->kaddr); 374 381 ··· 483 492 return -ENOMEM; 484 493 485 494 if (sbi->packed_inode) 486 - buf.inode = sbi->packed_inode; 495 + buf.mapping = sbi->packed_inode->i_mapping; 487 496 else 488 497 erofs_init_metabuf(&buf, sb); 489 498
+3 -3
fs/erofs/zdata.c
··· 936 936 if (!packed_inode) 937 937 return -EFSCORRUPTED; 938 938 939 - buf.inode = packed_inode; 939 + buf.mapping = packed_inode->i_mapping; 940 940 for (; cur < end; cur += cnt, pos += cnt) { 941 941 cnt = min_t(unsigned int, end - cur, 942 942 sb->s_blocksize - erofs_blkoff(sb, pos)); 943 - src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP); 943 + src = erofs_bread(&buf, pos, EROFS_KMAP); 944 944 if (IS_ERR(src)) { 945 945 erofs_put_metabuf(&buf); 946 946 return PTR_ERR(src); 947 947 } 948 - memcpy_to_page(page, cur, src + erofs_blkoff(sb, pos), cnt); 948 + memcpy_to_page(page, cur, src, cnt); 949 949 } 950 950 erofs_put_metabuf(&buf); 951 951 return 0;
+1 -1
fs/ext4/dir.c
··· 192 192 (PAGE_SHIFT - inode->i_blkbits); 193 193 if (!ra_has_index(&file->f_ra, index)) 194 194 page_cache_sync_readahead( 195 - sb->s_bdev->bd_inode->i_mapping, 195 + sb->s_bdev->bd_mapping, 196 196 &file->f_ra, file, 197 197 index, 1); 198 198 file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
+1 -1
fs/ext4/ext4_jbd2.c
··· 206 206 207 207 static void ext4_check_bdev_write_error(struct super_block *sb) 208 208 { 209 - struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; 209 + struct address_space *mapping = sb->s_bdev->bd_mapping; 210 210 struct ext4_sb_info *sbi = EXT4_SB(sb); 211 211 int err; 212 212
+3 -21
fs/ext4/super.c
··· 244 244 struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block, 245 245 blk_opf_t op_flags) 246 246 { 247 - gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_inode->i_mapping, 247 + gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping, 248 248 ~__GFP_FS) | __GFP_MOVABLE; 249 249 250 250 return __ext4_sb_bread_gfp(sb, block, op_flags, gfp); ··· 253 253 struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb, 254 254 sector_t block) 255 255 { 256 - gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_inode->i_mapping, 256 + gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping, 257 257 ~__GFP_FS); 258 258 259 259 return __ext4_sb_bread_gfp(sb, block, 0, gfp); ··· 490 490 491 491 if (diff_size > EXT4_SB_REFRESH_INTERVAL_KB) 492 492 schedule_work(&EXT4_SB(sb)->s_sb_upd_work); 493 - } 494 - 495 - /* 496 - * The del_gendisk() function uninitializes the disk-specific data 497 - * structures, including the bdi structure, without telling anyone 498 - * else. Once this happens, any attempt to call mark_buffer_dirty() 499 - * (for example, by ext4_commit_super), will cause a kernel OOPS. 500 - * This is a kludge to prevent these oops until we can put in a proper 501 - * hook in del_gendisk() to inform the VFS and file system layers. 502 - */ 503 - static int block_device_ejected(struct super_block *sb) 504 - { 505 - struct inode *bd_inode = sb->s_bdev->bd_inode; 506 - struct backing_dev_info *bdi = inode_to_bdi(bd_inode); 507 - 508 - return bdi->dev == NULL; 509 493 } 510 494 511 495 static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) ··· 5547 5563 * used to detect the metadata async write error. 
5548 5564 */ 5549 5565 spin_lock_init(&sbi->s_bdev_wb_lock); 5550 - errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, 5566 + errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err, 5551 5567 &sbi->s_bdev_wb_err); 5552 5568 EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; 5553 5569 ext4_orphan_cleanup(sb, es); ··· 6148 6164 6149 6165 if (!sbh) 6150 6166 return -EINVAL; 6151 - if (block_device_ejected(sb)) 6152 - return -ENODEV; 6153 6167 6154 6168 ext4_update_super(sb); 6155 6169
+1 -1
fs/gfs2/glock.c
··· 1267 1267 mapping = gfs2_glock2aspace(gl); 1268 1268 if (mapping) { 1269 1269 mapping->a_ops = &gfs2_meta_aops; 1270 - mapping->host = s->s_bdev->bd_inode; 1270 + mapping->host = s->s_bdev->bd_mapping->host; 1271 1271 mapping->flags = 0; 1272 1272 mapping_set_gfp_mask(mapping, GFP_NOFS); 1273 1273 mapping->i_private_data = NULL;
+1 -1
fs/gfs2/ops_fstype.c
··· 114 114 115 115 address_space_init_once(mapping); 116 116 mapping->a_ops = &gfs2_rgrp_aops; 117 - mapping->host = sb->s_bdev->bd_inode; 117 + mapping->host = sb->s_bdev->bd_mapping->host; 118 118 mapping->flags = 0; 119 119 mapping_set_gfp_mask(mapping, GFP_NOFS); 120 120 mapping->i_private_data = NULL;
+1 -1
fs/jbd2/journal.c
··· 2009 2009 byte_count = (block_stop - block_start + 1) * 2010 2010 journal->j_blocksize; 2011 2011 2012 - truncate_inode_pages_range(journal->j_dev->bd_inode->i_mapping, 2012 + truncate_inode_pages_range(journal->j_dev->bd_mapping, 2013 2013 byte_start, byte_stop); 2014 2014 2015 2015 if (flags & JBD2_JOURNAL_FLUSH_DISCARD) {
+1 -1
fs/nilfs2/segment.c
··· 2784 2784 if (!nilfs->ns_writer) 2785 2785 return -ENOMEM; 2786 2786 2787 - inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); 2787 + inode_attach_wb(nilfs->ns_bdev->bd_mapping->host, NULL); 2788 2788 2789 2789 err = nilfs_segctor_start_thread(nilfs->ns_writer); 2790 2790 if (unlikely(err))
+1 -1
include/linux/blk_types.h
··· 50 50 bool bd_write_holder; 51 51 bool bd_has_submit_bio; 52 52 dev_t bd_dev; 53 - struct inode *bd_inode; /* will die */ 53 + struct address_space *bd_mapping; /* page cache */ 54 54 55 55 atomic_t bd_openers; 56 56 spinlock_t bd_size_lock; /* protects device size updates (was: bd_inode->i_size) */
+2 -10
include/linux/blkdev.h
··· 212 212 struct blk_independent_access_ranges *ia_ranges; 213 213 }; 214 214 215 - static inline bool disk_live(struct gendisk *disk) 216 - { 217 - return !inode_unhashed(disk->part0->bd_inode); 218 - } 219 - 220 215 /** 221 216 * disk_openers - returns how many openers are there for a disk 222 217 * @disk: disk to check ··· 1366 1371 return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT; 1367 1372 } 1368 1373 1369 - static inline unsigned int block_size(struct block_device *bdev) 1370 - { 1371 - return 1 << bdev->bd_inode->i_blkbits; 1372 - } 1373 - 1374 1374 int kblockd_schedule_work(struct work_struct *work); 1375 1375 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); 1376 1376 ··· 1533 1543 1534 1544 struct block_device *I_BDEV(struct inode *inode); 1535 1545 struct block_device *file_bdev(struct file *bdev_file); 1546 + bool disk_live(struct gendisk *disk); 1547 + unsigned int block_size(struct block_device *bdev); 1536 1548 1537 1549 #ifdef CONFIG_BLOCK 1538 1550 void invalidate_bdev(struct block_device *bdev);
+2 -2
include/linux/buffer_head.h
··· 364 364 { 365 365 gfp_t gfp; 366 366 367 - gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); 367 + gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS); 368 368 gfp |= __GFP_NOFAIL; 369 369 370 370 return bdev_getblk(bdev, block, size, gfp); ··· 375 375 { 376 376 gfp_t gfp; 377 377 378 - gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); 378 + gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS); 379 379 gfp |= __GFP_MOVABLE | __GFP_NOFAIL; 380 380 381 381 return bdev_getblk(bdev, block, size, gfp);
+2 -2
include/linux/jbd2.h
··· 1694 1694 1695 1695 static inline void jbd2_init_fs_dev_write_error(journal_t *journal) 1696 1696 { 1697 - struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping; 1697 + struct address_space *mapping = journal->j_fs_dev->bd_mapping; 1698 1698 1699 1699 /* 1700 1700 * Save the original wb_err value of client fs's bdev mapping which ··· 1705 1705 1706 1706 static inline int jbd2_check_fs_dev_write_error(journal_t *journal) 1707 1707 { 1708 - struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping; 1708 + struct address_space *mapping = journal->j_fs_dev->bd_mapping; 1709 1709 1710 1710 return errseq_check(&mapping->wb_err, 1711 1711 READ_ONCE(journal->j_fs_dev_wb_err));