Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: allocate struct hd_struct as part of struct bdev_inode

Allocate hd_struct together with struct block_device to pre-load
the lifetime rule changes in preparation of merging the two structures.

Note that part0 was previously embedded into struct gendisk, but is
a separate allocation now, and already points to the block_device instead
of the hd_struct. The lifetime of struct gendisk is still controlled by
the struct device embedded in the part0 hd_struct.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig and committed by Jens Axboe
cb8432d6 83950d35

+61 -169
+7 -9
block/blk-core.c
··· 714 714 715 715 static noinline int should_fail_bio(struct bio *bio) 716 716 { 717 - if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size)) 717 + if (should_fail_request(bio->bi_disk->part0->bd_part, 718 + bio->bi_iter.bi_size)) 718 719 return -EIO; 719 720 return 0; 720 721 } ··· 832 831 if (unlikely(blk_partition_remap(bio))) 833 832 goto end_io; 834 833 } else { 835 - if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0))) 834 + if (unlikely(bio_check_ro(bio, bio->bi_disk->part0->bd_part))) 836 835 goto end_io; 837 836 if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk)))) 838 837 goto end_io; ··· 1204 1203 return ret; 1205 1204 1206 1205 if (rq->rq_disk && 1207 - should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) 1206 + should_fail_request(rq->rq_disk->part0->bd_part, blk_rq_bytes(rq))) 1208 1207 return BLK_STS_IOERR; 1209 1208 1210 1209 if (blk_crypto_insert_cloned_request(rq)) ··· 1273 1272 __part_stat_add(part, io_ticks, end ? now - stamp : 1); 1274 1273 } 1275 1274 if (part->partno) { 1276 - part = &part_to_disk(part)->part0; 1275 + part = part_to_disk(part)->part0->bd_part; 1277 1276 goto again; 1278 1277 } 1279 1278 } ··· 1310 1309 part_stat_inc(part, ios[sgrp]); 1311 1310 part_stat_add(part, nsecs[sgrp], now - req->start_time_ns); 1312 1311 part_stat_unlock(); 1313 - 1314 - hd_struct_put(part); 1315 1312 } 1316 1313 } 1317 1314 ··· 1353 1354 unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, 1354 1355 unsigned int op) 1355 1356 { 1356 - return __part_start_io_acct(&disk->part0, sectors, op); 1357 + return __part_start_io_acct(disk->part0->bd_part, sectors, op); 1357 1358 } 1358 1359 EXPORT_SYMBOL(disk_start_io_acct); 1359 1360 ··· 1375 1376 unsigned long start_time) 1376 1377 { 1377 1378 __part_end_io_acct(part, bio_op(bio), start_time); 1378 - hd_struct_put(part); 1379 1379 } 1380 1380 EXPORT_SYMBOL_GPL(part_end_io_acct); 1381 1381 1382 1382 void disk_end_io_acct(struct gendisk *disk, unsigned 
int op, 1383 1383 unsigned long start_time) 1384 1384 { 1385 - __part_end_io_acct(&disk->part0, op, start_time); 1385 + __part_end_io_acct(disk->part0->bd_part, op, start_time); 1386 1386 } 1387 1387 EXPORT_SYMBOL(disk_end_io_acct); 1388 1388
+1 -1
block/blk-flush.c
··· 139 139 140 140 static void blk_account_io_flush(struct request *rq) 141 141 { 142 - struct hd_struct *part = &rq->rq_disk->part0; 142 + struct hd_struct *part = rq->rq_disk->part0->bd_part; 143 143 144 144 part_stat_lock(); 145 145 part_stat_inc(part, ios[STAT_FLUSH]);
-2
block/blk-merge.c
··· 683 683 part_stat_lock(); 684 684 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]); 685 685 part_stat_unlock(); 686 - 687 - hd_struct_put(req->part); 688 686 } 689 687 } 690 688
-21
block/blk.h
··· 363 363 int bdev_resize_partition(struct block_device *bdev, int partno, 364 364 sector_t start, sector_t length); 365 365 int disk_expand_part_tbl(struct gendisk *disk, int target); 366 - int hd_ref_init(struct hd_struct *part); 367 - 368 - /* no need to get/put refcount of part0 */ 369 - static inline int hd_struct_try_get(struct hd_struct *part) 370 - { 371 - if (part->partno) 372 - return percpu_ref_tryget_live(&part->ref); 373 - return 1; 374 - } 375 - 376 - static inline void hd_struct_put(struct hd_struct *part) 377 - { 378 - if (part->partno) 379 - percpu_ref_put(&part->ref); 380 - } 381 - 382 - static inline void hd_free_part(struct hd_struct *part) 383 - { 384 - bdput(part->bdev); 385 - percpu_ref_exit(&part->ref); 386 - } 387 366 388 367 int bio_add_hw_page(struct request_queue *q, struct bio *bio, 389 368 struct page *page, unsigned int len, unsigned int offset,
+19 -31
block/genhd.c
··· 42 42 43 43 void set_capacity(struct gendisk *disk, sector_t sectors) 44 44 { 45 - struct block_device *bdev = disk->part0.bdev; 45 + struct block_device *bdev = disk->part0; 46 46 47 47 spin_lock(&bdev->bd_size_lock); 48 48 i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT); ··· 318 318 * primarily used for stats accounting. 319 319 * 320 320 * CONTEXT: 321 - * RCU read locked. The returned partition pointer is always valid 322 - * because its refcount is grabbed except for part0, which lifetime 323 - * is same with the disk. 321 + * RCU read locked. 324 322 * 325 323 * RETURNS: 326 324 * Found partition on success, part0 is returned if no partition matches ··· 334 336 ptbl = rcu_dereference(disk->part_tbl); 335 337 336 338 part = rcu_dereference(ptbl->last_lookup); 337 - if (part && sector_in_part(part, sector) && hd_struct_try_get(part)) 339 + if (part && sector_in_part(part, sector)) 338 340 goto out_unlock; 339 341 340 342 for (i = 1; i < ptbl->len; i++) { 341 343 part = rcu_dereference(ptbl->part[i]); 342 344 343 345 if (part && sector_in_part(part, sector)) { 344 - /* 345 - * only live partition can be cached for lookup, 346 - * so use-after-free on cached & deleting partition 347 - * can be avoided 348 - */ 349 - if (!hd_struct_try_get(part)) 350 - break; 351 346 rcu_assign_pointer(ptbl->last_lookup, part); 352 347 goto out_unlock; 353 348 } 354 349 } 355 350 356 - part = &disk->part0; 351 + part = disk->part0->bd_part; 357 352 out_unlock: 358 353 rcu_read_unlock(); 359 354 return part; ··· 672 681 */ 673 682 pm_runtime_set_memalloc_noio(ddev, true); 674 683 675 - disk->part0.bdev->bd_holder_dir = 676 - kobject_create_and_add("holders", &ddev->kobj); 684 + disk->part0->bd_holder_dir = 685 + kobject_create_and_add("holders", &ddev->kobj); 677 686 disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj); 678 687 679 688 if (disk->flags & GENHD_FL_HIDDEN) { ··· 739 748 740 749 disk->flags |= GENHD_FL_UP; 741 750 742 - retval = 
blk_alloc_devt(&disk->part0, &devt); 751 + retval = blk_alloc_devt(disk->part0->bd_part, &devt); 743 752 if (retval) { 744 753 WARN_ON(1); 745 754 return; ··· 766 775 ret = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt)); 767 776 WARN_ON(ret); 768 777 bdi_set_owner(bdi, dev); 769 - bdev_add(disk->part0.bdev, devt); 778 + bdev_add(disk->part0, devt); 770 779 } 771 780 register_disk(parent, disk, groups); 772 781 if (register_queue) ··· 879 888 880 889 blk_unregister_queue(disk); 881 890 882 - kobject_put(disk->part0.bdev->bd_holder_dir); 891 + kobject_put(disk->part0->bd_holder_dir); 883 892 kobject_put(disk->slave_dir); 884 893 885 - part_stat_set_all(&disk->part0, 0); 886 - disk->part0.bdev->bd_stamp = 0; 894 + part_stat_set_all(disk->part0->bd_part, 0); 895 + disk->part0->bd_stamp = 0; 887 896 if (!sysfs_deprecated) 888 897 sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); 889 898 pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); ··· 996 1005 */ 997 1006 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 998 1007 while ((part = disk_part_iter_next(&piter))) { 999 - bool is_part0 = part == &disk->part0; 1008 + bool is_part0 = part == disk->part0->bd_part; 1000 1009 1001 1010 printk("%s%s %10llu %s %s", is_part0 ? 
"" : " ", 1002 1011 bdevt_str(part_devt(part), devt_buf), ··· 1451 1460 disk_release_events(disk); 1452 1461 kfree(disk->random); 1453 1462 disk_replace_part_tbl(disk, NULL); 1454 - hd_free_part(&disk->part0); 1463 + bdput(disk->part0); 1455 1464 if (disk->queue) 1456 1465 blk_put_queue(disk->queue); 1457 1466 kfree(disk); ··· 1617 1626 if (!disk) 1618 1627 return NULL; 1619 1628 1620 - disk->part0.bdev = bdev_alloc(disk, 0); 1621 - if (!disk->part0.bdev) 1629 + disk->part0 = bdev_alloc(disk, 0); 1630 + if (!disk->part0) 1622 1631 goto out_free_disk; 1623 1632 1624 1633 disk->node_id = node_id; ··· 1626 1635 goto out_bdput; 1627 1636 1628 1637 ptbl = rcu_dereference_protected(disk->part_tbl, 1); 1629 - rcu_assign_pointer(ptbl->part[0], &disk->part0); 1630 - 1631 - if (hd_ref_init(&disk->part0)) 1632 - goto out_bdput; 1638 + rcu_assign_pointer(ptbl->part[0], disk->part0->bd_part); 1633 1639 1634 1640 disk->minors = minors; 1635 1641 rand_initialize_disk(disk); ··· 1636 1648 return disk; 1637 1649 1638 1650 out_bdput: 1639 - bdput(disk->part0.bdev); 1651 + bdput(disk->part0); 1640 1652 out_free_disk: 1641 1653 kfree(disk); 1642 1654 return NULL; ··· 1675 1687 struct disk_part_iter piter; 1676 1688 struct hd_struct *part; 1677 1689 1678 - if (disk->part0.bdev->bd_read_only != flag) { 1690 + if (disk->part0->bd_read_only != flag) { 1679 1691 set_disk_ro_uevent(disk, flag); 1680 - disk->part0.bdev->bd_read_only = flag; 1692 + disk->part0->bd_read_only = flag; 1681 1693 } 1682 1694 1683 1695 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
+6 -61
block/partitions/core.c
··· 265 265 static void part_release(struct device *dev) 266 266 { 267 267 struct hd_struct *p = dev_to_part(dev); 268 + 268 269 blk_free_devt(dev->devt); 269 - hd_free_part(p); 270 - kfree(p); 270 + bdput(p->bdev); 271 271 } 272 272 273 273 static int part_uevent(struct device *dev, struct kobj_uevent_env *env) ··· 288 288 .uevent = part_uevent, 289 289 }; 290 290 291 - static void hd_struct_free_work(struct work_struct *work) 292 - { 293 - struct hd_struct *part = 294 - container_of(to_rcu_work(work), struct hd_struct, rcu_work); 295 - struct gendisk *disk = part_to_disk(part); 296 - 297 - /* 298 - * Release the disk reference acquired in delete_partition here. 299 - * We can't release it in hd_struct_free because the final put_device 300 - * needs process context and thus can't be run directly from a 301 - * percpu_ref ->release handler. 302 - */ 303 - put_device(disk_to_dev(disk)); 304 - 305 - part->bdev->bd_start_sect = 0; 306 - bdev_set_nr_sectors(part->bdev, 0); 307 - part_stat_set_all(part, 0); 308 - put_device(part_to_dev(part)); 309 - } 310 - 311 - static void hd_struct_free(struct percpu_ref *ref) 312 - { 313 - struct hd_struct *part = container_of(ref, struct hd_struct, ref); 314 - struct gendisk *disk = part_to_disk(part); 315 - struct disk_part_tbl *ptbl = 316 - rcu_dereference_protected(disk->part_tbl, 1); 317 - 318 - rcu_assign_pointer(ptbl->last_lookup, NULL); 319 - 320 - INIT_RCU_WORK(&part->rcu_work, hd_struct_free_work); 321 - queue_rcu_work(system_wq, &part->rcu_work); 322 - } 323 - 324 - int hd_ref_init(struct hd_struct *part) 325 - { 326 - if (percpu_ref_init(&part->ref, hd_struct_free, 0, GFP_KERNEL)) 327 - return -ENOMEM; 328 - return 0; 329 - } 330 - 331 291 /* 332 292 * Must be called either with bd_mutex held, before a disk can be opened or 333 293 * after all disk users are gone. 
··· 302 342 * ->part_tbl is referenced in this part's release handler, so 303 343 * we have to hold the disk device 304 344 */ 305 - get_device(disk_to_dev(disk)); 306 345 rcu_assign_pointer(ptbl->part[part->partno], NULL); 346 + rcu_assign_pointer(ptbl->last_lookup, NULL); 307 347 kobject_put(part->bdev->bd_holder_dir); 308 348 device_del(part_to_dev(part)); 309 349 ··· 313 353 */ 314 354 remove_inode_hash(part->bdev->bd_inode); 315 355 316 - percpu_ref_kill(&part->ref); 356 + put_device(part_to_dev(part)); 317 357 } 318 358 319 359 static ssize_t whole_disk_show(struct device *dev, ··· 366 406 if (ptbl->part[partno]) 367 407 return ERR_PTR(-EBUSY); 368 408 369 - p = kzalloc(sizeof(*p), GFP_KERNEL); 370 - if (!p) 371 - return ERR_PTR(-EBUSY); 372 - 373 409 bdev = bdev_alloc(disk, partno); 374 410 if (!bdev) 375 - goto out_free; 376 - p->bdev = bdev; 411 + return ERR_PTR(-ENOMEM); 377 412 413 + p = bdev->bd_part; 378 414 pdev = part_to_dev(p); 379 415 380 416 bdev->bd_start_sect = start; ··· 419 463 goto out_del; 420 464 } 421 465 422 - err = hd_ref_init(p); 423 - if (err) { 424 - if (flags & ADDPART_FLAG_WHOLEDISK) 425 - goto out_remove_file; 426 - goto out_del; 427 - } 428 - 429 466 /* everything is up and running, commence */ 430 467 bdev_add(bdev, devt); 431 468 rcu_assign_pointer(ptbl->part[partno], p); ··· 430 481 431 482 out_bdput: 432 483 bdput(bdev); 433 - out_free: 434 - kfree(p); 435 484 return ERR_PTR(err); 436 - out_remove_file: 437 - device_remove_file(pdev, &dev_attr_whole_disk); 438 485 out_del: 439 486 kobject_put(bdev->bd_holder_dir); 440 487 device_del(pdev);
+1 -1
drivers/block/drbd/drbd_receiver.c
··· 2802 2802 if (c_min_rate == 0) 2803 2803 return false; 2804 2804 2805 - curr_events = (int)part_stat_read_accum(&disk->part0, sectors) - 2805 + curr_events = (int)part_stat_read_accum(disk->part0->bd_part, sectors) - 2806 2806 atomic_read(&device->rs_sect_ev); 2807 2807 2808 2808 if (atomic_read(&device->ap_actlog_cnt)
+2 -1
drivers/block/drbd/drbd_worker.c
··· 1678 1678 atomic_set(&device->rs_sect_in, 0); 1679 1679 atomic_set(&device->rs_sect_ev, 0); 1680 1680 device->rs_in_flight = 0; 1681 - device->rs_last_events = (int)part_stat_read_accum(&disk->part0, sectors); 1681 + device->rs_last_events = 1682 + (int)part_stat_read_accum(disk->part0->bd_part, sectors); 1682 1683 1683 1684 /* Updating the RCU protected object in place is necessary since 1684 1685 this function gets called from atomic context.
+1 -1
drivers/block/zram/zram_drv.c
··· 1687 1687 zram->disksize = 0; 1688 1688 1689 1689 set_capacity_and_notify(zram->disk, 0); 1690 - part_stat_set_all(&zram->disk->part0, 0); 1690 + part_stat_set_all(zram->disk->part0->bd_part, 0); 1691 1691 1692 1692 up_write(&zram->init_lock); 1693 1693 /* I/O operation under all of CPU are done so let's free */
+2 -2
drivers/md/dm.c
··· 1607 1607 * (by eliminating DM's splitting and just using bio_split) 1608 1608 */ 1609 1609 part_stat_lock(); 1610 - __dm_part_stat_sub(&dm_disk(md)->part0, 1610 + __dm_part_stat_sub(dm_disk(md)->part0->bd_part, 1611 1611 sectors[op_stat_group(bio_op(bio))], ci.sector_count); 1612 1612 part_stat_unlock(); 1613 1613 ··· 2242 2242 static bool md_in_flight_bios(struct mapped_device *md) 2243 2243 { 2244 2244 int cpu; 2245 - struct hd_struct *part = &dm_disk(md)->part0; 2245 + struct hd_struct *part = dm_disk(md)->part0->bd_part; 2246 2246 long sum = 0; 2247 2247 2248 2248 for_each_possible_cpu(cpu) {
+1 -1
drivers/md/md.c
··· 8441 8441 rcu_read_lock(); 8442 8442 rdev_for_each_rcu(rdev, mddev) { 8443 8443 struct gendisk *disk = rdev->bdev->bd_disk; 8444 - curr_events = (int)part_stat_read_accum(&disk->part0, sectors) - 8444 + curr_events = (int)part_stat_read_accum(disk->part0->bd_part, sectors) - 8445 8445 atomic_read(&disk->sync_io); 8446 8446 /* sync IO will cause sync_io to increase before the disk_stats 8447 8447 * as sync_io is counted when a request starts, and
+12 -27
fs/block_dev.c
··· 39 39 40 40 struct bdev_inode { 41 41 struct block_device bdev; 42 + struct hd_struct hd; 42 43 struct inode vfs_inode; 43 44 }; 44 45 ··· 887 886 iput(inode); 888 887 return NULL; 889 888 } 889 + bdev->bd_part = &BDEV_I(inode)->hd; 890 + memset(bdev->bd_part, 0, sizeof(*bdev->bd_part)); 891 + bdev->bd_part->bdev = bdev; 890 892 return bdev; 891 893 } 892 894 ··· 1284 1280 static int __blkdev_get(struct block_device *bdev, fmode_t mode) 1285 1281 { 1286 1282 struct gendisk *disk = bdev->bd_disk; 1287 - int ret; 1283 + int ret = 0; 1288 1284 1289 1285 if (!bdev->bd_openers) { 1290 1286 if (!bdev_is_partition(bdev)) { 1291 - ret = -ENXIO; 1292 - bdev->bd_part = disk_get_part(disk, 0); 1293 - if (!bdev->bd_part) 1294 - goto out_clear; 1295 - 1296 1287 ret = 0; 1297 1288 if (disk->fops->open) 1298 1289 ret = disk->fops->open(bdev, mode); ··· 1306 1307 bdev_disk_changed(bdev, ret == -ENOMEDIUM); 1307 1308 1308 1309 if (ret) 1309 - goto out_clear; 1310 + return ret; 1310 1311 } else { 1311 1312 struct block_device *whole = bdget_disk(disk, 0); 1312 1313 ··· 1315 1316 if (ret) { 1316 1317 mutex_unlock(&whole->bd_mutex); 1317 1318 bdput(whole); 1318 - goto out_clear; 1319 + return ret; 1319 1320 } 1320 1321 whole->bd_part_count++; 1321 1322 mutex_unlock(&whole->bd_mutex); 1322 1323 1323 - bdev->bd_part = disk_get_part(disk, bdev->bd_partno); 1324 1324 if (!(disk->flags & GENHD_FL_UP) || 1325 - !bdev->bd_part || !bdev_nr_sectors(bdev)) { 1325 + !bdev_nr_sectors(bdev)) { 1326 1326 __blkdev_put(whole, mode, 1); 1327 1327 bdput(whole); 1328 - ret = -ENXIO; 1329 - goto out_clear; 1328 + return -ENXIO; 1330 1329 } 1331 1330 set_init_blocksize(bdev); 1332 1331 } ··· 1333 1336 bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info); 1334 1337 } else { 1335 1338 if (!bdev_is_partition(bdev)) { 1336 - ret = 0; 1337 1339 if (bdev->bd_disk->fops->open) 1338 1340 ret = bdev->bd_disk->fops->open(bdev, mode); 1339 1341 /* the same as first opener case, read comment there */ ··· 1345 
1349 } 1346 1350 bdev->bd_openers++; 1347 1351 return 0; 1348 - 1349 - out_clear: 1350 - disk_put_part(bdev->bd_part); 1351 - bdev->bd_part = NULL; 1352 - return ret; 1353 1352 } 1354 1353 1355 1354 struct block_device *blkdev_get_no_open(dev_t dev) ··· 1571 1580 sync_blockdev(bdev); 1572 1581 kill_bdev(bdev); 1573 1582 bdev_write_inode(bdev); 1574 - 1575 - if (!bdev_is_partition(bdev) && disk->fops->release) 1576 - disk->fops->release(disk, mode); 1577 - 1578 - disk_put_part(bdev->bd_part); 1579 - bdev->bd_part = NULL; 1580 1583 if (bdev_is_partition(bdev)) 1581 1584 victim = bdev_whole(bdev); 1582 - } else { 1583 - if (!bdev_is_partition(bdev) && disk->fops->release) 1584 - disk->fops->release(disk, mode); 1585 1585 } 1586 + 1587 + if (!bdev_is_partition(bdev) && disk->fops->release) 1588 + disk->fops->release(disk, mode); 1586 1589 mutex_unlock(&bdev->bd_mutex); 1587 1590 if (victim) { 1588 1591 __blkdev_put(victim, mode, 1);
+1 -1
include/linux/blk_types.h
··· 59 59 } __randomize_layout; 60 60 61 61 #define bdev_whole(_bdev) \ 62 - ((_bdev)->bd_disk->part0.bdev) 62 + ((_bdev)->bd_disk->part0) 63 63 64 64 #define bdev_kobj(_bdev) \ 65 65 (&part_to_dev((_bdev)->bd_part)->kobj)
+6 -8
include/linux/genhd.h
··· 19 19 #include <linux/blk_types.h> 20 20 #include <asm/local.h> 21 21 22 - #define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev) 23 22 #define dev_to_part(device) container_of((device), struct hd_struct, __dev) 24 - #define disk_to_dev(disk) (&(disk)->part0.__dev) 25 23 #define part_to_dev(part) (&((part)->__dev)) 24 + 25 + #define dev_to_disk(device) (dev_to_part(device)->bdev->bd_disk) 26 + #define disk_to_dev(disk) (part_to_dev((disk)->part0->bd_part)) 26 27 27 28 extern const struct device_type disk_type; 28 29 extern struct device_type part_type; ··· 52 51 }; 53 52 54 53 struct hd_struct { 55 - struct percpu_ref ref; 56 - 57 54 struct block_device *bdev; 58 55 struct device __dev; 59 56 int partno; 60 - struct rcu_work rcu_work; 61 57 }; 62 58 63 59 /** ··· 166 168 * helpers. 167 169 */ 168 170 struct disk_part_tbl __rcu *part_tbl; 169 - struct hd_struct part0; 171 + struct block_device *part0; 170 172 171 173 const struct block_device_operations *fops; 172 174 struct request_queue *queue; ··· 276 278 277 279 static inline int get_disk_ro(struct gendisk *disk) 278 280 { 279 - return disk->part0.bdev->bd_read_only; 281 + return disk->part0->bd_read_only; 280 282 } 281 283 282 284 extern void disk_block_events(struct gendisk *disk); ··· 300 302 301 303 static inline sector_t get_capacity(struct gendisk *disk) 302 304 { 303 - return bdev_nr_sectors(disk->part0.bdev); 305 + return bdev_nr_sectors(disk->part0); 304 306 } 305 307 306 308 int bdev_disk_changed(struct block_device *bdev, bool invalidate);
+2 -2
include/linux/part_stat.h
··· 59 59 #define part_stat_add(part, field, addnd) do { \ 60 60 __part_stat_add((part), field, addnd); \ 61 61 if ((part)->partno) \ 62 - __part_stat_add(&part_to_disk((part))->part0, \ 63 - field, addnd); \ 62 + __part_stat_add(part_to_disk((part))->part0->bd_part, \ 63 + field, addnd); \ 64 64 } while (0) 65 65 66 66 #define part_stat_dec(part, field) \