/* Source snapshot: linux fs/block_dev.c at v2.6.26 (31 kB, raw view) */
1/* 2 * linux/fs/block_dev.c 3 * 4 * Copyright (C) 1991, 1992 Linus Torvalds 5 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE 6 */ 7 8#include <linux/init.h> 9#include <linux/mm.h> 10#include <linux/fcntl.h> 11#include <linux/slab.h> 12#include <linux/kmod.h> 13#include <linux/major.h> 14#include <linux/smp_lock.h> 15#include <linux/device_cgroup.h> 16#include <linux/highmem.h> 17#include <linux/blkdev.h> 18#include <linux/module.h> 19#include <linux/blkpg.h> 20#include <linux/buffer_head.h> 21#include <linux/writeback.h> 22#include <linux/mpage.h> 23#include <linux/mount.h> 24#include <linux/uio.h> 25#include <linux/namei.h> 26#include <linux/log2.h> 27#include <asm/uaccess.h> 28#include "internal.h" 29 30struct bdev_inode { 31 struct block_device bdev; 32 struct inode vfs_inode; 33}; 34 35static const struct address_space_operations def_blk_aops; 36 37static inline struct bdev_inode *BDEV_I(struct inode *inode) 38{ 39 return container_of(inode, struct bdev_inode, vfs_inode); 40} 41 42inline struct block_device *I_BDEV(struct inode *inode) 43{ 44 return &BDEV_I(inode)->bdev; 45} 46 47EXPORT_SYMBOL(I_BDEV); 48 49static sector_t max_block(struct block_device *bdev) 50{ 51 sector_t retval = ~((sector_t)0); 52 loff_t sz = i_size_read(bdev->bd_inode); 53 54 if (sz) { 55 unsigned int size = block_size(bdev); 56 unsigned int sizebits = blksize_bits(size); 57 retval = (sz >> sizebits); 58 } 59 return retval; 60} 61 62/* Kill _all_ buffers and pagecache , dirty or not.. 
*/ 63static void kill_bdev(struct block_device *bdev) 64{ 65 if (bdev->bd_inode->i_mapping->nrpages == 0) 66 return; 67 invalidate_bh_lrus(); 68 truncate_inode_pages(bdev->bd_inode->i_mapping, 0); 69} 70 71int set_blocksize(struct block_device *bdev, int size) 72{ 73 /* Size must be a power of two, and between 512 and PAGE_SIZE */ 74 if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size)) 75 return -EINVAL; 76 77 /* Size cannot be smaller than the size supported by the device */ 78 if (size < bdev_hardsect_size(bdev)) 79 return -EINVAL; 80 81 /* Don't change the size if it is same as current */ 82 if (bdev->bd_block_size != size) { 83 sync_blockdev(bdev); 84 bdev->bd_block_size = size; 85 bdev->bd_inode->i_blkbits = blksize_bits(size); 86 kill_bdev(bdev); 87 } 88 return 0; 89} 90 91EXPORT_SYMBOL(set_blocksize); 92 93int sb_set_blocksize(struct super_block *sb, int size) 94{ 95 if (set_blocksize(sb->s_bdev, size)) 96 return 0; 97 /* If we get here, we know size is power of two 98 * and it's value is between 512 and PAGE_SIZE */ 99 sb->s_blocksize = size; 100 sb->s_blocksize_bits = blksize_bits(size); 101 return sb->s_blocksize; 102} 103 104EXPORT_SYMBOL(sb_set_blocksize); 105 106int sb_min_blocksize(struct super_block *sb, int size) 107{ 108 int minsize = bdev_hardsect_size(sb->s_bdev); 109 if (size < minsize) 110 size = minsize; 111 return sb_set_blocksize(sb, size); 112} 113 114EXPORT_SYMBOL(sb_min_blocksize); 115 116static int 117blkdev_get_block(struct inode *inode, sector_t iblock, 118 struct buffer_head *bh, int create) 119{ 120 if (iblock >= max_block(I_BDEV(inode))) { 121 if (create) 122 return -EIO; 123 124 /* 125 * for reads, we're just trying to fill a partial page. 
126 * return a hole, they will have to call get_block again 127 * before they can fill it, and they will get -EIO at that 128 * time 129 */ 130 return 0; 131 } 132 bh->b_bdev = I_BDEV(inode); 133 bh->b_blocknr = iblock; 134 set_buffer_mapped(bh); 135 return 0; 136} 137 138static int 139blkdev_get_blocks(struct inode *inode, sector_t iblock, 140 struct buffer_head *bh, int create) 141{ 142 sector_t end_block = max_block(I_BDEV(inode)); 143 unsigned long max_blocks = bh->b_size >> inode->i_blkbits; 144 145 if ((iblock + max_blocks) > end_block) { 146 max_blocks = end_block - iblock; 147 if ((long)max_blocks <= 0) { 148 if (create) 149 return -EIO; /* write fully beyond EOF */ 150 /* 151 * It is a read which is fully beyond EOF. We return 152 * a !buffer_mapped buffer 153 */ 154 max_blocks = 0; 155 } 156 } 157 158 bh->b_bdev = I_BDEV(inode); 159 bh->b_blocknr = iblock; 160 bh->b_size = max_blocks << inode->i_blkbits; 161 if (max_blocks) 162 set_buffer_mapped(bh); 163 return 0; 164} 165 166static ssize_t 167blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 168 loff_t offset, unsigned long nr_segs) 169{ 170 struct file *file = iocb->ki_filp; 171 struct inode *inode = file->f_mapping->host; 172 173 return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode), 174 iov, offset, nr_segs, blkdev_get_blocks, NULL); 175} 176 177static int blkdev_writepage(struct page *page, struct writeback_control *wbc) 178{ 179 return block_write_full_page(page, blkdev_get_block, wbc); 180} 181 182static int blkdev_readpage(struct file * file, struct page * page) 183{ 184 return block_read_full_page(page, blkdev_get_block); 185} 186 187static int blkdev_write_begin(struct file *file, struct address_space *mapping, 188 loff_t pos, unsigned len, unsigned flags, 189 struct page **pagep, void **fsdata) 190{ 191 *pagep = NULL; 192 return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 193 blkdev_get_block); 194} 195 196static int blkdev_write_end(struct 
file *file, struct address_space *mapping, 197 loff_t pos, unsigned len, unsigned copied, 198 struct page *page, void *fsdata) 199{ 200 int ret; 201 ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); 202 203 unlock_page(page); 204 page_cache_release(page); 205 206 return ret; 207} 208 209/* 210 * private llseek: 211 * for a block special file file->f_path.dentry->d_inode->i_size is zero 212 * so we compute the size by hand (just as in block_read/write above) 213 */ 214static loff_t block_llseek(struct file *file, loff_t offset, int origin) 215{ 216 struct inode *bd_inode = file->f_mapping->host; 217 loff_t size; 218 loff_t retval; 219 220 mutex_lock(&bd_inode->i_mutex); 221 size = i_size_read(bd_inode); 222 223 switch (origin) { 224 case 2: 225 offset += size; 226 break; 227 case 1: 228 offset += file->f_pos; 229 } 230 retval = -EINVAL; 231 if (offset >= 0 && offset <= size) { 232 if (offset != file->f_pos) { 233 file->f_pos = offset; 234 } 235 retval = offset; 236 } 237 mutex_unlock(&bd_inode->i_mutex); 238 return retval; 239} 240 241/* 242 * Filp is never NULL; the only case when ->fsync() is called with 243 * NULL first argument is nfsd_sync_dir() and that's not a directory. 
244 */ 245 246static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) 247{ 248 return sync_blockdev(I_BDEV(filp->f_mapping->host)); 249} 250 251/* 252 * pseudo-fs 253 */ 254 255static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); 256static struct kmem_cache * bdev_cachep __read_mostly; 257 258static struct inode *bdev_alloc_inode(struct super_block *sb) 259{ 260 struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL); 261 if (!ei) 262 return NULL; 263 return &ei->vfs_inode; 264} 265 266static void bdev_destroy_inode(struct inode *inode) 267{ 268 struct bdev_inode *bdi = BDEV_I(inode); 269 270 bdi->bdev.bd_inode_backing_dev_info = NULL; 271 kmem_cache_free(bdev_cachep, bdi); 272} 273 274static void init_once(struct kmem_cache * cachep, void *foo) 275{ 276 struct bdev_inode *ei = (struct bdev_inode *) foo; 277 struct block_device *bdev = &ei->bdev; 278 279 memset(bdev, 0, sizeof(*bdev)); 280 mutex_init(&bdev->bd_mutex); 281 sema_init(&bdev->bd_mount_sem, 1); 282 INIT_LIST_HEAD(&bdev->bd_inodes); 283 INIT_LIST_HEAD(&bdev->bd_list); 284#ifdef CONFIG_SYSFS 285 INIT_LIST_HEAD(&bdev->bd_holder_list); 286#endif 287 inode_init_once(&ei->vfs_inode); 288} 289 290static inline void __bd_forget(struct inode *inode) 291{ 292 list_del_init(&inode->i_devices); 293 inode->i_bdev = NULL; 294 inode->i_mapping = &inode->i_data; 295} 296 297static void bdev_clear_inode(struct inode *inode) 298{ 299 struct block_device *bdev = &BDEV_I(inode)->bdev; 300 struct list_head *p; 301 spin_lock(&bdev_lock); 302 while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) { 303 __bd_forget(list_entry(p, struct inode, i_devices)); 304 } 305 list_del_init(&bdev->bd_list); 306 spin_unlock(&bdev_lock); 307} 308 309static const struct super_operations bdev_sops = { 310 .statfs = simple_statfs, 311 .alloc_inode = bdev_alloc_inode, 312 .destroy_inode = bdev_destroy_inode, 313 .drop_inode = generic_delete_inode, 314 .clear_inode = bdev_clear_inode, 315}; 316 
317static int bd_get_sb(struct file_system_type *fs_type, 318 int flags, const char *dev_name, void *data, struct vfsmount *mnt) 319{ 320 return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt); 321} 322 323static struct file_system_type bd_type = { 324 .name = "bdev", 325 .get_sb = bd_get_sb, 326 .kill_sb = kill_anon_super, 327}; 328 329static struct vfsmount *bd_mnt __read_mostly; 330struct super_block *blockdev_superblock; 331 332void __init bdev_cache_init(void) 333{ 334 int err; 335 bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), 336 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 337 SLAB_MEM_SPREAD|SLAB_PANIC), 338 init_once); 339 err = register_filesystem(&bd_type); 340 if (err) 341 panic("Cannot register bdev pseudo-fs"); 342 bd_mnt = kern_mount(&bd_type); 343 if (IS_ERR(bd_mnt)) 344 panic("Cannot create bdev pseudo-fs"); 345 blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ 346} 347 348/* 349 * Most likely _very_ bad one - but then it's hardly critical for small 350 * /dev and can be fixed when somebody will need really large one. 351 * Keep in mind that it will be fed through icache hash function too. 
352 */ 353static inline unsigned long hash(dev_t dev) 354{ 355 return MAJOR(dev)+MINOR(dev); 356} 357 358static int bdev_test(struct inode *inode, void *data) 359{ 360 return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data; 361} 362 363static int bdev_set(struct inode *inode, void *data) 364{ 365 BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data; 366 return 0; 367} 368 369static LIST_HEAD(all_bdevs); 370 371struct block_device *bdget(dev_t dev) 372{ 373 struct block_device *bdev; 374 struct inode *inode; 375 376 inode = iget5_locked(bd_mnt->mnt_sb, hash(dev), 377 bdev_test, bdev_set, &dev); 378 379 if (!inode) 380 return NULL; 381 382 bdev = &BDEV_I(inode)->bdev; 383 384 if (inode->i_state & I_NEW) { 385 bdev->bd_contains = NULL; 386 bdev->bd_inode = inode; 387 bdev->bd_block_size = (1 << inode->i_blkbits); 388 bdev->bd_part_count = 0; 389 bdev->bd_invalidated = 0; 390 inode->i_mode = S_IFBLK; 391 inode->i_rdev = dev; 392 inode->i_bdev = bdev; 393 inode->i_data.a_ops = &def_blk_aops; 394 mapping_set_gfp_mask(&inode->i_data, GFP_USER); 395 inode->i_data.backing_dev_info = &default_backing_dev_info; 396 spin_lock(&bdev_lock); 397 list_add(&bdev->bd_list, &all_bdevs); 398 spin_unlock(&bdev_lock); 399 unlock_new_inode(inode); 400 } 401 return bdev; 402} 403 404EXPORT_SYMBOL(bdget); 405 406long nr_blockdev_pages(void) 407{ 408 struct block_device *bdev; 409 long ret = 0; 410 spin_lock(&bdev_lock); 411 list_for_each_entry(bdev, &all_bdevs, bd_list) { 412 ret += bdev->bd_inode->i_mapping->nrpages; 413 } 414 spin_unlock(&bdev_lock); 415 return ret; 416} 417 418void bdput(struct block_device *bdev) 419{ 420 iput(bdev->bd_inode); 421} 422 423EXPORT_SYMBOL(bdput); 424 425static struct block_device *bd_acquire(struct inode *inode) 426{ 427 struct block_device *bdev; 428 429 spin_lock(&bdev_lock); 430 bdev = inode->i_bdev; 431 if (bdev) { 432 atomic_inc(&bdev->bd_inode->i_count); 433 spin_unlock(&bdev_lock); 434 return bdev; 435 } 436 spin_unlock(&bdev_lock); 437 438 bdev = 
bdget(inode->i_rdev); 439 if (bdev) { 440 spin_lock(&bdev_lock); 441 if (!inode->i_bdev) { 442 /* 443 * We take an additional bd_inode->i_count for inode, 444 * and it's released in clear_inode() of inode. 445 * So, we can access it via ->i_mapping always 446 * without igrab(). 447 */ 448 atomic_inc(&bdev->bd_inode->i_count); 449 inode->i_bdev = bdev; 450 inode->i_mapping = bdev->bd_inode->i_mapping; 451 list_add(&inode->i_devices, &bdev->bd_inodes); 452 } 453 spin_unlock(&bdev_lock); 454 } 455 return bdev; 456} 457 458/* Call when you free inode */ 459 460void bd_forget(struct inode *inode) 461{ 462 struct block_device *bdev = NULL; 463 464 spin_lock(&bdev_lock); 465 if (inode->i_bdev) { 466 if (inode->i_sb != blockdev_superblock) 467 bdev = inode->i_bdev; 468 __bd_forget(inode); 469 } 470 spin_unlock(&bdev_lock); 471 472 if (bdev) 473 iput(bdev->bd_inode); 474} 475 476int bd_claim(struct block_device *bdev, void *holder) 477{ 478 int res; 479 spin_lock(&bdev_lock); 480 481 /* first decide result */ 482 if (bdev->bd_holder == holder) 483 res = 0; /* already a holder */ 484 else if (bdev->bd_holder != NULL) 485 res = -EBUSY; /* held by someone else */ 486 else if (bdev->bd_contains == bdev) 487 res = 0; /* is a whole device which isn't held */ 488 489 else if (bdev->bd_contains->bd_holder == bd_claim) 490 res = 0; /* is a partition of a device that is being partitioned */ 491 else if (bdev->bd_contains->bd_holder != NULL) 492 res = -EBUSY; /* is a partition of a held device */ 493 else 494 res = 0; /* is a partition of an un-held device */ 495 496 /* now impose change */ 497 if (res==0) { 498 /* note that for a whole device bd_holders 499 * will be incremented twice, and bd_holder will 500 * be set to bd_claim before being set to holder 501 */ 502 bdev->bd_contains->bd_holders ++; 503 bdev->bd_contains->bd_holder = bd_claim; 504 bdev->bd_holders++; 505 bdev->bd_holder = holder; 506 } 507 spin_unlock(&bdev_lock); 508 return res; 509} 510 511EXPORT_SYMBOL(bd_claim); 
512 513void bd_release(struct block_device *bdev) 514{ 515 spin_lock(&bdev_lock); 516 if (!--bdev->bd_contains->bd_holders) 517 bdev->bd_contains->bd_holder = NULL; 518 if (!--bdev->bd_holders) 519 bdev->bd_holder = NULL; 520 spin_unlock(&bdev_lock); 521} 522 523EXPORT_SYMBOL(bd_release); 524 525#ifdef CONFIG_SYSFS 526/* 527 * Functions for bd_claim_by_kobject / bd_release_from_kobject 528 * 529 * If a kobject is passed to bd_claim_by_kobject() 530 * and the kobject has a parent directory, 531 * following symlinks are created: 532 * o from the kobject to the claimed bdev 533 * o from "holders" directory of the bdev to the parent of the kobject 534 * bd_release_from_kobject() removes these symlinks. 535 * 536 * Example: 537 * If /dev/dm-0 maps to /dev/sda, kobject corresponding to 538 * /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then: 539 * /sys/block/dm-0/slaves/sda --> /sys/block/sda 540 * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0 541 */ 542 543static struct kobject *bdev_get_kobj(struct block_device *bdev) 544{ 545 if (bdev->bd_contains != bdev) 546 return kobject_get(&bdev->bd_part->dev.kobj); 547 else 548 return kobject_get(&bdev->bd_disk->dev.kobj); 549} 550 551static struct kobject *bdev_get_holder(struct block_device *bdev) 552{ 553 if (bdev->bd_contains != bdev) 554 return kobject_get(bdev->bd_part->holder_dir); 555 else 556 return kobject_get(bdev->bd_disk->holder_dir); 557} 558 559static int add_symlink(struct kobject *from, struct kobject *to) 560{ 561 if (!from || !to) 562 return 0; 563 return sysfs_create_link(from, to, kobject_name(to)); 564} 565 566static void del_symlink(struct kobject *from, struct kobject *to) 567{ 568 if (!from || !to) 569 return; 570 sysfs_remove_link(from, kobject_name(to)); 571} 572 573/* 574 * 'struct bd_holder' contains pointers to kobjects symlinked by 575 * bd_claim_by_kobject. 576 * It's connected to bd_holder_list which is protected by bdev->bd_sem. 
577 */ 578struct bd_holder { 579 struct list_head list; /* chain of holders of the bdev */ 580 int count; /* references from the holder */ 581 struct kobject *sdir; /* holder object, e.g. "/block/dm-0/slaves" */ 582 struct kobject *hdev; /* e.g. "/block/dm-0" */ 583 struct kobject *hdir; /* e.g. "/block/sda/holders" */ 584 struct kobject *sdev; /* e.g. "/block/sda" */ 585}; 586 587/* 588 * Get references of related kobjects at once. 589 * Returns 1 on success. 0 on failure. 590 * 591 * Should call bd_holder_release_dirs() after successful use. 592 */ 593static int bd_holder_grab_dirs(struct block_device *bdev, 594 struct bd_holder *bo) 595{ 596 if (!bdev || !bo) 597 return 0; 598 599 bo->sdir = kobject_get(bo->sdir); 600 if (!bo->sdir) 601 return 0; 602 603 bo->hdev = kobject_get(bo->sdir->parent); 604 if (!bo->hdev) 605 goto fail_put_sdir; 606 607 bo->sdev = bdev_get_kobj(bdev); 608 if (!bo->sdev) 609 goto fail_put_hdev; 610 611 bo->hdir = bdev_get_holder(bdev); 612 if (!bo->hdir) 613 goto fail_put_sdev; 614 615 return 1; 616 617fail_put_sdev: 618 kobject_put(bo->sdev); 619fail_put_hdev: 620 kobject_put(bo->hdev); 621fail_put_sdir: 622 kobject_put(bo->sdir); 623 624 return 0; 625} 626 627/* Put references of related kobjects at once. 
*/ 628static void bd_holder_release_dirs(struct bd_holder *bo) 629{ 630 kobject_put(bo->hdir); 631 kobject_put(bo->sdev); 632 kobject_put(bo->hdev); 633 kobject_put(bo->sdir); 634} 635 636static struct bd_holder *alloc_bd_holder(struct kobject *kobj) 637{ 638 struct bd_holder *bo; 639 640 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 641 if (!bo) 642 return NULL; 643 644 bo->count = 1; 645 bo->sdir = kobj; 646 647 return bo; 648} 649 650static void free_bd_holder(struct bd_holder *bo) 651{ 652 kfree(bo); 653} 654 655/** 656 * find_bd_holder - find matching struct bd_holder from the block device 657 * 658 * @bdev: struct block device to be searched 659 * @bo: target struct bd_holder 660 * 661 * Returns matching entry with @bo in @bdev->bd_holder_list. 662 * If found, increment the reference count and return the pointer. 663 * If not found, returns NULL. 664 */ 665static struct bd_holder *find_bd_holder(struct block_device *bdev, 666 struct bd_holder *bo) 667{ 668 struct bd_holder *tmp; 669 670 list_for_each_entry(tmp, &bdev->bd_holder_list, list) 671 if (tmp->sdir == bo->sdir) { 672 tmp->count++; 673 return tmp; 674 } 675 676 return NULL; 677} 678 679/** 680 * add_bd_holder - create sysfs symlinks for bd_claim() relationship 681 * 682 * @bdev: block device to be bd_claimed 683 * @bo: preallocated and initialized by alloc_bd_holder() 684 * 685 * Add @bo to @bdev->bd_holder_list, create symlinks. 686 * 687 * Returns 0 if symlinks are created. 688 * Returns -ve if something fails. 
689 */ 690static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo) 691{ 692 int err; 693 694 if (!bo) 695 return -EINVAL; 696 697 if (!bd_holder_grab_dirs(bdev, bo)) 698 return -EBUSY; 699 700 err = add_symlink(bo->sdir, bo->sdev); 701 if (err) 702 return err; 703 704 err = add_symlink(bo->hdir, bo->hdev); 705 if (err) { 706 del_symlink(bo->sdir, bo->sdev); 707 return err; 708 } 709 710 list_add_tail(&bo->list, &bdev->bd_holder_list); 711 return 0; 712} 713 714/** 715 * del_bd_holder - delete sysfs symlinks for bd_claim() relationship 716 * 717 * @bdev: block device to be bd_claimed 718 * @kobj: holder's kobject 719 * 720 * If there is matching entry with @kobj in @bdev->bd_holder_list 721 * and no other bd_claim() from the same kobject, 722 * remove the struct bd_holder from the list, delete symlinks for it. 723 * 724 * Returns a pointer to the struct bd_holder when it's removed from the list 725 * and ready to be freed. 726 * Returns NULL if matching claim isn't found or there is other bd_claim() 727 * by the same kobject. 728 */ 729static struct bd_holder *del_bd_holder(struct block_device *bdev, 730 struct kobject *kobj) 731{ 732 struct bd_holder *bo; 733 734 list_for_each_entry(bo, &bdev->bd_holder_list, list) { 735 if (bo->sdir == kobj) { 736 bo->count--; 737 BUG_ON(bo->count < 0); 738 if (!bo->count) { 739 list_del(&bo->list); 740 del_symlink(bo->sdir, bo->sdev); 741 del_symlink(bo->hdir, bo->hdev); 742 bd_holder_release_dirs(bo); 743 return bo; 744 } 745 break; 746 } 747 } 748 749 return NULL; 750} 751 752/** 753 * bd_claim_by_kobject - bd_claim() with additional kobject signature 754 * 755 * @bdev: block device to be claimed 756 * @holder: holder's signature 757 * @kobj: holder's kobject 758 * 759 * Do bd_claim() and if it succeeds, create sysfs symlinks between 760 * the bdev and the holder's kobject. 761 * Use bd_release_from_kobject() when relesing the claimed bdev. 762 * 763 * Returns 0 on success. 
(same as bd_claim()) 764 * Returns errno on failure. 765 */ 766static int bd_claim_by_kobject(struct block_device *bdev, void *holder, 767 struct kobject *kobj) 768{ 769 int err; 770 struct bd_holder *bo, *found; 771 772 if (!kobj) 773 return -EINVAL; 774 775 bo = alloc_bd_holder(kobj); 776 if (!bo) 777 return -ENOMEM; 778 779 mutex_lock(&bdev->bd_mutex); 780 781 err = bd_claim(bdev, holder); 782 if (err) 783 goto fail; 784 785 found = find_bd_holder(bdev, bo); 786 if (found) 787 goto fail; 788 789 err = add_bd_holder(bdev, bo); 790 if (err) 791 bd_release(bdev); 792 else 793 bo = NULL; 794fail: 795 mutex_unlock(&bdev->bd_mutex); 796 free_bd_holder(bo); 797 return err; 798} 799 800/** 801 * bd_release_from_kobject - bd_release() with additional kobject signature 802 * 803 * @bdev: block device to be released 804 * @kobj: holder's kobject 805 * 806 * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject(). 807 */ 808static void bd_release_from_kobject(struct block_device *bdev, 809 struct kobject *kobj) 810{ 811 if (!kobj) 812 return; 813 814 mutex_lock(&bdev->bd_mutex); 815 bd_release(bdev); 816 free_bd_holder(del_bd_holder(bdev, kobj)); 817 mutex_unlock(&bdev->bd_mutex); 818} 819 820/** 821 * bd_claim_by_disk - wrapper function for bd_claim_by_kobject() 822 * 823 * @bdev: block device to be claimed 824 * @holder: holder's signature 825 * @disk: holder's gendisk 826 * 827 * Call bd_claim_by_kobject() with getting @disk->slave_dir. 828 */ 829int bd_claim_by_disk(struct block_device *bdev, void *holder, 830 struct gendisk *disk) 831{ 832 return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir)); 833} 834EXPORT_SYMBOL_GPL(bd_claim_by_disk); 835 836/** 837 * bd_release_from_disk - wrapper function for bd_release_from_kobject() 838 * 839 * @bdev: block device to be claimed 840 * @disk: holder's gendisk 841 * 842 * Call bd_release_from_kobject() and put @disk->slave_dir. 
843 */ 844void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk) 845{ 846 bd_release_from_kobject(bdev, disk->slave_dir); 847 kobject_put(disk->slave_dir); 848} 849EXPORT_SYMBOL_GPL(bd_release_from_disk); 850#endif 851 852/* 853 * Tries to open block device by device number. Use it ONLY if you 854 * really do not have anything better - i.e. when you are behind a 855 * truly sucky interface and all you are given is a device number. _Never_ 856 * to be used for internal purposes. If you ever need it - reconsider 857 * your API. 858 */ 859struct block_device *open_by_devnum(dev_t dev, unsigned mode) 860{ 861 struct block_device *bdev = bdget(dev); 862 int err = -ENOMEM; 863 int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY; 864 if (bdev) 865 err = blkdev_get(bdev, mode, flags); 866 return err ? ERR_PTR(err) : bdev; 867} 868 869EXPORT_SYMBOL(open_by_devnum); 870 871/* 872 * This routine checks whether a removable media has been changed, 873 * and invalidates all buffer-cache-entries in that case. This 874 * is a relatively slow routine, so we have to try to minimize using 875 * it. Thus it is called only upon a 'mount' or 'open'. This 876 * is the best way of combining speed and utility, I think. 
877 * People changing diskettes in the middle of an operation deserve 878 * to lose :-) 879 */ 880int check_disk_change(struct block_device *bdev) 881{ 882 struct gendisk *disk = bdev->bd_disk; 883 struct block_device_operations * bdops = disk->fops; 884 885 if (!bdops->media_changed) 886 return 0; 887 if (!bdops->media_changed(bdev->bd_disk)) 888 return 0; 889 890 if (__invalidate_device(bdev)) 891 printk("VFS: busy inodes on changed media.\n"); 892 893 if (bdops->revalidate_disk) 894 bdops->revalidate_disk(bdev->bd_disk); 895 if (bdev->bd_disk->minors > 1) 896 bdev->bd_invalidated = 1; 897 return 1; 898} 899 900EXPORT_SYMBOL(check_disk_change); 901 902void bd_set_size(struct block_device *bdev, loff_t size) 903{ 904 unsigned bsize = bdev_hardsect_size(bdev); 905 906 bdev->bd_inode->i_size = size; 907 while (bsize < PAGE_CACHE_SIZE) { 908 if (size & bsize) 909 break; 910 bsize <<= 1; 911 } 912 bdev->bd_block_size = bsize; 913 bdev->bd_inode->i_blkbits = blksize_bits(bsize); 914} 915EXPORT_SYMBOL(bd_set_size); 916 917static int __blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags, 918 int for_part); 919static int __blkdev_put(struct block_device *bdev, int for_part); 920 921/* 922 * bd_mutex locking: 923 * 924 * mutex_lock(part->bd_mutex) 925 * mutex_lock_nested(whole->bd_mutex, 1) 926 */ 927 928static int do_open(struct block_device *bdev, struct file *file, int for_part) 929{ 930 struct module *owner = NULL; 931 struct gendisk *disk; 932 int ret; 933 int part; 934 int perm = 0; 935 936 if (file->f_mode & FMODE_READ) 937 perm |= MAY_READ; 938 if (file->f_mode & FMODE_WRITE) 939 perm |= MAY_WRITE; 940 /* 941 * hooks: /n/, see "layering violations". 
942 */ 943 ret = devcgroup_inode_permission(bdev->bd_inode, perm); 944 if (ret != 0) 945 return ret; 946 947 ret = -ENXIO; 948 file->f_mapping = bdev->bd_inode->i_mapping; 949 lock_kernel(); 950 disk = get_gendisk(bdev->bd_dev, &part); 951 if (!disk) { 952 unlock_kernel(); 953 bdput(bdev); 954 return ret; 955 } 956 owner = disk->fops->owner; 957 958 mutex_lock_nested(&bdev->bd_mutex, for_part); 959 if (!bdev->bd_openers) { 960 bdev->bd_disk = disk; 961 bdev->bd_contains = bdev; 962 if (!part) { 963 struct backing_dev_info *bdi; 964 if (disk->fops->open) { 965 ret = disk->fops->open(bdev->bd_inode, file); 966 if (ret) 967 goto out_first; 968 } 969 if (!bdev->bd_openers) { 970 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); 971 bdi = blk_get_backing_dev_info(bdev); 972 if (bdi == NULL) 973 bdi = &default_backing_dev_info; 974 bdev->bd_inode->i_data.backing_dev_info = bdi; 975 } 976 if (bdev->bd_invalidated) 977 rescan_partitions(disk, bdev); 978 } else { 979 struct hd_struct *p; 980 struct block_device *whole; 981 whole = bdget_disk(disk, 0); 982 ret = -ENOMEM; 983 if (!whole) 984 goto out_first; 985 BUG_ON(for_part); 986 ret = __blkdev_get(whole, file->f_mode, file->f_flags, 1); 987 if (ret) 988 goto out_first; 989 bdev->bd_contains = whole; 990 p = disk->part[part - 1]; 991 bdev->bd_inode->i_data.backing_dev_info = 992 whole->bd_inode->i_data.backing_dev_info; 993 if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) { 994 ret = -ENXIO; 995 goto out_first; 996 } 997 kobject_get(&p->dev.kobj); 998 bdev->bd_part = p; 999 bd_set_size(bdev, (loff_t) p->nr_sects << 9); 1000 } 1001 } else { 1002 put_disk(disk); 1003 module_put(owner); 1004 if (bdev->bd_contains == bdev) { 1005 if (bdev->bd_disk->fops->open) { 1006 ret = bdev->bd_disk->fops->open(bdev->bd_inode, file); 1007 if (ret) 1008 goto out; 1009 } 1010 if (bdev->bd_invalidated) 1011 rescan_partitions(bdev->bd_disk, bdev); 1012 } 1013 } 1014 bdev->bd_openers++; 1015 if (for_part) 1016 bdev->bd_part_count++; 1017 
mutex_unlock(&bdev->bd_mutex); 1018 unlock_kernel(); 1019 return 0; 1020 1021out_first: 1022 bdev->bd_disk = NULL; 1023 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; 1024 if (bdev != bdev->bd_contains) 1025 __blkdev_put(bdev->bd_contains, 1); 1026 bdev->bd_contains = NULL; 1027 put_disk(disk); 1028 module_put(owner); 1029out: 1030 mutex_unlock(&bdev->bd_mutex); 1031 unlock_kernel(); 1032 if (ret) 1033 bdput(bdev); 1034 return ret; 1035} 1036 1037static int __blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags, 1038 int for_part) 1039{ 1040 /* 1041 * This crockload is due to bad choice of ->open() type. 1042 * It will go away. 1043 * For now, block device ->open() routine must _not_ 1044 * examine anything in 'inode' argument except ->i_rdev. 1045 */ 1046 struct file fake_file = {}; 1047 struct dentry fake_dentry = {}; 1048 fake_file.f_mode = mode; 1049 fake_file.f_flags = flags; 1050 fake_file.f_path.dentry = &fake_dentry; 1051 fake_dentry.d_inode = bdev->bd_inode; 1052 1053 return do_open(bdev, &fake_file, for_part); 1054} 1055 1056int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags) 1057{ 1058 return __blkdev_get(bdev, mode, flags, 0); 1059} 1060EXPORT_SYMBOL(blkdev_get); 1061 1062static int blkdev_open(struct inode * inode, struct file * filp) 1063{ 1064 struct block_device *bdev; 1065 int res; 1066 1067 /* 1068 * Preserve backwards compatibility and allow large file access 1069 * even if userspace doesn't ask for it explicitly. Some mkfs 1070 * binary needs it. We might want to drop this workaround 1071 * during an unstable branch. 
1072 */ 1073 filp->f_flags |= O_LARGEFILE; 1074 1075 bdev = bd_acquire(inode); 1076 if (bdev == NULL) 1077 return -ENOMEM; 1078 1079 res = do_open(bdev, filp, 0); 1080 if (res) 1081 return res; 1082 1083 if (!(filp->f_flags & O_EXCL) ) 1084 return 0; 1085 1086 if (!(res = bd_claim(bdev, filp))) 1087 return 0; 1088 1089 blkdev_put(bdev); 1090 return res; 1091} 1092 1093static int __blkdev_put(struct block_device *bdev, int for_part) 1094{ 1095 int ret = 0; 1096 struct inode *bd_inode = bdev->bd_inode; 1097 struct gendisk *disk = bdev->bd_disk; 1098 struct block_device *victim = NULL; 1099 1100 mutex_lock_nested(&bdev->bd_mutex, for_part); 1101 lock_kernel(); 1102 if (for_part) 1103 bdev->bd_part_count--; 1104 1105 if (!--bdev->bd_openers) { 1106 sync_blockdev(bdev); 1107 kill_bdev(bdev); 1108 } 1109 if (bdev->bd_contains == bdev) { 1110 if (disk->fops->release) 1111 ret = disk->fops->release(bd_inode, NULL); 1112 } 1113 if (!bdev->bd_openers) { 1114 struct module *owner = disk->fops->owner; 1115 1116 put_disk(disk); 1117 module_put(owner); 1118 1119 if (bdev->bd_contains != bdev) { 1120 kobject_put(&bdev->bd_part->dev.kobj); 1121 bdev->bd_part = NULL; 1122 } 1123 bdev->bd_disk = NULL; 1124 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; 1125 if (bdev != bdev->bd_contains) 1126 victim = bdev->bd_contains; 1127 bdev->bd_contains = NULL; 1128 } 1129 unlock_kernel(); 1130 mutex_unlock(&bdev->bd_mutex); 1131 bdput(bdev); 1132 if (victim) 1133 __blkdev_put(victim, 1); 1134 return ret; 1135} 1136 1137int blkdev_put(struct block_device *bdev) 1138{ 1139 return __blkdev_put(bdev, 0); 1140} 1141EXPORT_SYMBOL(blkdev_put); 1142 1143static int blkdev_close(struct inode * inode, struct file * filp) 1144{ 1145 struct block_device *bdev = I_BDEV(filp->f_mapping->host); 1146 if (bdev->bd_holder == filp) 1147 bd_release(bdev); 1148 return blkdev_put(bdev); 1149} 1150 1151static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg) 1152{ 1153 return 
blkdev_ioctl(file->f_mapping->host, file, cmd, arg); 1154} 1155 1156static const struct address_space_operations def_blk_aops = { 1157 .readpage = blkdev_readpage, 1158 .writepage = blkdev_writepage, 1159 .sync_page = block_sync_page, 1160 .write_begin = blkdev_write_begin, 1161 .write_end = blkdev_write_end, 1162 .writepages = generic_writepages, 1163 .direct_IO = blkdev_direct_IO, 1164}; 1165 1166const struct file_operations def_blk_fops = { 1167 .open = blkdev_open, 1168 .release = blkdev_close, 1169 .llseek = block_llseek, 1170 .read = do_sync_read, 1171 .write = do_sync_write, 1172 .aio_read = generic_file_aio_read, 1173 .aio_write = generic_file_aio_write_nolock, 1174 .mmap = generic_file_mmap, 1175 .fsync = block_fsync, 1176 .unlocked_ioctl = block_ioctl, 1177#ifdef CONFIG_COMPAT 1178 .compat_ioctl = compat_blkdev_ioctl, 1179#endif 1180 .splice_read = generic_file_splice_read, 1181 .splice_write = generic_file_splice_write, 1182}; 1183 1184int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg) 1185{ 1186 int res; 1187 mm_segment_t old_fs = get_fs(); 1188 set_fs(KERNEL_DS); 1189 res = blkdev_ioctl(bdev->bd_inode, NULL, cmd, arg); 1190 set_fs(old_fs); 1191 return res; 1192} 1193 1194EXPORT_SYMBOL(ioctl_by_bdev); 1195 1196/** 1197 * lookup_bdev - lookup a struct block_device by name 1198 * 1199 * @path: special file representing the block device 1200 * 1201 * Get a reference to the blockdevice at @path in the current 1202 * namespace if possible and return it. Return ERR_PTR(error) 1203 * otherwise. 
1204 */ 1205struct block_device *lookup_bdev(const char *path) 1206{ 1207 struct block_device *bdev; 1208 struct inode *inode; 1209 struct nameidata nd; 1210 int error; 1211 1212 if (!path || !*path) 1213 return ERR_PTR(-EINVAL); 1214 1215 error = path_lookup(path, LOOKUP_FOLLOW, &nd); 1216 if (error) 1217 return ERR_PTR(error); 1218 1219 inode = nd.path.dentry->d_inode; 1220 error = -ENOTBLK; 1221 if (!S_ISBLK(inode->i_mode)) 1222 goto fail; 1223 error = -EACCES; 1224 if (nd.path.mnt->mnt_flags & MNT_NODEV) 1225 goto fail; 1226 error = -ENOMEM; 1227 bdev = bd_acquire(inode); 1228 if (!bdev) 1229 goto fail; 1230out: 1231 path_put(&nd.path); 1232 return bdev; 1233fail: 1234 bdev = ERR_PTR(error); 1235 goto out; 1236} 1237 1238/** 1239 * open_bdev_excl - open a block device by name and set it up for use 1240 * 1241 * @path: special file representing the block device 1242 * @flags: %MS_RDONLY for opening read-only 1243 * @holder: owner for exclusion 1244 * 1245 * Open the blockdevice described by the special file at @path, claim it 1246 * for the @holder. 1247 */ 1248struct block_device *open_bdev_excl(const char *path, int flags, void *holder) 1249{ 1250 struct block_device *bdev; 1251 mode_t mode = FMODE_READ; 1252 int error = 0; 1253 1254 bdev = lookup_bdev(path); 1255 if (IS_ERR(bdev)) 1256 return bdev; 1257 1258 if (!(flags & MS_RDONLY)) 1259 mode |= FMODE_WRITE; 1260 error = blkdev_get(bdev, mode, 0); 1261 if (error) 1262 return ERR_PTR(error); 1263 error = -EACCES; 1264 if (!(flags & MS_RDONLY) && bdev_read_only(bdev)) 1265 goto blkdev_put; 1266 error = bd_claim(bdev, holder); 1267 if (error) 1268 goto blkdev_put; 1269 1270 return bdev; 1271 1272blkdev_put: 1273 blkdev_put(bdev); 1274 return ERR_PTR(error); 1275} 1276 1277EXPORT_SYMBOL(open_bdev_excl); 1278 1279/** 1280 * close_bdev_excl - release a blockdevice openen by open_bdev_excl() 1281 * 1282 * @bdev: blockdevice to close 1283 * 1284 * This is the counterpart to open_bdev_excl(). 
1285 */ 1286void close_bdev_excl(struct block_device *bdev) 1287{ 1288 bd_release(bdev); 1289 blkdev_put(bdev); 1290} 1291 1292EXPORT_SYMBOL(close_bdev_excl); 1293 1294int __invalidate_device(struct block_device *bdev) 1295{ 1296 struct super_block *sb = get_super(bdev); 1297 int res = 0; 1298 1299 if (sb) { 1300 /* 1301 * no need to lock the super, get_super holds the 1302 * read mutex so the filesystem cannot go away 1303 * under us (->put_super runs with the write lock 1304 * hold). 1305 */ 1306 shrink_dcache_sb(sb); 1307 res = invalidate_inodes(sb); 1308 drop_super(sb); 1309 } 1310 invalidate_bdev(bdev); 1311 return res; 1312} 1313EXPORT_SYMBOL(__invalidate_device);