// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include "../fs/internal.h"
#include "blk.h"

struct bdev_inode {
        struct block_device bdev;
        struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
        return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
        return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

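/*
 * Write the dirty backing inode of a block device out to disk, retrying
 * while writeback keeps redirtying it, and warn (ratelimited) if the
 * writeback fails.
 */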
static void bdev_write_inode(struct block_device *bdev)
{
        struct inode *inode = bdev->bd_inode;
        int ret;

        spin_lock(&inode->i_lock);
        while (inode->i_state & I_DIRTY) {
                spin_unlock(&inode->i_lock);
                ret = write_inode_now(inode, true);
                if (ret) {
                        char name[BDEVNAME_SIZE];

                        pr_warn_ratelimited("VFS: Dirty inode writeback failed "
                                "for block device %s (err=%d).\n",
                                bdevname(bdev, name), ret);
                }
                spin_lock(&inode->i_lock);
        }
        spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping_empty(mapping))
                return;

        invalidate_bh_lrus();
        truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages) {
                invalidate_bh_lrus();
                lru_add_drain_all();    /* make sure all lru add caches are flushed */
                invalidate_mapping_pages(mapping, 0, -1);
        }
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for the given bdev range. This function
 * bails out with an error if the bdev has another exclusive owner (such as
 * a mounted filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
                loff_t lstart, loff_t lend)
{
        /*
         * If we don't hold an exclusive handle for the device, upgrade to it
         * while we discard the buffer cache to avoid discarding buffers
         * under a live filesystem.
         */
        if (!(mode & FMODE_EXCL)) {
                int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
                if (err)
                        goto invalidate;
        }

        truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
        if (!(mode & FMODE_EXCL))
                bd_abort_claiming(bdev, truncate_bdev_range);
        return 0;

invalidate:
        /*
         * Someone else already holds this device exclusively open. Try
         * invalidating instead. The 'end' argument is inclusive so the
         * rounding is safe.
         */
        return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
                                             lstart >> PAGE_SHIFT,
                                             lend >> PAGE_SHIFT);
}

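/*
 * Default the block size to the largest power of two, between the logical
 * block size and PAGE_SIZE, that still evenly divides the device size.
 */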
static void set_init_blocksize(struct block_device *bdev)
{
        unsigned int bsize = bdev_logical_block_size(bdev);
        loff_t size = i_size_read(bdev->bd_inode);

        while (bsize < PAGE_SIZE) {
                if (size & bsize)
                        break;
                bsize <<= 1;
        }
        bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
{
        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
                return -EINVAL;

        /* Size cannot be smaller than the size supported by the device */
        if (size < bdev_logical_block_size(bdev))
                return -EINVAL;

        /* Don't change the size if it is the same as the current one */
        if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
                sync_blockdev(bdev);
                bdev->bd_inode->i_blkbits = blksize_bits(size);
                kill_bdev(bdev);
        }
        return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
        if (set_blocksize(sb->s_bdev, size))
                return 0;
        /*
         * If we get here, we know size is a power of two and its value is
         * between 512 and PAGE_SIZE.
         */
        sb->s_blocksize = size;
        sb->s_blocksize_bits = blksize_bits(size);
        return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
        int minsize = bdev_logical_block_size(sb->s_bdev);
        if (size < minsize)
                size = minsize;
        return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);

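/*
 * Start writeback of any dirty pages attached to the block device's mapping
 * without waiting for it to complete.
 */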
int sync_blockdev_nowait(struct block_device *bdev)
{
        if (!bdev)
                return 0;
        return filemap_flush(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping. Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        if (!bdev)
                return 0;
        return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device. Filesystem data as well as the underlying block
 * device. Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        if (sb) {
                int res = sync_filesystem(sb);
                drop_super(sb);
                return res;
        }
        return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev - lock a filesystem and force it into a consistent state
 * @bdev: blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 * counts down in thaw_bdev(). When it becomes 0, thaw_bdev() actually
 * unfreezes the filesystem.
 */
int freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;
        int error = 0;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (++bdev->bd_fsfreeze_count > 1)
                goto done;

        sb = get_active_super(bdev);
        if (!sb)
                goto sync;
        if (sb->s_op->freeze_super)
                error = sb->s_op->freeze_super(sb);
        else
                error = freeze_super(sb);
        deactivate_super(sb);

        if (error) {
                bdev->bd_fsfreeze_count--;
                goto done;
        }
        bdev->bd_fsfreeze_sb = sb;

sync:
        sync_blockdev(bdev);
done:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return error;
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev - unlock filesystem
 * @bdev: blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
        struct super_block *sb;
        int error = -EINVAL;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (!bdev->bd_fsfreeze_count)
                goto out;

        error = 0;
        if (--bdev->bd_fsfreeze_count > 0)
                goto out;

        sb = bdev->bd_fsfreeze_sb;
        if (!sb)
                goto out;

        if (sb->s_op->thaw_super)
                error = sb->s_op->thaw_super(sb);
        else
                error = thaw_super(sb);
        if (error)
                bdev->bd_fsfreeze_count++;
        else
                bdev->bd_fsfreeze_sb = NULL;
out:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return error;
}
EXPORT_SYMBOL(thaw_bdev);

/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page from (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked. It will be unlocked when the page
 * has been read. If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
                struct page *page)
{
        const struct block_device_operations *ops = bdev->bd_disk->fops;
        int result = -EOPNOTSUPP;

        if (!ops->rw_page || bdev_get_integrity(bdev))
                return result;

        result = blk_queue_enter(bdev_get_queue(bdev), 0);
        if (result)
                return result;
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
                              REQ_OP_READ);
        blk_queue_exit(bdev_get_queue(bdev));
        return result;
}

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback. If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked. If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
                struct page *page, struct writeback_control *wbc)
{
        int result;
        const struct block_device_operations *ops = bdev->bd_disk->fops;

        if (!ops->rw_page || bdev_get_integrity(bdev))
                return -EOPNOTSUPP;
        result = blk_queue_enter(bdev_get_queue(bdev), 0);
        if (result)
                return result;

        set_page_writeback(page);
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
                              REQ_OP_WRITE);
        if (result) {
                end_page_writeback(page);
        } else {
                clean_page_buffers(page);
                unlock_page(page);
        }
        blk_queue_exit(bdev_get_queue(bdev));
        return result;
}

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);

        if (!ei)
                return NULL;
        memset(&ei->bdev, 0, sizeof(ei->bdev));
        return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
        struct block_device *bdev = I_BDEV(inode);

        free_percpu(bdev->bd_stats);
        kfree(bdev->bd_meta_info);

        if (!bdev_is_partition(bdev)) {
                if (bdev->bd_disk && bdev->bd_disk->bdi)
                        bdi_put(bdev->bd_disk->bdi);
                kfree(bdev->bd_disk);
        }

        if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
                blk_free_ext_minor(MINOR(bdev->bd_dev));

        kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
        struct bdev_inode *ei = data;

        inode_init_once(&ei->vfs_inode);
}

static void bdev_evict_inode(struct inode *inode)
{
        truncate_inode_pages_final(&inode->i_data);
        invalidate_inode_buffers(inode); /* is it needed here? */
        clear_inode(inode);
}

static const struct super_operations bdev_sops = {
        .statfs = simple_statfs,
        .alloc_inode = bdev_alloc_inode,
        .free_inode = bdev_free_inode,
        .drop_inode = generic_delete_inode,
        .evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
        if (!ctx)
                return -ENOMEM;
        fc->s_iflags |= SB_I_CGROUPWB;
        ctx->ops = &bdev_sops;
        return 0;
}

static struct file_system_type bd_type = {
        .name = "bdev",
        .init_fs_context = bd_init_fs_context,
        .kill_sb = kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
        int err;
        static struct vfsmount *bd_mnt;

        bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
                        0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                                SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
                        init_once);
        err = register_filesystem(&bd_type);
        if (err)
                panic("Cannot register bdev pseudo-fs");
        bd_mnt = kern_mount(&bd_type);
        if (IS_ERR(bd_mnt))
                panic("Cannot create bdev pseudo-fs");
        blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}

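/*
 * Allocate a new block_device, backed by a fresh inode on the bdev
 * pseudo-filesystem, for the given disk and partition number. The device
 * number is assigned later via bdev_add().
 */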
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = new_inode(blockdev_superblock);
        if (!inode)
                return NULL;
        inode->i_mode = S_IFBLK;
        inode->i_rdev = 0;
        inode->i_data.a_ops = &def_blk_aops;
        mapping_set_gfp_mask(&inode->i_data, GFP_USER);

        bdev = I_BDEV(inode);
        mutex_init(&bdev->bd_fsfreeze_mutex);
        spin_lock_init(&bdev->bd_size_lock);
        bdev->bd_partno = partno;
        bdev->bd_inode = inode;
        bdev->bd_queue = disk->queue;
        bdev->bd_stats = alloc_percpu(struct disk_stats);
        if (!bdev->bd_stats) {
                iput(inode);
                return NULL;
        }
        bdev->bd_disk = disk;
        return bdev;
}

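/*
 * Assign the device number to a freshly allocated block_device and hash its
 * inode so that blkdev_get_no_open() can find it by dev_t.
 */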
void bdev_add(struct block_device *bdev, dev_t dev)
{
        bdev->bd_dev = dev;
        bdev->bd_inode->i_rdev = dev;
        bdev->bd_inode->i_ino = dev;
        insert_inode_hash(bdev->bd_inode);
}

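/* Total number of page cache pages attached to block device inodes. */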
long nr_blockdev_pages(void)
{
        struct inode *inode;
        long ret = 0;

        spin_lock(&blockdev_superblock->s_inode_list_lock);
        list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
                ret += inode->i_mapping->nrpages;
        spin_unlock(&blockdev_superblock->s_inode_list_lock);

        return ret;
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
                         void *holder)
{
        if (bdev->bd_holder == holder)
                return true;    /* already a holder */
        else if (bdev->bd_holder != NULL)
                return false;   /* held by someone else */
        else if (whole == bdev)
                return true;    /* is a whole device which isn't held */

        else if (whole->bd_holder == bd_may_claim)
                return true;    /* is a partition of a device that is being partitioned */
        else if (whole->bd_holder != NULL)
                return false;   /* is a partition of a held device */
        else
                return true;    /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * Claim @bdev. This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress. On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder)
{
        struct block_device *whole = bdev_whole(bdev);

        if (WARN_ON_ONCE(!holder))
                return -EINVAL;
retry:
        spin_lock(&bdev_lock);
        /* if someone else claimed, fail */
        if (!bd_may_claim(bdev, whole, holder)) {
                spin_unlock(&bdev_lock);
                return -EBUSY;
        }

        /* if claiming is already in progress, wait for it to finish */
        if (whole->bd_claiming) {
                wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
                DEFINE_WAIT(wait);

                prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&bdev_lock);
                schedule();
                finish_wait(wq, &wait);
                goto retry;
        }

        /* yay, all mine */
        whole->bd_claiming = holder;
        spin_unlock(&bdev_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
        lockdep_assert_held(&bdev_lock);
        /* tell others that we're done */
        BUG_ON(whole->bd_claiming != holder);
        whole->bd_claiming = NULL;
        wake_up_bit(&whole->bd_claiming, 0);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder)
{
        struct block_device *whole = bdev_whole(bdev);

        spin_lock(&bdev_lock);
        BUG_ON(!bd_may_claim(bdev, whole, holder));
        /*
         * Note that for a whole device bd_holders will be incremented twice,
         * and bd_holder will be set to bd_may_claim before being set to holder
         */
        whole->bd_holders++;
        whole->bd_holder = bd_may_claim;
        bdev->bd_holders++;
        bdev->bd_holder = holder;
        bd_clear_claiming(whole, holder);
        spin_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can
 * also be used when an exclusive open is not actually desired and we just
 * needed to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
        spin_lock(&bdev_lock);
        bd_clear_claiming(bdev_whole(bdev), holder);
        spin_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);

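/*
 * Sync and throw away all page cache and buffers for a block device once the
 * last opener is gone, and write back its backing inode.
 */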
static void blkdev_flush_mapping(struct block_device *bdev)
{
        WARN_ON_ONCE(bdev->bd_holders);
        sync_blockdev(bdev);
        kill_bdev(bdev);
        bdev_write_inode(bdev);
}

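/*
 * Open a whole (non-partition) block device: call the driver's ->open()
 * method if it has one, set the initial block size on first open, and rescan
 * the partition table when the disk asks for it.
 */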
static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
{
        struct gendisk *disk = bdev->bd_disk;
        int ret;

        if (disk->fops->open) {
                ret = disk->fops->open(bdev, mode);
                if (ret) {
                        /* avoid ghost partitions on a removed medium */
                        if (ret == -ENOMEDIUM &&
                            test_bit(GD_NEED_PART_SCAN, &disk->state))
                                bdev_disk_changed(disk, true);
                        return ret;
                }
        }

        if (!bdev->bd_openers)
                set_init_blocksize(bdev);
        if (test_bit(GD_NEED_PART_SCAN, &disk->state))
                bdev_disk_changed(disk, false);
        bdev->bd_openers++;
        return 0;
}

static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
{
        if (!--bdev->bd_openers)
                blkdev_flush_mapping(bdev);
        if (bdev->bd_disk->fops->release)
                bdev->bd_disk->fops->release(bdev->bd_disk, mode);
}

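/*
 * Open a partition: on first open take a reference on the whole device via
 * blkdev_get_whole() and reject zero-sized partitions with -ENXIO.
 */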
static int blkdev_get_part(struct block_device *part, fmode_t mode)
{
        struct gendisk *disk = part->bd_disk;
        int ret;

        if (part->bd_openers)
                goto done;

        ret = blkdev_get_whole(bdev_whole(part), mode);
        if (ret)
                return ret;

        ret = -ENXIO;
        if (!bdev_nr_sectors(part))
                goto out_blkdev_put;

        disk->open_partitions++;
        set_init_blocksize(part);
done:
        part->bd_openers++;
        return 0;

out_blkdev_put:
        blkdev_put_whole(bdev_whole(part), mode);
        return ret;
}

static void blkdev_put_part(struct block_device *part, fmode_t mode)
{
        struct block_device *whole = bdev_whole(part);

        if (--part->bd_openers)
                return;
        blkdev_flush_mapping(part);
        whole->bd_disk->open_partitions--;
        blkdev_put_whole(whole, mode);
}

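/*
 * Look up the block_device for a device number and take a reference on its
 * embedded struct device without actually opening it. Loads the matching
 * driver module if the inode is not hashed yet. Returns NULL if no such
 * device exists.
 */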
struct block_device *blkdev_get_no_open(dev_t dev)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = ilookup(blockdev_superblock, dev);
        if (!inode) {
                blk_request_module(dev);
                inode = ilookup(blockdev_superblock, dev);
                if (!inode)
                        return NULL;
        }

        /* switch from the inode reference to a device model one: */
        bdev = &BDEV_I(inode)->bdev;
        if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
                bdev = NULL;
        iput(inode);
        return bdev;
}

void blkdev_put_no_open(struct block_device *bdev)
{
        put_device(&bdev->bd_device);
}

/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by device number @dev. If @mode includes
 * %FMODE_EXCL, the block device is opened with exclusive access. Specifying
 * %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may nest for
 * the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number. Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
        bool unblock_events = true;
        struct block_device *bdev;
        struct gendisk *disk;
        int ret;

        ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
                        MAJOR(dev), MINOR(dev),
                        ((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) |
                        ((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0));
        if (ret)
                return ERR_PTR(ret);

        bdev = blkdev_get_no_open(dev);
        if (!bdev)
                return ERR_PTR(-ENXIO);
        disk = bdev->bd_disk;

        if (mode & FMODE_EXCL) {
                ret = bd_prepare_to_claim(bdev, holder);
                if (ret)
                        goto put_blkdev;
        }

        disk_block_events(disk);

        mutex_lock(&disk->open_mutex);
        ret = -ENXIO;
        if (!disk_live(disk))
                goto abort_claiming;
        if (!try_module_get(disk->fops->owner))
                goto abort_claiming;
        if (bdev_is_partition(bdev))
                ret = blkdev_get_part(bdev, mode);
        else
                ret = blkdev_get_whole(bdev, mode);
        if (ret)
                goto put_module;
        if (mode & FMODE_EXCL) {
                bd_finish_claiming(bdev, holder);

                /*
                 * Block event polling for write claims if requested. Any write
                 * holder makes the write_holder state stick until all are
                 * released. This is good enough and tracking individual
                 * writeable references is too fragile given the way @mode is
                 * used in blkdev_get/put().
                 */
                if ((mode & FMODE_WRITE) && !bdev->bd_write_holder &&
                    (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
                        bdev->bd_write_holder = true;
                        unblock_events = false;
                }
        }
        mutex_unlock(&disk->open_mutex);

        if (unblock_events)
                disk_unblock_events(disk);
        return bdev;
put_module:
        module_put(disk->fops->owner);
abort_claiming:
        if (mode & FMODE_EXCL)
                bd_abort_claiming(bdev, holder);
        mutex_unlock(&disk->open_mutex);
        disk_unblock_events(disk);
put_blkdev:
        blkdev_put_no_open(bdev);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);

/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by the device file at @path. If @mode
 * includes %FMODE_EXCL, the block device is opened with exclusive access.
 * Specifying %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may
 * nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
                void *holder)
{
        struct block_device *bdev;
        dev_t dev;
        int error;

        error = lookup_bdev(path, &dev);
        if (error)
                return ERR_PTR(error);

        bdev = blkdev_get_by_dev(dev, mode, holder);
        if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
                blkdev_put(bdev, mode);
                return ERR_PTR(-EACCES);
        }

        return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);

void blkdev_put(struct block_device *bdev, fmode_t mode)
{
        struct gendisk *disk = bdev->bd_disk;

        /*
         * Sync early if it looks like we're the last one. If someone else
         * opens the block device between now and the decrement of bd_openers
         * then we did a sync that we didn't need to, but that's not the end
         * of the world and we want to avoid long (could be several minute)
         * syncs while holding the mutex.
         */
        if (bdev->bd_openers == 1)
                sync_blockdev(bdev);

        mutex_lock(&disk->open_mutex);
        if (mode & FMODE_EXCL) {
                struct block_device *whole = bdev_whole(bdev);
                bool bdev_free;

                /*
                 * Release a claim on the device. The holder fields
                 * are protected with bdev_lock. open_mutex is to
                 * synchronize disk_holder unlinking.
                 */
                spin_lock(&bdev_lock);

                WARN_ON_ONCE(--bdev->bd_holders < 0);
                WARN_ON_ONCE(--whole->bd_holders < 0);

                if ((bdev_free = !bdev->bd_holders))
                        bdev->bd_holder = NULL;
                if (!whole->bd_holders)
                        whole->bd_holder = NULL;

                spin_unlock(&bdev_lock);

                /*
                 * If this was the last claim, remove holder link and
                 * unblock event polling if it was a write holder.
                 */
                if (bdev_free && bdev->bd_write_holder) {
                        disk_unblock_events(disk);
                        bdev->bd_write_holder = false;
                }
        }

        /*
         * Trigger event checking and tell drivers to flush MEDIA_CHANGE
         * event. This is to ensure detection of media removal commanded
         * from userland - e.g. eject(1).
         */
        disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

        if (bdev_is_partition(bdev))
                blkdev_put_part(bdev, mode);
        else
                blkdev_put_whole(bdev, mode);
        mutex_unlock(&disk->open_mutex);

        module_put(disk->fops->owner);
        blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);

/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: Name of the block device in the filesystem.
 * @dev: Pointer to the block device's dev_t, if found.
 *
 * Lookup the block device's dev_t at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Context: May sleep.
 * Return: 0 if succeeded, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
        struct inode *inode;
        struct path path;
        int error;

        if (!pathname || !*pathname)
                return -EINVAL;

        error = kern_path(pathname, LOOKUP_FOLLOW, &path);
        if (error)
                return error;

        inode = d_backing_inode(path.dentry);
        error = -ENOTBLK;
        if (!S_ISBLK(inode->i_mode))
                goto out_path_put;
        error = -EACCES;
        if (!may_open_dev(&path))
                goto out_path_put;

        *dev = inode->i_rdev;
        error = 0;
out_path_put:
        path_put(&path);
        return error;
}
EXPORT_SYMBOL(lookup_bdev);

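/*
 * Invalidate cached dentries and inodes of any filesystem on @bdev (also
 * discarding dirty inodes when @kill_dirty is set) and then drop the
 * device's own clean, unused page cache.
 */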
int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
        struct super_block *sb = get_super(bdev);
        int res = 0;

        if (sb) {
                /*
                 * No need to lock the super, get_super holds the
                 * read mutex so the filesystem cannot go away
                 * under us (->put_super runs with the write lock
                 * held).
                 */
                shrink_dcache_sb(sb);
                res = invalidate_inodes(sb, kill_dirty);
                drop_super(sb);
        }
        invalidate_bdev(bdev);
        return res;
}
EXPORT_SYMBOL(__invalidate_device);

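/*
 * Walk every open block device inode on the bdev pseudo-fs. With @wait false,
 * start writeback of its dirty pages; with @wait true, wait for previously
 * started writeback to complete while keeping per-mapping error state so that
 * fsync(2) can still report it.
 */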
void sync_bdevs(bool wait)
{
        struct inode *inode, *old_inode = NULL;

        spin_lock(&blockdev_superblock->s_inode_list_lock);
        list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
                struct address_space *mapping = inode->i_mapping;
                struct block_device *bdev;

                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
                    mapping->nrpages == 0) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(&blockdev_superblock->s_inode_list_lock);
                /*
                 * We hold a reference to 'inode' so it couldn't have been
                 * removed from the s_inodes list while we dropped the
                 * s_inode_list_lock. We cannot iput the inode now as we can
                 * be holding the last reference and we cannot iput it under
                 * s_inode_list_lock. So we keep the reference and iput it
                 * later.
                 */
                iput(old_inode);
                old_inode = inode;
                bdev = I_BDEV(inode);

                mutex_lock(&bdev->bd_disk->open_mutex);
                if (!bdev->bd_openers) {
                        ; /* skip */
                } else if (wait) {
                        /*
                         * We keep the error status of each individual mapping
                         * so that applications can catch the writeback error
                         * using fsync(2). See filemap_fdatawait_keep_errors()
                         * for details.
                         */
                        filemap_fdatawait_keep_errors(inode->i_mapping);
                } else {
                        filemap_fdatawrite(inode->i_mapping);
                }
                mutex_unlock(&bdev->bd_disk->open_mutex);

                spin_lock(&blockdev_superblock->s_inode_list_lock);
        }
        spin_unlock(&blockdev_superblock->s_inode_list_lock);
        iput(old_inode);
}