Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

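/*
 * Check whether a direct I/O request is ill-formed.  The file offset
 * must be aligned to the device's logical block size, and every iov
 * segment must be size-aligned to it as well (with segment addresses
 * honouring the device's DMA alignment): e.g. for a 512-byte logical
 * block size, pos & 511 must be zero.  Atomic writes must additionally
 * pass generic_atomic_write_valid().
 */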
static bool blkdev_dio_invalid(struct block_device *bdev, loff_t pos,
				struct iov_iter *iter, bool is_atomic)
{
	if (is_atomic && !generic_atomic_write_valid(iter, pos))
		return true;

	return pos & (bdev_logical_block_size(bdev) - 1) ||
		!bdev_iter_is_aligned(bdev, iter);
}

#define DIO_INLINE_BIO_VECS 4

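/*
 * Fast path for synchronous requests that fit in a single bio: build
 * the bio on the stack, using the inline bio_vec array when no more
 * than DIO_INLINE_BIO_VECS pages are involved and a heap allocation
 * otherwise, then submit and wait right here.  No struct blkdev_dio is
 * needed.
 */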
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, struct block_device *bdev,
		unsigned int nr_pages)
{
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio.bi_ioprio = iocb->ki_ioprio;
	if (iocb->ki_flags & IOCB_ATOMIC)
		bio.bi_opf |= REQ_ATOMIC;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

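/*
 * State shared by all bios of a multi-bio direct I/O request.  The
 * first bio is embedded at the tail, which is how blkdev_dio_pool hands
 * the structure out; dio->ref counts the outstanding bios.  The union
 * reflects the two completion models: async requests complete through
 * dio->iocb, sync requests park the submitter in dio->waiter.  The bio
 * is cacheline-aligned, presumably to avoid false sharing with the
 * fields above it.
 */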
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

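/*
 * Completion handler for the multi-bio path.  The first error seen is
 * latched into the parent bio's bi_status; once the last bio completes
 * (dio->ref drops to zero), either the iocb is completed (async) or the
 * sleeping submitter is woken (sync).  Dirtied read pages are handed to
 * bio_check_pages_dirty(), which also takes care of releasing the bio.
 */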
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

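/*
 * General-purpose path for requests too large for a single bio: carve
 * the iov_iter into a chain of up to BIO_MAX_VECS-sized bios, submitted
 * under one plug and joined through a shared blkdev_dio.  Sync callers
 * sleep in blk_io_schedule() until the final completion wakes them;
 * async callers get -EIOCBQUEUED and complete via blkdev_bio_end_io().
 */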
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		struct block_device *bdev, unsigned int nr_pages)
{
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure which is
	 * embedded into the first bio stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking I/O, and we would need to
			 * allocate another bio if there is data left to map.
			 * As we cannot guarantee that one of the sub-bios
			 * will not fail to be issued with NOWAIT set, and as
			 * error results are coalesced across all of them, be
			 * safe and ask for a retry of this from blocking
			 * context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				bio_release_pages(bio, false);
				bio_clear_flag(bio, BIO_REFFED);
				bio_put(bio);
				blk_finish_plug(&plug);
				return -EAGAIN;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

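/*
 * Completion handler for the async single-bio path: complete the iocb
 * directly from bio completion, then release (or re-check the dirtying
 * of) the pinned pages.
 */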
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

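/*
 * Async fast path for requests that fit in one bio: no reference
 * counting is needed, and completion is handled entirely by
 * blkdev_bio_end_io_async().  IOCB_HIPRI requests are marked
 * REQ_POLLED, with the bio stashed in iocb->private for
 * iocb_bio_iopoll().
 */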
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					struct block_device *bdev,
					unsigned int nr_pages)
{
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_ATOMIC)
		bio->bi_opf |= REQ_ATOMIC;

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}

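/*
 * Dispatch to one of the three implementations above.  Note the cap of
 * BIO_MAX_VECS + 1: bio_iov_vecs_to_alloc() never reports more than the
 * cap, so a result of BIO_MAX_VECS + 1 simply means "does not fit in
 * one bio" without walking the whole iterator (bvec iterators report 0
 * and thus always take a single-bio path).  Oversized atomic writes are
 * rejected rather than split.
 */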
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	bool is_atomic = iocb->ki_flags & IOCB_ATOMIC;
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	if (blkdev_dio_invalid(bdev, iocb->ki_pos, iter, is_atomic))
		return -EINVAL;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, bdev,
							nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
	} else if (is_atomic) {
		return -EINVAL;
	}
	return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}

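/*
 * iomap callback for the paths below: a block device is one trivially
 * linear extent, so every mapping is IOMAP_MAPPED with addr equal to
 * the (block-aligned) offset and running to the end of the device.
 * Offsets at or beyond the device size fail with -EIO.
 */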
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	if (offset >= isize)
		return -EIO;

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin		= blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
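/*
 * get_block for the buffer_head-based paths below: the mapping is the
 * identity, as block iblock of the "file" simply is block iblock of the
 * device, so there is nothing to look up and nothing that can fail
 * (create is irrelevant for the same reason).
 */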
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly, which holds the buffer
 * lock.  The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	err = write_cache_pages(mapping, wbc, block_write_full_folio,
			blkdev_get_block);
	blk_finish_plug(&plug);

	return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepages	= blkdev_writepages,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.migrate_folio	= buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &blkdev_iomap_ops);
}

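/*
 * Writeback mapping callback: reuse the mapping cached in wpc when the
 * offset still falls inside it, otherwise remap from the offset to the
 * end of the device via blkdev_iomap_begin().
 */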
static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset, unsigned int len)
{
	loff_t isize = i_size_read(inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;
	if (offset >= wpc->iomap.offset &&
	    offset < wpc->iomap.offset + wpc->iomap.length)
		return 0;
	return blkdev_iomap_begin(inode, offset, isize - offset,
				  IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.map_blocks		= blkdev_map_blocks,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= filemap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.read_folio		= blkdev_read_folio,
	.readahead		= blkdev_readahead,
	.writepages		= blkdev_writepages,
	.is_partially_uptodate  = iomap_is_partially_uptodate,
	.error_remove_folio	= generic_error_remove_folio,
	.migrate_folio		= filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so we
 * compute the size by hand instead.
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

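/*
 * fsync/fdatasync: write back and wait on the dirty page cache range,
 * then flush the device's volatile write cache.  -EOPNOTSUPP from
 * blkdev_issue_flush() is deliberately not treated as an error.
 */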
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex, and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at the file open flags and generate the corresponding block open flags
 * from them. The function works both for a file that is just being opened
 * (e.g. during the ->open callback) and for a file that is already open. This
 * is actually non-trivial (see the comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	/*
	 * do_dentry_open() clears O_EXCL from f_flags, so for already open
	 * files use file->private_data to determine whether the open was
	 * exclusive.
	 */
	if (file->private_data)
		mode |= BLK_OPEN_EXCL;
	else if (file->f_flags & O_EXCL)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the
	 * floppy driver has historically allowed ioctls as if the file was
	 * opened for writing, but does not allow any actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

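/*
 * ->open for block device nodes.  For exclusive (O_EXCL) opens the
 * struct file itself serves as the holder, which is what
 * file_to_blk_mode() later relies on via file->private_data.  O_DIRECT
 * opens of atomic-write-capable devices are flagged
 * FMODE_CAN_ATOMIC_WRITE.
 */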
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;
	blk_mode_t mode;
	int ret;

	mode = file_to_blk_mode(filp);
	/* Use the file as the holder. */
	if (mode & BLK_OPEN_EXCL)
		filp->private_data = filp;
	ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
	if (ret)
		return ret;

	bdev = blkdev_get_no_open(inode->i_rdev);
	if (!bdev)
		return -ENXIO;

	if (bdev_can_atomic_write(bdev) && filp->f_flags & O_DIRECT)
		filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;

	ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
	if (ret)
		blkdev_put_no_open(bdev);
	return ret;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	bdev_release(filp);
	return 0;
}

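/*
 * O_DIRECT write helper: invalidate the page cache over the range
 * before issuing the I/O (returning 0, and thereby requesting buffered
 * fallback, if invalidation hits busy pages), and invalidate again
 * after a successful write so stale cached data cannot be read back.
 */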
static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops);
}

/*
 * Write data to the block device. Only intended for the block device itself
 * and the raw driver, which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(bd_inode);
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		ret = blkdev_buffered_write(iocb, from);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

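/*
 * Reads beyond the end of the device are clamped: the iter is truncated
 * to the device size and re-expanded on the way out.  O_DIRECT reads
 * first write back any dirty page cache over the range; whatever the
 * direct path does not consume (or the whole request, for buffered I/O)
 * is served by filemap_read().
 */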
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	ret = filemap_read(iocb, to, ret);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

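/*
 * fallocate() cannot change the size of a block device, so ranges
 * extending past the end are only clamped with FALLOC_FL_KEEP_SIZE and
 * rejected otherwise.  The supported modes map to block operations as
 * follows:
 *
 *	ZERO_RANGE [| KEEP_SIZE]		zeroout, no unmap
 *	PUNCH_HOLE | KEEP_SIZE			zeroout, no fallback
 *	PUNCH_HOLE | KEEP_SIZE | NO_HIDE_STALE	discard
 *
 * In all cases truncate_bdev_range() drops the affected page cache
 * first.
 */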
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/*
	 * Invalidate the page cache, including dirty pages, for valid
	 * de-allocate mode calls to fallocate().
	 */
	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL);
		break;
	default:
		error = -EOPNOTSUPP;
	}

 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	if (bdev_read_only(I_BDEV(bd_inode)))
		return generic_file_readonly_mmap(file, vma);

	return generic_file_mmap(file, vma);
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= blkdev_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
	.fop_flags	= FOP_BUFFER_RASYNC,
};

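/*
 * Boot-time setup of blkdev_dio_pool: a bioset with four reserved
 * entries and per-cpu bio caching (backing REQ_ALLOC_CACHE), with
 * enough front padding that each bio sits at its offset inside a
 * struct blkdev_dio, making the container_of() uses above valid.
 */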
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
			offsetof(struct blkdev_dio, bio),
			BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);