Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.18-rc1 · 1456 lines · 36 kB
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and most importantly avoid the need for filesystem specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;
	if (WARN_ON(iomap.length == 0))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}

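/*
 * Example (illustrative sketch only): the smallest iomap_begin a filesystem
 * could wire up through struct iomap_ops for use with iomap_apply().  The
 * names example_iomap_begin and example_iomap_ops are hypothetical; a real
 * implementation would look up or allocate blocks here and take whatever
 * locks its ->iomap_end later releases.
 */
#if 0
static int
example_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	/* Report the whole requested range as a hole. */
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = pos;
	iomap->length = length;
	return 0;
}

static const struct iomap_ops example_iomap_ops = {
	.iomap_begin	= example_iomap_begin,
};
#endif
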
static sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

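/*
 * Example (illustrative sketch only): how a filesystem's ->write_iter might
 * drive iomap_file_buffered_write().  example_iomap_ops is the hypothetical
 * ops instance sketched after iomap_apply() above; the locking shown is the
 * common pattern, not a requirement imposed by this helper.
 */
#if 0
static ssize_t
example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
	inode_unlock(inode);

	if (ret > 0) {
		/* the helper does not advance ki_pos; the caller does */
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
#endif
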
static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

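/*
 * Example (illustrative sketch only): typical callers of the zeroing helpers
 * above.  When a truncate shrinks a file, iomap_truncate_page() zeroes the
 * tail of the new last block; when it grows a file, iomap_zero_range() zeroes
 * from the old EOF to the new size.  example_iomap_ops is the hypothetical
 * ops instance sketched after iomap_apply() above.
 */
#if 0
static int
example_truncate_zeroing(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = i_size_read(inode);
	bool did_zero = false;

	if (newsize <= oldsize)
		return iomap_truncate_page(inode, newsize, &did_zero,
				&example_iomap_ops);
	return iomap_zero_range(inode, oldsize, newsize - oldsize, &did_zero,
			&example_iomap_ops);
}
#endif
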
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

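/*
 * Example (illustrative sketch only): wiring iomap_page_mkwrite() into a
 * filesystem's ->page_mkwrite handler.  example_iomap_ops is the hypothetical
 * ops instance sketched after iomap_apply() above; the pagefault accounting
 * and timestamp update mirror what callers commonly do around this helper,
 * and the exact return type follows the filesystem's own fault path.
 */
#if 0
static int
example_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	ret = iomap_page_mkwrite(vmf, &example_iomap_ops);
	sb_end_pagefault(inode->i_sb);
	return ret;
}
#endif
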
struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_MAPPED:
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_INLINE:
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);

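/*
 * Example (illustrative sketch only): a filesystem's ->fiemap inode operation
 * can forward straight to iomap_fiemap().  example_iomap_ops is the
 * hypothetical ops instance sketched after iomap_apply() above.
 */
#if 0
static int
example_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return iomap_fiemap(inode, fieinfo, start, len, &example_iomap_ops);
}
#endif
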
/*
 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
 * Returns true if found and updates @lastoff to the offset in file.
 */
static bool
page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
		int whence)
{
	const struct address_space_operations *ops = inode->i_mapping->a_ops;
	unsigned int bsize = i_blocksize(inode), off;
	bool seek_data = whence == SEEK_DATA;
	loff_t poff = page_offset(page);

	if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
		return false;

	if (*lastoff < poff) {
		/*
		 * Last offset smaller than the start of the page means we found
		 * a hole:
		 */
		if (whence == SEEK_HOLE)
			return true;
		*lastoff = poff;
	}

	/*
	 * Just check the page unless we can and should check block ranges:
	 */
	if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
		return PageUptodate(page) == seek_data;

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping))
		goto out_unlock_not_found;

	for (off = 0; off < PAGE_SIZE; off += bsize) {
		if ((*lastoff & ~PAGE_MASK) >= off + bsize)
			continue;
		if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
			unlock_page(page);
			return true;
		}
		*lastoff = poff + off + bsize;
	}

out_unlock_not_found:
	unlock_page(page);
	return false;
}

/*
 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
 *
 * Within unwritten extents, the page cache determines which parts are holes
 * and which are data: uptodate buffer heads count as data; everything else
 * counts as a hole.
 *
 * Returns the resulting offset on success, and -ENOENT otherwise.
 */
static loff_t
page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
		int whence)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	loff_t lastoff = offset;
	struct pagevec pvec;

	if (length <= 0)
		return -ENOENT;

	pagevec_init(&pvec);

	do {
		unsigned nr_pages, i;

		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
						end - 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page_seek_hole_data(inode, page, &lastoff, whence))
				goto check_range;
			lastoff = page_offset(page) + PAGE_SIZE;
		}
		pagevec_release(&pvec);
	} while (index < end);

	/* When no page at lastoff and we are not done, we found a hole. */
	if (whence != SEEK_HOLE)
		goto not_found;

check_range:
	if (lastoff < offset + length)
		goto out;
not_found:
	lastoff = -ENOENT;
out:
	pagevec_release(&pvec);
	return lastoff;
}


static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		      void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		      void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/*FALLTHRU*/
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);

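/*
 * Example (illustrative sketch only): handling SEEK_HOLE / SEEK_DATA in a
 * filesystem's ->llseek with the helpers above, falling back to
 * generic_file_llseek() for the other whence values.  example_iomap_ops is
 * the hypothetical ops instance sketched after iomap_apply() above.
 */
#if 0
static loff_t
example_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	switch (whence) {
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &example_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &example_iomap_ops);
		break;
	default:
		return generic_file_llseek(file, offset, whence);
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
#endif
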
/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY	(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing. Either
	 * one is a pretty crazy thing to do, so we don't support it 100%. If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/*FALLTHRU*/
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			length = iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW) {
			need_zeroout = true;
		} else {
			/*
			 * Use a FUA write if we need datasync semantics, this
			 * is a pure data IO that doesn't require any metadata
			 * updates and the underlying device supports FUA. This
			 * allows us to avoid cache flushes on IO completion.
			 */
			if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
			    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
			    blk_queue_fua(bdev_get_queue(iomap->bdev)))
				use_fua = true;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return copied ? copied : ret;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied;
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not.  This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write. This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO.  Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing.  If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	ret = iomap_dio_complete(dio);

	return ret;

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);

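/*
 * Example (illustrative sketch only): a direct I/O read path built on
 * iomap_dio_rw().  The helper asserts that i_rwsem is held, so the caller
 * takes it (shared is sufficient for reads) around the call.
 * example_iomap_ops is the hypothetical ops instance sketched after
 * iomap_apply() above; passing a NULL end_io is fine when no unwritten
 * extent conversion or other completion work is needed.
 */
#if 0
static ssize_t
example_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL);
	inode_unlock_shared(inode);
	return ret;
}
#endif
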
/* Swapfile activation */

#ifdef CONFIG_SWAP
struct iomap_swapfile_info {
	struct iomap iomap;		/* accumulated iomap */
	struct swap_info_struct *sis;
	uint64_t lowest_ppage;		/* lowest physical addr seen (pages) */
	uint64_t highest_ppage;		/* highest physical addr seen (pages) */
	unsigned long nr_pages;		/* number of pages collected */
	int nr_extents;			/* extent count */
};

/*
 * Collect physical extents for this swap file.  Physical extents reported to
 * the swap code must be trimmed to align to a page boundary.  The logical
 * offset within the file is irrelevant since the swapfile code maps logical
 * page numbers of the swap device to the physical page-aligned extents.
 */
static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
{
	struct iomap *iomap = &isi->iomap;
	unsigned long nr_pages;
	uint64_t first_ppage;
	uint64_t first_ppage_reported;
	uint64_t next_ppage;
	int error;

	/*
	 * Round the start up and the end down so that the physical
	 * extent aligns to a page boundary.
	 */
	first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
			PAGE_SHIFT;

	/* Skip too-short physical extents. */
	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;

	/*
	 * Calculate how much swap space we're adding; the first page contains
	 * the swap header and doesn't count.  The mm still wants that first
	 * page fed to add_swap_extent, however.
	 */
	first_ppage_reported = first_ppage;
	if (iomap->offset == 0)
		first_ppage_reported++;
	if (isi->lowest_ppage > first_ppage_reported)
		isi->lowest_ppage = first_ppage_reported;
	if (isi->highest_ppage < (next_ppage - 1))
		isi->highest_ppage = next_ppage - 1;

	/* Add extent, set up for the next call. */
	error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
	if (error < 0)
		return error;
	isi->nr_extents += error;
	isi->nr_pages += nr_pages;
	return 0;
}

/*
 * Accumulate iomaps for this swap file.  We have to accumulate iomaps because
 * swap only cares about contiguous page-aligned physical extents and makes no
 * distinction between written and unwritten extents.
 */
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
		loff_t count, void *data, struct iomap *iomap)
{
	struct iomap_swapfile_info *isi = data;
	int error;

	switch (iomap->type) {
	case IOMAP_MAPPED:
	case IOMAP_UNWRITTEN:
		/* Only real or unwritten extents. */
		break;
	case IOMAP_INLINE:
		/* No inline data. */
		pr_err("swapon: file is inline\n");
		return -EINVAL;
	default:
		pr_err("swapon: file has unallocated extents\n");
		return -EINVAL;
	}

	/* No uncommitted metadata or shared blocks. */
	if (iomap->flags & IOMAP_F_DIRTY) {
		pr_err("swapon: file is not committed\n");
		return -EINVAL;
	}
	if (iomap->flags & IOMAP_F_SHARED) {
		pr_err("swapon: file has shared extents\n");
		return -EINVAL;
	}

	/* Only one bdev per swap file. */
	if (iomap->bdev != isi->sis->bdev) {
		pr_err("swapon: file is on multiple devices\n");
		return -EINVAL;
	}

	if (isi->iomap.length == 0) {
		/* No accumulated extent, so just store it. */
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	} else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
		/* Append this to the accumulated extent. */
		isi->iomap.length += iomap->length;
	} else {
		/* Otherwise, add the retained iomap and store this one. */
		error = iomap_swapfile_add_extent(isi);
		if (error)
			return error;
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	}
	return count;
}

/*
 * Iterate a swap file's iomaps to construct physical extents that can be
 * passed to the swapfile subsystem.
 */
int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops)
{
	struct iomap_swapfile_info isi = {
		.sis = sis,
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = 0;
	loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
	loff_t ret;

	/*
	 * Persist all file mapping metadata so that we won't have any
	 * IOMAP_F_DIRTY iomaps.
	 */
	ret = vfs_fsync(swap_file, 1);
	if (ret)
		return ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
				ops, &isi, iomap_swapfile_activate_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	if (isi.iomap.length) {
		ret = iomap_swapfile_add_extent(&isi);
		if (ret)
			return ret;
	}

	*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
	sis->max = isi.nr_pages;
	sis->pages = isi.nr_pages - 1;
	sis->highest_bit = isi.nr_pages - 1;
	return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
#endif /* CONFIG_SWAP */

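/*
 * Example (illustrative sketch only): a filesystem's ->swap_activate
 * address_space operation can forward to iomap_swapfile_activate().
 * example_iomap_ops is the hypothetical ops instance sketched after
 * iomap_apply() above.
 */
#if 0
static int
example_swap_activate(struct swap_info_struct *sis, struct file *swap_file,
		sector_t *span)
{
	return iomap_swapfile_activate(sis, swap_file, span, &example_iomap_ops);
}
#endif
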
static loff_t
iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	sector_t *bno = data, addr;

	if (iomap->type == IOMAP_MAPPED) {
		addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
		if (addr > INT_MAX)
			WARN(1, "would truncate bmap result\n");
		else
			*bno = addr;
	}
	return 0;
}

/* legacy ->bmap interface.  0 is the error return (!) */
sector_t
iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops)
{
	struct inode *inode = mapping->host;
	loff_t pos = bno << inode->i_blkbits;
	unsigned blocksize = i_blocksize(inode);

	if (filemap_write_and_wait(mapping))
		return 0;

	bno = 0;
	iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
	return bno;
}
EXPORT_SYMBOL_GPL(iomap_bmap);
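
/*
 * Example (illustrative sketch only): the legacy ->bmap address_space
 * operation can forward to iomap_bmap().  example_iomap_ops is the
 * hypothetical ops instance sketched after iomap_apply() above.
 */
#if 0
static sector_t
example_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &example_iomap_ops);
}
#endif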