Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.19-rc7 (2110 lines, 53 kB)
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page, and, most importantly, avoids the need for filesystem-specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;
	if (WARN_ON(iomap.length == 0))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
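
/*
 * Illustrative sketch (not from this file; the "myfs_" names are
 * hypothetical): iomap_apply() is driven by the iomap_begin/iomap_end
 * callbacks a filesystem supplies via struct iomap_ops, plus a
 * per-operation actor. A minimal filesystem hook-up might look like this:
 */
#if 0	/* illustrative sketch only */
static int
myfs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	/*
	 * Map [pos, pos + length) to a single extent of one type and
	 * reserve whatever resources the operation needs.
	 */
	iomap->offset = pos;
	iomap->length = length;
	iomap->type = IOMAP_HOLE;		/* or IOMAP_MAPPED, ... */
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->bdev = inode->i_sb->s_bdev;
	return 0;
}

static const struct iomap_ops myfs_iomap_ops = {
	.iomap_begin	= myfs_iomap_begin,
	/* .iomap_end is optional: release resources, trim over-allocations */
};
#endif
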
static sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}

static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;
	unsigned end = offset_in_page(i_size_read(inode)) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (first <= end && last > end)
		plen -= (last - end) * block_size;

	*offp = poff;
	*lenp = plen;
}
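
/*
 * Illustrative worked example for iomap_adjust_read_range() above (editor's
 * sketch, assuming a 4096-byte page with 1024-byte blocks, i.e. four
 * uptodate bits per page): if blocks 0 and 3 are already uptodate and the
 * caller asks for the whole page, the first loop advances *pos/poff past
 * block 0 and the second loop trims the trailing uptodate block, leaving
 * poff = 1024 and plen = 2048, so only blocks 1 and 2 are read from disk.
 */
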
static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned int i;
	bool uptodate = true;

	if (iop) {
		for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
			if (i >= first && i <= last)
				set_bit(i, iop->uptodate);
			else if (!test_bit(i, iop->uptodate))
				uptodate = false;
		}
	}

	if (uptodate && !PageError(page))
		SetPageUptodate(page);
}

static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
		if (__bio_try_merge_page(ctx->bio, page, plen, poff))
			goto done;
		is_contig = true;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	__bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);
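
/*
 * Illustrative sketch (not from this file; the "myfs_" names are
 * hypothetical): a filesystem typically exposes iomap_readpage() and
 * iomap_readpages() (the latter defined below) through its
 * address_space_operations, roughly like this:
 */
#if 0	/* illustrative sketch only */
static int
myfs_readpage(struct file *unused, struct page *page)
{
	return iomap_readpage(page, &myfs_iomap_ops);
}

static int
myfs_readpages(struct file *unused, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return iomap_readpages(mapping, pages, nr_pages, &myfs_iomap_ops);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.readpages	= myfs_readpages,
};
#endif
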
static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done.  Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}

static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}

int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);

int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = from >> inode->i_blkbits;
	unsigned last = (from + count - 1) >> inode->i_blkbits;
	unsigned i;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(), skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status = 0;

	if (PageUptodate(page))
		return 0;

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				break;
		}

	} while ((block_start += plen) < block_end);

	return status;
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However if we have encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a non
	 * uptodate page as a zero-length write, and force the caller to redo
	 * the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page))) {
		copied = 0;
	} else {
		iomap_set_range_uptodate(page, offset_in_page(pos), len);
		iomap_set_page_dirty(page);
	}
	return __generic_write_end(inode, pos, copied, page);
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	__generic_write_end(inode, pos, copied, page);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = generic_write_end(NULL, inode->i_mapping, pos, len,
				copied, page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	if (iomap->page_done)
		iomap->page_done(inode, pos, copied, page, iomap);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}
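
/*
 * Illustrative sketch (not from this file; the "myfs_" names are
 * hypothetical and filesystem-specific locking is reduced to i_rwsem): the
 * begin/end helpers above are driven by iomap_write_actor() below, which a
 * filesystem reaches from its ->write_iter roughly like this:
 */
#if 0	/* illustrative sketch only */
static ssize_t
myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif
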
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);
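
/*
 * Illustrative note (editor's sketch, hypothetical names): iomap_file_dirty()
 * above re-dirties an already-uptodate range through the
 * write_begin/write_end path, so that a caller such as a copy-on-write
 * unshare operation can force writeback to rewrite the blocks:
 */
#if 0	/* illustrative sketch only */
	error = iomap_file_dirty(inode, pos, len, &myfs_iomap_ops);
	if (error)
		goto out;
#endif
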
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
			iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_MAPPED:
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_INLINE:
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);

/*
 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
 * Returns true if found and updates @lastoff to the offset in file.
 */
static bool
page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
		int whence)
{
	const struct address_space_operations *ops = inode->i_mapping->a_ops;
	unsigned int bsize = i_blocksize(inode), off;
	bool seek_data = whence == SEEK_DATA;
	loff_t poff = page_offset(page);

	if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
		return false;

	if (*lastoff < poff) {
		/*
		 * Last offset smaller than the start of the page means we found
		 * a hole:
		 */
		if (whence == SEEK_HOLE)
			return true;
		*lastoff = poff;
	}

	/*
	 * Just check the page unless we can and should check block ranges:
	 */
	if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
		return PageUptodate(page) == seek_data;

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping))
		goto out_unlock_not_found;

	for (off = 0; off < PAGE_SIZE; off += bsize) {
		if (offset_in_page(*lastoff) >= off + bsize)
			continue;
		if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
			unlock_page(page);
			return true;
		}
		*lastoff = poff + off + bsize;
	}

out_unlock_not_found:
	unlock_page(page);
	return false;
}

/*
 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
 *
 * Within unwritten extents, the page cache determines which parts are holes
 * and which are data: uptodate buffer heads count as data; everything else
 * counts as a hole.
 *
 * Returns the resulting offset on success, and -ENOENT otherwise.
 */
static loff_t
page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
		int whence)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	loff_t lastoff = offset;
	struct pagevec pvec;

	if (length <= 0)
		return -ENOENT;

	pagevec_init(&pvec);

	do {
		unsigned nr_pages, i;

		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
						end - 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page_seek_hole_data(inode, page, &lastoff, whence))
				goto check_range;
			lastoff = page_offset(page) + PAGE_SIZE;
		}
		pagevec_release(&pvec);
	} while (index < end);

	/* When no page at lastoff and we are not done, we found a hole. */
	if (whence != SEEK_HOLE)
		goto not_found;

check_range:
	if (lastoff < offset + length)
		goto out;
not_found:
	lastoff = -ENOENT;
out:
	pagevec_release(&pvec);
	return lastoff;
}


static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/*FALLTHRU*/
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
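
/*
 * Illustrative sketch (not from this file; the "myfs_" names are
 * hypothetical): the seek helpers above slot into a filesystem's ->llseek
 * for the SEEK_HOLE/SEEK_DATA cases, roughly like this:
 */
#if 0	/* illustrative sketch only */
static loff_t
myfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &myfs_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &myfs_iomap_ops);
		break;
	default:
		return generic_file_llseek(file, offset, whence);
	}
	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
#endif
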
/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else {
		/*
		 * Use a FUA write if we need datasync semantics, this
		 * is a pure data IO that doesn't require any metadata
		 * updates and the underlying device supports FUA. This
		 * allows us to avoid cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return copied ? copied : ret;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied;
}

static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not.  This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write. This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;
	dio->wait_for_completion = is_sync_kiocb(iocb);

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO.  Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing.  If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				dio->wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion.  Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!dio->wait_for_completion)
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	ret = iomap_dio_complete(dio);

	return ret;

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
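
/*
 * Illustrative sketch (not from this file; the "myfs_" names are
 * hypothetical): a direct I/O read path would wrap iomap_dio_rw() roughly
 * as below, holding i_rwsem as the lockdep assertion above requires.  A
 * write path would additionally pass an end_io callback if it needs to
 * convert unwritten extents on completion.
 */
#if 0	/* illustrative sketch only */
static ssize_t
myfs_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
	inode_unlock_shared(inode);
	return ret;
}
#endif
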
/* Swapfile activation */

#ifdef CONFIG_SWAP
struct iomap_swapfile_info {
	struct iomap iomap;		/* accumulated iomap */
	struct swap_info_struct *sis;
	uint64_t lowest_ppage;		/* lowest physical addr seen (pages) */
	uint64_t highest_ppage;		/* highest physical addr seen (pages) */
	unsigned long nr_pages;		/* number of pages collected */
	int nr_extents;			/* extent count */
};

/*
 * Collect physical extents for this swap file.  Physical extents reported to
 * the swap code must be trimmed to align to a page boundary.  The logical
 * offset within the file is irrelevant since the swapfile code maps logical
 * page numbers of the swap device to the physical page-aligned extents.
 */
static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
{
	struct iomap *iomap = &isi->iomap;
	unsigned long nr_pages;
	uint64_t first_ppage;
	uint64_t first_ppage_reported;
	uint64_t next_ppage;
	int error;

	/*
	 * Round the start up and the end down so that the physical
	 * extent aligns to a page boundary.
	 */
	first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
			PAGE_SHIFT;

	/* Skip too-short physical extents. */
	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;

	/*
	 * Calculate how much swap space we're adding; the first page contains
	 * the swap header and doesn't count.  The mm still wants that first
	 * page fed to add_swap_extent, however.
	 */
	first_ppage_reported = first_ppage;
	if (iomap->offset == 0)
		first_ppage_reported++;
	if (isi->lowest_ppage > first_ppage_reported)
		isi->lowest_ppage = first_ppage_reported;
	if (isi->highest_ppage < (next_ppage - 1))
		isi->highest_ppage = next_ppage - 1;

	/* Add extent, set up for the next call. */
	error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
	if (error < 0)
		return error;
	isi->nr_extents += error;
	isi->nr_pages += nr_pages;
	return 0;
}

/*
 * Accumulate iomaps for this swap file.  We have to accumulate iomaps because
 * swap only cares about contiguous page-aligned physical extents and makes no
 * distinction between written and unwritten extents.
 */
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
		loff_t count, void *data, struct iomap *iomap)
{
	struct iomap_swapfile_info *isi = data;
	int error;

	switch (iomap->type) {
	case IOMAP_MAPPED:
	case IOMAP_UNWRITTEN:
		/* Only real or unwritten extents. */
		break;
	case IOMAP_INLINE:
		/* No inline data. */
		pr_err("swapon: file is inline\n");
		return -EINVAL;
	default:
		pr_err("swapon: file has unallocated extents\n");
		return -EINVAL;
	}

	/* No uncommitted metadata or shared blocks. */
	if (iomap->flags & IOMAP_F_DIRTY) {
		pr_err("swapon: file is not committed\n");
		return -EINVAL;
	}
	if (iomap->flags & IOMAP_F_SHARED) {
		pr_err("swapon: file has shared extents\n");
		return -EINVAL;
	}

	/* Only one bdev per swap file. */
	if (iomap->bdev != isi->sis->bdev) {
		pr_err("swapon: file is on multiple devices\n");
		return -EINVAL;
	}

	if (isi->iomap.length == 0) {
		/* No accumulated extent, so just store it. */
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	} else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
		/* Append this to the accumulated extent. */
		isi->iomap.length += iomap->length;
	} else {
		/* Otherwise, add the retained iomap and store this one. */
		error = iomap_swapfile_add_extent(isi);
		if (error)
			return error;
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	}
	return count;
}

/*
 * Iterate a swap file's iomaps to construct physical extents that can be
 * passed to the swapfile subsystem.
 */
int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops)
{
	struct iomap_swapfile_info isi = {
		.sis = sis,
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = 0;
	loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
	loff_t ret;

	/*
	 * Persist all file mapping metadata so that we won't have any
	 * IOMAP_F_DIRTY iomaps.
	 */
	ret = vfs_fsync(swap_file, 1);
	if (ret)
		return ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
				ops, &isi, iomap_swapfile_activate_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	if (isi.iomap.length) {
		ret = iomap_swapfile_add_extent(&isi);
		if (ret)
			return ret;
	}

	*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
	sis->max = isi.nr_pages;
	sis->pages = isi.nr_pages - 1;
	sis->highest_bit = isi.nr_pages - 1;
	return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
#endif /* CONFIG_SWAP */

static loff_t
iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	sector_t *bno = data, addr;

	if (iomap->type == IOMAP_MAPPED) {
		addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
		if (addr > INT_MAX)
			WARN(1, "would truncate bmap result\n");
		else
			*bno = addr;
	}
	return 0;
}

/* legacy ->bmap interface.  0 is the error return (!) */
sector_t
iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops)
{
	struct inode *inode = mapping->host;
	loff_t pos = bno << inode->i_blkbits;
	unsigned blocksize = i_blocksize(inode);

	if (filemap_write_and_wait(mapping))
		return 0;

	bno = 0;
	iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
	return bno;
}
EXPORT_SYMBOL_GPL(iomap_bmap);
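
/*
 * Illustrative sketch (not from this file; the "myfs_" name is
 * hypothetical): the legacy FIBMAP path would forward ->bmap to
 * iomap_bmap() as below; note the 0-for-error convention documented above.
 */
#if 0	/* illustrative sketch only */
static sector_t
myfs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &myfs_iomap_ops);
}
#endif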