Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.39, 1506 lines, 38 kB
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC		37
#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];

void __init
xfs_ioend_init(void)
{
	int i;

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&xfs_ioend_wq[i]);
}

void
xfs_ioend_wait(
	xfs_inode_t	*ip)
{
	wait_queue_head_t *wq = to_ioend_wq(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

STATIC void
xfs_ioend_wake(
	xfs_inode_t	*ip)
{
	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(to_ioend_wq(ip));
}

void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	/*
	 * Volume managers supporting multiple paths can send back ENODEV
	 * when the final path disappears.  In this case continuing to fill
	 * the page cache with dirty data which cannot be written out is
	 * evil, so prevent that.
	 */
	if (unlikely(ioend->io_error == -ENODEV)) {
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
				      __FILE__, __LINE__);
	}

	xfs_ioend_wake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	bsize = ioend->io_offset + ioend->io_size;
	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);
	return isize > ip->i_d.di_size ? isize : 0;
}

/*
 * Update on-disk file size now that data has been written to disk.  The
 * current in-memory file size is i_size.  If a write is beyond eof i_new_size
 * will be the intended file size until i_size is updated.  If this write does
 * not extend all the way to the valid file size then restrict this update to
 * the end of the write.
 *
 * This function does not block as blocking on the inode lock in IO completion
 * can lead to IO completion order dependency deadlocks.  If it can't get the
 * inode ilock it will return EAGAIN.  Callers must handle this.
 */
STATIC int
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	if (unlikely(ioend->io_error))
		return 0;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return EAGAIN;

	isize = xfs_ioend_new_eof(ioend);
	if (isize) {
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		if (ioend->io_type == IO_UNWRITTEN)
			queue_work(xfsconvertd_workqueue, &ioend->io_work);
		else
			queue_work(xfsdatad_workqueue, &ioend->io_work);
	}
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN &&
	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
		if (error)
			ioend->io_error = error;
	}

	/*
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
	error = xfs_setfilesize(ioend);
	ASSERT(!error || error == EAGAIN);

	/*
	 * If we didn't complete processing of the ioend, requeue it to the
	 * tail of the workqueue for another attempt later. Otherwise destroy
	 * it.
	 */
	if (error == EAGAIN) {
		atomic_inc(&ioend->io_remaining);
		xfs_finish_ioend(ioend);
		/* ensure we don't spin on blocked ioends */
		delay(1);
	} else {
		if (ioend->io_iocb)
			aio_complete(ioend->io_iocb, ioend->io_result, 0);
		xfs_destroy_ioend(ioend);
	}
}

/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_maxioffset);

	if (offset + count > mp->m_maxioffset)
		count = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
			  bmapi_flags, NULL, 0, imap, &nimaps, NULL);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	/*
	 * If the I/O is beyond EOF we mark the inode dirty immediately
	 * but don't update the inode size until I/O completion.
	 */
	if (xfs_ioend_new_eof(ioend))
		xfs_mark_inode_dirty(XFS_I(ioend->io_inode));

	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, then we can end up with a page that only has buffers
 * marked async write, and I/O completion can occur before we mark the other
 * buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only, for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = IO_DELALLOC;
			else
				type = IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
						imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_is_delayed_page(page, IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush the page, we
 * have to check the process flags first, if we are already in a transaction
 * or disk I/O during allocations is off, we need to fail the writepage and
 * redirty the page.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	int			delalloc, unwritten;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should really be done by the core VM, but until that happens
	 * filesystems like XFS, btrfs and ext4 have to take care of this
	 * by themselves.
	 */
	if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
		goto redirty;

	/*
	 * We need a transaction if there are delalloc or unwritten buffers
	 * on the page.
	 *
	 * If we need a transaction and the process flags say we are already
	 * in a transaction, or no IO is allowed then mark the page dirty
	 * again and leave the page as is.
	 */
	xfs_count_page_state(page, &delalloc, &unwritten);
	if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			unlock_page(page);
			return 0;
		}
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != IO_UNWRITTEN) {
				type = IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != IO_DELALLOC) {
				type = IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != IO_OVERWRITE) {
				type = IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page)) {
				ASSERT(buffer_mapped(bh));
				imap_valid = 0;
			}
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	if (ioend && imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
					wbc, end_index);
	}

	if (iohead)
		xfs_submit_ioend(wbc, iohead);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	if (create) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_map_shared(ip);
	}

	ASSERT(offset <= mp->m_maxioffset);
	if (offset + size > mp->m_maxioffset)
		size = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
			  XFS_BMAPI_ENTIRE, NULL, 0, &imap, &nimaps, NULL);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct) {
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
		} else {
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
		}
		if (error)
			goto out_unlock;

		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}
	xfs_iunlock(ip, lockmode);

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is, so that we can avoid repeated get_blocks calls.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}

int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}

/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents.  In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done.  But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions.  In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private,
	int			ret,
	bool			is_async)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	if (private && size > 0)
		ioend->io_type = IO_UNWRITTEN;

	if (is_async) {
		/*
		 * If we are converting an unwritten extent we need to delay
		 * the AIO completion until after the unwritten extent
		 * conversion has completed, otherwise do it ASAP.
		 */
		if (ioend->io_type == IO_UNWRITTEN) {
			ioend->io_iocb = iocb;
			ioend->io_result = ret;
		} else {
			aio_complete(iocb, ret, 0);
		}
		xfs_finish_ioend(ioend);
	} else {
		xfs_finish_ioend_sync(ioend);
	}
}

STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	ssize_t			ret;

	if (rw & WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   xfs_end_io_direct_write, NULL, 0);
		if (ret != -EIOCBQUEUED && iocb->private)
			xfs_destroy_ioend(iocb->private);
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   NULL, NULL, 0);
	}

	return ret;
}

STATIC void
xfs_vm_write_failed(
	struct address_space	*mapping,
	loff_t			to)
{
	struct inode		*inode = mapping->host;

	if (to > inode->i_size) {
		/*
		 * punch out the delalloc blocks we have already allocated. We
		 * don't call xfs_setattr() to do this as we may be in the
		 * middle of a multi-iovec write and so the vfs inode->i_size
		 * will not match the xfs ip->i_size and so it will zero too
		 * much. Hence we just truncate the page cache to zero what is
		 * necessary and punch the delalloc blocks directly.
		 */
		struct xfs_inode	*ip = XFS_I(inode);
		xfs_fileoff_t		start_fsb;
		xfs_fileoff_t		end_fsb;
		int			error;

		truncate_pagecache(inode, to, inode->i_size);

		/*
		 * Check if there are any blocks that are outside of i_size
		 * that need to be trimmed back.
		 */
		start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1;
		end_fsb = XFS_B_TO_FSB(ip->i_mount, to);
		if (end_fsb <= start_fsb)
			return;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
							end_fsb - start_fsb);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"xfs_vm_write_failed: unable to clean up ino %lld",
						ip->i_ino);
			}
		}
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	int			ret;

	ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
				pagep, xfs_get_blocks);
	if (unlikely(ret))
		xfs_vm_write_failed(mapping, pos + len);
	return ret;
}

STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len))
		xfs_vm_write_failed(mapping, pos + len);
	return ret;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};