[XFS] use pagevec lookups This reduces the time spent in the radix tree lookups and avoids unnecessary lock roundtrips.

SGI-PV: 947118
SGI-Modid: xfs-linux-melb:xfs-kern:203823a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>

authored by Christoph Hellwig and committed by Nathan Scott 10ce4444 78539fdf

+88 -56
+88 -56
fs/xfs/linux-2.6/xfs_aops.c
··· 40 #include "xfs_rw.h" 41 #include "xfs_iomap.h" 42 #include <linux/mpage.h> 43 #include <linux/writeback.h> 44 45 STATIC void xfs_count_page_state(struct page *, int *, int *, int *); ··· 502 */ 503 STATIC unsigned int 504 xfs_probe_unmapped_page( 505 - struct address_space *mapping, 506 - pgoff_t index, 507 unsigned int pg_offset) 508 { 509 - struct page *page; 510 int ret = 0; 511 512 - page = find_trylock_page(mapping, index); 513 - if (!page) 514 - return 0; 515 if (PageWriteback(page)) 516 - goto out; 517 518 if (page->mapping && PageDirty(page)) { 519 if (page_has_buffers(page)) { ··· 526 ret = PAGE_CACHE_SIZE; 527 } 528 529 - out: 530 - unlock_page(page); 531 return ret; 532 } 533 ··· 536 struct buffer_head *bh, 537 struct buffer_head *head) 538 { 539 - size_t len, total = 0; 540 pgoff_t tindex, tlast, tloff; 541 - unsigned int pg_offset; 542 - struct address_space *mapping = inode->i_mapping; 543 544 /* First sum forwards in this page */ 545 do { 546 if (buffer_mapped(bh)) 547 - break; 548 total += bh->b_size; 549 } while ((bh = bh->b_this_page) != head); 550 551 - /* If we reached the end of the page, sum forwards in 552 - * following pages. 553 - */ 554 - if (bh == head) { 555 - tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT; 556 - /* Prune this back to avoid pathological behavior */ 557 - tloff = min(tlast, startpage->index + 64); 558 - for (tindex = startpage->index + 1; tindex < tloff; tindex++) { 559 - len = xfs_probe_unmapped_page(mapping, tindex, 560 - PAGE_CACHE_SIZE); 561 - if (!len) 562 - return total; 563 total += len; 564 } 565 - if (tindex == tlast && 566 - (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { 567 - total += xfs_probe_unmapped_page(mapping, 568 - tindex, pg_offset); 569 - } 570 } 571 return total; 572 } 573 574 /* 575 - * Probe for a given page (index) in the inode and test if it is suitable 576 - * for writing as part of an unwritten or delayed allocate extent. 
577 - * Returns page locked and with an extra reference count if so, else NULL. 578 */ 579 - STATIC struct page * 580 - xfs_probe_delayed_page( 581 - struct inode *inode, 582 - pgoff_t index, 583 unsigned int type) 584 { 585 - struct page *page; 586 - 587 - page = find_trylock_page(inode->i_mapping, index); 588 - if (!page) 589 - return NULL; 590 if (PageWriteback(page)) 591 - goto out; 592 593 if (page->mapping && page_has_buffers(page)) { 594 struct buffer_head *bh, *head; ··· 621 } while ((bh = bh->b_this_page) != head); 622 623 if (acceptable) 624 - return page; 625 } 626 627 - out: 628 - unlock_page(page); 629 - return NULL; 630 } 631 632 /* ··· 637 xfs_convert_page( 638 struct inode *inode, 639 struct page *page, 640 xfs_iomap_t *iomapp, 641 xfs_ioend_t **ioendp, 642 struct writeback_control *wbc, 643 - void *private, 644 int startio, 645 int all_bh) 646 { ··· 651 int bbits = inode->i_blkbits; 652 int len, page_dirty; 653 int count = 0, done = 0, uptodate = 1; 654 655 end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)); 656 ··· 734 } 735 736 return done; 737 } 738 739 /* ··· 755 int all_bh, 756 pgoff_t tlast) 757 { 758 - struct page *page; 759 - unsigned int type = (*ioendp)->io_type; 760 - int done; 761 762 - for (done = 0; tindex <= tlast && !done; tindex++) { 763 - page = xfs_probe_delayed_page(inode, tindex, type); 764 - if (!page) 765 break; 766 - done = xfs_convert_page(inode, page, iomapp, ioendp, 767 - wbc, NULL, startio, all_bh); 768 } 769 } 770
··· 40 #include "xfs_rw.h" 41 #include "xfs_iomap.h" 42 #include <linux/mpage.h> 43 + #include <linux/pagevec.h> 44 #include <linux/writeback.h> 45 46 STATIC void xfs_count_page_state(struct page *, int *, int *, int *); ··· 501 */ 502 STATIC unsigned int 503 xfs_probe_unmapped_page( 504 + struct page *page, 505 unsigned int pg_offset) 506 { 507 int ret = 0; 508 509 if (PageWriteback(page)) 510 + return 0; 511 512 if (page->mapping && PageDirty(page)) { 513 if (page_has_buffers(page)) { ··· 530 ret = PAGE_CACHE_SIZE; 531 } 532 533 return ret; 534 } 535 ··· 542 struct buffer_head *bh, 543 struct buffer_head *head) 544 { 545 + struct pagevec pvec; 546 pgoff_t tindex, tlast, tloff; 547 + size_t total = 0; 548 + int done = 0, i; 549 550 /* First sum forwards in this page */ 551 do { 552 if (buffer_mapped(bh)) 553 + return total; 554 total += bh->b_size; 555 } while ((bh = bh->b_this_page) != head); 556 557 + /* if we reached the end of the page, sum forwards in following pages */ 558 + tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT; 559 + tindex = startpage->index + 1; 560 + 561 + /* Prune this back to avoid pathological behavior */ 562 + tloff = min(tlast, startpage->index + 64); 563 + 564 + pagevec_init(&pvec, 0); 565 + while (!done && tindex <= tloff) { 566 + unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1); 567 + 568 + if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len)) 569 + break; 570 + 571 + for (i = 0; i < pagevec_count(&pvec); i++) { 572 + struct page *page = pvec.pages[i]; 573 + size_t pg_offset, len = 0; 574 + 575 + if (tindex == tlast) { 576 + pg_offset = 577 + i_size_read(inode) & (PAGE_CACHE_SIZE - 1); 578 + if (!pg_offset) 579 + break; 580 + } else 581 + pg_offset = PAGE_CACHE_SIZE; 582 + 583 + if (page->index == tindex && !TestSetPageLocked(page)) { 584 + len = xfs_probe_unmapped_page(page, pg_offset); 585 + unlock_page(page); 586 + } 587 + 588 + if (!len) { 589 + done = 1; 590 + break; 591 + } 592 + 593 total += len; 594 } 595 + 
596 + pagevec_release(&pvec); 597 + cond_resched(); 598 } 599 + 600 return total; 601 } 602 603 /* 604 + * Test if a given page is suitable for writing as part of an unwritten 605 + * or delayed allocate extent. 606 */ 607 + STATIC int 608 + xfs_is_delayed_page( 609 + struct page *page, 610 unsigned int type) 611 { 612 if (PageWriteback(page)) 613 + return 0; 614 615 if (page->mapping && page_has_buffers(page)) { 616 struct buffer_head *bh, *head; ··· 611 } while ((bh = bh->b_this_page) != head); 612 613 if (acceptable) 614 + return 1; 615 } 616 617 + return 0; 618 } 619 620 /* ··· 629 xfs_convert_page( 630 struct inode *inode, 631 struct page *page, 632 + loff_t tindex, 633 xfs_iomap_t *iomapp, 634 xfs_ioend_t **ioendp, 635 struct writeback_control *wbc, 636 int startio, 637 int all_bh) 638 { ··· 643 int bbits = inode->i_blkbits; 644 int len, page_dirty; 645 int count = 0, done = 0, uptodate = 1; 646 + 647 + if (page->index != tindex) 648 + goto fail; 649 + if (TestSetPageLocked(page)) 650 + goto fail; 651 + if (PageWriteback(page)) 652 + goto fail_unlock_page; 653 + if (page->mapping != inode->i_mapping) 654 + goto fail_unlock_page; 655 + if (!xfs_is_delayed_page(page, (*ioendp)->io_type)) 656 + goto fail_unlock_page; 657 658 end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)); 659 ··· 715 } 716 717 return done; 718 + fail_unlock_page: 719 + unlock_page(page); 720 + fail: 721 + return 1; 722 } 723 724 /* ··· 732 int all_bh, 733 pgoff_t tlast) 734 { 735 + struct pagevec pvec; 736 + int done = 0, i; 737 738 + pagevec_init(&pvec, 0); 739 + while (!done && tindex <= tlast) { 740 + unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1); 741 + 742 + if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len)) 743 break; 744 + 745 + for (i = 0; i < pagevec_count(&pvec); i++) { 746 + done = xfs_convert_page(inode, pvec.pages[i], tindex++, 747 + iomapp, ioendp, wbc, startio, all_bh); 748 + if (done) 749 + break; 750 + } 751 + 752 + pagevec_release(&pvec); 753 
+ cond_resched(); 754 } 755 } 756