[XFS] use pagevec lookups

This reduces the time spent in the radix tree lookups and avoids
unnecessary lookup round trips: pages are now looked up in batches
with pagevec_lookup() instead of one find_trylock_page() call per page.

SGI-PV: 947118
SGI-Modid: xfs-linux-melb:xfs-kern:203823a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>

Authored by Christoph Hellwig, committed by Nathan Scott · 10ce4444 78539fdf

fs/xfs/linux-2.6/xfs_aops.c | +88 -56
···
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
 #include <linux/mpage.h>
+#include <linux/pagevec.h>
 #include <linux/writeback.h>
 
 STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
···
  */
 STATIC unsigned int
 xfs_probe_unmapped_page(
-        struct address_space    *mapping,
-        pgoff_t                 index,
+        struct page             *page,
         unsigned int            pg_offset)
 {
-        struct page             *page;
         int                     ret = 0;
 
-        page = find_trylock_page(mapping, index);
-        if (!page)
-                return 0;
         if (PageWriteback(page))
-                goto out;
+                return 0;
 
         if (page->mapping && PageDirty(page)) {
                 if (page_has_buffers(page)) {
···
                         ret = PAGE_CACHE_SIZE;
         }
 
- out:
-        unlock_page(page);
         return ret;
 }
···
         struct buffer_head      *bh,
         struct buffer_head      *head)
 {
-        size_t                  len, total = 0;
+        struct pagevec          pvec;
         pgoff_t                 tindex, tlast, tloff;
-        unsigned int            pg_offset;
-        struct address_space    *mapping = inode->i_mapping;
+        size_t                  total = 0;
+        int                     done = 0, i;
 
         /* First sum forwards in this page */
         do {
                 if (buffer_mapped(bh))
-                        break;
+                        return total;
                 total += bh->b_size;
         } while ((bh = bh->b_this_page) != head);
 
-        /* If we reached the end of the page, sum forwards in
-         * following pages.
-         */
-        if (bh == head) {
-                tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-                /* Prune this back to avoid pathological behavior */
-                tloff = min(tlast, startpage->index + 64);
-                for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
-                        len = xfs_probe_unmapped_page(mapping, tindex,
-                                                        PAGE_CACHE_SIZE);
-                        if (!len)
-                                return total;
+        /* if we reached the end of the page, sum forwards in following pages */
+        tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+        tindex = startpage->index + 1;
+
+        /* Prune this back to avoid pathological behavior */
+        tloff = min(tlast, startpage->index + 64);
+
+        pagevec_init(&pvec, 0);
+        while (!done && tindex <= tloff) {
+                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
+                        break;
+
+                for (i = 0; i < pagevec_count(&pvec); i++) {
+                        struct page     *page = pvec.pages[i];
+                        size_t          pg_offset, len = 0;
+
+                        if (tindex == tlast) {
+                                pg_offset =
+                                        i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
+                                if (!pg_offset)
+                                        break;
+                        } else
+                                pg_offset = PAGE_CACHE_SIZE;
+
+                        if (page->index == tindex && !TestSetPageLocked(page)) {
+                                len = xfs_probe_unmapped_page(page, pg_offset);
+                                unlock_page(page);
+                        }
+
+                        if (!len) {
+                                done = 1;
+                                break;
+                        }
+
                         total += len;
                 }
-                if (tindex == tlast &&
-                    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
-                        total += xfs_probe_unmapped_page(mapping,
-                                                         tindex, pg_offset);
-                }
+
+                pagevec_release(&pvec);
+                cond_resched();
         }
+
         return total;
 }
 
 /*
- * Probe for a given page (index) in the inode and test if it is suitable
- * for writing as part of an unwritten or delayed allocate extent.
- * Returns page locked and with an extra reference count if so, else NULL.
+ * Test if a given page is suitable for writing as part of an unwritten
+ * or delayed allocate extent.
  */
-STATIC struct page *
-xfs_probe_delayed_page(
-        struct inode            *inode,
-        pgoff_t                 index,
+STATIC int
+xfs_is_delayed_page(
+        struct page             *page,
         unsigned int            type)
 {
-        struct page             *page;
-
-        page = find_trylock_page(inode->i_mapping, index);
-        if (!page)
-                return NULL;
         if (PageWriteback(page))
-                goto out;
+                return 0;
 
         if (page->mapping && page_has_buffers(page)) {
                 struct buffer_head      *bh, *head;
···
                 } while ((bh = bh->b_this_page) != head);
 
                 if (acceptable)
-                        return page;
+                        return 1;
         }
 
- out:
-        unlock_page(page);
-        return NULL;
+        return 0;
 }
···
 xfs_convert_page(
         struct inode            *inode,
         struct page             *page,
+        loff_t                  tindex,
         xfs_iomap_t             *iomapp,
         xfs_ioend_t             **ioendp,
         struct writeback_control *wbc,
-        void                    *private,
         int                     startio,
         int                     all_bh)
 {
···
         int                     bbits = inode->i_blkbits;
         int                     len, page_dirty;
         int                     count = 0, done = 0, uptodate = 1;
+
+        if (page->index != tindex)
+                goto fail;
+        if (TestSetPageLocked(page))
+                goto fail;
+        if (PageWriteback(page))
+                goto fail_unlock_page;
+        if (page->mapping != inode->i_mapping)
+                goto fail_unlock_page;
+        if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
+                goto fail_unlock_page;
 
         end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
···
         }
 
         return done;
+ fail_unlock_page:
+        unlock_page(page);
+ fail:
+        return 1;
 }
···
         int                     all_bh,
         pgoff_t                 tlast)
 {
-        struct page             *page;
-        unsigned int            type = (*ioendp)->io_type;
-        int                     done;
+        struct pagevec          pvec;
+        int                     done = 0, i;
 
-        for (done = 0; tindex <= tlast && !done; tindex++) {
-                page = xfs_probe_delayed_page(inode, tindex, type);
-                if (!page)
+        pagevec_init(&pvec, 0);
+        while (!done && tindex <= tlast) {
+                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                         break;
-                done = xfs_convert_page(inode, page, iomapp, ioendp,
-                                wbc, NULL, startio, all_bh);
+
+                for (i = 0; i < pagevec_count(&pvec); i++) {
+                        done = xfs_convert_page(inode, pvec.pages[i], tindex++,
+                                        iomapp, ioendp, wbc, startio, all_bh);
+                        if (done)
+                                break;
+                }
+
+                pagevec_release(&pvec);
+                cond_resched();
         }
 }
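For reference, the batching pattern both converted loops share looks like
this when pulled out on its own. This is a minimal sketch against the
2.6.16-era pagecache API (pagevec_lookup() takes the start index by value;
TestSetPageLocked() is the non-blocking trylock of that era). The names
probe_range() and probe_one() are hypothetical and used only for
illustration; the patch itself open-codes this loop in
xfs_probe_unmapped_cluster() and xfs_cluster_write().

#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/* Hypothetical helper: count contiguous probeable pages from 'index' on. */
static pgoff_t
probe_range(
        struct address_space    *mapping,
        pgoff_t                 index,
        pgoff_t                 last,
        int                     (*probe_one)(struct page *))
{
        struct pagevec          pvec;
        pgoff_t                 count = 0;
        int                     done = 0, i;

        pagevec_init(&pvec, 0);
        while (!done && index <= last) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, last - index + 1);

                /* one radix tree walk returns up to PAGEVEC_SIZE pages */
                if (!pagevec_lookup(&pvec, mapping, index, len))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page     *page = pvec.pages[i];

                        /*
                         * The lookup returns whatever pages exist at or
                         * beyond 'index'; a hole in the file or a page we
                         * cannot trylock ends the contiguous run, as in
                         * the patch's probe loop.
                         */
                        if (page->index != index ||
                            TestSetPageLocked(page)) {
                                done = 1;
                                break;
                        }
                        if (!probe_one(page)) {
                                unlock_page(page);
                                done = 1;
                                break;
                        }
                        unlock_page(page);
                        count++;
                        index++;
                }

                /* drop the page references pagevec_lookup() took */
                pagevec_release(&pvec);
                cond_resched();
        }
        return count;
}

Each pagevec_lookup() call is a single tree descent that takes references on
up to PAGEVEC_SIZE (14 at the time) pages, where the old code paid one
find_trylock_page() descent plus a lock/unlock round trip per page; that is
where the saved radix tree time in the description comes from.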