[XFS] cluster rewrites

We can cluster mapped pages as well. This improves performance on
rewrites, since we can reduce the number of allocator calls.

SGI-PV: 947118
SGI-Modid: xfs-linux-melb:xfs-kern:203829a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>

Authored by Christoph Hellwig, committed by Nathan Scott (commit 6c4fe19f, parent 7336cea8)

+47 -16
fs/xfs/linux-2.6/xfs_aops.c
···
 }
 
 /*
- * Look for a page at index which is unlocked and not mapped
- * yet - clustering for mmap write case.
+ * Look for a page at index that is suitable for clustering.
  */
 STATIC unsigned int
-xfs_probe_unmapped_page(
+xfs_probe_page(
 	struct page		*page,
-	unsigned int		pg_offset)
+	unsigned int		pg_offset,
+	int			mapped)
 {
 	int			ret = 0;
 
···
 
 			bh = head = page_buffers(page);
 			do {
-				if (buffer_mapped(bh) || !buffer_uptodate(bh))
+				if (!buffer_uptodate(bh))
+					break;
+				if (mapped != buffer_mapped(bh))
 					break;
 				ret += bh->b_size;
 				if (ret >= pg_offset)
 					break;
 			} while ((bh = bh->b_this_page) != head);
 		} else
-			ret = PAGE_CACHE_SIZE;
+			ret = mapped ? 0 : PAGE_CACHE_SIZE;
 	}
 
 	return ret;
 }
 
 STATIC size_t
-xfs_probe_unmapped_cluster(
+xfs_probe_cluster(
 	struct inode		*inode,
 	struct page		*startpage,
 	struct buffer_head	*bh,
-	struct buffer_head	*head)
+	struct buffer_head	*head,
+	int			mapped)
 {
 	struct pagevec		pvec;
 	pgoff_t			tindex, tlast, tloff;
···
 
 	/* First sum forwards in this page */
 	do {
-		if (buffer_mapped(bh))
+		if (mapped != buffer_mapped(bh))
 			return total;
 		total += bh->b_size;
 	} while ((bh = bh->b_this_page) != head);
···
 				pg_offset = PAGE_CACHE_SIZE;
 
 			if (page->index == tindex && !TestSetPageLocked(page)) {
-				len = xfs_probe_unmapped_page(page, pg_offset);
+				len = xfs_probe_page(page, pg_offset, mapped);
 				unlock_page(page);
 			}
 
···
 				acceptable = (type == IOMAP_UNWRITTEN);
 			else if (buffer_delay(bh))
 				acceptable = (type == IOMAP_DELAY);
+			else if (buffer_mapped(bh))
+				acceptable = (type == 0);
 			else
 				break;
 		} while ((bh = bh->b_this_page) != head);
···
 	ssize_t			size, len;
 	int			flags, err, iomap_valid = 0, uptodate = 1;
 	int			page_dirty, count = 0, trylock_flag = 0;
+	int			all_bh = unmapped;
 
 	/* wait for other IO threads? */
 	if (startio && wbc->sync_mode != WB_SYNC_NONE)
···
 
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
+	flags = -1;
+	type = 0;
 
 	/* TODO: cleanup count and page_dirty */
 
···
 		if (buffer_unwritten(bh) || buffer_delay(bh) ||
 		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
 		     !buffer_mapped(bh) && (unmapped || startio))) {
+			/*
+			 * Make sure we don't use a read-only iomap
+			 */
+			if (flags == BMAPI_READ)
+				iomap_valid = 0;
+
 			if (buffer_unwritten(bh)) {
 				type = IOMAP_UNWRITTEN;
 				flags = BMAPI_WRITE|BMAPI_IGNSTATE;
···
 				if (!startio)
 					flags |= trylock_flag;
 			} else {
-				type = 0;
+				type = IOMAP_NEW;
 				flags = BMAPI_WRITE|BMAPI_MMAP;
 			}
 
 			if (!iomap_valid) {
-				if (type == 0) {
-					size = xfs_probe_unmapped_cluster(inode,
-							page, bh, head);
+				if (type == IOMAP_NEW) {
+					size = xfs_probe_cluster(inode,
+							page, bh, head, 0);
 				} else {
 					size = len;
 				}
···
 				count++;
 			}
 		} else if (buffer_uptodate(bh) && startio) {
-			type = 0;
+			/*
+			 * we got here because the buffer is already mapped.
+			 * That means it must already have extents allocated
+			 * underneath it. Map the extent by reading it.
+			 */
+			if (!iomap_valid || type != 0) {
+				flags = BMAPI_READ;
+				size = xfs_probe_cluster(inode, page, bh,
+								head, 1);
+				err = xfs_map_blocks(inode, offset, size,
+						&iomap, flags);
+				if (err)
+					goto error;
+				iomap_valid = xfs_iomap_valid(&iomap, offset);
+			}
 
+			type = 0;
 			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
 				ASSERT(buffer_mapped(bh));
+				if (iomap_valid)
+					all_bh = 1;
 				xfs_add_to_ioend(inode, bh, offset, type,
 						&ioend, !iomap_valid);
 				page_dirty--;
···
 					PAGE_CACHE_SHIFT;
 		tlast = min_t(pgoff_t, offset, last_index);
 		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
-					wbc, startio, unmapped, tlast);
+					wbc, startio, all_bh, tlast);
 	}
 
 	if (iohead)
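For readers less familiar with the buffer_head walk, here is a small userspace sketch of the generalized probe this commit introduces. It is not the kernel code: "struct buffer", probe_buffers(), and the scenario in main() are illustrative stand-ins for buffer_head state, showing how a single "mapped" flag lets the same loop find clusters of already-mapped buffers (the rewrite case) as well as unmapped ones.

/*
 * Userspace sketch only -- not kernel code. "struct buffer" and the
 * scenario below are illustrative stand-ins for buffer_head state.
 */
#include <stdio.h>
#include <stddef.h>

struct buffer {
	size_t	size;		/* bytes covered by this buffer */
	int	uptodate;	/* contents valid? */
	int	mapped;		/* block mapping already allocated? */
};

/*
 * Analogue of the xfs_probe_page() loop: sum the sizes of leading
 * buffers whose mapped state matches what the caller is clustering.
 * mapped == 0 probes unmapped buffers (the old behaviour); mapped == 1
 * probes already-mapped buffers, i.e. the rewrite case added here.
 */
static size_t
probe_buffers(const struct buffer *bh, int nr, int mapped)
{
	size_t	total = 0;
	int	i;

	for (i = 0; i < nr; i++) {
		if (!bh[i].uptodate)
			break;
		if (mapped != bh[i].mapped)
			break;
		total += bh[i].size;
	}
	return total;
}

int
main(void)
{
	/* a page of four 1k buffers; the first three are rewrites */
	struct buffer page[] = {
		{ 1024, 1, 1 },
		{ 1024, 1, 1 },
		{ 1024, 1, 1 },
		{ 1024, 1, 0 },
	};

	/* mapped probe finds a 3072-byte cluster for a single ioend */
	printf("mapped cluster:   %zu bytes\n",
	       probe_buffers(page, 4, 1));
	/* unmapped probe stops at the first mapped buffer: 0 bytes */
	printf("unmapped cluster: %zu bytes\n",
	       probe_buffers(page, 4, 0));
	return 0;
}

In the patched xfs_page_state_convert() the same idea plays out at full scale: already-mapped buffers are probed with mapped == 1 and the resulting cluster is mapped with a single BMAPI_READ call to xfs_map_blocks(), rather than handling each buffer separately, which is where the saving on rewrites comes from.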