[XFS] cluster rewrites

We can cluster mapped pages as well. This improves performance on
rewrites, since we can reduce the number of allocator calls.

SGI-PV: 947118
SGI-Modid: xfs-linux-melb:xfs-kern:203829a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>

Authored by Christoph Hellwig and committed by Nathan Scott (6c4fe19f, 7336cea8)
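For context, the pattern the commit exploits can be shown in a few lines of
plain C. This is an illustrative userspace sketch only, not kernel code;
block_state, probe_cluster(), map_blocks() and writeback() are hypothetical
names. The idea: probe forward over contiguous blocks that are in the same
state and issue one mapping/allocator call for the whole run instead of one
per block.

	#include <stddef.h>

	/* Hypothetical per-block state: does the block already have an extent? */
	enum block_state { UNMAPPED = 0, MAPPED = 1 };

	/* Count contiguous blocks sharing the state of blocks[start]. */
	static size_t probe_cluster(const enum block_state *blocks,
				    size_t nblocks, size_t start)
	{
		size_t len = 0;

		while (start + len < nblocks &&
		       blocks[start + len] == blocks[start])
			len++;
		return len;
	}

	/* Stand-in for one expensive mapping/allocator call covering len blocks. */
	static void map_blocks(size_t start, size_t len, enum block_state state)
	{
		(void)start; (void)len; (void)state;
	}

	/* One mapping call per cluster instead of one per block. */
	static void writeback(const enum block_state *blocks, size_t nblocks)
	{
		size_t i = 0;

		while (i < nblocks) {
			size_t len = probe_cluster(blocks, nblocks, i);

			map_blocks(i, len, blocks[i]);
			i += len;
		}
	}

On a rewrite every block is already mapped, so before this commit each one
fell out of the unmapped-only clustering path; probing for mapped runs lets a
whole span be handled with a single call.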

+47 -16
fs/xfs/linux-2.6/xfs_aops.c
···
 }
 
 /*
- * Look for a page at index which is unlocked and not mapped
- * yet - clustering for mmap write case.
+ * Look for a page at index that is suitable for clustering.
  */
 STATIC unsigned int
-xfs_probe_unmapped_page(
+xfs_probe_page(
 	struct page *page,
-	unsigned int pg_offset)
+	unsigned int pg_offset,
+	int mapped)
 {
 	int ret = 0;
 
···
 
 		bh = head = page_buffers(page);
 		do {
-			if (buffer_mapped(bh) || !buffer_uptodate(bh))
+			if (!buffer_uptodate(bh))
+				break;
+			if (mapped != buffer_mapped(bh))
 				break;
 			ret += bh->b_size;
 			if (ret >= pg_offset)
 				break;
 		} while ((bh = bh->b_this_page) != head);
 	} else
-		ret = PAGE_CACHE_SIZE;
+		ret = mapped ? 0 : PAGE_CACHE_SIZE;
 	}
 
 	return ret;
 }
 
 STATIC size_t
-xfs_probe_unmapped_cluster(
+xfs_probe_cluster(
 	struct inode *inode,
 	struct page *startpage,
 	struct buffer_head *bh,
-	struct buffer_head *head)
+	struct buffer_head *head,
+	int mapped)
 {
 	struct pagevec pvec;
 	pgoff_t tindex, tlast, tloff;
···
 
 	/* First sum forwards in this page */
 	do {
-		if (buffer_mapped(bh))
+		if (mapped != buffer_mapped(bh))
 			return total;
 		total += bh->b_size;
 	} while ((bh = bh->b_this_page) != head);
···
 				pg_offset = PAGE_CACHE_SIZE;
 
 			if (page->index == tindex && !TestSetPageLocked(page)) {
-				len = xfs_probe_unmapped_page(page, pg_offset);
+				len = xfs_probe_page(page, pg_offset, mapped);
 				unlock_page(page);
 			}
 
···
 				acceptable = (type == IOMAP_UNWRITTEN);
 			else if (buffer_delay(bh))
 				acceptable = (type == IOMAP_DELAY);
+			else if (buffer_mapped(bh))
+				acceptable = (type == 0);
 			else
 				break;
 		} while ((bh = bh->b_this_page) != head);
···
 	ssize_t size, len;
 	int flags, err, iomap_valid = 0, uptodate = 1;
 	int page_dirty, count = 0, trylock_flag = 0;
+	int all_bh = unmapped;
 
 	/* wait for other IO threads? */
 	if (startio && wbc->sync_mode != WB_SYNC_NONE)
···
 
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
+	flags = -1;
+	type = 0;
 
 	/* TODO: cleanup count and page_dirty */
···
 		if (buffer_unwritten(bh) || buffer_delay(bh) ||
 		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
 		     !buffer_mapped(bh) && (unmapped || startio))) {
+			/*
+			 * Make sure we don't use a read-only iomap
+			 */
+			if (flags == BMAPI_READ)
+				iomap_valid = 0;
+
 			if (buffer_unwritten(bh)) {
 				type = IOMAP_UNWRITTEN;
 				flags = BMAPI_WRITE|BMAPI_IGNSTATE;
···
 				if (!startio)
 					flags |= trylock_flag;
 			} else {
-				type = 0;
+				type = IOMAP_NEW;
 				flags = BMAPI_WRITE|BMAPI_MMAP;
 			}
 
 			if (!iomap_valid) {
-				if (type == 0) {
-					size = xfs_probe_unmapped_cluster(inode,
-							page, bh, head);
+				if (type == IOMAP_NEW) {
+					size = xfs_probe_cluster(inode,
+							page, bh, head, 0);
 				} else {
 					size = len;
 				}
···
 				count++;
 			}
 		} else if (buffer_uptodate(bh) && startio) {
-			type = 0;
+			/*
+			 * we got here because the buffer is already mapped.
+			 * That means it must already have extents allocated
+			 * underneath it. Map the extent by reading it.
+			 */
+			if (!iomap_valid || type != 0) {
+				flags = BMAPI_READ;
+				size = xfs_probe_cluster(inode, page, bh,
+						head, 1);
+				err = xfs_map_blocks(inode, offset, size,
+						&iomap, flags);
+				if (err)
+					goto error;
+				iomap_valid = xfs_iomap_valid(&iomap, offset);
+			}
 
+			type = 0;
 			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
 				ASSERT(buffer_mapped(bh));
+				if (iomap_valid)
+					all_bh = 1;
 				xfs_add_to_ioend(inode, bh, offset, type,
 						&ioend, !iomap_valid);
 				page_dirty--;
···
 				PAGE_CACHE_SHIFT;
 		tlast = min_t(pgoff_t, offset, last_index);
 		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
-					wbc, startio, unmapped, tlast);
+					wbc, startio, all_bh, tlast);
 	}
 
 	if (iohead)
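The heart of the diff is the probe loop, which now takes a mapped argument
and clusters whichever buffer state the caller asks for. Below is a hedged
userspace rendering of the new xfs_probe_page() logic; struct buf and the
array walk are stand-ins for the kernel's buffer_head ring, and only the
control flow is taken from the patch.

	#include <stdbool.h>
	#include <stddef.h>

	/* Stand-in for the buffer_head ring walked by xfs_probe_page(). */
	struct buf {
		bool   uptodate;
		bool   mapped;
		size_t size;
	};

	/*
	 * Mirrors the reworked probe: sum buffer sizes while the buffers
	 * stay uptodate and match the requested mapped/unmapped state,
	 * stopping once pg_offset bytes are covered.  Returns how many
	 * bytes of the page are clusterable.
	 */
	static size_t probe_page(const struct buf *bufs, size_t n,
				 size_t pg_offset, bool mapped)
	{
		size_t ret = 0;

		for (size_t i = 0; i < n; i++) {
			if (!bufs[i].uptodate)
				break;
			if (mapped != bufs[i].mapped)
				break;
			ret += bufs[i].size;
			if (ret >= pg_offset)
				break;
		}
		return ret;
	}

With mapped = 0 this behaves like the old xfs_probe_unmapped_page(); with
mapped = 1 it measures how much of the page is already-allocated data that
can be written back after a single BMAPI_READ mapping, which is what lets
xfs_page_state_convert() pass all_bh to xfs_cluster_write() above.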