[XFS] clean up the xfs_offset_to_map interface Currently we pass a struct page and a relative offset into that page around, and the function returns the current xfs_iomap_t if the block at the specified offset fits into it, or a NULL pointer otherwise. This patch passes the full 64-bit offset into the inode, which all callers have anyway, and changes the return value to a simple boolean. Also the function gets a more descriptive name: xfs_iomap_valid.

SGI-PV: 947118
SGI-Modid: xfs-linux-melb:xfs-kern:203825a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>

authored by Christoph Hellwig and committed by Nathan Scott 1defeac9 10ce4444

+53 -83
+53 -83
fs/xfs/linux-2.6/xfs_aops.c
··· 228 return -error; 229 } 230 231 - /* 232 - * Finds the corresponding mapping in block @map array of the 233 - * given @offset within a @page. 234 - */ 235 - STATIC xfs_iomap_t * 236 - xfs_offset_to_map( 237 - struct page *page, 238 xfs_iomap_t *iomapp, 239 - unsigned long offset) 240 { 241 - xfs_off_t full_offset; /* offset from start of file */ 242 - 243 - ASSERT(offset < PAGE_CACHE_SIZE); 244 - 245 - full_offset = page->index; /* NB: using 64bit number */ 246 - full_offset <<= PAGE_CACHE_SHIFT; /* offset from file start */ 247 - full_offset += offset; /* offset from page start */ 248 - 249 - if (full_offset < iomapp->iomap_offset) 250 - return NULL; 251 - if (iomapp->iomap_offset + (iomapp->iomap_bsize -1) >= full_offset) 252 - return iomapp; 253 - return NULL; 254 } 255 256 /* ··· 445 446 STATIC void 447 xfs_map_at_offset( 448 - struct page *page, 449 struct buffer_head *bh, 450 - unsigned long offset, 451 int block_bits, 452 - xfs_iomap_t *iomapp, 453 - xfs_ioend_t *ioend) 454 { 455 xfs_daddr_t bn; 456 - xfs_off_t delta; 457 int sector_shift; 458 459 ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE)); 460 ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY)); 461 ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL); 462 463 - delta = page->index; 464 - delta <<= PAGE_CACHE_SHIFT; 465 - delta += offset; 466 - delta -= iomapp->iomap_offset; 467 - delta >>= block_bits; 468 - 469 sector_shift = block_bits - BBSHIFT; 470 - bn = iomapp->iomap_bn >> sector_shift; 471 - bn += delta; 472 - BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME)); 473 ASSERT((bn << sector_shift) >= iomapp->iomap_bn); 474 475 lock_buffer(bh); ··· 545 if (tindex == tlast) { 546 pg_offset = 547 i_size_read(inode) & (PAGE_CACHE_SIZE - 1); 548 - if (!pg_offset) 549 break; 550 } else 551 pg_offset = PAGE_CACHE_SIZE; 552 ··· 563 } 564 565 total += len; 566 } 567 568 pagevec_release(&pvec); ··· 617 struct inode *inode, 618 struct page *page, 619 loff_t tindex, 620 - xfs_iomap_t *iomapp, 621 xfs_ioend_t **ioendp, 622 
struct writeback_control *wbc, 623 int startio, 624 int all_bh) 625 { 626 struct buffer_head *bh, *head; 627 - xfs_iomap_t *mp = iomapp, *tmp; 628 unsigned long p_offset, end_offset; 629 unsigned int type; 630 int bbits = inode->i_blkbits; 631 int len, page_dirty; 632 int count = 0, done = 0, uptodate = 1; 633 634 if (page->index != tindex) 635 goto fail; ··· 682 } 683 continue; 684 } 685 - tmp = xfs_offset_to_map(page, mp, p_offset); 686 - if (!tmp) { 687 done = 1; 688 continue; 689 } 690 - ASSERT(!(tmp->iomap_flags & IOMAP_HOLE)); 691 - ASSERT(!(tmp->iomap_flags & IOMAP_DELAY)); 692 693 - xfs_map_at_offset(page, bh, p_offset, bbits, tmp, *ioendp); 694 if (startio) { 695 xfs_add_to_ioend(inode, bh, p_offset, 696 type, ioendp, done); ··· 784 int unmapped) /* also implies page uptodate */ 785 { 786 struct buffer_head *bh, *head; 787 - xfs_iomap_t *iomp, iomap; 788 xfs_ioend_t *ioend = NULL, *iohead = NULL; 789 loff_t offset; 790 unsigned long p_offset = 0; 791 unsigned int type; 792 __uint64_t end_offset; 793 pgoff_t end_index, last_index, tlast; 794 - int flags, len, err, done = 1; 795 - int uptodate = 1; 796 int page_dirty, count = 0, trylock_flag = 0; 797 798 /* wait for other IO threads? */ ··· 832 p_offset = p_offset ? 
roundup(p_offset, len) : PAGE_CACHE_SIZE; 833 page_dirty = p_offset / len; 834 835 - iomp = NULL; 836 bh = head = page_buffers(page); 837 offset = page_offset(page); 838 839 - /* TODO: fix up "done" variable and iomap pointer (boolean) */ 840 /* TODO: cleanup count and page_dirty */ 841 842 do { ··· 843 if (!buffer_uptodate(bh)) 844 uptodate = 0; 845 if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) { 846 - done = 1; 847 continue; 848 } 849 850 - if (iomp) { 851 - iomp = xfs_offset_to_map(page, &iomap, p_offset); 852 - done = (iomp == NULL); 853 - } 854 855 /* 856 * First case, map an unwritten extent and prepare for ··· 872 flags |= trylock_flag; 873 } 874 875 - if (!iomp) { 876 - done = 1; 877 err = xfs_map_blocks(inode, offset, len, &iomap, 878 flags); 879 if (err) 880 goto error; 881 - iomp = xfs_offset_to_map(page, &iomap, 882 - p_offset); 883 - done = (iomp == NULL); 884 } 885 - if (iomp) { 886 - xfs_map_at_offset(page, bh, p_offset, 887 - inode->i_blkbits, iomp, ioend); 888 if (startio) { 889 xfs_add_to_ioend(inode, bh, p_offset, 890 - type, &ioend, done); 891 } else { 892 set_buffer_dirty(bh); 893 unlock_buffer(bh); ··· 893 } 894 page_dirty--; 895 count++; 896 - } else { 897 - done = 1; 898 } 899 } else if ((buffer_uptodate(bh) || PageUptodate(page)) && 900 (unmapped || startio)) { ··· 905 * was found, and we are in a path where we 906 * need to write the whole page out. 
907 */ 908 - if (!iomp) { 909 int size; 910 911 size = xfs_probe_unmapped_cluster( ··· 913 err = xfs_map_blocks(inode, offset, 914 size, &iomap, 915 BMAPI_WRITE|BMAPI_MMAP); 916 - if (err) { 917 goto error; 918 - } 919 - iomp = xfs_offset_to_map(page, &iomap, 920 - p_offset); 921 - done = (iomp == NULL); 922 } 923 - if (iomp) { 924 - xfs_map_at_offset(page, bh, p_offset, 925 - inode->i_blkbits, iomp, 926 - ioend); 927 if (startio) { 928 xfs_add_to_ioend(inode, 929 bh, p_offset, type, 930 - &ioend, done); 931 } else { 932 set_buffer_dirty(bh); 933 unlock_buffer(bh); ··· 933 } 934 page_dirty--; 935 count++; 936 - } else { 937 - done = 1; 938 } 939 } else if (startio) { 940 if (buffer_uptodate(bh) && ··· 940 ASSERT(buffer_mapped(bh)); 941 xfs_add_to_ioend(inode, 942 bh, p_offset, type, 943 - &ioend, done); 944 page_dirty--; 945 count++; 946 } else { 947 - done = 1; 948 } 949 } else { 950 - done = 1; 951 } 952 } 953 ··· 962 if (startio) 963 xfs_start_page_writeback(page, wbc, 1, count); 964 965 - if (ioend && iomp && !done) { 966 - offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >> 967 PAGE_CACHE_SHIFT; 968 tlast = min_t(pgoff_t, offset, last_index); 969 - xfs_cluster_write(inode, page->index + 1, iomp, &ioend, 970 wbc, startio, unmapped, tlast); 971 } 972
··· 228 return -error; 229 } 230 231 + STATIC inline int 232 + xfs_iomap_valid( 233 xfs_iomap_t *iomapp, 234 + loff_t offset) 235 { 236 + return offset >= iomapp->iomap_offset && 237 + offset < iomapp->iomap_offset + iomapp->iomap_bsize; 238 } 239 240 /* ··· 461 462 STATIC void 463 xfs_map_at_offset( 464 struct buffer_head *bh, 465 + loff_t offset, 466 int block_bits, 467 + xfs_iomap_t *iomapp) 468 { 469 xfs_daddr_t bn; 470 int sector_shift; 471 472 ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE)); 473 ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY)); 474 ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL); 475 476 sector_shift = block_bits - BBSHIFT; 477 + bn = (iomapp->iomap_bn >> sector_shift) + 478 + ((offset - iomapp->iomap_offset) >> block_bits); 479 + 480 + ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME)); 481 ASSERT((bn << sector_shift) >= iomapp->iomap_bn); 482 483 lock_buffer(bh); ··· 569 if (tindex == tlast) { 570 pg_offset = 571 i_size_read(inode) & (PAGE_CACHE_SIZE - 1); 572 + if (!pg_offset) { 573 + done = 1; 574 break; 575 + } 576 } else 577 pg_offset = PAGE_CACHE_SIZE; 578 ··· 585 } 586 587 total += len; 588 + tindex++; 589 } 590 591 pagevec_release(&pvec); ··· 638 struct inode *inode, 639 struct page *page, 640 loff_t tindex, 641 + xfs_iomap_t *mp, 642 xfs_ioend_t **ioendp, 643 struct writeback_control *wbc, 644 int startio, 645 int all_bh) 646 { 647 struct buffer_head *bh, *head; 648 unsigned long p_offset, end_offset; 649 unsigned int type; 650 int bbits = inode->i_blkbits; 651 int len, page_dirty; 652 int count = 0, done = 0, uptodate = 1; 653 + xfs_off_t f_offset = page_offset(page); 654 655 if (page->index != tindex) 656 goto fail; ··· 703 } 704 continue; 705 } 706 + 707 + if (!xfs_iomap_valid(mp, f_offset + p_offset)) { 708 done = 1; 709 continue; 710 } 711 + ASSERT(!(mp->iomap_flags & IOMAP_HOLE)); 712 + ASSERT(!(mp->iomap_flags & IOMAP_DELAY)); 713 714 + xfs_map_at_offset(bh, f_offset + p_offset, bbits, mp); 715 if (startio) { 716 
xfs_add_to_ioend(inode, bh, p_offset, 717 type, ioendp, done); ··· 805 int unmapped) /* also implies page uptodate */ 806 { 807 struct buffer_head *bh, *head; 808 + xfs_iomap_t iomap; 809 xfs_ioend_t *ioend = NULL, *iohead = NULL; 810 loff_t offset; 811 unsigned long p_offset = 0; 812 unsigned int type; 813 __uint64_t end_offset; 814 pgoff_t end_index, last_index, tlast; 815 + int flags, len, err, iomap_valid = 0, uptodate = 1; 816 int page_dirty, count = 0, trylock_flag = 0; 817 818 /* wait for other IO threads? */ ··· 854 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE; 855 page_dirty = p_offset / len; 856 857 bh = head = page_buffers(page); 858 offset = page_offset(page); 859 860 /* TODO: cleanup count and page_dirty */ 861 862 do { ··· 867 if (!buffer_uptodate(bh)) 868 uptodate = 0; 869 if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) { 870 + /* 871 + * the iomap is actually still valid, but the ioend 872 + * isn't. shouldn't happen too often. 873 + */ 874 + iomap_valid = 0; 875 continue; 876 } 877 878 + if (iomap_valid) 879 + iomap_valid = xfs_iomap_valid(&iomap, offset); 880 881 /* 882 * First case, map an unwritten extent and prepare for ··· 894 flags |= trylock_flag; 895 } 896 897 + if (!iomap_valid) { 898 err = xfs_map_blocks(inode, offset, len, &iomap, 899 flags); 900 if (err) 901 goto error; 902 + iomap_valid = xfs_iomap_valid(&iomap, offset); 903 } 904 + if (iomap_valid) { 905 + xfs_map_at_offset(bh, offset, 906 + inode->i_blkbits, &iomap); 907 if (startio) { 908 xfs_add_to_ioend(inode, bh, p_offset, 909 + type, &ioend, 910 + !iomap_valid); 911 } else { 912 set_buffer_dirty(bh); 913 unlock_buffer(bh); ··· 917 } 918 page_dirty--; 919 count++; 920 } 921 } else if ((buffer_uptodate(bh) || PageUptodate(page)) && 922 (unmapped || startio)) { ··· 931 * was found, and we are in a path where we 932 * need to write the whole page out. 
933 */ 934 + if (!iomap_valid) { 935 int size; 936 937 size = xfs_probe_unmapped_cluster( ··· 939 err = xfs_map_blocks(inode, offset, 940 size, &iomap, 941 BMAPI_WRITE|BMAPI_MMAP); 942 + if (err) 943 goto error; 944 + iomap_valid = xfs_iomap_valid(&iomap, 945 + offset); 946 } 947 + if (iomap_valid) { 948 + xfs_map_at_offset(bh, offset, 949 + inode->i_blkbits, 950 + &iomap); 951 if (startio) { 952 xfs_add_to_ioend(inode, 953 bh, p_offset, type, 954 + &ioend, !iomap_valid); 955 } else { 956 set_buffer_dirty(bh); 957 unlock_buffer(bh); ··· 961 } 962 page_dirty--; 963 count++; 964 } 965 } else if (startio) { 966 if (buffer_uptodate(bh) && ··· 970 ASSERT(buffer_mapped(bh)); 971 xfs_add_to_ioend(inode, 972 bh, p_offset, type, 973 + &ioend, !iomap_valid); 974 page_dirty--; 975 count++; 976 } else { 977 + iomap_valid = 0; 978 } 979 } else { 980 + iomap_valid = 0; 981 } 982 } 983 ··· 992 if (startio) 993 xfs_start_page_writeback(page, wbc, 1, count); 994 995 + if (ioend && iomap_valid) { 996 + offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >> 997 PAGE_CACHE_SHIFT; 998 tlast = min_t(pgoff_t, offset, last_index); 999 + xfs_cluster_write(inode, page->index + 1, &iomap, &ioend, 1000 wbc, startio, unmapped, tlast); 1001 } 1002