[XFS] clean up the xfs_offset_to_map interface Currently we pass a struct page and a relative offset into that page around, and the function returns the current xfs_iomap_t if the block at the specified offset fits into it, or a NULL pointer otherwise. This patch passes the full 64bit offset into the inode that all callers have anyway, and changes the return value to a simple boolean. Also the function gets a more descriptive name: xfs_iomap_valid.

SGI-PV: 947118
SGI-Modid: xfs-linux-melb:xfs-kern:203825a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>

authored by Christoph Hellwig and committed by Nathan Scott 1defeac9 10ce4444

+53 -83
+53 -83
fs/xfs/linux-2.6/xfs_aops.c
··· 228 228 return -error; 229 229 } 230 230 231 - /* 232 - * Finds the corresponding mapping in block @map array of the 233 - * given @offset within a @page. 234 - */ 235 - STATIC xfs_iomap_t * 236 - xfs_offset_to_map( 237 - struct page *page, 231 + STATIC inline int 232 + xfs_iomap_valid( 238 233 xfs_iomap_t *iomapp, 239 - unsigned long offset) 234 + loff_t offset) 240 235 { 241 - xfs_off_t full_offset; /* offset from start of file */ 242 - 243 - ASSERT(offset < PAGE_CACHE_SIZE); 244 - 245 - full_offset = page->index; /* NB: using 64bit number */ 246 - full_offset <<= PAGE_CACHE_SHIFT; /* offset from file start */ 247 - full_offset += offset; /* offset from page start */ 248 - 249 - if (full_offset < iomapp->iomap_offset) 250 - return NULL; 251 - if (iomapp->iomap_offset + (iomapp->iomap_bsize -1) >= full_offset) 252 - return iomapp; 253 - return NULL; 236 + return offset >= iomapp->iomap_offset && 237 + offset < iomapp->iomap_offset + iomapp->iomap_bsize; 254 238 } 255 239 256 240 /* ··· 445 461 446 462 STATIC void 447 463 xfs_map_at_offset( 448 - struct page *page, 449 464 struct buffer_head *bh, 450 - unsigned long offset, 465 + loff_t offset, 451 466 int block_bits, 452 - xfs_iomap_t *iomapp, 453 - xfs_ioend_t *ioend) 467 + xfs_iomap_t *iomapp) 454 468 { 455 469 xfs_daddr_t bn; 456 - xfs_off_t delta; 457 470 int sector_shift; 458 471 459 472 ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE)); 460 473 ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY)); 461 474 ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL); 462 475 463 - delta = page->index; 464 - delta <<= PAGE_CACHE_SHIFT; 465 - delta += offset; 466 - delta -= iomapp->iomap_offset; 467 - delta >>= block_bits; 468 - 469 476 sector_shift = block_bits - BBSHIFT; 470 - bn = iomapp->iomap_bn >> sector_shift; 471 - bn += delta; 472 - BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME)); 477 + bn = (iomapp->iomap_bn >> sector_shift) + 478 + ((offset - iomapp->iomap_offset) >> block_bits); 479 + 480 + ASSERT(bn || 
(iomapp->iomap_flags & IOMAP_REALTIME)); 473 481 ASSERT((bn << sector_shift) >= iomapp->iomap_bn); 474 482 475 483 lock_buffer(bh); ··· 545 569 if (tindex == tlast) { 546 570 pg_offset = 547 571 i_size_read(inode) & (PAGE_CACHE_SIZE - 1); 548 - if (!pg_offset) 572 + if (!pg_offset) { 573 + done = 1; 549 574 break; 575 + } 550 576 } else 551 577 pg_offset = PAGE_CACHE_SIZE; 552 578 ··· 563 585 } 564 586 565 587 total += len; 588 + tindex++; 566 589 } 567 590 568 591 pagevec_release(&pvec); ··· 617 638 struct inode *inode, 618 639 struct page *page, 619 640 loff_t tindex, 620 - xfs_iomap_t *iomapp, 641 + xfs_iomap_t *mp, 621 642 xfs_ioend_t **ioendp, 622 643 struct writeback_control *wbc, 623 644 int startio, 624 645 int all_bh) 625 646 { 626 647 struct buffer_head *bh, *head; 627 - xfs_iomap_t *mp = iomapp, *tmp; 628 648 unsigned long p_offset, end_offset; 629 649 unsigned int type; 630 650 int bbits = inode->i_blkbits; 631 651 int len, page_dirty; 632 652 int count = 0, done = 0, uptodate = 1; 653 + xfs_off_t f_offset = page_offset(page); 633 654 634 655 if (page->index != tindex) 635 656 goto fail; ··· 682 703 } 683 704 continue; 684 705 } 685 - tmp = xfs_offset_to_map(page, mp, p_offset); 686 - if (!tmp) { 706 + 707 + if (!xfs_iomap_valid(mp, f_offset + p_offset)) { 687 708 done = 1; 688 709 continue; 689 710 } 690 - ASSERT(!(tmp->iomap_flags & IOMAP_HOLE)); 691 - ASSERT(!(tmp->iomap_flags & IOMAP_DELAY)); 711 + ASSERT(!(mp->iomap_flags & IOMAP_HOLE)); 712 + ASSERT(!(mp->iomap_flags & IOMAP_DELAY)); 692 713 693 - xfs_map_at_offset(page, bh, p_offset, bbits, tmp, *ioendp); 714 + xfs_map_at_offset(bh, f_offset + p_offset, bbits, mp); 694 715 if (startio) { 695 716 xfs_add_to_ioend(inode, bh, p_offset, 696 717 type, ioendp, done); ··· 784 805 int unmapped) /* also implies page uptodate */ 785 806 { 786 807 struct buffer_head *bh, *head; 787 - xfs_iomap_t *iomp, iomap; 808 + xfs_iomap_t iomap; 788 809 xfs_ioend_t *ioend = NULL, *iohead = NULL; 789 810 loff_t offset; 
790 811 unsigned long p_offset = 0; 791 812 unsigned int type; 792 813 __uint64_t end_offset; 793 814 pgoff_t end_index, last_index, tlast; 794 - int flags, len, err, done = 1; 795 - int uptodate = 1; 815 + int flags, len, err, iomap_valid = 0, uptodate = 1; 796 816 int page_dirty, count = 0, trylock_flag = 0; 797 817 798 818 /* wait for other IO threads? */ ··· 832 854 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE; 833 855 page_dirty = p_offset / len; 834 856 835 - iomp = NULL; 836 857 bh = head = page_buffers(page); 837 858 offset = page_offset(page); 838 859 839 - /* TODO: fix up "done" variable and iomap pointer (boolean) */ 840 860 /* TODO: cleanup count and page_dirty */ 841 861 842 862 do { ··· 843 867 if (!buffer_uptodate(bh)) 844 868 uptodate = 0; 845 869 if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) { 846 - done = 1; 870 + /* 871 + * the iomap is actually still valid, but the ioend 872 + * isn't. shouldn't happen too often. 873 + */ 874 + iomap_valid = 0; 847 875 continue; 848 876 } 849 877 850 - if (iomp) { 851 - iomp = xfs_offset_to_map(page, &iomap, p_offset); 852 - done = (iomp == NULL); 853 - } 878 + if (iomap_valid) 879 + iomap_valid = xfs_iomap_valid(&iomap, offset); 854 880 855 881 /* 856 882 * First case, map an unwritten extent and prepare for ··· 872 894 flags |= trylock_flag; 873 895 } 874 896 875 - if (!iomp) { 876 - done = 1; 897 + if (!iomap_valid) { 877 898 err = xfs_map_blocks(inode, offset, len, &iomap, 878 899 flags); 879 900 if (err) 880 901 goto error; 881 - iomp = xfs_offset_to_map(page, &iomap, 882 - p_offset); 883 - done = (iomp == NULL); 902 + iomap_valid = xfs_iomap_valid(&iomap, offset); 884 903 } 885 - if (iomp) { 886 - xfs_map_at_offset(page, bh, p_offset, 887 - inode->i_blkbits, iomp, ioend); 904 + if (iomap_valid) { 905 + xfs_map_at_offset(bh, offset, 906 + inode->i_blkbits, &iomap); 888 907 if (startio) { 889 908 xfs_add_to_ioend(inode, bh, p_offset, 890 - type, &ioend, done); 909 + type, 
&ioend, 910 + !iomap_valid); 891 911 } else { 892 912 set_buffer_dirty(bh); 893 913 unlock_buffer(bh); ··· 893 917 } 894 918 page_dirty--; 895 919 count++; 896 - } else { 897 - done = 1; 898 920 } 899 921 } else if ((buffer_uptodate(bh) || PageUptodate(page)) && 900 922 (unmapped || startio)) { ··· 905 931 * was found, and we are in a path where we 906 932 * need to write the whole page out. 907 933 */ 908 - if (!iomp) { 934 + if (!iomap_valid) { 909 935 int size; 910 936 911 937 size = xfs_probe_unmapped_cluster( ··· 913 939 err = xfs_map_blocks(inode, offset, 914 940 size, &iomap, 915 941 BMAPI_WRITE|BMAPI_MMAP); 916 - if (err) { 942 + if (err) 917 943 goto error; 918 - } 919 - iomp = xfs_offset_to_map(page, &iomap, 920 - p_offset); 921 - done = (iomp == NULL); 944 + iomap_valid = xfs_iomap_valid(&iomap, 945 + offset); 922 946 } 923 - if (iomp) { 924 - xfs_map_at_offset(page, bh, p_offset, 925 - inode->i_blkbits, iomp, 926 - ioend); 947 + if (iomap_valid) { 948 + xfs_map_at_offset(bh, offset, 949 + inode->i_blkbits, 950 + &iomap); 927 951 if (startio) { 928 952 xfs_add_to_ioend(inode, 929 953 bh, p_offset, type, 930 - &ioend, done); 954 + &ioend, !iomap_valid); 931 955 } else { 932 956 set_buffer_dirty(bh); 933 957 unlock_buffer(bh); ··· 933 961 } 934 962 page_dirty--; 935 963 count++; 936 - } else { 937 - done = 1; 938 964 } 939 965 } else if (startio) { 940 966 if (buffer_uptodate(bh) && ··· 940 970 ASSERT(buffer_mapped(bh)); 941 971 xfs_add_to_ioend(inode, 942 972 bh, p_offset, type, 943 - &ioend, done); 973 + &ioend, !iomap_valid); 944 974 page_dirty--; 945 975 count++; 946 976 } else { 947 - done = 1; 977 + iomap_valid = 0; 948 978 } 949 979 } else { 950 - done = 1; 980 + iomap_valid = 0; 951 981 } 952 982 } 953 983 ··· 962 992 if (startio) 963 993 xfs_start_page_writeback(page, wbc, 1, count); 964 994 965 - if (ioend && iomp && !done) { 966 - offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >> 995 + if (ioend && iomap_valid) { 996 + offset = 
(iomap.iomap_offset + iomap.iomap_bsize - 1) >> 967 997 PAGE_CACHE_SHIFT; 968 998 tlast = min_t(pgoff_t, offset, last_index); 969 - xfs_cluster_write(inode, page->index + 1, iomp, &ioend, 999 + xfs_cluster_write(inode, page->index + 1, &iomap, &ioend, 970 1000 wbc, startio, unmapped, tlast); 971 1001 } 972 1002