[XFS] pass full 64bit offsets to xfs_add_to_ioend

SGI-PV: 947118
SGI-Modid: xfs-linux-melb:xfs-kern:203828a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>

Authored by Christoph Hellwig, committed by Nathan Scott (commit 7336cea8, parent d5cb48aa).
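The change itself is simple: xfs_add_to_ioend() previously took an unsigned int offset within the page and rebuilt the full file offset from bh->b_page->index internally; now its callers, which already track the 64-bit file offset while walking a page's buffer_heads, pass that xfs_off_t straight in. A minimal userspace sketch of the computation being hoisted out of the helper (PAGE_CACHE_SHIFT and the types mirror the kernel's; the values are invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12			/* 4k pages, as on most arches */
typedef int64_t xfs_off_t;			/* kernel xfs_off_t is a signed 64-bit */

int main(void)
{
	unsigned long page_index = 0x200000;	/* a page ~8GB into a big file */
	unsigned int p_offset = 512;		/* buffer's offset within the page */

	/* This is the computation the old xfs_add_to_ioend() did internally;
	 * the (xfs_off_t) cast before the shift is what keeps it 64-bit safe
	 * when unsigned long is only 32 bits wide. */
	xfs_off_t offset = ((xfs_off_t)page_index << PAGE_CACHE_SHIFT) + p_offset;

	printf("rebuilt 64-bit file offset: %lld\n", (long long)offset);

	/* After the patch the helper no longer rebuilds this value: callers
	 * already carry the full xfs_off_t and pass it straight through. */
	return 0;
}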

fs/xfs/linux-2.6/xfs_aops.c | +6 -12
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -414,7 +414,7 @@
 xfs_add_to_ioend(
 	struct inode		*inode,
 	struct buffer_head	*bh,
-	unsigned int		p_offset,
+	xfs_off_t		offset,
 	unsigned int		type,
 	xfs_ioend_t		**result,
 	int			need_ioend)
@@ -423,10 +423,7 @@
 
 	if (!ioend || need_ioend || type != ioend->io_type) {
 		xfs_ioend_t	*previous = *result;
-		xfs_off_t	offset;
 
-		offset = (xfs_off_t)bh->b_page->index << PAGE_CACHE_SHIFT;
-		offset += p_offset;
 		ioend = xfs_alloc_ioend(inode, type);
 		ioend->io_offset = offset;
 		ioend->io_buffer_head = bh;
@@ -663,7 +660,6 @@
 	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
 	page_dirty = p_offset / len;
 
-	p_offset = 0;
 	bh = head = page_buffers(page);
 	do {
 		if (offset >= end_offset)
@@ -690,7 +686,7 @@
 
 		xfs_map_at_offset(bh, offset, bbits, mp);
 		if (startio) {
-			xfs_add_to_ioend(inode, bh, p_offset,
+			xfs_add_to_ioend(inode, bh, offset,
 					type, ioendp, done);
 		} else {
 			set_buffer_dirty(bh);
@@ -703,7 +699,7 @@
 			type = 0;
 			if (buffer_mapped(bh) && all_bh && startio) {
 				lock_buffer(bh);
-				xfs_add_to_ioend(inode, bh, p_offset,
+				xfs_add_to_ioend(inode, bh, offset,
 						type, ioendp, done);
 				count++;
 				page_dirty--;
@@ -711,8 +707,7 @@
 					done = 1;
 			}
 		}
-	} while (offset += len, p_offset += len,
-		 (bh = bh->b_this_page) != head);
+	} while (offset += len, (bh = bh->b_this_page) != head);
 
 	if (uptodate && bh == head)
 		SetPageUptodate(page);
@@ -909,7 +904,7 @@
 			xfs_map_at_offset(bh, offset,
 					inode->i_blkbits, &iomap);
 			if (startio) {
-				xfs_add_to_ioend(inode, bh, p_offset,
+				xfs_add_to_ioend(inode, bh, offset,
 						type, &ioend,
 						!iomap_valid);
 			} else {
@@ -925,8 +920,7 @@
 
 		if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
 			ASSERT(buffer_mapped(bh));
-			xfs_add_to_ioend(inode,
-				bh, p_offset, type,
+			xfs_add_to_ioend(inode, bh, offset, type,
 					&ioend, !iomap_valid);
 			page_dirty--;
 			count++;
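For illustration, a hypothetical userspace mock of the caller pattern after the patch; add_to_ioend() and all the constants here are stand-ins, not the kernel API. The running offset starts at the page's position in the file and advances by the block size each iteration, which is exactly the bookkeeping the do/while loop above keeps with "offset += len" now that the separate p_offset counter is gone:

#include <stdint.h>
#include <stdio.h>

typedef int64_t xfs_off_t;			/* kernel xfs_off_t is a signed 64-bit */

/* Stand-in for xfs_add_to_ioend(): just report the offset it was given. */
static void add_to_ioend(xfs_off_t offset)
{
	printf("buffer at file offset %lld\n", (long long)offset);
}

int main(void)
{
	unsigned long page_index = 0x300000;	/* a page ~12GB into the file */
	unsigned int page_shift = 12;		/* PAGE_CACHE_SHIFT on 4k arches */
	unsigned int len = 1024;		/* 1 << inode->i_blkbits, e.g. 1k blocks */
	unsigned int nblocks = (1u << page_shift) / len;

	/* The caller starts from the page's 64-bit position in the file... */
	xfs_off_t offset = (xfs_off_t)page_index << page_shift;

	/* ...and advances it per buffer, as the patched do/while loop does
	 * with "offset += len", handing the full value to the helper. */
	for (unsigned int i = 0; i < nblocks; i++, offset += len)
		add_to_ioend(offset);

	return 0;
}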