Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: don't warn about page discards on shutdown
  xfs: use scalable vmap API
  xfs: remove old vmap cache

 fs/xfs/linux-2.6/xfs_aops.c | +10 -3
 fs/xfs/linux-2.6/xfs_buf.c  | +4 -77
 2 files changed, 14 insertions(+), 80 deletions(-)

--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -932,6 +932,9 @@
 	if (!xfs_is_delayed_page(page, IOMAP_DELAY))
 		goto out_invalidate;
 
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		goto out_invalidate;
+
 	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
 		"page discard on page %p, inode 0x%llx, offset %llu.",
 			page, ip->i_ino, offset);
@@ -964,8 +967,10 @@
 
 		if (error) {
 			/* something screwed, just bail */
-			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
-			"page discard failed delalloc mapping lookup.");
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+			"page discard failed delalloc mapping lookup.");
+			}
 			break;
 		}
 		if (!nimaps) {
@@ -991,8 +996,10 @@
 		ASSERT(!flist.xbf_count && !flist.xbf_first);
 		if (error) {
 			/* something screwed, just bail */
-			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
 			"page discard unable to remove delalloc mapping.");
+			}
 			break;
 		}
 next_buffer:
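The idea behind these hunks: once the filesystem has been forced into a shutdown state, discarding delalloc pages is the expected teardown path for every dirty page, not an anomaly, so per-page CE_ALERT messages would only flood the log. A hypothetical userspace sketch of the same gating pattern, not from the patch (shutdown_flag stands in for XFS_FORCED_SHUTDOWN(ip->i_mount), fprintf() for xfs_fs_cmn_err(CE_ALERT, ...)):

/*
 * Sketch only: suppress per-item alerts once a terminal failure
 * state has been entered, where the "failure" is the normal case.
 */
#include <stdbool.h>
#include <stdio.h>

static bool shutdown_flag;	/* set once the fs is forcibly shut down */

static void discard_page(unsigned long ino, unsigned long long offset)
{
	/* warn only when a discard is unexpected, i.e. not in shutdown */
	if (!shutdown_flag)
		fprintf(stderr,
			"page discard on inode 0x%lx, offset %llu.\n",
			ino, offset);
	/* ... tear down the delayed-allocation mapping either way ... */
}

int main(void)
{
	discard_page(0x85, 4096);	/* isolated failure: alert */
	shutdown_flag = true;
	discard_page(0x86, 8192);	/* shutdown teardown: silent */
	return 0;
}

Note that the discard work itself still runs in both cases; only the logging is gated, which is exactly what the XFS hunks above do.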
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -168,75 +168,6 @@
 }
 
 /*
- * Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-	void		*vm_addr;
-	struct a_list	*next;
-} a_list_t;
-
-static a_list_t		*as_free_head;
-static int		as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- * Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-	void		*addr)
-{
-	a_list_t	*aentry;
-
-#ifdef CONFIG_XEN
-	/*
-	 * Xen needs to be able to make sure it can get an exclusive
-	 * RO mapping of pages it wants to turn into a pagetable. If
-	 * a newly allocated page is also still being vmap()ed by xfs,
-	 * it will cause pagetable construction to fail. This is a
-	 * quick workaround to always eagerly unmap pages so that Xen
-	 * is happy.
-	 */
-	vunmap(addr);
-	return;
-#endif
-
-	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-	if (likely(aentry)) {
-		spin_lock(&as_lock);
-		aentry->next = as_free_head;
-		aentry->vm_addr = addr;
-		as_free_head = aentry;
-		as_list_len++;
-		spin_unlock(&as_lock);
-	} else {
-		vunmap(addr);
-	}
-}
-
-STATIC void
-purge_addresses(void)
-{
-	a_list_t	*aentry, *old;
-
-	if (as_free_head == NULL)
-		return;
-
-	spin_lock(&as_lock);
-	aentry = as_free_head;
-	as_free_head = NULL;
-	as_list_len = 0;
-	spin_unlock(&as_lock);
-
-	while ((old = aentry) != NULL) {
-		vunmap(aentry->vm_addr);
-		aentry = aentry->next;
-		kfree(old);
-	}
-}
-
-/*
  * Internal xfs_buf_t object manipulation
  */
 
@@ -337,7 +268,8 @@
 	uint		i;
 
 	if (xfs_buf_is_vmapped(bp))
-		free_address(bp->b_addr - bp->b_offset);
+		vm_unmap_ram(bp->b_addr - bp->b_offset,
+				bp->b_page_count);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -457,10 +389,8 @@
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		if (as_list_len > 64)
-			purge_addresses();
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+					-1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
@@ -1955,9 +1885,6 @@
 		xfs_buf_iostrategy(bp);
 		count++;
 	}
-
-	if (as_list_len > 0)
-		purge_addresses();
 	if (count)
 		blk_run_address_space(target->bt_mapping);
 
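Taken together, these hunks replace XFS's private vunmap-batching cache (free_address()/purge_addresses() above) with the scalable vm_map_ram()/vm_unmap_ram() API, which batches and lazily flushes unmaps internally, so the as_list bookkeeping and the Xen eager-unmap workaround become redundant. A minimal sketch of the API shape follows; this is a hypothetical demo module, not part of the patch, and it uses the 2.6.3x-era signature in which vm_map_ram() still takes a pgprot_t:

/*
 * vmap_demo (hypothetical): map a few pages with vm_map_ram() and
 * tear the mapping down with vm_unmap_ram(), mirroring how
 * _xfs_buf_map_pages() and _xfs_buf_free_pages() use the API above.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#define NPAGES 4

static int __init vmap_demo_init(void)
{
	struct page	*pages[NPAGES];
	void		*addr;
	int		i, ret = -ENOMEM;

	for (i = 0; i < NPAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}

	/* node == -1: no NUMA node preference, as in the XFS hunk above */
	addr = vm_map_ram(pages, NPAGES, -1, PAGE_KERNEL);
	if (!addr)
		goto out_free;

	memset(addr, 0, NPAGES * PAGE_SIZE);	/* one contiguous view */
	pr_info("vmap_demo: mapped %d pages at %p\n", NPAGES, addr);

	/* unlike vunmap(), the caller must hand the page count back */
	vm_unmap_ram(addr, NPAGES);
	ret = 0;

out_free:
	while (--i >= 0)
		__free_page(pages[i]);
	return ret;
}

static void __exit vmap_demo_exit(void)
{
}

module_init(vmap_demo_init);
module_exit(vmap_demo_exit);
MODULE_LICENSE("GPL");

The count argument to vm_unmap_ram() is why the XFS unmap hunk now passes bp->b_page_count alongside the address where vunmap() used to take the address alone: the scalable API does not keep a global address-to-size registry, and defers TLB flushing so callers no longer need to batch unmaps themselves.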