@@ -932,6 +932,9 @@
 	if (!xfs_is_delayed_page(page, IOMAP_DELAY))
 		goto out_invalidate;
 
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		goto out_invalidate;
+
 	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
 		"page discard on page %p, inode 0x%llx, offset %llu.",
 			page, ip->i_ino, offset);
@@ -967,8 +964,10 @@
 
 		if (error) {
 			/* something screwed, just bail */
-			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
-			"page discard failed delalloc mapping lookup.");
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+			"page discard failed delalloc mapping lookup.");
+			}
 			break;
 		}
 		if (!nimaps) {
@@ -996,8 +991,10 @@
 		ASSERT(!flist.xbf_count && !flist.xbf_first);
 		if (error) {
 			/* something screwed, just bail */
-			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
 			"page discard unable to remove delalloc mapping.");
+			}
 			break;
 		}
 next_buffer:
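The hunks above (presumably the delalloc page-discard path, xfs_aops_discard_page()) make a forced shutdown bail out before the first alert is logged, and wrap the two remaining alerts in !XFS_FORCED_SHUTDOWN() checks. After a shutdown, every dirty delalloc page gets torn down through this path, so these failures are expected and the alerts would only flood the log. A minimal sketch of the resulting pattern, with a hypothetical helper name (discard_warn() is illustrative, not part of the patch):

/*
 * Sketch only: hypothetical helper showing the "stay quiet after a
 * forced shutdown" pattern that the hunks above apply inline.
 */
static void
discard_warn(
	struct xfs_inode	*ip,
	const char		*msg)
{
	/* Expected fallout of a shutdown is not worth an alert. */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return;
	xfs_fs_cmn_err(CE_ALERT, ip->i_mount, "%s", msg);
}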
fs/xfs/linux-2.6/xfs_buf.c: 4 insertions(+), 77 deletions(-)

--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -168,75 +168,6 @@
 }
 
 /*
- * Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-	void		*vm_addr;
-	struct a_list	*next;
-} a_list_t;
-
-static a_list_t		*as_free_head;
-static int		as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- * Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
	void		*addr)
-{
-	a_list_t	*aentry;
-
-#ifdef CONFIG_XEN
-	/*
-	 * Xen needs to be able to make sure it can get an exclusive
-	 * RO mapping of pages it wants to turn into a pagetable. If
-	 * a newly allocated page is also still being vmap()ed by xfs,
-	 * it will cause pagetable construction to fail. This is a
-	 * quick workaround to always eagerly unmap pages so that Xen
-	 * is happy.
-	 */
-	vunmap(addr);
-	return;
-#endif
-
-	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-	if (likely(aentry)) {
-		spin_lock(&as_lock);
-		aentry->next = as_free_head;
-		aentry->vm_addr = addr;
-		as_free_head = aentry;
-		as_list_len++;
-		spin_unlock(&as_lock);
-	} else {
-		vunmap(addr);
-	}
-}
-
-STATIC void
-purge_addresses(void)
-{
-	a_list_t	*aentry, *old;
-
-	if (as_free_head == NULL)
-		return;
-
-	spin_lock(&as_lock);
-	aentry = as_free_head;
-	as_free_head = NULL;
-	as_list_len = 0;
-	spin_unlock(&as_lock);
-
-	while ((old = aentry) != NULL) {
-		vunmap(aentry->vm_addr);
-		aentry = aentry->next;
-		kfree(old);
-	}
-}
-
-/*
  * Internal xfs_buf_t object manipulation
  */
 
@@ -268,7 +337,8 @@
 	uint		i;
 
 	if (xfs_buf_is_vmapped(bp))
-		free_address(bp->b_addr - bp->b_offset);
+		vm_unmap_ram(bp->b_addr - bp->b_offset,
+				bp->b_page_count);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -389,10 +457,8 @@
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		if (as_list_len > 64)
-			purge_addresses();
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+					-1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
@@ -1885,9 +1955,6 @@
 		xfs_buf_iostrategy(bp);
 		count++;
 	}
-
-	if (as_list_len > 0)
-		purge_addresses();
 	if (count)
 		blk_run_address_space(target->bt_mapping);
 
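The xfs_buf.c change deletes the homegrown vunmap() batching (free_address()/purge_addresses() and the as_free_head list, including the Xen eager-unmap workaround) in favour of the scalable vmap API, which does its own per-CPU caching and lazy TLB flushing. That is also why the delwri flush loop can drop its purge_addresses() call in the last hunk. A minimal usage sketch under the 2.6.3x-era signatures (the helper names are illustrative, not part of the patch):

#include <linux/mm_types.h>
#include <linux/vmalloc.h>

/*
 * Sketch only: map an array of pages into contiguous kernel virtual
 * space and tear the mapping down again.  vm_map_ram() batches and
 * lazily flushes unmaps internally, so no caller-side purge list is
 * needed; node == -1 means "any node".
 */
static void *map_pages_sketch(struct page **pages, unsigned int count)
{
	return vm_map_ram(pages, count, -1, PAGE_KERNEL);
}

static void unmap_pages_sketch(void *addr, unsigned int count)
{
	/* Must pass the same page count that was used at map time. */
	vm_unmap_ram(addr, count);
}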