arch/tile: support new kunmap_atomic() naming convention.

See commit 597781f3e51f48ef8e67be772196d9e9673752c4.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>

+3 -3
+1 -1
arch/tile/include/asm/highmem.h
@@ -60,7 +60,7 @@
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)
 
-void kunmap_atomic(void *kvaddr, enum km_type type);
+void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
+2 -2
arch/tile/mm/highmem.c
@@ -276,7 +276,7 @@
 }
 EXPORT_SYMBOL(kmap_atomic);
 
-void kunmap_atomic(void *kvaddr, enum km_type type)
+void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
@@ -300,7 +300,7 @@
 	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_notypecheck);
 
 /*
  * This API is supposed to allow us to map memory without a "struct page".