Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

highmem: kill all __kmap_atomic() [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]

Signed-off-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Cong Wang <amwang@redhat.com>

Authored by Cong Wang and committed by Cong Wang
a24401bc 589973a7

+24 -29
+1 -1
arch/arm/include/asm/highmem.h
··· 57 57 #ifdef CONFIG_HIGHMEM 58 58 extern void *kmap(struct page *page); 59 59 extern void kunmap(struct page *page); 60 - extern void *__kmap_atomic(struct page *page); 60 + extern void *kmap_atomic(struct page *page); 61 61 extern void __kunmap_atomic(void *kvaddr); 62 62 extern void *kmap_atomic_pfn(unsigned long pfn); 63 63 extern struct page *kmap_atomic_to_page(const void *ptr);
+2 -2
arch/arm/mm/highmem.c
··· 36 36 } 37 37 EXPORT_SYMBOL(kunmap); 38 38 39 - void *__kmap_atomic(struct page *page) 39 + void *kmap_atomic(struct page *page) 40 40 { 41 41 unsigned int idx; 42 42 unsigned long vaddr; ··· 81 81 82 82 return (void *)vaddr; 83 83 } 84 - EXPORT_SYMBOL(__kmap_atomic); 84 + EXPORT_SYMBOL(kmap_atomic); 85 85 86 86 void __kunmap_atomic(void *kvaddr) 87 87 {
+1 -1
arch/frv/include/asm/highmem.h
··· 157 157 pagefault_enable(); 158 158 } 159 159 160 - void *__kmap_atomic(struct page *page); 160 + void *kmap_atomic(struct page *page); 161 161 void __kunmap_atomic(void *kvaddr); 162 162 163 163 #endif /* !__ASSEMBLY__ */
+2 -2
arch/frv/mm/highmem.c
··· 37 37 return virt_to_page(ptr); 38 38 } 39 39 40 - void *__kmap_atomic(struct page *page) 40 + void *kmap_atomic(struct page *page) 41 41 { 42 42 unsigned long paddr; 43 43 int type; ··· 64 64 return NULL; 65 65 } 66 66 } 67 - EXPORT_SYMBOL(__kmap_atomic); 67 + EXPORT_SYMBOL(kmap_atomic); 68 68 69 69 void __kunmap_atomic(void *kvaddr) 70 70 {
+1 -1
arch/mips/include/asm/highmem.h
··· 47 47 48 48 extern void *kmap(struct page *page); 49 49 extern void kunmap(struct page *page); 50 - extern void *__kmap_atomic(struct page *page); 50 + extern void *kmap_atomic(struct page *page); 51 51 extern void __kunmap_atomic(void *kvaddr); 52 52 extern void *kmap_atomic_pfn(unsigned long pfn); 53 53 extern struct page *kmap_atomic_to_page(void *ptr);
+2 -2
arch/mips/mm/highmem.c
··· 41 41 * kmaps are appropriate for short, tight code paths only. 42 42 */ 43 43 44 - void *__kmap_atomic(struct page *page) 44 + void *kmap_atomic(struct page *page) 45 45 { 46 46 unsigned long vaddr; 47 47 int idx, type; ··· 62 62 63 63 return (void*) vaddr; 64 64 } 65 - EXPORT_SYMBOL(__kmap_atomic); 65 + EXPORT_SYMBOL(kmap_atomic); 66 66 67 67 void __kunmap_atomic(void *kvaddr) 68 68 {
+1 -1
arch/mn10300/include/asm/highmem.h
··· 70 70 * be used in IRQ contexts, so in some (very limited) cases we need 71 71 * it. 72 72 */ 73 - static inline unsigned long __kmap_atomic(struct page *page) 73 + static inline unsigned long kmap_atomic(struct page *page) 74 74 { 75 75 unsigned long vaddr; 76 76 int idx, type;
+1 -1
arch/parisc/include/asm/cacheflush.h
··· 140 140 141 141 #define kunmap(page) kunmap_parisc(page_address(page)) 142 142 143 - static inline void *__kmap_atomic(struct page *page) 143 + static inline void *kmap_atomic(struct page *page) 144 144 { 145 145 pagefault_disable(); 146 146 return page_address(page);
+1 -1
arch/powerpc/include/asm/highmem.h
··· 79 79 kunmap_high(page); 80 80 } 81 81 82 - static inline void *__kmap_atomic(struct page *page) 82 + static inline void *kmap_atomic(struct page *page) 83 83 { 84 84 return kmap_atomic_prot(page, kmap_prot); 85 85 }
+1 -1
arch/sparc/include/asm/highmem.h
··· 70 70 kunmap_high(page); 71 71 } 72 72 73 - extern void *__kmap_atomic(struct page *page); 73 + extern void *kmap_atomic(struct page *page); 74 74 extern void __kunmap_atomic(void *kvaddr); 75 75 extern struct page *kmap_atomic_to_page(void *vaddr); 76 76
+2 -2
arch/sparc/mm/highmem.c
··· 30 30 #include <asm/tlbflush.h> 31 31 #include <asm/fixmap.h> 32 32 33 - void *__kmap_atomic(struct page *page) 33 + void *kmap_atomic(struct page *page) 34 34 { 35 35 unsigned long vaddr; 36 36 long idx, type; ··· 64 64 65 65 return (void*) vaddr; 66 66 } 67 - EXPORT_SYMBOL(__kmap_atomic); 67 + EXPORT_SYMBOL(kmap_atomic); 68 68 69 69 void __kunmap_atomic(void *kvaddr) 70 70 {
+1 -1
arch/tile/include/asm/highmem.h
··· 59 59 /* This macro is used only in map_new_virtual() to map "page". */ 60 60 #define kmap_prot page_to_kpgprot(page) 61 61 62 - void *__kmap_atomic(struct page *page); 62 + void *kmap_atomic(struct page *page); 63 63 void __kunmap_atomic(void *kvaddr); 64 64 void *kmap_atomic_pfn(unsigned long pfn); 65 65 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
+2 -2
arch/tile/mm/highmem.c
··· 224 224 } 225 225 EXPORT_SYMBOL(kmap_atomic_prot); 226 226 227 - void *__kmap_atomic(struct page *page) 227 + void *kmap_atomic(struct page *page) 228 228 { 229 229 /* PAGE_NONE is a magic value that tells us to check immutability. */ 230 230 return kmap_atomic_prot(page, PAGE_NONE); 231 231 } 232 - EXPORT_SYMBOL(__kmap_atomic); 232 + EXPORT_SYMBOL(kmap_atomic); 233 233 234 234 void __kunmap_atomic(void *kvaddr) 235 235 {
+1 -1
arch/x86/include/asm/highmem.h
··· 61 61 void kunmap(struct page *page); 62 62 63 63 void *kmap_atomic_prot(struct page *page, pgprot_t prot); 64 - void *__kmap_atomic(struct page *page); 64 + void *kmap_atomic(struct page *page); 65 65 void __kunmap_atomic(void *kvaddr); 66 66 void *kmap_atomic_pfn(unsigned long pfn); 67 67 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
+2 -2
arch/x86/mm/highmem_32.c
··· 51 51 } 52 52 EXPORT_SYMBOL(kmap_atomic_prot); 53 53 54 - void *__kmap_atomic(struct page *page) 54 + void *kmap_atomic(struct page *page) 55 55 { 56 56 return kmap_atomic_prot(page, kmap_prot); 57 57 } 58 - EXPORT_SYMBOL(__kmap_atomic); 58 + EXPORT_SYMBOL(kmap_atomic); 59 59 60 60 /* 61 61 * This is the same as kmap_atomic() but can map memory that doesn't
+3 -8
include/linux/highmem.h
··· 55 55 { 56 56 } 57 57 58 - static inline void *__kmap_atomic(struct page *page) 58 + static inline void *kmap_atomic(struct page *page) 59 59 { 60 60 pagefault_disable(); 61 61 return page_address(page); 62 62 } 63 - #define kmap_atomic_prot(page, prot) __kmap_atomic(page) 63 + #define kmap_atomic_prot(page, prot) kmap_atomic(page) 64 64 65 65 static inline void __kunmap_atomic(void *addr) 66 66 { ··· 121 121 #define NARG_(_2, _1, n, ...) n 122 122 #define NARG(...) NARG_(__VA_ARGS__, 2, 1, :) 123 123 124 - static inline void *kmap_atomic(struct page *page) 125 - { 126 - return __kmap_atomic(page); 127 - } 128 - 129 124 static inline void __deprecated *kmap_atomic_deprecated(struct page *page, 130 125 enum km_type km) 131 126 { 132 - return __kmap_atomic(page); 127 + return kmap_atomic(page); 133 128 } 134 129 135 130 #define kmap_atomic1(...) kmap_atomic(__VA_ARGS__)