
mm: stack based kmap_atomic()

Keep the current interface but ignore the km_type argument and use a
stack-based approach instead.

The advantage is that we get rid of crappy code like:

#define __KM_PTE \
(in_nmi() ? KM_NMI_PTE : \
in_irq() ? KM_IRQ_PTE : \
KM_PTE0)

and in general we can stop worrying about which context we're in and which
kmap slots might be appropriate for it.
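
The entire replacement for the per-context slot bookkeeping is a small
per-CPU stack index (the include/linux/highmem.h hunk below adds exactly
this); condensed here for reference:

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
    /* grab the next free slot on this CPU's stack */
    int idx = __get_cpu_var(__kmap_atomic_idx)++;
#ifdef CONFIG_DEBUG_HIGHMEM
    WARN_ON_ONCE(in_irq() && !irqs_disabled());
    BUG_ON(idx > KM_TYPE_NR);
#endif
    return idx;
}

static inline int kmap_atomic_idx_pop(void)
{
    /* release the most recently pushed slot */
    int idx = --__get_cpu_var(__kmap_atomic_idx);
#ifdef CONFIG_DEBUG_HIGHMEM
    BUG_ON(idx < 0);
#endif
    return idx;
}

Since task, softirq, hardirq and NMI contexts strictly nest on a given CPU,
each context pops exactly the slot it pushed, so no per-context KM_* naming
is needed.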

The downside is that FRV kmap_atomic() gets more expensive.

For now we use a CPP trick suggested by Andrew:

#define kmap_atomic(page, args...) __kmap_atomic(page)

to avoid having to touch all kmap_atomic() users in a single patch.
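
To illustrate, a hypothetical call site (not from this patch) compiles
unchanged in either form, because the variadic macro simply discards
anything after the page argument:

static void example_clear_highpage(struct page *page)
{
    void *vaddr = kmap_atomic(page, KM_USER0);  /* extra arg swallowed */

    clear_page(vaddr);
    kunmap_atomic(vaddr, KM_USER0);             /* likewise ignored */
}

static void example_clear_highpage_new(struct page *page)
{
    void *vaddr = kmap_atomic(page);            /* one-argument form */

    clear_page(vaddr);
    kunmap_atomic(vaddr);
}

Call sites can then be converted to the one-argument form at leisure, as the
driver updates below already do.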

[ not compiled on:
- mn10300: the arch doesn't actually build with highmem to begin with ]

[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix up drivers/gpu/drm/i915/intel_overlay.c]
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Peter Zijlstra, committed by Linus Torvalds
3e4d3af5 61ecdb80

+371 -376
+3 -3
arch/arm/include/asm/highmem.h
···
 #ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);
 #endif
+14 -9
arch/arm/mm/highmem.c
···
 }
 EXPORT_SYMBOL(kunmap);

-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
    unsigned int idx;
    unsigned long vaddr;
    void *kmap;
+   int type;

    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);
-
-   debug_kmap_atomic(type);

 #ifdef CONFIG_DEBUG_HIGHMEM
    /*
···
    kmap = kmap_high_get(page);
    if (kmap)
        return kmap;
+
+   type = kmap_atomic_idx_push();

    idx = type + KM_TYPE_NR * smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
···
    return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);

-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
    unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-   unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+   int idx, type;

    if (kvaddr >= (void *)FIXADDR_START) {
+       type = kmap_atomic_idx_pop();
+       idx = type + KM_TYPE_NR * smp_processor_id();
+
        if (cache_is_vivt())
            __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
···
    }
    pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);

-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-   unsigned int idx;
    unsigned long vaddr;
+   int idx, type;

    pagefault_disable();

+   type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR * smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
+5 -20
arch/frv/include/asm/highmem.h
···
    (void *) damlr; \
 })

-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
 {
    unsigned long paddr;

    pagefault_disable();
-   debug_kmap_atomic(type);
    paddr = page_to_phys(page);

    switch (type) {
···
    case 1:     return __kmap_atomic_primary(1, paddr, 3);
    case 2:     return __kmap_atomic_primary(2, paddr, 4);
    case 3:     return __kmap_atomic_primary(3, paddr, 5);
-   case 4:     return __kmap_atomic_primary(4, paddr, 6);
-   case 5:     return __kmap_atomic_primary(5, paddr, 7);
-   case 6:     return __kmap_atomic_primary(6, paddr, 8);
-   case 7:     return __kmap_atomic_primary(7, paddr, 9);
-   case 8:     return __kmap_atomic_primary(8, paddr, 10);
-
-   case 9 ... 9 + NR_TLB_LINES - 1:
-       return __kmap_atomic_secondary(type - 9, paddr);

    default:
        BUG();
···
    asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
 } while(0)

-static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
 {
    switch (type) {
    case 0:     __kunmap_atomic_primary(0, 2);  break;
    case 1:     __kunmap_atomic_primary(1, 3);  break;
    case 2:     __kunmap_atomic_primary(2, 4);  break;
    case 3:     __kunmap_atomic_primary(3, 5);  break;
-   case 4:     __kunmap_atomic_primary(4, 6);  break;
-   case 5:     __kunmap_atomic_primary(5, 7);  break;
-   case 6:     __kunmap_atomic_primary(6, 8);  break;
-   case 7:     __kunmap_atomic_primary(7, 9);  break;
-   case 8:     __kunmap_atomic_primary(8, 10); break;
-
-   case 9 ... 9 + NR_TLB_LINES - 1:
-       __kunmap_atomic_secondary(type - 9, kvaddr);
-       break;

    default:
        BUG();
    }
    pagefault_enable();
 }
+
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);

 #endif /* !__ASSEMBLY__ */
+2 -2
arch/frv/mb93090-mb00/pci-dma.c
···
    dampr2 = __get_DAMPR(2);

    for (i = 0; i < nents; i++) {
-       vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE);
+       vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);

        frv_dcache_writeback((unsigned long) vaddr,
                     (unsigned long) vaddr + PAGE_SIZE);

    }

-   kunmap_atomic(vaddr, __KM_CACHE);
+   kunmap_atomic_primary(vaddr, __KM_CACHE);
    if (dampr2) {
        __set_DAMPR(2, dampr2);
        __set_IAMPR(2, dampr2);
+4 -4
arch/frv/mm/cache-page.c
···
    dampr2 = __get_DAMPR(2);

-   vaddr = kmap_atomic(page, __KM_CACHE);
+   vaddr = kmap_atomic_primary(page, __KM_CACHE);

    frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);

-   kunmap_atomic(vaddr, __KM_CACHE);
+   kunmap_atomic_primary(vaddr, __KM_CACHE);

    if (dampr2) {
        __set_DAMPR(2, dampr2);
···
    dampr2 = __get_DAMPR(2);

-   vaddr = kmap_atomic(page, __KM_CACHE);
+   vaddr = kmap_atomic_primary(page, __KM_CACHE);

    start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
    frv_cache_wback_inv(start, start + len);

-   kunmap_atomic(vaddr, __KM_CACHE);
+   kunmap_atomic_primary(vaddr, __KM_CACHE);

    if (dampr2) {
        __set_DAMPR(2, dampr2);
+50
arch/frv/mm/highmem.c
···
 {
    return virt_to_page(ptr);
 }
+
+void *__kmap_atomic(struct page *page)
+{
+   unsigned long paddr;
+   int type;
+
+   pagefault_disable();
+   type = kmap_atomic_idx_push();
+   paddr = page_to_phys(page);
+
+   switch (type) {
+   /*
+    * The first 4 primary maps are reserved for architecture code
+    */
+   case 0:     return __kmap_atomic_primary(4, paddr, 6);
+   case 1:     return __kmap_atomic_primary(5, paddr, 7);
+   case 2:     return __kmap_atomic_primary(6, paddr, 8);
+   case 3:     return __kmap_atomic_primary(7, paddr, 9);
+   case 4:     return __kmap_atomic_primary(8, paddr, 10);
+
+   case 5 ... 5 + NR_TLB_LINES - 1:
+       return __kmap_atomic_secondary(type - 5, paddr);
+
+   default:
+       BUG();
+       return NULL;
+   }
+}
+EXPORT_SYMBOL(__kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+   int type = kmap_atomic_idx_pop();
+   switch (type) {
+   case 0:     __kunmap_atomic_primary(4, 6);  break;
+   case 1:     __kunmap_atomic_primary(5, 7);  break;
+   case 2:     __kunmap_atomic_primary(6, 8);  break;
+   case 3:     __kunmap_atomic_primary(7, 9);  break;
+   case 4:     __kunmap_atomic_primary(8, 10); break;
+
+   case 5 ... 5 + NR_TLB_LINES - 1:
+       __kunmap_atomic_secondary(type - 5, kvaddr);
+       break;
+
+   default:
+       BUG();
+   }
+   pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+6 -12
arch/mips/include/asm/highmem.h
···
 extern void * kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);

-extern void *__kmap(struct page *page);
-extern void __kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page, enum km_type type);
-extern void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-extern struct page *__kmap_atomic_to_page(void *ptr);
-
-#define kmap            __kmap
-#define kunmap          __kunmap
-#define kmap_atomic     __kmap_atomic
-#define kunmap_atomic_notypecheck   __kunmap_atomic_notypecheck
-#define kmap_atomic_to_page __kmap_atomic_to_page
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);

 #define flush_cache_kmaps() flush_cache_all()
+27 -23
arch/mips/mm/highmem.c
···
 unsigned long highstart_pfn, highend_pfn;

-void *__kmap(struct page *page)
+void *kmap(struct page *page)
 {
    void *addr;
···
    return addr;
 }
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);

-void __kunmap(struct page *page)
+void kunmap(struct page *page)
 {
    BUG_ON(in_interrupt());
    if (!PageHighMem(page))
        return;
    kunmap_high(page);
 }
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);

 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
···
  * kmaps are appropriate for short, tight code paths only.
  */

-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-   enum fixed_addresses idx;
    unsigned long vaddr;
+   int idx, type;

    /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);

-   debug_kmap_atomic(type);
+   type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR*smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
···
 }
 EXPORT_SYMBOL(__kmap_atomic);

-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
    unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-   enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+   int type;

    if (vaddr < FIXADDR_START) { // FIXME
        pagefault_enable();
        return;
    }

-   BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+   type = kmap_atomic_idx_pop();
+#ifdef CONFIG_DEBUG_HIGHMEM
+   {
+       int idx = type + KM_TYPE_NR * smp_processor_id();

-   /*
-    * force other mappings to Oops if they'll try to access
-    * this pte without first remap it
-    */
-   pte_clear(&init_mm, vaddr, kmap_pte-idx);
-   local_flush_tlb_one(vaddr);
+       BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+       /*
+        * force other mappings to Oops if they'll try to access
+        * this pte without first remap it
+        */
+       pte_clear(&init_mm, vaddr, kmap_pte-idx);
+       local_flush_tlb_one(vaddr);
+   }
 #endif
-
    pagefault_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);

 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-   enum fixed_addresses idx;
    unsigned long vaddr;
+   int idx, type;

    pagefault_disable();

-   debug_kmap_atomic(type);
+   type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR*smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
···
    return (void*) vaddr;
 }

-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
 {
    unsigned long idx, vaddr = (unsigned long)ptr;
    pte_t *pte;
+26 -16
arch/mn10300/include/asm/highmem.h
···
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
+static inline unsigned long __kmap_atomic(struct page *page)
 {
-   enum fixed_addresses idx;
    unsigned long vaddr;
+   int idx, type;

+   pagefault_disable();
    if (page < highmem_start_page)
        return page_address(page);

-   debug_kmap_atomic(type);
+   type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR * smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #if HIGHMEM_DEBUG
···
    return vaddr;
 }

-static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type type)
+static inline void __kunmap_atomic(unsigned long vaddr)
 {
-#if HIGHMEM_DEBUG
-   enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
+   int type;

-   if (vaddr < FIXADDR_START) /* FIXME */
+   if (vaddr < FIXADDR_START) { /* FIXME */
+       pagefault_enable();
        return;
+   }

-   if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
-       BUG();
+   type = kmap_atomic_idx_pop();

-   /*
-    * force other mappings to Oops if they'll try to access
-    * this pte without first remap it
-    */
-   pte_clear(kmap_pte - idx);
-   __flush_tlb_one(vaddr);
+#if HIGHMEM_DEBUG
+   {
+       unsigned int idx;
+       idx = type + KM_TYPE_NR * smp_processor_id();
+
+       if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
+           BUG();
+
+       /*
+        * force other mappings to Oops if they'll try to access
+        * this pte without first remap it
+        */
+       pte_clear(kmap_pte - idx);
+       __flush_tlb_one(vaddr);
+   }
 #endif
+   pagefault_enable();
 }
-
 #endif /* __KERNEL__ */

 #endif /* _ASM_HIGHMEM_H */
+4 -5
arch/powerpc/include/asm/highmem.h
···
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, enum km_type type,
-                  pgprot_t prot);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void __kunmap_atomic(void *kvaddr);

 static inline void *kmap(struct page *page)
 {
···
    kunmap_high(page);
 }

-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *__kmap_atomic(struct page *page)
 {
-   return kmap_atomic_prot(page, type, kmap_prot);
+   return kmap_atomic_prot(page, kmap_prot);
 }

 static inline struct page *kmap_atomic_to_page(void *ptr)
+21 -14
arch/powerpc/mm/highmem.c
···
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-   unsigned int idx;
    unsigned long vaddr;
+   int idx, type;

    /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);

-   debug_kmap_atomic(type);
+   type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR*smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
···
 }
 EXPORT_SYMBOL(kmap_atomic_prot);

-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
    unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-   enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+   int type;

    if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
        pagefault_enable();
        return;
    }

-   BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+   type = kmap_atomic_idx_pop();

-   /*
-    * force other mappings to Oops if they'll try to access
-    * this pte without first remap it
-    */
-   pte_clear(&init_mm, vaddr, kmap_pte-idx);
-   local_flush_tlb_page(NULL, vaddr);
+#ifdef CONFIG_DEBUG_HIGHMEM
+   {
+       unsigned int idx;
+
+       idx = type + KM_TYPE_NR * smp_processor_id();
+       BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+       /*
+        * force other mappings to Oops if they'll try to access
+        * this pte without first remap it
+        */
+       pte_clear(&init_mm, vaddr, kmap_pte-idx);
+       local_flush_tlb_page(NULL, vaddr);
+   }
 #endif
    pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
+2 -2
arch/sparc/include/asm/highmem.h
···
    kunmap_high(page);
 }

-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
 extern struct page *kmap_atomic_to_page(void *vaddr);

 #define flush_cache_kmaps()    flush_cache_all()
+27 -21
arch/sparc/mm/highmem.c
···
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>

-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-   unsigned long idx;
    unsigned long vaddr;
+   long idx, type;

    /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);

-   debug_kmap_atomic(type);
+   type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR*smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
···
    return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);

-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
    unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-   unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
+   int type;

    if (vaddr < FIXADDR_START) { // FIXME
        pagefault_enable();
        return;
    }

-   BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+   type = kmap_atomic_idx_pop();

-   /* XXX Fix - Anton */
+#ifdef CONFIG_DEBUG_HIGHMEM
+   {
+       unsigned long idx;
+
+       idx = type + KM_TYPE_NR * smp_processor_id();
+       BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+
+       /* XXX Fix - Anton */
 #if 0
-   __flush_cache_one(vaddr);
+       __flush_cache_one(vaddr);
 #else
-   flush_cache_all();
+       flush_cache_all();
 #endif

-   /*
-    * force other mappings to Oops if they'll try to access
-    * this pte without first remap it
-    */
-   pte_clear(&init_mm, vaddr, kmap_pte-idx);
-   /* XXX Fix - Anton */
+       /*
+        * force other mappings to Oops if they'll try to access
+        * this pte without first remap it
+        */
+       pte_clear(&init_mm, vaddr, kmap_pte-idx);
+       /* XXX Fix - Anton */
 #if 0
-   __flush_tlb_one(vaddr);
+       __flush_tlb_one(vaddr);
 #else
-   flush_tlb_all();
+       flush_tlb_all();
 #endif
+   }
 #endif
-
    pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);

 /* We may be fed a pagetable here by ptep_to_xxx and others. */
 struct page *kmap_atomic_to_page(void *ptr)
+5 -5
arch/tile/include/asm/highmem.h
···
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)

-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
 void kmap_atomic_fix_kpte(struct page *page, int finished);

 #define flush_cache_kmaps()    do { } while (0)
+23 -62
arch/tile/mm/highmem.c
···
 }
 EXPORT_SYMBOL(kunmap);

-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-   static unsigned warn_count = 10;
-
-   if (unlikely(warn_count == 0))
-       return;
-
-   if (unlikely(in_interrupt())) {
-       if (in_irq()) {
-           if (type != KM_IRQ0 && type != KM_IRQ1 &&
-               type != KM_BIO_SRC_IRQ &&
-               /* type != KM_BIO_DST_IRQ && */
-               type != KM_BOUNCE_READ) {
-               WARN_ON(1);
-               warn_count--;
-           }
-       } else if (!irqs_disabled()) {  /* softirq */
-           if (type != KM_IRQ0 && type != KM_IRQ1 &&
-               type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-               type != KM_SKB_SUNRPC_DATA &&
-               type != KM_SKB_DATA_SOFTIRQ &&
-               type != KM_BOUNCE_READ) {
-               WARN_ON(1);
-               warn_count--;
-           }
-       }
-   }
-
-   if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-       type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
-       if (!irqs_disabled()) {
-           WARN_ON(1);
-           warn_count--;
-       }
-   } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-       if (irq_count() == 0 && !irqs_disabled()) {
-           WARN_ON(1);
-           warn_count--;
-       }
-   }
-#endif
-}
-
 /*
  * Describe a single atomic mapping of a page on a given cpu at a
  * given address, and allow it to be linked into a list.
···
  * When holding an atomic kmap is is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-   enum fixed_addresses idx;
    unsigned long vaddr;
+   int idx, type;
    pte_t *pte;

    /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
···
    if (!PageHighMem(page))
        return page_address(page);

-   debug_kmap_atomic_prot(type);
-
+   type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR*smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    pte = kmap_get_pte(vaddr);
···
 }
 EXPORT_SYMBOL(kmap_atomic_prot);

-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
    /* PAGE_NONE is a magic value that tells us to check immutability. */
-   return kmap_atomic_prot(page, type, PAGE_NONE);
+   return kmap_atomic_prot(page, PAGE_NONE);
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);

-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
    unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-   enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

-   /*
-    * Force other mappings to Oops if they try to access this pte without
-    * first remapping it.  Keeping stale mappings around is a bad idea.
-    */
-   if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+   if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+       vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
        pte_t *pte = kmap_get_pte(vaddr);
        pte_t pteval = *pte;
+       int idx, type;
+
+       type = kmap_atomic_idx_pop();
+       idx = type + KM_TYPE_NR*smp_processor_id();
+
+       /*
+        * Force other mappings to Oops if they try to access this pte
+        * without first remapping it.  Keeping stale mappings around
+        * is a bad idea.
+        */
        BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
        kmap_atomic_unregister(pte_page(pteval), vaddr);
        kpte_clear_flush(pte, vaddr);
···
    arch_flush_lazy_mmu_mode();
    pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);

 /*
  * This API is supposed to allow us to map memory without a "struct page".
  * Currently we don't support this, though this may change in the future.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-   return kmap_atomic(pfn_to_page(pfn), type);
+   return kmap_atomic(pfn_to_page(pfn));
 }
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-   return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+   return kmap_atomic_prot(pfn_to_page(pfn), prot);
 }

 struct page *kmap_atomic_to_page(void *ptr)
+6 -5
arch/x86/include/asm/highmem.h
···
 void *kmap(struct page *page);
 void kunmap(struct page *page);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);

 #define flush_cache_kmaps()    do { } while (0)
+2 -2
arch/x86/include/asm/iomap.h
···
 #include <asm/tlbflush.h>

 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);

 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr);

 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
+1 -1
arch/x86/kernel/crash_dump_32.c
···
    if (!is_crashed_pfn_valid(pfn))
        return -EFAULT;

-   vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+   vaddr = kmap_atomic_pfn(pfn);

    if (!userbuf) {
        memcpy(buf, (vaddr + offset), csize);
+45 -38
arch/x86/mm/highmem_32.c
···
        return page_address(page);
    return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);

 void kunmap(struct page *page)
 {
···
        return;
    kunmap_high(page);
 }
+EXPORT_SYMBOL(kunmap);

 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
···
  * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-   enum fixed_addresses idx;
    unsigned long vaddr;
+   int idx, type;

    /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
    pagefault_disable();
···
    if (!PageHighMem(page))
        return page_address(page);

-   debug_kmap_atomic(type);
-
+   type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR*smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    BUG_ON(!pte_none(*(kmap_pte-idx)));
···
    return (void *)vaddr;
 }
+EXPORT_SYMBOL(kmap_atomic_prot);

-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-   return kmap_atomic_prot(page, type, kmap_prot);
+   return kmap_atomic_prot(page, kmap_prot);
 }
-
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
-{
-   unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-   enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-   /*
-    * Force other mappings to Oops if they'll try to access this pte
-    * without first remap it. Keeping stale mappings around is a bad idea
-    * also, in case the page changes cacheability attributes or becomes
-    * a protected page in a hypervisor.
-    */
-   if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
-       kpte_clear_flush(kmap_pte-idx, vaddr);
-   else {
-#ifdef CONFIG_DEBUG_HIGHMEM
-       BUG_ON(vaddr < PAGE_OFFSET);
-       BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
-   }
-
-   pagefault_enable();
-}
+EXPORT_SYMBOL(__kmap_atomic);

 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-   return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
+   return kmap_atomic_prot_pfn(pfn, kmap_prot);
 }
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
+
+void __kunmap_atomic(void *kvaddr)
+{
+   unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+
+   if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+       vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+       int idx, type;
+
+       type = kmap_atomic_idx_pop();
+       idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+       WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+       /*
+        * Force other mappings to Oops if they'll try to access this
+        * pte without first remap it. Keeping stale mappings around
+        * is a bad idea also, in case the page changes cacheability
+        * attributes or becomes a protected page in a hypervisor.
+        */
+       kpte_clear_flush(kmap_pte-idx, vaddr);
+   }
+#ifdef CONFIG_DEBUG_HIGHMEM
+   else {
+       BUG_ON(vaddr < PAGE_OFFSET);
+       BUG_ON(vaddr >= (unsigned long)high_memory);
+   }
+#endif
+
+   pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);

 struct page *kmap_atomic_to_page(void *ptr)
 {
···
    pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
    return pte_page(*pte);
 }
-
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
-EXPORT_SYMBOL(kmap_atomic_prot);
 EXPORT_SYMBOL(kmap_atomic_to_page);

 void __init set_highmem_pages_init(void)
+25 -17
arch/x86/mm/iomap_32.c
···
 }
 EXPORT_SYMBOL_GPL(iomap_create_wc);

-void
-iomap_free(resource_size_t base, unsigned long size)
+void iomap_free(resource_size_t base, unsigned long size)
 {
    io_free_memtype(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);

-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-   enum fixed_addresses idx;
    unsigned long vaddr;
+   int idx, type;

    pagefault_disable();

-   debug_kmap_atomic(type);
+   type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR * smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
···
 }

 /*
- * Map 'pfn' using fixed map 'type' and protections 'prot'
+ * Map 'pfn' using protections 'prot'
  */
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
    /*
     * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
···
    if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
        prot = PAGE_KERNEL_UC_MINUS;

-   return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
+   return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);

 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr)
 {
    unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-   enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

-   /*
-    * Force other mappings to Oops if they'll try to access this pte
-    * without first remap it. Keeping stale mappings around is a bad idea
-    * also, in case the page changes cacheability attributes or becomes
-    * a protected page in a hypervisor.
-    */
-   if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+   if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+       vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+       int idx, type;
+
+       type = kmap_atomic_idx_pop();
+       idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+       WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+       /*
+        * Force other mappings to Oops if they'll try to access this
+        * pte without first remap it. Keeping stale mappings around
+        * is a bad idea also, in case the page changes cacheability
+        * attributes or becomes a protected page in a hypervisor.
+        */
        kpte_clear_flush(kmap_pte-idx, vaddr);
+   }

    pagefault_enable();
 }
+12 -13
drivers/gpu/drm/i915/i915_gem.c
···
    char __iomem *vaddr;
    int unwritten;

-   vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+   vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
    if (vaddr == NULL)
        return -ENOMEM;
    unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
-   kunmap_atomic(vaddr, KM_USER0);
+   kunmap_atomic(vaddr);

    if (unwritten)
        return -EFAULT;
···
    char *vaddr_atomic;
    unsigned long unwritten;

-   vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
+   vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
    unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                              user_data, length);
-   io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
+   io_mapping_unmap_atomic(vaddr_atomic);
    if (unwritten)
        return -EFAULT;
    return 0;
···
    char __iomem *vaddr;
    unsigned long unwritten;

-   vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+   vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
    if (vaddr == NULL)
        return -ENOMEM;
    unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
-   kunmap_atomic(vaddr, KM_USER0);
+   kunmap_atomic(vaddr);

    if (unwritten)
        return -EFAULT;
···
        reloc_offset = obj_priv->gtt_offset + reloc->offset;
        reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                              (reloc_offset &
-                              ~(PAGE_SIZE - 1)),
-                             KM_USER0);
+                              ~(PAGE_SIZE - 1)));
        reloc_entry = (uint32_t __iomem *)(reloc_page +
                           (reloc_offset & (PAGE_SIZE - 1)));
        reloc_val = target_obj_priv->gtt_offset + reloc->delta;
···
              readl(reloc_entry), reloc_val);
 #endif
        writel(reloc_val, reloc_entry);
-       io_mapping_unmap_atomic(reloc_page, KM_USER0);
+       io_mapping_unmap_atomic(reloc_page);

        /* The updated presumed offset for this entry will be
         * copied back out to the user.
···
    page_count = obj->size / PAGE_SIZE;

    for (i = 0; i < page_count; i++) {
-       char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
+       char *dst = kmap_atomic(obj_priv->pages[i]);
        char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

        memcpy(dst, src, PAGE_SIZE);
-       kunmap_atomic(dst, KM_USER0);
+       kunmap_atomic(dst);
    }
    drm_clflush_pages(obj_priv->pages, page_count);
    drm_agp_chipset_flush(dev);
···
    page_count = obj->size / PAGE_SIZE;

    for (i = 0; i < page_count; i++) {
-       char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
+       char *src = kmap_atomic(obj_priv->pages[i]);
        char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

        memcpy(dst, src, PAGE_SIZE);
-       kunmap_atomic(src, KM_USER0);
+       kunmap_atomic(src);
    }

    i915_gem_object_put_pages(obj);
+2 -3
drivers/gpu/drm/i915/i915_irq.c
···
        local_irq_save(flags);
        s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                        reloc_offset,
-                        KM_IRQ0);
+                        reloc_offset);
        memcpy_fromio(d, s, PAGE_SIZE);
-       io_mapping_unmap_atomic(s, KM_IRQ0);
+       io_mapping_unmap_atomic(s);
        local_irq_restore(flags);

        dst->pages[page] = d;
+2 -3
drivers/gpu/drm/i915/intel_overlay.c
···
    if (OVERLAY_NONPHYSICAL(overlay->dev)) {
        regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                       overlay->reg_bo->gtt_offset,
-                       KM_USER0);
+                       overlay->reg_bo->gtt_offset);

        if (!regs) {
            DRM_ERROR("failed to map overlay regs in GTT\n");
···
 static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
 {
    if (OVERLAY_NONPHYSICAL(overlay->dev))
-       io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
+       io_mapping_unmap_atomic(overlay->virt_addr);

    overlay->virt_addr = NULL;
+4 -4
drivers/gpu/drm/nouveau/nouveau_bios.c
···
    if (off < pci_resource_len(dev->pdev, 1)) {
        uint8_t __iomem *p =
-           io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+           io_mapping_map_atomic_wc(fb, off & PAGE_MASK);

        val = ioread32(p + (off & ~PAGE_MASK));

-       io_mapping_unmap_atomic(p, KM_USER0);
+       io_mapping_unmap_atomic(p);
    }

    return val;
···
    if (off < pci_resource_len(dev->pdev, 1)) {
        uint8_t __iomem *p =
-           io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+           io_mapping_map_atomic_wc(fb, off & PAGE_MASK);

        iowrite32(val, p + (off & ~PAGE_MASK));
        wmb();

-       io_mapping_unmap_atomic(p, KM_USER0);
+       io_mapping_unmap_atomic(p);
    }
 }
+4 -4
drivers/gpu/drm/ttm/ttm_bo_util.c
···
    src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

 #ifdef CONFIG_X86
-   dst = kmap_atomic_prot(d, KM_USER0, prot);
+   dst = kmap_atomic_prot(d, prot);
 #else
    if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
        dst = vmap(&d, 1, 0, prot);
···
    memcpy_fromio(dst, src, PAGE_SIZE);

 #ifdef CONFIG_X86
-   kunmap_atomic(dst, KM_USER0);
+   kunmap_atomic(dst);
 #else
    if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
        vunmap(dst);
···
    dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
 #ifdef CONFIG_X86
-   src = kmap_atomic_prot(s, KM_USER0, prot);
+   src = kmap_atomic_prot(s, prot);
 #else
    if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
        src = vmap(&s, 1, 0, prot);
···
    memcpy_toio(dst, src, PAGE_SIZE);

 #ifdef CONFIG_X86
-   kunmap_atomic(src, KM_USER0);
+   kunmap_atomic(src);
 #else
    if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
        vunmap(src);
+39 -22
include/linux/highmem.h
···
 #include <asm/kmap_types.h>

-#ifdef CONFIG_DEBUG_HIGHMEM
-
-void debug_kmap_atomic(enum km_type type);
-
-#else
-
-static inline void debug_kmap_atomic(enum km_type type)
-{
-}
-
-#endif
-
 #ifdef CONFIG_HIGHMEM
 #include <asm/highmem.h>
···
 extern unsigned long totalhigh_pages;

 void kmap_flush_unused(void);
+
+DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
+static inline int kmap_atomic_idx_push(void)
+{
+   int idx = __get_cpu_var(__kmap_atomic_idx)++;
+#ifdef CONFIG_DEBUG_HIGHMEM
+   WARN_ON_ONCE(in_irq() && !irqs_disabled());
+   BUG_ON(idx > KM_TYPE_NR);
+#endif
+   return idx;
+}
+
+static inline int kmap_atomic_idx_pop(void)
+{
+   int idx = --__get_cpu_var(__kmap_atomic_idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+   BUG_ON(idx < 0);
+#endif
+   return idx;
+}

 #else /* CONFIG_HIGHMEM */
···
 {
 }

-static inline void *kmap_atomic(struct page *page, enum km_type idx)
+static inline void *__kmap_atomic(struct page *page)
 {
    pagefault_disable();
    return page_address(page);
 }
-#define kmap_atomic_prot(page, idx, prot)  kmap_atomic(page, idx)
+#define kmap_atomic_prot(page, prot)   __kmap_atomic(page)

-static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
+static inline void __kunmap_atomic(void *addr)
 {
    pagefault_enable();
 }

-#define kmap_atomic_pfn(pfn, idx)  kmap_atomic(pfn_to_page(pfn), (idx))
+#define kmap_atomic_pfn(pfn)   kmap_atomic(pfn_to_page(pfn))
 #define kmap_atomic_to_page(ptr)   virt_to_page(ptr)

 #define kmap_flush_unused()    do {} while(0)
···
 #endif /* CONFIG_HIGHMEM */

-/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */
-/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */
-#define kunmap_atomic(addr, idx) do { \
-       BUILD_BUG_ON(__same_type((addr), struct page *)); \
-       kunmap_atomic_notypecheck((addr), (idx)); \
-   } while (0)
+/*
+ * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
+ */
+#define kmap_atomic(page, args...) __kmap_atomic(page)
+
+/*
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ */
+#define kunmap_atomic(addr, args...)               \
+do {                                               \
+   BUILD_BUG_ON(__same_type((addr), struct page *));   \
+   __kunmap_atomic(addr);                          \
+} while (0)

 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
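
One behavioural consequence of the stack worth noting (illustration only,
not part of the patch): nested atomic kmaps on a CPU must now be released
in LIFO order, since kunmap_atomic() pops whatever slot is on top:

static void copy_highpage_sketch(struct page *dst, struct page *src)
{
    char *vto = kmap_atomic(dst);   /* pushes slot 0 */
    char *vfrom = kmap_atomic(src); /* pushes slot 1 */

    memcpy(vto, vfrom, PAGE_SIZE);

    kunmap_atomic(vfrom);   /* pops slot 1 -- must come first */
    kunmap_atomic(vto);     /* pops slot 0 */
}

The old explicit-slot interface already required matched types, so existing
well-formed callers satisfy this ordering naturally.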
+6 -8
include/linux/io-mapping.h
···
 /* Atomic map/unmap */
 static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
-            unsigned long offset,
-            int slot)
+            unsigned long offset)
 {
    resource_size_t phys_addr;
    unsigned long pfn;
···
    BUG_ON(offset >= mapping->size);
    phys_addr = mapping->base + offset;
    pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
-   return iomap_atomic_prot_pfn(pfn, slot, mapping->prot);
+   return iomap_atomic_prot_pfn(pfn, mapping->prot);
 }

 static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr)
 {
-   iounmap_atomic(vaddr, slot);
+   iounmap_atomic(vaddr);
 }

 static inline void __iomem *
···
 /* Atomic map/unmap */
 static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
-            unsigned long offset,
-            int slot)
+            unsigned long offset)
 {
    return ((char __force __iomem *) mapping) + offset;
 }

 static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr)
 {
 }
+4 -58
mm/highmem.c
···
 unsigned long totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(totalhigh_pages);

+
+DEFINE_PER_CPU(int, __kmap_atomic_idx);
+EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+
 unsigned int nr_free_highpages (void)
 {
    pg_data_t *pgdat;
···
 }

 #endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-
-void debug_kmap_atomic(enum km_type type)
-{
-   static int warn_count = 10;
-
-   if (unlikely(warn_count < 0))
-       return;
-
-   if (unlikely(in_interrupt())) {
-       if (in_nmi()) {
-           if (type != KM_NMI && type != KM_NMI_PTE) {
-               WARN_ON(1);
-               warn_count--;
-           }
-       } else if (in_irq()) {
-           if (type != KM_IRQ0 && type != KM_IRQ1 &&
-               type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
-               type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
-               WARN_ON(1);
-               warn_count--;
-           }
-       } else if (!irqs_disabled()) {  /* softirq */
-           if (type != KM_IRQ0 && type != KM_IRQ1 &&
-               type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-               type != KM_SKB_SUNRPC_DATA &&
-               type != KM_SKB_DATA_SOFTIRQ &&
-               type != KM_BOUNCE_READ) {
-               WARN_ON(1);
-               warn_count--;
-           }
-       }
-   }
-
-   if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-       type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
-       type == KM_IRQ_PTE || type == KM_NMI ||
-       type == KM_NMI_PTE ) {
-       if (!irqs_disabled()) {
-           WARN_ON(1);
-           warn_count--;
-       }
-   } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-       if (irq_count() == 0 && !irqs_disabled()) {
-           WARN_ON(1);
-           warn_count--;
-       }
-   }
-#ifdef CONFIG_KGDB_KDB
-   if (unlikely(type == KM_KDB && atomic_read(&kgdb_active) == -1)) {
-       WARN_ON(1);
-       warn_count--;
-   }
-#endif /* CONFIG_KGDB_KDB */
-}
-
-#endif