Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'kmap_atomic' of git://github.com/congwang/linux

Pull final kmap_atomic cleanups from Cong Wang:
"This should be the final round of cleanup, as the definitions of enum
km_type finally get removed from the whole tree. The patches have
been in linux-next for a long time."

* 'kmap_atomic' of git://github.com/congwang/linux:
pipe: remove KM_USER0 from comments
vmalloc: remove KM_USER0 from comments
feature-removal-schedule.txt: remove kmap_atomic(page, km_type)
tile: remove km_type definitions
um: remove km_type definitions
asm-generic: remove km_type definitions
avr32: remove km_type definitions
frv: remove km_type definitions
powerpc: remove km_type definitions
arm: remove km_type definitions
highmem: remove the deprecated form of kmap_atomic
tile: remove usage of enum km_type
frv: remove the second parameter of kmap_atomic_primary()
jbd2: remove the second argument of kmap_atomic

+44 -283
-8
Documentation/feature-removal-schedule.txt
··· 512 512 513 513 ---------------------------- 514 514 515 - What: kmap_atomic(page, km_type) 516 - When: 3.5 517 - Why: The old kmap_atomic() with two arguments is deprecated, we only 518 - keep it for backward compatibility for few cycles and then drop it. 519 - Who: Cong Wang <amwang@redhat.com> 520 - 521 - ---------------------------- 522 - 523 515 What: get_robust_list syscall 524 516 When: 2013 525 517 Why: There appear to be no production users of the get_robust_list syscall,
+1 -25
arch/arm/include/asm/kmap_types.h
··· 4 4 /* 5 5 * This is the "bare minimum". AIO seems to require this. 6 6 */ 7 - enum km_type { 8 - KM_BOUNCE_READ, 9 - KM_SKB_SUNRPC_DATA, 10 - KM_SKB_DATA_SOFTIRQ, 11 - KM_USER0, 12 - KM_USER1, 13 - KM_BIO_SRC_IRQ, 14 - KM_BIO_DST_IRQ, 15 - KM_PTE0, 16 - KM_PTE1, 17 - KM_IRQ0, 18 - KM_IRQ1, 19 - KM_SOFTIRQ0, 20 - KM_SOFTIRQ1, 21 - KM_L1_CACHE, 22 - KM_L2_CACHE, 23 - KM_KDB, 24 - KM_TYPE_NR 25 - }; 26 - 27 - #ifdef CONFIG_DEBUG_HIGHMEM 28 - #define KM_NMI (-1) 29 - #define KM_NMI_PTE (-1) 30 - #define KM_IRQ_PTE (-1) 31 - #endif 7 + #define KM_TYPE_NR 16 32 8 33 9 #endif
+2 -22
arch/avr32/include/asm/kmap_types.h
··· 2 2 #define __ASM_AVR32_KMAP_TYPES_H 3 3 4 4 #ifdef CONFIG_DEBUG_HIGHMEM 5 - # define D(n) __KM_FENCE_##n , 5 + # define KM_TYPE_NR 29 6 6 #else 7 - # define D(n) 7 + # define KM_TYPE_NR 14 8 8 #endif 9 - 10 - enum km_type { 11 - D(0) KM_BOUNCE_READ, 12 - D(1) KM_SKB_SUNRPC_DATA, 13 - D(2) KM_SKB_DATA_SOFTIRQ, 14 - D(3) KM_USER0, 15 - D(4) KM_USER1, 16 - D(5) KM_BIO_SRC_IRQ, 17 - D(6) KM_BIO_DST_IRQ, 18 - D(7) KM_PTE0, 19 - D(8) KM_PTE1, 20 - D(9) KM_PTE2, 21 - D(10) KM_IRQ0, 22 - D(11) KM_IRQ1, 23 - D(12) KM_SOFTIRQ0, 24 - D(13) KM_SOFTIRQ1, 25 - D(14) KM_TYPE_NR 26 - }; 27 - 28 - #undef D 29 9 30 10 #endif /* __ASM_AVR32_KMAP_TYPES_H */
+9 -25
arch/frv/include/asm/highmem.h
··· 76 76 77 77 #ifndef __ASSEMBLY__ 78 78 79 - #define __kmap_atomic_primary(type, paddr, ampr) \ 79 + #define __kmap_atomic_primary(cached, paddr, ampr) \ 80 80 ({ \ 81 81 unsigned long damlr, dampr; \ 82 82 \ 83 83 dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \ 84 84 \ 85 - if (type != __KM_CACHE) \ 85 + if (!cached) \ 86 86 asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory"); \ 87 87 else \ 88 + /* cache flush page attachment point */ \ 88 89 asm volatile("movgs %0,iampr"#ampr"\n" \ 89 90 "movgs %0,dampr"#ampr"\n" \ 90 91 :: "r"(dampr) : "memory" \ ··· 113 112 (void *) damlr; \ 114 113 }) 115 114 116 - static inline void *kmap_atomic_primary(struct page *page, enum km_type type) 115 + static inline void *kmap_atomic_primary(struct page *page) 117 116 { 118 117 unsigned long paddr; 119 118 120 119 pagefault_disable(); 121 120 paddr = page_to_phys(page); 122 121 123 - switch (type) { 124 - case 0: return __kmap_atomic_primary(0, paddr, 2); 125 - case 1: return __kmap_atomic_primary(1, paddr, 3); 126 - case 2: return __kmap_atomic_primary(2, paddr, 4); 127 - case 3: return __kmap_atomic_primary(3, paddr, 5); 128 - 129 - default: 130 - BUG(); 131 - return NULL; 132 - } 122 + return __kmap_atomic_primary(1, paddr, 2); 133 123 } 134 124 135 - #define __kunmap_atomic_primary(type, ampr) \ 125 + #define __kunmap_atomic_primary(cached, ampr) \ 136 126 do { \ 137 127 asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory"); \ 138 - if (type == __KM_CACHE) \ 128 + if (cached) \ 139 129 asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory"); \ 140 130 } while(0) 141 131 ··· 135 143 asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \ 136 144 } while(0) 137 145 138 - static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type) 146 + static inline void kunmap_atomic_primary(void *kvaddr) 139 147 { 140 - switch (type) { 141 - case 0: __kunmap_atomic_primary(0, 2); break; 142 - case 1: __kunmap_atomic_primary(1, 3); break; 143 - case 2: __kunmap_atomic_primary(2, 4); break; 144 - case 3: __kunmap_atomic_primary(3, 5); break; 145 - 146 - default: 147 - BUG(); 148 - } 148 + __kunmap_atomic_primary(1, 2); 149 149 pagefault_enable(); 150 150 }
+1 -23
arch/frv/include/asm/kmap_types.h
··· 2 2 #ifndef _ASM_KMAP_TYPES_H 3 3 #define _ASM_KMAP_TYPES_H 4 4 5 - enum km_type { 6 - /* arch specific kmaps - change the numbers attached to these at your peril */ 7 - __KM_CACHE, /* cache flush page attachment point */ 8 - __KM_PGD, /* current page directory */ 9 - __KM_ITLB_PTD, /* current instruction TLB miss page table lookup */ 10 - __KM_DTLB_PTD, /* current data TLB miss page table lookup */ 11 - 12 - /* general kmaps */ 13 - KM_BOUNCE_READ, 14 - KM_SKB_SUNRPC_DATA, 15 - KM_SKB_DATA_SOFTIRQ, 16 - KM_USER0, 17 - KM_USER1, 18 - KM_BIO_SRC_IRQ, 19 - KM_BIO_DST_IRQ, 20 - KM_PTE0, 21 - KM_PTE1, 22 - KM_IRQ0, 23 - KM_IRQ1, 24 - KM_SOFTIRQ0, 25 - KM_SOFTIRQ1, 26 - KM_TYPE_NR 27 - }; 5 + #define KM_TYPE_NR 17 28 6 29 7 #endif
+2 -2
arch/frv/mb93090-mb00/pci-dma.c
··· 62 62 dampr2 = __get_DAMPR(2); 63 63 64 64 for (i = 0; i < nents; i++) { 65 - vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE); 65 + vaddr = kmap_atomic_primary(sg_page(&sg[i])); 66 66 67 67 frv_dcache_writeback((unsigned long) vaddr, 68 68 (unsigned long) vaddr + PAGE_SIZE); 69 69 70 70 } 71 71 72 - kunmap_atomic_primary(vaddr, __KM_CACHE); 72 + kunmap_atomic_primary(vaddr); 73 73 if (dampr2) { 74 74 __set_DAMPR(2, dampr2); 75 75 __set_IAMPR(2, dampr2);
+4 -4
arch/frv/mm/cache-page.c
··· 26 26 27 27 dampr2 = __get_DAMPR(2); 28 28 29 - vaddr = kmap_atomic_primary(page, __KM_CACHE); 29 + vaddr = kmap_atomic_primary(page); 30 30 31 31 frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE); 32 32 33 - kunmap_atomic_primary(vaddr, __KM_CACHE); 33 + kunmap_atomic_primary(vaddr); 34 34 35 35 if (dampr2) { 36 36 __set_DAMPR(2, dampr2); ··· 54 54 55 55 dampr2 = __get_DAMPR(2); 56 56 57 - vaddr = kmap_atomic_primary(page, __KM_CACHE); 57 + vaddr = kmap_atomic_primary(page); 58 58 59 59 start = (start & ~PAGE_MASK) | (unsigned long) vaddr; 60 60 frv_cache_wback_inv(start, start + len); 61 61 62 - kunmap_atomic_primary(vaddr, __KM_CACHE); 62 + kunmap_atomic_primary(vaddr); 63 63 64 64 if (dampr2) { 65 65 __set_DAMPR(2, dampr2);
+10 -10
arch/frv/mm/highmem.c
··· 50 50 /* 51 51 * The first 4 primary maps are reserved for architecture code 52 52 */ 53 - case 0: return __kmap_atomic_primary(4, paddr, 6); 54 - case 1: return __kmap_atomic_primary(5, paddr, 7); 55 - case 2: return __kmap_atomic_primary(6, paddr, 8); 56 - case 3: return __kmap_atomic_primary(7, paddr, 9); 57 - case 4: return __kmap_atomic_primary(8, paddr, 10); 53 + case 0: return __kmap_atomic_primary(0, paddr, 6); 54 + case 1: return __kmap_atomic_primary(0, paddr, 7); 55 + case 2: return __kmap_atomic_primary(0, paddr, 8); 56 + case 3: return __kmap_atomic_primary(0, paddr, 9); 57 + case 4: return __kmap_atomic_primary(0, paddr, 10); 58 58 59 59 case 5 ... 5 + NR_TLB_LINES - 1: 60 60 return __kmap_atomic_secondary(type - 5, paddr); ··· 70 70 { 71 71 int type = kmap_atomic_idx(); 72 72 switch (type) { 73 - case 0: __kunmap_atomic_primary(4, 6); break; 74 - case 1: __kunmap_atomic_primary(5, 7); break; 75 - case 2: __kunmap_atomic_primary(6, 8); break; 76 - case 3: __kunmap_atomic_primary(7, 9); break; 77 - case 4: __kunmap_atomic_primary(8, 10); break; 73 + case 0: __kunmap_atomic_primary(0, 6); break; 74 + case 1: __kunmap_atomic_primary(0, 7); break; 75 + case 2: __kunmap_atomic_primary(0, 8); break; 76 + case 3: __kunmap_atomic_primary(0, 9); break; 77 + case 4: __kunmap_atomic_primary(0, 10); break; 78 78 79 79 case 5 ... 5 + NR_TLB_LINES - 1: 80 80 __kunmap_atomic_secondary(type - 5, kvaddr);
+1 -30
arch/powerpc/include/asm/kmap_types.h
··· 10 10 * 2 of the License, or (at your option) any later version. 11 11 */ 12 12 13 - enum km_type { 14 - KM_BOUNCE_READ, 15 - KM_SKB_SUNRPC_DATA, 16 - KM_SKB_DATA_SOFTIRQ, 17 - KM_USER0, 18 - KM_USER1, 19 - KM_BIO_SRC_IRQ, 20 - KM_BIO_DST_IRQ, 21 - KM_PTE0, 22 - KM_PTE1, 23 - KM_IRQ0, 24 - KM_IRQ1, 25 - KM_SOFTIRQ0, 26 - KM_SOFTIRQ1, 27 - KM_PPC_SYNC_PAGE, 28 - KM_PPC_SYNC_ICACHE, 29 - KM_KDB, 30 - KM_TYPE_NR 31 - }; 32 - 33 - /* 34 - * This is a temporary build fix that (so they say on lkml....) should no longer 35 - * be required after 2.6.33, because of changes planned to the kmap code. 36 - * Let's try to remove this cruft then. 37 - */ 38 - #ifdef CONFIG_DEBUG_HIGHMEM 39 - #define KM_NMI (-1) 40 - #define KM_NMI_PTE (-1) 41 - #define KM_IRQ_PTE (-1) 42 - #endif 13 + #define KM_TYPE_NR 16 43 14 44 15 #endif /* __KERNEL__ */ 45 16 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
+1 -30
arch/tile/include/asm/kmap_types.h
··· 23 23 * adds 4MB of required address-space. For now we leave KM_TYPE_NR 24 24 * set to depth 8. 25 25 */ 26 - enum km_type { 27 - KM_TYPE_NR = 8 28 - }; 29 - 30 - /* 31 - * We provide dummy definitions of all the stray values that used to be 32 - * required for kmap_atomic() and no longer are. 33 - */ 34 - enum { 35 - KM_BOUNCE_READ, 36 - KM_SKB_SUNRPC_DATA, 37 - KM_SKB_DATA_SOFTIRQ, 38 - KM_USER0, 39 - KM_USER1, 40 - KM_BIO_SRC_IRQ, 41 - KM_BIO_DST_IRQ, 42 - KM_PTE0, 43 - KM_PTE1, 44 - KM_IRQ0, 45 - KM_IRQ1, 46 - KM_SOFTIRQ0, 47 - KM_SOFTIRQ1, 48 - KM_SYNC_ICACHE, 49 - KM_SYNC_DCACHE, 50 - KM_UML_USERCOPY, 51 - KM_IRQ_PTE, 52 - KM_NMI, 53 - KM_NMI_PTE, 54 - KM_KDB 55 - }; 26 + #define KM_TYPE_NR 8 56 27 57 28 #endif /* _ASM_TILE_KMAP_TYPES_H */
+1 -1
arch/tile/mm/highmem.c
··· 93 93 * If we examine it earlier we are exposed to a race where it looks 94 94 * writable earlier, but becomes immutable before we write the PTE. 95 95 */ 96 - static void kmap_atomic_register(struct page *page, enum km_type type, 96 + static void kmap_atomic_register(struct page *page, int type, 97 97 unsigned long va, pte_t *ptep, pte_t pteval) 98 98 { 99 99 unsigned long flags;
+1 -17
arch/um/include/asm/kmap_types.h
··· 8 8 9 9 /* No more #include "asm/arch/kmap_types.h" ! */ 10 10 11 - enum km_type { 12 - KM_BOUNCE_READ, 13 - KM_SKB_SUNRPC_DATA, 14 - KM_SKB_DATA_SOFTIRQ, 15 - KM_USER0, 16 - KM_USER1, 17 - KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */ 18 - KM_BIO_SRC_IRQ, 19 - KM_BIO_DST_IRQ, 20 - KM_PTE0, 21 - KM_PTE1, 22 - KM_IRQ0, 23 - KM_IRQ1, 24 - KM_SOFTIRQ0, 25 - KM_SOFTIRQ1, 26 - KM_TYPE_NR 27 - }; 11 + #define KM_TYPE_NR 14 28 12 29 13 #endif
+2 -2
fs/jbd2/commit.c
··· 349 349 return; 350 350 351 351 sequence = cpu_to_be32(sequence); 352 - addr = kmap_atomic(page, KM_USER0); 352 + addr = kmap_atomic(page); 353 353 csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence, 354 354 sizeof(sequence)); 355 355 csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data), 356 356 bh->b_size); 357 - kunmap_atomic(addr, KM_USER0); 357 + kunmap_atomic(addr); 358 358 359 359 tag->t_checksum = cpu_to_be32(csum); 360 360 }
+1 -1
fs/pipe.c
··· 224 224 * and the caller has to be careful not to fault before calling 225 225 * the unmap function. 226 226 * 227 - * Note that this function occupies KM_USER0 if @atomic != 0. 227 + * Note that this function calls kmap_atomic() if @atomic != 0. 228 228 */ 229 229 void *generic_pipe_buf_map(struct pipe_inode_info *pipe, 230 230 struct pipe_buffer *buf, int atomic)
+2 -32
include/asm-generic/kmap_types.h
··· 2 2 #define _ASM_GENERIC_KMAP_TYPES_H 3 3 4 4 #ifdef __WITH_KM_FENCE 5 - # define KMAP_D(n) __KM_FENCE_##n , 5 + # define KM_TYPE_NR 41 6 6 #else 7 - # define KMAP_D(n) 7 + # define KM_TYPE_NR 20 8 8 #endif 9 - 10 - enum km_type { 11 - KMAP_D(0) KM_BOUNCE_READ, 12 - KMAP_D(1) KM_SKB_SUNRPC_DATA, 13 - KMAP_D(2) KM_SKB_DATA_SOFTIRQ, 14 - KMAP_D(3) KM_USER0, 15 - KMAP_D(4) KM_USER1, 16 - KMAP_D(5) KM_BIO_SRC_IRQ, 17 - KMAP_D(6) KM_BIO_DST_IRQ, 18 - KMAP_D(7) KM_PTE0, 19 - KMAP_D(8) KM_PTE1, 20 - KMAP_D(9) KM_IRQ0, 21 - KMAP_D(10) KM_IRQ1, 22 - KMAP_D(11) KM_SOFTIRQ0, 23 - KMAP_D(12) KM_SOFTIRQ1, 24 - KMAP_D(13) KM_SYNC_ICACHE, 25 - KMAP_D(14) KM_SYNC_DCACHE, 26 - /* UML specific, for copy_*_user - used in do_op_one_page */ 27 - KMAP_D(15) KM_UML_USERCOPY, 28 - KMAP_D(16) KM_IRQ_PTE, 29 - KMAP_D(17) KM_NMI, 30 - KMAP_D(18) KM_NMI_PTE, 31 - KMAP_D(19) KM_KDB, 32 - /* 33 - * Remember to update debug_kmap_atomic() when adding new kmap types! 34 - */ 35 - KMAP_D(20) KM_TYPE_NR 36 - }; 37 - 38 - #undef KMAP_D 39 9 40 10 #endif
+1 -40
include/linux/highmem.h
··· 110 110 #endif 111 111 112 112 /* 113 - * NOTE: 114 - * kmap_atomic() and kunmap_atomic() with two arguments are deprecated. 115 - * We only keep them for backward compatibility, any usage of them 116 - * are now warned. 117 - */ 118 - 119 - #define PASTE(a, b) a ## b 120 - #define PASTE2(a, b) PASTE(a, b) 121 - 122 - #define NARG_(_2, _1, n, ...) n 123 - #define NARG(...) NARG_(__VA_ARGS__, 2, 1, :) 124 - 125 - static inline void __deprecated *kmap_atomic_deprecated(struct page *page, 126 - enum km_type km) 127 - { 128 - return kmap_atomic(page); 129 - } 130 - 131 - #define kmap_atomic1(...) kmap_atomic(__VA_ARGS__) 132 - #define kmap_atomic2(...) kmap_atomic_deprecated(__VA_ARGS__) 133 - #define kmap_atomic(...) PASTE2(kmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__)) 134 - 135 - static inline void __deprecated __kunmap_atomic_deprecated(void *addr, 136 - enum km_type km) 137 - { 138 - __kunmap_atomic(addr); 139 - } 140 - 141 - /* 142 113 * Prevent people trying to call kunmap_atomic() as if it were kunmap() 143 114 * kunmap_atomic() should get the return value of kmap_atomic, not the page. 144 115 */ 145 - #define kunmap_atomic_deprecated(addr, km) \ 146 - do { \ 147 - BUILD_BUG_ON(__same_type((addr), struct page *)); \ 148 - __kunmap_atomic_deprecated(addr, km); \ 149 - } while (0) 150 - 151 - #define kunmap_atomic_withcheck(addr) \ 116 + #define kunmap_atomic(addr) \ 152 117 do { \ 153 118 BUILD_BUG_ON(__same_type((addr), struct page *)); \ 154 119 __kunmap_atomic(addr); \ 155 120 } while (0) 156 121 157 - #define kunmap_atomic1(...) kunmap_atomic_withcheck(__VA_ARGS__) 158 - #define kunmap_atomic2(...) kunmap_atomic_deprecated(__VA_ARGS__) 159 - #define kunmap_atomic(...) PASTE2(kunmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__)) 160 - /**** End of C pre-processor tricks for deprecated macros ****/ 161 122 162 123 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ 163 124 #ifndef clear_user_highpage
+3 -5
include/linux/pipe_fs_i.h
··· 86 86 * mapping or not. The atomic map is faster, however you can't take 87 87 * page faults before calling ->unmap() again. So if you need to eg 88 88 * access user data through copy_to/from_user(), then you must get 89 - * a non-atomic map. ->map() uses the KM_USER0 atomic slot for 90 - * atomic maps, so you can't map more than one pipe_buffer at once 91 - * and you have to be careful if mapping another page as source 92 - * or destination for a copy (IOW, it has to use something else 93 - * than KM_USER0). 89 + * a non-atomic map. ->map() uses the kmap_atomic slot for 90 + * atomic maps, you have to be careful if mapping another page as 91 + * source or destination for a copy. 94 92 */ 95 93 void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int); 96 94
+2 -6
mm/vmalloc.c
··· 1975 1975 * IOREMAP area is treated as memory hole and no copy is done. 1976 1976 * 1977 1977 * If [addr...addr+count) doesn't includes any intersects with alive 1978 - * vm_struct area, returns 0. 1979 - * @buf should be kernel's buffer. Because this function uses KM_USER0, 1980 - * the caller should guarantee KM_USER0 is not used. 1978 + * vm_struct area, returns 0. @buf should be kernel's buffer. 1981 1979 * 1982 1980 * Note: In usual ops, vread() is never necessary because the caller 1983 1981 * should know vmalloc() area is valid and can use memcpy(). ··· 2049 2051 * IOREMAP area is treated as memory hole and no copy is done. 2050 2052 * 2051 2053 * If [addr...addr+count) doesn't includes any intersects with alive 2052 - * vm_struct area, returns 0. 2053 - * @buf should be kernel's buffer. Because this function uses KM_USER0, 2054 - * the caller should guarantee KM_USER0 is not used. 2054 + * vm_struct area, returns 0. @buf should be kernel's buffer. 2055 2055 * 2056 2056 * Note: In usual ops, vwrite() is never necessary because the caller 2057 2057 * should know vmalloc() area is valid and can use memcpy().