Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm: remove the second argument of k[un]map_atomic()

Signed-off-by: Cong Wang <amwang@redhat.com>

Authored by Cong Wang; committed by Cong Wang.
5472e862 1ec9c5dd

+50 -50
+6 -6
arch/arm/mm/copypage-fa.c
··· 44 44 { 45 45 void *kto, *kfrom; 46 46 47 - kto = kmap_atomic(to, KM_USER0); 48 - kfrom = kmap_atomic(from, KM_USER1); 47 + kto = kmap_atomic(to); 48 + kfrom = kmap_atomic(from); 49 49 fa_copy_user_page(kto, kfrom); 50 - kunmap_atomic(kfrom, KM_USER1); 51 - kunmap_atomic(kto, KM_USER0); 50 + kunmap_atomic(kfrom); 51 + kunmap_atomic(kto); 52 52 } 53 53 54 54 /* ··· 58 58 */ 59 59 void fa_clear_user_highpage(struct page *page, unsigned long vaddr) 60 60 { 61 - void *ptr, *kaddr = kmap_atomic(page, KM_USER0); 61 + void *ptr, *kaddr = kmap_atomic(page); 62 62 asm volatile("\ 63 63 mov r1, %2 @ 1\n\ 64 64 mov r2, #0 @ 1\n\ ··· 77 77 : "=r" (ptr) 78 78 : "0" (kaddr), "I" (PAGE_SIZE / 32) 79 79 : "r1", "r2", "r3", "ip", "lr"); 80 - kunmap_atomic(kaddr, KM_USER0); 80 + kunmap_atomic(kaddr); 81 81 } 82 82 83 83 struct cpu_user_fns fa_user_fns __initdata = {
+6 -6
arch/arm/mm/copypage-feroceon.c
··· 72 72 { 73 73 void *kto, *kfrom; 74 74 75 - kto = kmap_atomic(to, KM_USER0); 76 - kfrom = kmap_atomic(from, KM_USER1); 75 + kto = kmap_atomic(to); 76 + kfrom = kmap_atomic(from); 77 77 flush_cache_page(vma, vaddr, page_to_pfn(from)); 78 78 feroceon_copy_user_page(kto, kfrom); 79 - kunmap_atomic(kfrom, KM_USER1); 80 - kunmap_atomic(kto, KM_USER0); 79 + kunmap_atomic(kfrom); 80 + kunmap_atomic(kto); 81 81 } 82 82 83 83 void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) 84 84 { 85 - void *ptr, *kaddr = kmap_atomic(page, KM_USER0); 85 + void *ptr, *kaddr = kmap_atomic(page); 86 86 asm volatile ("\ 87 87 mov r1, %2 \n\ 88 88 mov r2, #0 \n\ ··· 102 102 : "=r" (ptr) 103 103 : "0" (kaddr), "I" (PAGE_SIZE / 32) 104 104 : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); 105 - kunmap_atomic(kaddr, KM_USER0); 105 + kunmap_atomic(kaddr); 106 106 } 107 107 108 108 struct cpu_user_fns feroceon_user_fns __initdata = {
+6 -6
arch/arm/mm/copypage-v3.c
··· 42 42 { 43 43 void *kto, *kfrom; 44 44 45 - kto = kmap_atomic(to, KM_USER0); 46 - kfrom = kmap_atomic(from, KM_USER1); 45 + kto = kmap_atomic(to); 46 + kfrom = kmap_atomic(from); 47 47 v3_copy_user_page(kto, kfrom); 48 - kunmap_atomic(kfrom, KM_USER1); 49 - kunmap_atomic(kto, KM_USER0); 48 + kunmap_atomic(kfrom); 49 + kunmap_atomic(kto); 50 50 } 51 51 52 52 /* ··· 56 56 */ 57 57 void v3_clear_user_highpage(struct page *page, unsigned long vaddr) 58 58 { 59 - void *ptr, *kaddr = kmap_atomic(page, KM_USER0); 59 + void *ptr, *kaddr = kmap_atomic(page); 60 60 asm volatile("\n\ 61 61 mov r1, %2 @ 1\n\ 62 62 mov r2, #0 @ 1\n\ ··· 72 72 : "=r" (ptr) 73 73 : "0" (kaddr), "I" (PAGE_SIZE / 64) 74 74 : "r1", "r2", "r3", "ip", "lr"); 75 - kunmap_atomic(kaddr, KM_USER0); 75 + kunmap_atomic(kaddr); 76 76 } 77 77 78 78 struct cpu_user_fns v3_user_fns __initdata = {
+4 -4
arch/arm/mm/copypage-v4mc.c
··· 71 71 void v4_mc_copy_user_highpage(struct page *to, struct page *from, 72 72 unsigned long vaddr, struct vm_area_struct *vma) 73 73 { 74 - void *kto = kmap_atomic(to, KM_USER1); 74 + void *kto = kmap_atomic(to); 75 75 76 76 if (!test_and_set_bit(PG_dcache_clean, &from->flags)) 77 77 __flush_dcache_page(page_mapping(from), from); ··· 85 85 86 86 raw_spin_unlock(&minicache_lock); 87 87 88 - kunmap_atomic(kto, KM_USER1); 88 + kunmap_atomic(kto); 89 89 } 90 90 91 91 /* ··· 93 93 */ 94 94 void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) 95 95 { 96 - void *ptr, *kaddr = kmap_atomic(page, KM_USER0); 96 + void *ptr, *kaddr = kmap_atomic(page); 97 97 asm volatile("\ 98 98 mov r1, %2 @ 1\n\ 99 99 mov r2, #0 @ 1\n\ ··· 111 111 : "=r" (ptr) 112 112 : "0" (kaddr), "I" (PAGE_SIZE / 64) 113 113 : "r1", "r2", "r3", "ip", "lr"); 114 - kunmap_atomic(kaddr, KM_USER0); 114 + kunmap_atomic(kaddr); 115 115 } 116 116 117 117 struct cpu_user_fns v4_mc_user_fns __initdata = {
+6 -6
arch/arm/mm/copypage-v4wb.c
··· 52 52 { 53 53 void *kto, *kfrom; 54 54 55 - kto = kmap_atomic(to, KM_USER0); 56 - kfrom = kmap_atomic(from, KM_USER1); 55 + kto = kmap_atomic(to); 56 + kfrom = kmap_atomic(from); 57 57 flush_cache_page(vma, vaddr, page_to_pfn(from)); 58 58 v4wb_copy_user_page(kto, kfrom); 59 - kunmap_atomic(kfrom, KM_USER1); 60 - kunmap_atomic(kto, KM_USER0); 59 + kunmap_atomic(kfrom); 60 + kunmap_atomic(kto); 61 61 } 62 62 63 63 /* ··· 67 67 */ 68 68 void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) 69 69 { 70 - void *ptr, *kaddr = kmap_atomic(page, KM_USER0); 70 + void *ptr, *kaddr = kmap_atomic(page); 71 71 asm volatile("\ 72 72 mov r1, %2 @ 1\n\ 73 73 mov r2, #0 @ 1\n\ ··· 86 86 : "=r" (ptr) 87 87 : "0" (kaddr), "I" (PAGE_SIZE / 64) 88 88 : "r1", "r2", "r3", "ip", "lr"); 89 - kunmap_atomic(kaddr, KM_USER0); 89 + kunmap_atomic(kaddr); 90 90 } 91 91 92 92 struct cpu_user_fns v4wb_user_fns __initdata = {
+6 -6
arch/arm/mm/copypage-v4wt.c
··· 48 48 { 49 49 void *kto, *kfrom; 50 50 51 - kto = kmap_atomic(to, KM_USER0); 52 - kfrom = kmap_atomic(from, KM_USER1); 51 + kto = kmap_atomic(to); 52 + kfrom = kmap_atomic(from); 53 53 v4wt_copy_user_page(kto, kfrom); 54 - kunmap_atomic(kfrom, KM_USER1); 55 - kunmap_atomic(kto, KM_USER0); 54 + kunmap_atomic(kfrom); 55 + kunmap_atomic(kto); 56 56 } 57 57 58 58 /* ··· 62 62 */ 63 63 void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) 64 64 { 65 - void *ptr, *kaddr = kmap_atomic(page, KM_USER0); 65 + void *ptr, *kaddr = kmap_atomic(page); 66 66 asm volatile("\ 67 67 mov r1, %2 @ 1\n\ 68 68 mov r2, #0 @ 1\n\ ··· 79 79 : "=r" (ptr) 80 80 : "0" (kaddr), "I" (PAGE_SIZE / 64) 81 81 : "r1", "r2", "r3", "ip", "lr"); 82 - kunmap_atomic(kaddr, KM_USER0); 82 + kunmap_atomic(kaddr); 83 83 } 84 84 85 85 struct cpu_user_fns v4wt_user_fns __initdata = {
+6 -6
arch/arm/mm/copypage-v6.c
··· 38 38 { 39 39 void *kto, *kfrom; 40 40 41 - kfrom = kmap_atomic(from, KM_USER0); 42 - kto = kmap_atomic(to, KM_USER1); 41 + kfrom = kmap_atomic(from); 42 + kto = kmap_atomic(to); 43 43 copy_page(kto, kfrom); 44 - kunmap_atomic(kto, KM_USER1); 45 - kunmap_atomic(kfrom, KM_USER0); 44 + kunmap_atomic(kto); 45 + kunmap_atomic(kfrom); 46 46 } 47 47 48 48 /* ··· 51 51 */ 52 52 static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) 53 53 { 54 - void *kaddr = kmap_atomic(page, KM_USER0); 54 + void *kaddr = kmap_atomic(page); 55 55 clear_page(kaddr); 56 - kunmap_atomic(kaddr, KM_USER0); 56 + kunmap_atomic(kaddr); 57 57 } 58 58 59 59 /*
+6 -6
arch/arm/mm/copypage-xsc3.c
··· 75 75 { 76 76 void *kto, *kfrom; 77 77 78 - kto = kmap_atomic(to, KM_USER0); 79 - kfrom = kmap_atomic(from, KM_USER1); 78 + kto = kmap_atomic(to); 79 + kfrom = kmap_atomic(from); 80 80 flush_cache_page(vma, vaddr, page_to_pfn(from)); 81 81 xsc3_mc_copy_user_page(kto, kfrom); 82 - kunmap_atomic(kfrom, KM_USER1); 83 - kunmap_atomic(kto, KM_USER0); 82 + kunmap_atomic(kfrom); 83 + kunmap_atomic(kto); 84 84 } 85 85 86 86 /* ··· 90 90 */ 91 91 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) 92 92 { 93 - void *ptr, *kaddr = kmap_atomic(page, KM_USER0); 93 + void *ptr, *kaddr = kmap_atomic(page); 94 94 asm volatile ("\ 95 95 mov r1, %2 \n\ 96 96 mov r2, #0 \n\ ··· 105 105 : "=r" (ptr) 106 106 : "0" (kaddr), "I" (PAGE_SIZE / 32) 107 107 : "r1", "r2", "r3"); 108 - kunmap_atomic(kaddr, KM_USER0); 108 + kunmap_atomic(kaddr); 109 109 } 110 110 111 111 struct cpu_user_fns xsc3_mc_user_fns __initdata = {
+4 -4
arch/arm/mm/copypage-xscale.c
··· 93 93 void xscale_mc_copy_user_highpage(struct page *to, struct page *from, 94 94 unsigned long vaddr, struct vm_area_struct *vma) 95 95 { 96 - void *kto = kmap_atomic(to, KM_USER1); 96 + void *kto = kmap_atomic(to); 97 97 98 98 if (!test_and_set_bit(PG_dcache_clean, &from->flags)) 99 99 __flush_dcache_page(page_mapping(from), from); ··· 107 107 108 108 raw_spin_unlock(&minicache_lock); 109 109 110 - kunmap_atomic(kto, KM_USER1); 110 + kunmap_atomic(kto); 111 111 } 112 112 113 113 /* ··· 116 116 void 117 117 xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) 118 118 { 119 - void *ptr, *kaddr = kmap_atomic(page, KM_USER0); 119 + void *ptr, *kaddr = kmap_atomic(page); 120 120 asm volatile( 121 121 "mov r1, %2 \n\ 122 122 mov r2, #0 \n\ ··· 133 133 : "=r" (ptr) 134 134 : "0" (kaddr), "I" (PAGE_SIZE / 32) 135 135 : "r1", "r2", "r3", "ip"); 136 - kunmap_atomic(kaddr, KM_USER0); 136 + kunmap_atomic(kaddr); 137 137 } 138 138 139 139 struct cpu_user_fns xscale_mc_user_fns __initdata = {