Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: strictly nested kmap_atomic()

Ensure kmap_atomic() usage is strictly nested: kunmap_atomic() calls must occur in the reverse order of the corresponding kmap_atomic() calls (LIFO), so a later mapping (KM_USER1) is unmapped before an earlier one (KM_USER0).

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Peter Zijlstra and committed by Linus Torvalds.
61ecdb80 2e30244a

+8 -8
+1 -1
crypto/async_tx/async_memcpy.c
··· 83 83 84 84 memcpy(dest_buf, src_buf, len); 85 85 86 - kunmap_atomic(dest_buf, KM_USER0); 87 86 kunmap_atomic(src_buf, KM_USER1); 87 + kunmap_atomic(dest_buf, KM_USER0); 88 88 89 89 async_tx_sync_epilog(submit); 90 90 }
+1 -1
crypto/blkcipher.c
··· 89 89 memcpy(walk->dst.virt.addr, walk->page, n); 90 90 blkcipher_unmap_dst(walk); 91 91 } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) { 92 - blkcipher_unmap_src(walk); 93 92 if (walk->flags & BLKCIPHER_WALK_DIFF) 94 93 blkcipher_unmap_dst(walk); 94 + blkcipher_unmap_src(walk); 95 95 } 96 96 97 97 scatterwalk_advance(&walk->in, n);
+2 -2
drivers/block/loop.c
··· 101 101 else 102 102 memcpy(raw_buf, loop_buf, size); 103 103 104 - kunmap_atomic(raw_buf, KM_USER0); 105 104 kunmap_atomic(loop_buf, KM_USER1); 105 + kunmap_atomic(raw_buf, KM_USER0); 106 106 cond_resched(); 107 107 return 0; 108 108 } ··· 130 130 for (i = 0; i < size; i++) 131 131 *out++ = *in++ ^ key[(i & 511) % keysize]; 132 132 133 - kunmap_atomic(raw_buf, KM_USER0); 134 133 kunmap_atomic(loop_buf, KM_USER1); 134 + kunmap_atomic(raw_buf, KM_USER0); 135 135 cond_resched(); 136 136 return 0; 137 137 }
+2 -2
include/linux/highmem.h
··· 201 201 vfrom = kmap_atomic(from, KM_USER0); 202 202 vto = kmap_atomic(to, KM_USER1); 203 203 copy_user_page(vto, vfrom, vaddr, to); 204 - kunmap_atomic(vfrom, KM_USER0); 205 204 kunmap_atomic(vto, KM_USER1); 205 + kunmap_atomic(vfrom, KM_USER0); 206 206 } 207 207 208 208 #endif ··· 214 214 vfrom = kmap_atomic(from, KM_USER0); 215 215 vto = kmap_atomic(to, KM_USER1); 216 216 copy_page(vto, vfrom); 217 - kunmap_atomic(vfrom, KM_USER0); 218 217 kunmap_atomic(vto, KM_USER1); 218 + kunmap_atomic(vfrom, KM_USER0); 219 219 } 220 220 221 221 #endif /* _LINUX_HIGHMEM_H */
+2 -2
kernel/power/snapshot.c
··· 984 984 src = kmap_atomic(s_page, KM_USER0); 985 985 dst = kmap_atomic(d_page, KM_USER1); 986 986 do_copy_page(dst, src); 987 - kunmap_atomic(src, KM_USER0); 988 987 kunmap_atomic(dst, KM_USER1); 988 + kunmap_atomic(src, KM_USER0); 989 989 } else { 990 990 if (PageHighMem(d_page)) { 991 991 /* Page pointed to by src may contain some kernel ··· 2273 2273 memcpy(buf, kaddr1, PAGE_SIZE); 2274 2274 memcpy(kaddr1, kaddr2, PAGE_SIZE); 2275 2275 memcpy(kaddr2, buf, PAGE_SIZE); 2276 - kunmap_atomic(kaddr1, KM_USER0); 2277 2276 kunmap_atomic(kaddr2, KM_USER1); 2277 + kunmap_atomic(kaddr1, KM_USER0); 2278 2278 } 2279 2279 2280 2280 /**