Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

m68k: Fix xchg/cmpxchg to fail to link if given an inappropriate pointer

Fix the m68k versions of xchg() and cmpxchg() to fail to link if given an
inappropriately sized pointer rather than BUG()'ing at runtime.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Greg Ungerer <gerg@uclinux.org>
cc: linux-m68k@lists.linux-m68k.org

+16 -4
arch/m68k/include/asm/system.h
--- a/arch/m68k/include/asm/system.h
+++ b/arch/m68k/include/asm/system.h
@@ -68,6 +68,8 @@
 struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((volatile struct __xchg_dummy *)(x))
 
+extern unsigned long __invalid_xchg_size(unsigned long, volatile void *, int);
+
 #ifndef CONFIG_RMW_INSNS
 static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 {
@@ -94,7 +96,8 @@
 		x = tmp;
 		break;
 	default:
-		BUG();
+		tmp = __invalid_xchg_size(x, ptr, size);
+		break;
 	}
 
 	local_irq_restore(flags);
@@ -105,7 +108,7 @@
 static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 {
 	switch (size) {
-	case 1:
+	case 1:
 		__asm__ __volatile__
 			("moveb %2,%0\n\t"
 			 "1:\n\t"
@@ -113,7 +116,7 @@
 			 "jne 1b"
 			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
 		break;
-	case 2:
+	case 2:
 		__asm__ __volatile__
 			("movew %2,%0\n\t"
 			 "1:\n\t"
@@ -121,13 +124,17 @@
 			 "jne 1b"
 			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
 		break;
-	case 4:
+	case 4:
 		__asm__ __volatile__
 			("movel %2,%0\n\t"
 			 "1:\n\t"
 			 "casl %0,%1,%2\n\t"
 			 "jne 1b"
 			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	default:
+		x = __invalid_xchg_size(x, ptr, size);
 		break;
 	}
 	return x;
@@ -140,6 +147,9 @@
 #include <asm-generic/cmpxchg-local.h>
 
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+extern unsigned long __invalid_cmpxchg_size(volatile void *,
+					    unsigned long, unsigned long, int);
 
 /*
  * Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -170,6 +180,9 @@
 		__asm__ __volatile__ ("casl %0,%2,%1"
 				      : "=d" (old), "=m" (*(int *)p)
 				      : "d" (new), "0" (old), "m" (*(int *)p));
+		break;
+	default:
+		old = __invalid_cmpxchg_size(p, old, new, size);
 		break;
 	}
 	return old;