Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[POWERPC] Move inline asm eieio to using eieio inline function

Use the eieio function so we can redefine what eieio does rather
than direct inline asm. This is part code clean up and partially
because not all PPCs have eieio (book-e has mbar that maps to eieio).

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>

+10 -10
+6 -6
arch/powerpc/kernel/io.c
@@ -35,7 +35,7 @@
 	asm volatile("sync");
 	do {
 		tmp = *port;
-		asm volatile("eieio");
+		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
 	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
@@ -66,7 +66,7 @@
 	asm volatile("sync");
 	do {
 		tmp = *port;
-		asm volatile("eieio");
+		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
 	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
@@ -97,7 +97,7 @@
 	asm volatile("sync");
 	do {
 		tmp = *port;
-		asm volatile("eieio");
+		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
 	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
@@ -155,21 +155,21 @@
 	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
+		eieio();
 		vsrc++;
 		dest++;
 		n--;
 	}
 	while(n > 4) {
 		*((u32 *)dest) = *((volatile u32 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
+		eieio();
 		vsrc += 4;
 		dest += 4;
 		n -= 4;
 	}
 	while(n) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
+		eieio();
 		vsrc++;
 		dest++;
 		n--;
+1 -1
arch/powerpc/mm/hash_native_64.c
@@ -163,7 +163,7 @@
 
 	hptep->r = hpte_r;
 	/* Guarantee the second dword is visible before the valid bit */
-	__asm__ __volatile__ ("eieio" : : : "memory");
+	eieio();
 	/*
 	 * Now set the first dword including the valid bit
 	 * NOTE: this also unlocks the hpte
+2 -2
arch/powerpc/mm/stab.c
@@ -55,7 +55,7 @@
 	for (entry = 0; entry < 8; entry++, ste++) {
 		if (!(ste->esid_data & STE_ESID_V)) {
 			ste->vsid_data = vsid_data;
-			asm volatile("eieio":::"memory");
+			eieio();
 			ste->esid_data = esid_data;
 			return (global_entry | entry);
 		}
@@ -101,7 +101,7 @@
 	asm volatile("sync" : : : "memory");	/* Order update */
 
 	castout_ste->vsid_data = vsid_data;
-	asm volatile("eieio" : : : "memory");	/* Order update */
+	eieio();				/* Order update */
 	castout_ste->esid_data = esid_data;
 
 	asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
+1 -1
include/asm-powerpc/system.h
@@ -43,7 +43,7 @@
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
-#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_wmb()	eieio()
 #define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	barrier()