Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] powerpc: merge atomic.h, memory.h

powerpc: Merge atomic.h and memory.h into powerpc

Merged atomic.h into include/powerpc. Moved asm-style HMT_ defines from
memory.h into ppc_asm.h, where there were already HMT_ defines; moved C-style
HMT_ defines to processor.h. Renamed memory.h to synch.h to better reflect
its contents.

Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Jon Loeliger <linuxppc@jdl.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by

Becky Bruce and committed by
Paul Mackerras
feaf7cf1 2bfadee3

+114 -324
+3
include/asm-powerpc/ppc_asm.h
··· 75 75 #define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base) 76 76 77 77 /* Macros to adjust thread priority for Iseries hardware multithreading */ 78 + #define HMT_VERY_LOW or 31,31,31 # very low priority 78 79 #define HMT_LOW or 1,1,1 80 + #define HMT_MEDIUM_LOW or 6,6,6 # medium low priority 79 81 #define HMT_MEDIUM or 2,2,2 82 + #define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority 80 83 #define HMT_HIGH or 3,3,3 81 84 82 85 /* handle instructions that older assemblers may not know */
+51
include/asm-powerpc/synch.h
··· 1 + #ifndef _ASM_POWERPC_SYNCH_H 2 + #define _ASM_POWERPC_SYNCH_H 3 + 4 + #include <linux/config.h> 5 + 6 + #ifdef __powerpc64__ 7 + #define __SUBARCH_HAS_LWSYNC 8 + #endif 9 + 10 + #ifdef __SUBARCH_HAS_LWSYNC 11 + # define LWSYNC lwsync 12 + #else 13 + # define LWSYNC sync 14 + #endif 15 + 16 + 17 + /* 18 + * Arguably the bitops and *xchg operations don't imply any memory barrier 19 + * or SMP ordering, but in fact a lot of drivers expect them to imply 20 + * both, since they do on x86 cpus. 21 + */ 22 + #ifdef CONFIG_SMP 23 + #define EIEIO_ON_SMP "eieio\n" 24 + #define ISYNC_ON_SMP "\n\tisync" 25 + #define SYNC_ON_SMP __stringify(LWSYNC) "\n" 26 + #else 27 + #define EIEIO_ON_SMP 28 + #define ISYNC_ON_SMP 29 + #define SYNC_ON_SMP 30 + #endif 31 + 32 + static inline void eieio(void) 33 + { 34 + __asm__ __volatile__ ("eieio" : : : "memory"); 35 + } 36 + 37 + static inline void isync(void) 38 + { 39 + __asm__ __volatile__ ("isync" : : : "memory"); 40 + } 41 + 42 + #ifdef CONFIG_SMP 43 + #define eieio_on_smp() eieio() 44 + #define isync_on_smp() isync() 45 + #else 46 + #define eieio_on_smp() __asm__ __volatile__("": : :"memory") 47 + #define isync_on_smp() __asm__ __volatile__("": : :"memory") 48 + #endif 49 + 50 + #endif /* _ASM_POWERPC_SYNCH_H */ 51 +
-214
include/asm-ppc/atomic.h
··· 1 - /* 2 - * PowerPC atomic operations 3 - */ 4 - 5 - #ifndef _ASM_PPC_ATOMIC_H_ 6 - #define _ASM_PPC_ATOMIC_H_ 7 - 8 - typedef struct { volatile int counter; } atomic_t; 9 - 10 - #ifdef __KERNEL__ 11 - 12 - #define ATOMIC_INIT(i) { (i) } 13 - 14 - #define atomic_read(v) ((v)->counter) 15 - #define atomic_set(v,i) (((v)->counter) = (i)) 16 - 17 - extern void atomic_clear_mask(unsigned long mask, unsigned long *addr); 18 - 19 - #ifdef CONFIG_SMP 20 - #define SMP_SYNC "sync" 21 - #define SMP_ISYNC "\n\tisync" 22 - #else 23 - #define SMP_SYNC "" 24 - #define SMP_ISYNC 25 - #endif 26 - 27 - /* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx. 28 - * The old ATOMIC_SYNC_FIX covered some but not all of this. 29 - */ 30 - #ifdef CONFIG_IBM405_ERR77 31 - #define PPC405_ERR77(ra,rb) "dcbt " #ra "," #rb ";" 32 - #else 33 - #define PPC405_ERR77(ra,rb) 34 - #endif 35 - 36 - static __inline__ void atomic_add(int a, atomic_t *v) 37 - { 38 - int t; 39 - 40 - __asm__ __volatile__( 41 - "1: lwarx %0,0,%3 # atomic_add\n\ 42 - add %0,%2,%0\n" 43 - PPC405_ERR77(0,%3) 44 - " stwcx. %0,0,%3 \n\ 45 - bne- 1b" 46 - : "=&r" (t), "=m" (v->counter) 47 - : "r" (a), "r" (&v->counter), "m" (v->counter) 48 - : "cc"); 49 - } 50 - 51 - static __inline__ int atomic_add_return(int a, atomic_t *v) 52 - { 53 - int t; 54 - 55 - __asm__ __volatile__( 56 - "1: lwarx %0,0,%2 # atomic_add_return\n\ 57 - add %0,%1,%0\n" 58 - PPC405_ERR77(0,%2) 59 - " stwcx. %0,0,%2 \n\ 60 - bne- 1b" 61 - SMP_ISYNC 62 - : "=&r" (t) 63 - : "r" (a), "r" (&v->counter) 64 - : "cc", "memory"); 65 - 66 - return t; 67 - } 68 - 69 - #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) 70 - 71 - static __inline__ void atomic_sub(int a, atomic_t *v) 72 - { 73 - int t; 74 - 75 - __asm__ __volatile__( 76 - "1: lwarx %0,0,%3 # atomic_sub\n\ 77 - subf %0,%2,%0\n" 78 - PPC405_ERR77(0,%3) 79 - " stwcx. 
%0,0,%3 \n\ 80 - bne- 1b" 81 - : "=&r" (t), "=m" (v->counter) 82 - : "r" (a), "r" (&v->counter), "m" (v->counter) 83 - : "cc"); 84 - } 85 - 86 - static __inline__ int atomic_sub_return(int a, atomic_t *v) 87 - { 88 - int t; 89 - 90 - __asm__ __volatile__( 91 - "1: lwarx %0,0,%2 # atomic_sub_return\n\ 92 - subf %0,%1,%0\n" 93 - PPC405_ERR77(0,%2) 94 - " stwcx. %0,0,%2 \n\ 95 - bne- 1b" 96 - SMP_ISYNC 97 - : "=&r" (t) 98 - : "r" (a), "r" (&v->counter) 99 - : "cc", "memory"); 100 - 101 - return t; 102 - } 103 - 104 - static __inline__ void atomic_inc(atomic_t *v) 105 - { 106 - int t; 107 - 108 - __asm__ __volatile__( 109 - "1: lwarx %0,0,%2 # atomic_inc\n\ 110 - addic %0,%0,1\n" 111 - PPC405_ERR77(0,%2) 112 - " stwcx. %0,0,%2 \n\ 113 - bne- 1b" 114 - : "=&r" (t), "=m" (v->counter) 115 - : "r" (&v->counter), "m" (v->counter) 116 - : "cc"); 117 - } 118 - 119 - static __inline__ int atomic_inc_return(atomic_t *v) 120 - { 121 - int t; 122 - 123 - __asm__ __volatile__( 124 - "1: lwarx %0,0,%1 # atomic_inc_return\n\ 125 - addic %0,%0,1\n" 126 - PPC405_ERR77(0,%1) 127 - " stwcx. %0,0,%1 \n\ 128 - bne- 1b" 129 - SMP_ISYNC 130 - : "=&r" (t) 131 - : "r" (&v->counter) 132 - : "cc", "memory"); 133 - 134 - return t; 135 - } 136 - 137 - /* 138 - * atomic_inc_and_test - increment and test 139 - * @v: pointer of type atomic_t 140 - * 141 - * Atomically increments @v by 1 142 - * and returns true if the result is zero, or false for all 143 - * other cases. 144 - */ 145 - #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) 146 - 147 - static __inline__ void atomic_dec(atomic_t *v) 148 - { 149 - int t; 150 - 151 - __asm__ __volatile__( 152 - "1: lwarx %0,0,%2 # atomic_dec\n\ 153 - addic %0,%0,-1\n" 154 - PPC405_ERR77(0,%2)\ 155 - " stwcx. 
%0,0,%2\n\ 156 - bne- 1b" 157 - : "=&r" (t), "=m" (v->counter) 158 - : "r" (&v->counter), "m" (v->counter) 159 - : "cc"); 160 - } 161 - 162 - static __inline__ int atomic_dec_return(atomic_t *v) 163 - { 164 - int t; 165 - 166 - __asm__ __volatile__( 167 - "1: lwarx %0,0,%1 # atomic_dec_return\n\ 168 - addic %0,%0,-1\n" 169 - PPC405_ERR77(0,%1) 170 - " stwcx. %0,0,%1\n\ 171 - bne- 1b" 172 - SMP_ISYNC 173 - : "=&r" (t) 174 - : "r" (&v->counter) 175 - : "cc", "memory"); 176 - 177 - return t; 178 - } 179 - 180 - #define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) 181 - #define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) 182 - 183 - /* 184 - * Atomically test *v and decrement if it is greater than 0. 185 - * The function returns the old value of *v minus 1. 186 - */ 187 - static __inline__ int atomic_dec_if_positive(atomic_t *v) 188 - { 189 - int t; 190 - 191 - __asm__ __volatile__( 192 - "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ 193 - addic. %0,%0,-1\n\ 194 - blt- 2f\n" 195 - PPC405_ERR77(0,%1) 196 - " stwcx. %0,0,%1\n\ 197 - bne- 1b" 198 - SMP_ISYNC 199 - "\n\ 200 - 2:" : "=&r" (t) 201 - : "r" (&v->counter) 202 - : "cc", "memory"); 203 - 204 - return t; 205 - } 206 - 207 - #define __MB __asm__ __volatile__ (SMP_SYNC : : : "memory") 208 - #define smp_mb__before_atomic_dec() __MB 209 - #define smp_mb__after_atomic_dec() __MB 210 - #define smp_mb__before_atomic_inc() __MB 211 - #define smp_mb__after_atomic_inc() __MB 212 - 213 - #endif /* __KERNEL__ */ 214 - #endif /* _ASM_PPC_ATOMIC_H_ */
+1 -10
include/asm-ppc/io.h
··· 8 8 9 9 #include <asm/page.h> 10 10 #include <asm/byteorder.h> 11 + #include <asm/synch.h> 11 12 #include <asm/mmu.h> 12 13 13 14 #define SIO_CONFIG_RA 0x398 ··· 440 439 */ 441 440 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 442 441 #define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET) 443 - 444 - /* 445 - * Enforce In-order Execution of I/O: 446 - * Acts as a barrier to ensure all previous I/O accesses have 447 - * completed before any further ones are issued. 448 - */ 449 - extern inline void eieio(void) 450 - { 451 - __asm__ __volatile__ ("eieio" : : : "memory"); 452 - } 453 442 454 443 /* Enforce in-order execution of data I/O. 455 444 * No distinction between read/write on PPC; use eieio for all three.
+46 -34
include/asm-ppc64/atomic.h include/asm-powerpc/atomic.h
··· 1 + #ifndef _ASM_POWERPC_ATOMIC_H_ 2 + #define _ASM_POWERPC_ATOMIC_H_ 3 + 1 4 /* 2 - * PowerPC64 atomic operations 3 - * 4 - * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM 5 - * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM 6 - * 7 - * This program is free software; you can redistribute it and/or 8 - * modify it under the terms of the GNU General Public License 9 - * as published by the Free Software Foundation; either version 10 - * 2 of the License, or (at your option) any later version. 5 + * PowerPC atomic operations 11 6 */ 12 - 13 - #ifndef _ASM_PPC64_ATOMIC_H_ 14 - #define _ASM_PPC64_ATOMIC_H_ 15 - 16 - #include <asm/memory.h> 17 7 18 8 typedef struct { volatile int counter; } atomic_t; 19 9 20 - #define ATOMIC_INIT(i) { (i) } 10 + #ifdef __KERNEL__ 11 + #include <asm/synch.h> 12 + 13 + #define ATOMIC_INIT(i) { (i) } 21 14 22 15 #define atomic_read(v) ((v)->counter) 23 16 #define atomic_set(v,i) (((v)->counter) = (i)) 17 + 18 + /* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx. 19 + * The old ATOMIC_SYNC_FIX covered some but not all of this. 20 + */ 21 + #ifdef CONFIG_IBM405_ERR77 22 + #define PPC405_ERR77(ra,rb) "dcbt " #ra "," #rb ";" 23 + #else 24 + #define PPC405_ERR77(ra,rb) 25 + #endif 24 26 25 27 static __inline__ void atomic_add(int a, atomic_t *v) 26 28 { ··· 30 28 31 29 __asm__ __volatile__( 32 30 "1: lwarx %0,0,%3 # atomic_add\n\ 33 - add %0,%2,%0\n\ 34 - stwcx. %0,0,%3\n\ 31 + add %0,%2,%0\n" 32 + PPC405_ERR77(0,%3) 33 + " stwcx. %0,0,%3 \n\ 35 34 bne- 1b" 36 35 : "=&r" (t), "=m" (v->counter) 37 36 : "r" (a), "r" (&v->counter), "m" (v->counter) ··· 46 43 __asm__ __volatile__( 47 44 EIEIO_ON_SMP 48 45 "1: lwarx %0,0,%2 # atomic_add_return\n\ 49 - add %0,%1,%0\n\ 50 - stwcx. %0,0,%2\n\ 46 + add %0,%1,%0\n" 47 + PPC405_ERR77(0,%2) 48 + " stwcx. 
%0,0,%2 \n\ 51 49 bne- 1b" 52 50 ISYNC_ON_SMP 53 51 : "=&r" (t) ··· 66 62 67 63 __asm__ __volatile__( 68 64 "1: lwarx %0,0,%3 # atomic_sub\n\ 69 - subf %0,%2,%0\n\ 70 - stwcx. %0,0,%3\n\ 65 + subf %0,%2,%0\n" 66 + PPC405_ERR77(0,%3) 67 + " stwcx. %0,0,%3 \n\ 71 68 bne- 1b" 72 69 : "=&r" (t), "=m" (v->counter) 73 70 : "r" (a), "r" (&v->counter), "m" (v->counter) ··· 82 77 __asm__ __volatile__( 83 78 EIEIO_ON_SMP 84 79 "1: lwarx %0,0,%2 # atomic_sub_return\n\ 85 - subf %0,%1,%0\n\ 86 - stwcx. %0,0,%2\n\ 80 + subf %0,%1,%0\n" 81 + PPC405_ERR77(0,%2) 82 + " stwcx. %0,0,%2 \n\ 87 83 bne- 1b" 88 84 ISYNC_ON_SMP 89 85 : "=&r" (t) ··· 100 94 101 95 __asm__ __volatile__( 102 96 "1: lwarx %0,0,%2 # atomic_inc\n\ 103 - addic %0,%0,1\n\ 104 - stwcx. %0,0,%2\n\ 97 + addic %0,%0,1\n" 98 + PPC405_ERR77(0,%2) 99 + " stwcx. %0,0,%2 \n\ 105 100 bne- 1b" 106 101 : "=&r" (t), "=m" (v->counter) 107 102 : "r" (&v->counter), "m" (v->counter) ··· 116 109 __asm__ __volatile__( 117 110 EIEIO_ON_SMP 118 111 "1: lwarx %0,0,%1 # atomic_inc_return\n\ 119 - addic %0,%0,1\n\ 120 - stwcx. %0,0,%1\n\ 112 + addic %0,%0,1\n" 113 + PPC405_ERR77(0,%1) 114 + " stwcx. %0,0,%1 \n\ 121 115 bne- 1b" 122 116 ISYNC_ON_SMP 123 117 : "=&r" (t) ··· 144 136 145 137 __asm__ __volatile__( 146 138 "1: lwarx %0,0,%2 # atomic_dec\n\ 147 - addic %0,%0,-1\n\ 148 - stwcx. %0,0,%2\n\ 139 + addic %0,%0,-1\n" 140 + PPC405_ERR77(0,%2)\ 141 + " stwcx. %0,0,%2\n\ 149 142 bne- 1b" 150 143 : "=&r" (t), "=m" (v->counter) 151 144 : "r" (&v->counter), "m" (v->counter) ··· 160 151 __asm__ __volatile__( 161 152 EIEIO_ON_SMP 162 153 "1: lwarx %0,0,%1 # atomic_dec_return\n\ 163 - addic %0,%0,-1\n\ 164 - stwcx. %0,0,%1\n\ 154 + addic %0,%0,-1\n" 155 + PPC405_ERR77(0,%1) 156 + " stwcx. %0,0,%1\n\ 165 157 bne- 1b" 166 158 ISYNC_ON_SMP 167 159 : "=&r" (t) ··· 187 177 EIEIO_ON_SMP 188 178 "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ 189 179 addic. %0,%0,-1\n\ 190 - blt- 2f\n\ 191 - stwcx. 
%0,0,%1\n\ 180 + blt- 2f\n" 181 + PPC405_ERR77(0,%1) 182 + " stwcx. %0,0,%1\n\ 192 183 bne- 1b" 193 184 ISYNC_ON_SMP 194 185 "\n\ ··· 205 194 #define smp_mb__before_atomic_inc() smp_mb() 206 195 #define smp_mb__after_atomic_inc() smp_mb() 207 196 208 - #endif /* _ASM_PPC64_ATOMIC_H_ */ 197 + #endif /* __KERNEL__ */ 198 + #endif /* _ASM_POWERPC_ATOMIC_H_ */
+1 -1
include/asm-ppc64/bitops.h
··· 42 42 43 43 #ifdef __KERNEL__ 44 44 45 - #include <asm/memory.h> 45 + #include <asm/synch.h> 46 46 47 47 /* 48 48 * clear_bit doesn't imply a memory barrier
+1 -1
include/asm-ppc64/futex.h
··· 5 5 6 6 #include <linux/futex.h> 7 7 #include <asm/errno.h> 8 - #include <asm/memory.h> 8 + #include <asm/synch.h> 9 9 #include <asm/uaccess.h> 10 10 11 11 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+1 -1
include/asm-ppc64/io.h
··· 15 15 #ifdef CONFIG_PPC_ISERIES 16 16 #include <asm/iSeries/iSeries_io.h> 17 17 #endif 18 - #include <asm/memory.h> 18 + #include <asm/synch.h> 19 19 #include <asm/delay.h> 20 20 21 21 #include <asm-generic/iomap.h>
-61
include/asm-ppc64/memory.h
··· 1 - #ifndef _ASM_PPC64_MEMORY_H_ 2 - #define _ASM_PPC64_MEMORY_H_ 3 - 4 - /* 5 - * This program is free software; you can redistribute it and/or 6 - * modify it under the terms of the GNU General Public License 7 - * as published by the Free Software Foundation; either version 8 - * 2 of the License, or (at your option) any later version. 9 - */ 10 - 11 - #include <linux/config.h> 12 - 13 - /* 14 - * Arguably the bitops and *xchg operations don't imply any memory barrier 15 - * or SMP ordering, but in fact a lot of drivers expect them to imply 16 - * both, since they do on x86 cpus. 17 - */ 18 - #ifdef CONFIG_SMP 19 - #define EIEIO_ON_SMP "eieio\n" 20 - #define ISYNC_ON_SMP "\n\tisync" 21 - #define SYNC_ON_SMP "lwsync\n\t" 22 - #else 23 - #define EIEIO_ON_SMP 24 - #define ISYNC_ON_SMP 25 - #define SYNC_ON_SMP 26 - #endif 27 - 28 - static inline void eieio(void) 29 - { 30 - __asm__ __volatile__ ("eieio" : : : "memory"); 31 - } 32 - 33 - static inline void isync(void) 34 - { 35 - __asm__ __volatile__ ("isync" : : : "memory"); 36 - } 37 - 38 - #ifdef CONFIG_SMP 39 - #define eieio_on_smp() eieio() 40 - #define isync_on_smp() isync() 41 - #else 42 - #define eieio_on_smp() __asm__ __volatile__("": : :"memory") 43 - #define isync_on_smp() __asm__ __volatile__("": : :"memory") 44 - #endif 45 - 46 - /* Macros for adjusting thread priority (hardware multi-threading) */ 47 - #define HMT_very_low() asm volatile("or 31,31,31 # very low priority") 48 - #define HMT_low() asm volatile("or 1,1,1 # low priority") 49 - #define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority") 50 - #define HMT_medium() asm volatile("or 2,2,2 # medium priority") 51 - #define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority") 52 - #define HMT_high() asm volatile("or 3,3,3 # high priority") 53 - 54 - #define HMT_VERY_LOW "\tor 31,31,31 # very low priority\n" 55 - #define HMT_LOW "\tor 1,1,1 # low priority\n" 56 - #define HMT_MEDIUM_LOW "\tor 6,6,6 # medium low priority\n" 
57 - #define HMT_MEDIUM "\tor 2,2,2 # medium priority\n" 58 - #define HMT_MEDIUM_HIGH "\tor 5,5,5 # medium high priority\n" 59 - #define HMT_HIGH "\tor 3,3,3 # high priority\n" 60 - 61 - #endif
+8
include/asm-ppc64/processor.h
··· 368 368 #define mfasr() ({unsigned long rval; \ 369 369 asm volatile("mfasr %0" : "=r" (rval)); rval;}) 370 370 371 + /* Macros for adjusting thread priority (hardware multi-threading) */ 372 + #define HMT_very_low() asm volatile("or 31,31,31 # very low priority") 373 + #define HMT_low() asm volatile("or 1,1,1 # low priority") 374 + #define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority") 375 + #define HMT_medium() asm volatile("or 2,2,2 # medium priority") 376 + #define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority") 377 + #define HMT_high() asm volatile("or 3,3,3 # high priority") 378 + 371 379 static inline void set_tb(unsigned int upper, unsigned int lower) 372 380 { 373 381 mttbl(0);
+2 -2
include/asm-ppc64/system.h
··· 13 13 #include <asm/page.h> 14 14 #include <asm/processor.h> 15 15 #include <asm/hw_irq.h> 16 - #include <asm/memory.h> 16 + #include <asm/synch.h> 17 17 18 18 /* 19 19 * Memory barrier. ··· 48 48 #ifdef CONFIG_SMP 49 49 #define smp_mb() mb() 50 50 #define smp_rmb() rmb() 51 - #define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory") 51 + #define smp_wmb() eieio() 52 52 #define smp_read_barrier_depends() read_barrier_depends() 53 53 #else 54 54 #define smp_mb() __asm__ __volatile__("": : :"memory")