#ifndef __PPC64_SYSTEM_H
#define __PPC64_SYSTEM_H

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/memory.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though.
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define set_wmb(var, value)	do { var = value; smp_wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__("": : :"memory")
#define smp_rmb()	__asm__ __volatile__("": : :"memory")
#define smp_wmb()	__asm__ __volatile__("": : :"memory")
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
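/*
 * Editor's note -- an illustrative sketch, not part of the original
 * header.  The barriers above are typically used in pairs; with
 * hypothetical shared variables "data" and "flag", a producer/consumer
 * might look roughly like this:
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	data = 42;			while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					use(data);
 *
 * On CONFIG_SMP the producer side costs only an eieio (ordering the
 * two cacheable stores) and the consumer side an lwsync; on UP builds
 * both collapse to compiler-only barriers.
 */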
#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

#ifdef CONFIG_DEBUGGER

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

#ifdef CONFIG_XMON
extern void xmon_init(int enable);
#endif

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
extern void _exception(int signr, struct pt_regs *regs, int code,
		       unsigned long addr);
extern int fix_alignment(struct pt_regs *regs);
extern void bad_page_fault(struct pt_regs *regs, unsigned long address,
			   int sig);
extern void show_regs(struct pt_regs * regs);
extern void low_hash_fault(struct pt_regs *regs, unsigned long address);
extern int die(const char *str, struct pt_regs *regs, long err);

extern int _get_PVR(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_fp(void);
extern void giveup_altivec(struct task_struct *);
extern void disable_kernel_altivec(void);
extern void enable_kernel_altivec(void);
extern int emulate_altivec(struct pt_regs *);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

extern int mem_init_done;	/* set on boot once kmalloc can be called */

/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);

extern struct task_struct *__switch_to(struct task_struct *,
				       struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct * _switch(struct thread_struct *prev,
				    struct thread_struct *next);

static inline int __is_processor(unsigned long pv)
{
	unsigned long pvr;
	asm("mfspr %0, 0x11F" : "=r" (pvr));
	return(PVR_VER(pvr) == pv);
}

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 *
 * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
 * is more like most of the other architectures.
 */
static __inline__ unsigned long
__xchg_u32(volatile unsigned int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx %0,0,%3		# __xchg_u32\n\
	stwcx. %2,0,%3\n\
2:	bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return (dummy);
}

static __inline__ unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx %0,0,%3		# __xchg_u64\n\
	stdcx. %2,0,%3\n\
2:	bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return (dummy);
}
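/*
 * Editor's note -- an illustrative sketch, not part of the original
 * header.  The lwarx/stwcx. and ldarx/stdcx. sequences above are the
 * usual load-reserve/store-conditional retry loops; callers normally
 * reach them through the xchg() and cmpxchg() wrappers defined below.
 * With a hypothetical "counter" variable, an atomic add built on
 * cmpxchg() looks roughly like:
 *
 *	static unsigned long counter;
 *
 *	unsigned long old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * and a minimal test-and-set lock acquire is a spin on
 * xchg(&lock_word, 1) (or tas(&lock_word)) until it returns 0.
 */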
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							\
  ({									\
     __typeof__(*(ptr)) _x_ = (x);					\
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define tas(ptr) (xchg((ptr),1))

#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n\
	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

static __inline__ unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),	\
	 (unsigned long)(n),sizeof(*(ptr))))

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define arch_align_stack(x) (x)

extern unsigned long reloc_offset(void);

#endif /* __KERNEL__ */
#endif