/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H

#include <linux/kernel.h>

#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */

#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int set_dabr(unsigned long dabr);
#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif
extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void chrp_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif
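/*
 * Illustration only (not part of the original header): a minimal
 * sketch of how the smp_wmb()/smp_rmb() pair defined above is meant
 * to be used between two CPUs.  All barrier_example_* names are
 * hypothetical, not kernel symbols.
 */
#if 0
static int barrier_example_data;
static volatile int barrier_example_ready;

static inline void barrier_example_publish(int v)	/* runs on CPU 0 */
{
	barrier_example_data = v;
	smp_wmb();		/* order the data store before the flag store */
	barrier_example_ready = 1;
}

static inline int barrier_example_consume(void)		/* runs on CPU 1 */
{
	while (!barrier_example_ready)
		;		/* spin until the producer sets the flag */
	smp_rmb();		/* order the flag load before the data load */
	return barrier_example_data;
}
#endif /* 0 - illustration */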
#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;

static __inline__ unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long) xchg_u32(ptr, x);
#if 0	/* xchg_u64 doesn't exist on 32-bit PPC */
	case 8:
		return (unsigned long) xchg_u64(ptr, x);
#endif /* 0 */
	}
	__xchg_called_with_bad_pointer();
	return x;
}

extern inline void * xchg_ptr(void * m, void * val)
{
	return (void *) xchg_u32(m, (unsigned long) val);
}

#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned int prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
#ifdef CONFIG_SMP
"	sync\n"
#endif /* CONFIG_SMP */
"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
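/*
 * Illustration only (not in the original header): the usual retry
 * loop built on __cmpxchg_u32 above.  The name example_atomic_add is
 * hypothetical.  __cmpxchg_u32 returns the value it actually found
 * at *p; the update took effect only if that equals the 'old' we
 * passed in, so any race is detected and retried.
 */
#if 0
static inline unsigned int example_atomic_add(volatile unsigned int *p,
					      unsigned int inc)
{
	unsigned int old, prev;

	do {
		old = *p;				/* snapshot current value */
		prev = __cmpxchg_u32(p, old, old + inc);
	} while (prev != old);				/* lost a race: retry */

	return old + inc;
}
#endif /* 0 - illustration */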
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 on 32-bit PPC */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif /* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */
#endif /* __PPC_SYSTEM_H */
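/*
 * Illustration only (not in the original file): xchg() as defined
 * above is the classic building block for a test-and-set lock.  The
 * example_lock/example_unlock names are hypothetical; real kernel
 * code would use the spinlock API, which also supplies the acquire
 * barrier this sketch omits after a successful xchg().
 *
 *	static volatile unsigned int example_lock_word;
 *
 *	static inline void example_lock(void)
 *	{
 *		while (xchg(&example_lock_word, 1) != 0)
 *			;	// someone else holds the lock; spin
 *	}
 *
 *	static inline void example_unlock(void)
 *	{
 *		smp_mb();	// order critical-section accesses before release
 *		example_lock_word = 0;
 *	}
 */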