/* From Linux v2.6.16: include/asm-sh64/system.h */
1#ifndef __ASM_SH64_SYSTEM_H 2#define __ASM_SH64_SYSTEM_H 3 4/* 5 * This file is subject to the terms and conditions of the GNU General Public 6 * License. See the file "COPYING" in the main directory of this archive 7 * for more details. 8 * 9 * include/asm-sh64/system.h 10 * 11 * Copyright (C) 2000, 2001 Paolo Alberelli 12 * Copyright (C) 2003 Paul Mundt 13 * Copyright (C) 2004 Richard Curnow 14 * 15 */ 16 17#include <linux/config.h> 18#include <asm/registers.h> 19#include <asm/processor.h> 20 21/* 22 * switch_to() should switch tasks to task nr n, first 23 */ 24 25typedef struct { 26 unsigned long seg; 27} mm_segment_t; 28 29extern struct task_struct *sh64_switch_to(struct task_struct *prev, 30 struct thread_struct *prev_thread, 31 struct task_struct *next, 32 struct thread_struct *next_thread); 33 34#define switch_to(prev,next,last) \ 35 do {\ 36 if (last_task_used_math != next) {\ 37 struct pt_regs *regs = next->thread.uregs;\ 38 if (regs) regs->sr |= SR_FD;\ 39 }\ 40 last = sh64_switch_to(prev, &prev->thread, next, &next->thread);\ 41 } while(0) 42 43#define nop() __asm__ __volatile__ ("nop") 44 45#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 46 47#define tas(ptr) (xchg((ptr), 1)) 48 49extern void __xchg_called_with_bad_pointer(void); 50 51#define mb() __asm__ __volatile__ ("synco": : :"memory") 52#define rmb() mb() 53#define wmb() __asm__ __volatile__ ("synco": : :"memory") 54#define read_barrier_depends() do { } while (0) 55 56#ifdef CONFIG_SMP 57#define smp_mb() mb() 58#define smp_rmb() rmb() 59#define smp_wmb() wmb() 60#define smp_read_barrier_depends() read_barrier_depends() 61#else 62#define smp_mb() barrier() 63#define smp_rmb() barrier() 64#define smp_wmb() barrier() 65#define smp_read_barrier_depends() do { } while (0) 66#endif /* CONFIG_SMP */ 67 68#define set_rmb(var, value) do { xchg(&var, value); } while (0) 69#define set_mb(var, value) set_rmb(var, value) 70#define set_wmb(var, value) do { var = value; 
wmb(); } while (0) 71 72/* Interrupt Control */ 73#ifndef HARD_CLI 74#define SR_MASK_L 0x000000f0L 75#define SR_MASK_LL 0x00000000000000f0LL 76#else 77#define SR_MASK_L 0x10000000L 78#define SR_MASK_LL 0x0000000010000000LL 79#endif 80 81static __inline__ void local_irq_enable(void) 82{ 83 /* cli/sti based on SR.BL */ 84 unsigned long long __dummy0, __dummy1=~SR_MASK_LL; 85 86 __asm__ __volatile__("getcon " __SR ", %0\n\t" 87 "and %0, %1, %0\n\t" 88 "putcon %0, " __SR "\n\t" 89 : "=&r" (__dummy0) 90 : "r" (__dummy1)); 91} 92 93static __inline__ void local_irq_disable(void) 94{ 95 /* cli/sti based on SR.BL */ 96 unsigned long long __dummy0, __dummy1=SR_MASK_LL; 97 __asm__ __volatile__("getcon " __SR ", %0\n\t" 98 "or %0, %1, %0\n\t" 99 "putcon %0, " __SR "\n\t" 100 : "=&r" (__dummy0) 101 : "r" (__dummy1)); 102} 103 104#define local_save_flags(x) \ 105(__extension__ ({ unsigned long long __dummy=SR_MASK_LL; \ 106 __asm__ __volatile__( \ 107 "getcon " __SR ", %0\n\t" \ 108 "and %0, %1, %0" \ 109 : "=&r" (x) \ 110 : "r" (__dummy));})) 111 112#define local_irq_save(x) \ 113(__extension__ ({ unsigned long long __d2=SR_MASK_LL, __d1; \ 114 __asm__ __volatile__( \ 115 "getcon " __SR ", %1\n\t" \ 116 "or %1, r63, %0\n\t" \ 117 "or %1, %2, %1\n\t" \ 118 "putcon %1, " __SR "\n\t" \ 119 "and %0, %2, %0" \ 120 : "=&r" (x), "=&r" (__d1) \ 121 : "r" (__d2));})); 122 123#define local_irq_restore(x) do { \ 124 if ( ((x) & SR_MASK_L) == 0 ) /* dropping to 0 ? 
*/ \ 125 local_irq_enable(); /* yes...re-enable */ \ 126} while (0) 127 128#define irqs_disabled() \ 129({ \ 130 unsigned long flags; \ 131 local_save_flags(flags); \ 132 (flags != 0); \ 133}) 134 135static inline unsigned long xchg_u32(volatile int * m, unsigned long val) 136{ 137 unsigned long flags, retval; 138 139 local_irq_save(flags); 140 retval = *m; 141 *m = val; 142 local_irq_restore(flags); 143 return retval; 144} 145 146static inline unsigned long xchg_u8(volatile unsigned char * m, unsigned long val) 147{ 148 unsigned long flags, retval; 149 150 local_irq_save(flags); 151 retval = *m; 152 *m = val & 0xff; 153 local_irq_restore(flags); 154 return retval; 155} 156 157static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size) 158{ 159 switch (size) { 160 case 4: 161 return xchg_u32(ptr, x); 162 break; 163 case 1: 164 return xchg_u8(ptr, x); 165 break; 166 } 167 __xchg_called_with_bad_pointer(); 168 return x; 169} 170 171/* XXX 172 * disable hlt during certain critical i/o operations 173 */ 174#define HAVE_DISABLE_HLT 175void disable_hlt(void); 176void enable_hlt(void); 177 178 179#define smp_mb() barrier() 180#define smp_rmb() barrier() 181#define smp_wmb() barrier() 182 183#ifdef CONFIG_SH_ALPHANUMERIC 184/* This is only used for debugging. */ 185extern void print_seg(char *file,int line); 186#define PLS() print_seg(__FILE__,__LINE__) 187#else /* CONFIG_SH_ALPHANUMERIC */ 188#define PLS() 189#endif /* CONFIG_SH_ALPHANUMERIC */ 190 191#define PL() printk("@ <%s,%s:%d>\n",__FILE__,__FUNCTION__,__LINE__) 192 193#define arch_align_stack(x) (x) 194 195#endif /* __ASM_SH64_SYSTEM_H */