/* arch/sh/include/asm/system.h — from Linux v2.6.38 */
1#ifndef __ASM_SH_SYSTEM_H 2#define __ASM_SH_SYSTEM_H 3 4/* 5 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima 6 * Copyright (C) 2002 Paul Mundt 7 */ 8 9#include <linux/irqflags.h> 10#include <linux/compiler.h> 11#include <linux/linkage.h> 12#include <asm/types.h> 13#include <asm/uncached.h> 14 15#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */ 16 17/* 18 * A brief note on ctrl_barrier(), the control register write barrier. 19 * 20 * Legacy SH cores typically require a sequence of 8 nops after 21 * modification of a control register in order for the changes to take 22 * effect. On newer cores (like the sh4a and sh5) this is accomplished 23 * with icbi. 24 * 25 * Also note that on sh4a in the icbi case we can forego a synco for the 26 * write barrier, as it's not necessary for control registers. 27 * 28 * Historically we have only done this type of barrier for the MMUCR, but 29 * it's also necessary for the CCR, so we make it generic here instead. 30 */ 31#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) 32#define mb() __asm__ __volatile__ ("synco": : :"memory") 33#define rmb() mb() 34#define wmb() __asm__ __volatile__ ("synco": : :"memory") 35#define ctrl_barrier() __icbi(PAGE_OFFSET) 36#define read_barrier_depends() do { } while(0) 37#else 38#define mb() __asm__ __volatile__ ("": : :"memory") 39#define rmb() mb() 40#define wmb() __asm__ __volatile__ ("": : :"memory") 41#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") 42#define read_barrier_depends() do { } while(0) 43#endif 44 45#ifdef CONFIG_SMP 46#define smp_mb() mb() 47#define smp_rmb() rmb() 48#define smp_wmb() wmb() 49#define smp_read_barrier_depends() read_barrier_depends() 50#else 51#define smp_mb() barrier() 52#define smp_rmb() barrier() 53#define smp_wmb() barrier() 54#define smp_read_barrier_depends() do { } while(0) 55#endif 56 57#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) 58 59#ifdef CONFIG_GUSA_RB 60#include 
<asm/cmpxchg-grb.h> 61#elif defined(CONFIG_CPU_SH4A) 62#include <asm/cmpxchg-llsc.h> 63#else 64#include <asm/cmpxchg-irq.h> 65#endif 66 67extern void __xchg_called_with_bad_pointer(void); 68 69#define __xchg(ptr, x, size) \ 70({ \ 71 unsigned long __xchg__res; \ 72 volatile void *__xchg_ptr = (ptr); \ 73 switch (size) { \ 74 case 4: \ 75 __xchg__res = xchg_u32(__xchg_ptr, x); \ 76 break; \ 77 case 1: \ 78 __xchg__res = xchg_u8(__xchg_ptr, x); \ 79 break; \ 80 default: \ 81 __xchg_called_with_bad_pointer(); \ 82 __xchg__res = x; \ 83 break; \ 84 } \ 85 \ 86 __xchg__res; \ 87}) 88 89#define xchg(ptr,x) \ 90 ((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr)))) 91 92/* This function doesn't exist, so you'll get a linker error 93 * if something tries to do an invalid cmpxchg(). */ 94extern void __cmpxchg_called_with_bad_pointer(void); 95 96#define __HAVE_ARCH_CMPXCHG 1 97 98static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, 99 unsigned long new, int size) 100{ 101 switch (size) { 102 case 4: 103 return __cmpxchg_u32(ptr, old, new); 104 } 105 __cmpxchg_called_with_bad_pointer(); 106 return old; 107} 108 109#define cmpxchg(ptr,o,n) \ 110 ({ \ 111 __typeof__(*(ptr)) _o_ = (o); \ 112 __typeof__(*(ptr)) _n_ = (n); \ 113 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ 114 (unsigned long)_n_, sizeof(*(ptr))); \ 115 }) 116 117struct pt_regs; 118 119extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn)); 120void free_initmem(void); 121void free_initrd_mem(unsigned long start, unsigned long end); 122 123extern void *set_exception_table_vec(unsigned int vec, void *handler); 124 125static inline void *set_exception_table_evt(unsigned int evt, void *handler) 126{ 127 return set_exception_table_vec(evt >> 5, handler); 128} 129 130/* 131 * SH-2A has both 16 and 32-bit opcodes, do lame encoding checks. 
132 */ 133#ifdef CONFIG_CPU_SH2A 134extern unsigned int instruction_size(unsigned int insn); 135#elif defined(CONFIG_SUPERH32) 136#define instruction_size(insn) (2) 137#else 138#define instruction_size(insn) (4) 139#endif 140 141void per_cpu_trap_init(void); 142void default_idle(void); 143void cpu_idle_wait(void); 144void stop_this_cpu(void *); 145 146#ifdef CONFIG_SUPERH32 147#define BUILD_TRAP_HANDLER(name) \ 148asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \ 149 unsigned long r6, unsigned long r7, \ 150 struct pt_regs __regs) 151 152#define TRAP_HANDLER_DECL \ 153 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); \ 154 unsigned int vec = regs->tra; \ 155 (void)vec; 156#else 157#define BUILD_TRAP_HANDLER(name) \ 158asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs) 159#define TRAP_HANDLER_DECL 160#endif 161 162BUILD_TRAP_HANDLER(address_error); 163BUILD_TRAP_HANDLER(debug); 164BUILD_TRAP_HANDLER(bug); 165BUILD_TRAP_HANDLER(breakpoint); 166BUILD_TRAP_HANDLER(singlestep); 167BUILD_TRAP_HANDLER(fpu_error); 168BUILD_TRAP_HANDLER(fpu_state_restore); 169BUILD_TRAP_HANDLER(nmi); 170 171#define arch_align_stack(x) (x) 172 173struct mem_access { 174 unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt); 175 unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt); 176}; 177 178#ifdef CONFIG_SUPERH32 179# include "system_32.h" 180#else 181# include "system_64.h" 182#endif 183 184#endif