/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 Waldorf GMBH
 * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/cpumask.h>
#include <linux/threads.h>

#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>
#include <asm/system.h>

/*
 * Return the current instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

/*
 * System setup and hardware flags.
 */
extern void (*cpu_wait)(void);

extern unsigned int vced_count, vcei_count;

#ifdef CONFIG_32BIT
/*
 * User space process size: 2GB.  This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#define TASK_SIZE	0x7fff8000UL

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
#endif

#ifdef CONFIG_64BIT
/*
 * User space process size: 1TB.  This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.  TASK_SIZE
 * is limited to 1TB by the R4000 architecture; R10000 and better can
 * support 16TB; the architectural reserve for future expansion is
 * 8192EB ...
 */
#define TASK_SIZE32	0x7fff8000UL
#define TASK_SIZE	0x10000000000UL

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE \
	(test_thread_flag(TIF_32BIT_ADDR) ? \
		PAGE_ALIGN(TASK_SIZE32 / 3) : PAGE_ALIGN(TASK_SIZE / 3))
#endif
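/*
 * Worked example (illustrative only, assuming the usual 4 KB PAGE_SIZE;
 * larger MIPS page sizes round these values differently):
 *
 *   CONFIG_32BIT:  TASK_SIZE / 3 = 0x7fff8000 / 3 = 0x2aaa8000, which is
 *                  already page aligned, so TASK_UNMAPPED_BASE = 0x2aaa8000.
 *
 *   CONFIG_64BIT:  TASK_SIZE / 3 = 0x10000000000 / 3 = 0x5555555555, which
 *                  PAGE_ALIGN() rounds up to 0x5555556000.  A task running
 *                  with TIF_32BIT_ADDR set uses TASK_SIZE32 instead and gets
 *                  the same 0x2aaa8000 base as a 32-bit kernel.
 *
 * In other words, non-MAP_FIXED mmap()s start their search roughly one third
 * of the way into the user address space, leaving the range below for the
 * program image and heap.
 */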
#define NUM_FPU_REGS	32

typedef __u64 fpureg_t;

/*
 * It would be nice to add some more fields for emulator statistics, but there
 * are a number of fixed offsets in offset.h and elsewhere that would have to
 * be recalculated by hand.  So the additional information will be private to
 * the FPU emulator for now.  See asm-mips/fpu_emulator.h.
 */

struct mips_fpu_struct {
	fpureg_t	fpr[NUM_FPU_REGS];
	unsigned int	fcr31;
};

#define NUM_DSP_REGS	6

typedef __u32 dspreg_t;

struct mips_dsp_state {
	dspreg_t	dspr[NUM_DSP_REGS];
	unsigned int	dspcontrol;
};

#define INIT_CPUMASK { \
	{0,} \
}

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define ARCH_MIN_TASKALIGN	8

struct mips_abi;

/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Saved main processor registers. */
	unsigned long reg16;
	unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
	unsigned long reg29, reg30, reg31;

	/* Saved cp0 stuff. */
	unsigned long cp0_status;

	/* Saved fpu/fpu emulator stuff. */
	struct mips_fpu_struct fpu;
#ifdef CONFIG_MIPS_MT_FPAFF
	/* Emulated instruction count */
	unsigned long emulated_fp;
	/* Saved per-thread scheduler affinity mask */
	cpumask_t user_cpus_allowed;
#endif /* CONFIG_MIPS_MT_FPAFF */

	/* Saved state of the DSP ASE, if available. */
	struct mips_dsp_state dsp;

	/* Other stuff associated with the thread. */
	unsigned long cp0_badvaddr;	/* Last user fault */
	unsigned long cp0_baduaddr;	/* Last kernel fault accessing USEG */
	unsigned long error_code;
	unsigned long trap_no;
	unsigned long irix_trampoline;	/* Wheee... */
	unsigned long irix_oldctx;
	struct mips_abi *abi;
};

#ifdef CONFIG_MIPS_MT_FPAFF
#define FPAFF_INIT \
	.emulated_fp = 0, \
	.user_cpus_allowed = INIT_CPUMASK,
#else
#define FPAFF_INIT
#endif /* CONFIG_MIPS_MT_FPAFF */

#define INIT_THREAD { \
	/* \
	 * Saved main processor registers \
	 */ \
	.reg16 = 0, \
	.reg17 = 0, \
	.reg18 = 0, \
	.reg19 = 0, \
	.reg20 = 0, \
	.reg21 = 0, \
	.reg22 = 0, \
	.reg23 = 0, \
	.reg29 = 0, \
	.reg30 = 0, \
	.reg31 = 0, \
	/* \
	 * Saved cp0 stuff \
	 */ \
	.cp0_status = 0, \
	/* \
	 * Saved FPU/FPU emulator stuff \
	 */ \
	.fpu = { \
		.fpr = {0,}, \
		.fcr31 = 0, \
	}, \
	/* \
	 * FPU affinity state (null if not FPAFF) \
	 */ \
	FPAFF_INIT \
	/* \
	 * Saved DSP stuff \
	 */ \
	.dsp = { \
		.dspr = {0, }, \
		.dspcontrol = 0, \
	}, \
	/* \
	 * Other stuff associated with the process \
	 */ \
	.cp0_badvaddr = 0, \
	.cp0_baduaddr = 0, \
	.error_code = 0, \
	.trap_no = 0, \
	.irix_trampoline = 0, \
	.irix_oldctx = 0, \
}

struct task_struct;

/* Free all resources held by a thread. */
#define release_thread(thread) do { } while (0)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)

extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);

/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);

unsigned long get_wchan(struct task_struct *p);

#define __KSTK_TOS(tsk)		((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
#define task_pt_regs(tsk)	((struct pt_regs *)__KSTK_TOS(tsk) - 1)
#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->cp0_epc)
#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk)	(task_pt_regs(tsk)->cp0_status)

#define cpu_relax()	barrier()

/*
 * Return_address is a replacement for __builtin_return_address(count)
 * which on certain architectures cannot reasonably be implemented in GCC
 * (MIPS, Alpha) or is unusable with -fomit-frame-pointer (i386).
 * Note that __builtin_return_address(x>=1) is forbidden because GCC
 * aborts compilation on some CPUs.  It's simply not possible to unwind
 * some CPUs' stack frames.
 *
 * __builtin_return_address works only for non-leaf functions.  We avoid the
 * overhead of a function call by forcing the compiler to save the return
 * address register on the stack.
 */
#define return_address() ({ __asm__ __volatile__("" : : : "$31"); __builtin_return_address(0); })

#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH

static inline void prefetch(const void *addr)
{
	__asm__ __volatile__(
	"	.set	mips4		\n"
	"	pref	%0, (%1)	\n"
	"	.set	mips0		\n"
	:
	: "i" (Pref_Load), "r" (addr));
}

#endif

#endif /* _ASM_PROCESSOR_H */
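/*
 * Illustrative sketch (kept under #if 0, not compiled) of how the accessors
 * above fit together: a task's user-mode register frame (struct pt_regs)
 * lives just below a 32-byte pad at the top of its kernel stack, which is
 * exactly what __KSTK_TOS() and task_pt_regs() compute, and the KSTK_*
 * macros simply read fields of that frame.  The helper name
 * dump_user_context() is made up for this example.
 */
#if 0
#include <linux/kernel.h>
#include <linux/sched.h>

static void dump_user_context(struct task_struct *tsk)
{
	/* User register frame saved at the top of the task's kernel stack. */
	struct pt_regs *regs = task_pt_regs(tsk);

	/* KSTK_EIP(), KSTK_ESP() and KSTK_STATUS() expand to these fields. */
	printk(KERN_DEBUG "%s[%d]: epc=%lx sp=%lx status=%lx\n",
	       tsk->comm, tsk->pid, regs->cp0_epc, regs->regs[29],
	       regs->cp0_status);
}
#endif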