Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/asm-x86_64/processor.h at v2.6.24-rc4
/*
 * include/asm-x86_64/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_X86_64_PROCESSOR_H
#define __ASM_X86_64_PROCESSOR_H

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <linux/threads.h>
#include <asm/msr.h>
#include <asm/current.h>
#include <asm/system.h>
#include <asm/mmsegment.h>
#include <asm/percpu.h>
#include <linux/personality.h>
#include <linux/cpumask.h>
#include <asm/processor-flags.h>

#define TF_MASK		0x00000100
#define IF_MASK		0x00000200
#define IOPL_MASK	0x00003000
#define NT_MASK		0x00004000
#define VM_MASK		0x00020000
#define AC_MASK		0x00040000
#define VIF_MASK	0x00080000	/* virtual interrupt flag */
#define VIP_MASK	0x00100000	/* virtual interrupt pending */
#define ID_MASK		0x00200000

#define desc_empty(desc) \
		(!((desc)->a | (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	__u8	x86_model;
	__u8	x86_mask;
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	__u32	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;	/* in KB */
	int	x86_clflush_size;
	int	x86_cache_alignment;
	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
	__u8	x86_virt_bits, x86_phys_bits;
	__u8	x86_max_cores;	/* cpuid returned max cores value */
	__u32	x86_power;
	__u32	extended_cpuid_level;	/* Max extended CPUID function supported */
	unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
#endif
	__u8	apicid;
#ifdef CONFIG_SMP
	__u8	booted_cores;	/* number of cores as seen by OS */
	__u8	phys_proc_id;	/* Physical Processor id. */
	__u8	cpu_core_id;	/* Core id. */
	__u8	cpu_index;	/* index into per_cpu list */
#endif
} ____cacheline_aligned;

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_NEXGEN	4
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NUM		8
#define X86_VENDOR_UNKNOWN	0xff

#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	cpu_data(smp_processor_id())
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif

extern char ignore_irq13;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
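/*
 * Illustrative example (not part of the original header): kernel code
 * reads the data above via boot_cpu_data or the cpu_data() accessor.
 * A hypothetical caller, assuming printk from <linux/kernel.h>:
 */
#if 0
static void example_print_cpu(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	printk(KERN_INFO "CPU%d: %s, family %d, cache %d KB\n",
	       cpu, c->x86_model_id, c->x86, c->x86_cache_size);
}
#endif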
/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPU's that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
{
	mmu_cr4_features |= mask;
	__asm__("movq %%cr4,%%rax\n\t"
		"orq %0,%%rax\n\t"
		"movq %%rax,%%cr4\n"
		: : "irg" (mask)
		:"ax");
}

static inline void clear_in_cr4 (unsigned long mask)
{
	mmu_cr4_features &= ~mask;
	__asm__("movq %%cr4,%%rax\n\t"
		"andq %0,%%rax\n\t"
		"movq %%rax,%%cr4\n"
		: : "irg" (~mask)
		:"ax");
}


/*
 * User space process size. 47bits minus one guard page.
 */
#define TASK_SIZE64	(0x800000000000UL - 4096)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)

#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS	65536
#define IO_BITMAP_BYTES	(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS	(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000

struct i387_fxsave_struct {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
	u32	padding[24];
} __attribute__ ((aligned (16)));

union i387_union {
	struct i387_fxsave_struct	fxsave;
};

struct tss_struct {
	u32 reserved1;
	u64 rsp0;
	u64 rsp1;
	u64 rsp2;
	u64 reserved2;
	u64 ist[7];
	u32 reserved3;
	u32 reserved4;
	u16 reserved5;
	u16 io_bitmap_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit. Thus we have:
	 *
	 * 128 bytes, the bitmap itself, for ports 0..0x3ff
	 * 8 bytes, for an extra "long" of ~0UL
	 */
	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
} __attribute__((packed)) ____cacheline_aligned;


extern struct cpuinfo_x86 boot_cpu_data;
DECLARE_PER_CPU(struct tss_struct, init_tss);
/* Save the original ist values for checking stack pointers during debugging */
struct orig_ist {
	unsigned long ist[7];
};
DECLARE_PER_CPU(struct orig_ist, orig_ist);

#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
#define ARCH_MIN_TASKALIGN	16
#define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif
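/*
 * Illustrative example (not part of the original header): early setup
 * code uses set_in_cr4()/clear_in_cr4() defined above so that a feature
 * bit is both written to %cr4 and remembered in mmu_cr4_features for
 * CPUs that boot later. X86_CR4_PGE comes from <asm/processor-flags.h>,
 * already included at the top of this file.
 */
#if 0
static void example_enable_global_pages(void)
{
	set_in_cr4(X86_CR4_PGE);	/* enable global pages in %cr4 */
}
#endif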
struct thread_struct {
	unsigned long	rsp0;
	unsigned long	rsp;
	unsigned long	userrsp;	/* Copy from PDA */
	unsigned long	fs;
	unsigned long	gs;
	unsigned short	es, ds, fsindex, gsindex;
/* Hardware debugging registers */
	unsigned long	debugreg0;
	unsigned long	debugreg1;
	unsigned long	debugreg2;
	unsigned long	debugreg3;
	unsigned long	debugreg6;
	unsigned long	debugreg7;
/* fault info */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387  __attribute__((aligned(16)));
/* IO permissions. the bitmap could be moved into the GDT, that would make
   switch faster for a limited number of ioperm using tasks. -AK */
	int		ioperm;
	unsigned long	*io_bitmap_ptr;
	unsigned io_bitmap_max;
/* cached TLS descriptors. */
	u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
} __attribute__((aligned(16)));

#define INIT_THREAD  { \
	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }

#define start_thread(regs,new_rip,new_rsp) do { \
	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
	load_gs_index(0);						\
	(regs)->rip = (new_rip);					\
	(regs)->rsp = (new_rsp);					\
	write_pda(oldrsp, (new_rsp));					\
	(regs)->cs = __USER_CS;						\
	(regs)->ss = __USER_DS;						\
	(regs)->eflags = 0x200;						\
	set_fs(USER_DS);						\
} while(0)

#define get_debugreg(var, register)			\
	__asm__("movq %%db" #register ", %0"		\
		:"=r" (var))
#define set_debugreg(value, register)			\
	__asm__("movq %0,%%db" #register		\
		: /* no output */			\
		:"r" (value))

struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

/*
 * Return saved PC of a blocked thread.
 * What is this good for? it will be always the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))

extern unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
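/*
 * Illustrative example (hypothetical caller, not part of the original
 * header): spawning a kernel thread with kernel_thread() declared above.
 * The CLONE_* and SIGCHLD flags come from <linux/sched.h> and the signal
 * headers.
 */
#if 0
static int example_worker(void *unused)
{
	/* ... do some work in process context ... */
	return 0;
}

static void example_spawn(void)
{
	kernel_thread(example_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
}
#endif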
struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};


#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2)
#define ASM_NOP1 P6_NOP1
#define ASM_NOP2 P6_NOP2
#define ASM_NOP3 P6_NOP3
#define ASM_NOP4 P6_NOP4
#define ASM_NOP5 P6_NOP5
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
#else
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#endif

/* Opteron nops */
#define K8_NOP1 ".byte 0x90\n"
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4

/* P6 nops */
/* uses eax dependencies (Intel-recommended choice) */
#define P6_NOP1	".byte 0x90\n"
#define P6_NOP2	".byte 0x66,0x90\n"
#define P6_NOP3	".byte 0x0f,0x1f,0x00\n"
#define P6_NOP4	".byte 0x0f,0x1f,0x40,0\n"
#define P6_NOP5	".byte 0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP6	".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP7	".byte 0x0f,0x1f,0x80,0,0,0,0\n"
#define P6_NOP8	".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"

#define ASM_NOP_MAX 8
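/*
 * Illustrative example (not part of the original header): the ASM_NOP*
 * macros above are textual assembly fragments, meant to be pasted into
 * inline asm; the alternatives-patching code uses them as padding.
 * A hypothetical direct use:
 */
#if 0
static inline void example_four_byte_nop(void)
{
	asm volatile(ASM_NOP4);	/* one 4-byte nop, K8 or P6 encoding */
}
#endif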
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}

#define ARCH_HAS_PREFETCHW 1
static inline void prefetchw(void *x)
{
	alternative_input("prefetcht0 (%1)",
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

#define ARCH_HAS_SPINLOCK_PREFETCH 1

#define spin_lock_prefetch(x)	prefetchw(x)

#define cpu_relax()	rep_nop()

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		"sti; .byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

#define stack_current()						\
({								\
	struct thread_info *ti;					\
	asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
	ti->task;						\
})

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
/* Boot loader type from the setup header */
extern int bootloader_type;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1

#endif /* __ASM_X86_64_PROCESSOR_H */
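/*
 * Illustrative example (hypothetical, not part of the original header):
 * how an idle loop might pair __monitor()/__mwait() from above to sleep
 * until another CPU writes the watched flag; the kernel's idle code wraps
 * this pattern behind mwait_idle_with_hints().
 */
#if 0
static void example_wait_on(volatile unsigned long *flag)
{
	while (!*flag) {
		__monitor((void *)flag, 0, 0);	/* arm the address monitor */
		if (!*flag)
			__mwait(0, 0);		/* sleep until *flag is written */
	}
}
#endif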