/*
 * Linux kernel mirror (for testing):
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * at tag v2.6.17 (371 lines, 11 kB)
 */
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>

#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"

#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT					\
	    "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	\
	    "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	\
	    "call __switch_to\n\t"					\
	    ".globl thread_return\n"					\
	    "thread_return:\n\t"					\
	    "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			\
	    "movq %P[thread_info](%%rsi),%%r8\n\t"			\
	    LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"	\
	    "movq %%rax,%%rdi\n\t"					\
	    "jc ret_from_fork\n\t"					\
	    RESTORE_CONTEXT						\
	    : "=a" (last)						\
	    : [next] "S" (next), [prev] "D" (prev),			\
	      [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
	      [ti_flags] "i" (offsetof(struct thread_info, flags)),	\
	      [tif_fork] "i" (TIF_FORK),				\
	      [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
	      [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
	    : "memory", "cc" __EXTRA_CLOBBER)

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %1,%%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 8\n\t"			\
		".quad 1b,3b\n"			\
		".previous"			\
		: :"r" (value), "r" (0))

#ifdef __KERNEL__
struct alt_instr {
	__u8 *instr;		/* original instruction */
	__u8 *replacement;
	__u8  cpuid;		/* cpuid bit set for replacement */
	__u8  instrlen;		/* length of original instruction */
	__u8  replacementlen;	/* length of new instruction, <= instrlen */
	__u8  pad[5];
};
#endif

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This lets generic binary kernels use optimized instructions on
 * CPUs that support them.
 *
 * The length of oldinstr must be greater than or equal to the length
 * of newinstr; oldinstr can be padded with nops as needed.
 *
 * For non-barrier-like inlines, please define new variants
 * without volatile and without the memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      "  .align 8\n"					\
		      "  .quad 661b\n"		/* label */		\
		      "  .quad 663f\n"		/* new instruction */	\
		      "  .byte %c0\n"		/* feature bit */	\
		      "  .byte 662b-661b\n"	/* sourcelen */		\
		      "  .byte 664f-663f\n"	/* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"		\
		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
		      ".previous" :: "i" (feature) : "memory")
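/*
 * For illustration (a sketch, not from this file): the i386 port of
 * this kernel generation uses alternative() to upgrade its memory
 * barrier to MFENCE on SSE2-capable CPUs, with a locked add of at
 * least equal encoded length as the fallback:
 *
 *	#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
 *				 X86_FEATURE_XMM2)
 */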
/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 *   No memory clobber here.
 *   Argument numbers start with 1.
 *   It is best to use fixed-size constraints (like "r" used as (%1)).
 *   If you use variable-sized constraints like "m" or "g" in the
 *   replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      "  .align 8\n"					\
		      "  .quad 661b\n"		/* label */		\
		      "  .quad 663f\n"		/* new instruction */	\
		      "  .byte %c0\n"		/* feature bit */	\
		      "  .byte 662b-661b\n"	/* sourcelen */		\
		      "  .byte 664f-663f\n"	/* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"		\
		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
		      ".previous" :: "i" (feature), ##input)

/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      "  .align 8\n"					\
		      "  .quad 661b\n"		/* label */		\
		      "  .quad 663f\n"		/* new instruction */	\
		      "  .byte %c[feat]\n"	/* feature bit */	\
		      "  .byte 662b-661b\n"	/* sourcelen */		\
		      "  .byte 664f-663f\n"	/* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"		\
		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
		      ".previous" : output : [feat] "i" (feature), ##input)

/*
 * Clear and set the CR0 TS (task switched) bit, respectively.
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val));
}

#define stts() write_cr0(8 | read_cr0())

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))

static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit
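/*
 * Illustrative sketch (not part of the original file): using tas()
 * and xchg() as a crude test-and-set lock on a hypothetical word.
 * tas() returns the previous value, so a nonzero result means the
 * lock was already held; releasing via xchg() keeps the implicit
 * lock-prefix ordering. Real code should use asm/spinlock.h instead:
 *
 *	static unsigned long lock_word;
 *
 *	while (tas(&lock_word))
 *		cpu_relax();
 *	... critical section ...
 *	xchg(&lock_word, 0);
 */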
/*
 * Note: no "lock" prefix is needed even on SMP: xchg always implies a
 * locked bus cycle anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is
 * necessary; strictly speaking the constraints are incomplete, since
 * *ptr is really an output argument as well. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %k0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 8:
		__asm__ __volatile__("xchgq %0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	}
	return x;
}

/*
 * Atomic compare and exchange. Compare OLD with MEM; if they are
 * identical, store NEW in MEM. Return the initial value in MEM.
 * Success is indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
			: "=a"(prev)
			: "q"(new), "m"(*__xg(ptr)), "0"(old)
			: "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
			: "=a"(prev)
			: "r"(new), "m"(*__xg(ptr)), "0"(old)
			: "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
			: "=a"(prev)
			: "r"(new), "m"(*__xg(ptr)), "0"(old)
			: "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
			: "=a"(prev)
			: "r"(new), "m"(*__xg(ptr)), "0"(old)
			: "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),	\
				       (unsigned long)(n),sizeof(*(ptr))))

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()	asm volatile("sfence" ::: "memory")
#else
#define wmb()	asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
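/*
 * Illustrative sketch (not part of the original file): the usual
 * cmpxchg() retry loop, here atomically incrementing a hypothetical
 * counter word. Per the semantics documented above, the operation
 * succeeded iff the returned value equals the old value we compared
 * against:
 *
 *	static unsigned long counter;
 *	unsigned long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */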
/* interrupt control */
#define local_save_flags(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x)	__asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")

#ifdef CONFIG_X86_VSMP
/* Interrupt control for the VSMP architecture */
#define local_irq_disable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
#define local_irq_enable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)

#define irqs_disabled()					\
({							\
	unsigned long flags;				\
	local_save_flags(flags);			\
	(flags & (1<<18)) || !(flags & (1<<9));		\
})

/* For spinlocks etc */
#define local_irq_save(x)	do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
#else /* CONFIG_X86_VSMP */
#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	!(flags & (1<<9));		\
})

/* For spinlocks etc */
#define local_irq_save(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
#endif

/* used in the idle loop; sti enables interrupts only after the next
   instruction, so "sti; hlt" halts with no interrupt window in between */
#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
/* used when interrupts are already enabled or to shut down the processor */
#define halt()			__asm__ __volatile__("hlt": : :"memory")

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);

#endif /* __ASM_SYSTEM_H */
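/*
 * Illustrative sketch (not part of the original file): the canonical
 * save/restore pairing of the interrupt-control macros above around a
 * short critical section. local_irq_save() stores the flags and
 * disables interrupts; local_irq_restore() puts the saved IF state
 * back, so the pair nests safely inside already-disabled regions:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... code that must not race with local interrupts ...
 *	local_irq_restore(flags);
 */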