Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Disintegrate asm/system.h for X86

Disintegrate asm/system.h for X86.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: H. Peter Anvin <hpa@zytor.com>
cc: x86@kernel.org
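
For context: asm/system.h is left behind as a transitional stub that simply includes the new split headers (asm/barrier.h, asm/cmpxchg.h, asm/exec.h, asm/special_insns.h, asm/switch_to.h), so anything not yet converted keeps building while callers are moved over. A minimal, illustrative conversion for a hypothetical file that only needed the barrier definitions (the file and code below are made up, not part of this patch):

    /* before: everything came in through the catch-all header */
    #include <asm/system.h>

    /* after: pull in only the facility actually used */
    #include <asm/barrier.h>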

+554 -562
-1
arch/x86/ia32/ia32_aout.c
··· 26 26 #include <linux/init.h> 27 27 #include <linux/jiffies.h> 28 28 29 - #include <asm/system.h> 30 29 #include <asm/uaccess.h> 31 30 #include <asm/pgalloc.h> 32 31 #include <asm/cacheflush.h>
-1
arch/x86/include/asm/apic.h
··· 11 11 #include <linux/atomic.h> 12 12 #include <asm/fixmap.h> 13 13 #include <asm/mpspec.h> 14 - #include <asm/system.h> 15 14 #include <asm/msr.h> 16 15 17 16 #define ARCH_APICTIMER_STOPS_ON_C3 1
+7
arch/x86/include/asm/auxvec.h
··· 9 9 #endif 10 10 #define AT_SYSINFO_EHDR 33 11 11 12 + /* entries in ARCH_DLINFO: */ 13 + #if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64) 14 + # define AT_VECTOR_SIZE_ARCH 2 15 + #else /* else it's non-compat x86-64 */ 16 + # define AT_VECTOR_SIZE_ARCH 1 17 + #endif 18 + 12 19 #endif /* _ASM_X86_AUXVEC_H */
+116
arch/x86/include/asm/barrier.h
··· 1 + #ifndef _ASM_X86_BARRIER_H 2 + #define _ASM_X86_BARRIER_H 3 + 4 + #include <asm/alternative.h> 5 + #include <asm/nops.h> 6 + 7 + /* 8 + * Force strict CPU ordering. 9 + * And yes, this is required on UP too when we're talking 10 + * to devices. 11 + */ 12 + 13 + #ifdef CONFIG_X86_32 14 + /* 15 + * Some non-Intel clones support out of order store. wmb() ceases to be a 16 + * nop for these. 17 + */ 18 + #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) 19 + #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) 20 + #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) 21 + #else 22 + #define mb() asm volatile("mfence":::"memory") 23 + #define rmb() asm volatile("lfence":::"memory") 24 + #define wmb() asm volatile("sfence" ::: "memory") 25 + #endif 26 + 27 + /** 28 + * read_barrier_depends - Flush all pending reads that subsequents reads 29 + * depend on. 30 + * 31 + * No data-dependent reads from memory-like regions are ever reordered 32 + * over this barrier. All reads preceding this primitive are guaranteed 33 + * to access memory (but not necessarily other CPUs' caches) before any 34 + * reads following this primitive that depend on the data return by 35 + * any of the preceding reads. This primitive is much lighter weight than 36 + * rmb() on most CPUs, and is never heavier weight than is 37 + * rmb(). 38 + * 39 + * These ordering constraints are respected by both the local CPU 40 + * and the compiler. 41 + * 42 + * Ordering is not guaranteed by anything other than these primitives, 43 + * not even by data dependencies. See the documentation for 44 + * memory_barrier() for examples and URLs to more information. 45 + * 46 + * For example, the following code would force ordering (the initial 47 + * value of "a" is zero, "b" is one, and "p" is "&a"): 48 + * 49 + * <programlisting> 50 + * CPU 0 CPU 1 51 + * 52 + * b = 2; 53 + * memory_barrier(); 54 + * p = &b; q = p; 55 + * read_barrier_depends(); 56 + * d = *q; 57 + * </programlisting> 58 + * 59 + * because the read of "*q" depends on the read of "p" and these 60 + * two reads are separated by a read_barrier_depends(). However, 61 + * the following code, with the same initial values for "a" and "b": 62 + * 63 + * <programlisting> 64 + * CPU 0 CPU 1 65 + * 66 + * a = 2; 67 + * memory_barrier(); 68 + * b = 3; y = b; 69 + * read_barrier_depends(); 70 + * x = a; 71 + * </programlisting> 72 + * 73 + * does not enforce ordering, since there is no data dependency between 74 + * the read of "a" and the read of "b". Therefore, on some CPUs, such 75 + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() 76 + * in cases like this where there are no data dependencies. 77 + **/ 78 + 79 + #define read_barrier_depends() do { } while (0) 80 + 81 + #ifdef CONFIG_SMP 82 + #define smp_mb() mb() 83 + #ifdef CONFIG_X86_PPRO_FENCE 84 + # define smp_rmb() rmb() 85 + #else 86 + # define smp_rmb() barrier() 87 + #endif 88 + #ifdef CONFIG_X86_OOSTORE 89 + # define smp_wmb() wmb() 90 + #else 91 + # define smp_wmb() barrier() 92 + #endif 93 + #define smp_read_barrier_depends() read_barrier_depends() 94 + #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) 95 + #else 96 + #define smp_mb() barrier() 97 + #define smp_rmb() barrier() 98 + #define smp_wmb() barrier() 99 + #define smp_read_barrier_depends() do { } while (0) 100 + #define set_mb(var, value) do { var = value; barrier(); } while (0) 101 + #endif 102 + 103 + /* 104 + * Stop RDTSC speculation. 
This is needed when you need to use RDTSC 105 + * (or get_cycles or vread that possibly accesses the TSC) in a defined 106 + * code region. 107 + * 108 + * (Could use an alternative three way for this if there was one.) 109 + */ 110 + static __always_inline void rdtsc_barrier(void) 111 + { 112 + alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); 113 + alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); 114 + } 115 + 116 + #endif /* _ASM_X86_BARRIER_H */
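
The barrier definitions above move verbatim from asm/system.h, so existing callers keep the same semantics. As a usage sketch (illustrative, not part of this patch; assumes cpu_relax() from <asm/processor.h>), the usual publish/consume pairing looks like:

    /* illustrative producer/consumer pairing using <asm/barrier.h> */
    static int payload;
    static int ready;

    static void producer(void)
    {
            payload = 42;
            smp_wmb();              /* order the payload store before the flag store */
            ready = 1;
    }

    static int consumer(void)
    {
            while (!ready)
                    cpu_relax();    /* spin until the flag becomes visible */
            smp_rmb();              /* order the flag load before the payload load */
            return payload;
    }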
+4
arch/x86/include/asm/bug.h
··· 36 36 #endif /* !CONFIG_BUG */ 37 37 38 38 #include <asm-generic/bug.h> 39 + 40 + 41 + extern void show_regs_common(void); 42 + 39 43 #endif /* _ASM_X86_BUG_H */
+1
arch/x86/include/asm/cacheflush.h
··· 3 3 4 4 /* Caches aren't brain-dead on the intel. */ 5 5 #include <asm-generic/cacheflush.h> 6 + #include <asm/special_insns.h> 6 7 7 8 #ifdef CONFIG_X86_PAT 8 9 /*
-1
arch/x86/include/asm/elf.h
··· 84 84 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) 85 85 86 86 #include <asm/processor.h> 87 - #include <asm/system.h> 88 87 89 88 #ifdef CONFIG_X86_32 90 89 #include <asm/desc.h>
+1
arch/x86/include/asm/exec.h
··· 1 + /* define arch_align_stack() here */
-1
arch/x86/include/asm/futex.h
··· 9 9 #include <asm/asm.h> 10 10 #include <asm/errno.h> 11 11 #include <asm/processor.h> 12 - #include <asm/system.h> 13 12 14 13 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ 15 14 asm volatile("1:\t" insn "\n" \
-1
arch/x86/include/asm/i387.h
··· 14 14 15 15 #include <linux/sched.h> 16 16 #include <linux/hardirq.h> 17 - #include <asm/system.h> 18 17 19 18 struct pt_regs; 20 19 struct user_i387_struct;
-1
arch/x86/include/asm/local.h
··· 3 3 4 4 #include <linux/percpu.h> 5 5 6 - #include <asm/system.h> 7 6 #include <linux/atomic.h> 8 7 #include <asm/asm.h> 9 8
-1
arch/x86/include/asm/mc146818rtc.h
··· 5 5 #define _ASM_X86_MC146818RTC_H 6 6 7 7 #include <asm/io.h> 8 - #include <asm/system.h> 9 8 #include <asm/processor.h> 10 9 #include <linux/mc146818rtc.h> 11 10
+30 -1
arch/x86/include/asm/processor.h
··· 14 14 #include <asm/sigcontext.h> 15 15 #include <asm/current.h> 16 16 #include <asm/cpufeature.h> 17 - #include <asm/system.h> 18 17 #include <asm/page.h> 19 18 #include <asm/pgtable_types.h> 20 19 #include <asm/percpu.h> 21 20 #include <asm/msr.h> 22 21 #include <asm/desc_defs.h> 23 22 #include <asm/nops.h> 23 + #include <asm/special_insns.h> 24 24 25 25 #include <linux/personality.h> 26 26 #include <linux/cpumask.h> ··· 29 29 #include <linux/math64.h> 30 30 #include <linux/init.h> 31 31 #include <linux/err.h> 32 + #include <linux/irqflags.h> 33 + 34 + /* 35 + * We handle most unaligned accesses in hardware. On the other hand 36 + * unaligned DMA can be quite expensive on some Nehalem processors. 37 + * 38 + * Based on this we disable the IP header alignment in network drivers. 39 + */ 40 + #define NET_IP_ALIGN 0 32 41 33 42 #define HBP_NUM 4 34 43 /* ··· 1030 1021 #else 1031 1022 #define cpu_has_amd_erratum(x) (false) 1032 1023 #endif /* CONFIG_CPU_SUP_AMD */ 1024 + 1025 + #ifdef CONFIG_X86_32 1026 + /* 1027 + * disable hlt during certain critical i/o operations 1028 + */ 1029 + #define HAVE_DISABLE_HLT 1030 + #endif 1031 + 1032 + void disable_hlt(void); 1033 + void enable_hlt(void); 1034 + 1035 + void cpu_idle_wait(void); 1036 + 1037 + extern unsigned long arch_align_stack(unsigned long sp); 1038 + extern void free_init_pages(char *what, unsigned long begin, unsigned long end); 1039 + 1040 + void default_idle(void); 1041 + bool set_pm_idle_to_default(void); 1042 + 1043 + void stop_this_cpu(void *dummy); 1033 1044 1034 1045 #endif /* _ASM_X86_PROCESSOR_H */
+56 -2
arch/x86/include/asm/segment.h
··· 212 212 #ifdef __KERNEL__ 213 213 #ifndef __ASSEMBLY__ 214 214 extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10]; 215 - #endif 216 - #endif 215 + 216 + /* 217 + * Load a segment. Fall back on loading the zero 218 + * segment if something goes wrong.. 219 + */ 220 + #define loadsegment(seg, value) \ 221 + do { \ 222 + unsigned short __val = (value); \ 223 + \ 224 + asm volatile(" \n" \ 225 + "1: movl %k0,%%" #seg " \n" \ 226 + \ 227 + ".section .fixup,\"ax\" \n" \ 228 + "2: xorl %k0,%k0 \n" \ 229 + " jmp 1b \n" \ 230 + ".previous \n" \ 231 + \ 232 + _ASM_EXTABLE(1b, 2b) \ 233 + \ 234 + : "+r" (__val) : : "memory"); \ 235 + } while (0) 236 + 237 + /* 238 + * Save a segment register away 239 + */ 240 + #define savesegment(seg, value) \ 241 + asm("mov %%" #seg ",%0":"=r" (value) : : "memory") 242 + 243 + /* 244 + * x86_32 user gs accessors. 245 + */ 246 + #ifdef CONFIG_X86_32 247 + #ifdef CONFIG_X86_32_LAZY_GS 248 + #define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;}) 249 + #define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) 250 + #define task_user_gs(tsk) ((tsk)->thread.gs) 251 + #define lazy_save_gs(v) savesegment(gs, (v)) 252 + #define lazy_load_gs(v) loadsegment(gs, (v)) 253 + #else /* X86_32_LAZY_GS */ 254 + #define get_user_gs(regs) (u16)((regs)->gs) 255 + #define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0) 256 + #define task_user_gs(tsk) (task_pt_regs(tsk)->gs) 257 + #define lazy_save_gs(v) do { } while (0) 258 + #define lazy_load_gs(v) do { } while (0) 259 + #endif /* X86_32_LAZY_GS */ 260 + #endif /* X86_32 */ 261 + 262 + static inline unsigned long get_limit(unsigned long segment) 263 + { 264 + unsigned long __limit; 265 + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); 266 + return __limit + 1; 267 + } 268 + 269 + #endif /* !__ASSEMBLY__ */ 270 + #endif /* __KERNEL__ */ 217 271 218 272 #endif /* _ASM_X86_SEGMENT_H */
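
loadsegment() and savesegment() are carried over unchanged, so the usual pattern of stashing a selector and reloading it later still applies. A sketch (illustrative, not part of this patch):

    /* illustrative: save and later restore the %fs selector */
    static void example_fs_window(void)
    {
            unsigned short sel;

            savesegment(fs, sel);   /* read the live %fs selector */
            /* ... code that may reload %fs ... */
            loadsegment(fs, sel);   /* put it back; a faulting load falls back to the null selector */
    }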
+199
arch/x86/include/asm/special_insns.h
··· 1 + #ifndef _ASM_X86_SPECIAL_INSNS_H 2 + #define _ASM_X86_SPECIAL_INSNS_H 3 + 4 + 5 + #ifdef __KERNEL__ 6 + 7 + static inline void native_clts(void) 8 + { 9 + asm volatile("clts"); 10 + } 11 + 12 + /* 13 + * Volatile isn't enough to prevent the compiler from reordering the 14 + * read/write functions for the control registers and messing everything up. 15 + * A memory clobber would solve the problem, but would prevent reordering of 16 + * all loads stores around it, which can hurt performance. Solution is to 17 + * use a variable and mimic reads and writes to it to enforce serialization 18 + */ 19 + static unsigned long __force_order; 20 + 21 + static inline unsigned long native_read_cr0(void) 22 + { 23 + unsigned long val; 24 + asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); 25 + return val; 26 + } 27 + 28 + static inline void native_write_cr0(unsigned long val) 29 + { 30 + asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); 31 + } 32 + 33 + static inline unsigned long native_read_cr2(void) 34 + { 35 + unsigned long val; 36 + asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); 37 + return val; 38 + } 39 + 40 + static inline void native_write_cr2(unsigned long val) 41 + { 42 + asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); 43 + } 44 + 45 + static inline unsigned long native_read_cr3(void) 46 + { 47 + unsigned long val; 48 + asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); 49 + return val; 50 + } 51 + 52 + static inline void native_write_cr3(unsigned long val) 53 + { 54 + asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); 55 + } 56 + 57 + static inline unsigned long native_read_cr4(void) 58 + { 59 + unsigned long val; 60 + asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); 61 + return val; 62 + } 63 + 64 + static inline unsigned long native_read_cr4_safe(void) 65 + { 66 + unsigned long val; 67 + /* This could fault if %cr4 does not exist. In x86_64, a cr4 always 68 + * exists, so it will never fail. 
*/ 69 + #ifdef CONFIG_X86_32 70 + asm volatile("1: mov %%cr4, %0\n" 71 + "2:\n" 72 + _ASM_EXTABLE(1b, 2b) 73 + : "=r" (val), "=m" (__force_order) : "0" (0)); 74 + #else 75 + val = native_read_cr4(); 76 + #endif 77 + return val; 78 + } 79 + 80 + static inline void native_write_cr4(unsigned long val) 81 + { 82 + asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order)); 83 + } 84 + 85 + #ifdef CONFIG_X86_64 86 + static inline unsigned long native_read_cr8(void) 87 + { 88 + unsigned long cr8; 89 + asm volatile("movq %%cr8,%0" : "=r" (cr8)); 90 + return cr8; 91 + } 92 + 93 + static inline void native_write_cr8(unsigned long val) 94 + { 95 + asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); 96 + } 97 + #endif 98 + 99 + static inline void native_wbinvd(void) 100 + { 101 + asm volatile("wbinvd": : :"memory"); 102 + } 103 + 104 + extern void native_load_gs_index(unsigned); 105 + 106 + #ifdef CONFIG_PARAVIRT 107 + #include <asm/paravirt.h> 108 + #else 109 + 110 + static inline unsigned long read_cr0(void) 111 + { 112 + return native_read_cr0(); 113 + } 114 + 115 + static inline void write_cr0(unsigned long x) 116 + { 117 + native_write_cr0(x); 118 + } 119 + 120 + static inline unsigned long read_cr2(void) 121 + { 122 + return native_read_cr2(); 123 + } 124 + 125 + static inline void write_cr2(unsigned long x) 126 + { 127 + native_write_cr2(x); 128 + } 129 + 130 + static inline unsigned long read_cr3(void) 131 + { 132 + return native_read_cr3(); 133 + } 134 + 135 + static inline void write_cr3(unsigned long x) 136 + { 137 + native_write_cr3(x); 138 + } 139 + 140 + static inline unsigned long read_cr4(void) 141 + { 142 + return native_read_cr4(); 143 + } 144 + 145 + static inline unsigned long read_cr4_safe(void) 146 + { 147 + return native_read_cr4_safe(); 148 + } 149 + 150 + static inline void write_cr4(unsigned long x) 151 + { 152 + native_write_cr4(x); 153 + } 154 + 155 + static inline void wbinvd(void) 156 + { 157 + native_wbinvd(); 158 + } 159 + 160 + #ifdef CONFIG_X86_64 161 + 162 + static inline unsigned long read_cr8(void) 163 + { 164 + return native_read_cr8(); 165 + } 166 + 167 + static inline void write_cr8(unsigned long x) 168 + { 169 + native_write_cr8(x); 170 + } 171 + 172 + static inline void load_gs_index(unsigned selector) 173 + { 174 + native_load_gs_index(selector); 175 + } 176 + 177 + #endif 178 + 179 + /* Clear the 'TS' bit */ 180 + static inline void clts(void) 181 + { 182 + native_clts(); 183 + } 184 + 185 + #endif/* CONFIG_PARAVIRT */ 186 + 187 + #define stts() write_cr0(read_cr0() | X86_CR0_TS) 188 + 189 + static inline void clflush(volatile void *__p) 190 + { 191 + asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); 192 + } 193 + 194 + #define nop() asm volatile ("nop") 195 + 196 + 197 + #endif /* __KERNEL__ */ 198 + 199 + #endif /* _ASM_X86_SPECIAL_INSNS_H */
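
The control-register accessors and clts()/stts() likewise move here verbatim. The usual pattern of clearing CR0.TS before touching FPU state and setting it again afterwards is unaffected; a sketch (illustrative, not part of this patch):

    /* illustrative: open a window where FPU/SSE instructions do not trap */
    static void example_fpu_window(void)
    {
            clts();                 /* clear CR0.TS so FPU/SSE insns don't fault */
            /* ... touch FPU state ... */
            stts();                 /* set CR0.TS again; the next FPU use traps */
    }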
-1
arch/x86/include/asm/stackprotector.h
··· 38 38 #include <asm/tsc.h> 39 39 #include <asm/processor.h> 40 40 #include <asm/percpu.h> 41 - #include <asm/system.h> 42 41 #include <asm/desc.h> 43 42 #include <linux/random.h> 44 43
+129
arch/x86/include/asm/switch_to.h
··· 1 + #ifndef _ASM_X86_SWITCH_TO_H 2 + #define _ASM_X86_SWITCH_TO_H 3 + 4 + struct task_struct; /* one of the stranger aspects of C forward declarations */ 5 + struct task_struct *__switch_to(struct task_struct *prev, 6 + struct task_struct *next); 7 + struct tss_struct; 8 + void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, 9 + struct tss_struct *tss); 10 + 11 + #ifdef CONFIG_X86_32 12 + 13 + #ifdef CONFIG_CC_STACKPROTECTOR 14 + #define __switch_canary \ 15 + "movl %P[task_canary](%[next]), %%ebx\n\t" \ 16 + "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" 17 + #define __switch_canary_oparam \ 18 + , [stack_canary] "=m" (stack_canary.canary) 19 + #define __switch_canary_iparam \ 20 + , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 21 + #else /* CC_STACKPROTECTOR */ 22 + #define __switch_canary 23 + #define __switch_canary_oparam 24 + #define __switch_canary_iparam 25 + #endif /* CC_STACKPROTECTOR */ 26 + 27 + /* 28 + * Saving eflags is important. It switches not only IOPL between tasks, 29 + * it also protects other tasks from NT leaking through sysenter etc. 30 + */ 31 + #define switch_to(prev, next, last) \ 32 + do { \ 33 + /* \ 34 + * Context-switching clobbers all registers, so we clobber \ 35 + * them explicitly, via unused output variables. \ 36 + * (EAX and EBP is not listed because EBP is saved/restored \ 37 + * explicitly for wchan access and EAX is the return value of \ 38 + * __switch_to()) \ 39 + */ \ 40 + unsigned long ebx, ecx, edx, esi, edi; \ 41 + \ 42 + asm volatile("pushfl\n\t" /* save flags */ \ 43 + "pushl %%ebp\n\t" /* save EBP */ \ 44 + "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \ 45 + "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ 46 + "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ 47 + "pushl %[next_ip]\n\t" /* restore EIP */ \ 48 + __switch_canary \ 49 + "jmp __switch_to\n" /* regparm call */ \ 50 + "1:\t" \ 51 + "popl %%ebp\n\t" /* restore EBP */ \ 52 + "popfl\n" /* restore flags */ \ 53 + \ 54 + /* output parameters */ \ 55 + : [prev_sp] "=m" (prev->thread.sp), \ 56 + [prev_ip] "=m" (prev->thread.ip), \ 57 + "=a" (last), \ 58 + \ 59 + /* clobbered output registers: */ \ 60 + "=b" (ebx), "=c" (ecx), "=d" (edx), \ 61 + "=S" (esi), "=D" (edi) \ 62 + \ 63 + __switch_canary_oparam \ 64 + \ 65 + /* input parameters: */ \ 66 + : [next_sp] "m" (next->thread.sp), \ 67 + [next_ip] "m" (next->thread.ip), \ 68 + \ 69 + /* regparm parameters for __switch_to(): */ \ 70 + [prev] "a" (prev), \ 71 + [next] "d" (next) \ 72 + \ 73 + __switch_canary_iparam \ 74 + \ 75 + : /* reloaded segment registers */ \ 76 + "memory"); \ 77 + } while (0) 78 + 79 + #else /* CONFIG_X86_32 */ 80 + 81 + /* frame pointer must be last for get_wchan */ 82 + #define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" 83 + #define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" 84 + 85 + #define __EXTRA_CLOBBER \ 86 + , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ 87 + "r12", "r13", "r14", "r15" 88 + 89 + #ifdef CONFIG_CC_STACKPROTECTOR 90 + #define __switch_canary \ 91 + "movq %P[task_canary](%%rsi),%%r8\n\t" \ 92 + "movq %%r8,"__percpu_arg([gs_canary])"\n\t" 93 + #define __switch_canary_oparam \ 94 + , [gs_canary] "=m" (irq_stack_union.stack_canary) 95 + #define __switch_canary_iparam \ 96 + , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 97 + #else /* CC_STACKPROTECTOR */ 98 + #define __switch_canary 99 + #define __switch_canary_oparam 100 + #define __switch_canary_iparam 101 + #endif /* CC_STACKPROTECTOR */ 102 + 103 + /* 
Save restore flags to clear handle leaking NT */ 104 + #define switch_to(prev, next, last) \ 105 + asm volatile(SAVE_CONTEXT \ 106 + "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ 107 + "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ 108 + "call __switch_to\n\t" \ 109 + "movq "__percpu_arg([current_task])",%%rsi\n\t" \ 110 + __switch_canary \ 111 + "movq %P[thread_info](%%rsi),%%r8\n\t" \ 112 + "movq %%rax,%%rdi\n\t" \ 113 + "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ 114 + "jnz ret_from_fork\n\t" \ 115 + RESTORE_CONTEXT \ 116 + : "=a" (last) \ 117 + __switch_canary_oparam \ 118 + : [next] "S" (next), [prev] "D" (prev), \ 119 + [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ 120 + [ti_flags] "i" (offsetof(struct thread_info, flags)), \ 121 + [_tif_fork] "i" (_TIF_FORK), \ 122 + [thread_info] "i" (offsetof(struct task_struct, stack)), \ 123 + [current_task] "m" (current_task) \ 124 + __switch_canary_iparam \ 125 + : "memory", "cc" __EXTRA_CLOBBER) 126 + 127 + #endif /* CONFIG_X86_32 */ 128 + 129 + #endif /* _ASM_X86_SWITCH_TO_H */
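
switch_to() keeps its three-argument form, so the scheduler core calls it exactly as before; only the header providing it changes. A simplified sketch of the call-site shape (illustrative, not taken from this patch):

    #include <asm/switch_to.h>

    /* illustrative shape of the context-switch call site */
    static void example_context_switch(struct task_struct *prev,
                                       struct task_struct *next)
    {
            /* mm and per-arch state hand-over happens around this call */
            switch_to(prev, next, prev);
            /* when control returns here this task has been rescheduled, and
             * 'prev' names the task that ran immediately before it */
    }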
+5 -522
arch/x86/include/asm/system.h
··· 1 - #ifndef _ASM_X86_SYSTEM_H 2 - #define _ASM_X86_SYSTEM_H 3 - 4 - #include <asm/asm.h> 5 - #include <asm/segment.h> 6 - #include <asm/cpufeature.h> 1 + /* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */ 2 + #include <asm/barrier.h> 7 3 #include <asm/cmpxchg.h> 8 - #include <asm/nops.h> 9 - 10 - #include <linux/kernel.h> 11 - #include <linux/irqflags.h> 12 - 13 - /* entries in ARCH_DLINFO: */ 14 - #if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64) 15 - # define AT_VECTOR_SIZE_ARCH 2 16 - #else /* else it's non-compat x86-64 */ 17 - # define AT_VECTOR_SIZE_ARCH 1 18 - #endif 19 - 20 - struct task_struct; /* one of the stranger aspects of C forward declarations */ 21 - struct task_struct *__switch_to(struct task_struct *prev, 22 - struct task_struct *next); 23 - struct tss_struct; 24 - void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, 25 - struct tss_struct *tss); 26 - extern void show_regs_common(void); 27 - 28 - #ifdef CONFIG_X86_32 29 - 30 - #ifdef CONFIG_CC_STACKPROTECTOR 31 - #define __switch_canary \ 32 - "movl %P[task_canary](%[next]), %%ebx\n\t" \ 33 - "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" 34 - #define __switch_canary_oparam \ 35 - , [stack_canary] "=m" (stack_canary.canary) 36 - #define __switch_canary_iparam \ 37 - , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 38 - #else /* CC_STACKPROTECTOR */ 39 - #define __switch_canary 40 - #define __switch_canary_oparam 41 - #define __switch_canary_iparam 42 - #endif /* CC_STACKPROTECTOR */ 43 - 44 - /* 45 - * Saving eflags is important. It switches not only IOPL between tasks, 46 - * it also protects other tasks from NT leaking through sysenter etc. 47 - */ 48 - #define switch_to(prev, next, last) \ 49 - do { \ 50 - /* \ 51 - * Context-switching clobbers all registers, so we clobber \ 52 - * them explicitly, via unused output variables. 
\ 53 - * (EAX and EBP is not listed because EBP is saved/restored \ 54 - * explicitly for wchan access and EAX is the return value of \ 55 - * __switch_to()) \ 56 - */ \ 57 - unsigned long ebx, ecx, edx, esi, edi; \ 58 - \ 59 - asm volatile("pushfl\n\t" /* save flags */ \ 60 - "pushl %%ebp\n\t" /* save EBP */ \ 61 - "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \ 62 - "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ 63 - "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ 64 - "pushl %[next_ip]\n\t" /* restore EIP */ \ 65 - __switch_canary \ 66 - "jmp __switch_to\n" /* regparm call */ \ 67 - "1:\t" \ 68 - "popl %%ebp\n\t" /* restore EBP */ \ 69 - "popfl\n" /* restore flags */ \ 70 - \ 71 - /* output parameters */ \ 72 - : [prev_sp] "=m" (prev->thread.sp), \ 73 - [prev_ip] "=m" (prev->thread.ip), \ 74 - "=a" (last), \ 75 - \ 76 - /* clobbered output registers: */ \ 77 - "=b" (ebx), "=c" (ecx), "=d" (edx), \ 78 - "=S" (esi), "=D" (edi) \ 79 - \ 80 - __switch_canary_oparam \ 81 - \ 82 - /* input parameters: */ \ 83 - : [next_sp] "m" (next->thread.sp), \ 84 - [next_ip] "m" (next->thread.ip), \ 85 - \ 86 - /* regparm parameters for __switch_to(): */ \ 87 - [prev] "a" (prev), \ 88 - [next] "d" (next) \ 89 - \ 90 - __switch_canary_iparam \ 91 - \ 92 - : /* reloaded segment registers */ \ 93 - "memory"); \ 94 - } while (0) 95 - 96 - /* 97 - * disable hlt during certain critical i/o operations 98 - */ 99 - #define HAVE_DISABLE_HLT 100 - #else 101 - 102 - /* frame pointer must be last for get_wchan */ 103 - #define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" 104 - #define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" 105 - 106 - #define __EXTRA_CLOBBER \ 107 - , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ 108 - "r12", "r13", "r14", "r15" 109 - 110 - #ifdef CONFIG_CC_STACKPROTECTOR 111 - #define __switch_canary \ 112 - "movq %P[task_canary](%%rsi),%%r8\n\t" \ 113 - "movq %%r8,"__percpu_arg([gs_canary])"\n\t" 114 - #define __switch_canary_oparam \ 115 - , [gs_canary] "=m" (irq_stack_union.stack_canary) 116 - #define __switch_canary_iparam \ 117 - , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 118 - #else /* CC_STACKPROTECTOR */ 119 - #define __switch_canary 120 - #define __switch_canary_oparam 121 - #define __switch_canary_iparam 122 - #endif /* CC_STACKPROTECTOR */ 123 - 124 - /* Save restore flags to clear handle leaking NT */ 125 - #define switch_to(prev, next, last) \ 126 - asm volatile(SAVE_CONTEXT \ 127 - "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ 128 - "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ 129 - "call __switch_to\n\t" \ 130 - "movq "__percpu_arg([current_task])",%%rsi\n\t" \ 131 - __switch_canary \ 132 - "movq %P[thread_info](%%rsi),%%r8\n\t" \ 133 - "movq %%rax,%%rdi\n\t" \ 134 - "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ 135 - "jnz ret_from_fork\n\t" \ 136 - RESTORE_CONTEXT \ 137 - : "=a" (last) \ 138 - __switch_canary_oparam \ 139 - : [next] "S" (next), [prev] "D" (prev), \ 140 - [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ 141 - [ti_flags] "i" (offsetof(struct thread_info, flags)), \ 142 - [_tif_fork] "i" (_TIF_FORK), \ 143 - [thread_info] "i" (offsetof(struct task_struct, stack)), \ 144 - [current_task] "m" (current_task) \ 145 - __switch_canary_iparam \ 146 - : "memory", "cc" __EXTRA_CLOBBER) 147 - #endif 148 - 149 - #ifdef __KERNEL__ 150 - 151 - extern void native_load_gs_index(unsigned); 152 - 153 - /* 154 - * Load a segment. Fall back on loading the zero 155 - * segment if something goes wrong.. 
156 - */ 157 - #define loadsegment(seg, value) \ 158 - do { \ 159 - unsigned short __val = (value); \ 160 - \ 161 - asm volatile(" \n" \ 162 - "1: movl %k0,%%" #seg " \n" \ 163 - \ 164 - ".section .fixup,\"ax\" \n" \ 165 - "2: xorl %k0,%k0 \n" \ 166 - " jmp 1b \n" \ 167 - ".previous \n" \ 168 - \ 169 - _ASM_EXTABLE(1b, 2b) \ 170 - \ 171 - : "+r" (__val) : : "memory"); \ 172 - } while (0) 173 - 174 - /* 175 - * Save a segment register away 176 - */ 177 - #define savesegment(seg, value) \ 178 - asm("mov %%" #seg ",%0":"=r" (value) : : "memory") 179 - 180 - /* 181 - * x86_32 user gs accessors. 182 - */ 183 - #ifdef CONFIG_X86_32 184 - #ifdef CONFIG_X86_32_LAZY_GS 185 - #define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;}) 186 - #define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) 187 - #define task_user_gs(tsk) ((tsk)->thread.gs) 188 - #define lazy_save_gs(v) savesegment(gs, (v)) 189 - #define lazy_load_gs(v) loadsegment(gs, (v)) 190 - #else /* X86_32_LAZY_GS */ 191 - #define get_user_gs(regs) (u16)((regs)->gs) 192 - #define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0) 193 - #define task_user_gs(tsk) (task_pt_regs(tsk)->gs) 194 - #define lazy_save_gs(v) do { } while (0) 195 - #define lazy_load_gs(v) do { } while (0) 196 - #endif /* X86_32_LAZY_GS */ 197 - #endif /* X86_32 */ 198 - 199 - static inline unsigned long get_limit(unsigned long segment) 200 - { 201 - unsigned long __limit; 202 - asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); 203 - return __limit + 1; 204 - } 205 - 206 - static inline void native_clts(void) 207 - { 208 - asm volatile("clts"); 209 - } 210 - 211 - /* 212 - * Volatile isn't enough to prevent the compiler from reordering the 213 - * read/write functions for the control registers and messing everything up. 214 - * A memory clobber would solve the problem, but would prevent reordering of 215 - * all loads stores around it, which can hurt performance. Solution is to 216 - * use a variable and mimic reads and writes to it to enforce serialization 217 - */ 218 - static unsigned long __force_order; 219 - 220 - static inline unsigned long native_read_cr0(void) 221 - { 222 - unsigned long val; 223 - asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); 224 - return val; 225 - } 226 - 227 - static inline void native_write_cr0(unsigned long val) 228 - { 229 - asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); 230 - } 231 - 232 - static inline unsigned long native_read_cr2(void) 233 - { 234 - unsigned long val; 235 - asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); 236 - return val; 237 - } 238 - 239 - static inline void native_write_cr2(unsigned long val) 240 - { 241 - asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); 242 - } 243 - 244 - static inline unsigned long native_read_cr3(void) 245 - { 246 - unsigned long val; 247 - asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); 248 - return val; 249 - } 250 - 251 - static inline void native_write_cr3(unsigned long val) 252 - { 253 - asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); 254 - } 255 - 256 - static inline unsigned long native_read_cr4(void) 257 - { 258 - unsigned long val; 259 - asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); 260 - return val; 261 - } 262 - 263 - static inline unsigned long native_read_cr4_safe(void) 264 - { 265 - unsigned long val; 266 - /* This could fault if %cr4 does not exist. In x86_64, a cr4 always 267 - * exists, so it will never fail. 
*/ 268 - #ifdef CONFIG_X86_32 269 - asm volatile("1: mov %%cr4, %0\n" 270 - "2:\n" 271 - _ASM_EXTABLE(1b, 2b) 272 - : "=r" (val), "=m" (__force_order) : "0" (0)); 273 - #else 274 - val = native_read_cr4(); 275 - #endif 276 - return val; 277 - } 278 - 279 - static inline void native_write_cr4(unsigned long val) 280 - { 281 - asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order)); 282 - } 283 - 284 - #ifdef CONFIG_X86_64 285 - static inline unsigned long native_read_cr8(void) 286 - { 287 - unsigned long cr8; 288 - asm volatile("movq %%cr8,%0" : "=r" (cr8)); 289 - return cr8; 290 - } 291 - 292 - static inline void native_write_cr8(unsigned long val) 293 - { 294 - asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); 295 - } 296 - #endif 297 - 298 - static inline void native_wbinvd(void) 299 - { 300 - asm volatile("wbinvd": : :"memory"); 301 - } 302 - 303 - #ifdef CONFIG_PARAVIRT 304 - #include <asm/paravirt.h> 305 - #else 306 - 307 - static inline unsigned long read_cr0(void) 308 - { 309 - return native_read_cr0(); 310 - } 311 - 312 - static inline void write_cr0(unsigned long x) 313 - { 314 - native_write_cr0(x); 315 - } 316 - 317 - static inline unsigned long read_cr2(void) 318 - { 319 - return native_read_cr2(); 320 - } 321 - 322 - static inline void write_cr2(unsigned long x) 323 - { 324 - native_write_cr2(x); 325 - } 326 - 327 - static inline unsigned long read_cr3(void) 328 - { 329 - return native_read_cr3(); 330 - } 331 - 332 - static inline void write_cr3(unsigned long x) 333 - { 334 - native_write_cr3(x); 335 - } 336 - 337 - static inline unsigned long read_cr4(void) 338 - { 339 - return native_read_cr4(); 340 - } 341 - 342 - static inline unsigned long read_cr4_safe(void) 343 - { 344 - return native_read_cr4_safe(); 345 - } 346 - 347 - static inline void write_cr4(unsigned long x) 348 - { 349 - native_write_cr4(x); 350 - } 351 - 352 - static inline void wbinvd(void) 353 - { 354 - native_wbinvd(); 355 - } 356 - 357 - #ifdef CONFIG_X86_64 358 - 359 - static inline unsigned long read_cr8(void) 360 - { 361 - return native_read_cr8(); 362 - } 363 - 364 - static inline void write_cr8(unsigned long x) 365 - { 366 - native_write_cr8(x); 367 - } 368 - 369 - static inline void load_gs_index(unsigned selector) 370 - { 371 - native_load_gs_index(selector); 372 - } 373 - 374 - #endif 375 - 376 - /* Clear the 'TS' bit */ 377 - static inline void clts(void) 378 - { 379 - native_clts(); 380 - } 381 - 382 - #endif/* CONFIG_PARAVIRT */ 383 - 384 - #define stts() write_cr0(read_cr0() | X86_CR0_TS) 385 - 386 - #endif /* __KERNEL__ */ 387 - 388 - static inline void clflush(volatile void *__p) 389 - { 390 - asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); 391 - } 392 - 393 - #define nop() asm volatile ("nop") 394 - 395 - void disable_hlt(void); 396 - void enable_hlt(void); 397 - 398 - void cpu_idle_wait(void); 399 - 400 - extern unsigned long arch_align_stack(unsigned long sp); 401 - extern void free_init_pages(char *what, unsigned long begin, unsigned long end); 402 - 403 - void default_idle(void); 404 - bool set_pm_idle_to_default(void); 405 - 406 - void stop_this_cpu(void *dummy); 407 - 408 - /* 409 - * Force strict CPU ordering. 410 - * And yes, this is required on UP too when we're talking 411 - * to devices. 412 - */ 413 - #ifdef CONFIG_X86_32 414 - /* 415 - * Some non-Intel clones support out of order store. wmb() ceases to be a 416 - * nop for these. 
417 - */ 418 - #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) 419 - #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) 420 - #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) 421 - #else 422 - #define mb() asm volatile("mfence":::"memory") 423 - #define rmb() asm volatile("lfence":::"memory") 424 - #define wmb() asm volatile("sfence" ::: "memory") 425 - #endif 426 - 427 - /** 428 - * read_barrier_depends - Flush all pending reads that subsequents reads 429 - * depend on. 430 - * 431 - * No data-dependent reads from memory-like regions are ever reordered 432 - * over this barrier. All reads preceding this primitive are guaranteed 433 - * to access memory (but not necessarily other CPUs' caches) before any 434 - * reads following this primitive that depend on the data return by 435 - * any of the preceding reads. This primitive is much lighter weight than 436 - * rmb() on most CPUs, and is never heavier weight than is 437 - * rmb(). 438 - * 439 - * These ordering constraints are respected by both the local CPU 440 - * and the compiler. 441 - * 442 - * Ordering is not guaranteed by anything other than these primitives, 443 - * not even by data dependencies. See the documentation for 444 - * memory_barrier() for examples and URLs to more information. 445 - * 446 - * For example, the following code would force ordering (the initial 447 - * value of "a" is zero, "b" is one, and "p" is "&a"): 448 - * 449 - * <programlisting> 450 - * CPU 0 CPU 1 451 - * 452 - * b = 2; 453 - * memory_barrier(); 454 - * p = &b; q = p; 455 - * read_barrier_depends(); 456 - * d = *q; 457 - * </programlisting> 458 - * 459 - * because the read of "*q" depends on the read of "p" and these 460 - * two reads are separated by a read_barrier_depends(). However, 461 - * the following code, with the same initial values for "a" and "b": 462 - * 463 - * <programlisting> 464 - * CPU 0 CPU 1 465 - * 466 - * a = 2; 467 - * memory_barrier(); 468 - * b = 3; y = b; 469 - * read_barrier_depends(); 470 - * x = a; 471 - * </programlisting> 472 - * 473 - * does not enforce ordering, since there is no data dependency between 474 - * the read of "a" and the read of "b". Therefore, on some CPUs, such 475 - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() 476 - * in cases like this where there are no data dependencies. 477 - **/ 478 - 479 - #define read_barrier_depends() do { } while (0) 480 - 481 - #ifdef CONFIG_SMP 482 - #define smp_mb() mb() 483 - #ifdef CONFIG_X86_PPRO_FENCE 484 - # define smp_rmb() rmb() 485 - #else 486 - # define smp_rmb() barrier() 487 - #endif 488 - #ifdef CONFIG_X86_OOSTORE 489 - # define smp_wmb() wmb() 490 - #else 491 - # define smp_wmb() barrier() 492 - #endif 493 - #define smp_read_barrier_depends() read_barrier_depends() 494 - #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) 495 - #else 496 - #define smp_mb() barrier() 497 - #define smp_rmb() barrier() 498 - #define smp_wmb() barrier() 499 - #define smp_read_barrier_depends() do { } while (0) 500 - #define set_mb(var, value) do { var = value; barrier(); } while (0) 501 - #endif 502 - 503 - /* 504 - * Stop RDTSC speculation. This is needed when you need to use RDTSC 505 - * (or get_cycles or vread that possibly accesses the TSC) in a defined 506 - * code region. 507 - * 508 - * (Could use an alternative three way for this if there was one.) 
509 - */ 510 - static __always_inline void rdtsc_barrier(void) 511 - { 512 - alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); 513 - alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); 514 - } 515 - 516 - /* 517 - * We handle most unaligned accesses in hardware. On the other hand 518 - * unaligned DMA can be quite expensive on some Nehalem processors. 519 - * 520 - * Based on this we disable the IP header alignment in network drivers. 521 - */ 522 - #define NET_IP_ALIGN 0 523 - #endif /* _ASM_X86_SYSTEM_H */ 4 + #include <asm/exec.h> 5 + #include <asm/special_insns.h> 6 + #include <asm/switch_to.h>
+1 -1
arch/x86/include/asm/tlbflush.h
··· 5 5 #include <linux/sched.h> 6 6 7 7 #include <asm/processor.h> 8 - #include <asm/system.h> 8 + #include <asm/special_insns.h> 9 9 10 10 #ifdef CONFIG_PARAVIRT 11 11 #include <asm/paravirt.h>
-1
arch/x86/include/asm/virtext.h
··· 16 16 #define _ASM_X86_VIRTEX_H 17 17 18 18 #include <asm/processor.h> 19 - #include <asm/system.h> 20 19 21 20 #include <asm/vmx.h> 22 21 #include <asm/svm.h>
+1
arch/x86/kernel/acpi/cstate.c
··· 14 14 #include <acpi/processor.h> 15 15 #include <asm/acpi.h> 16 16 #include <asm/mwait.h> 17 + #include <asm/special_insns.h> 17 18 18 19 /* 19 20 * Initialize bm_flags based on the CPU cache properties
-1
arch/x86/kernel/apm_32.c
··· 231 231 #include <linux/syscore_ops.h> 232 232 #include <linux/i8253.h> 233 233 234 - #include <asm/system.h> 235 234 #include <asm/uaccess.h> 236 235 #include <asm/desc.h> 237 236 #include <asm/olpc.h>
-1
arch/x86/kernel/cpu/mcheck/p5.c
··· 9 9 #include <linux/smp.h> 10 10 11 11 #include <asm/processor.h> 12 - #include <asm/system.h> 13 12 #include <asm/mce.h> 14 13 #include <asm/msr.h> 15 14
-1
arch/x86/kernel/cpu/mcheck/therm_throt.c
··· 25 25 #include <linux/cpu.h> 26 26 27 27 #include <asm/processor.h> 28 - #include <asm/system.h> 29 28 #include <asm/apic.h> 30 29 #include <asm/idle.h> 31 30 #include <asm/mce.h>
-1
arch/x86/kernel/cpu/mcheck/winchip.c
··· 8 8 #include <linux/init.h> 9 9 10 10 #include <asm/processor.h> 11 - #include <asm/system.h> 12 11 #include <asm/mce.h> 13 12 #include <asm/msr.h> 14 13
-1
arch/x86/kernel/cpu/mtrr/generic.c
··· 12 12 #include <asm/processor-flags.h> 13 13 #include <asm/cpufeature.h> 14 14 #include <asm/tlbflush.h> 15 - #include <asm/system.h> 16 15 #include <asm/mtrr.h> 17 16 #include <asm/msr.h> 18 17 #include <asm/pat.h>
-1
arch/x86/kernel/cpuid.c
··· 43 43 44 44 #include <asm/processor.h> 45 45 #include <asm/msr.h> 46 - #include <asm/system.h> 47 46 48 47 static struct class *cpuid_class; 49 48
-1
arch/x86/kernel/i8259.c
··· 15 15 #include <linux/delay.h> 16 16 17 17 #include <linux/atomic.h> 18 - #include <asm/system.h> 19 18 #include <asm/timer.h> 20 19 #include <asm/hw_irq.h> 21 20 #include <asm/pgtable.h>
-1
arch/x86/kernel/irqinit.c
··· 16 16 #include <linux/delay.h> 17 17 18 18 #include <linux/atomic.h> 19 - #include <asm/system.h> 20 19 #include <asm/timer.h> 21 20 #include <asm/hw_irq.h> 22 21 #include <asm/pgtable.h>
-1
arch/x86/kernel/kgdb.c
··· 46 46 47 47 #include <asm/debugreg.h> 48 48 #include <asm/apicdef.h> 49 - #include <asm/system.h> 50 49 #include <asm/apic.h> 51 50 #include <asm/nmi.h> 52 51
-1
arch/x86/kernel/ldt.c
··· 15 15 #include <linux/vmalloc.h> 16 16 #include <linux/uaccess.h> 17 17 18 - #include <asm/system.h> 19 18 #include <asm/ldt.h> 20 19 #include <asm/desc.h> 21 20 #include <asm/mmu_context.h>
-1
arch/x86/kernel/machine_kexec_32.c
··· 23 23 #include <asm/apic.h> 24 24 #include <asm/cpufeature.h> 25 25 #include <asm/desc.h> 26 - #include <asm/system.h> 27 26 #include <asm/cacheflush.h> 28 27 #include <asm/debugreg.h> 29 28
-1
arch/x86/kernel/mca_32.c
··· 43 43 #include <linux/mca.h> 44 44 #include <linux/kprobes.h> 45 45 #include <linux/slab.h> 46 - #include <asm/system.h> 47 46 #include <asm/io.h> 48 47 #include <linux/proc_fs.h> 49 48 #include <linux/mman.h>
-1
arch/x86/kernel/module.c
··· 26 26 #include <linux/gfp.h> 27 27 #include <linux/jump_label.h> 28 28 29 - #include <asm/system.h> 30 29 #include <asm/page.h> 31 30 #include <asm/pgtable.h> 32 31
-1
arch/x86/kernel/msr.c
··· 40 40 41 41 #include <asm/processor.h> 42 42 #include <asm/msr.h> 43 - #include <asm/system.h> 44 43 45 44 static struct class *msr_class; 46 45
+1
arch/x86/kernel/paravirt.c
··· 37 37 #include <asm/apic.h> 38 38 #include <asm/tlbflush.h> 39 39 #include <asm/timer.h> 40 + #include <asm/special_insns.h> 40 41 41 42 /* nop stub */ 42 43 void _paravirt_nop(void)
-1
arch/x86/kernel/pci-calgary_64.c
··· 42 42 #include <asm/calgary.h> 43 43 #include <asm/tce.h> 44 44 #include <asm/pci-direct.h> 45 - #include <asm/system.h> 46 45 #include <asm/dma.h> 47 46 #include <asm/rio.h> 48 47 #include <asm/bios_ebda.h>
-1
arch/x86/kernel/process.c
··· 15 15 #include <trace/events/power.h> 16 16 #include <linux/hw_breakpoint.h> 17 17 #include <asm/cpu.h> 18 - #include <asm/system.h> 19 18 #include <asm/apic.h> 20 19 #include <asm/syscalls.h> 21 20 #include <asm/idle.h>
+1 -1
arch/x86/kernel/process_32.c
··· 41 41 #include <linux/cpuidle.h> 42 42 43 43 #include <asm/pgtable.h> 44 - #include <asm/system.h> 45 44 #include <asm/ldt.h> 46 45 #include <asm/processor.h> 47 46 #include <asm/i387.h> ··· 58 59 #include <asm/syscalls.h> 59 60 #include <asm/debugreg.h> 60 61 #include <asm/nmi.h> 62 + #include <asm/switch_to.h> 61 63 62 64 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 63 65
+1 -1
arch/x86/kernel/process_64.c
··· 40 40 #include <linux/cpuidle.h> 41 41 42 42 #include <asm/pgtable.h> 43 - #include <asm/system.h> 44 43 #include <asm/processor.h> 45 44 #include <asm/i387.h> 46 45 #include <asm/fpu-internal.h> ··· 52 53 #include <asm/syscalls.h> 53 54 #include <asm/debugreg.h> 54 55 #include <asm/nmi.h> 56 + #include <asm/switch_to.h> 55 57 56 58 asmlinkage extern void ret_from_fork(void); 57 59
-1
arch/x86/kernel/ptrace.c
··· 24 24 25 25 #include <asm/uaccess.h> 26 26 #include <asm/pgtable.h> 27 - #include <asm/system.h> 28 27 #include <asm/processor.h> 29 28 #include <asm/i387.h> 30 29 #include <asm/fpu-internal.h>
-1
arch/x86/kernel/setup.c
··· 90 90 #include <asm/processor.h> 91 91 #include <asm/bugs.h> 92 92 93 - #include <asm/system.h> 94 93 #include <asm/vsyscall.h> 95 94 #include <asm/cpu.h> 96 95 #include <asm/desc.h>
+1
arch/x86/kernel/tce_64.c
··· 34 34 #include <asm/tce.h> 35 35 #include <asm/calgary.h> 36 36 #include <asm/proto.h> 37 + #include <asm/cacheflush.h> 37 38 38 39 /* flush a tce at 'tceaddr' to main memory */ 39 40 static inline void flush_tce(void* tceaddr)
-1
arch/x86/kernel/tls.c
··· 6 6 7 7 #include <asm/uaccess.h> 8 8 #include <asm/desc.h> 9 - #include <asm/system.h> 10 9 #include <asm/ldt.h> 11 10 #include <asm/processor.h> 12 11 #include <asm/proto.h>
-1
arch/x86/kernel/traps.c
··· 50 50 #include <asm/processor.h> 51 51 #include <asm/debugreg.h> 52 52 #include <linux/atomic.h> 53 - #include <asm/system.h> 54 53 #include <asm/traps.h> 55 54 #include <asm/desc.h> 56 55 #include <asm/i387.h>
-1
arch/x86/mm/init.c
··· 12 12 #include <asm/page_types.h> 13 13 #include <asm/sections.h> 14 14 #include <asm/setup.h> 15 - #include <asm/system.h> 16 15 #include <asm/tlbflush.h> 17 16 #include <asm/tlb.h> 18 17 #include <asm/proto.h>
-1
arch/x86/mm/init_32.c
··· 35 35 #include <asm/asm.h> 36 36 #include <asm/bios_ebda.h> 37 37 #include <asm/processor.h> 38 - #include <asm/system.h> 39 38 #include <asm/uaccess.h> 40 39 #include <asm/pgtable.h> 41 40 #include <asm/dma.h>
-1
arch/x86/mm/init_64.c
··· 35 35 36 36 #include <asm/processor.h> 37 37 #include <asm/bios_ebda.h> 38 - #include <asm/system.h> 39 38 #include <asm/uaccess.h> 40 39 #include <asm/pgtable.h> 41 40 #include <asm/pgalloc.h>
-1
arch/x86/mm/pgtable_32.c
··· 10 10 #include <linux/spinlock.h> 11 11 #include <linux/module.h> 12 12 13 - #include <asm/system.h> 14 13 #include <asm/pgtable.h> 15 14 #include <asm/pgalloc.h> 16 15 #include <asm/fixmap.h>
-1
arch/x86/power/hibernate_32.c
··· 10 10 #include <linux/suspend.h> 11 11 #include <linux/bootmem.h> 12 12 13 - #include <asm/system.h> 14 13 #include <asm/page.h> 15 14 #include <asm/pgtable.h> 16 15 #include <asm/mmzone.h>