Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (78 commits)
x86: fix RTC lockdep warning: potential hardirq recursion
x86: cpa, micro-optimization
x86: cpa, clean up code flow
x86: cpa, eliminate CPA_ enum
x86: cpa, cleanups
x86: implement gbpages support in change_page_attr()
x86: support gbpages in pagetable dump
x86: add gbpages support to lookup_address
x86: add pgtable accessor functions for gbpages
x86: add PUD_PAGE_SIZE
x86: add feature macros for the gbpages cpuid bit
x86: switch direct mapping setup over to set_pte
x86: fix page-present check in cpa_flush_range
x86: remove cpa warning
x86: remove now unused clear_kernel_mapping
x86: switch pci-gart over to using set_memory_np() instead of clear_kernel_mapping()
x86: cpa selftest, skip non present entries
x86: CPA fix pagetable split
x86: rename LARGE_PAGE_SIZE to PMD_PAGE_SIZE
x86: cpa, fix lookup_address
...

+823 -816
+16
Documentation/x86_64/00-INDEX
···
  1 + 00-INDEX
  2 + 	- This file
  3 + boot-options.txt
  4 + 	- AMD64-specific boot options.
  5 + cpu-hotplug-spec
  6 + 	- Firmware support for CPU hotplug under Linux/x86-64
  7 + fake-numa-for-cpusets
  8 + 	- Using numa=fake and CPUSets for Resource Management
  9 + kernel-stacks
 10 + 	- Context-specific per-processor interrupt stacks.
 11 + machinecheck
 12 + 	- Configurable sysfs parameters for the x86-64 machine check code.
 13 + mm.txt
 14 + 	- Memory layout of x86-64 (4 level page tables, 46 bits physical).
 15 + uefi.txt
 16 + 	- Booting Linux via Unified Extensible Firmware Interface.
+1
arch/x86/Kconfig
··· 306 306 select M486 307 307 select X86_REBOOTFIXUPS 308 308 select GENERIC_GPIO 309 + select LEDS_CLASS 309 310 select LEDS_GPIO 310 311 help 311 312 This option is needed for RDC R-321x system-on-chip, also known
-1
arch/x86/Makefile
··· 92 92 KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) 93 93 94 94 LDFLAGS := -m elf_$(UTS_MACHINE) 95 - OBJCOPYFLAGS := -O binary -R .note -R .comment -S 96 95 97 96 # Speed up the build 98 97 KBUILD_CFLAGS += -pipe
+13 -5
arch/x86/boot/Makefile
··· 26 26 #RAMDISK := -DRAMDISK=512 27 27 28 28 targets := vmlinux.bin setup.bin setup.elf zImage bzImage 29 - subdir- := compressed 29 + subdir- := compressed 30 30 31 31 setup-y += a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o 32 32 setup-y += header.o main.o mca.o memory.o pm.o pmjump.o ··· 43 43 setup-y += video-bios.o 44 44 45 45 targets += $(setup-y) 46 - hostprogs-y := tools/build 46 + hostprogs-y := mkcpustr tools/build 47 47 48 - HOSTCFLAGS_build.o := $(LINUXINCLUDE) 48 + HOST_EXTRACFLAGS += $(LINUXINCLUDE) 49 + 50 + $(obj)/cpu.o: $(obj)/cpustr.h 51 + 52 + quiet_cmd_cpustr = CPUSTR $@ 53 + cmd_cpustr = $(obj)/mkcpustr > $@ 54 + targets += cpustr.h 55 + $(obj)/cpustr.h: $(obj)/mkcpustr FORCE 56 + $(call if_changed,cpustr) 49 57 50 58 # --------------------------------------------------------------------------- 51 59 ··· 88 80 $(call if_changed,image) 89 81 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' 90 82 83 + OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S 91 84 $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE 92 85 $(call if_changed,objcopy) 93 86 ··· 99 90 $(call if_changed,ld) 100 91 101 92 OBJCOPYFLAGS_setup.bin := -O binary 102 - 103 93 $(obj)/setup.bin: $(obj)/setup.elf FORCE 104 94 $(call if_changed,objcopy) 105 95 ··· 106 98 $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@ 107 99 108 100 # Set this if you want to pass append arguments to the zdisk/fdimage/isoimage kernel 109 - FDARGS = 101 + FDARGS = 110 102 # Set this if you want an initrd included with the zdisk/fdimage/isoimage kernel 111 103 FDINITRD = 112 104
+1
arch/x86/boot/compressed/Makefile
··· 22 22 $(call if_changed,ld) 23 23 @: 24 24 25 + OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S 25 26 $(obj)/vmlinux.bin: vmlinux FORCE 26 27 $(call if_changed,objcopy) 27 28
+4 -4
arch/x86/boot/compressed/head_64.S
··· 80 80 81 81 #ifdef CONFIG_RELOCATABLE 82 82 movl %ebp, %ebx 83 - addl $(LARGE_PAGE_SIZE -1), %ebx 84 - andl $LARGE_PAGE_MASK, %ebx 83 + addl $(PMD_PAGE_SIZE -1), %ebx 84 + andl $PMD_PAGE_MASK, %ebx 85 85 #else 86 86 movl $CONFIG_PHYSICAL_START, %ebx 87 87 #endif ··· 220 220 /* Start with the delta to where the kernel will run at. */ 221 221 #ifdef CONFIG_RELOCATABLE 222 222 leaq startup_32(%rip) /* - $startup_32 */, %rbp 223 - addq $(LARGE_PAGE_SIZE - 1), %rbp 224 - andq $LARGE_PAGE_MASK, %rbp 223 + addq $(PMD_PAGE_SIZE - 1), %rbp 224 + andq $PMD_PAGE_MASK, %rbp 225 225 movq %rbp, %rbx 226 226 #else 227 227 movq $CONFIG_PHYSICAL_START, %rbp
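For reference, the PMD_PAGE_SIZE/PMD_PAGE_MASK names used above, together with the PUD_PAGE_SIZE mentioned in the commit list, presumably reduce to the following; this is a reconstruction for illustration, not part of the diff, and the real definitions live in the x86 page headers.

/* Presumed definitions; _AC() keeps the constants usable from the
 * assembly files above. Reconstruction only, not part of this diff. */
#define PMD_PAGE_SIZE	(_AC(1, UL) << PMD_SHIFT)	/* 2 MiB */
#define PMD_PAGE_MASK	(~(PMD_PAGE_SIZE - 1))
#define PUD_PAGE_SIZE	(_AC(1, UL) << PUD_SHIFT)	/* 1 GiB */
#define PUD_PAGE_MASK	(~(PUD_PAGE_SIZE - 1))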
+21 -5
arch/x86/boot/cpu.c
··· 1 1 /* -*- linux-c -*- ------------------------------------------------------- * 2 2 * 3 3 * Copyright (C) 1991, 1992 Linus Torvalds 4 - * Copyright 2007 rPath, Inc. - All Rights Reserved 4 + * Copyright 2007-2008 rPath, Inc. - All Rights Reserved 5 5 * 6 6 * This file is part of the Linux kernel, and is made available under 7 7 * the terms of the GNU General Public License version 2. ··· 9 9 * ----------------------------------------------------------------------- */ 10 10 11 11 /* 12 - * arch/i386/boot/cpu.c 12 + * arch/x86/boot/cpu.c 13 13 * 14 14 * Check for obligatory CPU features and abort if the features are not 15 15 * present. ··· 18 18 #include "boot.h" 19 19 #include "bitops.h" 20 20 #include <asm/cpufeature.h> 21 + 22 + #include "cpustr.h" 21 23 22 24 static char *cpu_name(int level) 23 25 { ··· 37 35 { 38 36 u32 *err_flags; 39 37 int cpu_level, req_level; 38 + const unsigned char *msg_strs; 40 39 41 40 check_cpu(&cpu_level, &req_level, &err_flags); 42 41 ··· 54 51 puts("This kernel requires the following features " 55 52 "not present on the CPU:\n"); 56 53 54 + msg_strs = (const unsigned char *)x86_cap_strs; 55 + 57 56 for (i = 0; i < NCAPINTS; i++) { 58 57 u32 e = err_flags[i]; 59 58 60 59 for (j = 0; j < 32; j++) { 61 - if (e & 1) 62 - printf("%d:%d ", i, j); 63 - 60 + int n = (i << 5)+j; 61 + if (*msg_strs < n) { 62 + /* Skip to the next string */ 63 + do { 64 + msg_strs++; 65 + } while (*msg_strs); 66 + msg_strs++; 67 + } 68 + if (e & 1) { 69 + if (*msg_strs == n && msg_strs[1]) 70 + printf("%s ", msg_strs+1); 71 + else 72 + printf("%d:%d ", i, j); 73 + } 64 74 e >>= 1; 65 75 } 66 76 }
+49
arch/x86/boot/mkcpustr.c
··· 1 + /* ----------------------------------------------------------------------- * 2 + * 3 + * Copyright 2008 rPath, Inc. - All Rights Reserved 4 + * 5 + * This file is part of the Linux kernel, and is made available under 6 + * the terms of the GNU General Public License version 2 or (at your 7 + * option) any later version; incorporated herein by reference. 8 + * 9 + * ----------------------------------------------------------------------- */ 10 + 11 + /* 12 + * This is a host program to preprocess the CPU strings into a 13 + * compact format suitable for the setup code. 14 + */ 15 + 16 + #include <stdio.h> 17 + 18 + #include "../kernel/cpu/feature_names.c" 19 + 20 + #if NCAPFLAGS > 8 21 + # error "Need to adjust the boot code handling of CPUID strings" 22 + #endif 23 + 24 + int main(void) 25 + { 26 + int i; 27 + const char *str; 28 + 29 + printf("static const char x86_cap_strs[] = \n"); 30 + 31 + for (i = 0; i < NCAPINTS*32; i++) { 32 + str = x86_cap_flags[i]; 33 + 34 + if (i == NCAPINTS*32-1) { 35 + /* The last entry must be unconditional; this 36 + also consumes the compiler-added null character */ 37 + if (!str) 38 + str = ""; 39 + printf("\t\"\\x%02x\"\"%s\"\n", i, str); 40 + } else if (str) { 41 + printf("#if REQUIRED_MASK%d & (1 << %d)\n" 42 + "\t\"\\x%02x\"\"%s\\0\"\n" 43 + "#endif\n", 44 + i >> 5, i & 31, i, str); 45 + } 46 + } 47 + printf("\t;\n"); 48 + return 0; 49 + }
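For concreteness, the cpustr.h that mkcpustr generates takes roughly the following shape; the entries shown are illustrative, and only names whose bit is covered by a REQUIRED_MASK* word are actually emitted.

/* Illustrative output of mkcpustr: each entry is one byte holding the
 * feature bit number followed by the NUL-terminated flag name; the
 * final entry (bit NCAPINTS*32-1) is emitted unconditionally and uses
 * the compiler-added terminating NUL. Entries shown are examples. */
static const char x86_cap_strs[] =
#if REQUIRED_MASK0 & (1 << 0)
	"\x00""fpu\0"
#endif
#if REQUIRED_MASK0 & (1 << 6)
	"\x06""pae\0"
#endif
	"\xff"""
	;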
+4 -2
arch/x86/kernel/Makefile
··· 37 37 obj-$(CONFIG_X86_CPUID) += cpuid.o 38 38 obj-$(CONFIG_MICROCODE) += microcode.o 39 39 obj-$(CONFIG_PCI) += early-quirks.o 40 - obj-$(CONFIG_APM) += apm_32.o 40 + apm-y := apm_32.o 41 + obj-$(CONFIG_APM) += apm.o 41 42 obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o tsc_sync.o 42 43 obj-$(CONFIG_X86_32_SMP) += smpcommon_32.o 43 44 obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o ··· 75 74 obj-y += pcspeaker.o 76 75 endif 77 76 78 - obj-$(CONFIG_SCx200) += scx200_32.o 77 + obj-$(CONFIG_SCx200) += scx200.o 78 + scx200-y += scx200_32.o 79 79 80 80 ### 81 81 # 64 bit specific files
+1
arch/x86/kernel/cpu/Makefile
··· 3 3 # 4 4 5 5 obj-y := intel_cacheinfo.o addon_cpuid_features.o 6 + obj-y += feature_names.o 6 7 7 8 obj-$(CONFIG_X86_32) += common.o proc.o bugs.o 8 9 obj-$(CONFIG_X86_32) += amd.o
-10
arch/x86/kernel/cpu/common.c
··· 623 623 * They will insert themselves into the cpu_devs structure. 624 624 * Then, when cpu_init() is called, we can just iterate over that array. 625 625 */ 626 - 627 - extern int intel_cpu_init(void); 628 - extern int cyrix_init_cpu(void); 629 - extern int nsc_init_cpu(void); 630 - extern int amd_init_cpu(void); 631 - extern int centaur_init_cpu(void); 632 - extern int transmeta_init_cpu(void); 633 - extern int nexgen_init_cpu(void); 634 - extern int umc_init_cpu(void); 635 - 636 626 void __init early_cpu_init(void) 637 627 { 638 628 intel_cpu_init();
+9
arch/x86/kernel/cpu/cpu.h
··· 27 27 extern void early_init_intel(struct cpuinfo_x86 *c); 28 28 extern void early_init_amd(struct cpuinfo_x86 *c); 29 29 30 + /* Specific CPU type init functions */ 31 + int intel_cpu_init(void); 32 + int amd_init_cpu(void); 33 + int cyrix_init_cpu(void); 34 + int nsc_init_cpu(void); 35 + int centaur_init_cpu(void); 36 + int transmeta_init_cpu(void); 37 + int nexgen_init_cpu(void); 38 + int umc_init_cpu(void);
+83
arch/x86/kernel/cpu/feature_names.c
··· 1 + /* 2 + * Strings for the various x86 capability flags. 3 + * 4 + * This file must not contain any executable code. 5 + */ 6 + 7 + #include "asm/cpufeature.h" 8 + 9 + /* 10 + * These flag bits must match the definitions in <asm/cpufeature.h>. 11 + * NULL means this bit is undefined or reserved; either way it doesn't 12 + * have meaning as far as Linux is concerned. Note that it's important 13 + * to realize there is a difference between this table and CPUID -- if 14 + * applications want to get the raw CPUID data, they should access 15 + * /dev/cpu/<cpu_nr>/cpuid instead. 16 + */ 17 + const char * const x86_cap_flags[NCAPINTS*32] = { 18 + /* Intel-defined */ 19 + "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 20 + "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", 21 + "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx", 22 + "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe", 23 + 24 + /* AMD-defined */ 25 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 26 + NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, 27 + NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL, 28 + NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm", 29 + "3dnowext", "3dnow", 30 + 31 + /* Transmeta-defined */ 32 + "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, 33 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 34 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 35 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 36 + 37 + /* Other (Linux-defined) */ 38 + "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", 39 + NULL, NULL, NULL, NULL, 40 + "constant_tsc", "up", NULL, "arch_perfmon", 41 + "pebs", "bts", NULL, NULL, 42 + "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL, 43 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 44 + 45 + /* Intel-defined (#2) */ 46 + "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", 47 + "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, 48 + NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt", 49 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 50 + 51 + /* VIA/Cyrix/Centaur-defined */ 52 + NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en", 53 + "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL, 54 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 55 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 56 + 57 + /* AMD-defined (#2) */ 58 + "lahf_lm", "cmp_legacy", "svm", "extapic", 59 + "cr8_legacy", "abm", "sse4a", "misalignsse", 60 + "3dnowprefetch", "osvw", "ibs", "sse5", 61 + "skinit", "wdt", NULL, NULL, 62 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 63 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 64 + 65 + /* Auxiliary (Linux-defined) */ 66 + "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL, 67 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 68 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 69 + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 70 + }; 71 + 72 + const char *const x86_power_flags[32] = { 73 + "ts", /* temperature sensor */ 74 + "fid", /* frequency id control */ 75 + "vid", /* voltage id control */ 76 + "ttp", /* thermal trip */ 77 + "tm", 78 + "stc", 79 + "100mhzsteps", 80 + "hwpstate", 81 + "", /* tsc invariant mapped to constant_tsc */ 82 + /* nothing */ 83 + };
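The shared table is indexed directly by feature bit number; a minimal sketch of how a consumer walks it, mirroring the existing /proc/cpuinfo "flags" loop (cpu_has(), seq_printf() and struct cpuinfo_x86 are the usual kernel ones, nothing new is added here):

/* Sketch of consuming x86_cap_flags[]; mirrors the existing
 * /proc/cpuinfo loop rather than adding new behaviour. */
#include <linux/seq_file.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

static void show_flags(struct seq_file *m, struct cpuinfo_x86 *c)
{
	int i;

	seq_printf(m, "flags\t\t:");
	for (i = 0; i < NCAPINTS * 32; i++)
		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
			seq_printf(m, " %s", x86_cap_flags[i]);
	seq_printf(m, "\n");
}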
+1
arch/x86/kernel/cpu/intel.c
··· 13 13 #include <asm/uaccess.h> 14 14 #include <asm/ptrace.h> 15 15 #include <asm/ds.h> 16 + #include <asm/bugs.h> 16 17 17 18 #include "cpu.h" 18 19
-107
arch/x86/kernel/cpu/mtrr/cyrix.c
··· 7 7 #include <asm/processor-flags.h> 8 8 #include "mtrr.h" 9 9 10 - int arr3_protected; 11 - 12 10 static void 13 11 cyrix_get_arr(unsigned int reg, unsigned long *base, 14 12 unsigned long *size, mtrr_type * type) ··· 97 99 case 4: 98 100 return replace_reg; 99 101 case 3: 100 - if (arr3_protected) 101 - break; 102 102 case 2: 103 103 case 1: 104 104 case 0: ··· 111 115 } else { 112 116 for (i = 0; i < 7; i++) { 113 117 cyrix_get_arr(i, &lbase, &lsize, &ltype); 114 - if ((i == 3) && arr3_protected) 115 - continue; 116 118 if (lsize == 0) 117 119 return i; 118 120 } ··· 253 259 254 260 post_set(); 255 261 } 256 - 257 - #if 0 258 - /* 259 - * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection 260 - * with the SMM (System Management Mode) mode. So we need the following: 261 - * Check whether SMI_LOCK (CCR3 bit 0) is set 262 - * if it is set, write a warning message: ARR3 cannot be changed! 263 - * (it cannot be changed until the next processor reset) 264 - * if it is reset, then we can change it, set all the needed bits: 265 - * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset) 266 - * - disable access to SMM memory (CCR1 bit 2 reset) 267 - * - disable SMM mode (CCR1 bit 1 reset) 268 - * - disable write protection of ARR3 (CCR6 bit 1 reset) 269 - * - (maybe) disable ARR3 270 - * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set) 271 - */ 272 - static void __init 273 - cyrix_arr_init(void) 274 - { 275 - struct set_mtrr_context ctxt; 276 - unsigned char ccr[7]; 277 - int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 }; 278 - #ifdef CONFIG_SMP 279 - int i; 280 - #endif 281 - 282 - /* flush cache and enable MAPEN */ 283 - set_mtrr_prepare_save(&ctxt); 284 - set_mtrr_cache_disable(&ctxt); 285 - 286 - /* Save all CCRs locally */ 287 - ccr[0] = getCx86(CX86_CCR0); 288 - ccr[1] = getCx86(CX86_CCR1); 289 - ccr[2] = getCx86(CX86_CCR2); 290 - ccr[3] = ctxt.ccr3; 291 - ccr[4] = getCx86(CX86_CCR4); 292 - ccr[5] = getCx86(CX86_CCR5); 293 - ccr[6] = getCx86(CX86_CCR6); 294 - 295 - if (ccr[3] & 1) { 296 - ccrc[3] = 1; 297 - arr3_protected = 1; 298 - } else { 299 - /* Disable SMM mode (bit 1), access to SMM memory (bit 2) and 300 - * access to SMM memory through ARR3 (bit 7). 301 - */ 302 - if (ccr[1] & 0x80) { 303 - ccr[1] &= 0x7f; 304 - ccrc[1] |= 0x80; 305 - } 306 - if (ccr[1] & 0x04) { 307 - ccr[1] &= 0xfb; 308 - ccrc[1] |= 0x04; 309 - } 310 - if (ccr[1] & 0x02) { 311 - ccr[1] &= 0xfd; 312 - ccrc[1] |= 0x02; 313 - } 314 - arr3_protected = 0; 315 - if (ccr[6] & 0x02) { 316 - ccr[6] &= 0xfd; 317 - ccrc[6] = 1; /* Disable write protection of ARR3 */ 318 - setCx86(CX86_CCR6, ccr[6]); 319 - } 320 - /* Disable ARR3. This is safe now that we disabled SMM. */ 321 - /* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */ 322 - } 323 - /* If we changed CCR1 in memory, change it in the processor, too. 
*/ 324 - if (ccrc[1]) 325 - setCx86(CX86_CCR1, ccr[1]); 326 - 327 - /* Enable ARR usage by the processor */ 328 - if (!(ccr[5] & 0x20)) { 329 - ccr[5] |= 0x20; 330 - ccrc[5] = 1; 331 - setCx86(CX86_CCR5, ccr[5]); 332 - } 333 - #ifdef CONFIG_SMP 334 - for (i = 0; i < 7; i++) 335 - ccr_state[i] = ccr[i]; 336 - for (i = 0; i < 8; i++) 337 - cyrix_get_arr(i, 338 - &arr_state[i].base, &arr_state[i].size, 339 - &arr_state[i].type); 340 - #endif 341 - 342 - set_mtrr_done(&ctxt); /* flush cache and disable MAPEN */ 343 - 344 - if (ccrc[5]) 345 - printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n"); 346 - if (ccrc[3]) 347 - printk(KERN_INFO "mtrr: ARR3 cannot be changed\n"); 348 - /* 349 - if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n"); 350 - if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n"); 351 - if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n"); 352 - */ 353 - if (ccrc[6]) 354 - printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n"); 355 - } 356 - #endif 357 262 358 263 static struct mtrr_ops cyrix_mtrr_ops = { 359 264 .vendor = X86_VENDOR_CYRIX,
-16
arch/x86/kernel/cpu/mtrr/main.c
··· 59 59 static void set_mtrr(unsigned int reg, unsigned long base, 60 60 unsigned long size, mtrr_type type); 61 61 62 - #ifndef CONFIG_X86_64 63 - extern int arr3_protected; 64 - #else 65 - #define arr3_protected 0 66 - #endif 67 - 68 62 void set_mtrr_ops(struct mtrr_ops * ops) 69 63 { 70 64 if (ops->vendor && ops->vendor < X86_VENDOR_NUM) ··· 507 513 printk(KERN_WARNING "mtrr: register: %d too big\n", reg); 508 514 goto out; 509 515 } 510 - if (is_cpu(CYRIX) && !use_intel()) { 511 - if ((reg == 3) && arr3_protected) { 512 - printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n"); 513 - goto out; 514 - } 515 - } 516 516 mtrr_if->get(reg, &lbase, &lsize, &ltype); 517 517 if (lsize < 1) { 518 518 printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg); ··· 554 566 * These should be called implicitly, but we can't yet until all the initcall 555 567 * stuff is done... 556 568 */ 557 - extern void amd_init_mtrr(void); 558 - extern void cyrix_init_mtrr(void); 559 - extern void centaur_init_mtrr(void); 560 - 561 569 static void __init init_ifs(void) 562 570 { 563 571 #ifndef CONFIG_X86_64
+4
arch/x86/kernel/cpu/mtrr/mtrr.h
··· 97 97 const char *mtrr_attrib_to_str(int x); 98 98 void mtrr_wrmsr(unsigned, unsigned, unsigned); 99 99 100 + /* CPU specific mtrr init functions */ 101 + int amd_init_mtrr(void); 102 + int cyrix_init_mtrr(void); 103 + int centaur_init_mtrr(void);
-74
arch/x86/kernel/cpu/proc.c
··· 10 10 */ 11 11 static int show_cpuinfo(struct seq_file *m, void *v) 12 12 { 13 - /* 14 - * These flag bits must match the definitions in <asm/cpufeature.h>. 15 - * NULL means this bit is undefined or reserved; either way it doesn't 16 - * have meaning as far as Linux is concerned. Note that it's important 17 - * to realize there is a difference between this table and CPUID -- if 18 - * applications want to get the raw CPUID data, they should access 19 - * /dev/cpu/<cpu_nr>/cpuid instead. 20 - */ 21 - static const char * const x86_cap_flags[] = { 22 - /* Intel-defined */ 23 - "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 24 - "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", 25 - "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx", 26 - "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe", 27 - 28 - /* AMD-defined */ 29 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 30 - NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, 31 - NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL, 32 - NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm", 33 - "3dnowext", "3dnow", 34 - 35 - /* Transmeta-defined */ 36 - "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, 37 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 38 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 39 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 40 - 41 - /* Other (Linux-defined) */ 42 - "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", 43 - NULL, NULL, NULL, NULL, 44 - "constant_tsc", "up", NULL, "arch_perfmon", 45 - "pebs", "bts", NULL, "sync_rdtsc", 46 - "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL, 47 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 48 - 49 - /* Intel-defined (#2) */ 50 - "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", 51 - "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, 52 - NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt", 53 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 54 - 55 - /* VIA/Cyrix/Centaur-defined */ 56 - NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en", 57 - "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL, 58 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 59 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 60 - 61 - /* AMD-defined (#2) */ 62 - "lahf_lm", "cmp_legacy", "svm", "extapic", 63 - "cr8_legacy", "abm", "sse4a", "misalignsse", 64 - "3dnowprefetch", "osvw", "ibs", "sse5", 65 - "skinit", "wdt", NULL, NULL, 66 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 67 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 68 - 69 - /* Auxiliary (Linux-defined) */ 70 - "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL, 71 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 72 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 73 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 74 - }; 75 - static const char * const x86_power_flags[] = { 76 - "ts", /* temperature sensor */ 77 - "fid", /* frequency id control */ 78 - "vid", /* voltage id control */ 79 - "ttp", /* thermal trip */ 80 - "tm", 81 - "stc", 82 - "100mhzsteps", 83 - "hwpstate", 84 - "", /* constant_tsc - moved to flags */ 85 - /* nothing */ 86 - }; 87 13 struct cpuinfo_x86 *c = v; 88 14 int i, n = 0; 89 15 int fpu_exception;
+23 -29
arch/x86/kernel/cpuid.c
··· 1 1 /* ----------------------------------------------------------------------- * 2 - * 3 - * Copyright 2000 H. Peter Anvin - All Rights Reserved 2 + * 3 + * Copyright 2000-2008 H. Peter Anvin - All Rights Reserved 4 4 * 5 5 * This program is free software; you can redistribute it and/or modify 6 6 * it under the terms of the GNU General Public License as published by ··· 16 16 * This device is accessed by lseek() to the appropriate CPUID level 17 17 * and then read in chunks of 16 bytes. A larger size means multiple 18 18 * reads of consecutive levels. 19 + * 20 + * The lower 32 bits of the file position is used as the incoming %eax, 21 + * and the upper 32 bits of the file position as the incoming %ecx, 22 + * the latter intended for "counting" eax levels like eax=4. 19 23 * 20 24 * This driver uses /dev/cpu/%d/cpuid where %d is the minor number, and on 21 25 * an SMP box will direct the access to CPU %d. ··· 47 43 48 44 static struct class *cpuid_class; 49 45 50 - struct cpuid_command { 51 - u32 reg; 52 - u32 *data; 46 + struct cpuid_regs { 47 + u32 eax, ebx, ecx, edx; 53 48 }; 54 49 55 50 static void cpuid_smp_cpuid(void *cmd_block) 56 51 { 57 - struct cpuid_command *cmd = cmd_block; 52 + struct cpuid_regs *cmd = (struct cpuid_regs *)cmd_block; 58 53 59 - cpuid(cmd->reg, &cmd->data[0], &cmd->data[1], &cmd->data[2], 60 - &cmd->data[3]); 61 - } 62 - 63 - static inline void do_cpuid(int cpu, u32 reg, u32 * data) 64 - { 65 - struct cpuid_command cmd; 66 - 67 - cmd.reg = reg; 68 - cmd.data = data; 69 - 70 - smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); 54 + cpuid_count(cmd->eax, cmd->ecx, 55 + &cmd->eax, &cmd->ebx, &cmd->ecx, &cmd->edx); 71 56 } 72 57 73 58 static loff_t cpuid_seek(struct file *file, loff_t offset, int orig) 74 59 { 75 60 loff_t ret; 61 + struct inode *inode = file->f_mapping->host; 76 62 77 - lock_kernel(); 78 - 63 + mutex_lock(&inode->i_mutex); 79 64 switch (orig) { 80 65 case 0: 81 66 file->f_pos = offset; ··· 77 84 default: 78 85 ret = -EINVAL; 79 86 } 80 - 81 - unlock_kernel(); 87 + mutex_unlock(&inode->i_mutex); 82 88 return ret; 83 89 } 84 90 ··· 85 93 size_t count, loff_t * ppos) 86 94 { 87 95 char __user *tmp = buf; 88 - u32 data[4]; 89 - u32 reg = *ppos; 96 + struct cpuid_regs cmd; 90 97 int cpu = iminor(file->f_path.dentry->d_inode); 98 + u64 pos = *ppos; 91 99 92 100 if (count % 16) 93 101 return -EINVAL; /* Invalid chunk size */ 94 102 95 103 for (; count; count -= 16) { 96 - do_cpuid(cpu, reg, data); 97 - if (copy_to_user(tmp, &data, 16)) 104 + cmd.eax = pos; 105 + cmd.ecx = pos >> 32; 106 + smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); 107 + if (copy_to_user(tmp, &cmd, 16)) 98 108 return -EFAULT; 99 109 tmp += 16; 100 - *ppos = reg++; 110 + *ppos = ++pos; 101 111 } 102 112 103 113 return tmp - buf; ··· 187 193 } 188 194 for_each_online_cpu(i) { 189 195 err = cpuid_device_create(i); 190 - if (err != 0) 196 + if (err != 0) 191 197 goto out_class; 192 198 } 193 199 register_hotcpu_notifier(&cpuid_class_cpu_notifier); ··· 202 208 } 203 209 class_destroy(cpuid_class); 204 210 out_chrdev: 205 - unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); 211 + unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); 206 212 out: 207 213 return err; 208 214 }
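A userspace sketch of the new /dev/cpu/N/cpuid position encoding described in the hunk above: the low 32 bits of the file offset become the incoming %eax and the high 32 bits become %ecx. The leaf/subleaf values and CPU number below are made up for illustration; build with -D_FILE_OFFSET_BITS=64 so off_t is 64 bits wide.

/* Reads one 16-byte eax/ebx/ecx/edx record at the encoded offset. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	uint32_t regs[4];			/* eax, ebx, ecx, edx */
	uint32_t leaf = 4, subleaf = 1;		/* example "counting" leaf */
	off_t pos = ((off_t)subleaf << 32) | leaf;
	int fd = open("/dev/cpu/0/cpuid", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, regs, sizeof(regs), pos) != sizeof(regs))
		return 1;
	printf("eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
	       regs[0], regs[1], regs[2], regs[3]);
	close(fd);
	return 0;
}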
+30 -27
arch/x86/kernel/efi.c
··· 379 379 #endif 380 380 } 381 381 382 - #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 383 382 static void __init runtime_code_page_mkexec(void) 384 383 { 385 384 efi_memory_desc_t *md; 386 - unsigned long end; 387 385 void *p; 388 386 389 387 if (!(__supported_pte_mask & _PAGE_NX)) ··· 390 392 /* Make EFI runtime service code area executable */ 391 393 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 392 394 md = p; 393 - end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); 394 - if (md->type == EFI_RUNTIME_SERVICES_CODE && 395 - (end >> PAGE_SHIFT) <= max_pfn_mapped) { 396 - set_memory_x(md->virt_addr, md->num_pages); 397 - set_memory_uc(md->virt_addr, md->num_pages); 398 - } 395 + 396 + if (md->type != EFI_RUNTIME_SERVICES_CODE) 397 + continue; 398 + 399 + set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT); 399 400 } 400 - __flush_tlb_all(); 401 401 } 402 - #else 403 - static inline void __init runtime_code_page_mkexec(void) { } 404 - #endif 405 402 406 403 /* 407 404 * This function will switch the EFI runtime services to virtual mode. ··· 410 417 { 411 418 efi_memory_desc_t *md; 412 419 efi_status_t status; 413 - unsigned long end; 414 - void *p; 420 + unsigned long size; 421 + u64 end, systab; 422 + void *p, *va; 415 423 416 424 efi.systab = NULL; 417 425 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 418 426 md = p; 419 427 if (!(md->attribute & EFI_MEMORY_RUNTIME)) 420 428 continue; 421 - end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); 422 - if ((md->attribute & EFI_MEMORY_WB) && 423 - ((end >> PAGE_SHIFT) <= max_pfn_mapped)) 424 - md->virt_addr = (unsigned long)__va(md->phys_addr); 429 + 430 + size = md->num_pages << EFI_PAGE_SHIFT; 431 + end = md->phys_addr + size; 432 + 433 + if ((end >> PAGE_SHIFT) <= max_pfn_mapped) 434 + va = __va(md->phys_addr); 425 435 else 426 - md->virt_addr = (unsigned long) 427 - efi_ioremap(md->phys_addr, 428 - md->num_pages << EFI_PAGE_SHIFT); 429 - if (!md->virt_addr) 436 + va = efi_ioremap(md->phys_addr, size); 437 + 438 + if (md->attribute & EFI_MEMORY_WB) 439 + set_memory_uc(md->virt_addr, size); 440 + 441 + md->virt_addr = (u64) (unsigned long) va; 442 + 443 + if (!va) { 430 444 printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n", 431 445 (unsigned long long)md->phys_addr); 432 - if ((md->phys_addr <= (unsigned long)efi_phys.systab) && 433 - ((unsigned long)efi_phys.systab < end)) 434 - efi.systab = (efi_system_table_t *)(unsigned long) 435 - (md->virt_addr - md->phys_addr + 436 - (unsigned long)efi_phys.systab); 446 + continue; 447 + } 448 + 449 + systab = (u64) (unsigned long) efi_phys.systab; 450 + if (md->phys_addr <= systab && systab < end) { 451 + systab += md->virt_addr - md->phys_addr; 452 + efi.systab = (efi_system_table_t *) (unsigned long) systab; 453 + } 437 454 } 438 455 439 456 BUG_ON(!efi.systab);
+11 -11
arch/x86/kernel/efi_64.c
··· 54 54 else 55 55 set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \ 56 56 __supported_pte_mask)); 57 - if (level == 4) 58 - start = (start + PMD_SIZE) & PMD_MASK; 59 - else 57 + if (level == PG_LEVEL_4K) 60 58 start = (start + PAGE_SIZE) & PAGE_MASK; 59 + else 60 + start = (start + PMD_SIZE) & PMD_MASK; 61 61 } 62 62 } 63 63 ··· 109 109 memmap.nr_map * memmap.desc_size); 110 110 } 111 111 112 - void __iomem * __init efi_ioremap(unsigned long offset, 113 - unsigned long size) 112 + void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size) 114 113 { 115 114 static unsigned pages_mapped; 116 - unsigned long last_addr; 117 115 unsigned i, pages; 118 116 119 - last_addr = offset + size - 1; 120 - offset &= PAGE_MASK; 121 - pages = (PAGE_ALIGN(last_addr) - offset) >> PAGE_SHIFT; 117 + /* phys_addr and size must be page aligned */ 118 + if ((phys_addr & ~PAGE_MASK) || (size & ~PAGE_MASK)) 119 + return NULL; 120 + 121 + pages = size >> PAGE_SHIFT; 122 122 if (pages_mapped + pages > MAX_EFI_IO_PAGES) 123 123 return NULL; 124 124 125 125 for (i = 0; i < pages; i++) { 126 126 __set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped, 127 - offset, PAGE_KERNEL_EXEC_NOCACHE); 128 - offset += PAGE_SIZE; 127 + phys_addr, PAGE_KERNEL); 128 + phys_addr += PAGE_SIZE; 129 129 pages_mapped++; 130 130 } 131 131
+2 -2
arch/x86/kernel/head_64.S
··· 63 63 64 64 /* Is the address not 2M aligned? */ 65 65 movq %rbp, %rax 66 - andl $~LARGE_PAGE_MASK, %eax 66 + andl $~PMD_PAGE_MASK, %eax 67 67 testl %eax, %eax 68 68 jnz bad_address 69 69 ··· 88 88 89 89 /* Add an Identity mapping if I am above 1G */ 90 90 leaq _text(%rip), %rdi 91 - andq $LARGE_PAGE_MASK, %rdi 91 + andq $PMD_PAGE_MASK, %rdi 92 92 93 93 movq %rdi, %rax 94 94 shrq $PUD_SHIFT, %rax
+2 -1
arch/x86/kernel/ldt.c
··· 35 35 if (mincount <= pc->size) 36 36 return 0; 37 37 oldsize = pc->size; 38 - mincount = (mincount + 511) & (~511); 38 + mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & 39 + (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); 39 40 if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) 40 41 newldt = vmalloc(mincount * LDT_ENTRY_SIZE); 41 42 else
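A small userspace illustration of the rounding change above: with the usual 4096-byte pages and 8-byte LDT entries (assumed here), the PAGE_SIZE/LDT_ENTRY_SIZE based mask gives the same 512-entry granularity the old hard-coded "+ 511" expressed, just without the magic number.

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, ldt_entry_size = 8;
	unsigned long gran = page_size / ldt_entry_size;	/* 512 entries */
	unsigned long mincount = 100;

	mincount = (mincount + (gran - 1)) & ~(gran - 1);
	printf("%lu\n", mincount);	/* prints 512, same as (100 + 511) & ~511 */
	return 0;
}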
+9 -5
arch/x86/kernel/msr.c
··· 1 1 /* ----------------------------------------------------------------------- * 2 - * 3 - * Copyright 2000 H. Peter Anvin - All Rights Reserved 2 + * 3 + * Copyright 2000-2008 H. Peter Anvin - All Rights Reserved 4 4 * 5 5 * This program is free software; you can redistribute it and/or modify 6 6 * it under the terms of the GNU General Public License as published by ··· 45 45 46 46 static loff_t msr_seek(struct file *file, loff_t offset, int orig) 47 47 { 48 - loff_t ret = -EINVAL; 48 + loff_t ret; 49 + struct inode *inode = file->f_mapping->host; 49 50 50 - lock_kernel(); 51 + mutex_lock(&inode->i_mutex); 51 52 switch (orig) { 52 53 case 0: 53 54 file->f_pos = offset; ··· 57 56 case 1: 58 57 file->f_pos += offset; 59 58 ret = file->f_pos; 59 + break; 60 + default: 61 + ret = -EINVAL; 60 62 } 61 - unlock_kernel(); 63 + mutex_unlock(&inode->i_mutex); 62 64 return ret; 63 65 } 64 66
+3 -2
arch/x86/kernel/pci-gart_64.c
··· 501 501 } 502 502 503 503 a = aper + iommu_size; 504 - iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a; 504 + iommu_size -= round_up(a, PMD_PAGE_SIZE) - a; 505 505 506 506 if (iommu_size < 64*1024*1024) { 507 507 printk(KERN_WARNING ··· 731 731 * the backing memory. The GART address is only used by PCI 732 732 * devices. 733 733 */ 734 - clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size); 734 + set_memory_np((unsigned long)__va(iommu_bus_base), 735 + iommu_size >> PAGE_SHIFT); 735 736 736 737 /* 737 738 * Try to workaround a bug (thanks to BenH)
+1 -1
arch/x86/kernel/process_32.c
··· 251 251 * because it has nothing to do. 252 252 * Give all the remaining CPUS a kick. 253 253 */ 254 - smp_call_function_mask(map, do_nothing, 0, 0); 254 + smp_call_function_mask(map, do_nothing, NULL, 0); 255 255 } while (!cpus_empty(map)); 256 256 257 257 set_cpus_allowed(current, tmp);
-76
arch/x86/kernel/setup_64.c
··· 1068 1068 struct cpuinfo_x86 *c = v; 1069 1069 int cpu = 0, i; 1070 1070 1071 - /* 1072 - * These flag bits must match the definitions in <asm/cpufeature.h>. 1073 - * NULL means this bit is undefined or reserved; either way it doesn't 1074 - * have meaning as far as Linux is concerned. Note that it's important 1075 - * to realize there is a difference between this table and CPUID -- if 1076 - * applications want to get the raw CPUID data, they should access 1077 - * /dev/cpu/<cpu_nr>/cpuid instead. 1078 - */ 1079 - static const char *const x86_cap_flags[] = { 1080 - /* Intel-defined */ 1081 - "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 1082 - "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", 1083 - "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx", 1084 - "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe", 1085 - 1086 - /* AMD-defined */ 1087 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1088 - NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, 1089 - NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL, 1090 - NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm", 1091 - "3dnowext", "3dnow", 1092 - 1093 - /* Transmeta-defined */ 1094 - "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, 1095 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1096 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1097 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1098 - 1099 - /* Other (Linux-defined) */ 1100 - "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", 1101 - NULL, NULL, NULL, NULL, 1102 - "constant_tsc", "up", NULL, "arch_perfmon", 1103 - "pebs", "bts", NULL, "sync_rdtsc", 1104 - "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1105 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1106 - 1107 - /* Intel-defined (#2) */ 1108 - "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", 1109 - "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, 1110 - NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt", 1111 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1112 - 1113 - /* VIA/Cyrix/Centaur-defined */ 1114 - NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en", 1115 - "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL, 1116 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1117 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1118 - 1119 - /* AMD-defined (#2) */ 1120 - "lahf_lm", "cmp_legacy", "svm", "extapic", 1121 - "cr8_legacy", "abm", "sse4a", "misalignsse", 1122 - "3dnowprefetch", "osvw", "ibs", "sse5", 1123 - "skinit", "wdt", NULL, NULL, 1124 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1125 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1126 - 1127 - /* Auxiliary (Linux-defined) */ 1128 - "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1129 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1130 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1131 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1132 - }; 1133 - static const char *const x86_power_flags[] = { 1134 - "ts", /* temperature sensor */ 1135 - "fid", /* frequency id control */ 1136 - "vid", /* voltage id control */ 1137 - "ttp", /* thermal trip */ 1138 - "tm", 1139 - "stc", 1140 - "100mhzsteps", 1141 - "hwpstate", 1142 - "", /* tsc invariant mapped to constant_tsc */ 1143 - /* nothing */ 1144 - }; 1145 - 1146 - 1147 1071 #ifdef CONFIG_SMP 1148 1072 cpu = c->cpu_index; 1149 1073 #endif
+2 -10
arch/x86/kernel/test_nx.c
··· 12 12 #include <linux/module.h> 13 13 #include <linux/sort.h> 14 14 #include <asm/uaccess.h> 15 + #include <asm/asm.h> 15 16 16 17 extern int rodata_test_data; 17 18 ··· 90 89 "2: mov %[zero], %[rslt]\n" 91 90 " ret\n" 92 91 ".previous\n" 93 - ".section __ex_table,\"a\"\n" 94 - " .align 8\n" 95 - #ifdef CONFIG_X86_32 96 - " .long 0b\n" 97 - " .long 2b\n" 98 - #else 99 - " .quad 0b\n" 100 - " .quad 2b\n" 101 - #endif 102 - ".previous\n" 92 + _ASM_EXTABLE(0b,2b) 103 93 : [rslt] "=r" (result) 104 94 : [fake_code] "r" (address), [zero] "r" (0UL), "0" (result) 105 95 );
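The _ASM_EXTABLE() helper from <asm/asm.h> used here (and in the mmx/usercopy hunks below) replaces the open-coded __ex_table sequences; the following is a presumed shape of the macro, reconstructed from the 32-bit and 64-bit sequences these patches delete, and the real header text may differ.

/* Presumed definition of _ASM_EXTABLE(), inferred from the removed
 * .align 4/.long (32-bit) and .align 8/.quad (64-bit) sequences. */
#ifdef CONFIG_X86_32
# define _ASM_EXTABLE(from, to)			\
	" .section __ex_table,\"a\"\n"		\
	" .align 4\n"				\
	" .long " #from "," #to "\n"		\
	" .previous\n"
#else
# define _ASM_EXTABLE(from, to)			\
	" .section __ex_table,\"a\"\n"		\
	" .align 8\n"				\
	" .quad " #from "," #to "\n"		\
	" .previous\n"
#endif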
+1 -6
arch/x86/kernel/trampoline_32.S
··· 11 11 * trampoline page to make our stack and everything else 12 12 * is a mystery. 13 13 * 14 - * In fact we don't actually need a stack so we don't 15 - * set one up. 16 - * 17 - * We jump into the boot/compressed/head.S code. So you'd 18 - * better be running a compressed kernel image or you 19 - * won't get very far. 14 + * We jump into arch/x86/kernel/head_32.S. 20 15 * 21 16 * On entry to trampoline_data, the processor is in real mode 22 17 * with 16-bit addressing and 16-bit data. CS has some value
-3
arch/x86/kernel/trampoline_64.S
··· 10 10 * trampoline page to make our stack and everything else 11 11 * is a mystery. 12 12 * 13 - * In fact we don't actually need a stack so we don't 14 - * set one up. 15 - * 16 13 * On entry to trampoline_data, the processor is in real mode 17 14 * with 16-bit addressing and 16-bit data. CS has some value 18 15 * and IP is zero. Thus, data addresses need to be absolute
+3 -3
arch/x86/kernel/vmi_32.c
··· 220 220 static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g) 221 221 { 222 222 u32 *idt_entry = (u32 *)g; 223 - vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[2]); 223 + vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[1]); 224 224 } 225 225 226 226 static void vmi_write_gdt_entry(struct desc_struct *dt, int entry, 227 227 const void *desc, int type) 228 228 { 229 229 u32 *gdt_entry = (u32 *)desc; 230 - vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[2]); 230 + vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[1]); 231 231 } 232 232 233 233 static void vmi_write_ldt_entry(struct desc_struct *dt, int entry, 234 234 const void *desc) 235 235 { 236 236 u32 *ldt_entry = (u32 *)desc; 237 - vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[2]); 237 + vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[1]); 238 238 } 239 239 240 240 static void vmi_load_sp0(struct tss_struct *tss,
+7 -24
arch/x86/lib/mmx_32.c
··· 4 4 #include <linux/hardirq.h> 5 5 #include <linux/module.h> 6 6 7 + #include <asm/asm.h> 7 8 #include <asm/i387.h> 8 9 9 10 ··· 51 50 "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ 52 51 " jmp 2b\n" 53 52 ".previous\n" 54 - ".section __ex_table,\"a\"\n" 55 - " .align 4\n" 56 - " .long 1b, 3b\n" 57 - ".previous" 53 + _ASM_EXTABLE(1b,3b) 58 54 : : "r" (from) ); 59 55 60 56 ··· 79 81 "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ 80 82 " jmp 2b\n" 81 83 ".previous\n" 82 - ".section __ex_table,\"a\"\n" 83 - " .align 4\n" 84 - " .long 1b, 3b\n" 85 - ".previous" 84 + _ASM_EXTABLE(1b,3b) 86 85 : : "r" (from), "r" (to) : "memory"); 87 86 from+=64; 88 87 to+=64; ··· 176 181 "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ 177 182 " jmp 2b\n" 178 183 ".previous\n" 179 - ".section __ex_table,\"a\"\n" 180 - " .align 4\n" 181 - " .long 1b, 3b\n" 182 - ".previous" 184 + _ASM_EXTABLE(1b,3b) 183 185 : : "r" (from) ); 184 186 185 187 for(i=0; i<(4096-320)/64; i++) ··· 203 211 "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ 204 212 " jmp 2b\n" 205 213 ".previous\n" 206 - ".section __ex_table,\"a\"\n" 207 - " .align 4\n" 208 - " .long 1b, 3b\n" 209 - ".previous" 214 + _ASM_EXTABLE(1b,3b) 210 215 : : "r" (from), "r" (to) : "memory"); 211 216 from+=64; 212 217 to+=64; ··· 300 311 "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ 301 312 " jmp 2b\n" 302 313 ".previous\n" 303 - ".section __ex_table,\"a\"\n" 304 - " .align 4\n" 305 - " .long 1b, 3b\n" 306 - ".previous" 314 + _ASM_EXTABLE(1b,3b) 307 315 : : "r" (from) ); 308 316 309 317 for(i=0; i<4096/64; i++) ··· 327 341 "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ 328 342 " jmp 2b\n" 329 343 ".previous\n" 330 - ".section __ex_table,\"a\"\n" 331 - " .align 4\n" 332 - " .long 1b, 3b\n" 333 - ".previous" 344 + _ASM_EXTABLE(1b,3b) 334 345 : : "r" (from), "r" (to) : "memory"); 335 346 from+=64; 336 347 to+=64;
+3 -9
arch/x86/lib/usercopy_32.c
··· 48 48 "3: movl %5,%0\n" \ 49 49 " jmp 2b\n" \ 50 50 ".previous\n" \ 51 - ".section __ex_table,\"a\"\n" \ 52 - " .align 4\n" \ 53 - " .long 0b,3b\n" \ 54 - ".previous" \ 51 + _ASM_EXTABLE(0b,3b) \ 55 52 : "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \ 56 53 "=&D" (__d2) \ 57 54 : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ ··· 129 132 "3: lea 0(%2,%0,4),%0\n" \ 130 133 " jmp 2b\n" \ 131 134 ".previous\n" \ 132 - ".section __ex_table,\"a\"\n" \ 133 - " .align 4\n" \ 134 - " .long 0b,3b\n" \ 135 - " .long 1b,2b\n" \ 136 - ".previous" \ 135 + _ASM_EXTABLE(0b,3b) \ 136 + _ASM_EXTABLE(1b,2b) \ 137 137 : "=&c"(size), "=&D" (__d0) \ 138 138 : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \ 139 139 } while (0)
+3 -9
arch/x86/lib/usercopy_64.c
··· 31 31 "3: movq %5,%0\n" \ 32 32 " jmp 2b\n" \ 33 33 ".previous\n" \ 34 - ".section __ex_table,\"a\"\n" \ 35 - " .align 8\n" \ 36 - " .quad 0b,3b\n" \ 37 - ".previous" \ 34 + _ASM_EXTABLE(0b,3b) \ 38 35 : "=r"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \ 39 36 "=&D" (__d2) \ 40 37 : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ ··· 84 87 "3: lea 0(%[size1],%[size8],8),%[size8]\n" 85 88 " jmp 2b\n" 86 89 ".previous\n" 87 - ".section __ex_table,\"a\"\n" 88 - " .align 8\n" 89 - " .quad 0b,3b\n" 90 - " .quad 1b,2b\n" 91 - ".previous" 90 + _ASM_EXTABLE(0b,3b) 91 + _ASM_EXTABLE(1b,2b) 92 92 : [size8] "=c"(size), [dst] "=&D" (__d0) 93 93 : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr), 94 94 [zero] "r" (0UL), [eight] "r" (8UL));
+11 -23
arch/x86/mm/fault.c
··· 240 240 pud = pud_offset(pgd, address); 241 241 if (bad_address(pud)) goto bad; 242 242 printk("PUD %lx ", pud_val(*pud)); 243 - if (!pud_present(*pud)) goto ret; 243 + if (!pud_present(*pud) || pud_large(*pud)) 244 + goto ret; 244 245 245 246 pmd = pmd_offset(pud, address); 246 247 if (bad_address(pmd)) goto bad; ··· 509 508 pmd_t *pmd, *pmd_ref; 510 509 pte_t *pte, *pte_ref; 511 510 511 + /* Make sure we are in vmalloc area */ 512 + if (!(address >= VMALLOC_START && address < VMALLOC_END)) 513 + return -1; 514 + 512 515 /* Copy kernel mappings over when needed. This can also 513 516 happen within a race in page table update. In the later 514 517 case just flush. */ ··· 608 603 */ 609 604 #ifdef CONFIG_X86_32 610 605 if (unlikely(address >= TASK_SIZE)) { 606 + #else 607 + if (unlikely(address >= TASK_SIZE64)) { 608 + #endif 611 609 if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) && 612 610 vmalloc_fault(address) >= 0) 613 611 return; ··· 626 618 goto bad_area_nosemaphore; 627 619 } 628 620 621 + 622 + #ifdef CONFIG_X86_32 629 623 /* It's safe to allow irq's after cr2 has been saved and the vmalloc 630 624 fault has been handled. */ 631 625 if (regs->flags & (X86_EFLAGS_IF|VM_MASK)) ··· 640 630 if (in_atomic() || !mm) 641 631 goto bad_area_nosemaphore; 642 632 #else /* CONFIG_X86_64 */ 643 - if (unlikely(address >= TASK_SIZE64)) { 644 - /* 645 - * Don't check for the module range here: its PML4 646 - * is always initialized because it's shared with the main 647 - * kernel text. Only vmalloc may need PML4 syncups. 648 - */ 649 - if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) && 650 - ((address >= VMALLOC_START && address < VMALLOC_END))) { 651 - if (vmalloc_fault(address) >= 0) 652 - return; 653 - } 654 - 655 - /* Can handle a stale RO->RW TLB */ 656 - if (spurious_fault(address, error_code)) 657 - return; 658 - 659 - /* 660 - * Don't take the mm semaphore here. If we fixup a prefetch 661 - * fault we could otherwise deadlock. 662 - */ 663 - goto bad_area_nosemaphore; 664 - } 665 633 if (likely(regs->flags & X86_EFLAGS_IF)) 666 634 local_irq_enable(); 667 635
+2 -4
arch/x86/mm/init_32.c
··· 31 31 #include <linux/initrd.h> 32 32 #include <linux/cpumask.h> 33 33 34 + #include <asm/asm.h> 34 35 #include <asm/processor.h> 35 36 #include <asm/system.h> 36 37 #include <asm/uaccess.h> ··· 719 718 "1: movb %1, %0 \n" 720 719 " xorl %2, %2 \n" 721 720 "2: \n" 722 - ".section __ex_table, \"a\"\n" 723 - " .align 4 \n" 724 - " .long 1b, 2b \n" 725 - ".previous \n" 721 + _ASM_EXTABLE(1b,2b) 726 722 :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)), 727 723 "=q" (tmp_reg), 728 724 "=r" (flag)
+2 -47
arch/x86/mm/init_64.c
··· 273 273 int i = pmd_index(address); 274 274 275 275 for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) { 276 - unsigned long entry; 277 276 pmd_t *pmd = pmd_page + pmd_index(address); 278 277 279 278 if (address >= end) { ··· 286 287 if (pmd_val(*pmd)) 287 288 continue; 288 289 289 - entry = __PAGE_KERNEL_LARGE|_PAGE_GLOBAL|address; 290 - entry &= __supported_pte_mask; 291 - set_pmd(pmd, __pmd(entry)); 290 + set_pte((pte_t *)pmd, 291 + pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); 292 292 } 293 293 } 294 294 ··· 431 433 free_area_init_nodes(max_zone_pfns); 432 434 } 433 435 #endif 434 - 435 - /* 436 - * Unmap a kernel mapping if it exists. This is useful to avoid 437 - * prefetches from the CPU leading to inconsistent cache lines. 438 - * address and size must be aligned to 2MB boundaries. 439 - * Does nothing when the mapping doesn't exist. 440 - */ 441 - void __init clear_kernel_mapping(unsigned long address, unsigned long size) 442 - { 443 - unsigned long end = address + size; 444 - 445 - BUG_ON(address & ~LARGE_PAGE_MASK); 446 - BUG_ON(size & ~LARGE_PAGE_MASK); 447 - 448 - for (; address < end; address += LARGE_PAGE_SIZE) { 449 - pgd_t *pgd = pgd_offset_k(address); 450 - pud_t *pud; 451 - pmd_t *pmd; 452 - 453 - if (pgd_none(*pgd)) 454 - continue; 455 - 456 - pud = pud_offset(pgd, address); 457 - if (pud_none(*pud)) 458 - continue; 459 - 460 - pmd = pmd_offset(pud, address); 461 - if (!pmd || pmd_none(*pmd)) 462 - continue; 463 - 464 - if (!(pmd_val(*pmd) & _PAGE_PSE)) { 465 - /* 466 - * Could handle this, but it should not happen 467 - * currently: 468 - */ 469 - printk(KERN_ERR "clear_kernel_mapping: " 470 - "mapping has been split. will leak memory\n"); 471 - pmd_ERROR(*pmd); 472 - } 473 - set_pmd(pmd, __pmd(0)); 474 - } 475 - __flush_tlb_all(); 476 - } 477 436 478 437 /* 479 438 * Memory hotplug specific functions
+12 -29
arch/x86/mm/ioremap.c
··· 70 70 * Fix up the linear direct mapping of the kernel to avoid cache attribute 71 71 * conflicts. 72 72 */ 73 - static int ioremap_change_attr(unsigned long paddr, unsigned long size, 73 + static int ioremap_change_attr(unsigned long vaddr, unsigned long size, 74 74 enum ioremap_mode mode) 75 75 { 76 - unsigned long vaddr = (unsigned long)__va(paddr); 77 76 unsigned long nrpages = size >> PAGE_SHIFT; 78 - unsigned int level; 79 77 int err; 80 - 81 - /* No change for pages after the last mapping */ 82 - if ((paddr + size - 1) >= (max_pfn_mapped << PAGE_SHIFT)) 83 - return 0; 84 - 85 - /* 86 - * If there is no identity map for this address, 87 - * change_page_attr_addr is unnecessary 88 - */ 89 - if (!lookup_address(vaddr, &level)) 90 - return 0; 91 78 92 79 switch (mode) { 93 80 case IOR_MODE_UNCACHED: ··· 101 114 static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, 102 115 enum ioremap_mode mode) 103 116 { 104 - void __iomem *addr; 117 + unsigned long pfn, offset, last_addr, vaddr; 105 118 struct vm_struct *area; 106 - unsigned long offset, last_addr; 107 119 pgprot_t prot; 108 120 109 121 /* Don't allow wraparound or zero size */ ··· 119 133 /* 120 134 * Don't allow anybody to remap normal RAM that we're using.. 121 135 */ 122 - for (offset = phys_addr >> PAGE_SHIFT; offset < max_pfn_mapped && 123 - (offset << PAGE_SHIFT) < last_addr; offset++) { 124 - if (page_is_ram(offset)) 136 + for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped && 137 + (pfn << PAGE_SHIFT) < last_addr; pfn++) { 138 + if (page_is_ram(pfn) && pfn_valid(pfn) && 139 + !PageReserved(pfn_to_page(pfn))) 125 140 return NULL; 126 141 } 127 142 ··· 150 163 if (!area) 151 164 return NULL; 152 165 area->phys_addr = phys_addr; 153 - addr = (void __iomem *) area->addr; 154 - if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, 155 - phys_addr, prot)) { 156 - remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr)); 166 + vaddr = (unsigned long) area->addr; 167 + if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) { 168 + remove_vm_area((void *)(vaddr & PAGE_MASK)); 157 169 return NULL; 158 170 } 159 171 160 - if (ioremap_change_attr(phys_addr, size, mode) < 0) { 161 - vunmap(addr); 172 + if (ioremap_change_attr(vaddr, size, mode) < 0) { 173 + vunmap(area->addr); 162 174 return NULL; 163 175 } 164 176 165 - return (void __iomem *) (offset + (char __iomem *)addr); 177 + return (void __iomem *) (vaddr + offset); 166 178 } 167 179 168 180 /** ··· 239 253 dump_stack(); 240 254 return; 241 255 } 242 - 243 - /* Reset the direct mapping. Can block */ 244 - ioremap_change_attr(p->phys_addr, p->size, IOR_MODE_CACHED); 245 256 246 257 /* Finally remove it */ 247 258 o = remove_vm_area((void *)addr);
+6 -1
arch/x86/mm/numa_64.c
··· 202 202 if (node_data[nodeid] == NULL) 203 203 return; 204 204 nodedata_phys = __pa(node_data[nodeid]); 205 + printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys, 206 + nodedata_phys + pgdat_size - 1); 205 207 206 208 memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t)); 207 209 NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid]; ··· 227 225 return; 228 226 } 229 227 bootmap_start = __pa(bootmap); 230 - Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages); 231 228 232 229 bootmap_size = init_bootmem_node(NODE_DATA(nodeid), 233 230 bootmap_start >> PAGE_SHIFT, 234 231 start_pfn, end_pfn); 232 + 233 + printk(KERN_INFO " bootmap [%016lx - %016lx] pages %lx\n", 234 + bootmap_start, bootmap_start + bootmap_size - 1, 235 + bootmap_pages); 235 236 236 237 free_bootmem_with_active_regions(nodeid, end); 237 238
+2 -1
arch/x86/mm/pageattr-test.c
··· 137 137 138 138 for (k = 0; k < len[i]; k++) { 139 139 pte = lookup_address(addr[i] + k*PAGE_SIZE, &level); 140 - if (!pte || pgprot_val(pte_pgprot(*pte)) == 0) { 140 + if (!pte || pgprot_val(pte_pgprot(*pte)) == 0 || 141 + !(pte_val(*pte) & _PAGE_PRESENT)) { 141 142 addr[i] = 0; 142 143 break; 143 144 }
+318 -84
arch/x86/mm/pageattr.c
··· 16 16 #include <asm/uaccess.h> 17 17 #include <asm/pgalloc.h> 18 18 19 + /* 20 + * The current flushing context - we pass it instead of 5 arguments: 21 + */ 22 + struct cpa_data { 23 + unsigned long vaddr; 24 + pgprot_t mask_set; 25 + pgprot_t mask_clr; 26 + int numpages; 27 + int flushtlb; 28 + }; 29 + 19 30 static inline int 20 31 within(unsigned long addr, unsigned long start, unsigned long end) 21 32 { ··· 63 52 64 53 static void __cpa_flush_all(void *arg) 65 54 { 55 + unsigned long cache = (unsigned long)arg; 56 + 66 57 /* 67 58 * Flush all to work around Errata in early athlons regarding 68 59 * large page flushing. 69 60 */ 70 61 __flush_tlb_all(); 71 62 72 - if (boot_cpu_data.x86_model >= 4) 63 + if (cache && boot_cpu_data.x86_model >= 4) 73 64 wbinvd(); 74 65 } 75 66 76 - static void cpa_flush_all(void) 67 + static void cpa_flush_all(unsigned long cache) 77 68 { 78 69 BUG_ON(irqs_disabled()); 79 70 80 - on_each_cpu(__cpa_flush_all, NULL, 1, 1); 71 + on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1); 81 72 } 82 73 83 74 static void __cpa_flush_range(void *arg) ··· 92 79 __flush_tlb_all(); 93 80 } 94 81 95 - static void cpa_flush_range(unsigned long start, int numpages) 82 + static void cpa_flush_range(unsigned long start, int numpages, int cache) 96 83 { 97 84 unsigned int i, level; 98 85 unsigned long addr; ··· 101 88 WARN_ON(PAGE_ALIGN(start) != start); 102 89 103 90 on_each_cpu(__cpa_flush_range, NULL, 1, 1); 91 + 92 + if (!cache) 93 + return; 104 94 105 95 /* 106 96 * We only need to flush on one CPU, ··· 117 101 /* 118 102 * Only flush present addresses: 119 103 */ 120 - if (pte && pte_present(*pte)) 104 + if (pte && (pte_val(*pte) & _PAGE_PRESENT)) 121 105 clflush_cache_range((void *) addr, PAGE_SIZE); 122 106 } 107 + } 108 + 109 + #define HIGH_MAP_START __START_KERNEL_map 110 + #define HIGH_MAP_END (__START_KERNEL_map + KERNEL_TEXT_SIZE) 111 + 112 + 113 + /* 114 + * Converts a virtual address to a X86-64 highmap address 115 + */ 116 + static unsigned long virt_to_highmap(void *address) 117 + { 118 + #ifdef CONFIG_X86_64 119 + return __pa((unsigned long)address) + HIGH_MAP_START - phys_base; 120 + #else 121 + return (unsigned long)address; 122 + #endif 123 123 } 124 124 125 125 /* ··· 161 129 */ 162 130 if (within(address, (unsigned long)_text, (unsigned long)_etext)) 163 131 pgprot_val(forbidden) |= _PAGE_NX; 132 + /* 133 + * Do the same for the x86-64 high kernel mapping 134 + */ 135 + if (within(address, virt_to_highmap(_text), virt_to_highmap(_etext))) 136 + pgprot_val(forbidden) |= _PAGE_NX; 137 + 164 138 165 139 #ifdef CONFIG_DEBUG_RODATA 166 140 /* The .rodata section needs to be read-only */ 167 141 if (within(address, (unsigned long)__start_rodata, 168 142 (unsigned long)__end_rodata)) 143 + pgprot_val(forbidden) |= _PAGE_RW; 144 + /* 145 + * Do the same for the x86-64 high kernel mapping 146 + */ 147 + if (within(address, virt_to_highmap(__start_rodata), 148 + virt_to_highmap(__end_rodata))) 169 149 pgprot_val(forbidden) |= _PAGE_RW; 170 150 #endif 171 151 ··· 186 142 return prot; 187 143 } 188 144 145 + /* 146 + * Lookup the page table entry for a virtual address. Return a pointer 147 + * to the entry and the level of the mapping. 148 + * 149 + * Note: We return pud and pmd either when the entry is marked large 150 + * or when the present bit is not set. Otherwise we would return a 151 + * pointer to a nonexisting mapping. 
152 + */ 189 153 pte_t *lookup_address(unsigned long address, int *level) 190 154 { 191 155 pgd_t *pgd = pgd_offset_k(address); ··· 204 152 205 153 if (pgd_none(*pgd)) 206 154 return NULL; 155 + 207 156 pud = pud_offset(pgd, address); 208 157 if (pud_none(*pud)) 209 158 return NULL; 159 + 160 + *level = PG_LEVEL_1G; 161 + if (pud_large(*pud) || !pud_present(*pud)) 162 + return (pte_t *)pud; 163 + 210 164 pmd = pmd_offset(pud, address); 211 165 if (pmd_none(*pmd)) 212 166 return NULL; 213 167 214 168 *level = PG_LEVEL_2M; 215 - if (pmd_large(*pmd)) 169 + if (pmd_large(*pmd) || !pmd_present(*pmd)) 216 170 return (pte_t *)pmd; 217 171 218 172 *level = PG_LEVEL_4K; 173 + 219 174 return pte_offset_kernel(pmd, address); 220 175 } 221 176 177 + /* 178 + * Set the new pmd in all the pgds we know about: 179 + */ 222 180 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) 223 181 { 224 182 /* change init_mm */ ··· 237 175 if (!SHARED_KERNEL_PMD) { 238 176 struct page *page; 239 177 178 + address = __pa(address); 240 179 list_for_each_entry(page, &pgd_list, lru) { 241 180 pgd_t *pgd; 242 181 pud_t *pud; ··· 252 189 #endif 253 190 } 254 191 192 + static int 193 + try_preserve_large_page(pte_t *kpte, unsigned long address, 194 + struct cpa_data *cpa) 195 + { 196 + unsigned long nextpage_addr, numpages, pmask, psize, flags; 197 + pte_t new_pte, old_pte, *tmp; 198 + pgprot_t old_prot, new_prot; 199 + int level, do_split = 1; 200 + 201 + /* 202 + * An Athlon 64 X2 showed hard hangs if we tried to preserve 203 + * largepages and changed the PSE entry from RW to RO. 204 + * 205 + * As AMD CPUs have a long series of erratas in this area, 206 + * (and none of the known ones seem to explain this hang), 207 + * disable this code until the hang can be debugged: 208 + */ 209 + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) 210 + return 1; 211 + 212 + spin_lock_irqsave(&pgd_lock, flags); 213 + /* 214 + * Check for races, another CPU might have split this page 215 + * up already: 216 + */ 217 + tmp = lookup_address(address, &level); 218 + if (tmp != kpte) 219 + goto out_unlock; 220 + 221 + switch (level) { 222 + case PG_LEVEL_2M: 223 + psize = PMD_PAGE_SIZE; 224 + pmask = PMD_PAGE_MASK; 225 + break; 226 + #ifdef CONFIG_X86_64 227 + case PG_LEVEL_1G: 228 + psize = PMD_PAGE_SIZE; 229 + pmask = PMD_PAGE_MASK; 230 + break; 231 + #endif 232 + default: 233 + do_split = -EINVAL; 234 + goto out_unlock; 235 + } 236 + 237 + /* 238 + * Calculate the number of pages, which fit into this large 239 + * page starting at address: 240 + */ 241 + nextpage_addr = (address + psize) & pmask; 242 + numpages = (nextpage_addr - address) >> PAGE_SHIFT; 243 + if (numpages < cpa->numpages) 244 + cpa->numpages = numpages; 245 + 246 + /* 247 + * We are safe now. Check whether the new pgprot is the same: 248 + */ 249 + old_pte = *kpte; 250 + old_prot = new_prot = pte_pgprot(old_pte); 251 + 252 + pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); 253 + pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); 254 + new_prot = static_protections(new_prot, address); 255 + 256 + /* 257 + * If there are no changes, return. maxpages has been updated 258 + * above: 259 + */ 260 + if (pgprot_val(new_prot) == pgprot_val(old_prot)) { 261 + do_split = 0; 262 + goto out_unlock; 263 + } 264 + 265 + /* 266 + * We need to change the attributes. Check, whether we can 267 + * change the large page in one go. 
We request a split, when 268 + * the address is not aligned and the number of pages is 269 + * smaller than the number of pages in the large page. Note 270 + * that we limited the number of possible pages already to 271 + * the number of pages in the large page. 272 + */ 273 + if (address == (nextpage_addr - psize) && cpa->numpages == numpages) { 274 + /* 275 + * The address is aligned and the number of pages 276 + * covers the full page. 277 + */ 278 + new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot)); 279 + __set_pmd_pte(kpte, address, new_pte); 280 + cpa->flushtlb = 1; 281 + do_split = 0; 282 + } 283 + 284 + out_unlock: 285 + spin_unlock_irqrestore(&pgd_lock, flags); 286 + 287 + return do_split; 288 + } 289 + 255 290 static int split_large_page(pte_t *kpte, unsigned long address) 256 291 { 257 - pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte)); 292 + unsigned long flags, pfn, pfninc = 1; 258 293 gfp_t gfp_flags = GFP_KERNEL; 259 - unsigned long flags; 260 - unsigned long addr; 261 - pte_t *pbase, *tmp; 262 - struct page *base; 263 294 unsigned int i, level; 295 + pte_t *pbase, *tmp; 296 + pgprot_t ref_prot; 297 + struct page *base; 264 298 265 299 #ifdef CONFIG_DEBUG_PAGEALLOC 266 - gfp_flags = __GFP_HIGH | __GFP_NOFAIL | __GFP_NOWARN; 267 300 gfp_flags = GFP_ATOMIC | __GFP_NOWARN; 268 301 #endif 269 302 base = alloc_pages(gfp_flags, 0); ··· 372 213 * up for us already: 373 214 */ 374 215 tmp = lookup_address(address, &level); 375 - if (tmp != kpte) { 376 - WARN_ON_ONCE(1); 216 + if (tmp != kpte) 377 217 goto out_unlock; 378 - } 379 218 380 - address = __pa(address); 381 - addr = address & LARGE_PAGE_MASK; 382 219 pbase = (pte_t *)page_address(base); 383 220 #ifdef CONFIG_X86_32 384 221 paravirt_alloc_pt(&init_mm, page_to_pfn(base)); 385 222 #endif 223 + ref_prot = pte_pgprot(pte_clrhuge(*kpte)); 386 224 387 - pgprot_val(ref_prot) &= ~_PAGE_NX; 388 - for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) 389 - set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot)); 225 + #ifdef CONFIG_X86_64 226 + if (level == PG_LEVEL_1G) { 227 + pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT; 228 + pgprot_val(ref_prot) |= _PAGE_PSE; 229 + } 230 + #endif 390 231 391 232 /* 392 - * Install the new, split up pagetable. Important detail here: 233 + * Get the target pfn from the original entry: 234 + */ 235 + pfn = pte_pfn(*kpte); 236 + for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) 237 + set_pte(&pbase[i], pfn_pte(pfn, ref_prot)); 238 + 239 + /* 240 + * Install the new, split up pagetable. Important details here: 393 241 * 394 242 * On Intel the NX bit of all levels must be cleared to make a 395 243 * page executable. See section 4.13.2 of Intel 64 and IA-32 396 244 * Architectures Software Developer's Manual). 245 + * 246 + * Mark the entry present. The current mapping might be 247 + * set to not present, which we preserved above. 
397 248 */ 398 249 ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte))); 250 + pgprot_val(ref_prot) |= _PAGE_PRESENT; 399 251 __set_pmd_pte(kpte, address, mk_pte(base, ref_prot)); 400 252 base = NULL; 401 253 ··· 419 249 return 0; 420 250 } 421 251 422 - static int 423 - __change_page_attr(unsigned long address, unsigned long pfn, 424 - pgprot_t mask_set, pgprot_t mask_clr) 252 + static int __change_page_attr(unsigned long address, struct cpa_data *cpa) 425 253 { 254 + int level, do_split, err; 426 255 struct page *kpte_page; 427 - int level, err = 0; 428 256 pte_t *kpte; 429 - 430 - #ifdef CONFIG_X86_32 431 - BUG_ON(pfn > max_low_pfn); 432 - #endif 433 257 434 258 repeat: 435 259 kpte = lookup_address(address, &level); ··· 435 271 BUG_ON(PageCompound(kpte_page)); 436 272 437 273 if (level == PG_LEVEL_4K) { 438 - pgprot_t new_prot = pte_pgprot(*kpte); 439 274 pte_t new_pte, old_pte = *kpte; 275 + pgprot_t new_prot = pte_pgprot(old_pte); 440 276 441 - pgprot_val(new_prot) &= ~pgprot_val(mask_clr); 442 - pgprot_val(new_prot) |= pgprot_val(mask_set); 277 + if(!pte_val(old_pte)) { 278 + printk(KERN_WARNING "CPA: called for zero pte. " 279 + "vaddr = %lx cpa->vaddr = %lx\n", address, 280 + cpa->vaddr); 281 + WARN_ON(1); 282 + return -EINVAL; 283 + } 284 + 285 + pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); 286 + pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); 443 287 444 288 new_prot = static_protections(new_prot, address); 445 289 446 - new_pte = pfn_pte(pfn, canon_pgprot(new_prot)); 447 - BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte)); 290 + /* 291 + * We need to keep the pfn from the existing PTE, 292 + * after all we're only going to change it's attributes 293 + * not the memory it points to 294 + */ 295 + new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot)); 448 296 449 - set_pte_atomic(kpte, new_pte); 450 - } else { 451 - err = split_large_page(kpte, address); 452 - if (!err) 453 - goto repeat; 297 + /* 298 + * Do we really change anything ? 299 + */ 300 + if (pte_val(old_pte) != pte_val(new_pte)) { 301 + set_pte_atomic(kpte, new_pte); 302 + cpa->flushtlb = 1; 303 + } 304 + cpa->numpages = 1; 305 + return 0; 454 306 } 307 + 308 + /* 309 + * Check, whether we can keep the large page intact 310 + * and just change the pte: 311 + */ 312 + do_split = try_preserve_large_page(kpte, address, cpa); 313 + /* 314 + * When the range fits into the existing large page, 315 + * return. cp->numpages and cpa->tlbflush have been updated in 316 + * try_large_page: 317 + */ 318 + if (do_split <= 0) 319 + return do_split; 320 + 321 + /* 322 + * We have to split the large page: 323 + */ 324 + err = split_large_page(kpte, address); 325 + if (!err) { 326 + cpa->flushtlb = 1; 327 + goto repeat; 328 + } 329 + 455 330 return err; 456 331 } 457 332 ··· 507 304 * 508 305 * Modules and drivers should use the set_memory_* APIs instead. 
509 306 */ 510 - 511 - #define HIGH_MAP_START __START_KERNEL_map 512 - #define HIGH_MAP_END (__START_KERNEL_map + KERNEL_TEXT_SIZE) 513 - 514 - static int 515 - change_page_attr_addr(unsigned long address, pgprot_t mask_set, 516 - pgprot_t mask_clr) 307 + static int change_page_attr_addr(struct cpa_data *cpa) 517 308 { 518 - unsigned long phys_addr = __pa(address); 519 - unsigned long pfn = phys_addr >> PAGE_SHIFT; 520 309 int err; 310 + unsigned long address = cpa->vaddr; 521 311 522 312 #ifdef CONFIG_X86_64 313 + unsigned long phys_addr = __pa(address); 314 + 523 315 /* 524 316 * If we are inside the high mapped kernel range, then we 525 317 * fixup the low mapping first. __va() returns the virtual ··· 524 326 address = (unsigned long) __va(phys_addr); 525 327 #endif 526 328 527 - err = __change_page_attr(address, pfn, mask_set, mask_clr); 329 + err = __change_page_attr(address, cpa); 528 330 if (err) 529 331 return err; 530 332 ··· 537 339 /* 538 340 * Calc the high mapping address. See __phys_addr() 539 341 * for the non obvious details. 342 + * 343 + * Note that NX and other required permissions are 344 + * checked in static_protections(). 540 345 */ 541 346 address = phys_addr + HIGH_MAP_START - phys_base; 542 - /* Make sure the kernel mappings stay executable */ 543 - pgprot_val(mask_clr) |= _PAGE_NX; 544 347 545 348 /* 546 349 * Our high aliases are imprecise, because we check 547 350 * everything between 0 and KERNEL_TEXT_SIZE, so do 548 351 * not propagate lookup failures back to users: 549 352 */ 550 - __change_page_attr(address, pfn, mask_set, mask_clr); 353 + __change_page_attr(address, cpa); 551 354 } 552 355 #endif 553 356 return err; 554 357 } 555 358 556 - static int __change_page_attr_set_clr(unsigned long addr, int numpages, 557 - pgprot_t mask_set, pgprot_t mask_clr) 359 + static int __change_page_attr_set_clr(struct cpa_data *cpa) 558 360 { 559 - unsigned int i; 560 - int ret; 361 + int ret, numpages = cpa->numpages; 561 362 562 - for (i = 0; i < numpages ; i++, addr += PAGE_SIZE) { 563 - ret = change_page_attr_addr(addr, mask_set, mask_clr); 363 + while (numpages) { 364 + /* 365 + * Store the remaining nr of pages for the large page 366 + * preservation check. 367 + */ 368 + cpa->numpages = numpages; 369 + ret = change_page_attr_addr(cpa); 564 370 if (ret) 565 371 return ret; 566 - } 567 372 373 + /* 374 + * Adjust the number of pages with the result of the 375 + * CPA operation. Either a large page has been 376 + * preserved or a single page update happened. 
377 + */ 378 + BUG_ON(cpa->numpages > numpages); 379 + numpages -= cpa->numpages; 380 + cpa->vaddr += cpa->numpages * PAGE_SIZE; 381 + } 568 382 return 0; 383 + } 384 + 385 + static inline int cache_attr(pgprot_t attr) 386 + { 387 + return pgprot_val(attr) & 388 + (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD); 569 389 } 570 390 571 391 static int change_page_attr_set_clr(unsigned long addr, int numpages, 572 392 pgprot_t mask_set, pgprot_t mask_clr) 573 393 { 574 - int ret = __change_page_attr_set_clr(addr, numpages, mask_set, 575 - mask_clr); 394 + struct cpa_data cpa; 395 + int ret, cache; 396 + 397 + /* 398 + * Check, if we are requested to change a not supported 399 + * feature: 400 + */ 401 + mask_set = canon_pgprot(mask_set); 402 + mask_clr = canon_pgprot(mask_clr); 403 + if (!pgprot_val(mask_set) && !pgprot_val(mask_clr)) 404 + return 0; 405 + 406 + cpa.vaddr = addr; 407 + cpa.numpages = numpages; 408 + cpa.mask_set = mask_set; 409 + cpa.mask_clr = mask_clr; 410 + cpa.flushtlb = 0; 411 + 412 + ret = __change_page_attr_set_clr(&cpa); 413 + 414 + /* 415 + * Check whether we really changed something: 416 + */ 417 + if (!cpa.flushtlb) 418 + return ret; 419 + 420 + /* 421 + * No need to flush, when we did not set any of the caching 422 + * attributes: 423 + */ 424 + cache = cache_attr(mask_set); 576 425 577 426 /* 578 427 * On success we use clflush, when the CPU supports it to ··· 628 383 * wbindv): 629 384 */ 630 385 if (!ret && cpu_has_clflush) 631 - cpa_flush_range(addr, numpages); 386 + cpa_flush_range(addr, numpages, cache); 632 387 else 633 - cpa_flush_all(); 388 + cpa_flush_all(cache); 634 389 635 390 return ret; 636 391 } ··· 734 489 return set_memory_rw(addr, numpages); 735 490 } 736 491 737 - 738 - #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG) 739 - static inline int __change_page_attr_set(unsigned long addr, int numpages, 740 - pgprot_t mask) 741 - { 742 - return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0)); 743 - } 744 - 745 - static inline int __change_page_attr_clear(unsigned long addr, int numpages, 746 - pgprot_t mask) 747 - { 748 - return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask); 749 - } 750 - #endif 751 - 752 492 #ifdef CONFIG_DEBUG_PAGEALLOC 753 493 754 494 static int __set_pages_p(struct page *page, int numpages) 755 495 { 756 - unsigned long addr = (unsigned long)page_address(page); 496 + struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page), 497 + .numpages = numpages, 498 + .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), 499 + .mask_clr = __pgprot(0)}; 757 500 758 - return __change_page_attr_set(addr, numpages, 759 - __pgprot(_PAGE_PRESENT | _PAGE_RW)); 501 + return __change_page_attr_set_clr(&cpa); 760 502 } 761 503 762 504 static int __set_pages_np(struct page *page, int numpages) 763 505 { 764 - unsigned long addr = (unsigned long)page_address(page); 506 + struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page), 507 + .numpages = numpages, 508 + .mask_set = __pgprot(0), 509 + .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)}; 765 510 766 - return __change_page_attr_clear(addr, numpages, 767 - __pgprot(_PAGE_PRESENT)); 511 + return __change_page_attr_set_clr(&cpa); 768 512 } 769 513 770 514 void kernel_map_pages(struct page *page, int numpages, int enable)
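The pageattr rework above funnels everything through lookup_address() and the PG_LEVEL_* values (including the new PG_LEVEL_1G). As a minimal, illustrative sketch -- not part of the patch -- this is how a caller can inspect the mapping level of a kernel virtual address; the helper name and printks are hypothetical:

/* Illustrative only: report how a kernel virtual address is currently mapped. */
static void show_mapping_level(unsigned long address)
{
	int level;
	pte_t *pte = lookup_address(address, &level);

	/* lookup_address() may return a pud/pmd pointer for non-present
	 * entries, so check the present bit before trusting the level. */
	if (!pte || !(pte_val(*pte) & _PAGE_PRESENT)) {
		printk(KERN_DEBUG "0x%lx: not mapped\n", address);
		return;
	}

	switch (level) {
	case PG_LEVEL_4K:
		printk(KERN_DEBUG "0x%lx: 4k page\n", address);
		break;
	case PG_LEVEL_2M:
		printk(KERN_DEBUG "0x%lx: 2M/4M page\n", address);
		break;
	case PG_LEVEL_1G:	/* 64-bit only, needs gbpages support */
		printk(KERN_DEBUG "0x%lx: 1G page\n", address);
		break;
	}
}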
+21 -42
arch/x86/mm/pgtable_32.c
··· 219 219 list_del(&page->lru); 220 220 } 221 221 222 + #define UNSHARED_PTRS_PER_PGD \ 223 + (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD) 222 224 223 - 224 - #if (PTRS_PER_PMD == 1) 225 - /* Non-PAE pgd constructor */ 226 - static void pgd_ctor(void *pgd) 225 + static void pgd_ctor(void *p) 227 226 { 227 + pgd_t *pgd = p; 228 228 unsigned long flags; 229 229 230 - /* !PAE, no pagetable sharing */ 230 + /* Clear usermode parts of PGD */ 231 231 memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); 232 232 233 233 spin_lock_irqsave(&pgd_lock, flags); 234 234 235 - /* must happen under lock */ 236 - clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD, 237 - swapper_pg_dir + USER_PTRS_PER_PGD, 238 - KERNEL_PGD_PTRS); 239 - paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT, 240 - __pa(swapper_pg_dir) >> PAGE_SHIFT, 241 - USER_PTRS_PER_PGD, 242 - KERNEL_PGD_PTRS); 243 - pgd_list_add(pgd); 244 - spin_unlock_irqrestore(&pgd_lock, flags); 245 - } 246 - #else /* PTRS_PER_PMD > 1 */ 247 - /* PAE pgd constructor */ 248 - static void pgd_ctor(void *pgd) 249 - { 250 - /* PAE, kernel PMD may be shared */ 251 - 252 - if (SHARED_KERNEL_PMD) { 253 - clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD, 235 + /* If the pgd points to a shared pagetable level (either the 236 + ptes in non-PAE, or shared PMD in PAE), then just copy the 237 + references from swapper_pg_dir. */ 238 + if (PAGETABLE_LEVELS == 2 || 239 + (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) { 240 + clone_pgd_range(pgd + USER_PTRS_PER_PGD, 254 241 swapper_pg_dir + USER_PTRS_PER_PGD, 255 242 KERNEL_PGD_PTRS); 256 - } else { 257 - unsigned long flags; 258 - 259 - memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); 260 - spin_lock_irqsave(&pgd_lock, flags); 261 - pgd_list_add(pgd); 262 - spin_unlock_irqrestore(&pgd_lock, flags); 243 + paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT, 244 + __pa(swapper_pg_dir) >> PAGE_SHIFT, 245 + USER_PTRS_PER_PGD, 246 + KERNEL_PGD_PTRS); 263 247 } 248 + 249 + /* list required to sync kernel mapping updates */ 250 + if (!SHARED_KERNEL_PMD) 251 + pgd_list_add(pgd); 252 + 253 + spin_unlock_irqrestore(&pgd_lock, flags); 264 254 } 265 - #endif /* PTRS_PER_PMD */ 266 255 267 256 static void pgd_dtor(void *pgd) 268 257 { ··· 264 275 pgd_list_del(pgd); 265 276 spin_unlock_irqrestore(&pgd_lock, flags); 266 277 } 267 - 268 - #define UNSHARED_PTRS_PER_PGD \ 269 - (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD) 270 278 271 279 #ifdef CONFIG_X86_PAE 272 280 /* ··· 373 387 374 388 void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 375 389 { 376 - /* This is called just after the pmd has been detached from 377 - the pgd, which requires a full tlb flush to be recognized 378 - by the CPU. Rather than incurring multiple tlb flushes 379 - while the address space is being pulled down, make the tlb 380 - gathering machinery do a full flush when we're done. */ 381 - tlb->fullmm = 1; 382 - 383 390 paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT); 384 391 tlb_remove_page(tlb, virt_to_page(pmd)); 385 392 }
+44 -8
arch/x86/pci/numa.c
··· 5 5 #include <linux/pci.h> 6 6 #include <linux/init.h> 7 7 #include <linux/nodemask.h> 8 + #include <mach_apic.h> 8 9 #include "pci.h" 10 + 11 + #define XQUAD_PORTIO_BASE 0xfe400000 12 + #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ 9 13 10 14 #define BUS2QUAD(global) (mp_bus_id_to_node[global]) 11 15 #define BUS2LOCAL(global) (mp_bus_id_to_local[global]) 12 16 #define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local]) 13 17 18 + extern void *xquad_portio; /* Where the IO area was mapped */ 19 + #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) 20 + 14 21 #define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \ 15 22 (0x80000000 | (BUS2LOCAL(bus) << 16) | (devfn << 8) | (reg & ~3)) 23 + 24 + static void write_cf8(unsigned bus, unsigned devfn, unsigned reg) 25 + { 26 + unsigned val = PCI_CONF1_MQ_ADDRESS(bus, devfn, reg); 27 + if (xquad_portio) 28 + writel(val, XQUAD_PORT_ADDR(0xcf8, BUS2QUAD(bus))); 29 + else 30 + outl(val, 0xCF8); 31 + } 16 32 17 33 static int pci_conf1_mq_read(unsigned int seg, unsigned int bus, 18 34 unsigned int devfn, int reg, int len, u32 *value) 19 35 { 20 36 unsigned long flags; 37 + void *adr __iomem = XQUAD_PORT_ADDR(0xcfc, BUS2QUAD(bus)); 21 38 22 39 if (!value || (bus >= MAX_MP_BUSSES) || (devfn > 255) || (reg > 255)) 23 40 return -EINVAL; 24 41 25 42 spin_lock_irqsave(&pci_config_lock, flags); 26 43 27 - outl_quad(PCI_CONF1_MQ_ADDRESS(bus, devfn, reg), 0xCF8, BUS2QUAD(bus)); 44 + write_cf8(bus, devfn, reg); 28 45 29 46 switch (len) { 30 47 case 1: 31 - *value = inb_quad(0xCFC + (reg & 3), BUS2QUAD(bus)); 48 + if (xquad_portio) 49 + *value = readb(adr + (reg & 3)); 50 + else 51 + *value = inb(0xCFC + (reg & 3)); 32 52 break; 33 53 case 2: 34 - *value = inw_quad(0xCFC + (reg & 2), BUS2QUAD(bus)); 54 + if (xquad_portio) 55 + *value = readw(adr + (reg & 2)); 56 + else 57 + *value = inw(0xCFC + (reg & 2)); 35 58 break; 36 59 case 4: 37 - *value = inl_quad(0xCFC, BUS2QUAD(bus)); 60 + if (xquad_portio) 61 + *value = readl(adr); 62 + else 63 + *value = inl(0xCFC); 38 64 break; 39 65 } 40 66 ··· 73 47 unsigned int devfn, int reg, int len, u32 value) 74 48 { 75 49 unsigned long flags; 50 + void *adr __iomem = XQUAD_PORT_ADDR(0xcfc, BUS2QUAD(bus)); 76 51 77 52 if ((bus >= MAX_MP_BUSSES) || (devfn > 255) || (reg > 255)) 78 53 return -EINVAL; 79 54 80 55 spin_lock_irqsave(&pci_config_lock, flags); 81 56 82 - outl_quad(PCI_CONF1_MQ_ADDRESS(bus, devfn, reg), 0xCF8, BUS2QUAD(bus)); 57 + write_cf8(bus, devfn, reg); 83 58 84 59 switch (len) { 85 60 case 1: 86 - outb_quad((u8)value, 0xCFC + (reg & 3), BUS2QUAD(bus)); 61 + if (xquad_portio) 62 + writeb(value, adr + (reg & 3)); 63 + else 64 + outb((u8)value, 0xCFC + (reg & 3)); 87 65 break; 88 66 case 2: 89 - outw_quad((u16)value, 0xCFC + (reg & 2), BUS2QUAD(bus)); 67 + if (xquad_portio) 68 + writew(value, adr + (reg & 2)); 69 + else 70 + outw((u16)value, 0xCFC + (reg & 2)); 90 71 break; 91 72 case 4: 92 - outl_quad((u32)value, 0xCFC, BUS2QUAD(bus)); 73 + if (xquad_portio) 74 + writel(value, adr + reg); 75 + else 76 + outl((u32)value, 0xCFC); 93 77 break; 94 78 } 95 79
+7 -4
include/asm-generic/rtc.h
··· 35 35 static inline unsigned char rtc_is_updating(void) 36 36 { 37 37 unsigned char uip; 38 + unsigned long flags; 38 39 39 - spin_lock_irq(&rtc_lock); 40 + spin_lock_irqsave(&rtc_lock, flags); 40 41 uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); 41 - spin_unlock_irq(&rtc_lock); 42 + spin_unlock_irqrestore(&rtc_lock, flags); 42 43 return uip; 43 44 } 44 45 ··· 47 46 { 48 47 unsigned long uip_watchdog = jiffies; 49 48 unsigned char ctrl; 49 + unsigned long flags; 50 + 50 51 #ifdef CONFIG_MACH_DECSTATION 51 52 unsigned int real_year; 52 53 #endif ··· 75 72 * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated 76 73 * by the RTC when initially set to a non-zero value. 77 74 */ 78 - spin_lock_irq(&rtc_lock); 75 + spin_lock_irqsave(&rtc_lock, flags); 79 76 time->tm_sec = CMOS_READ(RTC_SECONDS); 80 77 time->tm_min = CMOS_READ(RTC_MINUTES); 81 78 time->tm_hour = CMOS_READ(RTC_HOURS); ··· 86 83 real_year = CMOS_READ(RTC_DEC_YEAR); 87 84 #endif 88 85 ctrl = CMOS_READ(RTC_CONTROL); 89 - spin_unlock_irq(&rtc_lock); 86 + spin_unlock_irqrestore(&rtc_lock, flags); 90 87 91 88 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 92 89 {
-1
include/asm-generic/tlb.h
··· 14 14 #define _ASM_GENERIC__TLB_H 15 15 16 16 #include <linux/swap.h> 17 - #include <linux/quicklist.h> 18 17 #include <asm/pgalloc.h> 19 18 #include <asm/tlbflush.h> 20 19
+7
include/asm-x86/asm.h
··· 29 29 30 30 #endif /* CONFIG_X86_32 */ 31 31 32 + /* Exception table entry */ 33 + # define _ASM_EXTABLE(from,to) \ 34 + " .section __ex_table,\"a\"\n" \ 35 + _ASM_ALIGN "\n" \ 36 + _ASM_PTR #from "," #to "\n" \ 37 + " .previous\n" 38 + 32 39 #endif /* _ASM_X86_ASM_H */
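The new _ASM_EXTABLE() helper centralizes the open-coded __ex_table entries that the uaccess, futex, msr and i387 hunks below delete. A minimal sketch of the pattern it supports (the wrapper name is hypothetical; it mirrors the tolerant fwait converted in i387.h further down): pair a potentially faulting instruction with the label where execution should resume.

/* Sketch: if "1: fwait" faults, the exception table entry emitted by
 * _ASM_EXTABLE(1b,2b) tells the fault handler to continue at label 2. */
static inline void fwait_nofault(void)
{
	asm volatile("1:	fwait\n"
		     "2:\n"
		     _ASM_EXTABLE(1b,2b));
}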
+1 -1
include/asm-x86/bugs.h
··· 2 2 #define _ASM_X86_BUGS_H 3 3 4 4 extern void check_bugs(void); 5 - extern int ppro_with_ram_bug(void); 5 + int ppro_with_ram_bug(void); 6 6 7 7 #endif /* _ASM_X86_BUGS_H */
+11 -3
include/asm-x86/cpufeature.h
··· 4 4 #ifndef _ASM_X86_CPUFEATURE_H 5 5 #define _ASM_X86_CPUFEATURE_H 6 6 7 - #ifndef __ASSEMBLY__ 8 - #include <linux/bitops.h> 9 - #endif 10 7 #include <asm/required-features.h> 11 8 12 9 #define NCAPINTS 8 /* N 32-bit words worth of info */ ··· 46 49 #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ 47 50 #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ 48 51 #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ 52 + #define X86_FEATURE_GBPAGES (1*32+26) /* GB pages */ 49 53 #define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ 50 54 #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ 51 55 #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ ··· 113 115 */ 114 116 #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ 115 117 118 + #if defined(__KERNEL__) && !defined(__ASSEMBLY__) 119 + 120 + #include <linux/bitops.h> 121 + 122 + extern const char * const x86_cap_flags[NCAPINTS*32]; 123 + extern const char * const x86_power_flags[32]; 124 + 116 125 #define cpu_has(c, bit) \ 117 126 (__builtin_constant_p(bit) && \ 118 127 ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ ··· 180 175 #define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) 181 176 #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) 182 177 #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) 178 + #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) 183 179 184 180 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) 185 181 # define cpu_has_invlpg 1 ··· 209 203 #define cpu_has_centaur_mcr 0 210 204 211 205 #endif /* CONFIG_X86_64 */ 206 + 207 + #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ 212 208 213 209 #endif /* _ASM_X86_CPUFEATURE_H */
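With the gbpages CPUID bit exposed, callers can gate 1GB-page usage on cpu_has_gbpages. A rough, 64-bit-only sketch (the function is illustrative; PUD_PAGE_SIZE is added to page_64.h later in this diff, and cpu_has_pse is assumed to be the existing PSE test):

/* Sketch: pick the largest page size usable for the direct mapping. */
static unsigned long max_direct_mapping_page_size(void)
{
	if (cpu_has_gbpages)		/* X86_FEATURE_GBPAGES, new in this patch */
		return PUD_PAGE_SIZE;	/* 1 GB */
	if (cpu_has_pse)
		return PMD_PAGE_SIZE;	/* 2 MB */
	return PAGE_SIZE;		/* 4 kB */
}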
+2 -2
include/asm-x86/efi.h
··· 33 33 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ 34 34 efi_call_virt(f, a1, a2, a3, a4, a5, a6) 35 35 36 - #define efi_ioremap(addr, size) ioremap(addr, size) 36 + #define efi_ioremap(addr, size) ioremap_cache(addr, size) 37 37 38 38 #else /* !CONFIG_X86_32 */ 39 39 ··· 86 86 efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ 87 87 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) 88 88 89 - extern void *efi_ioremap(unsigned long offset, unsigned long size); 89 + extern void *efi_ioremap(unsigned long addr, unsigned long size); 90 90 91 91 #endif /* CONFIG_X86_32 */ 92 92
+6 -17
include/asm-x86/futex.h
··· 17 17 "2: .section .fixup,\"ax\"\n \ 18 18 3: mov %3, %1\n \ 19 19 jmp 2b\n \ 20 - .previous\n \ 21 - .section __ex_table,\"a\"\n \ 22 - .align 8\n" \ 23 - _ASM_PTR "1b,3b\n \ 24 - .previous" \ 20 + .previous\n" \ 21 + _ASM_EXTABLE(1b,3b) \ 25 22 : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ 26 23 : "i" (-EFAULT), "0" (oparg), "1" (0)) 27 24 ··· 32 35 3: .section .fixup,\"ax\"\n \ 33 36 4: mov %5, %1\n \ 34 37 jmp 3b\n \ 35 - .previous\n \ 36 - .section __ex_table,\"a\"\n \ 37 - .align 8\n" \ 38 - _ASM_PTR "1b,4b,2b,4b\n \ 39 - .previous" \ 38 + .previous\n" \ 39 + _ASM_EXTABLE(1b,4b) \ 40 + _ASM_EXTABLE(2b,4b) \ 40 41 : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ 41 42 "=&r" (tem) \ 42 43 : "r" (oparg), "i" (-EFAULT), "1" (0)) ··· 106 111 return -EFAULT; 107 112 108 113 __asm__ __volatile__( 109 - 110 114 "1: lock; cmpxchgl %3, %1 \n" 111 115 "2: .section .fixup, \"ax\" \n" 112 116 "3: mov %2, %0 \n" 113 117 " jmp 2b \n" 114 118 " .previous \n" 115 - 116 - " .section __ex_table, \"a\" \n" 117 - " .align 8 \n" 118 - _ASM_PTR " 1b,3b \n" 119 - " .previous \n" 120 - 119 + _ASM_EXTABLE(1b,3b) 121 120 : "=a" (oldval), "+m" (*uaddr) 122 121 : "i" (-EFAULT), "r" (newval), "0" (oldval) 123 122 : "memory"
+2 -2
include/asm-x86/highmem.h
··· 63 63 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) 64 64 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) 65 65 66 - extern void * FASTCALL(kmap_high(struct page *page)); 67 - extern void FASTCALL(kunmap_high(struct page *page)); 66 + extern void *kmap_high(struct page *page); 67 + extern void kunmap_high(struct page *page); 68 68 69 69 void *kmap(struct page *page); 70 70 void kunmap(struct page *page);
+1 -1
include/asm-x86/hw_irq_32.h
··· 47 47 int i8259A_irq_pending(unsigned int irq); 48 48 void make_8259A_irq(unsigned int irq); 49 49 void init_8259A(int aeoi); 50 - void FASTCALL(send_IPI_self(int vector)); 50 + void send_IPI_self(int vector); 51 51 void init_VISWS_APIC_irqs(void); 52 52 void setup_IO_APIC(void); 53 53 void disable_IO_APIC(void);
+4 -12
include/asm-x86/i387.h
··· 13 13 #include <linux/sched.h> 14 14 #include <linux/kernel_stat.h> 15 15 #include <linux/regset.h> 16 + #include <asm/asm.h> 16 17 #include <asm/processor.h> 17 18 #include <asm/sigcontext.h> 18 19 #include <asm/user.h> ··· 42 41 { 43 42 asm volatile("1: fwait\n" 44 43 "2:\n" 45 - " .section __ex_table,\"a\"\n" 46 - " .align 8\n" 47 - " .quad 1b,2b\n" 48 - " .previous\n"); 44 + _ASM_EXTABLE(1b,2b)); 49 45 } 50 46 51 47 static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) ··· 55 57 "3: movl $-1,%[err]\n" 56 58 " jmp 2b\n" 57 59 ".previous\n" 58 - ".section __ex_table,\"a\"\n" 59 - " .align 8\n" 60 - " .quad 1b,3b\n" 61 - ".previous" 60 + _ASM_EXTABLE(1b,3b) 62 61 : [err] "=r" (err) 63 62 #if 0 /* See comment in __save_init_fpu() below. */ 64 63 : [fx] "r" (fx), "m" (*fx), "0" (0)); ··· 94 99 "3: movl $-1,%[err]\n" 95 100 " jmp 2b\n" 96 101 ".previous\n" 97 - ".section __ex_table,\"a\"\n" 98 - " .align 8\n" 99 - " .quad 1b,3b\n" 100 - ".previous" 102 + _ASM_EXTABLE(1b,3b) 101 103 : [err] "=r" (err), "=m" (*fx) 102 104 #if 0 /* See comment in __fxsave_clear() below. */ 103 105 : [fx] "r" (fx), "0" (0));
-25
include/asm-x86/io_32.h
··· 275 275 276 276 #endif 277 277 278 - #ifdef CONFIG_X86_NUMAQ 279 - extern void *xquad_portio; /* Where the IO area was mapped */ 280 - #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) 281 - #define __BUILDIO(bwl,bw,type) \ 282 - static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \ 283 - if (xquad_portio) \ 284 - write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \ 285 - else \ 286 - out##bwl##_local(value, port); \ 287 - } \ 288 - static inline void out##bwl(unsigned type value, int port) { \ 289 - out##bwl##_quad(value, port, 0); \ 290 - } \ 291 - static inline unsigned type in##bwl##_quad(int port, int quad) { \ 292 - if (xquad_portio) \ 293 - return read##bwl(XQUAD_PORT_ADDR(port, quad)); \ 294 - else \ 295 - return in##bwl##_local(port); \ 296 - } \ 297 - static inline unsigned type in##bwl(int port) { \ 298 - return in##bwl##_quad(port, 0); \ 299 - } 300 - #else 301 278 #define __BUILDIO(bwl,bw,type) \ 302 279 static inline void out##bwl(unsigned type value, int port) { \ 303 280 out##bwl##_local(value, port); \ ··· 282 305 static inline unsigned type in##bwl(int port) { \ 283 306 return in##bwl##_local(port); \ 284 307 } 285 - #endif 286 - 287 308 288 309 #define BUILDIO(bwl,bw,type) \ 289 310 static inline void out##bwl##_local(unsigned type value, int port) { \
+2
include/asm-x86/mach-numaq/mach_apic.h
··· 109 109 return logical_apicid; 110 110 } 111 111 112 + extern void *xquad_portio; 113 + 112 114 static inline void setup_portio_remap(void) 113 115 { 114 116 int num_quads = num_online_nodes();
+2 -8
include/asm-x86/msr.h
··· 57 57 ".section .fixup,\"ax\"\n\t" 58 58 "3: mov %3,%0 ; jmp 1b\n\t" 59 59 ".previous\n\t" 60 - ".section __ex_table,\"a\"\n" 61 - _ASM_ALIGN "\n\t" 62 - _ASM_PTR " 2b,3b\n\t" 63 - ".previous" 60 + _ASM_EXTABLE(2b,3b) 64 61 : "=r" (*err), EAX_EDX_RET(val, low, high) 65 62 : "c" (msr), "i" (-EFAULT)); 66 63 return EAX_EDX_VAL(val, low, high); ··· 78 81 ".section .fixup,\"ax\"\n\t" 79 82 "3: mov %4,%0 ; jmp 1b\n\t" 80 83 ".previous\n\t" 81 - ".section __ex_table,\"a\"\n" 82 - _ASM_ALIGN "\n\t" 83 - _ASM_PTR " 2b,3b\n\t" 84 - ".previous" 84 + _ASM_EXTABLE(2b,3b) 85 85 : "=a" (err) 86 86 : "c" (msr), "0" (low), "d" (high), 87 87 "i" (-EFAULT));
+2 -2
include/asm-x86/page.h
··· 13 13 #define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK) 14 14 #define PTE_MASK (_AT(long, PHYSICAL_PAGE_MASK)) 15 15 16 - #define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT) 17 - #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) 16 + #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) 17 + #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) 18 18 19 19 #define HPAGE_SHIFT PMD_SHIFT 20 20 #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
+3
include/asm-x86/page_64.h
··· 23 23 #define MCE_STACK 5 24 24 #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ 25 25 26 + #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) 27 + #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) 28 + 26 29 #define __PAGE_OFFSET _AC(0xffff810000000000, UL) 27 30 28 31 #define __PHYSICAL_START CONFIG_PHYSICAL_START
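For reference, the arithmetic behind the renamed PMD_PAGE_* and new PUD_PAGE_* macros, assuming the usual x86-64 shifts (PAGE_SHIFT=12, PMD_SHIFT=21, PUD_SHIFT=30):

/*
 * PMD_PAGE_SIZE = 1UL << 21 = 0x200000            (2 MB, one PMD entry)
 * PMD_PAGE_MASK = ~(PMD_PAGE_SIZE - 1) = 0xffffffffffe00000
 * PUD_PAGE_SIZE = 1UL << 30 = 0x40000000          (1 GB, one PUD entry)
 * PUD_PAGE_MASK = ~(PUD_PAGE_SIZE - 1) = 0xffffffffc0000000
 *
 * One 1 GB mapping therefore covers 512 2 MB pages or 262144 4 kB pages.
 */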
+4 -2
include/asm-x86/pgalloc_32.h
··· 80 80 set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT)); 81 81 82 82 /* 83 - * Pentium-II erratum A13: in PAE mode we explicitly have to flush 84 - * the TLB via cr3 if the top-level pgd is changed... 83 + * According to Intel App note "TLBs, Paging-Structure Caches, 84 + * and Their Invalidation", April 2007, document 317080-001, 85 + * section 8.1: in PAE mode we explicitly have to flush the 86 + * TLB via cr3 if the top-level pgd is changed... 85 87 */ 86 88 if (mm == current->active_mm) 87 89 write_cr3(read_cr3());
+11 -15
include/asm-x86/pgtable-3level.h
··· 93 93 94 94 static inline void pud_clear(pud_t *pudp) 95 95 { 96 + unsigned long pgd; 97 + 96 98 set_pud(pudp, __pud(0)); 97 99 98 100 /* 99 - * In principle we need to do a cr3 reload here to make sure 100 - * the processor recognizes the changed pgd. In practice, all 101 - * the places where pud_clear() gets called are followed by 102 - * full tlb flushes anyway, so we can defer the cost here. 101 + * According to Intel App note "TLBs, Paging-Structure Caches, 102 + * and Their Invalidation", April 2007, document 317080-001, 103 + * section 8.1: in PAE mode we explicitly have to flush the 104 + * TLB via cr3 if the top-level pgd is changed... 103 105 * 104 - * Specifically: 105 - * 106 - * mm/memory.c:free_pmd_range() - immediately after the 107 - * pud_clear() it does a pmd_free_tlb(). We change the 108 - * mmu_gather structure to do a full tlb flush (which has the 109 - * effect of reloading cr3) when the pagetable free is 110 - * complete. 111 - * 112 - * arch/x86/mm/hugetlbpage.c:huge_pmd_unshare() - the call to 113 - * this is followed by a flush_tlb_range, which on x86 does a 114 - * full tlb flush. 106 + * Make sure the pud entry we're updating is within the 107 + * current pgd to avoid unnecessary TLB flushes. 115 108 */ 109 + pgd = read_cr3(); 110 + if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) 111 + write_cr3(pgd); 116 112 } 117 113 118 114 #define pud_page(pud) \
+4
include/asm-x86/pgtable.h
··· 13 13 #define _PAGE_BIT_DIRTY 6 14 14 #define _PAGE_BIT_FILE 6 15 15 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ 16 + #define _PAGE_BIT_PAT 7 /* on 4KB pages */ 16 17 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ 17 18 #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ 18 19 #define _PAGE_BIT_UNUSED2 10 19 20 #define _PAGE_BIT_UNUSED3 11 21 + #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ 20 22 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ 21 23 22 24 /* ··· 38 36 #define _PAGE_UNUSED1 (_AC(1, L)<<_PAGE_BIT_UNUSED1) 39 37 #define _PAGE_UNUSED2 (_AC(1, L)<<_PAGE_BIT_UNUSED2) 40 38 #define _PAGE_UNUSED3 (_AC(1, L)<<_PAGE_BIT_UNUSED3) 39 + #define _PAGE_PAT (_AC(1, L)<<_PAGE_BIT_PAT) 40 + #define _PAGE_PAT_LARGE (_AC(1, L)<<_PAGE_BIT_PAT_LARGE) 41 41 42 42 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 43 43 #define _PAGE_NX (_AC(1, ULL) << _PAGE_BIT_NX)
+2
include/asm-x86/pgtable_32.h
··· 148 148 */ 149 149 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 150 150 151 + static inline int pud_large(pud_t pud) { return 0; } 152 + 151 153 /* 152 154 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] 153 155 *
+6 -1
include/asm-x86/pgtable_64.h
··· 21 21 #define swapper_pg_dir init_level4_pgt 22 22 23 23 extern void paging_init(void); 24 - extern void clear_kernel_mapping(unsigned long addr, unsigned long size); 25 24 26 25 #endif /* !__ASSEMBLY__ */ 27 26 ··· 197 198 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) 198 199 #define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address)) 199 200 #define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) 201 + 202 + static inline int pud_large(pud_t pte) 203 + { 204 + return (pud_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) == 205 + (_PAGE_PSE|_PAGE_PRESENT); 206 + } 200 207 201 208 /* PMD - Level 2 access */ 202 209 #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
+4 -4
include/asm-x86/string_32.h
··· 213 213 case 0: 214 214 return s; 215 215 case 1: 216 - *(unsigned char *)s = pattern; 216 + *(unsigned char *)s = pattern & 0xff; 217 217 return s; 218 218 case 2: 219 - *(unsigned short *)s = pattern; 219 + *(unsigned short *)s = pattern & 0xffff; 220 220 return s; 221 221 case 3: 222 - *(unsigned short *)s = pattern; 223 - *(2+(unsigned char *)s) = pattern; 222 + *(unsigned short *)s = pattern & 0xffff; 223 + *(2+(unsigned char *)s) = pattern & 0xff; 224 224 return s; 225 225 case 4: 226 226 *(unsigned long *)s = pattern;
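The new masks make explicit that only the low byte/halfword of the 32-bit fill pattern lands in memory for the 1-3 byte cases. A rough sketch of how such a pattern is typically built from the fill byte (helper name hypothetical):

/* Sketch: replicate a fill byte into a 32-bit pattern, e.g. 0xab -> 0xabababab;
 * for short fills only the low 8/16 bits are stored, hence the masks above. */
static inline unsigned long fill_pattern(unsigned char c)
{
	return 0x01010101UL * c;
}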
+9 -14
include/asm-x86/system.h
··· 20 20 #ifdef CONFIG_X86_32 21 21 22 22 struct task_struct; /* one of the stranger aspects of C forward declarations */ 23 - extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev, 24 - struct task_struct *next)); 23 + struct task_struct *__switch_to(struct task_struct *prev, 24 + struct task_struct *next); 25 25 26 26 /* 27 27 * Saving eflags is important. It switches not only IOPL between tasks, ··· 130 130 "movl %k1, %%" #seg "\n\t" \ 131 131 "jmp 2b\n" \ 132 132 ".previous\n" \ 133 - ".section __ex_table,\"a\"\n\t" \ 134 - _ASM_ALIGN "\n\t" \ 135 - _ASM_PTR " 1b,3b\n" \ 136 - ".previous" \ 133 + _ASM_EXTABLE(1b,3b) \ 137 134 : :"r" (value), "r" (0)) 138 135 139 136 ··· 211 214 /* This could fault if %cr4 does not exist. In x86_64, a cr4 always 212 215 * exists, so it will never fail. */ 213 216 #ifdef CONFIG_X86_32 214 - asm volatile("1: mov %%cr4, %0 \n" 215 - "2: \n" 216 - ".section __ex_table,\"a\" \n" 217 - ".long 1b,2b \n" 218 - ".previous \n" 219 - : "=r" (val), "=m" (__force_order) : "0" (0)); 217 + asm volatile("1: mov %%cr4, %0\n" 218 + "2:\n" 219 + _ASM_EXTABLE(1b,2b) 220 + : "=r" (val), "=m" (__force_order) : "0" (0)); 220 221 #else 221 222 val = native_read_cr4(); 222 223 #endif ··· 271 276 272 277 #endif /* __KERNEL__ */ 273 278 274 - static inline void clflush(void *__p) 279 + static inline void clflush(volatile void *__p) 275 280 { 276 - asm volatile("clflush %0" : "+m" (*(char __force *)__p)); 281 + asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); 277 282 } 278 283 279 284 #define nop() __asm__ __volatile__ ("nop")
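clflush() now takes a volatile pointer; its users (e.g. clflush_cache_range() in the pageattr.c hunk above) flush every cache line overlapping a buffer. A simplified sketch of that loop, with a hard-coded 64-byte line size purely for illustration (real code derives the line size from CPUID):

/* Sketch: write back and invalidate all cache lines covering [vaddr, vaddr+size). */
static void flush_buffer_lines(void *vaddr, unsigned int size)
{
	const unsigned long line = 64;		/* illustrative cache-line size */
	void *vend = vaddr + size - 1;
	void *p;

	mb();					/* order prior stores before the flushes */
	for (p = (void *)((unsigned long)vaddr & ~(line - 1)); p <= vend; p += line)
		clflush(p);
	mb();					/* ensure the flushes have completed */
}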
+5 -13
include/asm-x86/uaccess_32.h
··· 8 8 #include <linux/thread_info.h> 9 9 #include <linux/prefetch.h> 10 10 #include <linux/string.h> 11 + #include <asm/asm.h> 11 12 #include <asm/page.h> 12 13 13 14 #define VERIFY_READ 0 ··· 288 287 "4: movl %3,%0\n" \ 289 288 " jmp 3b\n" \ 290 289 ".previous\n" \ 291 - ".section __ex_table,\"a\"\n" \ 292 - " .align 4\n" \ 293 - " .long 1b,4b\n" \ 294 - " .long 2b,4b\n" \ 295 - ".previous" \ 290 + _ASM_EXTABLE(1b,4b) \ 291 + _ASM_EXTABLE(2b,4b) \ 296 292 : "=r"(err) \ 297 293 : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) 298 294 ··· 336 338 "3: movl %3,%0\n" \ 337 339 " jmp 2b\n" \ 338 340 ".previous\n" \ 339 - ".section __ex_table,\"a\"\n" \ 340 - " .align 4\n" \ 341 - " .long 1b,3b\n" \ 342 - ".previous" \ 341 + _ASM_EXTABLE(1b,3b) \ 343 342 : "=r"(err) \ 344 343 : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) 345 344 ··· 373 378 " xor"itype" %"rtype"1,%"rtype"1\n" \ 374 379 " jmp 2b\n" \ 375 380 ".previous\n" \ 376 - ".section __ex_table,\"a\"\n" \ 377 - " .align 4\n" \ 378 - " .long 1b,3b\n" \ 379 - ".previous" \ 381 + _ASM_EXTABLE(1b,3b) \ 380 382 : "=r"(err), ltype (x) \ 381 383 : "m"(__m(addr)), "i"(errret), "0"(err)) 382 384
+2 -8
include/asm-x86/uaccess_64.h
··· 181 181 "3: mov %3,%0\n" \ 182 182 " jmp 2b\n" \ 183 183 ".previous\n" \ 184 - ".section __ex_table,\"a\"\n" \ 185 - " .align 8\n" \ 186 - " .quad 1b,3b\n" \ 187 - ".previous" \ 184 + _ASM_EXTABLE(1b,3b) \ 188 185 : "=r"(err) \ 189 186 : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err)) 190 187 ··· 223 226 " xor"itype" %"rtype"1,%"rtype"1\n" \ 224 227 " jmp 2b\n" \ 225 228 ".previous\n" \ 226 - ".section __ex_table,\"a\"\n" \ 227 - " .align 8\n" \ 228 - " .quad 1b,3b\n" \ 229 - ".previous" \ 229 + _ASM_EXTABLE(1b,3b) \ 230 230 : "=r"(err), ltype (x) \ 231 231 : "m"(__m(addr)), "i"(errno), "0"(err)) 232 232
+1
include/asm-x86/vm86.h
··· 195 195 196 196 void handle_vm86_fault(struct kernel_vm86_regs *, long); 197 197 int handle_vm86_trap(struct kernel_vm86_regs *, long, int); 198 + struct pt_regs *save_v86_state(struct kernel_vm86_regs *); 198 199 199 200 struct task_struct; 200 201 void release_vm86_irqs(struct task_struct *);