Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (32 commits)
x86: cpa, strict range check in try_preserve_large_page()
x86: cpa, enable CONFIG_DEBUG_PAGEALLOC on 64-bit
x86: cpa, use page pool
x86: introduce page pool in cpa
x86: DEBUG_PAGEALLOC: enable after mem_init()
brk: help text typo fix
lguest: accept guest _PAGE_PWT page table entries
x86 PM: update stale comments
x86 PM: consolidate suspend and hibernation code
x86 PM: rename 32-bit files in arch/x86/power
x86 PM: move 64-bit hibernation files to arch/x86/power
x86: trivial printk optimizations
x86: fix early_ioremap pagetable ops
x86: construct 32-bit boot time page tables in native format.
x86, core: remove CONFIG_FORCED_INLINING
x86: avoid unused variable warning in mm/init_64.c
x86: fixup more paravirt fallout
brk: document randomize_va_space and CONFIG_COMPAT_BRK (was Re:
x86: fix sparse warnings in acpi/bus.c
x86: fix sparse warning in topology.c
...

+684 -467
-9
Documentation/feature-removal-schedule.txt
··· 111 112 --------------------------- 113 114 - What: CONFIG_FORCED_INLINING 115 - When: June 2006 116 - Why: Config option is there to see if gcc is good enough. (in january 117 - 2006). If it is, the behavior should just be the default. If it's not, 118 - the option should just go away entirely. 119 - Who: Arjan van de Ven 120 - 121 - --------------------------- 122 - 123 What: eepro100 network driver 124 When: January 2007 125 Why: replaced by the e100 driver
··· 111 112 --------------------------- 113 114 What: eepro100 network driver 115 When: January 2007 116 Why: replaced by the e100 driver
+29
Documentation/sysctl/kernel.txt
··· 41 - pid_max 42 - powersave-nap [ PPC only ] 43 - printk 44 - real-root-dev ==> Documentation/initrd.txt 45 - reboot-cmd [ SPARC only ] 46 - rtsig-max ··· 278 seconds, we do allow a burst of messages to pass through. 279 printk_ratelimit_burst specifies the number of messages we can 280 send before ratelimiting kicks in. 281 282 ============================================================== 283
··· 41 - pid_max 42 - powersave-nap [ PPC only ] 43 - printk 44 + - randomize_va_space 45 - real-root-dev ==> Documentation/initrd.txt 46 - reboot-cmd [ SPARC only ] 47 - rtsig-max ··· 277 seconds, we do allow a burst of messages to pass through. 278 printk_ratelimit_burst specifies the number of messages we can 279 send before ratelimiting kicks in. 280 + 281 + ============================================================== 282 + 283 + randomize-va-space: 284 + 285 + This option can be used to select the type of process address 286 + space randomization that is used in the system, for architectures 287 + that support this feature. 288 + 289 + 0 - Turn the process address space randomization off by default. 290 + 291 + 1 - Make the addresses of mmap base, stack and VDSO page randomized. 292 + This, among other things, implies that shared libraries will be 293 + loaded to random addresses. Also for PIE-linked binaries, the location 294 + of code start is randomized. 295 + 296 + With heap randomization, the situation is a little bit more 297 + complicated. 298 + There are a few legacy applications out there (such as some ancient 299 + versions of libc.so.5 from 1996) that assume that brk area starts 300 + just after the end of the code+bss. These applications break when 301 + start of the brk area is randomized. There are however no known 302 + non-legacy applications that would be broken this way, so for most 303 + systems it is safe to choose full randomization. However there is 304 + a CONFIG_COMPAT_BRK option for systems with ancient and/or broken 305 + binaries, that makes heap non-randomized, but keeps all other 306 + parts of process address space randomized if randomize_va_space 307 + sysctl is turned on. 308 309 ============================================================== 310
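The new sysctl is exposed through procfs like the others documented in this file. A minimal user-space sketch (illustrative only, not part of the patch): it reads the current mode from /proc/sys/kernel/randomize_va_space, and with any non-zero mode the printed stack address should differ between runs.

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/randomize_va_space", "r");
        int mode, probe;

        if (!f || fscanf(f, "%d", &mode) != 1)
                return 1;
        fclose(f);
        printf("randomize_va_space = %d\n", mode);
        /* with mode >= 1 this stack address changes from run to run */
        printf("stack probe at %p\n", (void *)&probe);
        return 0;
}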
+1 -5
arch/x86/Kconfig.debug
··· 34 35 This option will slow down process creation somewhat. 36 37 - comment "Page alloc debug is incompatible with Software Suspend on i386" 38 - depends on DEBUG_KERNEL && HIBERNATION 39 - depends on X86_32 40 - 41 config DEBUG_PAGEALLOC 42 bool "Debug page memory allocations" 43 - depends on DEBUG_KERNEL && X86_32 44 help 45 Unmap pages from the kernel linear mapping after free_pages(). 46 This results in a large slowdown, but helps to find certain types
··· 34 35 This option will slow down process creation somewhat. 36 37 config DEBUG_PAGEALLOC 38 bool "Debug page memory allocations" 39 + depends on DEBUG_KERNEL 40 help 41 Unmap pages from the kernel linear mapping after free_pages(). 42 This results in a large slowdown, but helps to find certain types
+3 -1
arch/x86/Makefile
··· 191 # must be linked after kernel/ 192 drivers-$(CONFIG_OPROFILE) += arch/x86/oprofile/ 193 194 - ifeq ($(CONFIG_X86_32),y) 195 drivers-$(CONFIG_PM) += arch/x86/power/ 196 drivers-$(CONFIG_FB) += arch/x86/video/ 197 endif 198
··· 191 # must be linked after kernel/ 192 drivers-$(CONFIG_OPROFILE) += arch/x86/oprofile/ 193 194 + # suspend and hibernation support 195 drivers-$(CONFIG_PM) += arch/x86/power/ 196 + 197 + ifeq ($(CONFIG_X86_32),y) 198 drivers-$(CONFIG_FB) += arch/x86/video/ 199 endif 200
+14 -10
arch/x86/boot/printf.c
··· 33 #define PLUS 4 /* show plus */ 34 #define SPACE 8 /* space if plus */ 35 #define LEFT 16 /* left justified */ 36 - #define SPECIAL 32 /* 0x */ 37 - #define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ 38 39 #define do_div(n,base) ({ \ 40 int __res; \ ··· 45 static char *number(char *str, long num, int base, int size, int precision, 46 int type) 47 { 48 - char c, sign, tmp[66]; 49 - const char *digits = "0123456789abcdefghijklmnopqrstuvwxyz"; 50 int i; 51 52 - if (type & LARGE) 53 - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; 54 if (type & LEFT) 55 type &= ~ZEROPAD; 56 if (base < 2 || base > 36) ··· 85 tmp[i++] = '0'; 86 else 87 while (num != 0) 88 - tmp[i++] = digits[do_div(num, base)]; 89 if (i > precision) 90 precision = i; 91 size -= precision; ··· 99 *str++ = '0'; 100 else if (base == 16) { 101 *str++ = '0'; 102 - *str++ = digits[33]; 103 } 104 } 105 if (!(type & LEFT)) ··· 248 base = 8; 249 break; 250 251 - case 'X': 252 - flags |= LARGE; 253 case 'x': 254 base = 16; 255 break; 256
··· 33 #define PLUS 4 /* show plus */ 34 #define SPACE 8 /* space if plus */ 35 #define LEFT 16 /* left justified */ 36 + #define SMALL 32 /* Must be 32 == 0x20 */ 37 + #define SPECIAL 64 /* 0x */ 38 39 #define do_div(n,base) ({ \ 40 int __res; \ ··· 45 static char *number(char *str, long num, int base, int size, int precision, 46 int type) 47 { 48 + /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ 49 + static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ 50 + 51 + char tmp[66]; 52 + char c, sign, locase; 53 int i; 54 55 + /* locase = 0 or 0x20. ORing digits or letters with 'locase' 56 + * produces same digits or (maybe lowercased) letters */ 57 + locase = (type & SMALL); 58 if (type & LEFT) 59 type &= ~ZEROPAD; 60 if (base < 2 || base > 36) ··· 81 tmp[i++] = '0'; 82 else 83 while (num != 0) 84 + tmp[i++] = (digits[do_div(num, base)] | locase); 85 if (i > precision) 86 precision = i; 87 size -= precision; ··· 95 *str++ = '0'; 96 else if (base == 16) { 97 *str++ = '0'; 98 + *str++ = ('X' | locase); 99 } 100 } 101 if (!(type & LEFT)) ··· 244 base = 8; 245 break; 246 247 case 'x': 248 + flags |= SMALL; 249 + case 'X': 250 base = 16; 251 break; 252
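The SMALL/locase rewrite leans on an ASCII property: bit 0x20 is exactly what separates 'A'..'Z' from 'a'..'z', and that bit is already set in the digit characters '0'..'9', so ORing with the flag lowercases hex letters while leaving digits untouched. A self-contained sketch of the trick (illustrative, not part of the patch):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        static const char digits[16] = "0123456789ABCDEF";
        char locase = 0x20;     /* the SMALL flag: set for %x, clear for %X */

        assert(('7' | locase) == '7');  /* digits already carry bit 0x20 */
        assert(('A' | locase) == 'a');  /* letters get lowercased */
        printf("0x%X -> %c%c (upper), %c%c (lower)\n", 0xAF,
               digits[10], digits[15],
               digits[10] | locase, digits[15] | locase);
        return 0;
}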
-1
arch/x86/configs/i386_defconfig
··· 1421 # CONFIG_DEBUG_VM is not set 1422 # CONFIG_DEBUG_LIST is not set 1423 # CONFIG_FRAME_POINTER is not set 1424 - # CONFIG_FORCED_INLINING is not set 1425 # CONFIG_RCU_TORTURE_TEST is not set 1426 # CONFIG_LKDTM is not set 1427 # CONFIG_FAULT_INJECTION is not set
··· 1421 # CONFIG_DEBUG_VM is not set 1422 # CONFIG_DEBUG_LIST is not set 1423 # CONFIG_FRAME_POINTER is not set 1424 # CONFIG_RCU_TORTURE_TEST is not set 1425 # CONFIG_LKDTM is not set 1426 # CONFIG_FAULT_INJECTION is not set
-1
arch/x86/configs/x86_64_defconfig
··· 1346 # CONFIG_DEBUG_VM is not set 1347 # CONFIG_DEBUG_LIST is not set 1348 # CONFIG_FRAME_POINTER is not set 1349 - # CONFIG_FORCED_INLINING is not set 1350 # CONFIG_RCU_TORTURE_TEST is not set 1351 # CONFIG_LKDTM is not set 1352 # CONFIG_FAULT_INJECTION is not set
··· 1346 # CONFIG_DEBUG_VM is not set 1347 # CONFIG_DEBUG_LIST is not set 1348 # CONFIG_FRAME_POINTER is not set 1349 # CONFIG_RCU_TORTURE_TEST is not set 1350 # CONFIG_LKDTM is not set 1351 # CONFIG_FAULT_INJECTION is not set
-2
arch/x86/kernel/Makefile
··· 84 obj-y += genapic_64.o genapic_flat_64.o 85 obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o 86 obj-$(CONFIG_AUDIT) += audit_64.o 87 - obj-$(CONFIG_PM) += suspend_64.o 88 - obj-$(CONFIG_HIBERNATION) += suspend_asm_64.o 89 90 obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o 91 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
··· 84 obj-y += genapic_64.o genapic_flat_64.o 85 obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o 86 obj-$(CONFIG_AUDIT) += audit_64.o 87 88 obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o 89 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
+1 -1
arch/x86/kernel/cpu/mcheck/therm_throt.c
··· 118 119 static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev) 120 { 121 - return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); 122 } 123 124 /* Mutex protecting device creation against CPU hotplug */
··· 118 119 static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev) 120 { 121 + sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); 122 } 123 124 /* Mutex protecting device creation against CPU hotplug */
+6 -9
arch/x86/kernel/entry_32.S
··· 409 RESTORE_REGS 410 addl $4, %esp # skip orig_eax/error_code 411 CFI_ADJUST_CFA_OFFSET -4 412 - 1: INTERRUPT_RETURN 413 .section .fixup,"ax" 414 iret_exc: 415 pushl $0 # no error code ··· 419 .previous 420 .section __ex_table,"a" 421 .align 4 422 - .long 1b,iret_exc 423 .previous 424 425 CFI_RESTORE_STATE ··· 866 RESTORE_REGS 867 lss 12+4(%esp), %esp # back to espfix stack 868 CFI_ADJUST_CFA_OFFSET -24 869 - 1: INTERRUPT_RETURN 870 CFI_ENDPROC 871 - .section __ex_table,"a" 872 - .align 4 873 - .long 1b,iret_exc 874 - .previous 875 KPROBE_END(nmi) 876 877 #ifdef CONFIG_PARAVIRT 878 ENTRY(native_iret) 879 - 1: iret 880 .section __ex_table,"a" 881 .align 4 882 - .long 1b,iret_exc 883 .previous 884 END(native_iret) 885
··· 409 RESTORE_REGS 410 addl $4, %esp # skip orig_eax/error_code 411 CFI_ADJUST_CFA_OFFSET -4 412 + ENTRY(irq_return) 413 + INTERRUPT_RETURN 414 .section .fixup,"ax" 415 iret_exc: 416 pushl $0 # no error code ··· 418 .previous 419 .section __ex_table,"a" 420 .align 4 421 + .long irq_return,iret_exc 422 .previous 423 424 CFI_RESTORE_STATE ··· 865 RESTORE_REGS 866 lss 12+4(%esp), %esp # back to espfix stack 867 CFI_ADJUST_CFA_OFFSET -24 868 + jmp irq_return 869 CFI_ENDPROC 870 KPROBE_END(nmi) 871 872 #ifdef CONFIG_PARAVIRT 873 ENTRY(native_iret) 874 + iret 875 .section __ex_table,"a" 876 .align 4 877 + .long native_iret, iret_exc 878 .previous 879 END(native_iret) 880
+13 -5
arch/x86/kernel/entry_64.S
··· 581 */ 582 TRACE_IRQS_IRETQ 583 restore_args: 584 - RESTORE_ARGS 0,8,0 585 - #ifdef CONFIG_PARAVIRT 586 INTERRUPT_RETURN 587 - #endif 588 ENTRY(native_iret) 589 iretq 590 591 .section __ex_table,"a" 592 .quad native_iret, bad_iret 593 .previous 594 .section .fixup,"ax" 595 bad_iret: 596 /* ··· 812 SWAPGS_UNSAFE_STACK 813 paranoid_restore\trace: 814 RESTORE_ALL 8 815 - INTERRUPT_RETURN 816 paranoid_userspace\trace: 817 GET_THREAD_INFO(%rcx) 818 movl threadinfo_flags(%rcx),%ebx ··· 927 iret run with kernel gs again, so don't set the user space flag. 928 B stepping K8s sometimes report a truncated RIP for IRET 929 exceptions returning to compat mode. Check for these here too. */ 930 - leaq native_iret(%rip),%rbp 931 cmpq %rbp,RIP(%rsp) 932 je error_swapgs 933 movl %ebp,%ebp /* zero extend */
··· 581 */ 582 TRACE_IRQS_IRETQ 583 restore_args: 584 + RESTORE_ARGS 0,8,0 585 + 586 + ENTRY(irq_return) 587 INTERRUPT_RETURN 588 + 589 + .section __ex_table, "a" 590 + .quad irq_return, bad_iret 591 + .previous 592 + 593 + #ifdef CONFIG_PARAVIRT 594 ENTRY(native_iret) 595 iretq 596 597 .section __ex_table,"a" 598 .quad native_iret, bad_iret 599 .previous 600 + #endif 601 + 602 .section .fixup,"ax" 603 bad_iret: 604 /* ··· 804 SWAPGS_UNSAFE_STACK 805 paranoid_restore\trace: 806 RESTORE_ALL 8 807 + jmp irq_return 808 paranoid_userspace\trace: 809 GET_THREAD_INFO(%rcx) 810 movl threadinfo_flags(%rcx),%ebx ··· 919 iret run with kernel gs again, so don't set the user space flag. 920 B stepping K8s sometimes report a truncated RIP for IRET 921 exceptions returning to compat mode. Check for these here too. */ 922 + leaq irq_return(%rip),%rbp 923 cmpq %rbp,RIP(%rsp) 924 je error_swapgs 925 movl %ebp,%ebp /* zero extend */
+1 -4
arch/x86/kernel/geode_32.c
··· 163 164 static int __init geode_southbridge_init(void) 165 { 166 - int timers; 167 - 168 if (!is_geode()) 169 return -ENODEV; 170 171 init_lbars(); 172 - timers = geode_mfgpt_detect(); 173 - printk(KERN_INFO "geode: %d MFGPT timers available.\n", timers); 174 return 0; 175 } 176
··· 163 164 static int __init geode_southbridge_init(void) 165 { 166 if (!is_geode()) 167 return -ENODEV; 168 169 init_lbars(); 170 + (void) mfgpt_timer_setup(); 171 return 0; 172 } 173
+116 -35
arch/x86/kernel/head_32.S
··· 19 #include <asm/thread_info.h> 20 #include <asm/asm-offsets.h> 21 #include <asm/setup.h> 22 23 /* 24 * References to members of the new_cpu_data structure. ··· 84 */ 85 .section .text.head,"ax",@progbits 86 ENTRY(startup_32) 87 - /* check to see if KEEP_SEGMENTS flag is meaningful */ 88 - cmpw $0x207, BP_version(%esi) 89 - jb 1f 90 - 91 /* test KEEP_SEGMENTS flag to see if the bootloader is asking 92 us to not reload segments */ 93 testb $(1<<6), BP_loadflags(%esi) ··· 92 /* 93 * Set segments to known values. 94 */ 95 - 1: lgdt boot_gdt_descr - __PAGE_OFFSET 96 movl $(__BOOT_DS),%eax 97 movl %eax,%ds 98 movl %eax,%es ··· 105 */ 106 cld 107 xorl %eax,%eax 108 - movl $__bss_start - __PAGE_OFFSET,%edi 109 - movl $__bss_stop - __PAGE_OFFSET,%ecx 110 subl %edi,%ecx 111 shrl $2,%ecx 112 rep ; stosl ··· 118 * (kexec on panic case). Hence copy out the parameters before initializing 119 * page tables. 120 */ 121 - movl $(boot_params - __PAGE_OFFSET),%edi 122 movl $(PARAM_SIZE/4),%ecx 123 cld 124 rep 125 movsl 126 - movl boot_params - __PAGE_OFFSET + NEW_CL_POINTER,%esi 127 andl %esi,%esi 128 jz 1f # No command line 129 - movl $(boot_command_line - __PAGE_OFFSET),%edi 130 movl $(COMMAND_LINE_SIZE/4),%ecx 131 rep 132 movsl 133 1: 134 135 #ifdef CONFIG_PARAVIRT 136 - cmpw $0x207, (boot_params + BP_version - __PAGE_OFFSET) 137 jb default_entry 138 139 /* Paravirt-compatible boot parameters. Look to see what architecture 140 we're booting under. */ 141 - movl (boot_params + BP_hardware_subarch - __PAGE_OFFSET), %eax 142 cmpl $num_subarch_entries, %eax 143 jae bad_subarch 144 145 - movl subarch_entries - __PAGE_OFFSET(,%eax,4), %eax 146 subl $__PAGE_OFFSET, %eax 147 jmp *%eax 148 ··· 171 * Mappings are created both at virtual address 0 (identity mapping) 172 * and PAGE_OFFSET for up to _end+sizeof(page tables)+INIT_MAP_BEYOND_END. 173 * 174 - * Warning: don't use %esi or the stack in this code. However, %esp 175 - * can be used as a GPR if you really need it... 176 */ 177 - page_pde_offset = (__PAGE_OFFSET >> 20); 178 179 default_entry: 180 - movl $(pg0 - __PAGE_OFFSET), %edi 181 - movl $(swapper_pg_dir - __PAGE_OFFSET), %edx 182 - movl $0x007, %eax /* 0x007 = PRESENT+RW+USER */ 183 10: 184 - leal 0x007(%edi),%ecx /* Create PDE entry */ 185 movl %ecx,(%edx) /* Store identity PDE entry */ 186 movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */ 187 addl $4,%edx ··· 241 stosl 242 addl $0x1000,%eax 243 loop 11b 244 - /* End condition: we must map up to and including INIT_MAP_BEYOND_END */ 245 - /* bytes beyond the end of our own page tables; the +0x007 is the attribute bits */ 246 - leal (INIT_MAP_BEYOND_END+0x007)(%edi),%ebp 247 cmpl %ebp,%eax 248 jb 10b 249 - movl %edi,(init_pg_tables_end - __PAGE_OFFSET) 250 251 - /* Do an early initialization of the fixmap area */ 252 - movl $(swapper_pg_dir - __PAGE_OFFSET), %edx 253 - movl $(swapper_pg_pmd - __PAGE_OFFSET), %eax 254 - addl $0x67, %eax /* 0x67 == _PAGE_TABLE */ 255 - movl %eax, 4092(%edx) 256 - 257 jmp 3f 258 /* 259 * Non-boot CPU entry point; entered from trampoline.S ··· 294 * NOTE! We have to correct for the fact that we're 295 * not yet offset PAGE_OFFSET.. 296 */ 297 - #define cr4_bits mmu_cr4_features-__PAGE_OFFSET 298 movl cr4_bits,%edx 299 andl %edx,%edx 300 jz 6f ··· 329 /* 330 * Enable paging 331 */ 332 - movl $swapper_pg_dir-__PAGE_OFFSET,%eax 333 movl %eax,%cr3 /* set the page table pointer.. */ 334 movl %cr0,%eax 335 - orl $0x80000000,%eax 336 movl %eax,%cr0 /* ..and set paging (PG) bit */ 337 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ 338 1: ··· 605 */ 606 .section ".bss.page_aligned","wa" 607 .align PAGE_SIZE_asm 608 ENTRY(swapper_pg_dir) 609 .fill 1024,4,0 610 - ENTRY(swapper_pg_pmd) 611 .fill 1024,4,0 612 ENTRY(empty_zero_page) 613 .fill 4096,1,0 614 - 615 /* 616 * This starts the data section. 617 */ 618 .data 619 ENTRY(stack_start) 620 .long init_thread_union+THREAD_SIZE
··· 19 #include <asm/thread_info.h> 20 #include <asm/asm-offsets.h> 21 #include <asm/setup.h> 22 + #include <asm/processor-flags.h> 23 + 24 + /* Physical address */ 25 + #define pa(X) ((X) - __PAGE_OFFSET) 26 27 /* 28 * References to members of the new_cpu_data structure. ··· 80 */ 81 .section .text.head,"ax",@progbits 82 ENTRY(startup_32) 83 /* test KEEP_SEGMENTS flag to see if the bootloader is asking 84 us to not reload segments */ 85 testb $(1<<6), BP_loadflags(%esi) ··· 92 /* 93 * Set segments to known values. 94 */ 95 + lgdt pa(boot_gdt_descr) 96 movl $(__BOOT_DS),%eax 97 movl %eax,%ds 98 movl %eax,%es ··· 105 */ 106 cld 107 xorl %eax,%eax 108 + movl $pa(__bss_start),%edi 109 + movl $pa(__bss_stop),%ecx 110 subl %edi,%ecx 111 shrl $2,%ecx 112 rep ; stosl ··· 118 * (kexec on panic case). Hence copy out the parameters before initializing 119 * page tables. 120 */ 121 + movl $pa(boot_params),%edi 122 movl $(PARAM_SIZE/4),%ecx 123 cld 124 rep 125 movsl 126 + movl pa(boot_params) + NEW_CL_POINTER,%esi 127 andl %esi,%esi 128 jz 1f # No command line 129 + movl $pa(boot_command_line),%edi 130 movl $(COMMAND_LINE_SIZE/4),%ecx 131 rep 132 movsl 133 1: 134 135 #ifdef CONFIG_PARAVIRT 136 + /* This can only trip for a broken bootloader... */ 137 + cmpw $0x207, pa(boot_params + BP_version) 138 jb default_entry 139 140 /* Paravirt-compatible boot parameters. Look to see what architecture 141 we're booting under. */ 142 + movl pa(boot_params + BP_hardware_subarch), %eax 143 cmpl $num_subarch_entries, %eax 144 jae bad_subarch 145 146 + movl pa(subarch_entries)(,%eax,4), %eax 147 subl $__PAGE_OFFSET, %eax 148 jmp *%eax 149 ··· 170 * Mappings are created both at virtual address 0 (identity mapping) 171 * and PAGE_OFFSET for up to _end+sizeof(page tables)+INIT_MAP_BEYOND_END. 172 * 173 + * Note that the stack is not yet set up! 174 */ 175 + #define PTE_ATTR 0x007 /* PRESENT+RW+USER */ 176 + #define PDE_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */ 177 + #define PGD_ATTR 0x001 /* PRESENT (no other attributes) */ 178 179 default_entry: 180 + #ifdef CONFIG_X86_PAE 181 + 182 + /* 183 + * In PAE mode swapper_pg_dir is statically defined to contain enough 184 + * entries to cover the VMSPLIT option (that is the top 1, 2 or 3 185 + * entries). The identity mapping is handled by pointing two PGD 186 + * entries to the first kernel PMD. 187 + * 188 + * Note the upper half of each PMD or PTE are always zero at 189 + * this stage. 190 + */ 191 + 192 + #define KPMDS ((0x100000000-__PAGE_OFFSET) >> 30) /* Number of kernel PMDs */ 193 + 194 + xorl %ebx,%ebx /* %ebx is kept at zero */ 195 + 196 + movl $pa(pg0), %edi 197 + movl $pa(swapper_pg_pmd), %edx 198 + movl $PTE_ATTR, %eax 199 10: 200 + leal PDE_ATTR(%edi),%ecx /* Create PMD entry */ 201 + movl %ecx,(%edx) /* Store PMD entry */ 202 + /* Upper half already zero */ 203 + addl $8,%edx 204 + movl $512,%ecx 205 + 11: 206 + stosl 207 + xchgl %eax,%ebx 208 + stosl 209 + xchgl %eax,%ebx 210 + addl $0x1000,%eax 211 + loop 11b 212 + 213 + /* 214 + * End condition: we must map up to and including INIT_MAP_BEYOND_END 215 + * bytes beyond the end of our own page tables. 216 + */ 217 + leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp 218 + cmpl %ebp,%eax 219 + jb 10b 220 + 1: 221 + movl %edi,pa(init_pg_tables_end) 222 + 223 + /* Do early initialization of the fixmap area */ 224 + movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax 225 + movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8) 226 + #else /* Not PAE */ 227 + 228 + page_pde_offset = (__PAGE_OFFSET >> 20); 229 + 230 + movl $pa(pg0), %edi 231 + movl $pa(swapper_pg_dir), %edx 232 + movl $PTE_ATTR, %eax 233 + 10: 234 + leal PDE_ATTR(%edi),%ecx /* Create PDE entry */ 235 movl %ecx,(%edx) /* Store identity PDE entry */ 236 movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */ 237 addl $4,%edx ··· 189 stosl 190 addl $0x1000,%eax 191 loop 11b 192 + /* 193 + * End condition: we must map up to and including INIT_MAP_BEYOND_END 194 + * bytes beyond the end of our own page tables; the +0x007 is 195 + * the attribute bits 196 + */ 197 + leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp 198 cmpl %ebp,%eax 199 jb 10b 200 + movl %edi,pa(init_pg_tables_end) 201 202 + /* Do early initialization of the fixmap area */ 203 + movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax 204 + movl %eax,pa(swapper_pg_dir+0xffc) 205 + #endif 206 jmp 3f 207 /* 208 * Non-boot CPU entry point; entered from trampoline.S ··· 241 * NOTE! We have to correct for the fact that we're 242 * not yet offset PAGE_OFFSET.. 243 */ 244 + #define cr4_bits pa(mmu_cr4_features) 245 movl cr4_bits,%edx 246 andl %edx,%edx 247 jz 6f ··· 276 /* 277 * Enable paging 278 */ 279 + movl $pa(swapper_pg_dir),%eax 280 movl %eax,%cr3 /* set the page table pointer.. */ 281 movl %cr0,%eax 282 + orl $X86_CR0_PG,%eax 283 movl %eax,%cr0 /* ..and set paging (PG) bit */ 284 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ 285 1: ··· 552 */ 553 .section ".bss.page_aligned","wa" 554 .align PAGE_SIZE_asm 555 + #ifdef CONFIG_X86_PAE 556 + ENTRY(swapper_pg_pmd) 557 + .fill 1024*KPMDS,4,0 558 + #else 559 ENTRY(swapper_pg_dir) 560 .fill 1024,4,0 561 + #endif 562 + ENTRY(swapper_pg_fixmap) 563 .fill 1024,4,0 564 ENTRY(empty_zero_page) 565 .fill 4096,1,0 566 /* 567 * This starts the data section. 568 */ 569 + #ifdef CONFIG_X86_PAE 570 + .section ".data.page_aligned","wa" 571 + /* Page-aligned for the benefit of paravirt? */ 572 + .align PAGE_SIZE_asm 573 + ENTRY(swapper_pg_dir) 574 + .long pa(swapper_pg_pmd+PGD_ATTR),0 /* low identity map */ 575 + # if KPMDS == 3 576 + .long pa(swapper_pg_pmd+PGD_ATTR),0 577 + .long pa(swapper_pg_pmd+PGD_ATTR+0x1000),0 578 + .long pa(swapper_pg_pmd+PGD_ATTR+0x2000),0 579 + # elif KPMDS == 2 580 + .long 0,0 581 + .long pa(swapper_pg_pmd+PGD_ATTR),0 582 + .long pa(swapper_pg_pmd+PGD_ATTR+0x1000),0 583 + # elif KPMDS == 1 584 + .long 0,0 585 + .long 0,0 586 + .long pa(swapper_pg_pmd+PGD_ATTR),0 587 + # else 588 + # error "Kernel PMDs should be 1, 2 or 3" 589 + # endif 590 + .align PAGE_SIZE_asm /* needs to be page-sized too */ 591 + #endif 592 + 593 .data 594 ENTRY(stack_start) 595 .long init_thread_union+THREAD_SIZE
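For reference, each PAE PMD page maps 512 entries of 2 MiB, i.e. 1 GiB, which is where the KPMDS formula comes from. A user-space sketch (illustrative only) evaluating it for the three VMSPLIT layouts the #if ladder accepts:

#include <stdio.h>

int main(void)
{
        /* common CONFIG_VMSPLIT_3G/2G/1G values of __PAGE_OFFSET */
        unsigned long long offsets[] = { 0xC0000000, 0x80000000, 0x40000000 };
        int i;

        for (i = 0; i < 3; i++) {
                unsigned long long kpmds = (0x100000000ULL - offsets[i]) >> 30;
                printf("__PAGE_OFFSET=%#llx -> KPMDS=%llu\n",
                       offsets[i], kpmds);
        }
        return 0;       /* prints KPMDS = 1, 2 and 3 respectively */
}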
+65 -58
arch/x86/kernel/mfgpt_32.c
··· 12 */ 13 14 /* 15 - * We are using the 32Khz input clock - its the only one that has the 16 * ranges we find desirable. The following table lists the suitable 17 - * divisors and the associated hz, minimum interval 18 - * and the maximum interval: 19 * 20 - * Divisor Hz Min Delta (S) Max Delta (S) 21 - * 1 32000 .0005 2.048 22 - * 2 16000 .001 4.096 23 - * 4 8000 .002 8.192 24 - * 8 4000 .004 16.384 25 - * 16 2000 .008 32.768 26 - * 32 1000 .016 65.536 27 - * 64 500 .032 131.072 28 - * 128 250 .064 262.144 29 - * 256 125 .128 524.288 30 */ 31 32 #include <linux/kernel.h> 33 #include <linux/interrupt.h> 34 - #include <linux/module.h> 35 #include <asm/geode.h> 36 37 - #define F_AVAIL 0x01 38 - 39 static struct mfgpt_timer_t { 40 - int flags; 41 - struct module *owner; 42 } mfgpt_timers[MFGPT_MAX_TIMERS]; 43 44 /* Selected from the table above */ 45 46 #define MFGPT_DIVISOR 16 47 #define MFGPT_SCALE 4 /* divisor = 2^(scale) */ 48 - #define MFGPT_HZ (32000 / MFGPT_DIVISOR) 49 #define MFGPT_PERIODIC (MFGPT_HZ / HZ) 50 - 51 - #ifdef CONFIG_GEODE_MFGPT_TIMER 52 - static int __init mfgpt_timer_setup(void); 53 - #else 54 - #define mfgpt_timer_setup() (0) 55 - #endif 56 57 /* Allow for disabling of MFGPTs */ 58 static int disable; ··· 74 * In other cases (such as with VSAless OpenFirmware), the system firmware 75 * leaves timers available for us to use. 76 */ 77 - int __init geode_mfgpt_detect(void) 78 { 79 - int count = 0, i; 80 u16 val; 81 82 if (disable) { 83 - printk(KERN_INFO "geode-mfgpt: Skipping MFGPT setup\n"); 84 - return 0; 85 } 86 87 for (i = 0; i < MFGPT_MAX_TIMERS; i++) { 88 val = geode_mfgpt_read(i, MFGPT_REG_SETUP); 89 if (!(val & MFGPT_SETUP_SETUP)) { 90 - mfgpt_timers[i].flags = F_AVAIL; 91 - count++; 92 } 93 } 94 95 - /* set up clock event device, if desired */ 96 - i = mfgpt_timer_setup(); 97 - 98 - return count; 99 } 100 101 int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable) ··· 181 return 0; 182 } 183 184 - static int mfgpt_get(int timer, struct module *owner) 185 { 186 - mfgpt_timers[timer].flags &= ~F_AVAIL; 187 - mfgpt_timers[timer].owner = owner; 188 printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer); 189 return timer; 190 } 191 192 - int geode_mfgpt_alloc_timer(int timer, int domain, struct module *owner) 193 { 194 int i; 195 196 - if (!geode_get_dev_base(GEODE_DEV_MFGPT)) 197 - return -ENODEV; 198 if (timer >= MFGPT_MAX_TIMERS) 199 - return -EIO; 200 201 if (timer < 0) { 202 /* Try to find an available timer */ 203 for (i = 0; i < MFGPT_MAX_TIMERS; i++) { 204 - if (mfgpt_timers[i].flags & F_AVAIL) 205 - return mfgpt_get(i, owner); 206 207 if (i == 5 && domain == MFGPT_DOMAIN_WORKING) 208 break; 209 } 210 } else { 211 /* If they requested a specific timer, try to honor that */ 212 - if (mfgpt_timers[timer].flags & F_AVAIL) 213 - return mfgpt_get(timer, owner); 214 } 215 216 /* No timers available - too bad */ ··· 247 } 248 __setup("mfgpt_irq=", mfgpt_setup); 249 250 - static inline void mfgpt_disable_timer(u16 clock) 251 { 252 - u16 val = geode_mfgpt_read(clock, MFGPT_REG_SETUP); 253 - geode_mfgpt_write(clock, MFGPT_REG_SETUP, val & ~MFGPT_SETUP_CNTEN); 254 } 255 256 static int mfgpt_next_event(unsigned long, struct clock_event_device *); ··· 267 .shift = 32 268 }; 269 270 - static inline void mfgpt_start_timer(u16 clock, u16 delta) 271 { 272 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta); 273 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0); ··· 282 mfgpt_disable_timer(mfgpt_event_clock); 283 284 if (mode == CLOCK_EVT_MODE_PERIODIC) 285 - mfgpt_start_timer(mfgpt_event_clock, MFGPT_PERIODIC); 286 287 mfgpt_tick_mode = mode; 288 } 289 290 static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt) 291 { 292 - mfgpt_start_timer(mfgpt_event_clock, delta); 293 return 0; 294 } 295 296 - /* Assume (foolishly?), that this interrupt was due to our tick */ 297 - 298 static irqreturn_t mfgpt_tick(int irq, void *dev_id) 299 { 300 /* Turn off the clock (and clear the event) */ 301 mfgpt_disable_timer(mfgpt_event_clock); 302 ··· 328 .name = "mfgpt-timer" 329 }; 330 331 - static int __init mfgpt_timer_setup(void) 332 { 333 int timer, ret; 334 u16 val; 335 336 - timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING, 337 - THIS_MODULE); 338 if (timer < 0) { 339 printk(KERN_ERR 340 "mfgpt-timer: Could not allocate a MFGPT timer\n"); ··· 370 &mfgpt_clockevent); 371 372 printk(KERN_INFO 373 - "mfgpt-timer: registering the MFGT timer as a clock event.\n"); 374 clockevents_register_device(&mfgpt_clockevent); 375 376 return 0;
··· 12 */ 13 14 /* 15 + * We are using the 32.768kHz input clock - it's the only one that has the 16 * ranges we find desirable. The following table lists the suitable 17 + * divisors and the associated Hz, minimum interval and the maximum interval: 18 * 19 + * Divisor Hz Min Delta (s) Max Delta (s) 20 + * 1 32768 .00048828125 2.000 21 + * 2 16384 .0009765625 4.000 22 + * 4 8192 .001953125 8.000 23 + * 8 4096 .00390625 16.000 24 + * 16 2048 .0078125 32.000 25 + * 32 1024 .015625 64.000 26 + * 64 512 .03125 128.000 27 + * 128 256 .0625 256.000 28 + * 256 128 .125 512.000 29 */ 30 31 #include <linux/kernel.h> 32 #include <linux/interrupt.h> 33 #include <asm/geode.h> 34 35 static struct mfgpt_timer_t { 36 + unsigned int avail:1; 37 } mfgpt_timers[MFGPT_MAX_TIMERS]; 38 39 /* Selected from the table above */ 40 41 #define MFGPT_DIVISOR 16 42 #define MFGPT_SCALE 4 /* divisor = 2^(scale) */ 43 + #define MFGPT_HZ (32768 / MFGPT_DIVISOR) 44 #define MFGPT_PERIODIC (MFGPT_HZ / HZ) 45 46 /* Allow for disabling of MFGPTs */ 47 static int disable; ··· 85 * In other cases (such as with VSAless OpenFirmware), the system firmware 86 * leaves timers available for us to use. 87 */ 88 + 89 + 90 + static int timers = -1; 91 + 92 + static void geode_mfgpt_detect(void) 93 { 94 + int i; 95 u16 val; 96 97 + timers = 0; 98 + 99 if (disable) { 100 + printk(KERN_INFO "geode-mfgpt: MFGPT support is disabled\n"); 101 + goto done; 102 + } 103 + 104 + if (!geode_get_dev_base(GEODE_DEV_MFGPT)) { 105 + printk(KERN_INFO "geode-mfgpt: MFGPT LBAR is not set up\n"); 106 + goto done; 107 } 108 109 for (i = 0; i < MFGPT_MAX_TIMERS; i++) { 110 val = geode_mfgpt_read(i, MFGPT_REG_SETUP); 111 if (!(val & MFGPT_SETUP_SETUP)) { 112 + mfgpt_timers[i].avail = 1; 113 + timers++; 114 } 115 } 116 117 + done: 118 + printk(KERN_INFO "geode-mfgpt: %d MFGPT timers available.\n", timers); 119 } 120 121 int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable) ··· 183 return 0; 184 } 185 186 + static int mfgpt_get(int timer) 187 { 188 + mfgpt_timers[timer].avail = 0; 189 printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer); 190 return timer; 191 } 192 193 + int geode_mfgpt_alloc_timer(int timer, int domain) 194 { 195 int i; 196 197 + if (timers == -1) { 198 + /* timers haven't been detected yet */ 199 + geode_mfgpt_detect(); 200 + } 201 + 202 + if (!timers) 203 + return -1; 204 + 205 if (timer >= MFGPT_MAX_TIMERS) 206 + return -1; 207 208 if (timer < 0) { 209 /* Try to find an available timer */ 210 for (i = 0; i < MFGPT_MAX_TIMERS; i++) { 211 + if (mfgpt_timers[i].avail) 212 + return mfgpt_get(i); 213 214 if (i == 5 && domain == MFGPT_DOMAIN_WORKING) 215 break; 216 } 217 } else { 218 /* If they requested a specific timer, try to honor that */ 219 + if (mfgpt_timers[timer].avail) 220 + return mfgpt_get(timer); 221 } 222 223 /* No timers available - too bad */ ··· 244 } 245 __setup("mfgpt_irq=", mfgpt_setup); 246 247 + static void mfgpt_disable_timer(u16 clock) 248 { 249 + /* avoid races by clearing CMP1 and CMP2 unconditionally */ 250 + geode_mfgpt_write(clock, MFGPT_REG_SETUP, (u16) ~MFGPT_SETUP_CNTEN | 251 + MFGPT_SETUP_CMP1 | MFGPT_SETUP_CMP2); 252 } 253 254 static int mfgpt_next_event(unsigned long, struct clock_event_device *); ··· 263 .shift = 32 264 }; 265 266 + static void mfgpt_start_timer(u16 delta) 267 { 268 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta); 269 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0); ··· 278 mfgpt_disable_timer(mfgpt_event_clock); 279 280 if (mode == CLOCK_EVT_MODE_PERIODIC) 281 + mfgpt_start_timer(MFGPT_PERIODIC); 282 283 mfgpt_tick_mode = mode; 284 } 285 286 static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt) 287 { 288 + mfgpt_start_timer(delta); 289 return 0; 290 } 291 292 static irqreturn_t mfgpt_tick(int irq, void *dev_id) 293 { 294 + u16 val = geode_mfgpt_read(mfgpt_event_clock, MFGPT_REG_SETUP); 295 + 296 + /* See if the interrupt was for us */ 297 + if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1))) 298 + return IRQ_NONE; 299 + 300 /* Turn off the clock (and clear the event) */ 301 mfgpt_disable_timer(mfgpt_event_clock); 302 ··· 320 .name = "mfgpt-timer" 321 }; 322 323 + int __init mfgpt_timer_setup(void) 324 { 325 int timer, ret; 326 u16 val; 327 328 + timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING); 329 if (timer < 0) { 330 printk(KERN_ERR 331 "mfgpt-timer: Could not allocate a MFGPT timer\n"); ··· 363 &mfgpt_clockevent); 364 365 printk(KERN_INFO 366 + "mfgpt-timer: registering the MFGPT timer as a clock event.\n"); 367 clockevents_register_device(&mfgpt_clockevent); 368 369 return 0;
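The corrected divisor table follows from the 32.768 kHz input and the 16-bit MFGPT counter: the maximum interval is 65536 ticks, and the minimum column matches a floor of 16 ticks (an assumption made here to reproduce the table, not stated in the driver). A sketch that regenerates the table:

#include <stdio.h>

int main(void)
{
        int div;

        for (div = 1; div <= 256; div <<= 1) {
                double hz = 32768.0 / div;

                /* 16-bit counter: 65536 ticks max, 16 ticks assumed min */
                printf("%3d %7.0f %14.11f %8.3f\n",
                       div, hz, 16.0 / hz, 65536.0 / hz);
        }
        return 0;
}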
+4
arch/x86/kernel/setup_32.c
··· 154 struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; 155 EXPORT_SYMBOL(boot_cpu_data); 156 157 unsigned long mmu_cr4_features; 158 159 /* for MCA, but anyone else can use it if they want */ 160 unsigned int machine_id;
··· 154 struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; 155 EXPORT_SYMBOL(boot_cpu_data); 156 157 + #ifndef CONFIG_X86_PAE 158 unsigned long mmu_cr4_features; 159 + #else 160 + unsigned long mmu_cr4_features = X86_CR4_PAE; 161 + #endif 162 163 /* for MCA, but anyone else can use it if they want */ 164 unsigned int machine_id;
+3 -157
arch/x86/kernel/suspend_64.c arch/x86/power/cpu_64.c
··· 1 /* 2 - * Suspend support specific for i386. 3 * 4 * Distribute under GPLv2 5 * 6 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz> 7 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> 8 */ ··· 14 #include <asm/page.h> 15 #include <asm/pgtable.h> 16 #include <asm/mtrr.h> 17 - 18 - /* References to section boundaries */ 19 - extern const void __nosave_begin, __nosave_end; 20 21 static void fix_processor_context(void); 22 ··· 61 mtrr_save_fixed_ranges(NULL); 62 63 /* 64 - * control registers 65 */ 66 rdmsrl(MSR_EFER, ctxt->efer); 67 ctxt->cr0 = read_cr0(); ··· 164 loaddebug(&current->thread, 7); 165 } 166 } 167 - 168 - #ifdef CONFIG_HIBERNATION 169 - /* Defined in arch/x86_64/kernel/suspend_asm.S */ 170 - extern int restore_image(void); 171 - 172 - /* 173 - * Address to jump to in the last phase of restore in order to get to the image 174 - * kernel's text (this value is passed in the image header). 175 - */ 176 - unsigned long restore_jump_address; 177 - 178 - /* 179 - * Value of the cr3 register from before the hibernation (this value is passed 180 - * in the image header). 181 - */ 182 - unsigned long restore_cr3; 183 - 184 - pgd_t *temp_level4_pgt; 185 - 186 - void *relocated_restore_code; 187 - 188 - static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) 189 - { 190 - long i, j; 191 - 192 - i = pud_index(address); 193 - pud = pud + i; 194 - for (; i < PTRS_PER_PUD; pud++, i++) { 195 - unsigned long paddr; 196 - pmd_t *pmd; 197 - 198 - paddr = address + i*PUD_SIZE; 199 - if (paddr >= end) 200 - break; 201 - 202 - pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); 203 - if (!pmd) 204 - return -ENOMEM; 205 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); 206 - for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) { 207 - unsigned long pe; 208 - 209 - if (paddr >= end) 210 - break; 211 - pe = __PAGE_KERNEL_LARGE_EXEC | paddr; 212 - pe &= __supported_pte_mask; 213 - set_pmd(pmd, __pmd(pe)); 214 - } 215 - } 216 - return 0; 217 - } 218 - 219 - static int set_up_temporary_mappings(void) 220 - { 221 - unsigned long start, end, next; 222 - int error; 223 - 224 - temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC); 225 - if (!temp_level4_pgt) 226 - return -ENOMEM; 227 - 228 - /* It is safe to reuse the original kernel mapping */ 229 - set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), 230 - init_level4_pgt[pgd_index(__START_KERNEL_map)]); 231 - 232 - /* Set up the direct mapping from scratch */ 233 - start = (unsigned long)pfn_to_kaddr(0); 234 - end = (unsigned long)pfn_to_kaddr(end_pfn); 235 - 236 - for (; start < end; start = next) { 237 - pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC); 238 - if (!pud) 239 - return -ENOMEM; 240 - next = start + PGDIR_SIZE; 241 - if (next > end) 242 - next = end; 243 - if ((error = res_phys_pud_init(pud, __pa(start), __pa(next)))) 244 - return error; 245 - set_pgd(temp_level4_pgt + pgd_index(start), 246 - mk_kernel_pgd(__pa(pud))); 247 - } 248 - return 0; 249 - } 250 - 251 - int swsusp_arch_resume(void) 252 - { 253 - int error; 254 - 255 - /* We have got enough memory and from now on we cannot recover */ 256 - if ((error = set_up_temporary_mappings())) 257 - return error; 258 - 259 - relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC); 260 - if (!relocated_restore_code) 261 - return -ENOMEM; 262 - memcpy(relocated_restore_code, &core_restore_code, 263 - &restore_registers - &core_restore_code); 264 - 265 - restore_image(); 266 - return 0; 267 - } 268 - 269 - /* 270 - * pfn_is_nosave - check if given pfn is in the 'nosave' section 271 - */ 272 - 273 - int pfn_is_nosave(unsigned long pfn) 274 - { 275 - unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; 276 - unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; 277 - return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); 278 - } 279 - 280 - struct restore_data_record { 281 - unsigned long jump_address; 282 - unsigned long cr3; 283 - unsigned long magic; 284 - }; 285 - 286 - #define RESTORE_MAGIC 0x0123456789ABCDEFUL 287 - 288 - /** 289 - * arch_hibernation_header_save - populate the architecture specific part 290 - * of a hibernation image header 291 - * @addr: address to save the data at 292 - */ 293 - int arch_hibernation_header_save(void *addr, unsigned int max_size) 294 - { 295 - struct restore_data_record *rdr = addr; 296 - 297 - if (max_size < sizeof(struct restore_data_record)) 298 - return -EOVERFLOW; 299 - rdr->jump_address = restore_jump_address; 300 - rdr->cr3 = restore_cr3; 301 - rdr->magic = RESTORE_MAGIC; 302 - return 0; 303 - } 304 - 305 - /** 306 - * arch_hibernation_header_restore - read the architecture specific data 307 - * from the hibernation image header 308 - * @addr: address to read the data from 309 - */ 310 - int arch_hibernation_header_restore(void *addr) 311 - { 312 - struct restore_data_record *rdr = addr; 313 - 314 - restore_jump_address = rdr->jump_address; 315 - restore_cr3 = rdr->cr3; 316 - return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL; 317 - } 318 - #endif /* CONFIG_HIBERNATION */
··· 1 /* 2 + * Suspend and hibernation support for x86-64 3 * 4 * Distribute under GPLv2 5 * 6 + * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl> 7 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz> 8 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> 9 */ ··· 13 #include <asm/page.h> 14 #include <asm/pgtable.h> 15 #include <asm/mtrr.h> 16 17 static void fix_processor_context(void); 18 ··· 63 mtrr_save_fixed_ranges(NULL); 64 65 /* 66 + * control registers 67 */ 68 rdmsrl(MSR_EFER, ctxt->efer); 69 ctxt->cr0 = read_cr0(); ··· 166 loaddebug(&current->thread, 7); 167 } 168 }
+7 -2
arch/x86/kernel/suspend_asm_64.S arch/x86/power/hibernate_asm_64.S
··· 1 - /* Copyright 2004,2005 Pavel Machek <pavel@suse.cz>, Andi Kleen <ak@suse.de>, Rafael J. Wysocki <rjw@sisk.pl> 2 * 3 * Distribute under GPLv2. 4 * 5 * swsusp_arch_resume must not use any stack or any nonlocal variables while 6 * copying pages: ··· 14 * image could very well be data page in "new" image, and overwriting 15 * your own stack under you is bad idea. 16 */ 17 - 18 .text 19 #include <linux/linkage.h> 20 #include <asm/segment.h>
··· 1 + /* 2 + * Hibernation support for x86-64 3 * 4 * Distribute under GPLv2. 5 + * 6 + * Copyright 2007 Rafael J. Wysocki <rjw@sisk.pl> 7 + * Copyright 2005 Andi Kleen <ak@suse.de> 8 + * Copyright 2004 Pavel Machek <pavel@suse.cz> 9 * 10 * swsusp_arch_resume must not use any stack or any nonlocal variables while 11 * copying pages: ··· 9 * image could very well be data page in "new" image, and overwriting 10 * your own stack under you is bad idea. 11 */ 12 + 13 .text 14 #include <linux/linkage.h> 15 #include <asm/segment.h>
+1 -1
arch/x86/kernel/topology.c
··· 53 54 void arch_unregister_cpu(int num) 55 { 56 - return unregister_cpu(&per_cpu(cpu_devices, num).cpu); 57 } 58 EXPORT_SYMBOL(arch_unregister_cpu); 59 #else
··· 53 54 void arch_unregister_cpu(int num) 55 { 56 + unregister_cpu(&per_cpu(cpu_devices, num).cpu); 57 } 58 EXPORT_SYMBOL(arch_unregister_cpu); 59 #else
+28 -44
arch/x86/mm/init_32.c
··· 46 #include <asm/pgalloc.h> 47 #include <asm/sections.h> 48 #include <asm/paravirt.h> 49 50 unsigned int __VMALLOC_RESERVE = 128 << 20; 51 ··· 329 330 void __init native_pagetable_setup_start(pgd_t *base) 331 { 332 - #ifdef CONFIG_X86_PAE 333 - int i; 334 335 /* 336 - * Init entries of the first-level page table to the 337 - * zero page, if they haven't already been set up. 338 - * 339 - * In a normal native boot, we'll be running on a 340 - * pagetable rooted in swapper_pg_dir, but not in PAE 341 - * mode, so this will end up clobbering the mappings 342 - * for the lower 24Mbytes of the address space, 343 - * without affecting the kernel address space. 344 */ 345 - for (i = 0; i < USER_PTRS_PER_PGD; i++) 346 - set_pgd(&base[i], 347 - __pgd(__pa(empty_zero_page) | _PAGE_PRESENT)); 348 349 - /* Make sure kernel address space is empty so that a pagetable 350 - will be allocated for it. */ 351 - memset(&base[USER_PTRS_PER_PGD], 0, 352 - KERNEL_PGD_PTRS * sizeof(pgd_t)); 353 - #else 354 paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT); 355 - #endif 356 } 357 358 void __init native_pagetable_setup_done(pgd_t *base) 359 { 360 - #ifdef CONFIG_X86_PAE 361 - /* 362 - * Add low memory identity-mappings - SMP needs it when 363 - * starting up on an AP from real-mode. In the non-PAE 364 - * case we already have these mappings through head.S. 365 - * All user-space mappings are explicitly cleared after 366 - * SMP startup. 367 - */ 368 - set_pgd(&base[0], base[USER_PTRS_PER_PGD]); 369 - #endif 370 } 371 372 /* ··· 369 * the boot process. 370 * 371 * If we're booting on native hardware, this will be a pagetable 372 - * constructed in arch/i386/kernel/head.S, and not running in PAE mode 373 - * (even if we'll end up running in PAE). The root of the pagetable 374 - * will be swapper_pg_dir. 375 * 376 * If we're booting paravirtualized under a hypervisor, then there are 377 * more options: we may already be running PAE, and the pagetable may ··· 531 532 load_cr3(swapper_pg_dir); 533 534 - #ifdef CONFIG_X86_PAE 535 - /* 536 - * We will bail out later - printk doesn't work right now so 537 - * the user would just see a hanging kernel. 538 - */ 539 - if (cpu_has_pae) 540 - set_in_cr4(X86_CR4_PAE); 541 - #endif 542 __flush_tlb_all(); 543 544 kmap_init(); ··· 661 BUG_ON((unsigned long)high_memory > VMALLOC_START); 662 #endif /* double-sanity-check paranoia */ 663 664 - #ifdef CONFIG_X86_PAE 665 - if (!cpu_has_pae) 666 - panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!"); 667 - #endif 668 if (boot_cpu_data.wp_works_ok < 0) 669 test_wp_bit(); 670 671 /* 672 * Subtle. SMP is doing it's boot stuff late (because it has to
··· 46 #include <asm/pgalloc.h> 47 #include <asm/sections.h> 48 #include <asm/paravirt.h> 49 + #include <asm/setup.h> 50 51 unsigned int __VMALLOC_RESERVE = 128 << 20; 52 ··· 328 329 void __init native_pagetable_setup_start(pgd_t *base) 330 { 331 + unsigned long pfn, va; 332 + pgd_t *pgd; 333 + pud_t *pud; 334 + pmd_t *pmd; 335 + pte_t *pte; 336 337 /* 338 + * Remove any mappings which extend past the end of physical 339 + * memory from the boot time page table: 340 */ 341 + for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) { 342 + va = PAGE_OFFSET + (pfn<<PAGE_SHIFT); 343 + pgd = base + pgd_index(va); 344 + if (!pgd_present(*pgd)) 345 + break; 346 347 + pud = pud_offset(pgd, va); 348 + pmd = pmd_offset(pud, va); 349 + if (!pmd_present(*pmd)) 350 + break; 351 + 352 + pte = pte_offset_kernel(pmd, va); 353 + if (!pte_present(*pte)) 354 + break; 355 + 356 + pte_clear(NULL, va, pte); 357 + } 358 paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT); 359 } 360 361 void __init native_pagetable_setup_done(pgd_t *base) 362 { 363 } 364 365 /* ··· 374 * the boot process. 375 * 376 * If we're booting on native hardware, this will be a pagetable 377 + * constructed in arch/x86/kernel/head_32.S. The root of the 378 + * pagetable will be swapper_pg_dir. 379 * 380 * If we're booting paravirtualized under a hypervisor, then there are 381 * more options: we may already be running PAE, and the pagetable may ··· 537 538 load_cr3(swapper_pg_dir); 539 540 __flush_tlb_all(); 541 542 kmap_init(); ··· 675 BUG_ON((unsigned long)high_memory > VMALLOC_START); 676 #endif /* double-sanity-check paranoia */ 677 678 if (boot_cpu_data.wp_works_ok < 0) 679 test_wp_bit(); 680 + 681 + cpa_init(); 682 683 /* 684 * Subtle. SMP is doing it's boot stuff late (because it has to
+5 -3
arch/x86/mm/init_64.c
··· 528 reservedpages << (PAGE_SHIFT-10), 529 datasize >> 10, 530 initsize >> 10); 531 } 532 533 void free_init_pages(char *what, unsigned long begin, unsigned long end) 534 { 535 - unsigned long addr; 536 537 - if (begin >= end) 538 return; 539 540 /* ··· 551 #else 552 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); 553 554 - for (addr = begin; addr < end; addr += PAGE_SIZE) { 555 ClearPageReserved(virt_to_page(addr)); 556 init_page_count(virt_to_page(addr)); 557 memset((void *)(addr & ~(PAGE_SIZE-1)),
··· 528 reservedpages << (PAGE_SHIFT-10), 529 datasize >> 10, 530 initsize >> 10); 531 + 532 + cpa_init(); 533 } 534 535 void free_init_pages(char *what, unsigned long begin, unsigned long end) 536 { 537 + unsigned long addr = begin; 538 539 + if (addr >= end) 540 return; 541 542 /* ··· 549 #else 550 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); 551 552 + for (; addr < end; addr += PAGE_SIZE) { 553 ClearPageReserved(virt_to_page(addr)); 554 init_page_count(virt_to_page(addr)); 555 memset((void *)(addr & ~(PAGE_SIZE-1)),
+31 -24
arch/x86/mm/ioremap.c
··· 260 early_param("early_ioremap_debug", early_ioremap_debug_setup); 261 262 static __initdata int after_paging_init; 263 - static __initdata unsigned long bm_pte[1024] 264 __attribute__((aligned(PAGE_SIZE))); 265 266 - static inline unsigned long * __init early_ioremap_pgd(unsigned long addr) 267 { 268 - return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023); 269 } 270 271 - static inline unsigned long * __init early_ioremap_pte(unsigned long addr) 272 { 273 - return bm_pte + ((addr >> PAGE_SHIFT) & 1023); 274 } 275 276 void __init early_ioremap_init(void) 277 { 278 - unsigned long *pgd; 279 280 if (early_ioremap_debug) 281 printk(KERN_INFO "early_ioremap_init()\n"); 282 283 - pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN)); 284 - *pgd = __pa(bm_pte) | _PAGE_TABLE; 285 memset(bm_pte, 0, sizeof(bm_pte)); 286 /* 287 - * The boot-ioremap range spans multiple pgds, for which 288 * we are not prepared: 289 */ 290 - if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) { 291 WARN_ON(1); 292 - printk(KERN_WARNING "pgd %p != %p\n", 293 - pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))); 294 printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", 295 - fix_to_virt(FIX_BTMAP_BEGIN)); 296 printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n", 297 - fix_to_virt(FIX_BTMAP_END)); 298 299 printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END); 300 printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n", ··· 309 310 void __init early_ioremap_clear(void) 311 { 312 - unsigned long *pgd; 313 314 if (early_ioremap_debug) 315 printk(KERN_INFO "early_ioremap_clear()\n"); 316 317 - pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN)); 318 - *pgd = 0; 319 - paravirt_release_pt(__pa(pgd) >> PAGE_SHIFT); 320 __flush_tlb_all(); 321 } 322 323 void __init early_ioremap_reset(void) 324 { 325 enum fixed_addresses idx; 326 - unsigned long *pte, phys, addr; 327 328 after_paging_init = 1; 329 for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) { 330 addr = fix_to_virt(idx); 331 pte = early_ioremap_pte(addr); 332 - if (*pte & _PAGE_PRESENT) { 333 - phys = *pte & PAGE_MASK; 334 set_fixmap(idx, phys); 335 } 336 } ··· 340 static void __init __early_set_fixmap(enum fixed_addresses idx, 341 unsigned long phys, pgprot_t flags) 342 { 343 - unsigned long *pte, addr = __fix_to_virt(idx); 344 345 if (idx >= __end_of_fixed_addresses) { 346 BUG(); ··· 349 } 350 pte = early_ioremap_pte(addr); 351 if (pgprot_val(flags)) 352 - *pte = (phys & PAGE_MASK) | pgprot_val(flags); 353 else 354 - *pte = 0; 355 __flush_tlb_one(addr); 356 } 357
··· 260 early_param("early_ioremap_debug", early_ioremap_debug_setup); 261 262 static __initdata int after_paging_init; 263 + static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] 264 __attribute__((aligned(PAGE_SIZE))); 265 266 + static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) 267 { 268 + pgd_t *pgd = &swapper_pg_dir[pgd_index(addr)]; 269 + pud_t *pud = pud_offset(pgd, addr); 270 + pmd_t *pmd = pmd_offset(pud, addr); 271 + 272 + return pmd; 273 } 274 275 + static inline pte_t * __init early_ioremap_pte(unsigned long addr) 276 { 277 + return &bm_pte[pte_index(addr)]; 278 } 279 280 void __init early_ioremap_init(void) 281 { 282 + pmd_t *pmd; 283 284 if (early_ioremap_debug) 285 printk(KERN_INFO "early_ioremap_init()\n"); 286 287 + pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); 288 memset(bm_pte, 0, sizeof(bm_pte)); 289 + pmd_populate_kernel(&init_mm, pmd, bm_pte); 290 + 291 /* 292 + * The boot-ioremap range spans multiple pmds, for which 293 * we are not prepared: 294 */ 295 + if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { 296 WARN_ON(1); 297 + printk(KERN_WARNING "pmd %p != %p\n", 298 + pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))); 299 printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", 300 + fix_to_virt(FIX_BTMAP_BEGIN)); 301 printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n", 302 + fix_to_virt(FIX_BTMAP_END)); 303 304 printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END); 305 printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n", ··· 304 305 void __init early_ioremap_clear(void) 306 { 307 + pmd_t *pmd; 308 309 if (early_ioremap_debug) 310 printk(KERN_INFO "early_ioremap_clear()\n"); 311 312 + pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); 313 + pmd_clear(pmd); 314 + paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT); 315 __flush_tlb_all(); 316 } 317 318 void __init early_ioremap_reset(void) 319 { 320 enum fixed_addresses idx; 321 + unsigned long addr, phys; 322 + pte_t *pte; 323 324 after_paging_init = 1; 325 for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) { 326 addr = fix_to_virt(idx); 327 pte = early_ioremap_pte(addr); 328 + if (pte_present(*pte)) { 329 + phys = pte_val(*pte) & PAGE_MASK; 330 set_fixmap(idx, phys); 331 } 332 } ··· 334 static void __init __early_set_fixmap(enum fixed_addresses idx, 335 unsigned long phys, pgprot_t flags) 336 { 337 + unsigned long addr = __fix_to_virt(idx); 338 + pte_t *pte; 339 340 if (idx >= __end_of_fixed_addresses) { 341 BUG(); ··· 342 } 343 pte = early_ioremap_pte(addr); 344 if (pgprot_val(flags)) 345 + set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); 346 else 347 + pte_clear(NULL, addr, pte); 348 __flush_tlb_one(addr); 349 } 350
+126 -16
arch/x86/mm/pageattr.c
··· 8 #include <linux/sched.h> 9 #include <linux/slab.h> 10 #include <linux/mm.h> 11 12 #include <asm/e820.h> 13 #include <asm/processor.h> ··· 192 * or when the present bit is not set. Otherwise we would return a 193 * pointer to a nonexisting mapping. 194 */ 195 - pte_t *lookup_address(unsigned long address, int *level) 196 { 197 pgd_t *pgd = pgd_offset_k(address); 198 pud_t *pud; ··· 253 try_preserve_large_page(pte_t *kpte, unsigned long address, 254 struct cpa_data *cpa) 255 { 256 - unsigned long nextpage_addr, numpages, pmask, psize, flags; 257 pte_t new_pte, old_pte, *tmp; 258 pgprot_t old_prot, new_prot; 259 - int level, do_split = 1; 260 261 spin_lock_irqsave(&pgd_lock, flags); 262 /* ··· 304 new_prot = static_protections(new_prot, address); 305 306 /* 307 * If there are no changes, return. maxpages has been updated 308 * above: 309 */ ··· 350 return do_split; 351 } 352 353 static int split_large_page(pte_t *kpte, unsigned long address) 354 { 355 unsigned long flags, pfn, pfninc = 1; 356 - gfp_t gfp_flags = GFP_KERNEL; 357 unsigned int i, level; 358 pte_t *pbase, *tmp; 359 pgprot_t ref_prot; 360 struct page *base; 361 362 - #ifdef CONFIG_DEBUG_PAGEALLOC 363 - gfp_flags = GFP_ATOMIC | __GFP_NOWARN; 364 - #endif 365 - base = alloc_pages(gfp_flags, 0); 366 - if (!base) 367 - return -ENOMEM; 368 - 369 spin_lock_irqsave(&pgd_lock, flags); 370 /* 371 * Check for races, another CPU might have split this page 372 * up for us already: ··· 491 base = NULL; 492 493 out_unlock: 494 spin_unlock_irqrestore(&pgd_lock, flags); 495 - 496 - if (base) 497 - __free_pages(base, 0); 498 499 return 0; 500 } 501 502 static int __change_page_attr(unsigned long address, struct cpa_data *cpa) 503 { 504 - int level, do_split, err; 505 struct page *kpte_page; 506 pte_t *kpte; 507 ··· 700 * Check whether we really changed something: 701 */ 702 if (!cpa.flushtlb) 703 - return ret; 704 705 /* 706 * No need to flush, when we did not set any of the caching ··· 719 else 720 cpa_flush_all(cache); 721 722 return ret; 723 } 724 ··· 874 * but that can deadlock->flush only current cpu: 875 */ 876 __flush_tlb_all(); 877 } 878 #endif 879
··· 8 #include <linux/sched.h> 9 #include <linux/slab.h> 10 #include <linux/mm.h> 11 + #include <linux/interrupt.h> 12 13 #include <asm/e820.h> 14 #include <asm/processor.h> ··· 191 * or when the present bit is not set. Otherwise we would return a 192 * pointer to a nonexisting mapping. 193 */ 194 + pte_t *lookup_address(unsigned long address, unsigned int *level) 195 { 196 pgd_t *pgd = pgd_offset_k(address); 197 pud_t *pud; ··· 252 try_preserve_large_page(pte_t *kpte, unsigned long address, 253 struct cpa_data *cpa) 254 { 255 + unsigned long nextpage_addr, numpages, pmask, psize, flags, addr; 256 pte_t new_pte, old_pte, *tmp; 257 pgprot_t old_prot, new_prot; 258 + int i, do_split = 1; 259 + unsigned int level; 260 261 spin_lock_irqsave(&pgd_lock, flags); 262 /* ··· 302 new_prot = static_protections(new_prot, address); 303 304 /* 305 + * We need to check the full range, whether 306 + * static_protection() requires a different pgprot for one of 307 + * the pages in the range we try to preserve: 308 + */ 309 + addr = address + PAGE_SIZE; 310 + for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE) { 311 + pgprot_t chk_prot = static_protections(new_prot, addr); 312 + 313 + if (pgprot_val(chk_prot) != pgprot_val(new_prot)) 314 + goto out_unlock; 315 + } 316 + 317 + /* 318 * If there are no changes, return. maxpages has been updated 319 * above: 320 */ ··· 335 return do_split; 336 } 337 338 + static LIST_HEAD(page_pool); 339 + static unsigned long pool_size, pool_pages, pool_low; 340 + static unsigned long pool_used, pool_failed, pool_refill; 341 + 342 + static void cpa_fill_pool(void) 343 + { 344 + struct page *p; 345 + gfp_t gfp = GFP_KERNEL; 346 + 347 + /* Do not allocate from interrupt context */ 348 + if (in_irq() || irqs_disabled()) 349 + return; 350 + /* 351 + * Check unlocked. It does not matter when we have one more 352 + * page in the pool. The bit lock avoids recursive pool 353 + * allocations: 354 + */ 355 + if (pool_pages >= pool_size || test_and_set_bit_lock(0, &pool_refill)) 356 + return; 357 + 358 + #ifdef CONFIG_DEBUG_PAGEALLOC 359 + /* 360 + * We could do: 361 + * gfp = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; 362 + * but this fails on !PREEMPT kernels 363 + */ 364 + gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; 365 + #endif 366 + 367 + while (pool_pages < pool_size) { 368 + p = alloc_pages(gfp, 0); 369 + if (!p) { 370 + pool_failed++; 371 + break; 372 + } 373 + spin_lock_irq(&pgd_lock); 374 + list_add(&p->lru, &page_pool); 375 + pool_pages++; 376 + spin_unlock_irq(&pgd_lock); 377 + } 378 + clear_bit_unlock(0, &pool_refill); 379 + } 380 + 381 + #define SHIFT_MB (20 - PAGE_SHIFT) 382 + #define ROUND_MB_GB ((1 << 10) - 1) 383 + #define SHIFT_MB_GB 10 384 + #define POOL_PAGES_PER_GB 16 385 + 386 + void __init cpa_init(void) 387 + { 388 + struct sysinfo si; 389 + unsigned long gb; 390 + 391 + si_meminfo(&si); 392 + /* 393 + * Calculate the number of pool pages: 394 + * 395 + * Convert totalram (nr of pages) to MiB and round to the next 396 + * GiB. Shift MiB to GiB and multiply the result by 397 + * POOL_PAGES_PER_GB: 398 + */ 399 + gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB; 400 + pool_size = POOL_PAGES_PER_GB * gb; 401 + pool_low = pool_size; 402 + 403 + cpa_fill_pool(); 404 + printk(KERN_DEBUG 405 + "CPA: page pool initialized %lu of %lu pages preallocated\n", 406 + pool_pages, pool_size); 407 + } 408 + 409 static int split_large_page(pte_t *kpte, unsigned long address) 410 { 411 unsigned long flags, pfn, pfninc = 1; 412 unsigned int i, level; 413 pte_t *pbase, *tmp; 414 pgprot_t ref_prot; 415 struct page *base; 416 417 + /* 418 + * Get a page from the pool. The pool list is protected by the 419 + * pgd_lock, which we have to take anyway for the split 420 + * operation: 421 + */ 422 spin_lock_irqsave(&pgd_lock, flags); 423 + if (list_empty(&page_pool)) { 424 + spin_unlock_irqrestore(&pgd_lock, flags); 425 + return -ENOMEM; 426 + } 427 + 428 + base = list_first_entry(&page_pool, struct page, lru); 429 + list_del(&base->lru); 430 + pool_pages--; 431 + 432 + if (pool_pages < pool_low) 433 + pool_low = pool_pages; 434 + 435 /* 436 * Check for races, another CPU might have split this page 437 * up for us already: ··· 396 base = NULL; 397 398 out_unlock: 399 + /* 400 + * If we dropped out via the lookup_address check under 401 + * pgd_lock then stick the page back into the pool: 402 + */ 403 + if (base) { 404 + list_add(&base->lru, &page_pool); 405 + pool_pages++; 406 + } else 407 + pool_used++; 408 spin_unlock_irqrestore(&pgd_lock, flags); 409 410 return 0; 411 } 412 413 static int __change_page_attr(unsigned long address, struct cpa_data *cpa) 414 { 415 + int do_split, err; 416 + unsigned int level; 417 struct page *kpte_page; 418 pte_t *kpte; 419 ··· 598 * Check whether we really changed something: 599 */ 600 if (!cpa.flushtlb) 601 + goto out; 602 603 /* 604 * No need to flush, when we did not set any of the caching ··· 617 else 618 cpa_flush_all(cache); 619 620 + out: 621 + cpa_fill_pool(); 622 return ret; 623 } 624 ··· 770 * but that can deadlock->flush only current cpu: 771 */ 772 __flush_tlb_all(); 773 + 774 + /* 775 + * Try to refill the page pool here. We can do this only after 776 + * the tlb flush. 777 + */ 778 + cpa_fill_pool(); 779 } 780 #endif 781
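The arithmetic in cpa_init() works out as: totalram pages, shifted to MiB, rounded up to the next GiB, times 16 pool pages per GiB. A user-space rendering of the same computation (PAGE_SHIFT and the 2 GiB totalram figure are example values, not taken from the patch):

#include <stdio.h>

#define PAGE_SHIFT 12
#define SHIFT_MB (20 - PAGE_SHIFT)
#define ROUND_MB_GB ((1 << 10) - 1)
#define SHIFT_MB_GB 10
#define POOL_PAGES_PER_GB 16

int main(void)
{
        unsigned long totalram = (2UL << 30) >> PAGE_SHIFT; /* 2 GiB of RAM */
        unsigned long gb = ((totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;

        /* 2 GiB of RAM -> 32 preallocated pool pages */
        printf("pool_size = %lu pages\n", POOL_PAGES_PER_GB * gb);
        return 0;
}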
+2 -2
arch/x86/power/Makefile
··· 1 - obj-$(CONFIG_PM) += cpu.o 2 - obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
··· 1 + obj-$(CONFIG_PM_SLEEP) += cpu_$(BITS).o 2 + obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o
+1 -1
arch/x86/power/cpu.c arch/x86/power/cpu_32.c
··· 40 savesegment(ss, ctxt->ss); 41 42 /* 43 - * control registers 44 */ 45 ctxt->cr0 = read_cr0(); 46 ctxt->cr2 = read_cr2();
··· 40 savesegment(ss, ctxt->ss); 41 42 /* 43 + * control registers 44 */ 45 ctxt->cr0 = read_cr0(); 46 ctxt->cr2 = read_cr2();
+169
arch/x86/power/hibernate_64.c
···
··· 1 + /* 2 + * Hibernation support for x86-64 3 + * 4 + * Distribute under GPLv2 5 + * 6 + * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl> 7 + * Copyright (c) 2002 Pavel Machek <pavel@suse.cz> 8 + * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> 9 + */ 10 + 11 + #include <linux/smp.h> 12 + #include <linux/suspend.h> 13 + #include <asm/proto.h> 14 + #include <asm/page.h> 15 + #include <asm/pgtable.h> 16 + #include <asm/mtrr.h> 17 + 18 + /* References to section boundaries */ 19 + extern const void __nosave_begin, __nosave_end; 20 + 21 + /* Defined in hibernate_asm_64.S */ 22 + extern int restore_image(void); 23 + 24 + /* 25 + * Address to jump to in the last phase of restore in order to get to the image 26 + * kernel's text (this value is passed in the image header). 27 + */ 28 + unsigned long restore_jump_address; 29 + 30 + /* 31 + * Value of the cr3 register from before the hibernation (this value is passed 32 + * in the image header). 33 + */ 34 + unsigned long restore_cr3; 35 + 36 + pgd_t *temp_level4_pgt; 37 + 38 + void *relocated_restore_code; 39 + 40 + static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) 41 + { 42 + long i, j; 43 + 44 + i = pud_index(address); 45 + pud = pud + i; 46 + for (; i < PTRS_PER_PUD; pud++, i++) { 47 + unsigned long paddr; 48 + pmd_t *pmd; 49 + 50 + paddr = address + i*PUD_SIZE; 51 + if (paddr >= end) 52 + break; 53 + 54 + pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); 55 + if (!pmd) 56 + return -ENOMEM; 57 + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); 58 + for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) { 59 + unsigned long pe; 60 + 61 + if (paddr >= end) 62 + break; 63 + pe = __PAGE_KERNEL_LARGE_EXEC | paddr; 64 + pe &= __supported_pte_mask; 65 + set_pmd(pmd, __pmd(pe)); 66 + } 67 + } 68 + return 0; 69 + } 70 + 71 + static int set_up_temporary_mappings(void) 72 + { 73 + unsigned long start, end, next; 74 + int error; 75 + 76 + temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC); 77 + if (!temp_level4_pgt) 78 + return -ENOMEM; 79 + 80 + /* It is safe to reuse the original kernel mapping */ 81 + set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), 82 + init_level4_pgt[pgd_index(__START_KERNEL_map)]); 83 + 84 + /* Set up the direct mapping from scratch */ 85 + start = (unsigned long)pfn_to_kaddr(0); 86 + end = (unsigned long)pfn_to_kaddr(end_pfn); 87 + 88 + for (; start < end; start = next) { 89 + pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC); 90 + if (!pud) 91 + return -ENOMEM; 92 + next = start + PGDIR_SIZE; 93 + if (next > end) 94 + next = end; 95 + if ((error = res_phys_pud_init(pud, __pa(start), __pa(next)))) 96 + return error; 97 + set_pgd(temp_level4_pgt + pgd_index(start), 98 + mk_kernel_pgd(__pa(pud))); 99 + } 100 + return 0; 101 + } 102 + 103 + int swsusp_arch_resume(void) 104 + { 105 + int error; 106 + 107 + /* We have got enough memory and from now on we cannot recover */ 108 + if ((error = set_up_temporary_mappings())) 109 + return error; 110 + 111 + relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC); 112 + if (!relocated_restore_code) 113 + return -ENOMEM; 114 + memcpy(relocated_restore_code, &core_restore_code, 115 + &restore_registers - &core_restore_code); 116 + 117 + restore_image(); 118 + return 0; 119 + } 120 + 121 + /* 122 + * pfn_is_nosave - check if given pfn is in the 'nosave' section 123 + */ 124 + 125 + int pfn_is_nosave(unsigned long pfn) 126 + { 127 + unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; 128 + unsigned long nosave_end_pfn = 
PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; 129 + return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); 130 + } 131 + 132 + struct restore_data_record { 133 + unsigned long jump_address; 134 + unsigned long cr3; 135 + unsigned long magic; 136 + }; 137 + 138 + #define RESTORE_MAGIC 0x0123456789ABCDEFUL 139 + 140 + /** 141 + * arch_hibernation_header_save - populate the architecture specific part 142 + * of a hibernation image header 143 + * @addr: address to save the data at 144 + */ 145 + int arch_hibernation_header_save(void *addr, unsigned int max_size) 146 + { 147 + struct restore_data_record *rdr = addr; 148 + 149 + if (max_size < sizeof(struct restore_data_record)) 150 + return -EOVERFLOW; 151 + rdr->jump_address = restore_jump_address; 152 + rdr->cr3 = restore_cr3; 153 + rdr->magic = RESTORE_MAGIC; 154 + return 0; 155 + } 156 + 157 + /** 158 + * arch_hibernation_header_restore - read the architecture specific data 159 + * from the hibernation image header 160 + * @addr: address to read the data from 161 + */ 162 + int arch_hibernation_header_restore(void *addr) 163 + { 164 + struct restore_data_record *rdr = addr; 165 + 166 + restore_jump_address = rdr->jump_address; 167 + restore_cr3 = rdr->cr3; 168 + return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL; 169 + }
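The magic field in struct restore_data_record is what protects resume against an image written by an incompatible kernel: arch_hibernation_header_restore() only hands back jump_address and cr3 when the magic matches. A standalone sketch of that handshake, with stand-in values instead of the real restore_jump_address/restore_cr3 and illustrative helper names:

    #include <stdio.h>
    #include <errno.h>

    struct restore_data_record {
            unsigned long jump_address;
            unsigned long cr3;
            unsigned long magic;
    };

    #define RESTORE_MAGIC 0x0123456789ABCDEFUL

    /* Writer: refuse if the header slot is too small, then stamp the magic */
    static int header_save(void *addr, unsigned int max_size,
                           unsigned long jump, unsigned long cr3)
    {
            struct restore_data_record *rdr = addr;

            if (max_size < sizeof(*rdr))
                    return -EOVERFLOW;
            rdr->jump_address = jump;
            rdr->cr3 = cr3;
            rdr->magic = RESTORE_MAGIC;
            return 0;
    }

    /* Reader: accept the record only if the magic matches */
    static int header_restore(const void *addr,
                              unsigned long *jump, unsigned long *cr3)
    {
            const struct restore_data_record *rdr = addr;

            if (rdr->magic != RESTORE_MAGIC)
                    return -EINVAL;
            *jump = rdr->jump_address;
            *cr3 = rdr->cr3;
            return 0;
    }

    int main(void)
    {
            unsigned long page[8];
            unsigned long jump, cr3;

            header_save(page, sizeof(page), 0xffffffff80200000UL, 0x1000UL);
            if (header_restore(page, &jump, &cr3) == 0)
                    printf("accepted: jump=%#lx cr3=%#lx\n", jump, cr3);
            return 0;
    }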
+3 -3
arch/x86/power/suspend.c arch/x86/power/hibernate_32.c
··· 1 /* 2 - * Suspend support specific for i386 - temporary page tables 3 * 4 * Distribute under GPLv2 5 * ··· 13 #include <asm/page.h> 14 #include <asm/pgtable.h> 15 16 - /* Defined in arch/i386/power/swsusp.S */ 17 extern int restore_image(void); 18 19 /* References to section boundaries */ ··· 23 pgd_t *resume_pg_dir; 24 25 /* The following three functions are based on the analogous code in 26 - * arch/i386/mm/init.c 27 */ 28 29 /*
··· 1 /* 2 + * Hibernation support specific for i386 - temporary page tables 3 * 4 * Distribute under GPLv2 5 * ··· 13 #include <asm/page.h> 14 #include <asm/pgtable.h> 15 16 + /* Defined in hibernate_asm_32.S */ 17 extern int restore_image(void); 18 19 /* References to section boundaries */ ··· 23 pgd_t *resume_pg_dir; 24 25 /* The following three functions are based on the analogous code in 26 + * arch/x86/mm/init_32.c 27 */ 28 29 /*
+1 -2
arch/x86/power/swsusp.S arch/x86/power/hibernate_asm_32.S
··· 1 .text 2 3 - /* Originally gcc generated, modified by hand 4 - * 5 * This may not use any stack, nor any variable that is not "NoSave": 6 * 7 * It's rewriting one kernel image with another. What is stack in "old"
··· 1 .text 2 3 + /* 4 * This may not use any stack, nor any variable that is not "NoSave": 5 * 6 * It's rewriting one kernel image with another. What is stack in "old"
+3 -3
arch/x86/xen/mmu.c
··· 58 59 xmaddr_t arbitrary_virt_to_machine(unsigned long address) 60 { 61 - int level; 62 pte_t *pte = lookup_address(address, &level); 63 unsigned offset = address & PAGE_MASK; 64 ··· 71 { 72 pte_t *pte, ptev; 73 unsigned long address = (unsigned long)vaddr; 74 - int level; 75 76 pte = lookup_address(address, &level); 77 BUG_ON(pte == NULL); ··· 86 { 87 pte_t *pte, ptev; 88 unsigned long address = (unsigned long)vaddr; 89 - int level; 90 91 pte = lookup_address(address, &level); 92 BUG_ON(pte == NULL);
··· 58 59 xmaddr_t arbitrary_virt_to_machine(unsigned long address) 60 { 61 + unsigned int level; 62 pte_t *pte = lookup_address(address, &level); 63 unsigned offset = address & PAGE_MASK; 64 ··· 71 { 72 pte_t *pte, ptev; 73 unsigned long address = (unsigned long)vaddr; 74 + unsigned int level; 75 76 pte = lookup_address(address, &level); 77 BUG_ON(pte == NULL); ··· 86 { 87 pte_t *pte, ptev; 88 unsigned long address = (unsigned long)vaddr; 89 + unsigned int level; 90 91 pte = lookup_address(address, &level); 92 BUG_ON(pte == NULL);
+5 -5
arch/x86/xen/time.c
··· 217 /* Get the CPU speed from Xen */ 218 unsigned long xen_cpu_khz(void) 219 { 220 - u64 cpu_khz = 1000000ULL << 32; 221 const struct vcpu_time_info *info = 222 &HYPERVISOR_shared_info->vcpu_info[0].time; 223 224 - do_div(cpu_khz, info->tsc_to_system_mul); 225 if (info->tsc_shift < 0) 226 - cpu_khz <<= -info->tsc_shift; 227 else 228 - cpu_khz >>= info->tsc_shift; 229 230 - return cpu_khz; 231 } 232 233 /*
··· 217 /* Get the CPU speed from Xen */ 218 unsigned long xen_cpu_khz(void) 219 { 220 + u64 xen_khz = 1000000ULL << 32; 221 const struct vcpu_time_info *info = 222 &HYPERVISOR_shared_info->vcpu_info[0].time; 223 224 + do_div(xen_khz, info->tsc_to_system_mul); 225 if (info->tsc_shift < 0) 226 + xen_khz <<= -info->tsc_shift; 227 else 228 + xen_khz >>= info->tsc_shift; 229 230 + return xen_khz; 231 } 232 233 /*
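The rename of the local variable avoids shadowing the global cpu_khz. The arithmetic itself inverts Xen's fixed-point clock parameters: the hypervisor publishes tsc_to_system_mul as a 32.32 multiplier so that system time advances by roughly (tsc_delta * mul) >> 32 nanoseconds, with tsc_delta pre-scaled by tsc_shift, which is why the function starts from 1000000ULL << 32. A standalone sketch of the same computation, using a made-up multiplier rather than a real hypervisor value:

    #include <stdio.h>
    #include <stdint.h>

    /*
     * ns = (ticks * mul) >> 32 (after shifting ticks by tsc_shift),
     * so kHz = (10^6 << 32) / mul, shifted the opposite way.
     */
    static unsigned long cpu_khz_from(uint32_t tsc_to_system_mul, int tsc_shift)
    {
            uint64_t khz = 1000000ULL << 32;

            khz /= tsc_to_system_mul;       /* stands in for do_div() */
            if (tsc_shift < 0)
                    khz <<= -tsc_shift;
            else
                    khz >>= tsc_shift;
            return (unsigned long)khz;
    }

    int main(void)
    {
            /* A 2 GHz TSC ticks every 0.5 ns -> mul = 0.5 * 2^32, shift 0 */
            printf("%lu kHz\n", cpu_khz_from(1U << 31, 0)); /* 2000000 */
            return 0;
    }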
+1 -6
drivers/acpi/bus.c
··· 31 #include <linux/pm.h> 32 #include <linux/device.h> 33 #include <linux/proc_fs.h> 34 #ifdef CONFIG_X86 35 #include <asm/mpspec.h> 36 #endif ··· 40 41 #define _COMPONENT ACPI_BUS_COMPONENT 42 ACPI_MODULE_NAME("bus"); 43 - #ifdef CONFIG_X86 44 - extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger); 45 - #endif 46 47 struct acpi_device *acpi_root; 48 struct proc_dir_entry *acpi_root_dir; ··· 651 652 #ifdef CONFIG_X86 653 if (!acpi_ioapic) { 654 - extern u8 acpi_sci_flags; 655 - 656 /* compatible (0) means level (3) */ 657 if (!(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)) { 658 acpi_sci_flags &= ~ACPI_MADT_TRIGGER_MASK; ··· 660 acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt, 661 (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2); 662 } else { 663 - extern int acpi_sci_override_gsi; 664 /* 665 * now that acpi_gbl_FADT is initialized, 666 * update it with result from INT_SRC_OVR parsing
··· 31 #include <linux/pm.h> 32 #include <linux/device.h> 33 #include <linux/proc_fs.h> 34 + #include <linux/acpi.h> 35 #ifdef CONFIG_X86 36 #include <asm/mpspec.h> 37 #endif ··· 39 40 #define _COMPONENT ACPI_BUS_COMPONENT 41 ACPI_MODULE_NAME("bus"); 42 43 struct acpi_device *acpi_root; 44 struct proc_dir_entry *acpi_root_dir; ··· 653 654 #ifdef CONFIG_X86 655 if (!acpi_ioapic) { 656 /* compatible (0) means level (3) */ 657 if (!(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)) { 658 acpi_sci_flags &= ~ACPI_MADT_TRIGGER_MASK; ··· 664 acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt, 665 (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2); 666 } else { 667 /* 668 * now that acpi_gbl_FADT is initialized, 669 * update it with result from INT_SRC_OVR parsing
+2 -2
drivers/lguest/page_tables.c
··· 178 179 static void check_gpte(struct lg_cpu *cpu, pte_t gpte) 180 { 181 - if ((pte_flags(gpte) & (_PAGE_PWT|_PAGE_PSE)) 182 - || pte_pfn(gpte) >= cpu->lg->pfn_limit) 183 kill_guest(cpu, "bad page table entry"); 184 } 185
··· 178 179 static void check_gpte(struct lg_cpu *cpu, pte_t gpte) 180 { 181 + if ((pte_flags(gpte) & _PAGE_PSE) || 182 + pte_pfn(gpte) >= cpu->lg->pfn_limit) 183 kill_guest(cpu, "bad page table entry"); 184 } 185
+4
include/asm-x86/acpi.h
··· 89 extern int acpi_skip_timer_override; 90 extern int acpi_use_timer_override; 91 92 static inline void disable_acpi(void) 93 { 94 acpi_disabled = 1;
··· 89 extern int acpi_skip_timer_override; 90 extern int acpi_use_timer_override; 91 92 + extern u8 acpi_sci_flags; 93 + extern int acpi_sci_override_gsi; 94 + void acpi_pic_sci_set_trigger(unsigned int, u16); 95 + 96 static inline void disable_acpi(void) 97 { 98 acpi_disabled = 1;
+2
include/asm-x86/cacheflush.h
··· 44 45 void clflush_cache_range(void *addr, unsigned int size); 46 47 #ifdef CONFIG_DEBUG_RODATA 48 void mark_rodata_ro(void); 49 #endif
··· 44 45 void clflush_cache_range(void *addr, unsigned int size); 46 47 + void cpa_init(void); 48 + 49 #ifdef CONFIG_DEBUG_RODATA 50 void mark_rodata_ro(void); 51 #endif
+7 -2
include/asm-x86/geode.h
··· 206 return inw(base + reg + (timer * 8)); 207 } 208 209 - extern int __init geode_mfgpt_detect(void); 210 extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable); 211 extern int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable); 212 - extern int geode_mfgpt_alloc_timer(int timer, int domain, struct module *owner); 213 214 #define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1) 215 #define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0) 216 217 #endif
··· 206 return inw(base + reg + (timer * 8)); 207 } 208 209 extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable); 210 extern int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable); 211 + extern int geode_mfgpt_alloc_timer(int timer, int domain); 212 213 #define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1) 214 #define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0) 215 + 216 + #ifdef CONFIG_GEODE_MFGPT_TIMER 217 + extern int __init mfgpt_timer_setup(void); 218 + #else 219 + static inline int mfgpt_timer_setup(void) { return 0; } 220 + #endif 221 222 #endif
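The new mfgpt_timer_setup() declaration uses the standard config-stub idiom: with CONFIG_GEODE_MFGPT_TIMER off, callers transparently get a no-op static inline instead of sprinkling #ifdefs at every call site. The same pattern, reduced to a standalone example with illustrative names (FEATURE_FOO, foo_setup):

    #include <stdio.h>

    /*
     * With FEATURE_FOO defined, the real implementation would be linked in;
     * without it, callers get a no-op stub and identical call sites.
     */
    #ifdef FEATURE_FOO
    extern int foo_setup(void);
    #else
    static inline int foo_setup(void) { return 0; }
    #endif

    int main(void)
    {
            if (foo_setup() < 0)
                    printf("foo failed\n");
            else
                    printf("foo ready (or compiled out)\n");
            return 0;
    }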
-1
include/asm-x86/page_32.h
··· 48 typedef unsigned long phys_addr_t; 49 50 typedef union { pteval_t pte, pte_low; } pte_t; 51 - typedef pte_t boot_pte_t; 52 53 #endif /* __ASSEMBLY__ */ 54 #endif /* CONFIG_X86_PAE */
··· 48 typedef unsigned long phys_addr_t; 49 50 typedef union { pteval_t pte, pte_low; } pte_t; 51 52 #endif /* __ASSEMBLY__ */ 53 #endif /* CONFIG_X86_PAE */
+1 -1
include/asm-x86/pgtable.h
··· 255 * NOTE: the return type is pte_t but if the pmd is PSE then we return it 256 * as a pte too. 257 */ 258 - extern pte_t *lookup_address(unsigned long address, int *level); 259 260 /* local pte updates need not use xchg for locking */ 261 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
··· 255 * NOTE: the return type is pte_t but if the pmd is PSE then we return it 256 * as a pte too. 257 */ 258 + extern pte_t *lookup_address(unsigned long address, unsigned int *level); 259 260 /* local pte updates need not use xchg for locking */ 261 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
-4
include/asm-x86/pgtable_32.h
··· 52 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) 53 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) 54 55 - #define TWOLEVEL_PGDIR_SHIFT 22 56 - #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT) 57 - #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS) 58 - 59 /* Just any arbitrary offset to the start of the vmalloc VM area: the 60 * current 8MB value just means that there will be a 8MB "hole" after the 61 * physical memory until the kernel virtual memory starts. That means that
··· 52 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) 53 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) 54 55 /* Just any arbitrary offset to the start of the vmalloc VM area: the 56 * current 8MB value just means that there will be a 8MB "hole" after the 57 * physical memory until the kernel virtual memory starts. That means that
-9
include/linux/compiler-gcc4.h
··· 5 /* These definitions are for GCC v4.x. */ 6 #include <linux/compiler-gcc.h> 7 8 - #ifdef CONFIG_FORCED_INLINING 9 - # undef inline 10 - # undef __inline__ 11 - # undef __inline 12 - # define inline inline __attribute__((always_inline)) 13 - # define __inline__ __inline__ __attribute__((always_inline)) 14 - # define __inline __inline __attribute__((always_inline)) 15 - #endif 16 - 17 #define __used __attribute__((__used__)) 18 #define __must_check __attribute__((warn_unused_result)) 19 #define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
··· 5 /* These definitions are for GCC v4.x. */ 6 #include <linux/compiler-gcc.h> 7 8 #define __used __attribute__((__used__)) 9 #define __must_check __attribute__((warn_unused_result)) 10 #define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
+1 -1
init/Kconfig
··· 587 disabled, and can be overridden at runtime by setting 588 /proc/sys/kernel/randomize_va_space to 2. 589 590 - On non-ancient distros (post-2000 ones) Y is usually a safe choice. 591 592 config BASE_FULL 593 default y
··· 587 disabled, and can be overridden at runtime by setting 588 /proc/sys/kernel/randomize_va_space to 2. 589 590 + On non-ancient distros (post-2000 ones) N is usually a safe choice. 591 592 config BASE_FULL 593 default y
+1 -1
init/main.c
··· 558 preempt_disable(); 559 build_all_zonelists(); 560 page_alloc_init(); 561 - enable_debug_pagealloc(); 562 printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line); 563 parse_early_param(); 564 parse_args("Booting kernel", static_command_line, __start___param, ··· 613 vfs_caches_init_early(); 614 cpuset_init_early(); 615 mem_init(); 616 cpu_hotplug_init(); 617 kmem_cache_init(); 618 setup_per_cpu_pageset();
··· 558 preempt_disable(); 559 build_all_zonelists(); 560 page_alloc_init(); 561 printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line); 562 parse_early_param(); 563 parse_args("Booting kernel", static_command_line, __start___param, ··· 614 vfs_caches_init_early(); 615 cpuset_init_early(); 616 mem_init(); 617 + enable_debug_pagealloc(); 618 cpu_hotplug_init(); 619 kmem_cache_init(); 620 setup_per_cpu_pageset();
-14
lib/Kconfig.debug
··· 465 some architectures or if you use external debuggers. 466 If you don't debug the kernel, you can say N. 467 468 - config FORCED_INLINING 469 - bool "Force gcc to inline functions marked 'inline'" 470 - depends on DEBUG_KERNEL 471 - default y 472 - help 473 - This option determines if the kernel forces gcc to inline the functions 474 - developers have marked 'inline'. Doing so takes away freedom from gcc to 475 - do what it thinks is best, which is desirable for the gcc 3.x series of 476 - compilers. The gcc 4.x series have a rewritten inlining algorithm and 477 - disabling this option will generate a smaller kernel there. Hopefully 478 - this algorithm is so good that allowing gcc4 to make the decision can 479 - become the default in the future, until then this option is there to 480 - test gcc for this. 481 - 482 config BOOT_PRINTK_DELAY 483 bool "Delay each boot printk message by N milliseconds" 484 depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY
··· 465 some architectures or if you use external debuggers. 466 If you don't debug the kernel, you can say N. 467 468 config BOOT_PRINTK_DELAY 469 bool "Delay each boot printk message by N milliseconds" 470 depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY
+27 -22
lib/vsprintf.c
··· 26 #include <asm/page.h> /* for PAGE_SIZE */ 27 #include <asm/div64.h> 28 29 /** 30 * simple_strtoul - convert a string to an unsigned long 31 * @cp: The start of the string ··· 44 if (*cp == '0') { 45 base = 8; 46 cp++; 47 - if ((toupper(*cp) == 'X') && isxdigit(cp[1])) { 48 cp++; 49 base = 16; 50 } 51 } 52 } else if (base == 16) { 53 - if (cp[0] == '0' && toupper(cp[1]) == 'X') 54 cp += 2; 55 } 56 while (isxdigit(*cp) && 57 - (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) { 58 result = result*base + value; 59 cp++; 60 } ··· 95 if (*cp == '0') { 96 base = 8; 97 cp++; 98 - if ((toupper(*cp) == 'X') && isxdigit(cp[1])) { 99 cp++; 100 base = 16; 101 } 102 } 103 } else if (base == 16) { 104 - if (cp[0] == '0' && toupper(cp[1]) == 'X') 105 cp += 2; 106 } 107 - while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp-'0' : (islower(*cp) 108 - ? toupper(*cp) : *cp)-'A'+10) < base) { 109 result = result*base + value; 110 cp++; 111 } ··· 363 #define PLUS 4 /* show plus */ 364 #define SPACE 8 /* space if plus */ 365 #define LEFT 16 /* left justified */ 366 - #define SPECIAL 32 /* 0x */ 367 - #define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ 368 369 static char *number(char *buf, char *end, unsigned long long num, int base, int size, int precision, int type) 370 { 371 - char sign,tmp[66]; 372 - const char *digits; 373 - /* we are called with base 8, 10 or 16, only, thus don't need "g..." */ 374 - static const char small_digits[] = "0123456789abcdefx"; /* "ghijklmnopqrstuvwxyz"; */ 375 - static const char large_digits[] = "0123456789ABCDEFX"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ 376 int need_pfx = ((type & SPECIAL) && base != 10); 377 int i; 378 379 - digits = (type & LARGE) ? large_digits : small_digits; 380 if (type & LEFT) 381 type &= ~ZEROPAD; 382 - if (base < 2 || base > 36) 383 - return NULL; 384 sign = 0; 385 if (type & SIGN) { 386 if ((signed long long) num < 0) { ··· 408 tmp[i++] = '0'; 409 /* Generic code, for any base: 410 else do { 411 - tmp[i++] = digits[do_div(num,base)]; 412 } while (num != 0); 413 */ 414 else if (base != 10) { /* 8 or 16 */ ··· 416 int shift = 3; 417 if (base == 16) shift = 4; 418 do { 419 - tmp[i++] = digits[((unsigned char)num) & mask]; 420 num >>= shift; 421 } while (num); 422 } else { /* base 10 */ ··· 448 ++buf; 449 if (base == 16) { 450 if (buf < end) 451 - *buf = digits[16]; /* for arbitrary base: digits[33]; */ 452 ++buf; 453 } 454 } ··· 648 continue; 649 650 case 'p': 651 if (field_width == -1) { 652 field_width = 2*sizeof(void *); 653 flags |= ZEROPAD; ··· 685 base = 8; 686 break; 687 688 - case 'X': 689 - flags |= LARGE; 690 case 'x': 691 base = 16; 692 break; 693
··· 26 #include <asm/page.h> /* for PAGE_SIZE */ 27 #include <asm/div64.h> 28 29 + /* Works only for digits and letters, but small and fast */ 30 + #define TOLOWER(x) ((x) | 0x20) 31 + 32 /** 33 * simple_strtoul - convert a string to an unsigned long 34 * @cp: The start of the string ··· 41 if (*cp == '0') { 42 base = 8; 43 cp++; 44 + if ((TOLOWER(*cp) == 'x') && isxdigit(cp[1])) { 45 cp++; 46 base = 16; 47 } 48 } 49 } else if (base == 16) { 50 + if (cp[0] == '0' && TOLOWER(cp[1]) == 'x') 51 cp += 2; 52 } 53 while (isxdigit(*cp) && 54 + (value = isdigit(*cp) ? *cp-'0' : TOLOWER(*cp)-'a'+10) < base) { 55 result = result*base + value; 56 cp++; 57 } ··· 92 if (*cp == '0') { 93 base = 8; 94 cp++; 95 + if ((TOLOWER(*cp) == 'x') && isxdigit(cp[1])) { 96 cp++; 97 base = 16; 98 } 99 } 100 } else if (base == 16) { 101 + if (cp[0] == '0' && TOLOWER(cp[1]) == 'x') 102 cp += 2; 103 } 104 + while (isxdigit(*cp) 105 + && (value = isdigit(*cp) ? *cp-'0' : TOLOWER(*cp)-'a'+10) < base) { 106 result = result*base + value; 107 cp++; 108 } ··· 360 #define PLUS 4 /* show plus */ 361 #define SPACE 8 /* space if plus */ 362 #define LEFT 16 /* left justified */ 363 + #define SMALL 32 /* Must be 32 == 0x20 */ 364 + #define SPECIAL 64 /* 0x */ 365 366 static char *number(char *buf, char *end, unsigned long long num, int base, int size, int precision, int type) 367 { 368 + /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ 369 + static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ 370 + 371 + char tmp[66]; 372 + char sign; 373 + char locase; 374 int need_pfx = ((type & SPECIAL) && base != 10); 375 int i; 376 377 + /* locase = 0 or 0x20. ORing digits or letters with 'locase' 378 + * produces same digits or (maybe lowercased) letters */ 379 + locase = (type & SMALL); 380 if (type & LEFT) 381 type &= ~ZEROPAD; 382 sign = 0; 383 if (type & SIGN) { 384 if ((signed long long) num < 0) { ··· 404 tmp[i++] = '0'; 405 /* Generic code, for any base: 406 else do { 407 + tmp[i++] = (digits[do_div(num,base)] | locase); 408 } while (num != 0); 409 */ 410 else if (base != 10) { /* 8 or 16 */ ··· 412 int shift = 3; 413 if (base == 16) shift = 4; 414 do { 415 + tmp[i++] = (digits[((unsigned char)num) & mask] | locase); 416 num >>= shift; 417 } while (num); 418 } else { /* base 10 */ ··· 444 ++buf; 445 if (base == 16) { 446 if (buf < end) 447 + *buf = ('X' | locase); 448 ++buf; 449 } 450 } ··· 644 continue; 645 646 case 'p': 647 + flags |= SMALL; 648 if (field_width == -1) { 649 field_width = 2*sizeof(void *); 650 flags |= ZEROPAD; ··· 680 base = 8; 681 break; 682 683 case 'x': 684 + flags |= SMALL; 685 + case 'X': 686 base = 16; 687 break; 688
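The switch from two digit tables to one works because of ASCII layout: upper- and lowercase letters differ only in bit 5 (0x20), and the digits '0'-'9' already have that bit set, so ORing with locase either lowercases a letter or does nothing. That is also why SMALL must be exactly 32. A standalone sketch of the trick (print_hex() is an illustrative name, not the kernel API):

    #include <stdio.h>

    #define SMALL 32 /* must be 32 == 0x20, the ASCII case bit */

    static const char digits[16] = "0123456789ABCDEF";

    /* Render v in hex; pass SMALL in type to get lowercase output */
    static void print_hex(unsigned int v, int type)
    {
            char tmp[16];
            char locase = type & SMALL;     /* 0 or 0x20 */
            int i = 0;

            do {
                    /* 'A'..'F' | 0x20 == 'a'..'f'; '0'..'9' are unchanged */
                    tmp[i++] = digits[v & 0xf] | locase;
                    v >>= 4;
            } while (v);
            while (i--)
                    putchar(tmp[i]);
            putchar('\n');
    }

    int main(void)
    {
            print_hex(0xDEADBEEF, 0);       /* prints DEADBEEF */
            print_hex(0xDEADBEEF, SMALL);   /* prints deadbeef */
            return 0;
    }

This is also why %p simply sets SMALL and why %x falls through into %X after setting it: the base-16 path is shared and only the case bit differs.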