Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (32 commits)
x86: cpa, strict range check in try_preserve_large_page()
x86: cpa, enable CONFIG_DEBUG_PAGEALLOC on 64-bit
x86: cpa, use page pool
x86: introduce page pool in cpa
x86: DEBUG_PAGEALLOC: enable after mem_init()
brk: help text typo fix
lguest: accept guest _PAGE_PWT page table entries
x86 PM: update stale comments
x86 PM: consolidate suspend and hibernation code
x86 PM: rename 32-bit files in arch/x86/power
x86 PM: move 64-bit hibernation files to arch/x86/power
x86: trivial printk optimizations
x86: fix early_ioremap pagetable ops
x86: construct 32-bit boot time page tables in native format.
x86, core: remove CONFIG_FORCED_INLINING
x86: avoid unused variable warning in mm/init_64.c
x86: fixup more paravirt fallout
brk: document randomize_va_space and CONFIG_COMPAT_BRK (was Re:
x86: fix sparse warnings in acpi/bus.c
x86: fix sparse warning in topology.c
...

+684 -467
-9
Documentation/feature-removal-schedule.txt
··· 111 111 112 112 --------------------------- 113 113 114 - What: CONFIG_FORCED_INLINING 115 - When: June 2006 116 - Why: Config option is there to see if gcc is good enough. (in january 117 - 2006). If it is, the behavior should just be the default. If it's not, 118 - the option should just go away entirely. 119 - Who: Arjan van de Ven 120 - 121 - --------------------------- 122 - 123 114 What: eepro100 network driver 124 115 When: January 2007 125 116 Why: replaced by the e100 driver
+29
Documentation/sysctl/kernel.txt
··· 41 41 - pid_max 42 42 - powersave-nap [ PPC only ] 43 43 - printk 44 + - randomize_va_space 44 45 - real-root-dev ==> Documentation/initrd.txt 45 46 - reboot-cmd [ SPARC only ] 46 47 - rtsig-max ··· 278 277 seconds, we do allow a burst of messages to pass through. 279 278 printk_ratelimit_burst specifies the number of messages we can 280 279 send before ratelimiting kicks in. 280 + 281 + ============================================================== 282 + 283 + randomize-va-space: 284 + 285 + This option can be used to select the type of process address 286 + space randomization that is used in the system, for architectures 287 + that support this feature. 288 + 289 + 0 - Turn the process address space randomization off by default. 290 + 291 + 1 - Make the addresses of mmap base, stack and VDSO page randomized. 292 + This, among other things, implies that shared libraries will be 293 + loaded to random addresses. Also for PIE-linked binaries, the location 294 + of code start is randomized. 295 + 296 + With heap randomization, the situation is a little bit more 297 + complicated. 298 + There a few legacy applications out there (such as some ancient 299 + versions of libc.so.5 from 1996) that assume that brk area starts 300 + just after the end of the code+bss. These applications break when 301 + start of the brk area is randomized. There are however no known 302 + non-legacy applications that would be broken this way, so for most 303 + systems it is safe to choose full randomization. However there is 304 + a CONFIG_COMPAT_BRK option for systems with ancient and/or broken 305 + binaries, that makes heap non-randomized, but keeps all other 306 + parts of process address space randomized if randomize_va_space 307 + sysctl is turned on. 281 308 282 309 ============================================================== 283 310
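[Illustration only, not part of the diff.] A quick way to see what the new randomize_va_space documentation describes is a tiny, hypothetical user-space probe. Run it several times with /proc/sys/kernel/randomize_va_space set to 0 and then to 1: with randomization on, the stack and mmap addresses change from run to run; whether the brk-based heap moves as well depends on CONFIG_COMPAT_BRK, as the text above explains.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/mman.h>

    int main(void)
    {
        int on_stack;
        void *small = malloc(16);   /* small chunks normally come from the brk area */
        void *anon = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* With randomize_va_space > 0 these values differ between runs. */
        printf("stack %p  heap %p  brk %p  mmap %p\n",
               (void *)&on_stack, small, sbrk(0), anon);

        munmap(anon, 4096);
        free(small);
        return 0;
    }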
+1 -5
arch/x86/Kconfig.debug
··· 34 34 35 35 This option will slow down process creation somewhat. 36 36 37 - comment "Page alloc debug is incompatible with Software Suspend on i386" 38 - depends on DEBUG_KERNEL && HIBERNATION 39 - depends on X86_32 40 - 41 37 config DEBUG_PAGEALLOC 42 38 bool "Debug page memory allocations" 43 - depends on DEBUG_KERNEL && X86_32 39 + depends on DEBUG_KERNEL 44 40 help 45 41 Unmap pages from the kernel linear mapping after free_pages(). 46 42 This results in a large slowdown, but helps to find certain types
+3 -1
arch/x86/Makefile
··· 191 191 # must be linked after kernel/ 192 192 drivers-$(CONFIG_OPROFILE) += arch/x86/oprofile/ 193 193 194 - ifeq ($(CONFIG_X86_32),y) 194 + # suspend and hibernation support 195 195 drivers-$(CONFIG_PM) += arch/x86/power/ 196 + 197 + ifeq ($(CONFIG_X86_32),y) 196 198 drivers-$(CONFIG_FB) += arch/x86/video/ 197 199 endif 198 200
+14 -10
arch/x86/boot/printf.c
··· 33 33 #define PLUS 4 /* show plus */ 34 34 #define SPACE 8 /* space if plus */ 35 35 #define LEFT 16 /* left justified */ 36 - #define SPECIAL 32 /* 0x */ 37 - #define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ 36 + #define SMALL 32 /* Must be 32 == 0x20 */ 37 + #define SPECIAL 64 /* 0x */ 38 38 39 39 #define do_div(n,base) ({ \ 40 40 int __res; \ ··· 45 45 static char *number(char *str, long num, int base, int size, int precision, 46 46 int type) 47 47 { 48 - char c, sign, tmp[66]; 49 - const char *digits = "0123456789abcdefghijklmnopqrstuvwxyz"; 48 + /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ 49 + static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ 50 + 51 + char tmp[66]; 52 + char c, sign, locase; 50 53 int i; 51 54 52 - if (type & LARGE) 53 - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; 55 + /* locase = 0 or 0x20. ORing digits or letters with 'locase' 56 + * produces same digits or (maybe lowercased) letters */ 57 + locase = (type & SMALL); 54 58 if (type & LEFT) 55 59 type &= ~ZEROPAD; 56 60 if (base < 2 || base > 36) ··· 85 81 tmp[i++] = '0'; 86 82 else 87 83 while (num != 0) 88 - tmp[i++] = digits[do_div(num, base)]; 84 + tmp[i++] = (digits[do_div(num, base)] | locase); 89 85 if (i > precision) 90 86 precision = i; 91 87 size -= precision; ··· 99 95 *str++ = '0'; 100 96 else if (base == 16) { 101 97 *str++ = '0'; 102 - *str++ = digits[33]; 98 + *str++ = ('X' | locase); 103 99 } 104 100 } 105 101 if (!(type & LEFT)) ··· 248 244 base = 8; 249 245 break; 250 246 251 - case 'X': 252 - flags |= LARGE; 253 247 case 'x': 248 + flags |= SMALL; 249 + case 'X': 254 250 base = 16; 255 251 break; 256 252
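A side note on the SMALL/locase rework above (illustration, not from the patch): it relies on the ASCII layout, where the digits '0'-'9' (0x30-0x39) already have bit 0x20 set, so ORing them with 0x20 leaves them unchanged, while ORing 'A'-'F' with 0x20 turns them into 'a'-'f'. A minimal host-side check:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        const char digits[] = "0123456789ABCDEF";
        int locase = 0x20;          /* what the %x case sets via the SMALL flag */
        int i;

        for (i = 0; i < 16; i++) {
            char lower = digits[i] | locase;   /* digits unchanged, A-F -> a-f */
            printf("%c -> %c\n", digits[i], lower);
            if (i < 10)
                assert(lower == digits[i]);
            else
                assert(lower == digits[i] + ('a' - 'A'));
        }
        return 0;
    }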
-1
arch/x86/configs/i386_defconfig
··· 1421 1421 # CONFIG_DEBUG_VM is not set 1422 1422 # CONFIG_DEBUG_LIST is not set 1423 1423 # CONFIG_FRAME_POINTER is not set 1424 - # CONFIG_FORCED_INLINING is not set 1425 1424 # CONFIG_RCU_TORTURE_TEST is not set 1426 1425 # CONFIG_LKDTM is not set 1427 1426 # CONFIG_FAULT_INJECTION is not set
-1
arch/x86/configs/x86_64_defconfig
··· 1346 1346 # CONFIG_DEBUG_VM is not set 1347 1347 # CONFIG_DEBUG_LIST is not set 1348 1348 # CONFIG_FRAME_POINTER is not set 1349 - # CONFIG_FORCED_INLINING is not set 1350 1349 # CONFIG_RCU_TORTURE_TEST is not set 1351 1350 # CONFIG_LKDTM is not set 1352 1351 # CONFIG_FAULT_INJECTION is not set
-2
arch/x86/kernel/Makefile
··· 84 84 obj-y += genapic_64.o genapic_flat_64.o 85 85 obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o 86 86 obj-$(CONFIG_AUDIT) += audit_64.o 87 - obj-$(CONFIG_PM) += suspend_64.o 88 - obj-$(CONFIG_HIBERNATION) += suspend_asm_64.o 89 87 90 88 obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o 91 89 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
+1 -1
arch/x86/kernel/cpu/mcheck/therm_throt.c
··· 118 118 119 119 static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev) 120 120 { 121 - return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); 121 + sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); 122 122 } 123 123 124 124 /* Mutex protecting device creation against CPU hotplug */
+6 -9
arch/x86/kernel/entry_32.S
··· 409 409 RESTORE_REGS 410 410 addl $4, %esp # skip orig_eax/error_code 411 411 CFI_ADJUST_CFA_OFFSET -4 412 - 1: INTERRUPT_RETURN 412 + ENTRY(irq_return) 413 + INTERRUPT_RETURN 413 414 .section .fixup,"ax" 414 415 iret_exc: 415 416 pushl $0 # no error code ··· 419 418 .previous 420 419 .section __ex_table,"a" 421 420 .align 4 422 - .long 1b,iret_exc 421 + .long irq_return,iret_exc 423 422 .previous 424 423 425 424 CFI_RESTORE_STATE ··· 866 865 RESTORE_REGS 867 866 lss 12+4(%esp), %esp # back to espfix stack 868 867 CFI_ADJUST_CFA_OFFSET -24 869 - 1: INTERRUPT_RETURN 868 + jmp irq_return 870 869 CFI_ENDPROC 871 - .section __ex_table,"a" 872 - .align 4 873 - .long 1b,iret_exc 874 - .previous 875 870 KPROBE_END(nmi) 876 871 877 872 #ifdef CONFIG_PARAVIRT 878 873 ENTRY(native_iret) 879 - 1: iret 874 + iret 880 875 .section __ex_table,"a" 881 876 .align 4 882 - .long 1b,iret_exc 877 + .long native_iret, iret_exc 883 878 .previous 884 879 END(native_iret) 885 880
+13 -5
arch/x86/kernel/entry_64.S
··· 581 581 */ 582 582 TRACE_IRQS_IRETQ 583 583 restore_args: 584 - RESTORE_ARGS 0,8,0 585 - #ifdef CONFIG_PARAVIRT 584 + RESTORE_ARGS 0,8,0 585 + 586 + ENTRY(irq_return) 586 587 INTERRUPT_RETURN 587 - #endif 588 + 589 + .section __ex_table, "a" 590 + .quad irq_return, bad_iret 591 + .previous 592 + 593 + #ifdef CONFIG_PARAVIRT 588 594 ENTRY(native_iret) 589 595 iretq 590 596 591 597 .section __ex_table,"a" 592 598 .quad native_iret, bad_iret 593 599 .previous 600 + #endif 601 + 594 602 .section .fixup,"ax" 595 603 bad_iret: 596 604 /* ··· 812 804 SWAPGS_UNSAFE_STACK 813 805 paranoid_restore\trace: 814 806 RESTORE_ALL 8 815 - INTERRUPT_RETURN 807 + jmp irq_return 816 808 paranoid_userspace\trace: 817 809 GET_THREAD_INFO(%rcx) 818 810 movl threadinfo_flags(%rcx),%ebx ··· 927 919 iret run with kernel gs again, so don't set the user space flag. 928 920 B stepping K8s sometimes report an truncated RIP for IRET 929 921 exceptions returning to compat mode. Check for these here too. */ 930 - leaq native_iret(%rip),%rbp 922 + leaq irq_return(%rip),%rbp 931 923 cmpq %rbp,RIP(%rsp) 932 924 je error_swapgs 933 925 movl %ebp,%ebp /* zero extend */
+1 -4
arch/x86/kernel/geode_32.c
··· 163 163 164 164 static int __init geode_southbridge_init(void) 165 165 { 166 - int timers; 167 - 168 166 if (!is_geode()) 169 167 return -ENODEV; 170 168 171 169 init_lbars(); 172 - timers = geode_mfgpt_detect(); 173 - printk(KERN_INFO "geode: %d MFGPT timers available.\n", timers); 170 + (void) mfgpt_timer_setup(); 174 171 return 0; 175 172 } 176 173
+116 -35
arch/x86/kernel/head_32.S
··· 19 19 #include <asm/thread_info.h> 20 20 #include <asm/asm-offsets.h> 21 21 #include <asm/setup.h> 22 + #include <asm/processor-flags.h> 23 + 24 + /* Physical address */ 25 + #define pa(X) ((X) - __PAGE_OFFSET) 22 26 23 27 /* 24 28 * References to members of the new_cpu_data structure. ··· 84 80 */ 85 81 .section .text.head,"ax",@progbits 86 82 ENTRY(startup_32) 87 - /* check to see if KEEP_SEGMENTS flag is meaningful */ 88 - cmpw $0x207, BP_version(%esi) 89 - jb 1f 90 - 91 83 /* test KEEP_SEGMENTS flag to see if the bootloader is asking 92 84 us to not reload segments */ 93 85 testb $(1<<6), BP_loadflags(%esi) ··· 92 92 /* 93 93 * Set segments to known values. 94 94 */ 95 - 1: lgdt boot_gdt_descr - __PAGE_OFFSET 95 + lgdt pa(boot_gdt_descr) 96 96 movl $(__BOOT_DS),%eax 97 97 movl %eax,%ds 98 98 movl %eax,%es ··· 105 105 */ 106 106 cld 107 107 xorl %eax,%eax 108 - movl $__bss_start - __PAGE_OFFSET,%edi 109 - movl $__bss_stop - __PAGE_OFFSET,%ecx 108 + movl $pa(__bss_start),%edi 109 + movl $pa(__bss_stop),%ecx 110 110 subl %edi,%ecx 111 111 shrl $2,%ecx 112 112 rep ; stosl ··· 118 118 * (kexec on panic case). Hence copy out the parameters before initializing 119 119 * page tables. 120 120 */ 121 - movl $(boot_params - __PAGE_OFFSET),%edi 121 + movl $pa(boot_params),%edi 122 122 movl $(PARAM_SIZE/4),%ecx 123 123 cld 124 124 rep 125 125 movsl 126 - movl boot_params - __PAGE_OFFSET + NEW_CL_POINTER,%esi 126 + movl pa(boot_params) + NEW_CL_POINTER,%esi 127 127 andl %esi,%esi 128 128 jz 1f # No comand line 129 - movl $(boot_command_line - __PAGE_OFFSET),%edi 129 + movl $pa(boot_command_line),%edi 130 130 movl $(COMMAND_LINE_SIZE/4),%ecx 131 131 rep 132 132 movsl 133 133 1: 134 134 135 135 #ifdef CONFIG_PARAVIRT 136 - cmpw $0x207, (boot_params + BP_version - __PAGE_OFFSET) 136 + /* This is can only trip for a broken bootloader... */ 137 + cmpw $0x207, pa(boot_params + BP_version) 137 138 jb default_entry 138 139 139 140 /* Paravirt-compatible boot parameters. Look to see what architecture 140 141 we're booting under. */ 141 - movl (boot_params + BP_hardware_subarch - __PAGE_OFFSET), %eax 142 + movl pa(boot_params + BP_hardware_subarch), %eax 142 143 cmpl $num_subarch_entries, %eax 143 144 jae bad_subarch 144 145 145 - movl subarch_entries - __PAGE_OFFSET(,%eax,4), %eax 146 + movl pa(subarch_entries)(,%eax,4), %eax 146 147 subl $__PAGE_OFFSET, %eax 147 148 jmp *%eax 148 149 ··· 171 170 * Mappings are created both at virtual address 0 (identity mapping) 172 171 * and PAGE_OFFSET for up to _end+sizeof(page tables)+INIT_MAP_BEYOND_END. 173 172 * 174 - * Warning: don't use %esi or the stack in this code. However, %esp 175 - * can be used as a GPR if you really need it... 173 + * Note that the stack is not yet set up! 176 174 */ 177 - page_pde_offset = (__PAGE_OFFSET >> 20); 175 + #define PTE_ATTR 0x007 /* PRESENT+RW+USER */ 176 + #define PDE_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */ 177 + #define PGD_ATTR 0x001 /* PRESENT (no other attributes) */ 178 178 179 179 default_entry: 180 - movl $(pg0 - __PAGE_OFFSET), %edi 181 - movl $(swapper_pg_dir - __PAGE_OFFSET), %edx 182 - movl $0x007, %eax /* 0x007 = PRESENT+RW+USER */ 180 + #ifdef CONFIG_X86_PAE 181 + 182 + /* 183 + * In PAE mode swapper_pg_dir is statically defined to contain enough 184 + * entries to cover the VMSPLIT option (that is the top 1, 2 or 3 185 + * entries). The identity mapping is handled by pointing two PGD 186 + * entries to the first kernel PMD. 
187 + * 188 + * Note the upper half of each PMD or PTE are always zero at 189 + * this stage. 190 + */ 191 + 192 + #define KPMDS ((0x100000000-__PAGE_OFFSET) >> 30) /* Number of kernel PMDs */ 193 + 194 + xorl %ebx,%ebx /* %ebx is kept at zero */ 195 + 196 + movl $pa(pg0), %edi 197 + movl $pa(swapper_pg_pmd), %edx 198 + movl $PTE_ATTR, %eax 183 199 10: 184 - leal 0x007(%edi),%ecx /* Create PDE entry */ 200 + leal PDE_ATTR(%edi),%ecx /* Create PMD entry */ 201 + movl %ecx,(%edx) /* Store PMD entry */ 202 + /* Upper half already zero */ 203 + addl $8,%edx 204 + movl $512,%ecx 205 + 11: 206 + stosl 207 + xchgl %eax,%ebx 208 + stosl 209 + xchgl %eax,%ebx 210 + addl $0x1000,%eax 211 + loop 11b 212 + 213 + /* 214 + * End condition: we must map up to and including INIT_MAP_BEYOND_END 215 + * bytes beyond the end of our own page tables. 216 + */ 217 + leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp 218 + cmpl %ebp,%eax 219 + jb 10b 220 + 1: 221 + movl %edi,pa(init_pg_tables_end) 222 + 223 + /* Do early initialization of the fixmap area */ 224 + movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax 225 + movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8) 226 + #else /* Not PAE */ 227 + 228 + page_pde_offset = (__PAGE_OFFSET >> 20); 229 + 230 + movl $pa(pg0), %edi 231 + movl $pa(swapper_pg_dir), %edx 232 + movl $PTE_ATTR, %eax 233 + 10: 234 + leal PDE_ATTR(%edi),%ecx /* Create PDE entry */ 185 235 movl %ecx,(%edx) /* Store identity PDE entry */ 186 236 movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */ 187 237 addl $4,%edx ··· 241 189 stosl 242 190 addl $0x1000,%eax 243 191 loop 11b 244 - /* End condition: we must map up to and including INIT_MAP_BEYOND_END */ 245 - /* bytes beyond the end of our own page tables; the +0x007 is the attribute bits */ 246 - leal (INIT_MAP_BEYOND_END+0x007)(%edi),%ebp 192 + /* 193 + * End condition: we must map up to and including INIT_MAP_BEYOND_END 194 + * bytes beyond the end of our own page tables; the +0x007 is 195 + * the attribute bits 196 + */ 197 + leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp 247 198 cmpl %ebp,%eax 248 199 jb 10b 249 - movl %edi,(init_pg_tables_end - __PAGE_OFFSET) 200 + movl %edi,pa(init_pg_tables_end) 250 201 251 - /* Do an early initialization of the fixmap area */ 252 - movl $(swapper_pg_dir - __PAGE_OFFSET), %edx 253 - movl $(swapper_pg_pmd - __PAGE_OFFSET), %eax 254 - addl $0x67, %eax /* 0x67 == _PAGE_TABLE */ 255 - movl %eax, 4092(%edx) 256 - 202 + /* Do early initialization of the fixmap area */ 203 + movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax 204 + movl %eax,pa(swapper_pg_dir+0xffc) 205 + #endif 257 206 jmp 3f 258 207 /* 259 208 * Non-boot CPU entry point; entered from trampoline.S ··· 294 241 * NOTE! We have to correct for the fact that we're 295 242 * not yet offset PAGE_OFFSET.. 296 243 */ 297 - #define cr4_bits mmu_cr4_features-__PAGE_OFFSET 244 + #define cr4_bits pa(mmu_cr4_features) 298 245 movl cr4_bits,%edx 299 246 andl %edx,%edx 300 247 jz 6f ··· 329 276 /* 330 277 * Enable paging 331 278 */ 332 - movl $swapper_pg_dir-__PAGE_OFFSET,%eax 279 + movl $pa(swapper_pg_dir),%eax 333 280 movl %eax,%cr3 /* set the page table pointer.. 
*/ 334 281 movl %cr0,%eax 335 - orl $0x80000000,%eax 282 + orl $X86_CR0_PG,%eax 336 283 movl %eax,%cr0 /* ..and set paging (PG) bit */ 337 284 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ 338 285 1: ··· 605 552 */ 606 553 .section ".bss.page_aligned","wa" 607 554 .align PAGE_SIZE_asm 555 + #ifdef CONFIG_X86_PAE 556 + ENTRY(swapper_pg_pmd) 557 + .fill 1024*KPMDS,4,0 558 + #else 608 559 ENTRY(swapper_pg_dir) 609 560 .fill 1024,4,0 610 - ENTRY(swapper_pg_pmd) 561 + #endif 562 + ENTRY(swapper_pg_fixmap) 611 563 .fill 1024,4,0 612 564 ENTRY(empty_zero_page) 613 565 .fill 4096,1,0 614 - 615 566 /* 616 567 * This starts the data section. 617 568 */ 569 + #ifdef CONFIG_X86_PAE 570 + .section ".data.page_aligned","wa" 571 + /* Page-aligned for the benefit of paravirt? */ 572 + .align PAGE_SIZE_asm 573 + ENTRY(swapper_pg_dir) 574 + .long pa(swapper_pg_pmd+PGD_ATTR),0 /* low identity map */ 575 + # if KPMDS == 3 576 + .long pa(swapper_pg_pmd+PGD_ATTR),0 577 + .long pa(swapper_pg_pmd+PGD_ATTR+0x1000),0 578 + .long pa(swapper_pg_pmd+PGD_ATTR+0x2000),0 579 + # elif KPMDS == 2 580 + .long 0,0 581 + .long pa(swapper_pg_pmd+PGD_ATTR),0 582 + .long pa(swapper_pg_pmd+PGD_ATTR+0x1000),0 583 + # elif KPMDS == 1 584 + .long 0,0 585 + .long 0,0 586 + .long pa(swapper_pg_pmd+PGD_ATTR),0 587 + # else 588 + # error "Kernel PMDs should be 1, 2 or 3" 589 + # endif 590 + .align PAGE_SIZE_asm /* needs to be page-sized too */ 591 + #endif 592 + 618 593 .data 619 594 ENTRY(stack_start) 620 595 .long init_thread_union+THREAD_SIZE
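Worth noting about the PAE path above (not part of the patch): the KPMDS expression resolves to 1, 2 or 3 kernel PMDs for the standard VMSPLIT choices, which is exactly the set of layouts the new swapper_pg_dir initializer handles. A small host-side check of that arithmetic, assuming the usual __PAGE_OFFSET values:

    #include <stdio.h>

    /* Mirrors: #define KPMDS ((0x100000000-__PAGE_OFFSET) >> 30) */
    static unsigned long kpmds(unsigned long long page_offset)
    {
        return (0x100000000ULL - page_offset) >> 30;
    }

    int main(void)
    {
        /* Standard VMSPLIT_3G / VMSPLIT_2G / VMSPLIT_1G offsets */
        printf("PAGE_OFFSET 0xC0000000 -> %lu kernel PMD(s)\n", kpmds(0xC0000000UL));
        printf("PAGE_OFFSET 0x80000000 -> %lu kernel PMD(s)\n", kpmds(0x80000000UL));
        printf("PAGE_OFFSET 0x40000000 -> %lu kernel PMD(s)\n", kpmds(0x40000000UL));
        return 0;
    }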
+65 -58
arch/x86/kernel/mfgpt_32.c
··· 12 12 */ 13 13 14 14 /* 15 - * We are using the 32Khz input clock - its the only one that has the 15 + * We are using the 32.768kHz input clock - it's the only one that has the 16 16 * ranges we find desirable. The following table lists the suitable 17 - * divisors and the associated hz, minimum interval 18 - * and the maximum interval: 17 + * divisors and the associated Hz, minimum interval and the maximum interval: 19 18 * 20 - * Divisor Hz Min Delta (S) Max Delta (S) 21 - * 1 32000 .0005 2.048 22 - * 2 16000 .001 4.096 23 - * 4 8000 .002 8.192 24 - * 8 4000 .004 16.384 25 - * 16 2000 .008 32.768 26 - * 32 1000 .016 65.536 27 - * 64 500 .032 131.072 28 - * 128 250 .064 262.144 29 - * 256 125 .128 524.288 19 + * Divisor Hz Min Delta (s) Max Delta (s) 20 + * 1 32768 .00048828125 2.000 21 + * 2 16384 .0009765625 4.000 22 + * 4 8192 .001953125 8.000 23 + * 8 4096 .00390625 16.000 24 + * 16 2048 .0078125 32.000 25 + * 32 1024 .015625 64.000 26 + * 64 512 .03125 128.000 27 + * 128 256 .0625 256.000 28 + * 256 128 .125 512.000 30 29 */ 31 30 32 31 #include <linux/kernel.h> 33 32 #include <linux/interrupt.h> 34 - #include <linux/module.h> 35 33 #include <asm/geode.h> 36 34 37 - #define F_AVAIL 0x01 38 - 39 35 static struct mfgpt_timer_t { 40 - int flags; 41 - struct module *owner; 36 + unsigned int avail:1; 42 37 } mfgpt_timers[MFGPT_MAX_TIMERS]; 43 38 44 39 /* Selected from the table above */ 45 40 46 41 #define MFGPT_DIVISOR 16 47 42 #define MFGPT_SCALE 4 /* divisor = 2^(scale) */ 48 - #define MFGPT_HZ (32000 / MFGPT_DIVISOR) 43 + #define MFGPT_HZ (32768 / MFGPT_DIVISOR) 49 44 #define MFGPT_PERIODIC (MFGPT_HZ / HZ) 50 - 51 - #ifdef CONFIG_GEODE_MFGPT_TIMER 52 - static int __init mfgpt_timer_setup(void); 53 - #else 54 - #define mfgpt_timer_setup() (0) 55 - #endif 56 45 57 46 /* Allow for disabling of MFGPTs */ 58 47 static int disable; ··· 74 85 * In other cases (such as with VSAless OpenFirmware), the system firmware 75 86 * leaves timers available for us to use. 
76 87 */ 77 - int __init geode_mfgpt_detect(void) 88 + 89 + 90 + static int timers = -1; 91 + 92 + static void geode_mfgpt_detect(void) 78 93 { 79 - int count = 0, i; 94 + int i; 80 95 u16 val; 81 96 97 + timers = 0; 98 + 82 99 if (disable) { 83 - printk(KERN_INFO "geode-mfgpt: Skipping MFGPT setup\n"); 84 - return 0; 100 + printk(KERN_INFO "geode-mfgpt: MFGPT support is disabled\n"); 101 + goto done; 102 + } 103 + 104 + if (!geode_get_dev_base(GEODE_DEV_MFGPT)) { 105 + printk(KERN_INFO "geode-mfgpt: MFGPT LBAR is not set up\n"); 106 + goto done; 85 107 } 86 108 87 109 for (i = 0; i < MFGPT_MAX_TIMERS; i++) { 88 110 val = geode_mfgpt_read(i, MFGPT_REG_SETUP); 89 111 if (!(val & MFGPT_SETUP_SETUP)) { 90 - mfgpt_timers[i].flags = F_AVAIL; 91 - count++; 112 + mfgpt_timers[i].avail = 1; 113 + timers++; 92 114 } 93 115 } 94 116 95 - /* set up clock event device, if desired */ 96 - i = mfgpt_timer_setup(); 97 - 98 - return count; 117 + done: 118 + printk(KERN_INFO "geode-mfgpt: %d MFGPT timers available.\n", timers); 99 119 } 100 120 101 121 int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable) ··· 181 183 return 0; 182 184 } 183 185 184 - static int mfgpt_get(int timer, struct module *owner) 186 + static int mfgpt_get(int timer) 185 187 { 186 - mfgpt_timers[timer].flags &= ~F_AVAIL; 187 - mfgpt_timers[timer].owner = owner; 188 + mfgpt_timers[timer].avail = 0; 188 189 printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer); 189 190 return timer; 190 191 } 191 192 192 - int geode_mfgpt_alloc_timer(int timer, int domain, struct module *owner) 193 + int geode_mfgpt_alloc_timer(int timer, int domain) 193 194 { 194 195 int i; 195 196 196 - if (!geode_get_dev_base(GEODE_DEV_MFGPT)) 197 - return -ENODEV; 197 + if (timers == -1) { 198 + /* timers haven't been detected yet */ 199 + geode_mfgpt_detect(); 200 + } 201 + 202 + if (!timers) 203 + return -1; 204 + 198 205 if (timer >= MFGPT_MAX_TIMERS) 199 - return -EIO; 206 + return -1; 200 207 201 208 if (timer < 0) { 202 209 /* Try to find an available timer */ 203 210 for (i = 0; i < MFGPT_MAX_TIMERS; i++) { 204 - if (mfgpt_timers[i].flags & F_AVAIL) 205 - return mfgpt_get(i, owner); 211 + if (mfgpt_timers[i].avail) 212 + return mfgpt_get(i); 206 213 207 214 if (i == 5 && domain == MFGPT_DOMAIN_WORKING) 208 215 break; 209 216 } 210 217 } else { 211 218 /* If they requested a specific timer, try to honor that */ 212 - if (mfgpt_timers[timer].flags & F_AVAIL) 213 - return mfgpt_get(timer, owner); 219 + if (mfgpt_timers[timer].avail) 220 + return mfgpt_get(timer); 214 221 } 215 222 216 223 /* No timers available - too bad */ ··· 247 244 } 248 245 __setup("mfgpt_irq=", mfgpt_setup); 249 246 250 - static inline void mfgpt_disable_timer(u16 clock) 247 + static void mfgpt_disable_timer(u16 clock) 251 248 { 252 - u16 val = geode_mfgpt_read(clock, MFGPT_REG_SETUP); 253 - geode_mfgpt_write(clock, MFGPT_REG_SETUP, val & ~MFGPT_SETUP_CNTEN); 249 + /* avoid races by clearing CMP1 and CMP2 unconditionally */ 250 + geode_mfgpt_write(clock, MFGPT_REG_SETUP, (u16) ~MFGPT_SETUP_CNTEN | 251 + MFGPT_SETUP_CMP1 | MFGPT_SETUP_CMP2); 254 252 } 255 253 256 254 static int mfgpt_next_event(unsigned long, struct clock_event_device *); ··· 267 263 .shift = 32 268 264 }; 269 265 270 - static inline void mfgpt_start_timer(u16 clock, u16 delta) 266 + static void mfgpt_start_timer(u16 delta) 271 267 { 272 268 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta); 273 269 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0); ··· 282 278 
mfgpt_disable_timer(mfgpt_event_clock); 283 279 284 280 if (mode == CLOCK_EVT_MODE_PERIODIC) 285 - mfgpt_start_timer(mfgpt_event_clock, MFGPT_PERIODIC); 281 + mfgpt_start_timer(MFGPT_PERIODIC); 286 282 287 283 mfgpt_tick_mode = mode; 288 284 } 289 285 290 286 static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt) 291 287 { 292 - mfgpt_start_timer(mfgpt_event_clock, delta); 288 + mfgpt_start_timer(delta); 293 289 return 0; 294 290 } 295 291 296 - /* Assume (foolishly?), that this interrupt was due to our tick */ 297 - 298 292 static irqreturn_t mfgpt_tick(int irq, void *dev_id) 299 293 { 294 + u16 val = geode_mfgpt_read(mfgpt_event_clock, MFGPT_REG_SETUP); 295 + 296 + /* See if the interrupt was for us */ 297 + if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1))) 298 + return IRQ_NONE; 299 + 300 300 /* Turn off the clock (and clear the event) */ 301 301 mfgpt_disable_timer(mfgpt_event_clock); 302 302 ··· 328 320 .name = "mfgpt-timer" 329 321 }; 330 322 331 - static int __init mfgpt_timer_setup(void) 323 + int __init mfgpt_timer_setup(void) 332 324 { 333 325 int timer, ret; 334 326 u16 val; 335 327 336 - timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING, 337 - THIS_MODULE); 328 + timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING); 338 329 if (timer < 0) { 339 330 printk(KERN_ERR 340 331 "mfgpt-timer: Could not allocate a MFPGT timer\n"); ··· 370 363 &mfgpt_clockevent); 371 364 372 365 printk(KERN_INFO 373 - "mfgpt-timer: registering the MFGT timer as a clock event.\n"); 366 + "mfgpt-timer: registering the MFGPT timer as a clock event.\n"); 374 367 clockevents_register_device(&mfgpt_clockevent); 375 368 376 369 return 0;
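For orientation (illustration only, not from the patch): with the selected divisor of 16 the macros above give MFGPT_HZ = 32768 / 16 = 2048, and MFGPT_PERIODIC is that rate divided by the kernel HZ, i.e. the number of MFGPT ticks per jiffy in periodic mode. A quick host-side check of the arithmetic, assuming the 16-bit counter implied by the corrected table:

    #include <stdio.h>

    #define MFGPT_DIVISOR   16
    #define MFGPT_HZ        (32768 / MFGPT_DIVISOR)

    int main(void)
    {
        int hz_values[] = { 100, 250, 1000 };   /* common kernel HZ settings */
        int i;

        for (i = 0; i < 3; i++)
            printf("HZ=%4d -> MFGPT_PERIODIC = %d ticks per jiffy\n",
                   hz_values[i], MFGPT_HZ / hz_values[i]);

        /* Longest programmable one-shot interval with a 16-bit counter at 2048 Hz */
        printf("max one-shot delta: %g s\n", 65536.0 / MFGPT_HZ);
        return 0;
    }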
+4
arch/x86/kernel/setup_32.c
··· 154 154 struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; 155 155 EXPORT_SYMBOL(boot_cpu_data); 156 156 157 + #ifndef CONFIG_X86_PAE 157 158 unsigned long mmu_cr4_features; 159 + #else 160 + unsigned long mmu_cr4_features = X86_CR4_PAE; 161 + #endif 158 162 159 163 /* for MCA, but anyone else can use it if they want */ 160 164 unsigned int machine_id;
+3 -157
arch/x86/kernel/suspend_64.c arch/x86/power/cpu_64.c
··· 1 1 /* 2 - * Suspend support specific for i386. 2 + * Suspend and hibernation support for x86-64 3 3 * 4 4 * Distribute under GPLv2 5 5 * 6 + * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl> 6 7 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz> 7 8 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> 8 9 */ ··· 14 13 #include <asm/page.h> 15 14 #include <asm/pgtable.h> 16 15 #include <asm/mtrr.h> 17 - 18 - /* References to section boundaries */ 19 - extern const void __nosave_begin, __nosave_end; 20 16 21 17 static void fix_processor_context(void); 22 18 ··· 61 63 mtrr_save_fixed_ranges(NULL); 62 64 63 65 /* 64 - * control registers 66 + * control registers 65 67 */ 66 68 rdmsrl(MSR_EFER, ctxt->efer); 67 69 ctxt->cr0 = read_cr0(); ··· 164 166 loaddebug(&current->thread, 7); 165 167 } 166 168 } 167 - 168 - #ifdef CONFIG_HIBERNATION 169 - /* Defined in arch/x86_64/kernel/suspend_asm.S */ 170 - extern int restore_image(void); 171 - 172 - /* 173 - * Address to jump to in the last phase of restore in order to get to the image 174 - * kernel's text (this value is passed in the image header). 175 - */ 176 - unsigned long restore_jump_address; 177 - 178 - /* 179 - * Value of the cr3 register from before the hibernation (this value is passed 180 - * in the image header). 181 - */ 182 - unsigned long restore_cr3; 183 - 184 - pgd_t *temp_level4_pgt; 185 - 186 - void *relocated_restore_code; 187 - 188 - static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) 189 - { 190 - long i, j; 191 - 192 - i = pud_index(address); 193 - pud = pud + i; 194 - for (; i < PTRS_PER_PUD; pud++, i++) { 195 - unsigned long paddr; 196 - pmd_t *pmd; 197 - 198 - paddr = address + i*PUD_SIZE; 199 - if (paddr >= end) 200 - break; 201 - 202 - pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); 203 - if (!pmd) 204 - return -ENOMEM; 205 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); 206 - for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) { 207 - unsigned long pe; 208 - 209 - if (paddr >= end) 210 - break; 211 - pe = __PAGE_KERNEL_LARGE_EXEC | paddr; 212 - pe &= __supported_pte_mask; 213 - set_pmd(pmd, __pmd(pe)); 214 - } 215 - } 216 - return 0; 217 - } 218 - 219 - static int set_up_temporary_mappings(void) 220 - { 221 - unsigned long start, end, next; 222 - int error; 223 - 224 - temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC); 225 - if (!temp_level4_pgt) 226 - return -ENOMEM; 227 - 228 - /* It is safe to reuse the original kernel mapping */ 229 - set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), 230 - init_level4_pgt[pgd_index(__START_KERNEL_map)]); 231 - 232 - /* Set up the direct mapping from scratch */ 233 - start = (unsigned long)pfn_to_kaddr(0); 234 - end = (unsigned long)pfn_to_kaddr(end_pfn); 235 - 236 - for (; start < end; start = next) { 237 - pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC); 238 - if (!pud) 239 - return -ENOMEM; 240 - next = start + PGDIR_SIZE; 241 - if (next > end) 242 - next = end; 243 - if ((error = res_phys_pud_init(pud, __pa(start), __pa(next)))) 244 - return error; 245 - set_pgd(temp_level4_pgt + pgd_index(start), 246 - mk_kernel_pgd(__pa(pud))); 247 - } 248 - return 0; 249 - } 250 - 251 - int swsusp_arch_resume(void) 252 - { 253 - int error; 254 - 255 - /* We have got enough memory and from now on we cannot recover */ 256 - if ((error = set_up_temporary_mappings())) 257 - return error; 258 - 259 - relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC); 260 - if (!relocated_restore_code) 261 - return -ENOMEM; 262 - 
memcpy(relocated_restore_code, &core_restore_code, 263 - &restore_registers - &core_restore_code); 264 - 265 - restore_image(); 266 - return 0; 267 - } 268 - 269 - /* 270 - * pfn_is_nosave - check if given pfn is in the 'nosave' section 271 - */ 272 - 273 - int pfn_is_nosave(unsigned long pfn) 274 - { 275 - unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; 276 - unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; 277 - return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); 278 - } 279 - 280 - struct restore_data_record { 281 - unsigned long jump_address; 282 - unsigned long cr3; 283 - unsigned long magic; 284 - }; 285 - 286 - #define RESTORE_MAGIC 0x0123456789ABCDEFUL 287 - 288 - /** 289 - * arch_hibernation_header_save - populate the architecture specific part 290 - * of a hibernation image header 291 - * @addr: address to save the data at 292 - */ 293 - int arch_hibernation_header_save(void *addr, unsigned int max_size) 294 - { 295 - struct restore_data_record *rdr = addr; 296 - 297 - if (max_size < sizeof(struct restore_data_record)) 298 - return -EOVERFLOW; 299 - rdr->jump_address = restore_jump_address; 300 - rdr->cr3 = restore_cr3; 301 - rdr->magic = RESTORE_MAGIC; 302 - return 0; 303 - } 304 - 305 - /** 306 - * arch_hibernation_header_restore - read the architecture specific data 307 - * from the hibernation image header 308 - * @addr: address to read the data from 309 - */ 310 - int arch_hibernation_header_restore(void *addr) 311 - { 312 - struct restore_data_record *rdr = addr; 313 - 314 - restore_jump_address = rdr->jump_address; 315 - restore_cr3 = rdr->cr3; 316 - return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL; 317 - } 318 - #endif /* CONFIG_HIBERNATION */
+7 -2
arch/x86/kernel/suspend_asm_64.S arch/x86/power/hibernate_asm_64.S
··· 1 - /* Copyright 2004,2005 Pavel Machek <pavel@suse.cz>, Andi Kleen <ak@suse.de>, Rafael J. Wysocki <rjw@sisk.pl> 1 + /* 2 + * Hibernation support for x86-64 2 3 * 3 4 * Distribute under GPLv2. 5 + * 6 + * Copyright 2007 Rafael J. Wysocki <rjw@sisk.pl> 7 + * Copyright 2005 Andi Kleen <ak@suse.de> 8 + * Copyright 2004 Pavel Machek <pavel@suse.cz> 4 9 * 5 10 * swsusp_arch_resume must not use any stack or any nonlocal variables while 6 11 * copying pages: ··· 14 9 * image could very well be data page in "new" image, and overwriting 15 10 * your own stack under you is bad idea. 16 11 */ 17 - 12 + 18 13 .text 19 14 #include <linux/linkage.h> 20 15 #include <asm/segment.h>
+1 -1
arch/x86/kernel/topology.c
··· 53 53 54 54 void arch_unregister_cpu(int num) 55 55 { 56 - return unregister_cpu(&per_cpu(cpu_devices, num).cpu); 56 + unregister_cpu(&per_cpu(cpu_devices, num).cpu); 57 57 } 58 58 EXPORT_SYMBOL(arch_unregister_cpu); 59 59 #else
+28 -44
arch/x86/mm/init_32.c
··· 46 46 #include <asm/pgalloc.h> 47 47 #include <asm/sections.h> 48 48 #include <asm/paravirt.h> 49 + #include <asm/setup.h> 49 50 50 51 unsigned int __VMALLOC_RESERVE = 128 << 20; 51 52 ··· 329 328 330 329 void __init native_pagetable_setup_start(pgd_t *base) 331 330 { 332 - #ifdef CONFIG_X86_PAE 333 - int i; 331 + unsigned long pfn, va; 332 + pgd_t *pgd; 333 + pud_t *pud; 334 + pmd_t *pmd; 335 + pte_t *pte; 334 336 335 337 /* 336 - * Init entries of the first-level page table to the 337 - * zero page, if they haven't already been set up. 338 - * 339 - * In a normal native boot, we'll be running on a 340 - * pagetable rooted in swapper_pg_dir, but not in PAE 341 - * mode, so this will end up clobbering the mappings 342 - * for the lower 24Mbytes of the address space, 343 - * without affecting the kernel address space. 338 + * Remove any mappings which extend past the end of physical 339 + * memory from the boot time page table: 344 340 */ 345 - for (i = 0; i < USER_PTRS_PER_PGD; i++) 346 - set_pgd(&base[i], 347 - __pgd(__pa(empty_zero_page) | _PAGE_PRESENT)); 341 + for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) { 342 + va = PAGE_OFFSET + (pfn<<PAGE_SHIFT); 343 + pgd = base + pgd_index(va); 344 + if (!pgd_present(*pgd)) 345 + break; 348 346 349 - /* Make sure kernel address space is empty so that a pagetable 350 - will be allocated for it. */ 351 - memset(&base[USER_PTRS_PER_PGD], 0, 352 - KERNEL_PGD_PTRS * sizeof(pgd_t)); 353 - #else 347 + pud = pud_offset(pgd, va); 348 + pmd = pmd_offset(pud, va); 349 + if (!pmd_present(*pmd)) 350 + break; 351 + 352 + pte = pte_offset_kernel(pmd, va); 353 + if (!pte_present(*pte)) 354 + break; 355 + 356 + pte_clear(NULL, va, pte); 357 + } 354 358 paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT); 355 - #endif 356 359 } 357 360 358 361 void __init native_pagetable_setup_done(pgd_t *base) 359 362 { 360 - #ifdef CONFIG_X86_PAE 361 - /* 362 - * Add low memory identity-mappings - SMP needs it when 363 - * starting up on an AP from real-mode. In the non-PAE 364 - * case we already have these mappings through head.S. 365 - * All user-space mappings are explicitly cleared after 366 - * SMP startup. 367 - */ 368 - set_pgd(&base[0], base[USER_PTRS_PER_PGD]); 369 - #endif 370 363 } 371 364 372 365 /* ··· 369 374 * the boot process. 370 375 * 371 376 * If we're booting on native hardware, this will be a pagetable 372 - * constructed in arch/i386/kernel/head.S, and not running in PAE mode 373 - * (even if we'll end up running in PAE). The root of the pagetable 374 - * will be swapper_pg_dir. 377 + * constructed in arch/x86/kernel/head_32.S. The root of the 378 + * pagetable will be swapper_pg_dir. 375 379 * 376 380 * If we're booting paravirtualized under a hypervisor, then there are 377 381 * more options: we may already be running PAE, and the pagetable may ··· 531 537 532 538 load_cr3(swapper_pg_dir); 533 539 534 - #ifdef CONFIG_X86_PAE 535 - /* 536 - * We will bail out later - printk doesn't work right now so 537 - * the user would just see a hanging kernel. 
538 - */ 539 - if (cpu_has_pae) 540 - set_in_cr4(X86_CR4_PAE); 541 - #endif 542 540 __flush_tlb_all(); 543 541 544 542 kmap_init(); ··· 661 675 BUG_ON((unsigned long)high_memory > VMALLOC_START); 662 676 #endif /* double-sanity-check paranoia */ 663 677 664 - #ifdef CONFIG_X86_PAE 665 - if (!cpu_has_pae) 666 - panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!"); 667 - #endif 668 678 if (boot_cpu_data.wp_works_ok < 0) 669 679 test_wp_bit(); 680 + 681 + cpa_init(); 670 682 671 683 /* 672 684 * Subtle. SMP is doing it's boot stuff late (because it has to
+5 -3
arch/x86/mm/init_64.c
··· 528 528 reservedpages << (PAGE_SHIFT-10), 529 529 datasize >> 10, 530 530 initsize >> 10); 531 + 532 + cpa_init(); 531 533 } 532 534 533 535 void free_init_pages(char *what, unsigned long begin, unsigned long end) 534 536 { 535 - unsigned long addr; 537 + unsigned long addr = begin; 536 538 537 - if (begin >= end) 539 + if (addr >= end) 538 540 return; 539 541 540 542 /* ··· 551 549 #else 552 550 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); 553 551 554 - for (addr = begin; addr < end; addr += PAGE_SIZE) { 552 + for (; addr < end; addr += PAGE_SIZE) { 555 553 ClearPageReserved(virt_to_page(addr)); 556 554 init_page_count(virt_to_page(addr)); 557 555 memset((void *)(addr & ~(PAGE_SIZE-1)),
+31 -24
arch/x86/mm/ioremap.c
··· 260 260 early_param("early_ioremap_debug", early_ioremap_debug_setup); 261 261 262 262 static __initdata int after_paging_init; 263 - static __initdata unsigned long bm_pte[1024] 263 + static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] 264 264 __attribute__((aligned(PAGE_SIZE))); 265 265 266 - static inline unsigned long * __init early_ioremap_pgd(unsigned long addr) 266 + static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) 267 267 { 268 - return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023); 268 + pgd_t *pgd = &swapper_pg_dir[pgd_index(addr)]; 269 + pud_t *pud = pud_offset(pgd, addr); 270 + pmd_t *pmd = pmd_offset(pud, addr); 271 + 272 + return pmd; 269 273 } 270 274 271 - static inline unsigned long * __init early_ioremap_pte(unsigned long addr) 275 + static inline pte_t * __init early_ioremap_pte(unsigned long addr) 272 276 { 273 - return bm_pte + ((addr >> PAGE_SHIFT) & 1023); 277 + return &bm_pte[pte_index(addr)]; 274 278 } 275 279 276 280 void __init early_ioremap_init(void) 277 281 { 278 - unsigned long *pgd; 282 + pmd_t *pmd; 279 283 280 284 if (early_ioremap_debug) 281 285 printk(KERN_INFO "early_ioremap_init()\n"); 282 286 283 - pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN)); 284 - *pgd = __pa(bm_pte) | _PAGE_TABLE; 287 + pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); 285 288 memset(bm_pte, 0, sizeof(bm_pte)); 289 + pmd_populate_kernel(&init_mm, pmd, bm_pte); 290 + 286 291 /* 287 - * The boot-ioremap range spans multiple pgds, for which 292 + * The boot-ioremap range spans multiple pmds, for which 288 293 * we are not prepared: 289 294 */ 290 - if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) { 295 + if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { 291 296 WARN_ON(1); 292 - printk(KERN_WARNING "pgd %p != %p\n", 293 - pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))); 297 + printk(KERN_WARNING "pmd %p != %p\n", 298 + pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))); 294 299 printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", 295 - fix_to_virt(FIX_BTMAP_BEGIN)); 300 + fix_to_virt(FIX_BTMAP_BEGIN)); 296 301 printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n", 297 - fix_to_virt(FIX_BTMAP_END)); 302 + fix_to_virt(FIX_BTMAP_END)); 298 303 299 304 printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END); 300 305 printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n", ··· 309 304 310 305 void __init early_ioremap_clear(void) 311 306 { 312 - unsigned long *pgd; 307 + pmd_t *pmd; 313 308 314 309 if (early_ioremap_debug) 315 310 printk(KERN_INFO "early_ioremap_clear()\n"); 316 311 317 - pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN)); 318 - *pgd = 0; 319 - paravirt_release_pt(__pa(pgd) >> PAGE_SHIFT); 312 + pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); 313 + pmd_clear(pmd); 314 + paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT); 320 315 __flush_tlb_all(); 321 316 } 322 317 323 318 void __init early_ioremap_reset(void) 324 319 { 325 320 enum fixed_addresses idx; 326 - unsigned long *pte, phys, addr; 321 + unsigned long addr, phys; 322 + pte_t *pte; 327 323 328 324 after_paging_init = 1; 329 325 for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) { 330 326 addr = fix_to_virt(idx); 331 327 pte = early_ioremap_pte(addr); 332 - if (*pte & _PAGE_PRESENT) { 333 - phys = *pte & PAGE_MASK; 328 + if (pte_present(*pte)) { 329 + phys = pte_val(*pte) & PAGE_MASK; 334 330 set_fixmap(idx, phys); 335 331 } 336 332 } ··· 340 334 static void __init __early_set_fixmap(enum fixed_addresses idx, 341 335 unsigned 
long phys, pgprot_t flags) 342 336 { 343 - unsigned long *pte, addr = __fix_to_virt(idx); 337 + unsigned long addr = __fix_to_virt(idx); 338 + pte_t *pte; 344 339 345 340 if (idx >= __end_of_fixed_addresses) { 346 341 BUG(); ··· 349 342 } 350 343 pte = early_ioremap_pte(addr); 351 344 if (pgprot_val(flags)) 352 - *pte = (phys & PAGE_MASK) | pgprot_val(flags); 345 + set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); 353 346 else 354 - *pte = 0; 347 + pte_clear(NULL, addr, pte); 355 348 __flush_tlb_one(addr); 356 349 } 357 350
+126 -16
arch/x86/mm/pageattr.c
··· 8 8 #include <linux/sched.h> 9 9 #include <linux/slab.h> 10 10 #include <linux/mm.h> 11 + #include <linux/interrupt.h> 11 12 12 13 #include <asm/e820.h> 13 14 #include <asm/processor.h> ··· 192 191 * or when the present bit is not set. Otherwise we would return a 193 192 * pointer to a nonexisting mapping. 194 193 */ 195 - pte_t *lookup_address(unsigned long address, int *level) 194 + pte_t *lookup_address(unsigned long address, unsigned int *level) 196 195 { 197 196 pgd_t *pgd = pgd_offset_k(address); 198 197 pud_t *pud; ··· 253 252 try_preserve_large_page(pte_t *kpte, unsigned long address, 254 253 struct cpa_data *cpa) 255 254 { 256 - unsigned long nextpage_addr, numpages, pmask, psize, flags; 255 + unsigned long nextpage_addr, numpages, pmask, psize, flags, addr; 257 256 pte_t new_pte, old_pte, *tmp; 258 257 pgprot_t old_prot, new_prot; 259 - int level, do_split = 1; 258 + int i, do_split = 1; 259 + unsigned int level; 260 260 261 261 spin_lock_irqsave(&pgd_lock, flags); 262 262 /* ··· 304 302 new_prot = static_protections(new_prot, address); 305 303 306 304 /* 305 + * We need to check the full range, whether 306 + * static_protection() requires a different pgprot for one of 307 + * the pages in the range we try to preserve: 308 + */ 309 + addr = address + PAGE_SIZE; 310 + for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE) { 311 + pgprot_t chk_prot = static_protections(new_prot, addr); 312 + 313 + if (pgprot_val(chk_prot) != pgprot_val(new_prot)) 314 + goto out_unlock; 315 + } 316 + 317 + /* 307 318 * If there are no changes, return. maxpages has been updated 308 319 * above: 309 320 */ ··· 350 335 return do_split; 351 336 } 352 337 338 + static LIST_HEAD(page_pool); 339 + static unsigned long pool_size, pool_pages, pool_low; 340 + static unsigned long pool_used, pool_failed, pool_refill; 341 + 342 + static void cpa_fill_pool(void) 343 + { 344 + struct page *p; 345 + gfp_t gfp = GFP_KERNEL; 346 + 347 + /* Do not allocate from interrupt context */ 348 + if (in_irq() || irqs_disabled()) 349 + return; 350 + /* 351 + * Check unlocked. I does not matter when we have one more 352 + * page in the pool. The bit lock avoids recursive pool 353 + * allocations: 354 + */ 355 + if (pool_pages >= pool_size || test_and_set_bit_lock(0, &pool_refill)) 356 + return; 357 + 358 + #ifdef CONFIG_DEBUG_PAGEALLOC 359 + /* 360 + * We could do: 361 + * gfp = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; 362 + * but this fails on !PREEMPT kernels 363 + */ 364 + gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; 365 + #endif 366 + 367 + while (pool_pages < pool_size) { 368 + p = alloc_pages(gfp, 0); 369 + if (!p) { 370 + pool_failed++; 371 + break; 372 + } 373 + spin_lock_irq(&pgd_lock); 374 + list_add(&p->lru, &page_pool); 375 + pool_pages++; 376 + spin_unlock_irq(&pgd_lock); 377 + } 378 + clear_bit_unlock(0, &pool_refill); 379 + } 380 + 381 + #define SHIFT_MB (20 - PAGE_SHIFT) 382 + #define ROUND_MB_GB ((1 << 10) - 1) 383 + #define SHIFT_MB_GB 10 384 + #define POOL_PAGES_PER_GB 16 385 + 386 + void __init cpa_init(void) 387 + { 388 + struct sysinfo si; 389 + unsigned long gb; 390 + 391 + si_meminfo(&si); 392 + /* 393 + * Calculate the number of pool pages: 394 + * 395 + * Convert totalram (nr of pages) to MiB and round to the next 396 + * GiB. 
Shift MiB to Gib and multiply the result by 397 + * POOL_PAGES_PER_GB: 398 + */ 399 + gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB; 400 + pool_size = POOL_PAGES_PER_GB * gb; 401 + pool_low = pool_size; 402 + 403 + cpa_fill_pool(); 404 + printk(KERN_DEBUG 405 + "CPA: page pool initialized %lu of %lu pages preallocated\n", 406 + pool_pages, pool_size); 407 + } 408 + 353 409 static int split_large_page(pte_t *kpte, unsigned long address) 354 410 { 355 411 unsigned long flags, pfn, pfninc = 1; 356 - gfp_t gfp_flags = GFP_KERNEL; 357 412 unsigned int i, level; 358 413 pte_t *pbase, *tmp; 359 414 pgprot_t ref_prot; 360 415 struct page *base; 361 416 362 - #ifdef CONFIG_DEBUG_PAGEALLOC 363 - gfp_flags = GFP_ATOMIC | __GFP_NOWARN; 364 - #endif 365 - base = alloc_pages(gfp_flags, 0); 366 - if (!base) 367 - return -ENOMEM; 368 - 417 + /* 418 + * Get a page from the pool. The pool list is protected by the 419 + * pgd_lock, which we have to take anyway for the split 420 + * operation: 421 + */ 369 422 spin_lock_irqsave(&pgd_lock, flags); 423 + if (list_empty(&page_pool)) { 424 + spin_unlock_irqrestore(&pgd_lock, flags); 425 + return -ENOMEM; 426 + } 427 + 428 + base = list_first_entry(&page_pool, struct page, lru); 429 + list_del(&base->lru); 430 + pool_pages--; 431 + 432 + if (pool_pages < pool_low) 433 + pool_low = pool_pages; 434 + 370 435 /* 371 436 * Check for races, another CPU might have split this page 372 437 * up for us already: ··· 491 396 base = NULL; 492 397 493 398 out_unlock: 399 + /* 400 + * If we dropped out via the lookup_address check under 401 + * pgd_lock then stick the page back into the pool: 402 + */ 403 + if (base) { 404 + list_add(&base->lru, &page_pool); 405 + pool_pages++; 406 + } else 407 + pool_used++; 494 408 spin_unlock_irqrestore(&pgd_lock, flags); 495 - 496 - if (base) 497 - __free_pages(base, 0); 498 409 499 410 return 0; 500 411 } 501 412 502 413 static int __change_page_attr(unsigned long address, struct cpa_data *cpa) 503 414 { 504 - int level, do_split, err; 415 + int do_split, err; 416 + unsigned int level; 505 417 struct page *kpte_page; 506 418 pte_t *kpte; 507 419 ··· 700 598 * Check whether we really changed something: 701 599 */ 702 600 if (!cpa.flushtlb) 703 - return ret; 601 + goto out; 704 602 705 603 /* 706 604 * No need to flush, when we did not set any of the caching ··· 719 617 else 720 618 cpa_flush_all(cache); 721 619 620 + out: 621 + cpa_fill_pool(); 722 622 return ret; 723 623 } 724 624 ··· 874 770 * but that can deadlock->flush only current cpu: 875 771 */ 876 772 __flush_tlb_all(); 773 + 774 + /* 775 + * Try to refill the page pool here. We can do this only after 776 + * the tlb flush. 777 + */ 778 + cpa_fill_pool(); 877 779 } 878 780 #endif 879 781
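A quick sanity check of the pool sizing in cpa_init() above (illustration only; assumes 4 KiB pages, so SHIFT_MB = 20 - 12 = 8): totalram in pages is shifted down to MiB, rounded up to whole GiB, and multiplied by POOL_PAGES_PER_GB, so e.g. a 2 GiB machine preallocates 32 pool pages.

    #include <stdio.h>

    #define PAGE_SHIFT          12              /* assumes 4 KiB pages */
    #define SHIFT_MB            (20 - PAGE_SHIFT)
    #define ROUND_MB_GB         ((1 << 10) - 1)
    #define SHIFT_MB_GB         10
    #define POOL_PAGES_PER_GB   16

    static unsigned long pool_size(unsigned long totalram_pages)
    {
        unsigned long gb = ((totalram_pages >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;

        return POOL_PAGES_PER_GB * gb;
    }

    int main(void)
    {
        /* 512 MiB, 2 GiB and 8 GiB of RAM expressed in 4 KiB pages */
        unsigned long sizes[] = { 512UL << 8, 2UL << 18, 8UL << 18 };
        int i;

        for (i = 0; i < 3; i++)
            printf("%lu pages of RAM -> pool_size = %lu pages\n",
                   sizes[i], pool_size(sizes[i]));
        return 0;
    }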
+2 -2
arch/x86/power/Makefile
··· 1 - obj-$(CONFIG_PM) += cpu.o 2 - obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o 1 + obj-$(CONFIG_PM_SLEEP) += cpu_$(BITS).o 2 + obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o
+1 -1
arch/x86/power/cpu.c arch/x86/power/cpu_32.c
··· 40 40 savesegment(ss, ctxt->ss); 41 41 42 42 /* 43 - * control registers 43 + * control registers 44 44 */ 45 45 ctxt->cr0 = read_cr0(); 46 46 ctxt->cr2 = read_cr2();
+169
arch/x86/power/hibernate_64.c
··· 1 + /* 2 + * Hibernation support for x86-64 3 + * 4 + * Distribute under GPLv2 5 + * 6 + * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl> 7 + * Copyright (c) 2002 Pavel Machek <pavel@suse.cz> 8 + * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> 9 + */ 10 + 11 + #include <linux/smp.h> 12 + #include <linux/suspend.h> 13 + #include <asm/proto.h> 14 + #include <asm/page.h> 15 + #include <asm/pgtable.h> 16 + #include <asm/mtrr.h> 17 + 18 + /* References to section boundaries */ 19 + extern const void __nosave_begin, __nosave_end; 20 + 21 + /* Defined in hibernate_asm_64.S */ 22 + extern int restore_image(void); 23 + 24 + /* 25 + * Address to jump to in the last phase of restore in order to get to the image 26 + * kernel's text (this value is passed in the image header). 27 + */ 28 + unsigned long restore_jump_address; 29 + 30 + /* 31 + * Value of the cr3 register from before the hibernation (this value is passed 32 + * in the image header). 33 + */ 34 + unsigned long restore_cr3; 35 + 36 + pgd_t *temp_level4_pgt; 37 + 38 + void *relocated_restore_code; 39 + 40 + static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) 41 + { 42 + long i, j; 43 + 44 + i = pud_index(address); 45 + pud = pud + i; 46 + for (; i < PTRS_PER_PUD; pud++, i++) { 47 + unsigned long paddr; 48 + pmd_t *pmd; 49 + 50 + paddr = address + i*PUD_SIZE; 51 + if (paddr >= end) 52 + break; 53 + 54 + pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); 55 + if (!pmd) 56 + return -ENOMEM; 57 + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); 58 + for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) { 59 + unsigned long pe; 60 + 61 + if (paddr >= end) 62 + break; 63 + pe = __PAGE_KERNEL_LARGE_EXEC | paddr; 64 + pe &= __supported_pte_mask; 65 + set_pmd(pmd, __pmd(pe)); 66 + } 67 + } 68 + return 0; 69 + } 70 + 71 + static int set_up_temporary_mappings(void) 72 + { 73 + unsigned long start, end, next; 74 + int error; 75 + 76 + temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC); 77 + if (!temp_level4_pgt) 78 + return -ENOMEM; 79 + 80 + /* It is safe to reuse the original kernel mapping */ 81 + set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), 82 + init_level4_pgt[pgd_index(__START_KERNEL_map)]); 83 + 84 + /* Set up the direct mapping from scratch */ 85 + start = (unsigned long)pfn_to_kaddr(0); 86 + end = (unsigned long)pfn_to_kaddr(end_pfn); 87 + 88 + for (; start < end; start = next) { 89 + pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC); 90 + if (!pud) 91 + return -ENOMEM; 92 + next = start + PGDIR_SIZE; 93 + if (next > end) 94 + next = end; 95 + if ((error = res_phys_pud_init(pud, __pa(start), __pa(next)))) 96 + return error; 97 + set_pgd(temp_level4_pgt + pgd_index(start), 98 + mk_kernel_pgd(__pa(pud))); 99 + } 100 + return 0; 101 + } 102 + 103 + int swsusp_arch_resume(void) 104 + { 105 + int error; 106 + 107 + /* We have got enough memory and from now on we cannot recover */ 108 + if ((error = set_up_temporary_mappings())) 109 + return error; 110 + 111 + relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC); 112 + if (!relocated_restore_code) 113 + return -ENOMEM; 114 + memcpy(relocated_restore_code, &core_restore_code, 115 + &restore_registers - &core_restore_code); 116 + 117 + restore_image(); 118 + return 0; 119 + } 120 + 121 + /* 122 + * pfn_is_nosave - check if given pfn is in the 'nosave' section 123 + */ 124 + 125 + int pfn_is_nosave(unsigned long pfn) 126 + { 127 + unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; 128 + unsigned long nosave_end_pfn = 
PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; 129 + return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); 130 + } 131 + 132 + struct restore_data_record { 133 + unsigned long jump_address; 134 + unsigned long cr3; 135 + unsigned long magic; 136 + }; 137 + 138 + #define RESTORE_MAGIC 0x0123456789ABCDEFUL 139 + 140 + /** 141 + * arch_hibernation_header_save - populate the architecture specific part 142 + * of a hibernation image header 143 + * @addr: address to save the data at 144 + */ 145 + int arch_hibernation_header_save(void *addr, unsigned int max_size) 146 + { 147 + struct restore_data_record *rdr = addr; 148 + 149 + if (max_size < sizeof(struct restore_data_record)) 150 + return -EOVERFLOW; 151 + rdr->jump_address = restore_jump_address; 152 + rdr->cr3 = restore_cr3; 153 + rdr->magic = RESTORE_MAGIC; 154 + return 0; 155 + } 156 + 157 + /** 158 + * arch_hibernation_header_restore - read the architecture specific data 159 + * from the hibernation image header 160 + * @addr: address to read the data from 161 + */ 162 + int arch_hibernation_header_restore(void *addr) 163 + { 164 + struct restore_data_record *rdr = addr; 165 + 166 + restore_jump_address = rdr->jump_address; 167 + restore_cr3 = rdr->cr3; 168 + return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL; 169 + }
+3 -3
arch/x86/power/suspend.c arch/x86/power/hibernate_32.c
··· 1 1 /* 2 - * Suspend support specific for i386 - temporary page tables 2 + * Hibernation support specific for i386 - temporary page tables 3 3 * 4 4 * Distribute under GPLv2 5 5 * ··· 13 13 #include <asm/page.h> 14 14 #include <asm/pgtable.h> 15 15 16 - /* Defined in arch/i386/power/swsusp.S */ 16 + /* Defined in hibernate_asm_32.S */ 17 17 extern int restore_image(void); 18 18 19 19 /* References to section boundaries */ ··· 23 23 pgd_t *resume_pg_dir; 24 24 25 25 /* The following three functions are based on the analogous code in 26 - * arch/i386/mm/init.c 26 + * arch/x86/mm/init_32.c 27 27 */ 28 28 29 29 /*
+1 -2
arch/x86/power/swsusp.S arch/x86/power/hibernate_asm_32.S
··· 1 1 .text 2 2 3 - /* Originally gcc generated, modified by hand 4 - * 3 + /* 5 4 * This may not use any stack, nor any variable that is not "NoSave": 6 5 * 7 6 * Its rewriting one kernel image with another. What is stack in "old"
+3 -3
arch/x86/xen/mmu.c
··· 58 58 59 59 xmaddr_t arbitrary_virt_to_machine(unsigned long address) 60 60 { 61 - int level; 61 + unsigned int level; 62 62 pte_t *pte = lookup_address(address, &level); 63 63 unsigned offset = address & PAGE_MASK; 64 64 ··· 71 71 { 72 72 pte_t *pte, ptev; 73 73 unsigned long address = (unsigned long)vaddr; 74 - int level; 74 + unsigned int level; 75 75 76 76 pte = lookup_address(address, &level); 77 77 BUG_ON(pte == NULL); ··· 86 86 { 87 87 pte_t *pte, ptev; 88 88 unsigned long address = (unsigned long)vaddr; 89 - int level; 89 + unsigned int level; 90 90 91 91 pte = lookup_address(address, &level); 92 92 BUG_ON(pte == NULL);
+5 -5
arch/x86/xen/time.c
··· 217 217 /* Get the CPU speed from Xen */ 218 218 unsigned long xen_cpu_khz(void) 219 219 { 220 - u64 cpu_khz = 1000000ULL << 32; 220 + u64 xen_khz = 1000000ULL << 32; 221 221 const struct vcpu_time_info *info = 222 222 &HYPERVISOR_shared_info->vcpu_info[0].time; 223 223 224 - do_div(cpu_khz, info->tsc_to_system_mul); 224 + do_div(xen_khz, info->tsc_to_system_mul); 225 225 if (info->tsc_shift < 0) 226 - cpu_khz <<= -info->tsc_shift; 226 + xen_khz <<= -info->tsc_shift; 227 227 else 228 - cpu_khz >>= info->tsc_shift; 228 + xen_khz >>= info->tsc_shift; 229 229 230 - return cpu_khz; 230 + return xen_khz; 231 231 } 232 232 233 233 /*
+1 -6
drivers/acpi/bus.c
··· 31 31 #include <linux/pm.h> 32 32 #include <linux/device.h> 33 33 #include <linux/proc_fs.h> 34 + #include <linux/acpi.h> 34 35 #ifdef CONFIG_X86 35 36 #include <asm/mpspec.h> 36 37 #endif ··· 40 39 41 40 #define _COMPONENT ACPI_BUS_COMPONENT 42 41 ACPI_MODULE_NAME("bus"); 43 - #ifdef CONFIG_X86 44 - extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger); 45 - #endif 46 42 47 43 struct acpi_device *acpi_root; 48 44 struct proc_dir_entry *acpi_root_dir; ··· 651 653 652 654 #ifdef CONFIG_X86 653 655 if (!acpi_ioapic) { 654 - extern u8 acpi_sci_flags; 655 - 656 656 /* compatible (0) means level (3) */ 657 657 if (!(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)) { 658 658 acpi_sci_flags &= ~ACPI_MADT_TRIGGER_MASK; ··· 660 664 acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt, 661 665 (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2); 662 666 } else { 663 - extern int acpi_sci_override_gsi; 664 667 /* 665 668 * now that acpi_gbl_FADT is initialized, 666 669 * update it with result from INT_SRC_OVR parsing
+2 -2
drivers/lguest/page_tables.c
··· 178 178 179 179 static void check_gpte(struct lg_cpu *cpu, pte_t gpte) 180 180 { 181 - if ((pte_flags(gpte) & (_PAGE_PWT|_PAGE_PSE)) 182 - || pte_pfn(gpte) >= cpu->lg->pfn_limit) 181 + if ((pte_flags(gpte) & _PAGE_PSE) || 182 + pte_pfn(gpte) >= cpu->lg->pfn_limit) 183 183 kill_guest(cpu, "bad page table entry"); 184 184 } 185 185
+4
include/asm-x86/acpi.h
··· 89 89 extern int acpi_skip_timer_override; 90 90 extern int acpi_use_timer_override; 91 91 92 + extern u8 acpi_sci_flags; 93 + extern int acpi_sci_override_gsi; 94 + void acpi_pic_sci_set_trigger(unsigned int, u16); 95 + 92 96 static inline void disable_acpi(void) 93 97 { 94 98 acpi_disabled = 1;
+2
include/asm-x86/cacheflush.h
··· 44 44 45 45 void clflush_cache_range(void *addr, unsigned int size); 46 46 47 + void cpa_init(void); 48 + 47 49 #ifdef CONFIG_DEBUG_RODATA 48 50 void mark_rodata_ro(void); 49 51 #endif
+7 -2
include/asm-x86/geode.h
··· 206 206 return inw(base + reg + (timer * 8)); 207 207 } 208 208 209 - extern int __init geode_mfgpt_detect(void); 210 209 extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable); 211 210 extern int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable); 212 - extern int geode_mfgpt_alloc_timer(int timer, int domain, struct module *owner); 211 + extern int geode_mfgpt_alloc_timer(int timer, int domain); 213 212 214 213 #define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1) 215 214 #define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0) 215 + 216 + #ifdef CONFIG_GEODE_MFGPT_TIMER 217 + extern int __init mfgpt_timer_setup(void); 218 + #else 219 + static inline int mfgpt_timer_setup(void) { return 0; } 220 + #endif 216 221 217 222 #endif
-1
include/asm-x86/page_32.h
··· 48 48 typedef unsigned long phys_addr_t; 49 49 50 50 typedef union { pteval_t pte, pte_low; } pte_t; 51 - typedef pte_t boot_pte_t; 52 51 53 52 #endif /* __ASSEMBLY__ */ 54 53 #endif /* CONFIG_X86_PAE */
+1 -1
include/asm-x86/pgtable.h
··· 255 255 * NOTE: the return type is pte_t but if the pmd is PSE then we return it 256 256 * as a pte too. 257 257 */ 258 - extern pte_t *lookup_address(unsigned long address, int *level); 258 + extern pte_t *lookup_address(unsigned long address, unsigned int *level); 259 259 260 260 /* local pte updates need not use xchg for locking */ 261 261 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
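The prototype change ripples to every caller (see the xen/mmu.c hunk earlier in this merge): the level out-parameter must now be declared unsigned int. A minimal sketch of the call pattern under that assumption, with a hypothetical caller:

	static void note_mapping(unsigned long address)
	{
		unsigned int level;	/* was plain 'int' before this series */
		pte_t *pte = lookup_address(address, &level);

		if (pte == NULL)
			return;		/* address not mapped */

		/* 'level' reports which page-table level the entry came
		 * from, so callers can tell a 4k pte from a large-page
		 * mapping before poking at it. */
	}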
-4
include/asm-x86/pgtable_32.h
··· 52 52 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) 53 53 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) 54 54 55 - #define TWOLEVEL_PGDIR_SHIFT 22 56 - #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT) 57 - #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS) 58 - 59 55 /* Just any arbitrary offset to the start of the vmalloc VM area: the 60 56 * current 8MB value just means that there will be a 8MB "hole" after the 61 57 * physical memory until the kernel virtual memory starts. That means that
-9
include/linux/compiler-gcc4.h
··· 5 5 /* These definitions are for GCC v4.x. */ 6 6 #include <linux/compiler-gcc.h> 7 7 8 - #ifdef CONFIG_FORCED_INLINING 9 - # undef inline 10 - # undef __inline__ 11 - # undef __inline 12 - # define inline inline __attribute__((always_inline)) 13 - # define __inline__ __inline__ __attribute__((always_inline)) 14 - # define __inline __inline __attribute__((always_inline)) 15 - #endif 16 - 17 8 #define __used __attribute__((__used__)) 18 9 #define __must_check __attribute__((warn_unused_result)) 19 10 #define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
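With CONFIG_FORCED_INLINING gone, 'inline' under gcc 4 is back to being an ordinary hint; code that genuinely must be inlined should say so explicitly. A short sketch, assuming the __always_inline helper from include/linux/compiler-gcc.h:

	/* gcc 4 is now free to ignore this hint: */
	static inline int twice(int x)
	{
		return x * 2;
	}

	/* still forced, independent of any config option: */
	static __always_inline int twice_forced(int x)
	{
		return x * 2;
	}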
+1 -1
init/Kconfig
··· 587 587 disabled, and can be overriden runtime by setting 588 588 /proc/sys/kernel/randomize_va_space to 2. 589 589 590 - On non-ancient distros (post-2000 ones) Y is usually a safe choice. 590 + On non-ancient distros (post-2000 ones) N is usually a safe choice. 591 591 592 592 config BASE_FULL 593 593 default y
+1 -1
init/main.c
··· 558 558 preempt_disable(); 559 559 build_all_zonelists(); 560 560 page_alloc_init(); 561 - enable_debug_pagealloc(); 562 561 printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line); 563 562 parse_early_param(); 564 563 parse_args("Booting kernel", static_command_line, __start___param, ··· 613 614 vfs_caches_init_early(); 614 615 cpuset_init_early(); 615 616 mem_init(); 617 + enable_debug_pagealloc(); 616 618 cpu_hotplug_init(); 617 619 kmem_cache_init(); 618 620 setup_per_cpu_pageset();
-14
lib/Kconfig.debug
··· 465 465 some architectures or if you use external debuggers. 466 466 If you don't debug the kernel, you can say N. 467 467 468 - config FORCED_INLINING 469 - bool "Force gcc to inline functions marked 'inline'" 470 - depends on DEBUG_KERNEL 471 - default y 472 - help 473 - This option determines if the kernel forces gcc to inline the functions 474 - developers have marked 'inline'. Doing so takes away freedom from gcc to 475 - do what it thinks is best, which is desirable for the gcc 3.x series of 476 - compilers. The gcc 4.x series have a rewritten inlining algorithm and 477 - disabling this option will generate a smaller kernel there. Hopefully 478 - this algorithm is so good that allowing gcc4 to make the decision can 479 - become the default in the future, until then this option is there to 480 - test gcc for this. 481 - 482 468 config BOOT_PRINTK_DELAY 483 469 bool "Delay each boot printk message by N milliseconds" 484 470 depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY
+27 -22
lib/vsprintf.c
··· 26 26 #include <asm/page.h> /* for PAGE_SIZE */ 27 27 #include <asm/div64.h> 28 28 29 + /* Works only for digits and letters, but small and fast */ 30 + #define TOLOWER(x) ((x) | 0x20) 31 + 29 32 /** 30 33 * simple_strtoul - convert a string to an unsigned long 31 34 * @cp: The start of the string ··· 44 41 if (*cp == '0') { 45 42 base = 8; 46 43 cp++; 47 - if ((toupper(*cp) == 'X') && isxdigit(cp[1])) { 44 + if ((TOLOWER(*cp) == 'x') && isxdigit(cp[1])) { 48 45 cp++; 49 46 base = 16; 50 47 } 51 48 } 52 49 } else if (base == 16) { 53 - if (cp[0] == '0' && toupper(cp[1]) == 'X') 50 + if (cp[0] == '0' && TOLOWER(cp[1]) == 'x') 54 51 cp += 2; 55 52 } 56 53 while (isxdigit(*cp) && 57 - (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) { 54 + (value = isdigit(*cp) ? *cp-'0' : TOLOWER(*cp)-'a'+10) < base) { 58 55 result = result*base + value; 59 56 cp++; 60 57 } ··· 95 92 if (*cp == '0') { 96 93 base = 8; 97 94 cp++; 98 - if ((toupper(*cp) == 'X') && isxdigit(cp[1])) { 95 + if ((TOLOWER(*cp) == 'x') && isxdigit(cp[1])) { 99 96 cp++; 100 97 base = 16; 101 98 } 102 99 } 103 100 } else if (base == 16) { 104 - if (cp[0] == '0' && toupper(cp[1]) == 'X') 101 + if (cp[0] == '0' && TOLOWER(cp[1]) == 'x') 105 102 cp += 2; 106 103 } 107 - while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp-'0' : (islower(*cp) 108 - ? toupper(*cp) : *cp)-'A'+10) < base) { 104 + while (isxdigit(*cp) 105 + && (value = isdigit(*cp) ? *cp-'0' : TOLOWER(*cp)-'a'+10) < base) { 109 106 result = result*base + value; 110 107 cp++; 111 108 } ··· 363 360 #define PLUS 4 /* show plus */ 364 361 #define SPACE 8 /* space if plus */ 365 362 #define LEFT 16 /* left justified */ 366 - #define SPECIAL 32 /* 0x */ 367 - #define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ 363 + #define SMALL 32 /* Must be 32 == 0x20 */ 364 + #define SPECIAL 64 /* 0x */ 368 365 369 366 static char *number(char *buf, char *end, unsigned long long num, int base, int size, int precision, int type) 370 367 { 371 - char sign,tmp[66]; 372 - const char *digits; 373 - /* we are called with base 8, 10 or 16, only, thus don't need "g..." */ 374 - static const char small_digits[] = "0123456789abcdefx"; /* "ghijklmnopqrstuvwxyz"; */ 375 - static const char large_digits[] = "0123456789ABCDEFX"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ 368 + /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ 369 + static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ 370 + 371 + char tmp[66]; 372 + char sign; 373 + char locase; 376 374 int need_pfx = ((type & SPECIAL) && base != 10); 377 375 int i; 378 376 379 - digits = (type & LARGE) ? large_digits : small_digits; 377 + /* locase = 0 or 0x20. 
ORing digits or letters with 'locase' 378 + * produces same digits or (maybe lowercased) letters */ 379 + locase = (type & SMALL); 380 380 if (type & LEFT) 381 381 type &= ~ZEROPAD; 382 - if (base < 2 || base > 36) 383 - return NULL; 384 382 sign = 0; 385 383 if (type & SIGN) { 386 384 if ((signed long long) num < 0) { ··· 408 404 tmp[i++] = '0'; 409 405 /* Generic code, for any base: 410 406 else do { 411 - tmp[i++] = digits[do_div(num,base)]; 407 + tmp[i++] = (digits[do_div(num,base)] | locase); 412 408 } while (num != 0); 413 409 */ 414 410 else if (base != 10) { /* 8 or 16 */ ··· 416 412 int shift = 3; 417 413 if (base == 16) shift = 4; 418 414 do { 419 - tmp[i++] = digits[((unsigned char)num) & mask]; 415 + tmp[i++] = (digits[((unsigned char)num) & mask] | locase); 420 416 num >>= shift; 421 417 } while (num); 422 418 } else { /* base 10 */ ··· 448 444 ++buf; 449 445 if (base == 16) { 450 446 if (buf < end) 451 - *buf = digits[16]; /* for arbitrary base: digits[33]; */ 447 + *buf = ('X' | locase); 452 448 ++buf; 453 449 } 454 450 } ··· 648 644 continue; 649 645 650 646 case 'p': 647 + flags |= SMALL; 651 648 if (field_width == -1) { 652 649 field_width = 2*sizeof(void *); 653 650 flags |= ZEROPAD; ··· 685 680 base = 8; 686 681 break; 687 682 688 - case 'X': 689 - flags |= LARGE; 690 683 case 'x': 684 + flags |= SMALL; 685 + case 'X': 691 686 base = 16; 692 687 break; 693 688
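Both TOLOWER and the SMALL/locase flag lean on the same ASCII property: setting bit 0x20 maps 'A'..'Z' onto 'a'..'z' and leaves '0'..'9' (and already-lowercase letters) unchanged, so one table of uppercase hex digits can serve %x, %X and %p. A standalone illustration of the trick, not kernel code:

	#include <stdio.h>

	#define TOLOWER(x) ((x) | 0x20)	/* ASCII letters and digits only */

	int main(void)
	{
		char locase = 0x20;		/* 0 for %X, 0x20 for %x and %p */

		printf("%c %c %c\n", TOLOWER('F'), TOLOWER('f'), TOLOWER('7'));
		/* -> f f 7 */
		printf("%c %c\n", 'A' | locase, 'A' | 0);
		/* -> a A : the same digit table yields either case */
		return 0;
	}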