Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"A couple of fixes and updates related to x86:

- Fix the W+X check regression on XEN

- The real fix for the low identity map trainwreck

- Probe legacy PIC early instead of unconditionally allocating legacy
irqs

- Add cpu verification to long mode entry

 - Adjust the cache topology for AMD Fam17h systems

- Let Merrifield use the TSC across S3"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/cpu: Call verify_cpu() after having entered long mode too
x86/setup: Fix low identity map for >= 2GB kernel range
x86/mm: Skip the hypervisor range when walking PGD
x86/AMD: Fix last level cache topology for AMD Fam17h systems
x86/irq: Probe for PIC presence before allocating descs for legacy IRQs
x86/cpu/intel: Enable X86_FEATURE_NONSTOP_TSC_S3 for Merrifield

+73 -16
+1
arch/x86/include/asm/i8259.h
··· 60 60 void (*mask_all)(void); 61 61 void (*restore_mask)(void); 62 62 void (*init)(int auto_eoi); 63 + int (*probe)(void); 63 64 int (*irq_pending)(unsigned int irq); 64 65 void (*make_irq)(unsigned int irq); 65 66 };
+5 -1
arch/x86/kernel/apic/vector.c
··· 361 361 if (nr < nr_irqs) 362 362 nr_irqs = nr; 363 363 364 - return nr_legacy_irqs(); 364 + /* 365 + * We don't know if PIC is present at this point so we need to do 366 + * probe() to get the right number of legacy IRQs. 367 + */ 368 + return legacy_pic->probe(); 365 369 } 366 370 367 371 #ifdef CONFIG_X86_IO_APIC
+13
arch/x86/kernel/cpu/amd.c
··· 352 352 #ifdef CONFIG_SMP 353 353 unsigned bits; 354 354 int cpu = smp_processor_id(); 355 + unsigned int socket_id, core_complex_id; 355 356 356 357 bits = c->x86_coreid_bits; 357 358 /* Low order bits define the core id (index of core in socket) */ ··· 362 361 /* use socket ID also for last level cache */ 363 362 per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; 364 363 amd_get_topology(c); 364 + 365 + /* 366 + * Fix percpu cpu_llc_id here as LLC topology is different 367 + * for Fam17h systems. 368 + */ 369 + if (c->x86 != 0x17 || !cpuid_edx(0x80000006)) 370 + return; 371 + 372 + socket_id = (c->apicid >> bits) - 1; 373 + core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3; 374 + 375 + per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id; 365 376 #endif 366 377 } 367 378
+1
arch/x86/kernel/cpu/intel.c
··· 97 97 switch (c->x86_model) { 98 98 case 0x27: /* Penwell */ 99 99 case 0x35: /* Cloverview */ 100 + case 0x4a: /* Merrifield */ 100 101 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); 101 102 break; 102 103 default:
+8
arch/x86/kernel/head_64.S
··· 65 65 * tables and then reload them. 66 66 */ 67 67 68 + /* Sanitize CPU configuration */ 69 + call verify_cpu 70 + 68 71 /* 69 72 * Compute the delta between the address I am compiled to run at and the 70 73 * address I am actually running at. ··· 176 173 * to have any identity mapped pages in the kernel page table 177 174 * after the boot processor executes this code. 178 175 */ 176 + 177 + /* Sanitize CPU configuration */ 178 + call verify_cpu 179 179 180 180 movq $(init_level4_pgt - __START_KERNEL_map), %rax 181 181 1: ··· 293 287 pushq $__KERNEL_CS # set correct cs 294 288 pushq %rax # target address in negative space 295 289 lretq 290 + 291 + #include "verify_cpu.S" 296 292 297 293 #ifdef CONFIG_HOTPLUG_CPU 298 294 /*
+21 -8
arch/x86/kernel/i8259.c
··· 295 295 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 296 296 } 297 297 298 - static void init_8259A(int auto_eoi) 298 + static int probe_8259A(void) 299 299 { 300 300 unsigned long flags; 301 301 unsigned char probe_val = ~(1 << PIC_CASCADE_IR); 302 302 unsigned char new_val; 303 - 304 - i8259A_auto_eoi = auto_eoi; 305 - 306 - raw_spin_lock_irqsave(&i8259A_lock, flags); 307 - 308 303 /* 309 304 * Check to see if we have a PIC. 310 305 * Mask all except the cascade and read ··· 307 312 * have a PIC, we will read 0xff as opposed to the 308 313 * value we wrote. 309 314 */ 315 + raw_spin_lock_irqsave(&i8259A_lock, flags); 316 + 310 317 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ 311 318 outb(probe_val, PIC_MASTER_IMR); 312 319 new_val = inb(PIC_MASTER_IMR); 313 320 if (new_val != probe_val) { 314 321 printk(KERN_INFO "Using NULL legacy PIC\n"); 315 322 legacy_pic = &null_legacy_pic; 316 - raw_spin_unlock_irqrestore(&i8259A_lock, flags); 317 - return; 318 323 } 324 + 325 + raw_spin_unlock_irqrestore(&i8259A_lock, flags); 326 + return nr_legacy_irqs(); 327 + } 328 + 329 + static void init_8259A(int auto_eoi) 330 + { 331 + unsigned long flags; 332 + 333 + i8259A_auto_eoi = auto_eoi; 334 + 335 + raw_spin_lock_irqsave(&i8259A_lock, flags); 319 336 320 337 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ 321 338 ··· 386 379 { 387 380 return 0; 388 381 } 382 + static int legacy_pic_probe(void) 383 + { 384 + return 0; 385 + } 389 386 390 387 struct legacy_pic null_legacy_pic = { 391 388 .nr_legacy_irqs = 0, ··· 399 388 .mask_all = legacy_pic_noop, 400 389 .restore_mask = legacy_pic_noop, 401 390 .init = legacy_pic_int_noop, 391 + .probe = legacy_pic_probe, 402 392 .irq_pending = legacy_pic_irq_pending_noop, 403 393 .make_irq = legacy_pic_uint_noop, 404 394 }; ··· 412 400 .mask_all = mask_8259A, 413 401 .restore_mask = unmask_8259A, 414 402 .init = init_8259A, 403 + .probe = probe_8259A, 415 404 .irq_pending = i8259A_irq_pending, 416 405 .make_irq = 
make_8259A_irq, 417 406 };
+1 -1
arch/x86/kernel/setup.c
··· 1188 1188 */ 1189 1189 clone_pgd_range(initial_page_table, 1190 1190 swapper_pg_dir + KERNEL_PGD_BOUNDARY, 1191 - KERNEL_PGD_PTRS); 1191 + min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); 1192 1192 #endif 1193 1193 1194 1194 tboot_probe();
+7 -5
arch/x86/kernel/verify_cpu.S
··· 34 34 #include <asm/msr-index.h> 35 35 36 36 verify_cpu: 37 - pushfl # Save caller passed flags 38 - pushl $0 # Kill any dangerous flags 39 - popfl 37 + pushf # Save caller passed flags 38 + push $0 # Kill any dangerous flags 39 + popf 40 40 41 + #ifndef __x86_64__ 41 42 pushfl # standard way to check for cpuid 42 43 popl %eax 43 44 movl %eax,%ebx ··· 49 48 popl %eax 50 49 cmpl %eax,%ebx 51 50 jz verify_cpu_no_longmode # cpu has no cpuid 51 + #endif 52 52 53 53 movl $0x0,%eax # See if cpuid 1 is implemented 54 54 cpuid ··· 132 130 jmp verify_cpu_sse_test # try again 133 131 134 132 verify_cpu_no_longmode: 135 - popfl # Restore caller passed flags 133 + popf # Restore caller passed flags 136 134 movl $1,%eax 137 135 ret 138 136 verify_cpu_sse_ok: 139 - popfl # Restore caller passed flags 137 + popf # Restore caller passed flags 140 138 xorl %eax, %eax 141 139 ret
+16 -1
arch/x86/mm/dump_pagetables.c
··· 358 358 #define pgd_none(a) pud_none(__pud(pgd_val(a))) 359 359 #endif 360 360 361 + #ifdef CONFIG_X86_64 362 + static inline bool is_hypervisor_range(int idx) 363 + { 364 + /* 365 + * ffff800000000000 - ffff87ffffffffff is reserved for 366 + * the hypervisor. 367 + */ 368 + return paravirt_enabled() && 369 + (idx >= pgd_index(__PAGE_OFFSET) - 16) && 370 + (idx < pgd_index(__PAGE_OFFSET)); 371 + } 372 + #else 373 + static inline bool is_hypervisor_range(int idx) { return false; } 374 + #endif 375 + 361 376 static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, 362 377 bool checkwx) 363 378 { ··· 396 381 397 382 for (i = 0; i < PTRS_PER_PGD; i++) { 398 383 st.current_address = normalize_addr(i * PGD_LEVEL_MULT); 399 - if (!pgd_none(*start)) { 384 + if (!pgd_none(*start) && !is_hypervisor_range(i)) { 400 385 if (pgd_large(*start) || !pgd_present(*start)) { 401 386 prot = pgd_flags(*start); 402 387 note_page(m, &st, __pgprot(prot), 1);