Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86: use the right protections for split-up pagetables
x86, vmi: TSC going backwards check in vmi clocksource

+9 -11
+4 -1
arch/x86/kernel/vmiclock_32.c
@@ -283,11 +283,14 @@
 #endif
 
 /** vmi clocksource */
+static struct clocksource clocksource_vmi;
 
 static cycle_t read_real_cycles(void)
 {
-	return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
+	cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
+	return ret >= clocksource_vmi.cycle_last ?
+		ret : clocksource_vmi.cycle_last;
 }
 
 static struct clocksource clocksource_vmi = {
+5 -10
arch/x86/mm/pageattr.c
@@ -508,18 +508,13 @@
 #endif
 
 	/*
-	 * Install the new, split up pagetable. Important details here:
+	 * Install the new, split up pagetable.
 	 *
-	 * On Intel the NX bit of all levels must be cleared to make a
-	 * page executable. See section 4.13.2 of Intel 64 and IA-32
-	 * Architectures Software Developer's Manual).
-	 *
-	 * Mark the entry present. The current mapping might be
-	 * set to not present, which we preserved above.
+	 * We use the standard kernel pagetable protections for the new
+	 * pagetable protections, the actual ptes set above control the
+	 * primary protection behavior:
 	 */
-	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
-	pgprot_val(ref_prot) |= _PAGE_PRESENT;
-	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
+	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
 	base = NULL;
 
 out_unlock: