Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
"Two fixes:

- A PCID-related revert that fixes power management and performance
  regressions.

- The module loader robustification and sanity-check commit is rather
  fresh, but it looked like a good idea to apply because of the hidden
  data corruption such invalid modules could cause"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/module: Detect and skip invalid relocations
Revert "x86/mm: Stop calling leave_mm() in idle code"

Changed files
+38 -7
arch/ia64/include/asm/acpi.h (+2)

···
 	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
 }
 
+#define acpi_unlazy_tlb(x)
+
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)	\
arch/x86/include/asm/acpi.h (+2)

···
 extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 
+#define acpi_unlazy_tlb(x)	leave_mm(x)
+
 #ifdef CONFIG_ACPI_APEI
 static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
 {
arch/x86/kernel/module.c (+13)

···
 		case R_X86_64_NONE:
 			break;
 		case R_X86_64_64:
+			if (*(u64 *)loc != 0)
+				goto invalid_relocation;
 			*(u64 *)loc = val;
 			break;
 		case R_X86_64_32:
+			if (*(u32 *)loc != 0)
+				goto invalid_relocation;
 			*(u32 *)loc = val;
 			if (val != *(u32 *)loc)
 				goto overflow;
 			break;
 		case R_X86_64_32S:
+			if (*(s32 *)loc != 0)
+				goto invalid_relocation;
 			*(s32 *)loc = val;
 			if ((s64)val != *(s32 *)loc)
 				goto overflow;
 			break;
 		case R_X86_64_PC32:
+			if (*(u32 *)loc != 0)
+				goto invalid_relocation;
 			val -= (u64)loc;
 			*(u32 *)loc = val;
 #if 0
···
 		}
 	}
 	return 0;
+
+invalid_relocation:
+	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
+	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
+	return -ENOEXEC;
 
 overflow:
 	pr_err("overflow in relocation type %d val %Lx\n",
arch/x86/mm/tlb.c (+14 -3)

···
 
 	switch_mm(NULL, &init_mm, NULL);
 }
+EXPORT_SYMBOL_GPL(leave_mm);
 
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	       struct task_struct *tsk)
···
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
 		write_cr3(build_cr3(next, new_asid));
-		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
-				TLB_FLUSH_ALL);
+
+		/*
+		 * NB: This gets called via leave_mm() in the idle path
+		 * where RCU functions differently.  Tracing normally
+		 * uses RCU, so we need to use the _rcuidle variant.
+		 *
+		 * (There is no good reason for this.  The idle code should
+		 *  be rearranged to call this before rcu_idle_enter().)
+		 */
+		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 	} else {
 		/* The new ASID is already up to date. */
 		write_cr3(build_cr3_noflush(next, new_asid));
-		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
+
+		/* See above wrt _rcuidle. */
+		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
 	}
 
 	this_cpu_write(cpu_tlbstate.loaded_mm, next);
drivers/acpi/processor_idle.c (+2)

···
 static void acpi_idle_enter_bm(struct acpi_processor *pr,
 			       struct acpi_processor_cx *cx, bool timer_bc)
 {
+	acpi_unlazy_tlb(smp_processor_id());
+
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
drivers/idle/intel_idle.c (+5 -4)

···
 	struct cpuidle_state *state = &drv->states[index];
 	unsigned long eax = flg2MWAIT(state->flags);
 	unsigned int cstate;
+	int cpu = smp_processor_id();
 
 	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 
 	/*
-	 * NB: if CPUIDLE_FLAG_TLB_FLUSHED is set, this idle transition
-	 * will probably flush the TLB.  It's not guaranteed to flush
-	 * the TLB, though, so it's not clear that we can do anything
-	 * useful with this knowledge.
+	 * leave_mm() to avoid costly and often unnecessary wakeups
+	 * for flushing the user TLB's associated with the active mm.
 	 */
+	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+		leave_mm(cpu);
 
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		tick_broadcast_enter();
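For context on the restored comment about "costly and often unnecessary wakeups": once an idle CPU has called leave_mm(), it no longer holds user translations for the old mm, so a later TLB flush of that mm can skip it instead of waking it with an IPI. The toy model below (entirely hypothetical names, not kernel code) sketches that bookkeeping under those assumptions:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Toy model: which CPUs still hold user translations for the active mm. */
static bool cpu_holds_user_mm[NR_CPUS] = { true, true, true, true };

static void enter_deep_idle(int cpu, bool tlb_flushed_state)
{
	if (tlb_flushed_state) {
		/* models leave_mm(cpu): the C-state wipes the TLB anyway */
		cpu_holds_user_mm[cpu] = false;
		printf("cpu%d: dropped user mm before idle\n", cpu);
	}
}

static void flush_user_mm(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_holds_user_mm[cpu])
			printf("cpu%d: must be woken with an IPI to flush\n", cpu);
		else
			printf("cpu%d: lazy, no wakeup needed\n", cpu);
	}
}

int main(void)
{
	enter_deep_idle(2, true);	/* a CPUIDLE_FLAG_TLB_FLUSHED state */
	enter_deep_idle(3, false);	/* a shallow state keeps the TLB */
	flush_user_mm();
	return 0;
}

The kernel's real bookkeeping lives in cpu_tlbstate and the mm handling rather than a flag array, but the effect the revert restores is the same: deep C-states that flush the TLB anyway are taken as an opportunity to drop the user mm early.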