x86, suspend: Avoid unnecessary smp alternatives switch during suspend/resume

During suspend, we disable all the non-boot CPUs. And during resume we bring
them all back again. So there is no need to do alternatives_smp_switch() in between.

On my Core 2 based laptop, this speeds up the suspend path by 15 msec and the
resume path by 5 msec (suspend/resume speed-up differences can be attributed
to the different P-states that the CPU is in during suspend/resume).

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <1290557500.4946.8.camel@sbsiddha-MOBL3.sc.intel.com>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

authored by Suresh Siddha and committed by H. Peter Anvin 3fb82d56 cf7d7e5a

+28 -1
+1
arch/x86/include/asm/alternative.h
··· 66 66 extern void alternatives_smp_module_del(struct module *mod); 67 67 extern void alternatives_smp_switch(int smp); 68 68 extern int alternatives_text_reserved(void *start, void *end); 69 + extern bool skip_smp_alternatives; 69 70 #else 70 71 static inline void alternatives_smp_module_add(struct module *mod, char *name, 71 72 void *locks, void *locks_end,
+2 -1
arch/x86/kernel/alternative.c
··· 353 353 mutex_unlock(&smp_alt); 354 354 } 355 355 356 + bool skip_smp_alternatives; 356 357 void alternatives_smp_switch(int smp) 357 358 { 358 359 struct smp_alt_module *mod; ··· 369 368 printk("lockdep: fixing up alternatives.\n"); 370 369 #endif 371 370 372 - if (noreplace_smp || smp_alt_once) 371 + if (noreplace_smp || smp_alt_once || skip_smp_alternatives) 373 372 return; 374 373 BUG_ON(!smp && (num_online_cpus() > 1)); 375 374
+14
arch/x86/kernel/smpboot.c
··· 1166 1166 preempt_enable(); 1167 1167 } 1168 1168 1169 + void arch_disable_nonboot_cpus_begin(void) 1170 + { 1171 + /* 1172 + * Avoid the smp alternatives switch during the disable_nonboot_cpus(). 1173 + * In the suspend path, we will be back in the SMP mode shortly anyways. 1174 + */ 1175 + skip_smp_alternatives = true; 1176 + } 1177 + 1178 + void arch_disable_nonboot_cpus_end(void) 1179 + { 1180 + skip_smp_alternatives = false; 1181 + } 1182 + 1169 1183 void arch_enable_nonboot_cpus_begin(void) 1170 1184 { 1171 1185 set_mtrr_aps_delayed_init();
+11
kernel/cpu.c
··· 386 386 #ifdef CONFIG_PM_SLEEP_SMP 387 387 static cpumask_var_t frozen_cpus; 388 388 389 + void __weak arch_disable_nonboot_cpus_begin(void) 390 + { 391 + } 392 + 393 + void __weak arch_disable_nonboot_cpus_end(void) 394 + { 395 + } 396 + 389 397 int disable_nonboot_cpus(void) 390 398 { 391 399 int cpu, first_cpu, error = 0; ··· 405 397 * with the userspace trying to use the CPU hotplug at the same time 406 398 */ 407 399 cpumask_clear(frozen_cpus); 400 + arch_disable_nonboot_cpus_begin(); 408 401 409 402 printk("Disabling non-boot CPUs ...\n"); 410 403 for_each_online_cpu(cpu) { ··· 420 411 break; 421 412 } 422 413 } 414 + 415 + arch_disable_nonboot_cpus_end(); 423 416 424 417 if (!error) { 425 418 BUG_ON(num_online_cpus() > 1);