Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] i386: Handle missing local APIC timer interrupts on C3 state

Whenever we see that a CPU is capable of C3 (during ACPI cstate init), we
disable the local APIC timer and switch to using a broadcast from the external
timer interrupt (IRQ 0) instead. This is needed because Intel CPUs stop the
local APIC timer in C3. This is currently only enabled for Intel CPUs.

The patch below adds the code for i386 and also the ACPI hunk.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by

Venkatesh Pallipadi and committed by
Linus Torvalds
6eb0a0fd 5a07a30c

+102 -2
+76 -2
arch/i386/kernel/apic.c
··· 26 26 #include <linux/kernel_stat.h> 27 27 #include <linux/sysdev.h> 28 28 #include <linux/cpu.h> 29 + #include <linux/module.h> 29 30 30 31 #include <asm/atomic.h> 31 32 #include <asm/smp.h> ··· 38 37 #include <asm/i8253.h> 39 38 40 39 #include <mach_apic.h> 40 + #include <mach_ipi.h> 41 41 42 42 #include "io_ports.h" 43 + 44 + /* 45 + * cpu_mask that denotes the CPUs that needs timer interrupt coming in as 46 + * IPIs in place of local APIC timers 47 + */ 48 + static cpumask_t timer_bcast_ipi; 43 49 44 50 /* 45 51 * Knob to control our willingness to enable the local APIC. ··· 939 931 static void __setup_APIC_LVTT(unsigned int clocks) 940 932 { 941 933 unsigned int lvtt_value, tmp_value, ver; 934 + int cpu = smp_processor_id(); 942 935 943 936 ver = GET_APIC_VERSION(apic_read(APIC_LVR)); 944 937 lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR; 945 938 if (!APIC_INTEGRATED(ver)) 946 939 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV); 940 + 941 + if (cpu_isset(cpu, timer_bcast_ipi)) 942 + lvtt_value |= APIC_LVT_MASKED; 943 + 947 944 apic_write_around(APIC_LVTT, lvtt_value); 948 945 949 946 /* ··· 1081 1068 setup_APIC_timer(calibration_result); 1082 1069 } 1083 1070 1084 - void __devinit disable_APIC_timer(void) 1071 + void disable_APIC_timer(void) 1085 1072 { 1086 1073 if (using_apic_timer) { 1087 1074 unsigned long v; ··· 1093 1080 1094 1081 void enable_APIC_timer(void) 1095 1082 { 1096 - if (using_apic_timer) { 1083 + int cpu = smp_processor_id(); 1084 + 1085 + if (using_apic_timer && 1086 + !cpu_isset(cpu, timer_bcast_ipi)) { 1097 1087 unsigned long v; 1098 1088 1099 1089 v = apic_read(APIC_LVTT); 1100 1090 apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED); 1101 1091 } 1102 1092 } 1093 + 1094 + void switch_APIC_timer_to_ipi(void *cpumask) 1095 + { 1096 + cpumask_t mask = *(cpumask_t *)cpumask; 1097 + int cpu = smp_processor_id(); 1098 + 1099 + if (cpu_isset(cpu, mask) && 1100 + !cpu_isset(cpu, timer_bcast_ipi)) { 1101 + 
disable_APIC_timer(); 1102 + cpu_set(cpu, timer_bcast_ipi); 1103 + } 1104 + } 1105 + EXPORT_SYMBOL(switch_APIC_timer_to_ipi); 1106 + 1107 + void switch_ipi_to_APIC_timer(void *cpumask) 1108 + { 1109 + cpumask_t mask = *(cpumask_t *)cpumask; 1110 + int cpu = smp_processor_id(); 1111 + 1112 + if (cpu_isset(cpu, mask) && 1113 + cpu_isset(cpu, timer_bcast_ipi)) { 1114 + cpu_clear(cpu, timer_bcast_ipi); 1115 + enable_APIC_timer(); 1116 + } 1117 + } 1118 + EXPORT_SYMBOL(switch_ipi_to_APIC_timer); 1103 1119 1104 1120 #undef APIC_DIVISOR 1105 1121 ··· 1192 1150 irq_enter(); 1193 1151 smp_local_timer_interrupt(regs); 1194 1152 irq_exit(); 1153 + } 1154 + 1155 + #ifndef CONFIG_SMP 1156 + static void up_apic_timer_interrupt_call(struct pt_regs *regs) 1157 + { 1158 + int cpu = smp_processor_id(); 1159 + 1160 + /* 1161 + * the NMI deadlock-detector uses this. 1162 + */ 1163 + per_cpu(irq_stat, cpu).apic_timer_irqs++; 1164 + 1165 + smp_local_timer_interrupt(regs); 1166 + } 1167 + #endif 1168 + 1169 + void smp_send_timer_broadcast_ipi(struct pt_regs *regs) 1170 + { 1171 + cpumask_t mask; 1172 + 1173 + cpus_and(mask, cpu_online_map, timer_bcast_ipi); 1174 + if (!cpus_empty(mask)) { 1175 + #ifdef CONFIG_SMP 1176 + send_IPI_mask(mask, LOCAL_TIMER_VECTOR); 1177 + #else 1178 + /* 1179 + * We can directly call the apic timer interrupt handler 1180 + * in UP case. Minus all irq related functions 1181 + */ 1182 + up_apic_timer_interrupt_call(regs); 1183 + #endif 1184 + } 1195 1185 } 1196 1186 1197 1187 int setup_profiling_timer(unsigned int multiplier)
+6
arch/i386/kernel/time.c
··· 302 302 do_timer_interrupt(irq, regs); 303 303 304 304 write_sequnlock(&xtime_lock); 305 + 306 + #ifdef CONFIG_X86_LOCAL_APIC 307 + if (using_apic_timer) 308 + smp_send_timer_broadcast_ipi(regs); 309 + #endif 310 + 305 311 return IRQ_HANDLED; 306 312 } 307 313
+15
drivers/acpi/processor_idle.c
··· 843 843 unsigned int i; 844 844 unsigned int working = 0; 845 845 846 + #ifdef ARCH_APICTIMER_STOPS_ON_C3 847 + struct cpuinfo_x86 *c = cpu_data + pr->id; 848 + cpumask_t mask = cpumask_of_cpu(pr->id); 849 + 850 + if (c->x86_vendor == X86_VENDOR_INTEL) { 851 + on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1); 852 + } 853 + #endif 854 + 846 855 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 847 856 struct acpi_processor_cx *cx = &pr->power.states[i]; 848 857 ··· 866 857 867 858 case ACPI_STATE_C3: 868 859 acpi_processor_power_verify_c3(pr, cx); 860 + #ifdef ARCH_APICTIMER_STOPS_ON_C3 861 + if (c->x86_vendor == X86_VENDOR_INTEL) { 862 + on_each_cpu(switch_APIC_timer_to_ipi, 863 + &mask, 1, 1); 864 + } 865 + #endif 869 866 break; 870 867 } 871 868
+5
include/asm-i386/apic.h
··· 132 132 133 133 extern int disable_timer_pin_1; 134 134 135 + void smp_send_timer_broadcast_ipi(struct pt_regs *regs); 136 + void switch_APIC_timer_to_ipi(void *cpumask); 137 + void switch_ipi_to_APIC_timer(void *cpumask); 138 + #define ARCH_APICTIMER_STOPS_ON_C3 1 139 + 135 140 #else /* !CONFIG_X86_LOCAL_APIC */ 136 141 static inline void lapic_shutdown(void) { } 137 142