Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] i386/x86-64: Remove sub jiffy profile timer support

Remove the finer control of local APIC timer. We cannot provide a sub-jiffy
control like this when we use broadcast from external timer in place of
local APIC. Instead of removing this only on systems that may end up using
broadcast from external timer (due to C3), I am going the
"I'm feeling lucky" way to remove this fully. Basically, I am not sure about
the usefulness of this code today. A few other architectures also don't seem to
support this today.

If you are using profiling and fine-grained control and don't like this going
away in the normal case, yell at me right now.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by

Venkatesh Pallipadi and committed by
Linus Torvalds
5a07a30c 01b8faae

+8 -106
+6 -55
arch/i386/kernel/apic.c
··· 92 92 /* Using APIC to generate smp_local_timer_interrupt? */ 93 93 int using_apic_timer = 0; 94 94 95 - static DEFINE_PER_CPU(int, prof_multiplier) = 1; 96 - static DEFINE_PER_CPU(int, prof_old_multiplier) = 1; 97 - static DEFINE_PER_CPU(int, prof_counter) = 1; 98 - 99 95 static int enabled_via_apicbase; 100 96 101 97 void enable_NMI_through_LVT0 (void * dummy) ··· 1088 1092 } 1089 1093 } 1090 1094 1091 - /* 1092 - * the frequency of the profiling timer can be changed 1093 - * by writing a multiplier value into /proc/profile. 1094 - */ 1095 - int setup_profiling_timer(unsigned int multiplier) 1096 - { 1097 - int i; 1098 - 1099 - /* 1100 - * Sanity check. [at least 500 APIC cycles should be 1101 - * between APIC interrupts as a rule of thumb, to avoid 1102 - * irqs flooding us] 1103 - */ 1104 - if ( (!multiplier) || (calibration_result/multiplier < 500)) 1105 - return -EINVAL; 1106 - 1107 - /* 1108 - * Set the new multiplier for each CPU. CPUs don't start using the 1109 - * new values until the next timer interrupt in which they do process 1110 - * accounting. At that time they also adjust their APIC timers 1111 - * accordingly. 1112 - */ 1113 - for (i = 0; i < NR_CPUS; ++i) 1114 - per_cpu(prof_multiplier, i) = multiplier; 1115 - 1116 - return 0; 1117 - } 1118 - 1119 1095 #undef APIC_DIVISOR 1120 1096 1121 1097 /* ··· 1102 1134 1103 1135 inline void smp_local_timer_interrupt(struct pt_regs * regs) 1104 1136 { 1105 - int cpu = smp_processor_id(); 1106 - 1107 1137 profile_tick(CPU_PROFILING, regs); 1108 - if (--per_cpu(prof_counter, cpu) <= 0) { 1109 - /* 1110 - * The multiplier may have changed since the last time we got 1111 - * to this point as a result of the user writing to 1112 - * /proc/profile. In this case we need to adjust the APIC 1113 - * timer accordingly. 1114 - * 1115 - * Interrupts are already masked off at this point. 
1116 - */ 1117 - per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu); 1118 - if (per_cpu(prof_counter, cpu) != 1119 - per_cpu(prof_old_multiplier, cpu)) { 1120 - __setup_APIC_LVTT( 1121 - calibration_result/ 1122 - per_cpu(prof_counter, cpu)); 1123 - per_cpu(prof_old_multiplier, cpu) = 1124 - per_cpu(prof_counter, cpu); 1125 - } 1126 - 1127 1138 #ifdef CONFIG_SMP 1128 - update_process_times(user_mode_vm(regs)); 1139 + update_process_times(user_mode_vm(regs)); 1129 1140 #endif 1130 - } 1131 1141 1132 1142 /* 1133 1143 * We take the 'long' return path, and there every subsystem ··· 1150 1204 irq_enter(); 1151 1205 smp_local_timer_interrupt(regs); 1152 1206 irq_exit(); 1207 + } 1208 + 1209 + int setup_profiling_timer(unsigned int multiplier) 1210 + { 1211 + return -EINVAL; 1153 1212 } 1154 1213 1155 1214 /*
+2 -51
arch/x86_64/kernel/apic.c
··· 41 41 /* Using APIC to generate smp_local_timer_interrupt? */ 42 42 int using_apic_timer = 0; 43 43 44 - static DEFINE_PER_CPU(int, prof_multiplier) = 1; 45 - static DEFINE_PER_CPU(int, prof_old_multiplier) = 1; 46 - static DEFINE_PER_CPU(int, prof_counter) = 1; 47 - 48 44 static void apic_pm_activate(void); 49 45 50 46 void enable_NMI_through_LVT0 (void * dummy) ··· 801 805 } 802 806 } 803 807 804 - /* 805 - * the frequency of the profiling timer can be changed 806 - * by writing a multiplier value into /proc/profile. 807 - */ 808 808 int setup_profiling_timer(unsigned int multiplier) 809 809 { 810 - int i; 811 - 812 - /* 813 - * Sanity check. [at least 500 APIC cycles should be 814 - * between APIC interrupts as a rule of thumb, to avoid 815 - * irqs flooding us] 816 - */ 817 - if ( (!multiplier) || (calibration_result/multiplier < 500)) 818 - return -EINVAL; 819 - 820 - /* 821 - * Set the new multiplier for each CPU. CPUs don't start using the 822 - * new values until the next timer interrupt in which they do process 823 - * accounting. At that time they also adjust their APIC timers 824 - * accordingly. 825 - */ 826 - for (i = 0; i < NR_CPUS; ++i) 827 - per_cpu(prof_multiplier, i) = multiplier; 828 - 829 - return 0; 810 + return -EINVAL; 830 811 } 831 812 832 813 #ifdef CONFIG_X86_MCE_AMD ··· 830 857 831 858 void smp_local_timer_interrupt(struct pt_regs *regs) 832 859 { 833 - int cpu = smp_processor_id(); 834 - 835 860 profile_tick(CPU_PROFILING, regs); 836 - if (--per_cpu(prof_counter, cpu) <= 0) { 837 - /* 838 - * The multiplier may have changed since the last time we got 839 - * to this point as a result of the user writing to 840 - * /proc/profile. In this case we need to adjust the APIC 841 - * timer accordingly. 842 - * 843 - * Interrupts are already masked off at this point. 
844 - */ 845 - per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu); 846 - if (per_cpu(prof_counter, cpu) != 847 - per_cpu(prof_old_multiplier, cpu)) { 848 - __setup_APIC_LVTT(calibration_result/ 849 - per_cpu(prof_counter, cpu)); 850 - per_cpu(prof_old_multiplier, cpu) = 851 - per_cpu(prof_counter, cpu); 852 - } 853 - 854 861 #ifdef CONFIG_SMP 855 - update_process_times(user_mode(regs)); 862 + update_process_times(user_mode(regs)); 856 863 #endif 857 - } 858 - 859 864 /* 860 865 * We take the 'long' return path, and there every subsystem 861 866 * grabs the appropriate locks (kernel lock/ irq lock).