Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cpufreq: Notify all policy->cpus in cpufreq_notify_transition()

policy->cpus contains all online cpus that share a single clock line, and
their frequencies are always updated together.

Many SMP systems' cpufreq drivers take care of this individually, but
the best place for this code is in the cpufreq core.

This patch modifies cpufreq_notify_transition() to notify frequency change for
all cpus in policy->cpus and hence updates all users of this API.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Stephen Warren <swarren@nvidia.com>
Tested-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

Authored by Viresh Kumar and committed by Rafael J. Wysocki
b43a7ffb fd143b4d

+241 -343
+2 -3
arch/arm/mach-davinci/cpufreq.c
··· 90 90 91 91 freqs.old = davinci_getspeed(0); 92 92 freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000; 93 - freqs.cpu = 0; 94 93 95 94 if (freqs.old == freqs.new) 96 95 return ret; ··· 101 102 if (ret) 102 103 return -EINVAL; 103 104 104 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 105 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 105 106 106 107 /* if moving to higher frequency, up the voltage beforehand */ 107 108 if (pdata->set_voltage && freqs.new > freqs.old) { ··· 125 126 pdata->set_voltage(idx); 126 127 127 128 out: 128 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 129 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 129 130 130 131 return ret; 131 132 }
+2 -3
arch/arm/mach-imx/cpufreq.c
··· 87 87 88 88 freqs.old = clk_get_rate(cpu_clk) / 1000; 89 89 freqs.new = freq_Hz / 1000; 90 - freqs.cpu = 0; 91 90 freqs.flags = 0; 92 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 91 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 93 92 94 93 ret = set_cpu_freq(freq_Hz); 95 94 96 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 95 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 97 96 98 97 return ret; 99 98 }
+2 -4
arch/arm/mach-integrator/cpu.c
··· 123 123 vco = icst_hz_to_vco(&cclk_params, target_freq * 1000); 124 124 freqs.new = icst_hz(&cclk_params, vco) / 1000; 125 125 126 - freqs.cpu = policy->cpu; 127 - 128 126 if (freqs.old == freqs.new) { 129 127 set_cpus_allowed(current, cpus_allowed); 130 128 return 0; 131 129 } 132 130 133 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 131 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 134 132 135 133 cm_osc = __raw_readl(CM_OSC); 136 134 ··· 149 151 */ 150 152 set_cpus_allowed(current, cpus_allowed); 151 153 152 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 154 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 153 155 154 156 return 0; 155 157 }
+2 -3
arch/arm/mach-pxa/cpufreq-pxa2xx.c
··· 311 311 new_freq_mem = pxa_freq_settings[idx].membus; 312 312 freqs.old = policy->cur; 313 313 freqs.new = new_freq_cpu; 314 - freqs.cpu = policy->cpu; 315 314 316 315 if (freq_debug) 317 316 pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", ··· 326 327 * you should add a notify client with any platform specific 327 328 * Vcc changing capability 328 329 */ 329 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 330 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 330 331 331 332 /* Calculate the next MDREFR. If we're slowing down the SDRAM clock 332 333 * we need to preset the smaller DRI before the change. If we're ··· 381 382 * you should add a notify client with any platform specific 382 383 * SDRAM refresh timer adjustments 383 384 */ 384 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 385 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 385 386 386 387 /* 387 388 * Even if voltage setting fails, we don't report it, as the frequency
+2 -3
arch/arm/mach-pxa/cpufreq-pxa3xx.c
··· 184 184 185 185 freqs.old = policy->cur; 186 186 freqs.new = next->cpufreq_mhz * 1000; 187 - freqs.cpu = policy->cpu; 188 187 189 188 pr_debug("CPU frequency from %d MHz to %d MHz%s\n", 190 189 freqs.old / 1000, freqs.new / 1000, ··· 192 193 if (freqs.old == target_freq) 193 194 return 0; 194 195 195 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 196 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 196 197 197 198 local_irq_save(flags); 198 199 __update_core_freq(next); 199 200 __update_bus_freq(next); 200 201 local_irq_restore(flags); 201 202 202 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 203 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 203 204 204 205 return 0; 205 206 }
+2 -6
arch/arm/mach-s3c24xx/cpufreq.c
··· 204 204 freqs.old = cpu_cur.freq; 205 205 freqs.new = cpu_new.freq; 206 206 207 - freqs.freqs.cpu = 0; 208 207 freqs.freqs.old = cpu_cur.freq.armclk / 1000; 209 208 freqs.freqs.new = cpu_new.freq.armclk / 1000; 210 209 ··· 217 218 s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk); 218 219 219 220 /* start the frequency change */ 220 - 221 - if (policy) 222 - cpufreq_notify_transition(&freqs.freqs, CPUFREQ_PRECHANGE); 221 + cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE); 223 222 224 223 /* If hclk is staying the same, then we do not need to 225 224 * re-write the IO or the refresh timings whilst we are changing ··· 261 264 local_irq_restore(flags); 262 265 263 266 /* notify everyone we've done this */ 264 - if (policy) 265 - cpufreq_notify_transition(&freqs.freqs, CPUFREQ_POSTCHANGE); 267 + cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE); 266 268 267 269 s3c_freq_dbg("%s: finished\n", __func__); 268 270 return 0;
+2 -3
arch/arm/mach-sa1100/cpu-sa1100.c
··· 201 201 202 202 freqs.old = cur; 203 203 freqs.new = sa11x0_ppcr_to_freq(new_ppcr); 204 - freqs.cpu = 0; 205 204 206 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 205 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 207 206 208 207 if (freqs.new > cur) 209 208 sa1100_update_dram_timings(cur, freqs.new); ··· 212 213 if (freqs.new < cur) 213 214 sa1100_update_dram_timings(cur, freqs.new); 214 215 215 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 216 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 216 217 217 218 return 0; 218 219 }
+2 -3
arch/arm/mach-sa1100/cpu-sa1110.c
··· 258 258 259 259 freqs.old = sa11x0_getspeed(0); 260 260 freqs.new = sa11x0_ppcr_to_freq(ppcr); 261 - freqs.cpu = 0; 262 261 263 262 sdram_calculate_timing(&sd, freqs.new, sdram); 264 263 ··· 278 279 sd.mdcas[2] = 0xaaaaaaaa; 279 280 #endif 280 281 281 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 282 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 282 283 283 284 /* 284 285 * The clock could be going away for some time. Set the SDRAMs ··· 326 327 */ 327 328 sdram_update_refresh(freqs.new, sdram); 328 329 329 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 330 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 330 331 331 332 return 0; 332 333 }
+8 -7
arch/arm/mach-tegra/cpu-tegra.c
··· 106 106 return ret; 107 107 } 108 108 109 - static int tegra_update_cpu_speed(unsigned long rate) 109 + static int tegra_update_cpu_speed(struct cpufreq_policy *policy, 110 + unsigned long rate) 110 111 { 111 112 int ret = 0; 112 113 struct cpufreq_freqs freqs; ··· 129 128 else 130 129 clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ 131 130 132 - for_each_online_cpu(freqs.cpu) 133 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 131 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 134 132 135 133 #ifdef CONFIG_CPU_FREQ_DEBUG 136 134 printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n", ··· 143 143 return ret; 144 144 } 145 145 146 - for_each_online_cpu(freqs.cpu) 147 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 146 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 148 147 149 148 return 0; 150 149 } ··· 180 181 181 182 target_cpu_speed[policy->cpu] = freq; 182 183 183 - ret = tegra_update_cpu_speed(tegra_cpu_highest_speed()); 184 + ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed()); 184 185 185 186 out: 186 187 mutex_unlock(&tegra_cpu_lock); ··· 192 193 { 193 194 mutex_lock(&tegra_cpu_lock); 194 195 if (event == PM_SUSPEND_PREPARE) { 196 + struct cpufreq_policy *policy = cpufreq_cpu_get(0); 195 197 is_suspended = true; 196 198 pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n", 197 199 freq_table[0].frequency); 198 - tegra_update_cpu_speed(freq_table[0].frequency); 200 + tegra_update_cpu_speed(policy, freq_table[0].frequency); 201 + cpufreq_cpu_put(policy); 199 202 } else if (event == PM_POST_SUSPEND) { 200 203 is_suspended = false; 201 204 }
+2 -3
arch/avr32/mach-at32ap/cpufreq.c
··· 61 61 62 62 freqs.old = at32_get_speed(0); 63 63 freqs.new = (freq + 500) / 1000; 64 - freqs.cpu = 0; 65 64 freqs.flags = 0; 66 65 67 66 if (!ref_freq) { ··· 68 69 loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy; 69 70 } 70 71 71 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 72 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 72 73 if (freqs.old < freqs.new) 73 74 boot_cpu_data.loops_per_jiffy = cpufreq_scale( 74 75 loops_per_jiffy_ref, ref_freq, freqs.new); ··· 76 77 if (freqs.new < freqs.old) 77 78 boot_cpu_data.loops_per_jiffy = cpufreq_scale( 78 79 loops_per_jiffy_ref, ref_freq, freqs.new); 79 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 80 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 80 81 81 82 pr_debug("cpufreq: set frequency %lu Hz\n", freq); 82 83
+37 -48
arch/blackfin/mach-common/cpufreq.c
··· 127 127 } 128 128 #endif 129 129 130 - static int bfin_target(struct cpufreq_policy *poli, 130 + static int bfin_target(struct cpufreq_policy *policy, 131 131 unsigned int target_freq, unsigned int relation) 132 132 { 133 133 #ifndef CONFIG_BF60x 134 134 unsigned int plldiv; 135 135 #endif 136 - unsigned int index, cpu; 136 + unsigned int index; 137 137 unsigned long cclk_hz; 138 138 struct cpufreq_freqs freqs; 139 139 static unsigned long lpj_ref; ··· 144 144 cycles_t cycles; 145 145 #endif 146 146 147 - for_each_online_cpu(cpu) { 148 - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 147 + if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq, 148 + relation, &index)) 149 + return -EINVAL; 149 150 150 - if (!policy) 151 - continue; 151 + cclk_hz = bfin_freq_table[index].frequency; 152 152 153 - if (cpufreq_frequency_table_target(policy, bfin_freq_table, 154 - target_freq, relation, &index)) 155 - return -EINVAL; 153 + freqs.old = bfin_getfreq_khz(0); 154 + freqs.new = cclk_hz; 156 155 157 - cclk_hz = bfin_freq_table[index].frequency; 156 + pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", 157 + cclk_hz, target_freq, freqs.old); 158 158 159 - freqs.old = bfin_getfreq_khz(0); 160 - freqs.new = cclk_hz; 161 - freqs.cpu = cpu; 162 - 163 - pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", 164 - cclk_hz, target_freq, freqs.old); 165 - 166 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 167 - if (cpu == CPUFREQ_CPU) { 159 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 168 160 #ifndef CONFIG_BF60x 169 - plldiv = (bfin_read_PLL_DIV() & SSEL) | 170 - dpm_state_table[index].csel; 171 - bfin_write_PLL_DIV(plldiv); 161 + plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; 162 + bfin_write_PLL_DIV(plldiv); 172 163 #else 173 - ret = cpu_set_cclk(cpu, freqs.new * 1000); 174 - if (ret != 0) { 175 - WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret); 176 - break; 177 - } 
178 - #endif 179 - on_each_cpu(bfin_adjust_core_timer, &index, 1); 180 - #if defined(CONFIG_CYCLES_CLOCKSOURCE) 181 - cycles = get_cycles(); 182 - SSYNC(); 183 - cycles += 10; /* ~10 cycles we lose after get_cycles() */ 184 - __bfin_cycles_off += 185 - (cycles << __bfin_cycles_mod) - (cycles << index); 186 - __bfin_cycles_mod = index; 187 - #endif 188 - if (!lpj_ref_freq) { 189 - lpj_ref = loops_per_jiffy; 190 - lpj_ref_freq = freqs.old; 191 - } 192 - if (freqs.new != freqs.old) { 193 - loops_per_jiffy = cpufreq_scale(lpj_ref, 194 - lpj_ref_freq, freqs.new); 195 - } 196 - } 197 - /* TODO: just test case for cycles clock source, remove later */ 198 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 164 + ret = cpu_set_cclk(policy->cpu, freqs.new * 1000); 165 + if (ret != 0) { 166 + WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret); 167 + return ret; 199 168 } 169 + #endif 170 + on_each_cpu(bfin_adjust_core_timer, &index, 1); 171 + #if defined(CONFIG_CYCLES_CLOCKSOURCE) 172 + cycles = get_cycles(); 173 + SSYNC(); 174 + cycles += 10; /* ~10 cycles we lose after get_cycles() */ 175 + __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index); 176 + __bfin_cycles_mod = index; 177 + #endif 178 + if (!lpj_ref_freq) { 179 + lpj_ref = loops_per_jiffy; 180 + lpj_ref_freq = freqs.old; 181 + } 182 + if (freqs.new != freqs.old) { 183 + loops_per_jiffy = cpufreq_scale(lpj_ref, 184 + lpj_ref_freq, freqs.new); 185 + } 186 + 187 + /* TODO: just test case for cycles clock source, remove later */ 188 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 200 189 201 190 pr_debug("cpufreq: done\n"); 202 191 return ret;
+7 -13
arch/cris/arch-v32/mach-a3/cpufreq.c
··· 27 27 return clk_ctrl.pll ? 200000 : 6000; 28 28 } 29 29 30 - static void cris_freq_set_cpu_state(unsigned int state) 30 + static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, 31 + unsigned int state) 31 32 { 32 - int i = 0; 33 33 struct cpufreq_freqs freqs; 34 34 reg_clkgen_rw_clk_ctrl clk_ctrl; 35 35 clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); 36 36 37 - #ifdef CONFIG_SMP 38 - for_each_present_cpu(i) 39 - #endif 40 - { 41 - freqs.old = cris_freq_get_cpu_frequency(i); 42 - freqs.new = cris_freq_table[state].frequency; 43 - freqs.cpu = i; 44 - } 37 + freqs.old = cris_freq_get_cpu_frequency(policy->cpu); 38 + freqs.new = cris_freq_table[state].frequency; 45 39 46 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 40 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 47 41 48 42 local_irq_disable(); 49 43 ··· 51 57 52 58 local_irq_enable(); 53 59 54 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 60 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 55 61 }; 56 62 57 63 static int cris_freq_verify(struct cpufreq_policy *policy) ··· 69 75 target_freq, relation, &newstate)) 70 76 return -EINVAL; 71 77 72 - cris_freq_set_cpu_state(newstate); 78 + cris_freq_set_cpu_state(policy, newstate); 73 79 74 80 return 0; 75 81 }
+7 -10
arch/cris/arch-v32/mach-fs/cpufreq.c
··· 27 27 return clk_ctrl.pll ? 200000 : 6000; 28 28 } 29 29 30 - static void cris_freq_set_cpu_state(unsigned int state) 30 + static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, 31 + unsigned int state) 31 32 { 32 - int i; 33 33 struct cpufreq_freqs freqs; 34 34 reg_config_rw_clk_ctrl clk_ctrl; 35 35 clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); 36 36 37 - for_each_possible_cpu(i) { 38 - freqs.old = cris_freq_get_cpu_frequency(i); 39 - freqs.new = cris_freq_table[state].frequency; 40 - freqs.cpu = i; 41 - } 37 + freqs.old = cris_freq_get_cpu_frequency(policy->cpu); 38 + freqs.new = cris_freq_table[state].frequency; 42 39 43 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 40 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 44 41 45 42 local_irq_disable(); 46 43 ··· 51 54 52 55 local_irq_enable(); 53 56 54 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 57 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 55 58 }; 56 59 57 60 static int cris_freq_verify(struct cpufreq_policy *policy) ··· 68 71 (policy, cris_freq_table, target_freq, relation, &newstate)) 69 72 return -EINVAL; 70 73 71 - cris_freq_set_cpu_state(newstate); 74 + cris_freq_set_cpu_state(policy, newstate); 72 75 73 76 return 0; 74 77 }
+12 -10
arch/ia64/kernel/cpufreq/acpi-cpufreq.c
··· 137 137 static int 138 138 processor_set_freq ( 139 139 struct cpufreq_acpi_io *data, 140 - unsigned int cpu, 140 + struct cpufreq_policy *policy, 141 141 int state) 142 142 { 143 143 int ret = 0; ··· 149 149 pr_debug("processor_set_freq\n"); 150 150 151 151 saved_mask = current->cpus_allowed; 152 - set_cpus_allowed_ptr(current, cpumask_of(cpu)); 153 - if (smp_processor_id() != cpu) { 152 + set_cpus_allowed_ptr(current, cpumask_of(policy->cpu)); 153 + if (smp_processor_id() != policy->cpu) { 154 154 retval = -EAGAIN; 155 155 goto migrate_end; 156 156 } ··· 170 170 data->acpi_data.state, state); 171 171 172 172 /* cpufreq frequency struct */ 173 - cpufreq_freqs.cpu = cpu; 174 173 cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency; 175 174 cpufreq_freqs.new = data->freq_table[state].frequency; 176 175 177 176 /* notify cpufreq */ 178 - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); 177 + cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE); 179 178 180 179 /* 181 180 * First we write the target state's 'control' value to the ··· 188 189 ret = processor_set_pstate(value); 189 190 if (ret) { 190 191 unsigned int tmp = cpufreq_freqs.new; 191 - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); 192 + cpufreq_notify_transition(policy, &cpufreq_freqs, 193 + CPUFREQ_POSTCHANGE); 192 194 cpufreq_freqs.new = cpufreq_freqs.old; 193 195 cpufreq_freqs.old = tmp; 194 - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); 195 - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); 196 + cpufreq_notify_transition(policy, &cpufreq_freqs, 197 + CPUFREQ_PRECHANGE); 198 + cpufreq_notify_transition(policy, &cpufreq_freqs, 199 + CPUFREQ_POSTCHANGE); 196 200 printk(KERN_WARNING "Transition failed with error %d\n", ret); 197 201 retval = -ENODEV; 198 202 goto migrate_end; 199 203 } 200 204 201 - cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); 205 + cpufreq_notify_transition(policy, 
&cpufreq_freqs, CPUFREQ_POSTCHANGE); 202 206 203 207 data->acpi_data.state = state; 204 208 ··· 242 240 if (result) 243 241 return (result); 244 242 245 - result = processor_set_freq(data, policy->cpu, next_state); 243 + result = processor_set_freq(data, policy, next_state); 246 244 247 245 return (result); 248 246 }
+2 -3
arch/mips/kernel/cpufreq/loongson2_cpufreq.c
··· 80 80 81 81 pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); 82 82 83 - freqs.cpu = cpu; 84 83 freqs.old = loongson2_cpufreq_get(cpu); 85 84 freqs.new = freq; 86 85 freqs.flags = 0; ··· 88 89 return 0; 89 90 90 91 /* notifiers */ 91 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 92 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 92 93 93 94 set_cpus_allowed_ptr(current, &cpus_allowed); 94 95 ··· 96 97 clk_set_rate(cpuclk, freq); 97 98 98 99 /* notifiers */ 99 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 100 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 100 101 101 102 pr_debug("cpufreq: set frequency %u kHz\n", freq); 102 103
+2 -3
arch/powerpc/platforms/cell/cbe_cpufreq.c
··· 156 156 157 157 freqs.old = policy->cur; 158 158 freqs.new = cbe_freqs[cbe_pmode_new].frequency; 159 - freqs.cpu = policy->cpu; 160 159 161 160 mutex_lock(&cbe_switch_mutex); 162 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 161 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 163 162 164 163 pr_debug("setting frequency for cpu %d to %d kHz, " \ 165 164 "1/%d of max frequency\n", ··· 168 169 169 170 rc = set_pmode(policy->cpu, cbe_pmode_new); 170 171 171 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 172 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 172 173 mutex_unlock(&cbe_switch_mutex); 173 174 174 175 return rc;
+2 -3
arch/powerpc/platforms/pasemi/cpufreq.c
··· 273 273 274 274 freqs.old = policy->cur; 275 275 freqs.new = pas_freqs[pas_astate_new].frequency; 276 - freqs.cpu = policy->cpu; 277 276 278 277 mutex_lock(&pas_switch_mutex); 279 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 278 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 280 279 281 280 pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n", 282 281 policy->cpu, ··· 287 288 for_each_online_cpu(i) 288 289 set_astate(i, pas_astate_new); 289 290 290 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 291 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 291 292 mutex_unlock(&pas_switch_mutex); 292 293 293 294 ppc_proc_freq = freqs.new * 1000ul;
+7 -7
arch/powerpc/platforms/powermac/cpufreq_32.c
··· 335 335 return 0; 336 336 } 337 337 338 - static int do_set_cpu_speed(int speed_mode, int notify) 338 + static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode, 339 + int notify) 339 340 { 340 341 struct cpufreq_freqs freqs; 341 342 unsigned long l3cr; ··· 344 343 345 344 freqs.old = cur_freq; 346 345 freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; 347 - freqs.cpu = smp_processor_id(); 348 346 349 347 if (freqs.old == freqs.new) 350 348 return 0; 351 349 352 350 if (notify) 353 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 351 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 354 352 if (speed_mode == CPUFREQ_LOW && 355 353 cpu_has_feature(CPU_FTR_L3CR)) { 356 354 l3cr = _get_L3CR(); ··· 366 366 _set_L3CR(prev_l3cr); 367 367 } 368 368 if (notify) 369 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 369 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 370 370 cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; 371 371 372 372 return 0; ··· 393 393 target_freq, relation, &newstate)) 394 394 return -EINVAL; 395 395 396 - rc = do_set_cpu_speed(newstate, 1); 396 + rc = do_set_cpu_speed(policy, newstate, 1); 397 397 398 398 ppc_proc_freq = cur_freq * 1000ul; 399 399 return rc; ··· 442 442 no_schedule = 1; 443 443 sleep_freq = cur_freq; 444 444 if (cur_freq == low_freq && !is_pmu_based) 445 - do_set_cpu_speed(CPUFREQ_HIGH, 0); 445 + do_set_cpu_speed(policy, CPUFREQ_HIGH, 0); 446 446 return 0; 447 447 } 448 448 ··· 458 458 * is that we force a switch to whatever it was, which is 459 459 * probably high speed due to our suspend() routine 460 460 */ 461 - do_set_cpu_speed(sleep_freq == low_freq ? 461 + do_set_cpu_speed(policy, sleep_freq == low_freq ? 462 462 CPUFREQ_LOW : CPUFREQ_HIGH, 0); 463 463 464 464 ppc_proc_freq = cur_freq * 1000ul;
+2 -3
arch/powerpc/platforms/powermac/cpufreq_64.c
··· 339 339 340 340 freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; 341 341 freqs.new = g5_cpu_freqs[newstate].frequency; 342 - freqs.cpu = 0; 343 342 344 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 343 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 345 344 rc = g5_switch_freq(newstate); 346 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 345 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 347 346 348 347 mutex_unlock(&g5_switch_mutex); 349 348
+2 -3
arch/sh/kernel/cpufreq.c
··· 69 69 70 70 dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000); 71 71 72 - freqs.cpu = cpu; 73 72 freqs.old = sh_cpufreq_get(cpu); 74 73 freqs.new = (freq + 500) / 1000; 75 74 freqs.flags = 0; 76 75 77 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 76 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 78 77 set_cpus_allowed_ptr(current, &cpus_allowed); 79 78 clk_set_rate(cpuclk, freq); 80 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 79 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 81 80 82 81 dev_dbg(dev, "set frequency %lu Hz\n", freq); 83 82
+7 -6
arch/sparc/kernel/us2e_cpufreq.c
··· 248 248 return clock_tick / estar_to_divisor(estar); 249 249 } 250 250 251 - static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) 251 + static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy, 252 + unsigned int index) 252 253 { 254 + unsigned int cpu = policy->cpu; 253 255 unsigned long new_bits, new_freq; 254 256 unsigned long clock_tick, divisor, old_divisor, estar; 255 257 cpumask_t cpus_allowed; ··· 274 272 275 273 freqs.old = clock_tick / old_divisor; 276 274 freqs.new = new_freq; 277 - freqs.cpu = cpu; 278 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 275 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 279 276 280 277 if (old_divisor != divisor) 281 278 us2e_transition(estar, new_bits, clock_tick * 1000, 282 279 old_divisor, divisor); 283 280 284 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 281 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 285 282 286 283 set_cpus_allowed_ptr(current, &cpus_allowed); 287 284 } ··· 296 295 target_freq, relation, &new_index)) 297 296 return -EINVAL; 298 297 299 - us2e_set_cpu_divider_index(policy->cpu, new_index); 298 + us2e_set_cpu_divider_index(policy, new_index); 300 299 301 300 return 0; 302 301 } ··· 336 335 static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) 337 336 { 338 337 if (cpufreq_us2e_driver) 339 - us2e_set_cpu_divider_index(policy->cpu, 0); 338 + us2e_set_cpu_divider_index(policy, 0); 340 339 341 340 return 0; 342 341 }
+7 -6
arch/sparc/kernel/us3_cpufreq.c
··· 96 96 return ret; 97 97 } 98 98 99 - static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) 99 + static void us3_set_cpu_divider_index(struct cpufreq_policy *policy, 100 + unsigned int index) 100 101 { 102 + unsigned int cpu = policy->cpu; 101 103 unsigned long new_bits, new_freq, reg; 102 104 cpumask_t cpus_allowed; 103 105 struct cpufreq_freqs freqs; ··· 133 131 134 132 freqs.old = get_current_freq(cpu, reg); 135 133 freqs.new = new_freq; 136 - freqs.cpu = cpu; 137 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 134 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 138 135 139 136 reg &= ~SAFARI_CFG_DIV_MASK; 140 137 reg |= new_bits; 141 138 write_safari_cfg(reg); 142 139 143 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 140 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 144 141 145 142 set_cpus_allowed_ptr(current, &cpus_allowed); 146 143 } ··· 157 156 &new_index)) 158 157 return -EINVAL; 159 158 160 - us3_set_cpu_divider_index(policy->cpu, new_index); 159 + us3_set_cpu_divider_index(policy, new_index); 161 160 162 161 return 0; 163 162 } ··· 193 192 static int us3_freq_cpu_exit(struct cpufreq_policy *policy) 194 193 { 195 194 if (cpufreq_us3_driver) 196 - us3_set_cpu_divider_index(policy->cpu, 0); 195 + us3_set_cpu_divider_index(policy, 0); 197 196 198 197 return 0; 199 198 }
+2 -3
arch/unicore32/kernel/cpu-ucv2.c
··· 52 52 struct cpufreq_freqs freqs; 53 53 struct clk *mclk = clk_get(NULL, "MAIN_CLK"); 54 54 55 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 55 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 56 56 57 57 if (!clk_set_rate(mclk, target_freq * 1000)) { 58 58 freqs.old = cur; 59 59 freqs.new = target_freq; 60 - freqs.cpu = 0; 61 60 } 62 61 63 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 62 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 64 63 65 64 return 0; 66 65 }
+2 -9
drivers/cpufreq/acpi-cpufreq.c
··· 423 423 struct drv_cmd cmd; 424 424 unsigned int next_state = 0; /* Index into freq_table */ 425 425 unsigned int next_perf_state = 0; /* Index into perf table */ 426 - unsigned int i; 427 426 int result = 0; 428 427 429 428 pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); ··· 485 486 486 487 freqs.old = perf->states[perf->state].core_frequency * 1000; 487 488 freqs.new = data->freq_table[next_state].frequency; 488 - for_each_cpu(i, policy->cpus) { 489 - freqs.cpu = i; 490 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 491 - } 489 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 492 490 493 491 drv_write(&cmd); 494 492 ··· 498 502 } 499 503 } 500 504 501 - for_each_cpu(i, policy->cpus) { 502 - freqs.cpu = i; 503 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 504 - } 505 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 505 506 perf->state = next_perf_state; 506 507 507 508 out:
+3 -9
drivers/cpufreq/cpufreq-cpu0.c
··· 46 46 struct opp *opp; 47 47 unsigned long volt = 0, volt_old = 0, tol = 0; 48 48 long freq_Hz; 49 - unsigned int index, cpu; 49 + unsigned int index; 50 50 int ret; 51 51 52 52 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, ··· 66 66 if (freqs.old == freqs.new) 67 67 return 0; 68 68 69 - for_each_online_cpu(cpu) { 70 - freqs.cpu = cpu; 71 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 72 - } 69 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 73 70 74 71 if (cpu_reg) { 75 72 rcu_read_lock(); ··· 118 121 } 119 122 120 123 post_notify: 121 - for_each_online_cpu(cpu) { 122 - freqs.cpu = cpu; 123 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 124 - } 124 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 125 125 126 126 return ret; 127 127 }
+2 -3
drivers/cpufreq/cpufreq-nforce2.c
··· 263 263 264 264 freqs.old = nforce2_get(policy->cpu); 265 265 freqs.new = target_fsb * fid * 100; 266 - freqs.cpu = 0; /* Only one CPU on nForce2 platforms */ 267 266 268 267 if (freqs.old == freqs.new) 269 268 return 0; ··· 270 271 pr_debug("Old CPU frequency %d kHz, new %d kHz\n", 271 272 freqs.old, freqs.new); 272 273 273 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 274 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 274 275 275 276 /* Disable IRQs */ 276 277 /* local_irq_save(flags); */ ··· 285 286 /* Enable IRQs */ 286 287 /* local_irq_restore(flags); */ 287 288 288 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 289 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 289 290 290 291 return 0; 291 292 }
+26 -19
drivers/cpufreq/cpufreq.c
··· 249 249 #endif 250 250 251 251 252 - /** 253 - * cpufreq_notify_transition - call notifier chain and adjust_jiffies 254 - * on frequency transition. 255 - * 256 - * This function calls the transition notifiers and the "adjust_jiffies" 257 - * function. It is called twice on all CPU frequency changes that have 258 - * external effects. 259 - */ 260 - void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) 252 + void __cpufreq_notify_transition(struct cpufreq_policy *policy, 253 + struct cpufreq_freqs *freqs, unsigned int state) 261 254 { 262 - struct cpufreq_policy *policy; 263 - unsigned long flags; 264 - 265 255 BUG_ON(irqs_disabled()); 266 256 267 257 if (cpufreq_disabled()) ··· 260 270 freqs->flags = cpufreq_driver->flags; 261 271 pr_debug("notification %u of frequency transition to %u kHz\n", 262 272 state, freqs->new); 263 - 264 - read_lock_irqsave(&cpufreq_driver_lock, flags); 265 - policy = per_cpu(cpufreq_cpu_data, freqs->cpu); 266 - read_unlock_irqrestore(&cpufreq_driver_lock, flags); 267 273 268 274 switch (state) { 269 275 ··· 293 307 policy->cur = freqs->new; 294 308 break; 295 309 } 310 + } 311 + /** 312 + * cpufreq_notify_transition - call notifier chain and adjust_jiffies 313 + * on frequency transition. 314 + * 315 + * This function calls the transition notifiers and the "adjust_jiffies" 316 + * function. It is called twice on all CPU frequency changes that have 317 + * external effects. 
318 + */ 319 + void cpufreq_notify_transition(struct cpufreq_policy *policy, 320 + struct cpufreq_freqs *freqs, unsigned int state) 321 + { 322 + for_each_cpu(freqs->cpu, policy->cpus) 323 + __cpufreq_notify_transition(policy, freqs, state); 296 324 } 297 325 EXPORT_SYMBOL_GPL(cpufreq_notify_transition); 298 326 ··· 1141 1141 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, 1142 1142 unsigned int new_freq) 1143 1143 { 1144 + struct cpufreq_policy *policy; 1144 1145 struct cpufreq_freqs freqs; 1146 + unsigned long flags; 1147 + 1145 1148 1146 1149 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " 1147 1150 "core thinks of %u, is %u kHz.\n", old_freq, new_freq); 1148 1151 1149 - freqs.cpu = cpu; 1150 1152 freqs.old = old_freq; 1151 1153 freqs.new = new_freq; 1152 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 1153 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 1154 + 1155 + read_lock_irqsave(&cpufreq_driver_lock, flags); 1156 + policy = per_cpu(cpufreq_cpu_data, cpu); 1157 + read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1158 + 1159 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 1160 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 1154 1161 } 1155 1162 1156 1163
+2 -4
drivers/cpufreq/dbx500-cpufreq.c
··· 55 55 return 0; 56 56 57 57 /* pre-change notification */ 58 - for_each_cpu(freqs.cpu, policy->cpus) 59 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 58 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 60 59 61 60 /* update armss clk frequency */ 62 61 ret = clk_set_rate(armss_clk, freqs.new * 1000); ··· 67 68 } 68 69 69 70 /* post change notification */ 70 - for_each_cpu(freqs.cpu, policy->cpus) 71 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 71 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 72 72 73 73 return 0; 74 74 }
+5 -6
drivers/cpufreq/e_powersaver.c
··· 104 104 } 105 105 106 106 static int eps_set_state(struct eps_cpu_data *centaur, 107 - unsigned int cpu, 107 + struct cpufreq_policy *policy, 108 108 u32 dest_state) 109 109 { 110 110 struct cpufreq_freqs freqs; ··· 112 112 int err = 0; 113 113 int i; 114 114 115 - freqs.old = eps_get(cpu); 115 + freqs.old = eps_get(policy->cpu); 116 116 freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff); 117 - freqs.cpu = cpu; 118 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 117 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 119 118 120 119 /* Wait while CPU is busy */ 121 120 rdmsr(MSR_IA32_PERF_STATUS, lo, hi); ··· 161 162 current_multiplier); 162 163 } 163 164 #endif 164 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 165 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 165 166 return err; 166 167 } 167 168 ··· 189 190 190 191 /* Make frequency transition */ 191 192 dest_state = centaur->freq_table[newstate].index & 0xffff; 192 - ret = eps_set_state(centaur, cpu, dest_state); 193 + ret = eps_set_state(centaur, policy, dest_state); 193 194 if (ret) 194 195 printk(KERN_ERR "eps: Timeout!\n"); 195 196 return ret;
+5 -5
drivers/cpufreq/elanfreq.c
··· 117 117 * There is no return value. 118 118 */ 119 119 120 - static void elanfreq_set_cpu_state(unsigned int state) 120 + static void elanfreq_set_cpu_state(struct cpufreq_policy *policy, 121 + unsigned int state) 121 122 { 122 123 struct cpufreq_freqs freqs; 123 124 124 125 freqs.old = elanfreq_get_cpu_frequency(0); 125 126 freqs.new = elan_multiplier[state].clock; 126 - freqs.cpu = 0; /* elanfreq.c is UP only driver */ 127 127 128 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 128 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 129 129 130 130 printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n", 131 131 elan_multiplier[state].clock); ··· 161 161 udelay(10000); 162 162 local_irq_enable(); 163 163 164 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 164 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 165 165 }; 166 166 167 167 ··· 188 188 target_freq, relation, &newstate)) 189 189 return -EINVAL; 190 190 191 - elanfreq_set_cpu_state(newstate); 191 + elanfreq_set_cpu_state(policy, newstate); 192 192 193 193 return 0; 194 194 }
+2 -5
drivers/cpufreq/exynos-cpufreq.c
··· 70 70 71 71 freqs.old = policy->cur; 72 72 freqs.new = target_freq; 73 - freqs.cpu = policy->cpu; 74 73 75 74 if (freqs.new == freqs.old) 76 75 goto out; ··· 104 105 } 105 106 arm_volt = volt_table[index]; 106 107 107 - for_each_cpu(freqs.cpu, policy->cpus) 108 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 108 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 109 109 110 110 /* When the new frequency is higher than current frequency */ 111 111 if ((freqs.new > freqs.old) && !safe_arm_volt) { ··· 129 131 130 132 exynos_info->set_freq(old_index, index); 131 133 132 - for_each_cpu(freqs.cpu, policy->cpus) 133 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 134 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 134 135 135 136 /* When the new frequency is lower than current frequency */ 136 137 if ((freqs.new < freqs.old) ||
+4 -7
drivers/cpufreq/gx-suspmod.c
··· 251 251 * set cpu speed in khz. 252 252 **/ 253 253 254 - static void gx_set_cpuspeed(unsigned int khz) 254 + static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz) 255 255 { 256 256 u8 suscfg, pmer1; 257 257 unsigned int new_khz; 258 258 unsigned long flags; 259 259 struct cpufreq_freqs freqs; 260 260 261 - freqs.cpu = 0; 262 261 freqs.old = gx_get_cpuspeed(0); 263 262 264 263 new_khz = gx_validate_speed(khz, &gx_params->on_duration, ··· 265 266 266 267 freqs.new = new_khz; 267 268 268 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 269 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 269 270 local_irq_save(flags); 270 - 271 - 272 271 273 272 if (new_khz != stock_freq) { 274 273 /* if new khz == 100% of CPU speed, it is special case */ ··· 314 317 315 318 gx_params->pci_suscfg = suscfg; 316 319 317 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 320 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 318 321 319 322 pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", 320 323 gx_params->on_duration * 32, gx_params->off_duration * 32); ··· 394 397 tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); 395 398 } 396 399 397 - gx_set_cpuspeed(tmp_freq); 400 + gx_set_cpuspeed(policy, tmp_freq); 398 401 399 402 return 0; 400 403 }
+3 -9
drivers/cpufreq/imx6q-cpufreq.c
··· 50 50 struct cpufreq_freqs freqs; 51 51 struct opp *opp; 52 52 unsigned long freq_hz, volt, volt_old; 53 - unsigned int index, cpu; 53 + unsigned int index; 54 54 int ret; 55 55 56 56 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, ··· 68 68 if (freqs.old == freqs.new) 69 69 return 0; 70 70 71 - for_each_online_cpu(cpu) { 72 - freqs.cpu = cpu; 73 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 74 - } 71 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 75 72 76 73 rcu_read_lock(); 77 74 opp = opp_find_freq_ceil(cpu_dev, &freq_hz); ··· 163 166 } 164 167 } 165 168 166 - for_each_online_cpu(cpu) { 167 - freqs.cpu = cpu; 168 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 169 - } 169 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 170 170 171 171 return 0; 172 172 }
+5 -5
drivers/cpufreq/kirkwood-cpufreq.c
··· 55 55 return kirkwood_freq_table[0].frequency; 56 56 } 57 57 58 - static void kirkwood_cpufreq_set_cpu_state(unsigned int index) 58 + static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy, 59 + unsigned int index) 59 60 { 60 61 struct cpufreq_freqs freqs; 61 62 unsigned int state = kirkwood_freq_table[index].index; ··· 64 63 65 64 freqs.old = kirkwood_cpufreq_get_cpu_frequency(0); 66 65 freqs.new = kirkwood_freq_table[index].frequency; 67 - freqs.cpu = 0; /* Kirkwood is UP */ 68 66 69 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 67 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 70 68 71 69 dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n", 72 70 kirkwood_freq_table[index].frequency); ··· 99 99 100 100 local_irq_enable(); 101 101 } 102 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 102 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 103 103 }; 104 104 105 105 static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy) ··· 117 117 target_freq, relation, &index)) 118 118 return -EINVAL; 119 119 120 - kirkwood_cpufreq_set_cpu_state(index); 120 + kirkwood_cpufreq_set_cpu_state(policy, index); 121 121 122 122 return 0; 123 123 }
+10 -8
drivers/cpufreq/longhaul.c
··· 242 242 * Sets a new clock ratio. 243 243 */ 244 244 245 - static void longhaul_setstate(unsigned int table_index) 245 + static void longhaul_setstate(struct cpufreq_policy *policy, 246 + unsigned int table_index) 246 247 { 247 248 unsigned int mults_index; 248 249 int speed, mult; ··· 268 267 269 268 freqs.old = calc_speed(longhaul_get_cpu_mult()); 270 269 freqs.new = speed; 271 - freqs.cpu = 0; /* longhaul.c is UP only driver */ 272 270 273 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 271 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 274 272 275 273 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", 276 274 fsb, mult/10, mult%10, print_speed(speed/1000)); ··· 386 386 } 387 387 } 388 388 /* Report true CPU frequency */ 389 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 389 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 390 390 391 391 if (!bm_timeout) 392 392 printk(KERN_INFO PFX "Warning: Timeout while waiting for " ··· 648 648 return 0; 649 649 650 650 if (!can_scale_voltage) 651 - longhaul_setstate(table_index); 651 + longhaul_setstate(policy, table_index); 652 652 else { 653 653 /* On test system voltage transitions exceeding single 654 654 * step up or down were turning motherboard off. 
Both ··· 663 663 while (i != table_index) { 664 664 vid = (longhaul_table[i].index >> 8) & 0x1f; 665 665 if (vid != current_vid) { 666 - longhaul_setstate(i); 666 + longhaul_setstate(policy, i); 667 667 current_vid = vid; 668 668 msleep(200); 669 669 } ··· 672 672 else 673 673 i--; 674 674 } 675 - longhaul_setstate(table_index); 675 + longhaul_setstate(policy, table_index); 676 676 } 677 677 longhaul_index = table_index; 678 678 return 0; ··· 998 998 999 999 static void __exit longhaul_exit(void) 1000 1000 { 1001 + struct cpufreq_policy *policy = cpufreq_cpu_get(0); 1001 1002 int i; 1002 1003 1003 1004 for (i = 0; i < numscales; i++) { 1004 1005 if (mults[i] == maxmult) { 1005 - longhaul_setstate(i); 1006 + longhaul_setstate(policy, i); 1006 1007 break; 1007 1008 } 1008 1009 } 1009 1010 1011 + cpufreq_cpu_put(policy); 1010 1012 cpufreq_unregister_driver(&longhaul_driver); 1011 1013 kfree(longhaul_table); 1012 1014 }
+2 -3
drivers/cpufreq/maple-cpufreq.c
··· 158 158 159 159 freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency; 160 160 freqs.new = maple_cpu_freqs[newstate].frequency; 161 - freqs.cpu = 0; 162 161 163 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 162 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 164 163 rc = maple_scom_switch_freq(newstate); 165 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 164 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 166 165 167 166 mutex_unlock(&maple_switch_mutex); 168 167
+2 -9
drivers/cpufreq/omap-cpufreq.c
··· 88 88 } 89 89 90 90 freqs.old = omap_getspeed(policy->cpu); 91 - freqs.cpu = policy->cpu; 92 91 93 92 if (freqs.old == freqs.new && policy->cur == freqs.new) 94 93 return ret; 95 94 96 95 /* notifiers */ 97 - for_each_cpu(i, policy->cpus) { 98 - freqs.cpu = i; 99 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 100 - } 96 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 101 97 102 98 freq = freqs.new * 1000; 103 99 ret = clk_round_rate(mpu_clk, freq); ··· 153 157 154 158 done: 155 159 /* notifiers */ 156 - for_each_cpu(i, policy->cpus) { 157 - freqs.cpu = i; 158 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 159 - } 160 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 160 161 161 162 return ret; 162 163 }
+2 -8
drivers/cpufreq/p4-clockmod.c
··· 125 125 return 0; 126 126 127 127 /* notifiers */ 128 - for_each_cpu(i, policy->cpus) { 129 - freqs.cpu = i; 130 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 131 - } 128 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 132 129 133 130 /* run on each logical CPU, 134 131 * see section 13.15.3 of IA32 Intel Architecture Software ··· 135 138 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); 136 139 137 140 /* notifiers */ 138 - for_each_cpu(i, policy->cpus) { 139 - freqs.cpu = i; 140 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 141 - } 141 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 142 142 143 143 return 0; 144 144 }
+2 -3
drivers/cpufreq/pcc-cpufreq.c
··· 215 215 (pcch_virt_addr + pcc_cpu_data->input_offset)); 216 216 217 217 freqs.new = target_freq; 218 - freqs.cpu = cpu; 219 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 218 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 220 219 221 220 input_buffer = 0x1 | (((target_freq * 100) 222 221 / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); ··· 236 237 } 237 238 iowrite16(0, &pcch_hdr->status); 238 239 239 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 240 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 240 241 pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); 241 242 spin_unlock(&pcc_lock); 242 243
+6 -6
drivers/cpufreq/powernow-k6.c
··· 68 68 * 69 69 * Tries to change the PowerNow! multiplier 70 70 */ 71 - static void powernow_k6_set_state(unsigned int best_i) 71 + static void powernow_k6_set_state(struct cpufreq_policy *policy, 72 + unsigned int best_i) 72 73 { 73 74 unsigned long outvalue = 0, invalue = 0; 74 75 unsigned long msrval; ··· 82 81 83 82 freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); 84 83 freqs.new = busfreq * clock_ratio[best_i].index; 85 - freqs.cpu = 0; /* powernow-k6.c is UP only driver */ 86 84 87 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 85 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 88 86 89 87 /* we now need to transform best_i to the BVC format, see AMD#23446 */ 90 88 ··· 98 98 msrval = POWERNOW_IOPORT + 0x0; 99 99 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ 100 100 101 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 101 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 102 102 103 103 return; 104 104 } ··· 136 136 target_freq, relation, &newstate)) 137 137 return -EINVAL; 138 138 139 - powernow_k6_set_state(newstate); 139 + powernow_k6_set_state(policy, newstate); 140 140 141 141 return 0; 142 142 } ··· 182 182 unsigned int i; 183 183 for (i = 0; i < 8; i++) { 184 184 if (i == max_multiplier) 185 - powernow_k6_set_state(i); 185 + powernow_k6_set_state(policy, i); 186 186 } 187 187 cpufreq_frequency_table_put_attr(policy->cpu); 188 188 return 0;
+4 -6
drivers/cpufreq/powernow-k7.c
··· 248 248 } 249 249 250 250 251 - static void change_speed(unsigned int index) 251 + static void change_speed(struct cpufreq_policy *policy, unsigned int index) 252 252 { 253 253 u8 fid, vid; 254 254 struct cpufreq_freqs freqs; ··· 263 263 fid = powernow_table[index].index & 0xFF; 264 264 vid = (powernow_table[index].index & 0xFF00) >> 8; 265 265 266 - freqs.cpu = 0; 267 - 268 266 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); 269 267 cfid = fidvidstatus.bits.CFID; 270 268 freqs.old = fsb * fid_codes[cfid] / 10; 271 269 272 270 freqs.new = powernow_table[index].frequency; 273 271 274 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 272 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 275 273 276 274 /* Now do the magic poking into the MSRs. */ 277 275 ··· 290 292 if (have_a0 == 1) 291 293 local_irq_enable(); 292 294 293 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 295 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 294 296 } 295 297 296 298 ··· 544 546 relation, &newstate)) 545 547 return -EINVAL; 546 548 547 - change_speed(newstate); 549 + change_speed(policy, newstate); 548 550 549 551 return 0; 550 552 }
+7 -9
drivers/cpufreq/powernow-k8.c
··· 928 928 static int transition_frequency_fidvid(struct powernow_k8_data *data, 929 929 unsigned int index) 930 930 { 931 + struct cpufreq_policy *policy; 931 932 u32 fid = 0; 932 933 u32 vid = 0; 933 - int res, i; 934 + int res; 934 935 struct cpufreq_freqs freqs; 935 936 936 937 pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); ··· 960 959 freqs.old = find_khz_freq_from_fid(data->currfid); 961 960 freqs.new = find_khz_freq_from_fid(fid); 962 961 963 - for_each_cpu(i, data->available_cores) { 964 - freqs.cpu = i; 965 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 966 - } 962 + policy = cpufreq_cpu_get(smp_processor_id()); 963 + cpufreq_cpu_put(policy); 964 + 965 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 967 966 968 967 res = transition_fid_vid(data, fid, vid); 969 968 if (res) ··· 971 970 972 971 freqs.new = find_khz_freq_from_fid(data->currfid); 973 972 974 - for_each_cpu(i, data->available_cores) { 975 - freqs.cpu = i; 976 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 977 - } 973 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 978 974 return res; 979 975 } 980 976
+2 -3
drivers/cpufreq/s3c2416-cpufreq.c
··· 256 256 goto out; 257 257 } 258 258 259 - freqs.cpu = 0; 260 259 freqs.flags = 0; 261 260 freqs.old = s3c_freq->is_dvs ? FREQ_DVS 262 261 : clk_get_rate(s3c_freq->armclk) / 1000; ··· 273 274 if (!to_dvs && freqs.old == freqs.new) 274 275 goto out; 275 276 276 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 277 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 277 278 278 279 if (to_dvs) { 279 280 pr_debug("cpufreq: enter dvs\n"); ··· 286 287 ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new); 287 288 } 288 289 289 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 290 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 290 291 291 292 out: 292 293 mutex_unlock(&cpufreq_lock);
+3 -4
drivers/cpufreq/s3c64xx-cpufreq.c
··· 84 84 if (ret != 0) 85 85 return ret; 86 86 87 - freqs.cpu = 0; 88 87 freqs.old = clk_get_rate(armclk) / 1000; 89 88 freqs.new = s3c64xx_freq_table[i].frequency; 90 89 freqs.flags = 0; ··· 94 95 95 96 pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new); 96 97 97 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 98 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 98 99 99 100 #ifdef CONFIG_REGULATOR 100 101 if (vddarm && freqs.new > freqs.old) { ··· 116 117 goto err; 117 118 } 118 119 119 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 120 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 120 121 121 122 #ifdef CONFIG_REGULATOR 122 123 if (vddarm && freqs.new < freqs.old) { ··· 140 141 if (clk_set_rate(armclk, freqs.old * 1000) < 0) 141 142 pr_err("Failed to restore original clock rate\n"); 142 143 err: 143 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 144 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 144 145 145 146 return ret; 146 147 }
+2 -3
drivers/cpufreq/s5pv210-cpufreq.c
··· 229 229 } 230 230 231 231 freqs.new = s5pv210_freq_table[index].frequency; 232 - freqs.cpu = 0; 233 232 234 233 if (freqs.new == freqs.old) 235 234 goto exit; ··· 255 256 goto exit; 256 257 } 257 258 258 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 259 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 259 260 260 261 /* Check if there need to change PLL */ 261 262 if ((index == L0) || (priv_index == L0)) ··· 467 468 } 468 469 } 469 470 470 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 471 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 471 472 472 473 if (freqs.new < freqs.old) { 473 474 regulator_set_voltage(int_regulator,
+5 -5
drivers/cpufreq/sc520_freq.c
··· 53 53 } 54 54 } 55 55 56 - static void sc520_freq_set_cpu_state(unsigned int state) 56 + static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy, 57 + unsigned int state) 57 58 { 58 59 59 60 struct cpufreq_freqs freqs; ··· 62 61 63 62 freqs.old = sc520_freq_get_cpu_frequency(0); 64 63 freqs.new = sc520_freq_table[state].frequency; 65 - freqs.cpu = 0; /* AMD Elan is UP */ 66 64 67 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 65 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 68 66 69 67 pr_debug("attempting to set frequency to %i kHz\n", 70 68 sc520_freq_table[state].frequency); ··· 75 75 76 76 local_irq_enable(); 77 77 78 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 78 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 79 79 }; 80 80 81 81 static int sc520_freq_verify(struct cpufreq_policy *policy) ··· 93 93 target_freq, relation, &newstate)) 94 94 return -EINVAL; 95 95 96 - sc520_freq_set_cpu_state(newstate); 96 + sc520_freq_set_cpu_state(policy, newstate); 97 97 98 98 return 0; 99 99 }
+2 -5
drivers/cpufreq/spear-cpufreq.c
··· 121 121 target_freq, relation, &index)) 122 122 return -EINVAL; 123 123 124 - freqs.cpu = policy->cpu; 125 124 freqs.old = spear_cpufreq_get(0); 126 125 127 126 newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000; ··· 157 158 freqs.new = newfreq / 1000; 158 159 freqs.new /= mult; 159 160 160 - for_each_cpu(freqs.cpu, policy->cpus) 161 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 161 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 162 162 163 163 if (mult == 2) 164 164 ret = spear1340_set_cpu_rate(srcclk, newfreq); ··· 170 172 freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000; 171 173 } 172 174 173 - for_each_cpu(freqs.cpu, policy->cpus) 174 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 175 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 175 176 return ret; 176 177 } 177 178
+5 -19
drivers/cpufreq/speedstep-centrino.c
··· 457 457 unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; 458 458 struct cpufreq_freqs freqs; 459 459 int retval = 0; 460 - unsigned int j, k, first_cpu, tmp; 460 + unsigned int j, first_cpu, tmp; 461 461 cpumask_var_t covered_cpus; 462 462 463 463 if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) ··· 522 522 pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", 523 523 target_freq, freqs.old, freqs.new, msr); 524 524 525 - for_each_cpu(k, policy->cpus) { 526 - if (!cpu_online(k)) 527 - continue; 528 - freqs.cpu = k; 529 - cpufreq_notify_transition(&freqs, 525 + cpufreq_notify_transition(policy, &freqs, 530 526 CPUFREQ_PRECHANGE); 531 - } 532 527 533 528 first_cpu = 0; 534 529 /* all but 16 LSB are reserved, treat them with care */ ··· 539 544 cpumask_set_cpu(j, covered_cpus); 540 545 } 541 546 542 - for_each_cpu(k, policy->cpus) { 543 - if (!cpu_online(k)) 544 - continue; 545 - freqs.cpu = k; 546 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 547 - } 547 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 548 548 549 549 if (unlikely(retval)) { 550 550 /* ··· 555 565 tmp = freqs.new; 556 566 freqs.new = freqs.old; 557 567 freqs.old = tmp; 558 - for_each_cpu(j, policy->cpus) { 559 - if (!cpu_online(j)) 560 - continue; 561 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 562 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 563 - } 568 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 569 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 564 570 } 565 571 retval = 0; 566 572
+2 -10
drivers/cpufreq/speedstep-ich.c
··· 263 263 { 264 264 unsigned int newstate = 0, policy_cpu; 265 265 struct cpufreq_freqs freqs; 266 - int i; 267 266 268 267 if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], 269 268 target_freq, relation, &newstate)) ··· 271 272 policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); 272 273 freqs.old = speedstep_get(policy_cpu); 273 274 freqs.new = speedstep_freqs[newstate].frequency; 274 - freqs.cpu = policy->cpu; 275 275 276 276 pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new); 277 277 ··· 278 280 if (freqs.old == freqs.new) 279 281 return 0; 280 282 281 - for_each_cpu(i, policy->cpus) { 282 - freqs.cpu = i; 283 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 284 - } 283 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 285 284 286 285 smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate, 287 286 true); 288 287 289 - for_each_cpu(i, policy->cpus) { 290 - freqs.cpu = i; 291 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 292 - } 288 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 293 289 294 290 return 0; 295 291 }
+2 -3
drivers/cpufreq/speedstep-smi.c
··· 252 252 253 253 freqs.old = speedstep_freqs[speedstep_get_state()].frequency; 254 254 freqs.new = speedstep_freqs[newstate].frequency; 255 - freqs.cpu = 0; /* speedstep.c is UP only driver */ 256 255 257 256 if (freqs.old == freqs.new) 258 257 return 0; 259 258 260 - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 259 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 261 260 speedstep_set_state(newstate); 262 - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 261 + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 263 262 264 263 return 0; 265 264 }
+2 -2
include/linux/cpufreq.h
··· 278 278 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); 279 279 280 280 281 - void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state); 282 - 281 + void cpufreq_notify_transition(struct cpufreq_policy *policy, 282 + struct cpufreq_freqs *freqs, unsigned int state); 283 283 284 284 static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) 285 285 {