cpumask: use work_on_cpu in acpi-cpufreq.c for read_measured_perf_ctrs

Impact: use new cpumask API to reduce stack usage

Replace the saving of current->cpus_allowed and the set_cpus_allowed_ptr()
calls with a work_on_cpu() call that runs read_measured_perf_ctrs() on the
target cpu.

This splits the per-cpu work out of get_measured_perf() into a separate
work function, read_measured_perf_ctrs(), which is run on the designated
cpu. The definition of struct perf_cur moves out of function-local scope
and is used as the work function argument; get_measured_perf() then reads
the values from that perf_cur struct.
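
For reference, the general shape of the work_on_cpu() pattern adopted here
is sketched below; the struct and function names in the sketch are made up
for illustration and are not part of this patch.

  #include <linux/types.h>
  #include <linux/workqueue.h>

  struct example_arg {			/* hypothetical argument struct */
  	u64 value;
  };

  /* Runs on the chosen cpu; its return value becomes work_on_cpu()'s. */
  static long example_work_fn(void *_arg)
  {
  	struct example_arg *arg = _arg;

  	arg->value = 0;			/* read per-cpu state here */
  	return 0;
  }

  static long example_caller(unsigned int cpu)
  {
  	struct example_arg arg;

  	/* Queue example_work_fn on cpu's workqueue and wait for it,
  	 * instead of rebinding current->cpus_allowed around the read. */
  	return work_on_cpu(cpu, example_work_fn, &arg);
  }

work_on_cpu() takes the cpu number, a long (*fn)(void *) and an argument
pointer, and returns fn's return value after fn has run on that cpu.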

Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Mike Travis, committed by Ingo Molnar
e39ad415 7503bfba
+43 -40
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
···
 	return cmd.val;
 }
 
+struct perf_cur {
+	union {
+		struct {
+			u32 lo;
+			u32 hi;
+		} split;
+		u64 whole;
+	} aperf_cur, mperf_cur;
+};
+
+
+static long read_measured_perf_ctrs(void *_cur)
+{
+	struct perf_cur *cur = _cur;
+
+	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
+	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);
+
+	wrmsr(MSR_IA32_APERF, 0, 0);
+	wrmsr(MSR_IA32_MPERF, 0, 0);
+
+	return 0;
+}
+
 /*
  * Return the measured active (C0) frequency on this CPU since last call
  * to this function.
···
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	union {
-		struct {
-			u32 lo;
-			u32 hi;
-		} split;
-		u64 whole;
-	} aperf_cur, mperf_cur;
-
-	cpumask_t saved_mask;
+	struct perf_cur cur;
 	unsigned int perf_percent;
 	unsigned int retval;
 
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-	if (get_cpu() != cpu) {
-		/* We were not able to run on requested processor */
-		put_cpu();
+	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
 		return 0;
-	}
-
-	rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
-	rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
-
-	wrmsr(MSR_IA32_APERF, 0,0);
-	wrmsr(MSR_IA32_MPERF, 0,0);
 
 #ifdef __i386__
 	/*
···
 	 * Get an approximate value. Return failure in case we cannot get
 	 * an approximate value.
 	 */
-	if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
+	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
 		int shift_count;
 		u32 h;
 
-		h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
+		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
 		shift_count = fls(h);
 
-		aperf_cur.whole >>= shift_count;
-		mperf_cur.whole >>= shift_count;
+		cur.aperf_cur.whole >>= shift_count;
+		cur.mperf_cur.whole >>= shift_count;
 	}
 
-	if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
+	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
 		int shift_count = 7;
-		aperf_cur.split.lo >>= shift_count;
-		mperf_cur.split.lo >>= shift_count;
+		cur.aperf_cur.split.lo >>= shift_count;
+		cur.mperf_cur.split.lo >>= shift_count;
 	}
 
-	if (aperf_cur.split.lo && mperf_cur.split.lo)
-		perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
+	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
+		perf_percent = (cur.aperf_cur.split.lo * 100) /
+			cur.mperf_cur.split.lo;
 	else
 		perf_percent = 0;
 
 #else
-	if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
+	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
 		int shift_count = 7;
-		aperf_cur.whole >>= shift_count;
-		mperf_cur.whole >>= shift_count;
+		cur.aperf_cur.whole >>= shift_count;
+		cur.mperf_cur.whole >>= shift_count;
 	}
 
-	if (aperf_cur.whole && mperf_cur.whole)
-		perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
+	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
+		perf_percent = (cur.aperf_cur.whole * 100) /
+			cur.mperf_cur.whole;
 	else
 		perf_percent = 0;
 
···
 
 	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
 
-	put_cpu();
-	set_cpus_allowed_ptr(current, &saved_mask);
-
-	dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
 	return retval;
 }
 
···
 	}
 
 	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
+	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.