cpumask: use cpumask_var_t in acpi-cpufreq.c

Impact: cleanup, reduce stack usage, use new cpumask API.

Replace the cpumask_t in struct drv_cmd with a cpumask_var_t. Remove unneeded
online_policy_cpus cpumask_t in acpi_cpufreq_target. Update refs to use
new cpumask API.

Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Mike Travis; committed by Ingo Molnar.
Commit hashes shown in the original rendering: 4d8bb537, c74f31c0 (the second is presumably the parent commit).

+29 −29 (29 lines added, 29 lines removed)
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
(Reconstructed unified diff. Hunk headers carry the line numbers shown in the original
rendering; lines marked "···" are context the rendering omitted.)

@@ -145,7 +145,7 @@

 struct drv_cmd {
         unsigned int type;
-        cpumask_t mask;
+        cpumask_var_t mask;
         drv_addr_union addr;
         u32 val;
 };
@@ -193,7 +193,7 @@
         cpumask_t saved_mask = current->cpus_allowed;
         cmd->val = 0;

-        set_cpus_allowed_ptr(current, &cmd->mask);
+        set_cpus_allowed_ptr(current, cmd->mask);
         do_drv_read(cmd);
         set_cpus_allowed_ptr(current, &saved_mask);
 }
@@ -203,8 +203,8 @@
         cpumask_t saved_mask = current->cpus_allowed;
         unsigned int i;

-        for_each_cpu_mask_nr(i, cmd->mask) {
-                set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+        for_each_cpu(i, cmd->mask) {
+                set_cpus_allowed_ptr(current, cpumask_of(i));
                 do_drv_write(cmd);
         }
···
         return;
 }
@@ -215,19 +215,19 @@
-static u32 get_cur_val(const cpumask_t *mask)
+static u32 get_cur_val(const struct cpumask *mask)
 {
         struct acpi_processor_performance *perf;
         struct drv_cmd cmd;

-        if (unlikely(cpus_empty(*mask)))
+        if (unlikely(cpumask_empty(mask)))
                 return 0;

-        switch (per_cpu(drv_data, first_cpu(*mask))->cpu_feature) {
+        switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
         case SYSTEM_INTEL_MSR_CAPABLE:
                 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
                 break;
         case SYSTEM_IO_CAPABLE:
                 cmd.type = SYSTEM_IO_CAPABLE;
-                perf = per_cpu(drv_data, first_cpu(*mask))->acpi_data;
+                perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
                 cmd.addr.io.port = perf->control_register.address;
                 cmd.addr.io.bit_width = perf->control_register.bit_width;
                 break;
@@ -235,7 +235,7 @@
                 return 0;
         }

-        cmd.mask = *mask;
+        cpumask_copy(cmd.mask, mask);

         drv_read(&cmd);

@@ -386,7 +386,6 @@
         struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
         struct acpi_processor_performance *perf;
         struct cpufreq_freqs freqs;
-        cpumask_t online_policy_cpus;
         struct drv_cmd cmd;
         unsigned int next_state = 0; /* Index into freq_table */
         unsigned int next_perf_state = 0; /* Index into perf table */
@@ -400,20 +401,18 @@
                 return -ENODEV;
         }

+        if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
+                return -ENOMEM;
+
         perf = data->acpi_data;
         result = cpufreq_frequency_table_target(policy,
                                                 data->freq_table,
                                                 target_freq,
                                                 relation, &next_state);
-        if (unlikely(result))
-                return -ENODEV;
-
-#ifdef CONFIG_HOTPLUG_CPU
-        /* cpufreq holds the hotplug lock, so we are safe from here on */
-        cpumask_and(&online_policy_cpus, cpu_online_mask, policy->cpus);
-#else
-        online_policy_cpus = policy->cpus;
-#endif
+        if (unlikely(result)) {
+                result = -ENODEV;
+                goto out;
+        }

         next_perf_state = data->freq_table[next_state].index;
         if (perf->state == next_perf_state) {
@@ -422,7 +425,7 @@
         } else {
                 dprintk("Already at target state (P%d)\n",
                         next_perf_state);
-                return 0;
+                goto out;
         }
 }

@@ -441,19 +444,19 @@
                 cmd.val = (u32) perf->states[next_perf_state].control;
                 break;
         default:
-                return -ENODEV;
+                result = -ENODEV;
+                goto out;
         }

-        cpus_clear(cmd.mask);
-
+        /* cpufreq holds the hotplug lock, so we are safe from here on */
         if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
-                cmd.mask = online_policy_cpus;
+                cpumask_and(cmd.mask, cpu_online_mask, policy->cpus);
         else
-                cpu_set(policy->cpu, cmd.mask);
+                cpumask_copy(cmd.mask, cpumask_of(policy->cpu));

         freqs.old = perf->states[perf->state].core_frequency * 1000;
         freqs.new = data->freq_table[next_state].frequency;
-        for_each_cpu_mask_nr(i, cmd.mask) {
+        for_each_cpu(i, cmd.mask) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }
@@ -461,19 +464,22 @@
         drv_write(&cmd);

         if (acpi_pstate_strict) {
-                if (!check_freqs(&cmd.mask, freqs.new, data)) {
+                if (!check_freqs(cmd.mask, freqs.new, data)) {
                         dprintk("acpi_cpufreq_target failed (%d)\n",
                                 policy->cpu);
-                        return -EAGAIN;
+                        result = -EAGAIN;
+                        goto out;
                 }
         }

-        for_each_cpu_mask_nr(i, cmd.mask) {
+        for_each_cpu(i, cmd.mask) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
         perf->state = next_perf_state;

+out:
+        free_cpumask_var(cmd.mask);
         return result;
 }