cpumask: use cpumask_var_t in acpi-cpufreq.c

Impact: cleanup, reduce stack usage, use new cpumask API.

Replace the cpumask_t in struct drv_cmd with a cpumask_var_t, allocated in
acpi_cpufreq_target() and freed on every exit path. Remove the now-unneeded
online_policy_cpus cpumask_t from acpi_cpufreq_target(). Update all references
to use the new cpumask API (cpumask_copy(), cpumask_and(), cpumask_of(),
for_each_cpu(), etc.).

Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
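
For readers unfamiliar with the pattern: cpumask_var_t pairs with
alloc_cpumask_var()/free_cpumask_var() so that large-NR_CPUS builds can keep
the bitmap off the stack. A minimal sketch of the lifecycle this patch adopts
follows; the helper name and body are illustrative only, not part of the patch:

	#include <linux/cpumask.h>
	#include <linux/slab.h>

	/* Hypothetical helper showing the cpumask_var_t lifecycle. */
	static int walk_online_policy_cpus(const struct cpumask *policy_cpus)
	{
		cpumask_var_t tmp;
		int cpu;

		/* Pointer plus heap allocation with CONFIG_CPUMASK_OFFSTACK=y;
		 * an ordinary on-stack cpumask (no-op alloc) otherwise. */
		if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
			return -ENOMEM;

		/* Restrict to online CPUs, as acpi_cpufreq_target() now does. */
		cpumask_and(tmp, cpu_online_mask, policy_cpus);

		for_each_cpu(cpu, tmp)
			;	/* per-cpu work would go here */

		free_cpumask_var(tmp);
		return 0;
	}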

 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 58 +++++++++++++++-----------------
 1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -145,7 +145,7 @@
 
 struct drv_cmd {
 	unsigned int type;
-	cpumask_t mask;
+	cpumask_var_t mask;
 	drv_addr_union addr;
 	u32 val;
 };
@@ -193,7 +193,7 @@
 	cpumask_t saved_mask = current->cpus_allowed;
 	cmd->val = 0;
 
-	set_cpus_allowed_ptr(current, &cmd->mask);
+	set_cpus_allowed_ptr(current, cmd->mask);
 	do_drv_read(cmd);
 	set_cpus_allowed_ptr(current, &saved_mask);
 }
@@ -203,8 +203,8 @@
 	cpumask_t saved_mask = current->cpus_allowed;
 	unsigned int i;
 
-	for_each_cpu_mask_nr(i, cmd->mask) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+	for_each_cpu(i, cmd->mask) {
+		set_cpus_allowed_ptr(current, cpumask_of(i));
 		do_drv_write(cmd);
 	}
 
@@ -215,19 +215,19 @@
-static u32 get_cur_val(const cpumask_t *mask)
+static u32 get_cur_val(const struct cpumask *mask)
 {
 	struct acpi_processor_performance *perf;
 	struct drv_cmd cmd;
 
-	if (unlikely(cpus_empty(*mask)))
+	if (unlikely(cpumask_empty(mask)))
 		return 0;
 
-	switch (per_cpu(drv_data, first_cpu(*mask))->cpu_feature) {
+	switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(drv_data, first_cpu(*mask))->acpi_data;
+		perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
 		cmd.addr.io.port = perf->control_register.address;
 		cmd.addr.io.bit_width = perf->control_register.bit_width;
 		break;
@@ -235,6 +235,6 @@
 		return 0;
 	}
 
-	cmd.mask = *mask;
+	cpumask_copy(cmd.mask, mask);
 
 	drv_read(&cmd);
@@ -386,7 +386,6 @@
 	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
 	struct acpi_processor_performance *perf;
 	struct cpufreq_freqs freqs;
-	cpumask_t online_policy_cpus;
 	struct drv_cmd cmd;
 	unsigned int next_state = 0; /* Index into freq_table */
 	unsigned int next_perf_state = 0; /* Index into perf table */
@@ -400,20 +401,18 @@
 		return -ENODEV;
 	}
 
+	if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
+		return -ENOMEM;
+
 	perf = data->acpi_data;
 	result = cpufreq_frequency_table_target(policy,
 						data->freq_table,
 						target_freq,
 						relation, &next_state);
-	if (unlikely(result))
-		return -ENODEV;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	/* cpufreq holds the hotplug lock, so we are safe from here on */
-	cpumask_and(&online_policy_cpus, cpu_online_mask, policy->cpus);
-#else
-	online_policy_cpus = policy->cpus;
-#endif
+	if (unlikely(result)) {
+		result = -ENODEV;
+		goto out;
+	}
 
 	next_perf_state = data->freq_table[next_state].index;
 	if (perf->state == next_perf_state) {
@@ -422,6 +425,6 @@
 		} else {
 			dprintk("Already at target state (P%d)\n",
 				next_perf_state);
-			return 0;
+			goto out;
 		}
 	}
@@ -441,19 +444,19 @@
 		cmd.val = (u32) perf->states[next_perf_state].control;
 		break;
 	default:
-		return -ENODEV;
+		result = -ENODEV;
+		goto out;
 	}
 
-	cpus_clear(cmd.mask);
-
+	/* cpufreq holds the hotplug lock, so we are safe from here on */
 	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
-		cmd.mask = online_policy_cpus;
+		cpumask_and(cmd.mask, cpu_online_mask, policy->cpus);
 	else
-		cpu_set(policy->cpu, cmd.mask);
+		cpumask_copy(cmd.mask, cpumask_of(policy->cpu));
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
-	for_each_cpu_mask_nr(i, cmd.mask) {
+	for_each_cpu(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -461,18 +464,21 @@
 	drv_write(&cmd);
 
 	if (acpi_pstate_strict) {
-		if (!check_freqs(&cmd.mask, freqs.new, data)) {
+		if (!check_freqs(cmd.mask, freqs.new, data)) {
 			dprintk("acpi_cpufreq_target failed (%d)\n",
 				policy->cpu);
-			return -EAGAIN;
+			result = -EAGAIN;
+			goto out;
 		}
 	}
 
-	for_each_cpu_mask_nr(i, cmd.mask) {
+	for_each_cpu(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
 	perf->state = next_perf_state;
 
+out:
+	free_cpumask_var(cmd.mask);
 	return result;
 }
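
A note on the "reduce stack usage" claim: with CONFIG_CPUMASK_OFFSTACK=y
(large-NR_CPUS builds), cpumask_var_t is a pointer and alloc_cpumask_var()
obtains the bitmap from the heap, so struct drv_cmd and acpi_cpufreq_target()
no longer carry an NR_CPUS-sized mask on the stack. With the option disabled,
cpumask_var_t is an ordinary cpumask and the alloc/free calls compile away,
so small configurations pay nothing for the conversion.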