cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t

Impact: Reduce memory usage, use new API.

This is part of an effort to reduce structure sizes for machines
configured with large NR_CPUS. cpumask_t gets replaced by
cpumask_var_t, which is either struct cpumask[1] (small NR_CPUS) or
struct cpumask * (large NR_CPUS).

(Changes to powernow-k* by <travis>.)

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Rusty Russell and committed by Ingo Molnar.
2fdf66b4 ee943a82

+128 -58
+24 -3
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
··· 517 } 518 } 519 520 /* 521 * acpi_cpufreq_early_init - initialize ACPI P-States library 522 * ··· 538 */ 539 static int __init acpi_cpufreq_early_init(void) 540 { 541 dprintk("acpi_cpufreq_early_init\n"); 542 543 acpi_perf_data = alloc_percpu(struct acpi_processor_performance); 544 if (!acpi_perf_data) { 545 dprintk("Memory allocation error for acpi_perf_data.\n"); 546 return -ENOMEM; 547 } 548 549 /* Do initialization in ACPI core */ ··· 625 */ 626 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || 627 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { 628 - policy->cpus = perf->shared_cpu_map; 629 } 630 - policy->related_cpus = perf->shared_cpu_map; 631 632 #ifdef CONFIG_SMP 633 dmi_check_system(sw_any_bug_dmi_table); ··· 816 817 ret = cpufreq_register_driver(&acpi_cpufreq_driver); 818 if (ret) 819 - free_percpu(acpi_perf_data); 820 821 return ret; 822 }
··· 517 } 518 } 519 520 + static void free_acpi_perf_data(void) 521 + { 522 + unsigned int i; 523 + 524 + /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ 525 + for_each_possible_cpu(i) 526 + free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) 527 + ->shared_cpu_map); 528 + free_percpu(acpi_perf_data); 529 + } 530 + 531 /* 532 * acpi_cpufreq_early_init - initialize ACPI P-States library 533 * ··· 527 */ 528 static int __init acpi_cpufreq_early_init(void) 529 { 530 + unsigned int i; 531 dprintk("acpi_cpufreq_early_init\n"); 532 533 acpi_perf_data = alloc_percpu(struct acpi_processor_performance); 534 if (!acpi_perf_data) { 535 dprintk("Memory allocation error for acpi_perf_data.\n"); 536 return -ENOMEM; 537 + } 538 + for_each_possible_cpu(i) { 539 + if (!alloc_cpumask_var(&per_cpu_ptr(acpi_perf_data, i) 540 + ->shared_cpu_map, GFP_KERNEL)) { 541 + 542 + /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ 543 + free_acpi_perf_data(); 544 + return -ENOMEM; 545 + } 546 } 547 548 /* Do initialization in ACPI core */ ··· 604 */ 605 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || 606 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { 607 + cpumask_copy(&policy->cpus, perf->shared_cpu_map); 608 } 609 + cpumask_copy(&policy->related_cpus, perf->shared_cpu_map); 610 611 #ifdef CONFIG_SMP 612 dmi_check_system(sw_any_bug_dmi_table); ··· 795 796 ret = cpufreq_register_driver(&acpi_cpufreq_driver); 797 if (ret) 798 + free_acpi_perf_data(); 799 800 return ret; 801 }
+9
arch/x86/kernel/cpu/cpufreq/powernow-k7.c
··· 310 goto err0; 311 } 312 313 if (acpi_processor_register_performance(acpi_processor_perf, 0)) { 314 retval = -EIO; 315 goto err1; ··· 418 err2: 419 acpi_processor_unregister_performance(acpi_processor_perf, 0); 420 err1: 421 kfree(acpi_processor_perf); 422 err0: 423 printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n"); ··· 660 #ifdef CONFIG_X86_POWERNOW_K7_ACPI 661 if (acpi_processor_perf) { 662 acpi_processor_unregister_performance(acpi_processor_perf, 0); 663 kfree(acpi_processor_perf); 664 } 665 #endif
··· 310 goto err0; 311 } 312 313 + if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, 314 + GFP_KERNEL)) { 315 + retval = -ENOMEM; 316 + goto err05; 317 + } 318 + 319 if (acpi_processor_register_performance(acpi_processor_perf, 0)) { 320 retval = -EIO; 321 goto err1; ··· 412 err2: 413 acpi_processor_unregister_performance(acpi_processor_perf, 0); 414 err1: 415 + free_cpumask_var(acpi_processor_perf->shared_cpu_map); 416 + err05: 417 kfree(acpi_processor_perf); 418 err0: 419 printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n"); ··· 652 #ifdef CONFIG_X86_POWERNOW_K7_ACPI 653 if (acpi_processor_perf) { 654 acpi_processor_unregister_performance(acpi_processor_perf, 0); 655 + free_cpumask_var(acpi_processor_perf->shared_cpu_map); 656 kfree(acpi_processor_perf); 657 } 658 #endif
+15 -9
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
··· 766 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) 767 { 768 struct cpufreq_frequency_table *powernow_table; 769 - int ret_val; 770 771 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { 772 dprintk("register performance failed: bad ACPI data\n"); ··· 815 /* notify BIOS that we exist */ 816 acpi_processor_notify_smm(THIS_MODULE); 817 818 return 0; 819 820 err_out_mem: ··· 833 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ 834 data->acpi_data.state_count = 0; 835 836 - return -ENODEV; 837 } 838 839 static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) ··· 936 { 937 if (data->acpi_data.state_count) 938 acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 939 } 940 941 #else ··· 1142 data->cpu = pol->cpu; 1143 data->currpstate = HW_PSTATE_INVALID; 1144 1145 - if (powernow_k8_cpu_init_acpi(data)) { 1146 /* 1147 * Use the PSB BIOS structure. This is only availabe on 1148 * an UP version, and is deprecated by AMD. ··· 1161 "ACPI maintainers and complain to your BIOS " 1162 "vendor.\n"); 1163 #endif 1164 - kfree(data); 1165 - return -ENODEV; 1166 } 1167 if (pol->cpu != 0) { 1168 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " 1169 "CPU other than CPU0. Complain to your BIOS " 1170 "vendor.\n"); 1171 - kfree(data); 1172 - return -ENODEV; 1173 } 1174 rc = find_psb_table(data); 1175 if (rc) { 1176 - kfree(data); 1177 - return -ENODEV; 1178 } 1179 } 1180
··· 766 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) 767 { 768 struct cpufreq_frequency_table *powernow_table; 769 + int ret_val = -ENODEV; 770 771 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { 772 dprintk("register performance failed: bad ACPI data\n"); ··· 815 /* notify BIOS that we exist */ 816 acpi_processor_notify_smm(THIS_MODULE); 817 818 + if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { 819 + printk(KERN_ERR PFX 820 + "unable to alloc powernow_k8_data cpumask\n"); 821 + ret_val = -ENOMEM; 822 + goto err_out_mem; 823 + } 824 + 825 return 0; 826 827 err_out_mem: ··· 826 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ 827 data->acpi_data.state_count = 0; 828 829 + return ret_val; 830 } 831 832 static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) ··· 929 { 930 if (data->acpi_data.state_count) 931 acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 932 + free_cpumask_var(data->acpi_data.shared_cpu_map); 933 } 934 935 #else ··· 1134 data->cpu = pol->cpu; 1135 data->currpstate = HW_PSTATE_INVALID; 1136 1137 + rc = powernow_k8_cpu_init_acpi(data); 1138 + if (rc) { 1139 /* 1140 * Use the PSB BIOS structure. This is only availabe on 1141 * an UP version, and is deprecated by AMD. ··· 1152 "ACPI maintainers and complain to your BIOS " 1153 "vendor.\n"); 1154 #endif 1155 + goto err_out; 1156 } 1157 if (pol->cpu != 0) { 1158 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " 1159 "CPU other than CPU0. Complain to your BIOS " 1160 "vendor.\n"); 1161 + goto err_out; 1162 } 1163 rc = find_psb_table(data); 1164 if (rc) { 1165 + goto err_out; 1166 } 1167 } 1168
+10 -4
drivers/acpi/processor_core.c
··· 826 if (!pr) 827 return -ENOMEM; 828 829 pr->handle = device->handle; 830 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); 831 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); ··· 850 851 pr = acpi_driver_data(device); 852 853 - if (pr->id >= nr_cpu_ids) { 854 - kfree(pr); 855 - return 0; 856 - } 857 858 if (type == ACPI_BUS_REMOVAL_EJECT) { 859 if (acpi_processor_handle_eject(pr)) ··· 876 877 per_cpu(processors, pr->id) = NULL; 878 per_cpu(processor_device_array, pr->id) = NULL; 879 kfree(pr); 880 881 return 0;
··· 826 if (!pr) 827 return -ENOMEM; 828 829 + if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 830 + kfree(pr); 831 + return -ENOMEM; 832 + } 833 + 834 pr->handle = device->handle; 835 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); 836 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); ··· 845 846 pr = acpi_driver_data(device); 847 848 + if (pr->id >= nr_cpu_ids) 849 + goto free; 850 851 if (type == ACPI_BUS_REMOVAL_EJECT) { 852 if (acpi_processor_handle_eject(pr)) ··· 873 874 per_cpu(processors, pr->id) = NULL; 875 per_cpu(processor_device_array, pr->id) = NULL; 876 + 877 + free: 878 + free_cpumask_var(pr->throttling.shared_cpu_map); 879 kfree(pr); 880 881 return 0;
+16 -12
drivers/acpi/processor_perflib.c
··· 588 int count, count_target; 589 int retval = 0; 590 unsigned int i, j; 591 - cpumask_t covered_cpus; 592 struct acpi_processor *pr; 593 struct acpi_psd_package *pdomain; 594 struct acpi_processor *match_pr; 595 struct acpi_psd_package *match_pdomain; 596 597 mutex_lock(&performance_mutex); 598 ··· 620 } 621 622 pr->performance = percpu_ptr(performance, i); 623 - cpu_set(i, pr->performance->shared_cpu_map); 624 if (acpi_processor_get_psd(pr)) { 625 retval = -EINVAL; 626 continue; ··· 653 } 654 } 655 656 - cpus_clear(covered_cpus); 657 for_each_possible_cpu(i) { 658 pr = per_cpu(processors, i); 659 if (!pr) 660 continue; 661 662 - if (cpu_isset(i, covered_cpus)) 663 continue; 664 665 pdomain = &(pr->performance->domain_info); 666 - cpu_set(i, pr->performance->shared_cpu_map); 667 - cpu_set(i, covered_cpus); 668 if (pdomain->num_processors <= 1) 669 continue; 670 ··· 702 goto err_ret; 703 } 704 705 - cpu_set(j, covered_cpus); 706 - cpu_set(j, pr->performance->shared_cpu_map); 707 count++; 708 } 709 ··· 721 722 match_pr->performance->shared_type = 723 pr->performance->shared_type; 724 - match_pr->performance->shared_cpu_map = 725 - pr->performance->shared_cpu_map; 726 } 727 } 728 ··· 734 735 /* Assume no coordination on any error parsing domain info */ 736 if (retval) { 737 - cpus_clear(pr->performance->shared_cpu_map); 738 - cpu_set(i, pr->performance->shared_cpu_map); 739 pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL; 740 } 741 pr->performance = NULL; /* Will be set for real in register */ 742 } 743 744 mutex_unlock(&performance_mutex); 745 return retval; 746 } 747 EXPORT_SYMBOL(acpi_processor_preregister_performance);
··· 588 int count, count_target; 589 int retval = 0; 590 unsigned int i, j; 591 + cpumask_var_t covered_cpus; 592 struct acpi_processor *pr; 593 struct acpi_psd_package *pdomain; 594 struct acpi_processor *match_pr; 595 struct acpi_psd_package *match_pdomain; 596 + 597 + if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 598 + return -ENOMEM; 599 600 mutex_lock(&performance_mutex); 601 ··· 617 } 618 619 pr->performance = percpu_ptr(performance, i); 620 + cpumask_set_cpu(i, pr->performance->shared_cpu_map); 621 if (acpi_processor_get_psd(pr)) { 622 retval = -EINVAL; 623 continue; ··· 650 } 651 } 652 653 + cpumask_clear(covered_cpus); 654 for_each_possible_cpu(i) { 655 pr = per_cpu(processors, i); 656 if (!pr) 657 continue; 658 659 + if (cpumask_test_cpu(i, covered_cpus)) 660 continue; 661 662 pdomain = &(pr->performance->domain_info); 663 + cpumask_set_cpu(i, pr->performance->shared_cpu_map); 664 + cpumask_set_cpu(i, covered_cpus); 665 if (pdomain->num_processors <= 1) 666 continue; 667 ··· 699 goto err_ret; 700 } 701 702 + cpumask_set_cpu(j, covered_cpus); 703 + cpumask_set_cpu(j, pr->performance->shared_cpu_map); 704 count++; 705 } 706 ··· 718 719 match_pr->performance->shared_type = 720 pr->performance->shared_type; 721 + cpumask_copy(match_pr->performance->shared_cpu_map, 722 + pr->performance->shared_cpu_map); 723 } 724 } 725 ··· 731 732 /* Assume no coordination on any error parsing domain info */ 733 if (retval) { 734 + cpumask_clear(pr->performance->shared_cpu_map); 735 + cpumask_set_cpu(i, pr->performance->shared_cpu_map); 736 pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL; 737 } 738 pr->performance = NULL; /* Will be set for real in register */ 739 } 740 741 mutex_unlock(&performance_mutex); 742 + free_cpumask_var(covered_cpus); 743 return retval; 744 } 745 EXPORT_SYMBOL(acpi_processor_preregister_performance);
+52 -28
drivers/acpi/processor_throttling.c
··· 61 int count, count_target; 62 int retval = 0; 63 unsigned int i, j; 64 - cpumask_t covered_cpus; 65 struct acpi_processor *pr, *match_pr; 66 struct acpi_tsd_package *pdomain, *match_pdomain; 67 struct acpi_processor_throttling *pthrottling, *match_pthrottling; 68 69 /* 70 * Now that we have _TSD data from all CPUs, lets setup T-state ··· 94 if (retval) 95 goto err_ret; 96 97 - cpus_clear(covered_cpus); 98 for_each_possible_cpu(i) { 99 pr = per_cpu(processors, i); 100 if (!pr) 101 continue; 102 103 - if (cpu_isset(i, covered_cpus)) 104 continue; 105 pthrottling = &pr->throttling; 106 107 pdomain = &(pthrottling->domain_info); 108 - cpu_set(i, pthrottling->shared_cpu_map); 109 - cpu_set(i, covered_cpus); 110 /* 111 * If the number of processor in the TSD domain is 1, it is 112 * unnecessary to parse the coordination for this CPU. ··· 147 goto err_ret; 148 } 149 150 - cpu_set(j, covered_cpus); 151 - cpu_set(j, pthrottling->shared_cpu_map); 152 count++; 153 } 154 for_each_possible_cpu(j) { ··· 168 * If some CPUS have the same domain, they 169 * will have the same shared_cpu_map. 
170 */ 171 - match_pthrottling->shared_cpu_map = 172 - pthrottling->shared_cpu_map; 173 } 174 } 175 176 err_ret: 177 for_each_possible_cpu(i) { 178 pr = per_cpu(processors, i); 179 if (!pr) ··· 187 */ 188 if (retval) { 189 pthrottling = &(pr->throttling); 190 - cpus_clear(pthrottling->shared_cpu_map); 191 - cpu_set(i, pthrottling->shared_cpu_map); 192 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 193 } 194 } ··· 572 pthrottling = &pr->throttling; 573 pthrottling->tsd_valid_flag = 1; 574 pthrottling->shared_type = pdomain->coord_type; 575 - cpu_set(pr->id, pthrottling->shared_cpu_map); 576 /* 577 * If the coordination type is not defined in ACPI spec, 578 * the tsd_valid_flag will be clear and coordination type ··· 831 832 static int acpi_processor_get_throttling(struct acpi_processor *pr) 833 { 834 - cpumask_t saved_mask; 835 int ret; 836 837 if (!pr) ··· 839 840 if (!pr->flags.throttling) 841 return -ENODEV; 842 /* 843 * Migrate task to the cpu pointed by pr. 844 */ 845 - saved_mask = current->cpus_allowed; 846 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); 847 ret = pr->throttling.acpi_processor_get_throttling(pr); 848 /* restore the previous state */ 849 - set_cpus_allowed_ptr(current, &saved_mask); 850 851 return ret; 852 } ··· 997 998 int acpi_processor_set_throttling(struct acpi_processor *pr, int state) 999 { 1000 - cpumask_t saved_mask; 1001 int ret = 0; 1002 unsigned int i; 1003 struct acpi_processor *match_pr; 1004 struct acpi_processor_throttling *p_throttling; 1005 struct throttling_tstate t_state; 1006 - cpumask_t online_throttling_cpus; 1007 1008 if (!pr) 1009 return -EINVAL; ··· 1014 if ((state < 0) || (state > (pr->throttling.state_count - 1))) 1015 return -EINVAL; 1016 1017 - saved_mask = current->cpus_allowed; 1018 t_state.target_state = state; 1019 p_throttling = &(pr->throttling); 1020 - cpus_and(online_throttling_cpus, cpu_online_map, 1021 - p_throttling->shared_cpu_map); 1022 /* 1023 * The throttling notifier will be called 
for every 1024 * affected cpu in order to get one proper T-state. 1025 * The notifier event is THROTTLING_PRECHANGE. 1026 */ 1027 - for_each_cpu_mask_nr(i, online_throttling_cpus) { 1028 t_state.cpu = i; 1029 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, 1030 &t_state); ··· 1044 * it can be called only for the cpu pointed by pr. 1045 */ 1046 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1047 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); 1048 ret = p_throttling->acpi_processor_set_throttling(pr, 1049 t_state.target_state); 1050 } else { ··· 1054 * it is necessary to set T-state for every affected 1055 * cpus. 1056 */ 1057 - for_each_cpu_mask_nr(i, online_throttling_cpus) { 1058 match_pr = per_cpu(processors, i); 1059 /* 1060 * If the pointer is invalid, we will report the ··· 1076 continue; 1077 } 1078 t_state.cpu = i; 1079 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); 1080 ret = match_pr->throttling. 1081 acpi_processor_set_throttling( 1082 match_pr, t_state.target_state); ··· 1089 * affected cpu to update the T-states. 1090 * The notifier event is THROTTLING_POSTCHANGE 1091 */ 1092 - for_each_cpu_mask_nr(i, online_throttling_cpus) { 1093 t_state.cpu = i; 1094 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, 1095 &t_state); 1096 } 1097 /* restore the previous state */ 1098 - set_cpus_allowed_ptr(current, &saved_mask); 1099 return ret; 1100 } 1101 ··· 1144 if (acpi_processor_get_tsd(pr)) { 1145 pthrottling = &pr->throttling; 1146 pthrottling->tsd_valid_flag = 0; 1147 - cpu_set(pr->id, pthrottling->shared_cpu_map); 1148 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 1149 } 1150
··· 61 int count, count_target; 62 int retval = 0; 63 unsigned int i, j; 64 + cpumask_var_t covered_cpus; 65 struct acpi_processor *pr, *match_pr; 66 struct acpi_tsd_package *pdomain, *match_pdomain; 67 struct acpi_processor_throttling *pthrottling, *match_pthrottling; 68 + 69 + if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 70 + return -ENOMEM; 71 72 /* 73 * Now that we have _TSD data from all CPUs, lets setup T-state ··· 91 if (retval) 92 goto err_ret; 93 94 + cpumask_clear(covered_cpus); 95 for_each_possible_cpu(i) { 96 pr = per_cpu(processors, i); 97 if (!pr) 98 continue; 99 100 + if (cpumask_test_cpu(i, covered_cpus)) 101 continue; 102 pthrottling = &pr->throttling; 103 104 pdomain = &(pthrottling->domain_info); 105 + cpumask_set_cpu(i, pthrottling->shared_cpu_map); 106 + cpumask_set_cpu(i, covered_cpus); 107 /* 108 * If the number of processor in the TSD domain is 1, it is 109 * unnecessary to parse the coordination for this CPU. ··· 144 goto err_ret; 145 } 146 147 + cpumask_set_cpu(j, covered_cpus); 148 + cpumask_set_cpu(j, pthrottling->shared_cpu_map); 149 count++; 150 } 151 for_each_possible_cpu(j) { ··· 165 * If some CPUS have the same domain, they 166 * will have the same shared_cpu_map. 
167 */ 168 + cpumask_copy(match_pthrottling->shared_cpu_map, 169 + pthrottling->shared_cpu_map); 170 } 171 } 172 173 err_ret: 174 + free_cpumask_var(covered_cpus); 175 + 176 for_each_possible_cpu(i) { 177 pr = per_cpu(processors, i); 178 if (!pr) ··· 182 */ 183 if (retval) { 184 pthrottling = &(pr->throttling); 185 + cpumask_clear(pthrottling->shared_cpu_map); 186 + cpumask_set_cpu(i, pthrottling->shared_cpu_map); 187 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 188 } 189 } ··· 567 pthrottling = &pr->throttling; 568 pthrottling->tsd_valid_flag = 1; 569 pthrottling->shared_type = pdomain->coord_type; 570 + cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); 571 /* 572 * If the coordination type is not defined in ACPI spec, 573 * the tsd_valid_flag will be clear and coordination type ··· 826 827 static int acpi_processor_get_throttling(struct acpi_processor *pr) 828 { 829 + cpumask_var_t saved_mask; 830 int ret; 831 832 if (!pr) ··· 834 835 if (!pr->flags.throttling) 836 return -ENODEV; 837 + 838 + if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) 839 + return -ENOMEM; 840 + 841 /* 842 * Migrate task to the cpu pointed by pr. 
843 */ 844 + cpumask_copy(saved_mask, &current->cpus_allowed); 845 + /* FIXME: use work_on_cpu() */ 846 + set_cpus_allowed_ptr(current, cpumask_of(pr->id)); 847 ret = pr->throttling.acpi_processor_get_throttling(pr); 848 /* restore the previous state */ 849 + set_cpus_allowed_ptr(current, saved_mask); 850 + free_cpumask_var(saved_mask); 851 852 return ret; 853 } ··· 986 987 int acpi_processor_set_throttling(struct acpi_processor *pr, int state) 988 { 989 + cpumask_var_t saved_mask; 990 int ret = 0; 991 unsigned int i; 992 struct acpi_processor *match_pr; 993 struct acpi_processor_throttling *p_throttling; 994 struct throttling_tstate t_state; 995 + cpumask_var_t online_throttling_cpus; 996 997 if (!pr) 998 return -EINVAL; ··· 1003 if ((state < 0) || (state > (pr->throttling.state_count - 1))) 1004 return -EINVAL; 1005 1006 + if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) 1007 + return -ENOMEM; 1008 + 1009 + if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) { 1010 + free_cpumask_var(saved_mask); 1011 + return -ENOMEM; 1012 + } 1013 + 1014 + cpumask_copy(saved_mask, &current->cpus_allowed); 1015 t_state.target_state = state; 1016 p_throttling = &(pr->throttling); 1017 + cpumask_and(online_throttling_cpus, cpu_online_mask, 1018 + p_throttling->shared_cpu_map); 1019 /* 1020 * The throttling notifier will be called for every 1021 * affected cpu in order to get one proper T-state. 1022 * The notifier event is THROTTLING_PRECHANGE. 1023 */ 1024 + for_each_cpu(i, online_throttling_cpus) { 1025 t_state.cpu = i; 1026 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, 1027 &t_state); ··· 1025 * it can be called only for the cpu pointed by pr. 
1026 */ 1027 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1028 + /* FIXME: use work_on_cpu() */ 1029 + set_cpus_allowed_ptr(current, cpumask_of(pr->id)); 1030 ret = p_throttling->acpi_processor_set_throttling(pr, 1031 t_state.target_state); 1032 } else { ··· 1034 * it is necessary to set T-state for every affected 1035 * cpus. 1036 */ 1037 + for_each_cpu(i, online_throttling_cpus) { 1038 match_pr = per_cpu(processors, i); 1039 /* 1040 * If the pointer is invalid, we will report the ··· 1056 continue; 1057 } 1058 t_state.cpu = i; 1059 + /* FIXME: use work_on_cpu() */ 1060 + set_cpus_allowed_ptr(current, cpumask_of(i)); 1061 ret = match_pr->throttling. 1062 acpi_processor_set_throttling( 1063 match_pr, t_state.target_state); ··· 1068 * affected cpu to update the T-states. 1069 * The notifier event is THROTTLING_POSTCHANGE 1070 */ 1071 + for_each_cpu(i, online_throttling_cpus) { 1072 t_state.cpu = i; 1073 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, 1074 &t_state); 1075 } 1076 /* restore the previous state */ 1077 + /* FIXME: use work_on_cpu() */ 1078 + set_cpus_allowed_ptr(current, saved_mask); 1079 + free_cpumask_var(online_throttling_cpus); 1080 + free_cpumask_var(saved_mask); 1081 return ret; 1082 } 1083 ··· 1120 if (acpi_processor_get_tsd(pr)) { 1121 pthrottling = &pr->throttling; 1122 pthrottling->tsd_valid_flag = 0; 1123 + cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); 1124 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 1125 } 1126
+2 -2
include/acpi/processor.h
··· 127 unsigned int state_count; 128 struct acpi_processor_px *states; 129 struct acpi_psd_package domain_info; 130 - cpumask_t shared_cpu_map; 131 unsigned int shared_type; 132 }; 133 ··· 172 unsigned int state_count; 173 struct acpi_processor_tx_tss *states_tss; 174 struct acpi_tsd_package domain_info; 175 - cpumask_t shared_cpu_map; 176 int (*acpi_processor_get_throttling) (struct acpi_processor * pr); 177 int (*acpi_processor_set_throttling) (struct acpi_processor * pr, 178 int state);
··· 127 unsigned int state_count; 128 struct acpi_processor_px *states; 129 struct acpi_psd_package domain_info; 130 + cpumask_var_t shared_cpu_map; 131 unsigned int shared_type; 132 }; 133 ··· 172 unsigned int state_count; 173 struct acpi_processor_tx_tss *states_tss; 174 struct acpi_tsd_package domain_info; 175 + cpumask_var_t shared_cpu_map; 176 int (*acpi_processor_get_throttling) (struct acpi_processor * pr); 177 int (*acpi_processor_set_throttling) (struct acpi_processor * pr, 178 int state);