cpumask: convert struct cpufreq_policy to cpumask_var_t

Impact: use new cpumask API to reduce memory usage

This is part of an effort to reduce structure sizes for machines
configured with large NR_CPUS. cpumask_t gets replaced by
cpumask_var_t, which is either struct cpumask[1] (small NR_CPUS) or
struct cpumask * (large NR_CPUS).
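For readers unfamiliar with the cpumask_var_t API, the pattern every converted
user ends up following is sketched below. This is a minimal illustration only,
not part of the patch; the struct and function names (example_policy,
example_init, example_exit) are hypothetical.

/*
 * Minimal sketch of the cpumask_var_t usage pattern, assuming kernel headers.
 * example_policy/example_init/example_exit are hypothetical names used only
 * for illustration.
 */
#include <linux/cpumask.h>
#include <linux/gfp.h>

struct example_policy {
	cpumask_var_t cpus;	/* struct cpumask[1] or struct cpumask *, per NR_CPUS config */
};

static int example_init(struct example_policy *p, unsigned int cpu)
{
	/* no-op (always succeeds) for small NR_CPUS, real allocation for CPUMASK_OFFSTACK */
	if (!alloc_cpumask_var(&p->cpus, GFP_KERNEL))
		return -ENOMEM;

	/* direct assignment ("p->cpus = ...") no longer works; copy instead */
	cpumask_copy(p->cpus, cpumask_of(cpu));
	return 0;
}

static void example_exit(struct example_policy *p)
{
	free_cpumask_var(p->cpus);	/* must be freed explicitly when off-stack */
}

The cpufreq core change below adds exactly this allocate/copy/free handling
around policy->cpus and policy->related_cpus, while the drivers switch from the
old cpus_* helpers and for_each_cpu_mask_nr() to the pointer-based cpumask_*
helpers and for_each_cpu().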

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Dave Jones <davej@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

authored by Rusty Russell, committed by Ingo Molnar
835481d9 5cb0535f

10 files changed, +62 -48
+5 -5
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
···
 #ifdef CONFIG_HOTPLUG_CPU
 	/* cpufreq holds the hotplug lock, so we are safe from here on */
-	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+	cpumask_and(&online_policy_cpus, cpu_online_mask, policy->cpus);
 #else
 	online_policy_cpus = policy->cpus;
 #endif
···
 	 */
 	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
 	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-		cpumask_copy(&policy->cpus, perf->shared_cpu_map);
+		cpumask_copy(policy->cpus, perf->shared_cpu_map);
 	}
-	cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
+	cpumask_copy(policy->related_cpus, perf->shared_cpu_map);

 #ifdef CONFIG_SMP
 	dmi_check_system(sw_any_bug_dmi_table);
-	if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
+	if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
 		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-		policy->cpus = per_cpu(cpu_core_map, cpu);
+		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
 	}
 #endif
+4 -4
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
···
 		return 0;

 	/* notifiers */
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
···
 	/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
 	 * Developer's Manual, Volume 3
 	 */
-	for_each_cpu_mask_nr(i, policy->cpus)
+	for_each_cpu(i, policy->cpus)
 		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);

 	/* notifiers */
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
···
 	unsigned int i;

 #ifdef CONFIG_SMP
-	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
 #endif

 	/* Errata workaround */
+3 -3
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
···
 	set_cpus_allowed_ptr(current, &oldmask);

 	if (cpu_family == CPU_HW_PSTATE)
-		pol->cpus = cpumask_of_cpu(pol->cpu);
+		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
 	else
-		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
-	data->available_cores = &(pol->cpus);
+		cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
+	data->available_cores = pol->cpus;

 	/* Take a crude guess here.
 	 * That guess was in microseconds, so multiply with 1000 */
+1 -1
arch/x86/kernel/cpu/cpufreq/powernow-k8.h
···
 	/* we need to keep track of associated cores, but let cpufreq
 	 * handle hotplug events - so just point at cpufreq pol->cpus
 	 * structure */
-	cpumask_t *available_cores;
+	struct cpumask *available_cores;
 };
+7 -7
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
···
 	}

 	first_cpu = 1;
-	for_each_cpu_mask_nr(j, policy->cpus) {
-		const cpumask_t *mask;
+	for_each_cpu(j, policy->cpus) {
+		const struct cpumask *mask;

 		/* cpufreq holds the hotplug lock, so we are safe here */
 		if (!cpu_online(j))
···
 		 * Make sure we are running on CPU that wants to change freq
 		 */
 		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-			mask = &policy->cpus;
+			mask = policy->cpus;
 		else
-			mask = &cpumask_of_cpu(j);
+			mask = cpumask_of(j);

 		set_cpus_allowed_ptr(current, mask);
 		preempt_disable();
···
 		dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
 			target_freq, freqs.old, freqs.new, msr);

-		for_each_cpu_mask_nr(k, policy->cpus) {
+		for_each_cpu(k, policy->cpus) {
 			if (!cpu_online(k))
 				continue;
 			freqs.cpu = k;
···
 		preempt_enable();
 	}

-	for_each_cpu_mask_nr(k, policy->cpus) {
+	for_each_cpu(k, policy->cpus) {
 		if (!cpu_online(k))
 			continue;
 		freqs.cpu = k;
···
 		tmp = freqs.new;
 		freqs.new = freqs.old;
 		freqs.old = tmp;
-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu(j, policy->cpus) {
 			if (!cpu_online(j))
 				continue;
 			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+9 -9
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
···
 	return 0;
 }

-static unsigned int _speedstep_get(const cpumask_t *cpus)
+static unsigned int _speedstep_get(const struct cpumask *cpus)
 {
 	unsigned int speed;
 	cpumask_t cpus_allowed;
···

 static unsigned int speedstep_get(unsigned int cpu)
 {
-	return _speedstep_get(&cpumask_of_cpu(cpu));
+	return _speedstep_get(cpumask_of(cpu));
 }

 /**
···
 	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
 		return -EINVAL;

-	freqs.old = _speedstep_get(&policy->cpus);
+	freqs.old = _speedstep_get(policy->cpus);
 	freqs.new = speedstep_freqs[newstate].frequency;
 	freqs.cpu = policy->cpu;
···

 	cpus_allowed = current->cpus_allowed;

-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}

 	/* switch to physical CPU where state is to be changed */
-	set_cpus_allowed_ptr(current, &policy->cpus);
+	set_cpus_allowed_ptr(current, policy->cpus);

 	speedstep_set_state(newstate);

 	/* allow to be run on all CPUs */
 	set_cpus_allowed_ptr(current, &cpus_allowed);

-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
···

 	/* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
 #endif

 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &policy->cpus);
+	set_cpus_allowed_ptr(current, policy->cpus);

 	/* detect low and high frequency and transition latency */
 	result = speedstep_get_freqs(speedstep_processor,
···
 		return result;

 	/* get current speed setting */
-	speed = _speedstep_get(&policy->cpus);
+	speed = _speedstep_get(policy->cpus);
 	if (!speed)
 		return -EIO;
+28 -14
drivers/cpufreq/cpufreq.c
···
 	return i;
 }

-static ssize_t show_cpus(cpumask_t mask, char *buf)
+static ssize_t show_cpus(const struct cpumask *mask, char *buf)
 {
 	ssize_t i = 0;
 	unsigned int cpu;

-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu(cpu, mask) {
 		if (i)
 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
···
  */
 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
 {
-	if (cpus_empty(policy->related_cpus))
+	if (cpumask_empty(policy->related_cpus))
 		return show_cpus(policy->cpus, buf);
 	return show_cpus(policy->related_cpus, buf);
 }
···
 		ret = -ENOMEM;
 		goto nomem_out;
 	}
+	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
+		kfree(policy);
+		ret = -ENOMEM;
+		goto nomem_out;
+	}
+	if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
+		free_cpumask_var(policy->cpus);
+		kfree(policy);
+		ret = -ENOMEM;
+		goto nomem_out;
+	}

 	policy->cpu = cpu;
-	policy->cpus = cpumask_of_cpu(cpu);
+	cpumask_copy(policy->cpus, cpumask_of(cpu));

 	/* Initially set CPU itself as the policy_cpu */
 	per_cpu(policy_cpu, cpu) = cpu;
···
 	}
 #endif

-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu(j, policy->cpus) {
 		if (cpu == j)
 			continue;
···
 				goto err_out_driver_exit;

 			spin_lock_irqsave(&cpufreq_driver_lock, flags);
-			managed_policy->cpus = policy->cpus;
+			cpumask_copy(managed_policy->cpus, policy->cpus);
 			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
 			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
···
 	}

 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu(j, policy->cpus) {
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

 	/* symlink affected CPUs */
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu(j, policy->cpus) {
 		if (j == cpu)
 			continue;
 		if (!cpu_online(j))
···

 err_out_unregister:
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask_nr(j, policy->cpus)
+	for_each_cpu(j, policy->cpus)
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
···
 	 */
 	if (unlikely(cpu != data->cpu)) {
 		dprintk("removing link\n");
-		cpu_clear(cpu, data->cpus);
+		cpumask_clear_cpu(cpu, data->cpus);
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
 		cpufreq_cpu_put(data);
···
 	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
 	 * the sysfs links afterwards.
 	 */
-	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask_nr(j, data->cpus) {
+	if (unlikely(cpumask_weight(data->cpus) > 1)) {
+		for_each_cpu(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			per_cpu(cpufreq_cpu_data, j) = NULL;
···

 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

-	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask_nr(j, data->cpus) {
+	if (unlikely(cpumask_weight(data->cpus) > 1)) {
+		for_each_cpu(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			dprintk("removing link for cpu %u\n", j);
···
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(data);

+	free_cpumask_var(data->related_cpus);
+	free_cpumask_var(data->cpus);
 	kfree(data);
+	per_cpu(cpufreq_cpu_data, cpu) = NULL;

 	cpufreq_debug_enable_ratelimit();
 	return 0;
+1 -1
drivers/cpufreq/cpufreq_conservative.c
···
 			return rc;
 		}

-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
+2 -2
drivers/cpufreq/cpufreq_ondemand.c
···
 	/* Get Absolute Load - in terms of freq */
 	max_load_freq = 0;

-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu(j, policy->cpus) {
 		struct cpu_dbs_info_s *j_dbs_info;
 		cputime64_t cur_wall_time, cur_idle_time;
 		unsigned int idle_time, wall_time;
···
 			return rc;
 		}

-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
+2 -2
include/linux/cpufreq.h
···
 };

 struct cpufreq_policy {
-	cpumask_t		cpus;	/* CPUs requiring sw coordination */
-	cpumask_t		related_cpus; /* CPUs with any coordination */
+	cpumask_var_t		cpus;	/* CPUs requiring sw coordination */
+	cpumask_var_t		related_cpus; /* CPUs with any coordination */
 	unsigned int		shared_type; /* ANY or ALL affected CPUs
 						should set cpufreq */
 	unsigned int		cpu;	/* cpu nr of registered CPU */