cpumask: alloc zeroed cpumask for static cpumask_var_ts

These are defined as static cpumask_var_t, so when MAXSMP is not enabled
they are already zeroed. Allocate them zeroed as well, to avoid surprises
when MAXSMP is enabled.

Signed-off-by: Yinghai Lu <yinghai.lu@kernel.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>

Authored by Yinghai Lu and committed by Rusty Russell (commits eaa95840, 0281b5dc)

Changed files
+11 -11
arch
drivers
kernel
+1 -1
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
··· 550 550 return -ENOMEM; 551 551 } 552 552 for_each_possible_cpu(i) { 553 - if (!alloc_cpumask_var_node( 553 + if (!zalloc_cpumask_var_node( 554 554 &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, 555 555 GFP_KERNEL, cpu_to_node(i))) { 556 556
+1 -1
arch/x86/kernel/cpu/cpufreq/powernow-k7.c
··· 322 322 goto err0; 323 323 } 324 324 325 - if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, 325 + if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, 326 326 GFP_KERNEL)) { 327 327 retval = -ENOMEM; 328 328 goto err05;
+1 -1
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
··· 887 887 /* notify BIOS that we exist */ 888 888 acpi_processor_notify_smm(THIS_MODULE); 889 889 890 - if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { 890 + if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { 891 891 printk(KERN_ERR PFX 892 892 "unable to alloc powernow_k8_data cpumask\n"); 893 893 ret_val = -ENOMEM;
+1 -1
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
··· 471 471 472 472 if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL))) 473 473 return -ENOMEM; 474 - if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { 474 + if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { 475 475 free_cpumask_var(saved_mask); 476 476 return -ENOMEM; 477 477 }
+1 -1
arch/x86/kernel/cpu/mcheck/mce_64.c
··· 1163 1163 if (!mce_available(&boot_cpu_data)) 1164 1164 return -EIO; 1165 1165 1166 - alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); 1166 + zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); 1167 1167 1168 1168 err = mce_init_banks(); 1169 1169 if (err)
+1 -1
arch/x86/kernel/tlb_uv.c
··· 832 832 return 0; 833 833 834 834 for_each_possible_cpu(cur_cpu) 835 - alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), 835 + zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), 836 836 GFP_KERNEL, cpu_to_node(cur_cpu)); 837 837 838 838 uv_bau_retry_limit = 1;
+1 -1
drivers/acpi/processor_core.c
··· 844 844 if (!pr) 845 845 return -ENOMEM; 846 846 847 - if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 847 + if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 848 848 kfree(pr); 849 849 return -ENOMEM; 850 850 }
+1 -1
drivers/cpufreq/cpufreq.c
··· 808 808 ret = -ENOMEM; 809 809 goto nomem_out; 810 810 } 811 - if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { 811 + if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { 812 812 free_cpumask_var(policy->cpus); 813 813 kfree(policy); 814 814 ret = -ENOMEM;
+1 -1
kernel/sched_cpupri.c
··· 165 165 vec->count = 0; 166 166 if (bootmem) 167 167 alloc_bootmem_cpumask_var(&vec->mask); 168 - else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL)) 168 + else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) 169 169 goto cleanup; 170 170 } 171 171
+1 -1
kernel/sched_rt.c
··· 1591 1591 unsigned int i; 1592 1592 1593 1593 for_each_possible_cpu(i) 1594 - alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 1594 + zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 1595 1595 GFP_KERNEL, cpu_to_node(i)); 1596 1596 } 1597 1597 #endif /* CONFIG_SMP */
+1 -1
kernel/smp.c
··· 52 52 switch (action) { 53 53 case CPU_UP_PREPARE: 54 54 case CPU_UP_PREPARE_FROZEN: 55 - if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, 55 + if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, 56 56 cpu_to_node(cpu))) 57 57 return NOTIFY_BAD; 58 58 break;