Merge branch 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (77 commits)
x86: setup_per_cpu_areas() cleanup
cpumask: fix compile error when CONFIG_NR_CPUS is not defined
cpumask: use alloc_cpumask_var_node where appropriate
cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t
x86: use cpumask_var_t in acpi/boot.c
x86: cleanup some remaining usages of NR_CPUS where s/b nr_cpu_ids
sched: put back some stack hog changes that were undone in kernel/sched.c
x86: enable cpus display of kernel_max and offlined cpus
ia64: cpumask fix for is_affinity_mask_valid()
cpumask: convert RCU implementations, fix
xtensa: define __fls
mn10300: define __fls
m32r: define __fls
h8300: define __fls
frv: define __fls
cris: define __fls
cpumask: CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
cpumask: zero extra bits in alloc_cpumask_var_node
cpumask: replace for_each_cpu_mask_nr with for_each_cpu in kernel/time/
cpumask: convert mm/
...
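
Most of the conversions listed above follow the same idiom: an on-stack cpumask_t is replaced by a cpumask_var_t, which becomes a real allocation when CONFIG_CPUMASK_OFFSTACK=y. A minimal, hedged sketch of that pattern (function and variable names are illustrative only, not taken from any commit in this series):

#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Count the online cpus within 'allowed' without putting an
 * NR_CPUS-sized mask on the stack. */
static int example_count_online_in(const struct cpumask *allowed)
{
	cpumask_var_t tmp;
	int n;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(tmp, allowed, cpu_online_mask);
	n = cpumask_weight(tmp);

	free_cpumask_var(tmp);
	return n;
}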

+1286 -879
+48
Documentation/cputopology.txt
··· 31 31 2) core_id: 0 32 32 3) thread_siblings: just the given CPU 33 33 4) core_siblings: just the given CPU 34 + 35 + Additionally, cpu topology information is provided under 36 + /sys/devices/system/cpu and includes these files. The internal 37 + source for the output is in brackets ("[]"). 38 + 39 + kernel_max: the maximum cpu index allowed by the kernel configuration. 40 + [NR_CPUS-1] 41 + 42 + offline: cpus that are not online because they have been 43 + HOTPLUGGED off (see cpu-hotplug.txt) or exceed the limit 44 + of cpus allowed by the kernel configuration (kernel_max 45 + above). [~cpu_online_mask + cpus >= NR_CPUS] 46 + 47 + online: cpus that are online and being scheduled [cpu_online_mask] 48 + 49 + possible: cpus that have been allocated resources and can be 50 + brought online if they are present. [cpu_possible_mask] 51 + 52 + present: cpus that have been identified as being present in the 53 + system. [cpu_present_mask] 54 + 55 + The format for the above output is compatible with cpulist_parse() 56 + [see <linux/cpumask.h>]. Some examples follow. 57 + 58 + In this example, there are 64 cpus in the system but cpus 32-63 exceed 59 + the kernel max which is limited to 0..31 by the NR_CPUS config option 60 + being 32. Note also that cpus 2 and 4-31 are not online but could be 61 + brought online as they are both present and possible. 62 + 63 + kernel_max: 31 64 + offline: 2,4-31,32-63 65 + online: 0-1,3 66 + possible: 0-31 67 + present: 0-31 68 + 69 + In this example, the NR_CPUS config option is 128, but the kernel was 70 + started with possible_cpus=144. There are 4 cpus in the system and cpu2 71 + was manually taken offline (and is the only cpu that can be brought 72 + online.) 73 + 74 + kernel_max: 127 75 + offline: 2,4-127,128-143 76 + online: 0-1,3 77 + possible: 0-127 78 + present: 0-3 79 + 80 + See cpu-hotplug.txt for the possible_cpus=NUM kernel start parameter 81 + as well as more information on the various cpumask's.
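
The cpulist strings shown above use the format understood by cpulist_parse(). A hedged round-trip example (illustrative only; assumes the pointer-based cpulist_parse()/cpulist_scnprintf() used by this cpumask series):

#include <linux/cpumask.h>

static struct cpumask example_mask;
static char example_buf[128];

static void example_cpulist_roundtrip(void)
{
	/* The "online" value from the first example above. */
	if (cpulist_parse("0-1,3", &example_mask))
		return;

	/* Writes the same ranges back into the buffer: "0-1,3". */
	cpulist_scnprintf(example_buf, sizeof(example_buf), &example_mask);
}
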
+17
arch/alpha/include/asm/topology.h
··· 39 39 return node_cpu_mask; 40 40 } 41 41 42 + extern struct cpumask node_to_cpumask_map[]; 43 + /* FIXME: This is dumb, recalculating every time. But simple. */ 44 + static const struct cpumask *cpumask_of_node(int node) 45 + { 46 + int cpu; 47 + 48 + cpumask_clear(&node_to_cpumask_map[node]); 49 + 50 + for_each_online_cpu(cpu) { 51 + if (cpu_to_node(cpu) == node) 52 + cpumask_set_cpu(cpu, &node_to_cpumask_map[node]); 53 + } 54 + 55 + return &node_to_cpumask_map[node]; 56 + } 57 + 42 58 #define pcibus_to_cpumask(bus) (cpu_online_map) 59 + #define cpumask_of_pcibus(bus) (cpu_online_mask) 43 60 44 61 #endif /* !CONFIG_NUMA */ 45 62 # include <asm-generic/topology.h>
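
For callers, the point of cpumask_of_node() over node_to_cpumask() is that no NR_CPUS-sized mask is copied onto the stack. A hedged usage sketch (the debug printout is illustrative, not from this merge):

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

/* Walk a node's cpus through the returned pointer. */
static void example_dump_node_cpus(int node)
{
	int cpu;

	for_each_cpu(cpu, cpumask_of_node(node))
		pr_debug("cpu %d is on node %d\n", cpu, node);
}
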
+2 -1
arch/alpha/kernel/irq.c
··· 50 50 if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq]) 51 51 return 1; 52 52 53 - while (!cpu_possible(cpu) || !cpu_isset(cpu, irq_default_affinity)) 53 + while (!cpu_possible(cpu) || 54 + !cpumask_test_cpu(cpu, irq_default_affinity)) 54 55 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); 55 56 last_cpu = cpu; 56 57
+5
arch/alpha/kernel/setup.c
··· 79 79 unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON; 80 80 #endif 81 81 82 + #ifdef CONFIG_NUMA 83 + struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly; 84 + EXPORT_SYMBOL(node_to_cpumask_map); 85 + #endif 86 + 82 87 /* Which processor we booted from. */ 83 88 int boot_cpuid; 84 89
+5
arch/avr32/include/asm/bitops.h
··· 263 263 return 32 - result; 264 264 } 265 265 266 + static inline int __fls(unsigned long word) 267 + { 268 + return fls(word) - 1; 269 + } 270 + 266 271 unsigned long find_first_zero_bit(const unsigned long *addr, 267 272 unsigned long size); 268 273 unsigned long find_next_zero_bit(const unsigned long *addr,
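
For the architectures gaining __fls() in this series, the contract is: return the 0-based index of the most-significant set bit; the result is undefined for an input of 0 (unlike fls(), which is 1-based and returns 0 for 0). A generic C illustration of that contract (a hedged reference, not any particular arch's implementation):

/* Reference semantics: reference___fls(0x90) == 7, while fls(0x90) == 8. */
static inline unsigned long reference___fls(unsigned long word)
{
	unsigned long bit = 0;

	/* Caller guarantees word != 0, as with the kernel's __fls(). */
	while (word >>= 1)
		bit++;
	return bit;
}
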
+1
arch/blackfin/include/asm/bitops.h
··· 213 213 #endif /* __KERNEL__ */ 214 214 215 215 #include <asm-generic/bitops/fls.h> 216 + #include <asm-generic/bitops/__fls.h> 216 217 #include <asm-generic/bitops/fls64.h> 217 218 218 219 #endif /* _BLACKFIN_BITOPS_H */
+1
arch/cris/include/asm/bitops.h
··· 148 148 #define ffs kernel_ffs 149 149 150 150 #include <asm-generic/bitops/fls.h> 151 + #include <asm-generic/bitops/__fls.h> 151 152 #include <asm-generic/bitops/fls64.h> 152 153 #include <asm-generic/bitops/hweight.h> 153 154 #include <asm-generic/bitops/find.h>
+1
arch/h8300/include/asm/bitops.h
··· 207 207 #endif /* __KERNEL__ */ 208 208 209 209 #include <asm-generic/bitops/fls.h> 210 + #include <asm-generic/bitops/__fls.h> 210 211 #include <asm-generic/bitops/fls64.h> 211 212 212 213 #endif /* _H8300_BITOPS_H */
+1 -1
arch/ia64/include/asm/irq.h
··· 27 27 } 28 28 29 29 extern void set_irq_affinity_info (unsigned int irq, int dest, int redir); 30 - bool is_affinity_mask_valid(cpumask_t cpumask); 30 + bool is_affinity_mask_valid(cpumask_var_t cpumask); 31 31 32 32 #define is_affinity_mask_valid is_affinity_mask_valid 33 33
+8 -1
arch/ia64/include/asm/topology.h
··· 34 34 * Returns a bitmask of CPUs on Node 'node'. 35 35 */ 36 36 #define node_to_cpumask(node) (node_to_cpu_mask[node]) 37 + #define cpumask_of_node(node) (&node_to_cpu_mask[node]) 37 38 38 39 /* 39 40 * Returns the number of the node containing Node 'nid'. ··· 46 45 /* 47 46 * Returns the number of the first CPU on Node 'node'. 48 47 */ 49 - #define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node))) 48 + #define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node))) 50 49 51 50 /* 52 51 * Determines the node for a given pci bus ··· 110 109 #define topology_core_id(cpu) (cpu_data(cpu)->core_id) 111 110 #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 112 111 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 112 + #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 113 + #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 113 114 #define smt_capable() (smp_num_siblings > 1) 114 115 #endif 115 116 ··· 121 118 CPU_MASK_ALL : \ 122 119 node_to_cpumask(pcibus_to_node(bus)) \ 123 120 ) 121 + 122 + #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ 123 + cpu_all_mask : \ 124 + cpumask_from_node(pcibus_to_node(bus))) 124 125 125 126 #include <asm-generic/topology.h> 126 127
+1 -2
arch/ia64/kernel/acpi.c
··· 202 202 Boot-time Table Parsing 203 203 -------------------------------------------------------------------------- */ 204 204 205 - static int total_cpus __initdata; 206 205 static int available_cpus __initdata; 207 206 struct acpi_table_madt *acpi_madt __initdata; 208 207 static u8 has_8259; ··· 1000 1001 node = pxm_to_node(pxm); 1001 1002 1002 1003 if (node >= MAX_NUMNODES || !node_online(node) || 1003 - cpus_empty(node_to_cpumask(node))) 1004 + cpumask_empty(cpumask_of_node(node))) 1004 1005 return AE_OK; 1005 1006 1006 1007 /* We know a gsi to node mapping! */
+11 -12
arch/ia64/kernel/iosapic.c
··· 695 695 #ifdef CONFIG_NUMA 696 696 { 697 697 int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0; 698 - cpumask_t cpu_mask; 698 + const struct cpumask *cpu_mask; 699 699 700 700 iosapic_index = find_iosapic(gsi); 701 701 if (iosapic_index < 0 || 702 702 iosapic_lists[iosapic_index].node == MAX_NUMNODES) 703 703 goto skip_numa_setup; 704 704 705 - cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node); 706 - cpus_and(cpu_mask, cpu_mask, domain); 707 - for_each_cpu_mask(numa_cpu, cpu_mask) { 708 - if (!cpu_online(numa_cpu)) 709 - cpu_clear(numa_cpu, cpu_mask); 705 + cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node); 706 + num_cpus = 0; 707 + for_each_cpu_and(numa_cpu, cpu_mask, &domain) { 708 + if (cpu_online(numa_cpu)) 709 + num_cpus++; 710 710 } 711 - 712 - num_cpus = cpus_weight(cpu_mask); 713 711 714 712 if (!num_cpus) 715 713 goto skip_numa_setup; ··· 715 717 /* Use irq assignment to distribute across cpus in node */ 716 718 cpu_index = irq % num_cpus; 717 719 718 - for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++) 719 - numa_cpu = next_cpu(numa_cpu, cpu_mask); 720 + for_each_cpu_and(numa_cpu, cpu_mask, &domain) 721 + if (cpu_online(numa_cpu) && i++ >= cpu_index) 722 + break; 720 723 721 - if (numa_cpu != NR_CPUS) 724 + if (numa_cpu < nr_cpu_ids) 722 725 return cpu_physical_id(numa_cpu); 723 726 } 724 727 skip_numa_setup: ··· 730 731 * case of NUMA.) 731 732 */ 732 733 do { 733 - if (++cpu >= NR_CPUS) 734 + if (++cpu >= nr_cpu_ids) 734 735 cpu = 0; 735 736 } while (!cpu_online(cpu) || !cpu_isset(cpu, domain)); 736 737
+2 -2
arch/ia64/kernel/irq.c
··· 112 112 } 113 113 } 114 114 115 - bool is_affinity_mask_valid(cpumask_t cpumask) 115 + bool is_affinity_mask_valid(cpumask_var_t cpumask) 116 116 { 117 117 if (ia64_platform_is("sn2")) { 118 118 /* Only allow one CPU to be specified in the smp_affinity mask */ 119 - if (cpus_weight(cpumask) != 1) 119 + if (cpumask_weight(cpumask) != 1) 120 120 return false; 121 121 } 122 122 return true;
+12 -15
arch/ia64/sn/kernel/sn2/sn_hwperf.c
··· 385 385 int j; 386 386 const char *slabname; 387 387 int ordinal; 388 - cpumask_t cpumask; 389 388 char slice; 390 389 struct cpuinfo_ia64 *c; 391 390 struct sn_hwperf_port_info *ptdata; ··· 472 473 * CPUs on this node, if any 473 474 */ 474 475 if (!SN_HWPERF_IS_IONODE(obj)) { 475 - cpumask = node_to_cpumask(ordinal); 476 - for_each_online_cpu(i) { 477 - if (cpu_isset(i, cpumask)) { 478 - slice = 'a' + cpuid_to_slice(i); 479 - c = cpu_data(i); 480 - seq_printf(s, "cpu %d %s%c local" 481 - " freq %luMHz, arch ia64", 482 - i, obj->location, slice, 483 - c->proc_freq / 1000000); 484 - for_each_online_cpu(j) { 485 - seq_printf(s, j ? ":%d" : ", dist %d", 486 - node_distance( 476 + for_each_cpu_and(i, cpu_online_mask, 477 + cpumask_of_node(ordinal)) { 478 + slice = 'a' + cpuid_to_slice(i); 479 + c = cpu_data(i); 480 + seq_printf(s, "cpu %d %s%c local" 481 + " freq %luMHz, arch ia64", 482 + i, obj->location, slice, 483 + c->proc_freq / 1000000); 484 + for_each_online_cpu(j) { 485 + seq_printf(s, j ? ":%d" : ", dist %d", 486 + node_distance( 487 487 cpu_to_node(i), 488 488 cpu_to_node(j))); 489 - } 490 - seq_putc(s, '\n'); 491 489 } 490 + seq_putc(s, '\n'); 492 491 } 493 492 } 494 493 }
+1 -1
arch/m32r/kernel/smpboot.c
··· 592 592 * accounting. At that time they also adjust their APIC timers 593 593 * accordingly. 594 594 */ 595 - for (i = 0; i < NR_CPUS; ++i) 595 + for_each_possible_cpu(i) 596 596 per_cpu(prof_multiplier, i) = multiplier; 597 597 598 598 return 0;
+1
arch/m68knommu/include/asm/bitops.h
··· 331 331 #endif /* __KERNEL__ */ 332 332 333 333 #include <asm-generic/bitops/fls.h> 334 + #include <asm-generic/bitops/__fls.h> 334 335 #include <asm-generic/bitops/fls64.h> 335 336 336 337 #endif /* _M68KNOMMU_BITOPS_H */
+3 -1
arch/mips/include/asm/mach-ip27/topology.h
··· 25 25 #define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid) 26 26 #define parent_node(node) (node) 27 27 #define node_to_cpumask(node) (hub_data(node)->h_cpus) 28 - #define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node))) 28 + #define cpumask_of_node(node) (&hub_data(node)->h_cpus) 29 + #define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node))) 29 30 struct pci_bus; 30 31 extern int pcibus_to_node(struct pci_bus *); 31 32 32 33 #define pcibus_to_cpumask(bus) (cpu_online_map) 34 + #define cpumask_of_pcibus(bus) (cpu_online_mask) 33 35 34 36 extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; 35 37
-2
arch/parisc/include/asm/smp.h
··· 16 16 #include <linux/cpumask.h> 17 17 typedef unsigned long address_t; 18 18 19 - extern cpumask_t cpu_online_map; 20 - 21 19 22 20 /* 23 21 * Private routines/data
+9 -3
arch/powerpc/include/asm/topology.h
··· 22 22 return numa_cpumask_lookup_table[node]; 23 23 } 24 24 25 + #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) 26 + 25 27 static inline int node_to_first_cpu(int node) 26 28 { 27 - cpumask_t tmp; 28 - tmp = node_to_cpumask(node); 29 - return first_cpu(tmp); 29 + return cpumask_first(cpumask_of_node(node)); 30 30 } 31 31 32 32 int of_node_to_nid(struct device_node *device); ··· 45 45 CPU_MASK_ALL : \ 46 46 node_to_cpumask(pcibus_to_node(bus)) \ 47 47 ) 48 + 49 + #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ 50 + cpu_all_mask : \ 51 + cpumask_of_node(pcibus_to_node(bus))) 48 52 49 53 /* sched_domains SD_NODE_INIT for PPC64 machines */ 50 54 #define SD_NODE_INIT (struct sched_domain) { \ ··· 112 108 113 109 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 114 110 #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) 111 + #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 112 + #define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu)) 115 113 #define topology_core_id(cpu) (cpu_to_core_id(cpu)) 116 114 #endif 117 115 #endif
+3 -3
arch/powerpc/platforms/cell/spu_priv1_mmio.c
··· 80 80 u64 route; 81 81 82 82 if (nr_cpus_node(spu->node)) { 83 - cpumask_t spumask = node_to_cpumask(spu->node); 84 - cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu)); 83 + const struct cpumask *spumask = cpumask_of_node(spu->node), 84 + *cpumask = cpumask_of_node(cpu_to_node(cpu)); 85 85 86 - if (!cpus_intersects(spumask, cpumask)) 86 + if (!cpumask_intersects(spumask, cpumask)) 87 87 return; 88 88 } 89 89
+2 -2
arch/powerpc/platforms/cell/spufs/sched.c
··· 166 166 static int __node_allowed(struct spu_context *ctx, int node) 167 167 { 168 168 if (nr_cpus_node(node)) { 169 - cpumask_t mask = node_to_cpumask(node); 169 + const struct cpumask *mask = cpumask_of_node(node); 170 170 171 - if (cpus_intersects(mask, ctx->cpus_allowed)) 171 + if (cpumask_intersects(mask, &ctx->cpus_allowed)) 172 172 return 1; 173 173 } 174 174
+2
arch/s390/include/asm/topology.h
··· 6 6 #define mc_capable() (1) 7 7 8 8 cpumask_t cpu_coregroup_map(unsigned int cpu); 9 + const struct cpumask *cpu_coregroup_mask(unsigned int cpu); 9 10 10 11 extern cpumask_t cpu_core_map[NR_CPUS]; 11 12 12 13 #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 14 + #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 13 15 14 16 int topology_set_cpu_management(int fc); 15 17 void topology_schedule_update(void);
+5
arch/s390/kernel/topology.c
··· 97 97 return mask; 98 98 } 99 99 100 + const struct cpumask *cpu_coregroup_mask(unsigned int cpu) 101 + { 102 + return &cpu_core_map[cpu]; 103 + } 104 + 100 105 static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core) 101 106 { 102 107 unsigned int cpu;
+1
arch/sh/include/asm/topology.h
··· 32 32 #define parent_node(node) ((void)(node),0) 33 33 34 34 #define node_to_cpumask(node) ((void)node, cpu_online_map) 35 + #define cpumask_of_node(node) ((void)node, cpu_online_mask) 35 36 #define node_to_first_cpu(node) ((void)(node),0) 36 37 37 38 #define pcibus_to_node(bus) ((void)(bus), -1)
+9 -4
arch/sparc/include/asm/topology_64.h
··· 16 16 { 17 17 return numa_cpumask_lookup_table[node]; 18 18 } 19 + #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) 19 20 20 - /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ 21 + /* 22 + * Returns a pointer to the cpumask of CPUs on Node 'node'. 23 + * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 24 + */ 21 25 #define node_to_cpumask_ptr(v, node) \ 22 26 cpumask_t *v = &(numa_cpumask_lookup_table[node]) 23 27 ··· 30 26 31 27 static inline int node_to_first_cpu(int node) 32 28 { 33 - cpumask_t tmp; 34 - tmp = node_to_cpumask(node); 35 - return first_cpu(tmp); 29 + return cpumask_first(cpumask_of_node(node)); 36 30 } 37 31 38 32 struct pci_bus; ··· 79 77 #define topology_core_id(cpu) (cpu_data(cpu).core_id) 80 78 #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 81 79 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 80 + #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 81 + #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 82 82 #define mc_capable() (sparc64_multi_core) 83 83 #define smt_capable() (sparc64_multi_core) 84 84 #endif /* CONFIG_SMP */ 85 85 86 86 #define cpu_coregroup_map(cpu) (cpu_core_map[cpu]) 87 + #define cpu_coregroup_mask(cpu) (&cpu_core_map[cpu]) 87 88 88 89 #endif /* _ASM_SPARC64_TOPOLOGY_H */
+1 -1
arch/sparc/kernel/of_device_64.c
··· 778 778 out: 779 779 nid = of_node_to_nid(dp); 780 780 if (nid != -1) { 781 - cpumask_t numa_mask = node_to_cpumask(nid); 781 + cpumask_t numa_mask = *cpumask_of_node(nid); 782 782 783 783 irq_set_affinity(irq, &numa_mask); 784 784 }
+1 -1
arch/sparc/kernel/pci_msi.c
··· 286 286 287 287 nid = pbm->numa_node; 288 288 if (nid != -1) { 289 - cpumask_t numa_mask = node_to_cpumask(nid); 289 + cpumask_t numa_mask = *cpumask_of_node(nid); 290 290 291 291 irq_set_affinity(irq, &numa_mask); 292 292 }
+3 -29
arch/x86/include/asm/es7000/apic.h
··· 157 157 158 158 num_bits_set = cpumask_weight(cpumask); 159 159 /* Return id to all */ 160 - if (num_bits_set == NR_CPUS) 160 + if (num_bits_set == nr_cpu_ids) 161 161 return 0xFF; 162 162 /* 163 163 * The cpus in the mask must all be on the apic cluster. If are not ··· 190 190 191 191 num_bits_set = cpus_weight(*cpumask); 192 192 /* Return id to all */ 193 - if (num_bits_set == NR_CPUS) 193 + if (num_bits_set == nr_cpu_ids) 194 194 return cpu_to_logical_apicid(0); 195 195 /* 196 196 * The cpus in the mask must all be on the apic cluster. If are not ··· 218 218 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, 219 219 const struct cpumask *andmask) 220 220 { 221 - int num_bits_set; 222 - int cpus_found = 0; 223 - int cpu; 224 221 int apicid = cpu_to_logical_apicid(0); 225 222 cpumask_var_t cpumask; 226 223 ··· 226 229 227 230 cpumask_and(cpumask, inmask, andmask); 228 231 cpumask_and(cpumask, cpumask, cpu_online_mask); 232 + apicid = cpu_mask_to_apicid(cpumask); 229 233 230 - num_bits_set = cpumask_weight(cpumask); 231 - /* Return id to all */ 232 - if (num_bits_set == NR_CPUS) 233 - goto exit; 234 - /* 235 - * The cpus in the mask must all be on the apic cluster. If are not 236 - * on the same apicid cluster return default value of TARGET_CPUS. 237 - */ 238 - cpu = cpumask_first(cpumask); 239 - apicid = cpu_to_logical_apicid(cpu); 240 - while (cpus_found < num_bits_set) { 241 - if (cpumask_test_cpu(cpu, cpumask)) { 242 - int new_apicid = cpu_to_logical_apicid(cpu); 243 - if (apicid_cluster(apicid) != 244 - apicid_cluster(new_apicid)){ 245 - printk ("%s: Not a valid mask!\n", __func__); 246 - return cpu_to_logical_apicid(0); 247 - } 248 - apicid = new_apicid; 249 - cpus_found++; 250 - } 251 - cpu++; 252 - } 253 - exit: 254 234 free_cpumask_var(cpumask); 255 235 return apicid; 256 236 }
+1 -1
arch/x86/include/asm/lguest.h
··· 15 15 #define SHARED_SWITCHER_PAGES \ 16 16 DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE) 17 17 /* Pages for switcher itself, then two pages per cpu */ 18 - #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS) 18 + #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids) 19 19 20 20 /* We map at -4M for ease of mapping into the guest (one PTE page). */ 21 21 #define SWITCHER_ADDR 0xFFC00000
+2 -2
arch/x86/include/asm/numaq/apic.h
··· 63 63 extern u8 cpu_2_logical_apicid[]; 64 64 static inline int cpu_to_logical_apicid(int cpu) 65 65 { 66 - if (cpu >= NR_CPUS) 67 - return BAD_APICID; 66 + if (cpu >= nr_cpu_ids) 67 + return BAD_APICID; 68 68 return (int)cpu_2_logical_apicid[cpu]; 69 69 } 70 70
+8 -2
arch/x86/include/asm/pci.h
··· 102 102 103 103 #ifdef CONFIG_NUMA 104 104 /* Returns the node based on pci bus */ 105 - static inline int __pcibus_to_node(struct pci_bus *bus) 105 + static inline int __pcibus_to_node(const struct pci_bus *bus) 106 106 { 107 - struct pci_sysdata *sd = bus->sysdata; 107 + const struct pci_sysdata *sd = bus->sysdata; 108 108 109 109 return sd->node; 110 110 } ··· 112 112 static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus) 113 113 { 114 114 return node_to_cpumask(__pcibus_to_node(bus)); 115 + } 116 + 117 + static inline const struct cpumask * 118 + cpumask_of_pcibus(const struct pci_bus *bus) 119 + { 120 + return cpumask_of_node(__pcibus_to_node(bus)); 115 121 } 116 122 #endif 117 123
+8 -34
arch/x86/include/asm/summit/apic.h
··· 52 52 int i; 53 53 54 54 /* Create logical APIC IDs by counting CPUs already in cluster. */ 55 - for (count = 0, i = NR_CPUS; --i >= 0; ) { 55 + for (count = 0, i = nr_cpu_ids; --i >= 0; ) { 56 56 lid = cpu_2_logical_apicid[i]; 57 57 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) 58 58 ++count; ··· 97 97 static inline int cpu_to_logical_apicid(int cpu) 98 98 { 99 99 #ifdef CONFIG_SMP 100 - if (cpu >= NR_CPUS) 101 - return BAD_APICID; 100 + if (cpu >= nr_cpu_ids) 101 + return BAD_APICID; 102 102 return (int)cpu_2_logical_apicid[cpu]; 103 103 #else 104 104 return logical_smp_processor_id(); ··· 107 107 108 108 static inline int cpu_present_to_apicid(int mps_cpu) 109 109 { 110 - if (mps_cpu < NR_CPUS) 110 + if (mps_cpu < nr_cpu_ids) 111 111 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); 112 112 else 113 113 return BAD_APICID; ··· 146 146 147 147 num_bits_set = cpus_weight(*cpumask); 148 148 /* Return id to all */ 149 - if (num_bits_set == NR_CPUS) 149 + if (num_bits_set >= nr_cpu_ids) 150 150 return (int) 0xFF; 151 151 /* 152 152 * The cpus in the mask must all be on the apic cluster. If are not ··· 173 173 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, 174 174 const struct cpumask *andmask) 175 175 { 176 - int num_bits_set; 177 - int cpus_found = 0; 178 - int cpu; 179 - int apicid = 0xFF; 176 + int apicid = cpu_to_logical_apicid(0); 180 177 cpumask_var_t cpumask; 181 178 182 179 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) 183 - return (int) 0xFF; 180 + return apicid; 184 181 185 182 cpumask_and(cpumask, inmask, andmask); 186 183 cpumask_and(cpumask, cpumask, cpu_online_mask); 184 + apicid = cpu_mask_to_apicid(cpumask); 187 185 188 - num_bits_set = cpumask_weight(cpumask); 189 - /* Return id to all */ 190 - if (num_bits_set == nr_cpu_ids) 191 - goto exit; 192 - /* 193 - * The cpus in the mask must all be on the apic cluster. If are not 194 - * on the same apicid cluster return default value of TARGET_CPUS. 195 - */ 196 - cpu = cpumask_first(cpumask); 197 - apicid = cpu_to_logical_apicid(cpu); 198 - while (cpus_found < num_bits_set) { 199 - if (cpumask_test_cpu(cpu, cpumask)) { 200 - int new_apicid = cpu_to_logical_apicid(cpu); 201 - if (apicid_cluster(apicid) != 202 - apicid_cluster(new_apicid)){ 203 - printk ("%s: Not a valid mask!\n", __func__); 204 - return 0xFF; 205 - } 206 - apicid = apicid | new_apicid; 207 - cpus_found++; 208 - } 209 - cpu++; 210 - } 211 - exit: 212 186 free_cpumask_var(cpumask); 213 187 return apicid; 214 188 }
+24 -12
arch/x86/include/asm/topology.h
··· 61 61 * 62 62 * Side note: this function creates the returned cpumask on the stack 63 63 * so with a high NR_CPUS count, excessive stack space is used. The 64 - * node_to_cpumask_ptr function should be used whenever possible. 64 + * cpumask_of_node function should be used whenever possible. 65 65 */ 66 66 static inline cpumask_t node_to_cpumask(int node) 67 67 { 68 68 return node_to_cpumask_map[node]; 69 + } 70 + 71 + /* Returns a bitmask of CPUs on Node 'node'. */ 72 + static inline const struct cpumask *cpumask_of_node(int node) 73 + { 74 + return &node_to_cpumask_map[node]; 69 75 } 70 76 71 77 #else /* CONFIG_X86_64 */ ··· 88 82 #ifdef CONFIG_DEBUG_PER_CPU_MAPS 89 83 extern int cpu_to_node(int cpu); 90 84 extern int early_cpu_to_node(int cpu); 91 - extern const cpumask_t *_node_to_cpumask_ptr(int node); 85 + extern const cpumask_t *cpumask_of_node(int node); 92 86 extern cpumask_t node_to_cpumask(int node); 93 87 94 88 #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ ··· 109 103 } 110 104 111 105 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ 112 - static inline const cpumask_t *_node_to_cpumask_ptr(int node) 106 + static inline const cpumask_t *cpumask_of_node(int node) 113 107 { 114 108 return &node_to_cpumask_map[node]; 115 109 } ··· 122 116 123 117 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ 124 118 125 - /* Replace default node_to_cpumask_ptr with optimized version */ 119 + /* 120 + * Replace default node_to_cpumask_ptr with optimized version 121 + * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 122 + */ 126 123 #define node_to_cpumask_ptr(v, node) \ 127 - const cpumask_t *v = _node_to_cpumask_ptr(node) 124 + const cpumask_t *v = cpumask_of_node(node) 128 125 129 126 #define node_to_cpumask_ptr_next(v, node) \ 130 - v = _node_to_cpumask_ptr(node) 127 + v = cpumask_of_node(node) 131 128 132 129 #endif /* CONFIG_X86_64 */ 133 130 ··· 196 187 #define cpu_to_node(cpu) 0 197 188 #define early_cpu_to_node(cpu) 0 198 189 199 - static inline const cpumask_t *_node_to_cpumask_ptr(int node) 190 + static inline const cpumask_t *cpumask_of_node(int node) 200 191 { 201 192 return &cpu_online_map; 202 193 } ··· 209 200 return first_cpu(cpu_online_map); 210 201 } 211 202 212 - /* Replace default node_to_cpumask_ptr with optimized version */ 203 + /* 204 + * Replace default node_to_cpumask_ptr with optimized version 205 + * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 206 + */ 213 207 #define node_to_cpumask_ptr(v, node) \ 214 - const cpumask_t *v = _node_to_cpumask_ptr(node) 208 + const cpumask_t *v = cpumask_of_node(node) 215 209 216 210 #define node_to_cpumask_ptr_next(v, node) \ 217 - v = _node_to_cpumask_ptr(node) 211 + v = cpumask_of_node(node) 218 212 #endif 219 213 220 214 #include <asm-generic/topology.h> ··· 226 214 /* Returns the number of the first CPU on Node 'node'. */ 227 215 static inline int node_to_first_cpu(int node) 228 216 { 229 - node_to_cpumask_ptr(mask, node); 230 - return first_cpu(*mask); 217 + return cpumask_first(cpumask_of_node(node)); 231 218 } 232 219 #endif 233 220 234 221 extern cpumask_t cpu_coregroup_map(int cpu); 222 + extern const struct cpumask *cpu_coregroup_mask(int cpu); 235 223 236 224 #ifdef ENABLE_TOPO_DEFINES 237 225 #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
+23 -8
arch/x86/kernel/acpi/boot.c
··· 538 538 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 539 539 union acpi_object *obj; 540 540 struct acpi_madt_local_apic *lapic; 541 - cpumask_t tmp_map, new_map; 541 + cpumask_var_t tmp_map, new_map; 542 542 u8 physid; 543 543 int cpu; 544 + int retval = -ENOMEM; 544 545 545 546 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) 546 547 return -EINVAL; ··· 570 569 buffer.length = ACPI_ALLOCATE_BUFFER; 571 570 buffer.pointer = NULL; 572 571 573 - tmp_map = cpu_present_map; 572 + if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL)) 573 + goto out; 574 + 575 + if (!alloc_cpumask_var(&new_map, GFP_KERNEL)) 576 + goto free_tmp_map; 577 + 578 + cpumask_copy(tmp_map, cpu_present_mask); 574 579 acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); 575 580 576 581 /* 577 582 * If mp_register_lapic successfully generates a new logical cpu 578 583 * number, then the following will get us exactly what was mapped 579 584 */ 580 - cpus_andnot(new_map, cpu_present_map, tmp_map); 581 - if (cpus_empty(new_map)) { 585 + cpumask_andnot(new_map, cpu_present_mask, tmp_map); 586 + if (cpumask_empty(new_map)) { 582 587 printk ("Unable to map lapic to logical cpu number\n"); 583 - return -EINVAL; 588 + retval = -EINVAL; 589 + goto free_new_map; 584 590 } 585 591 586 - cpu = first_cpu(new_map); 592 + cpu = cpumask_first(new_map); 587 593 588 594 *pcpu = cpu; 589 - return 0; 595 + retval = 0; 596 + 597 + free_new_map: 598 + free_cpumask_var(new_map); 599 + free_tmp_map: 600 + free_cpumask_var(tmp_map); 601 + out: 602 + return retval; 590 603 } 591 604 592 605 /* wrapper to silence section mismatch warning */ ··· 613 598 int acpi_unmap_lsapic(int cpu) 614 599 { 615 600 per_cpu(x86_cpu_to_apicid, cpu) = -1; 616 - cpu_clear(cpu, cpu_present_map); 601 + set_cpu_present(cpu, false); 617 602 num_processors--; 618 603 619 604 return (0);
+2 -2
arch/x86/kernel/apic.c
··· 140 140 struct clock_event_device *evt); 141 141 static void lapic_timer_setup(enum clock_event_mode mode, 142 142 struct clock_event_device *evt); 143 - static void lapic_timer_broadcast(const cpumask_t *mask); 143 + static void lapic_timer_broadcast(const struct cpumask *mask); 144 144 static void apic_pm_activate(void); 145 145 146 146 /* ··· 453 453 /* 454 454 * Local APIC timer broadcast function 455 455 */ 456 - static void lapic_timer_broadcast(const cpumask_t *mask) 456 + static void lapic_timer_broadcast(const struct cpumask *mask) 457 457 { 458 458 #ifdef CONFIG_SMP 459 459 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+1 -1
arch/x86/kernel/cpu/common.c
··· 355 355 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); 356 356 } else if (smp_num_siblings > 1) { 357 357 358 - if (smp_num_siblings > NR_CPUS) { 358 + if (smp_num_siblings > nr_cpu_ids) { 359 359 printk(KERN_WARNING "CPU: Unsupported number of siblings %d", 360 360 smp_num_siblings); 361 361 smp_num_siblings = 1;
+25 -3
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
··· 517 517 } 518 518 } 519 519 520 + static void free_acpi_perf_data(void) 521 + { 522 + unsigned int i; 523 + 524 + /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ 525 + for_each_possible_cpu(i) 526 + free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) 527 + ->shared_cpu_map); 528 + free_percpu(acpi_perf_data); 529 + } 530 + 520 531 /* 521 532 * acpi_cpufreq_early_init - initialize ACPI P-States library 522 533 * ··· 538 527 */ 539 528 static int __init acpi_cpufreq_early_init(void) 540 529 { 530 + unsigned int i; 541 531 dprintk("acpi_cpufreq_early_init\n"); 542 532 543 533 acpi_perf_data = alloc_percpu(struct acpi_processor_performance); 544 534 if (!acpi_perf_data) { 545 535 dprintk("Memory allocation error for acpi_perf_data.\n"); 546 536 return -ENOMEM; 537 + } 538 + for_each_possible_cpu(i) { 539 + if (!alloc_cpumask_var_node( 540 + &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, 541 + GFP_KERNEL, cpu_to_node(i))) { 542 + 543 + /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ 544 + free_acpi_perf_data(); 545 + return -ENOMEM; 546 + } 547 547 } 548 548 549 549 /* Do initialization in ACPI core */ ··· 626 604 */ 627 605 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || 628 606 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { 629 - policy->cpus = perf->shared_cpu_map; 607 + cpumask_copy(&policy->cpus, perf->shared_cpu_map); 630 608 } 631 - policy->related_cpus = perf->shared_cpu_map; 609 + cpumask_copy(&policy->related_cpus, perf->shared_cpu_map); 632 610 633 611 #ifdef CONFIG_SMP 634 612 dmi_check_system(sw_any_bug_dmi_table); ··· 817 795 818 796 ret = cpufreq_register_driver(&acpi_cpufreq_driver); 819 797 if (ret) 820 - free_percpu(acpi_perf_data); 798 + free_acpi_perf_data(); 821 799 822 800 return ret; 823 801 }
+9
arch/x86/kernel/cpu/cpufreq/powernow-k7.c
··· 310 310 goto err0; 311 311 } 312 312 313 + if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, 314 + GFP_KERNEL)) { 315 + retval = -ENOMEM; 316 + goto err05; 317 + } 318 + 313 319 if (acpi_processor_register_performance(acpi_processor_perf, 0)) { 314 320 retval = -EIO; 315 321 goto err1; ··· 418 412 err2: 419 413 acpi_processor_unregister_performance(acpi_processor_perf, 0); 420 414 err1: 415 + free_cpumask_var(acpi_processor_perf->shared_cpu_map); 416 + err05: 421 417 kfree(acpi_processor_perf); 422 418 err0: 423 419 printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n"); ··· 660 652 #ifdef CONFIG_X86_POWERNOW_K7_ACPI 661 653 if (acpi_processor_perf) { 662 654 acpi_processor_unregister_performance(acpi_processor_perf, 0); 655 + free_cpumask_var(acpi_processor_perf->shared_cpu_map); 663 656 kfree(acpi_processor_perf); 664 657 } 665 658 #endif
+15 -9
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
··· 766 766 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) 767 767 { 768 768 struct cpufreq_frequency_table *powernow_table; 769 - int ret_val; 769 + int ret_val = -ENODEV; 770 770 771 771 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { 772 772 dprintk("register performance failed: bad ACPI data\n"); ··· 815 815 /* notify BIOS that we exist */ 816 816 acpi_processor_notify_smm(THIS_MODULE); 817 817 818 + if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { 819 + printk(KERN_ERR PFX 820 + "unable to alloc powernow_k8_data cpumask\n"); 821 + ret_val = -ENOMEM; 822 + goto err_out_mem; 823 + } 824 + 818 825 return 0; 819 826 820 827 err_out_mem: ··· 833 826 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ 834 827 data->acpi_data.state_count = 0; 835 828 836 - return -ENODEV; 829 + return ret_val; 837 830 } 838 831 839 832 static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) ··· 936 929 { 937 930 if (data->acpi_data.state_count) 938 931 acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 932 + free_cpumask_var(data->acpi_data.shared_cpu_map); 939 933 } 940 934 941 935 #else ··· 1142 1134 data->cpu = pol->cpu; 1143 1135 data->currpstate = HW_PSTATE_INVALID; 1144 1136 1145 - if (powernow_k8_cpu_init_acpi(data)) { 1137 + rc = powernow_k8_cpu_init_acpi(data); 1138 + if (rc) { 1146 1139 /* 1147 1140 * Use the PSB BIOS structure. This is only availabe on 1148 1141 * an UP version, and is deprecated by AMD. ··· 1161 1152 "ACPI maintainers and complain to your BIOS " 1162 1153 "vendor.\n"); 1163 1154 #endif 1164 - kfree(data); 1165 - return -ENODEV; 1155 + goto err_out; 1166 1156 } 1167 1157 if (pol->cpu != 0) { 1168 1158 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " 1169 1159 "CPU other than CPU0. Complain to your BIOS " 1170 1160 "vendor.\n"); 1171 - kfree(data); 1172 - return -ENODEV; 1161 + goto err_out; 1173 1162 } 1174 1163 rc = find_psb_table(data); 1175 1164 if (rc) { 1176 - kfree(data); 1177 - return -ENODEV; 1165 + goto err_out; 1178 1166 } 1179 1167 } 1180 1168
+1 -1
arch/x86/kernel/cpu/intel_cacheinfo.c
··· 534 534 per_cpu(cpuid4_info, cpu) = NULL; 535 535 } 536 536 537 - static void get_cpu_leaves(void *_retval) 537 + static void __cpuinit get_cpu_leaves(void *_retval) 538 538 { 539 539 int j, *retval = _retval, cpu = smp_processor_id(); 540 540
+1 -1
arch/x86/kernel/cpuid.c
··· 121 121 lock_kernel(); 122 122 123 123 cpu = iminor(file->f_path.dentry->d_inode); 124 - if (cpu >= NR_CPUS || !cpu_online(cpu)) { 124 + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { 125 125 ret = -ENXIO; /* No such CPU */ 126 126 goto out; 127 127 }
+3 -3
arch/x86/kernel/io_apic.c
··· 214 214 215 215 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 216 216 if (cfg) { 217 - /* FIXME: needs alloc_cpumask_var_node() */ 218 - if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) { 217 + if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { 219 218 kfree(cfg); 220 219 cfg = NULL; 221 - } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) { 220 + } else if (!alloc_cpumask_var_node(&cfg->old_domain, 221 + GFP_ATOMIC, node)) { 222 222 free_cpumask_var(cfg->domain); 223 223 kfree(cfg); 224 224 cfg = NULL;
+1 -1
arch/x86/kernel/msr.c
··· 136 136 lock_kernel(); 137 137 cpu = iminor(file->f_path.dentry->d_inode); 138 138 139 - if (cpu >= NR_CPUS || !cpu_online(cpu)) { 139 + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { 140 140 ret = -ENXIO; /* No such CPU */ 141 141 goto out; 142 142 }
+2 -2
arch/x86/kernel/reboot.c
··· 501 501 502 502 #ifdef CONFIG_X86_32 503 503 /* See if there has been given a command line override */ 504 - if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && 504 + if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) && 505 505 cpu_online(reboot_cpu)) 506 506 reboot_cpu_id = reboot_cpu; 507 507 #endif ··· 511 511 reboot_cpu_id = smp_processor_id(); 512 512 513 513 /* Make certain I only run on the appropriate processor */ 514 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)); 514 + set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); 515 515 516 516 /* O.K Now that I'm on the appropriate processor, 517 517 * stop all of the others.
+12 -21
arch/x86/kernel/setup_percpu.c
··· 153 153 align = max_t(unsigned long, PAGE_SIZE, align); 154 154 size = roundup(old_size, align); 155 155 156 - printk(KERN_INFO 157 - "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", 156 + pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", 158 157 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); 159 158 160 - printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", 161 - size); 159 + pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size); 162 160 163 161 for_each_possible_cpu(cpu) { 164 162 #ifndef CONFIG_NEED_MULTIPLE_NODES ··· 167 169 if (!node_online(node) || !NODE_DATA(node)) { 168 170 ptr = __alloc_bootmem(size, align, 169 171 __pa(MAX_DMA_ADDRESS)); 170 - printk(KERN_INFO 171 - "cpu %d has no node %d or node-local memory\n", 172 + pr_info("cpu %d has no node %d or node-local memory\n", 172 173 cpu, node); 173 - if (ptr) 174 - printk(KERN_DEBUG 175 - "per cpu data for cpu%d at %016lx\n", 176 - cpu, __pa(ptr)); 177 - } 178 - else { 174 + pr_debug("per cpu data for cpu%d at %016lx\n", 175 + cpu, __pa(ptr)); 176 + } else { 179 177 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, 180 178 __pa(MAX_DMA_ADDRESS)); 181 - if (ptr) 182 - printk(KERN_DEBUG 183 - "per cpu data for cpu%d on node%d " 184 - "at %016lx\n", 185 - cpu, node, __pa(ptr)); 179 + pr_debug("per cpu data for cpu%d on node%d at %016lx\n", 180 + cpu, node, __pa(ptr)); 186 181 } 187 182 #endif 188 183 per_cpu_offset(cpu) = ptr - __per_cpu_start; ··· 330 339 /* 331 340 * Returns a pointer to the bitmask of CPUs on Node 'node'. 332 341 */ 333 - const cpumask_t *_node_to_cpumask_ptr(int node) 342 + const cpumask_t *cpumask_of_node(int node) 334 343 { 335 344 if (node_to_cpumask_map == NULL) { 336 345 printk(KERN_WARNING 337 - "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n", 346 + "cpumask_of_node(%d): no node_to_cpumask_map!\n", 338 347 node); 339 348 dump_stack(); 340 349 return (const cpumask_t *)&cpu_online_map; 341 350 } 342 351 if (node >= nr_node_ids) { 343 352 printk(KERN_WARNING 344 - "_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n", 353 + "cpumask_of_node(%d): node > nr_node_ids(%d)\n", 345 354 node, nr_node_ids); 346 355 dump_stack(); 347 356 return &cpu_mask_none; 348 357 } 349 358 return &node_to_cpumask_map[node]; 350 359 } 351 - EXPORT_SYMBOL(_node_to_cpumask_ptr); 360 + EXPORT_SYMBOL(cpumask_of_node); 352 361 353 362 /* 354 363 * Returns a bitmask of CPUs on Node 'node'.
+11 -4
arch/x86/kernel/smpboot.c
··· 496 496 } 497 497 498 498 /* maps the cpu to the sched domain representing multi-core */ 499 - cpumask_t cpu_coregroup_map(int cpu) 499 + const struct cpumask *cpu_coregroup_mask(int cpu) 500 500 { 501 501 struct cpuinfo_x86 *c = &cpu_data(cpu); 502 502 /* ··· 504 504 * And for power savings, we return cpu_core_map 505 505 */ 506 506 if (sched_mc_power_savings || sched_smt_power_savings) 507 - return per_cpu(cpu_core_map, cpu); 507 + return &per_cpu(cpu_core_map, cpu); 508 508 else 509 - return c->llc_shared_map; 509 + return &c->llc_shared_map; 510 + } 511 + 512 + cpumask_t cpu_coregroup_map(int cpu) 513 + { 514 + return *cpu_coregroup_mask(cpu); 510 515 } 511 516 512 517 static void impress_friends(void) ··· 1154 1149 for_each_possible_cpu(i) { 1155 1150 c = &cpu_data(i); 1156 1151 /* mark all to hotplug */ 1157 - c->cpu_index = NR_CPUS; 1152 + c->cpu_index = nr_cpu_ids; 1158 1153 } 1159 1154 } 1160 1155 ··· 1297 1292 possible = num_processors + disabled_cpus; 1298 1293 else 1299 1294 possible = setup_possible_cpus; 1295 + 1296 + total_cpus = max_t(int, possible, num_processors + disabled_cpus); 1300 1297 1301 1298 if (possible > CONFIG_NR_CPUS) { 1302 1299 printk(KERN_WARNING
+3 -4
arch/x86/mach-voyager/voyager_smp.c
··· 357 357 printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); 358 358 359 359 /* initialize the CPU structures (moved from smp_boot_cpus) */ 360 - for (i = 0; i < NR_CPUS; i++) { 360 + for (i = 0; i < nr_cpu_ids; i++) 361 361 cpu_irq_affinity[i] = ~0; 362 - } 363 362 cpu_online_map = cpumask_of_cpu(boot_cpu_id); 364 363 365 364 /* The boot CPU must be extended */ ··· 1226 1227 * new values until the next timer interrupt in which they do process 1227 1228 * accounting. 1228 1229 */ 1229 - for (i = 0; i < NR_CPUS; ++i) 1230 + for (i = 0; i < nr_cpu_ids; ++i) 1230 1231 per_cpu(prof_multiplier, i) = multiplier; 1231 1232 1232 1233 return 0; ··· 1256 1257 int i; 1257 1258 1258 1259 /* initialize the per cpu irq mask to all disabled */ 1259 - for (i = 0; i < NR_CPUS; i++) 1260 + for (i = 0; i < nr_cpu_ids; i++) 1260 1261 vic_irq_mask[i] = 0xFFFF; 1261 1262 1262 1263 VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);
+2 -2
block/blk.h
··· 99 99 static inline int blk_cpu_to_group(int cpu) 100 100 { 101 101 #ifdef CONFIG_SCHED_MC 102 - cpumask_t mask = cpu_coregroup_map(cpu); 103 - return first_cpu(mask); 102 + const struct cpumask *mask = cpu_coregroup_mask(cpu); 103 + return cpumask_first(mask); 104 104 #elif defined(CONFIG_SCHED_SMT) 105 105 return first_cpu(per_cpu(cpu_sibling_map, cpu)); 106 106 #else
+10 -4
drivers/acpi/processor_core.c
··· 826 826 if (!pr) 827 827 return -ENOMEM; 828 828 829 + if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 830 + kfree(pr); 831 + return -ENOMEM; 832 + } 833 + 829 834 pr->handle = device->handle; 830 835 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); 831 836 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); ··· 850 845 851 846 pr = acpi_driver_data(device); 852 847 853 - if (pr->id >= nr_cpu_ids) { 854 - kfree(pr); 855 - return 0; 856 - } 848 + if (pr->id >= nr_cpu_ids) 849 + goto free; 857 850 858 851 if (type == ACPI_BUS_REMOVAL_EJECT) { 859 852 if (acpi_processor_handle_eject(pr)) ··· 876 873 877 874 per_cpu(processors, pr->id) = NULL; 878 875 per_cpu(processor_device_array, pr->id) = NULL; 876 + 877 + free: 878 + free_cpumask_var(pr->throttling.shared_cpu_map); 879 879 kfree(pr); 880 880 881 881 return 0;
+16 -12
drivers/acpi/processor_perflib.c
··· 588 588 int count, count_target; 589 589 int retval = 0; 590 590 unsigned int i, j; 591 - cpumask_t covered_cpus; 591 + cpumask_var_t covered_cpus; 592 592 struct acpi_processor *pr; 593 593 struct acpi_psd_package *pdomain; 594 594 struct acpi_processor *match_pr; 595 595 struct acpi_psd_package *match_pdomain; 596 + 597 + if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 598 + return -ENOMEM; 596 599 597 600 mutex_lock(&performance_mutex); 598 601 ··· 620 617 } 621 618 622 619 pr->performance = percpu_ptr(performance, i); 623 - cpu_set(i, pr->performance->shared_cpu_map); 620 + cpumask_set_cpu(i, pr->performance->shared_cpu_map); 624 621 if (acpi_processor_get_psd(pr)) { 625 622 retval = -EINVAL; 626 623 continue; ··· 653 650 } 654 651 } 655 652 656 - cpus_clear(covered_cpus); 653 + cpumask_clear(covered_cpus); 657 654 for_each_possible_cpu(i) { 658 655 pr = per_cpu(processors, i); 659 656 if (!pr) 660 657 continue; 661 658 662 - if (cpu_isset(i, covered_cpus)) 659 + if (cpumask_test_cpu(i, covered_cpus)) 663 660 continue; 664 661 665 662 pdomain = &(pr->performance->domain_info); 666 - cpu_set(i, pr->performance->shared_cpu_map); 667 - cpu_set(i, covered_cpus); 663 + cpumask_set_cpu(i, pr->performance->shared_cpu_map); 664 + cpumask_set_cpu(i, covered_cpus); 668 665 if (pdomain->num_processors <= 1) 669 666 continue; 670 667 ··· 702 699 goto err_ret; 703 700 } 704 701 705 - cpu_set(j, covered_cpus); 706 - cpu_set(j, pr->performance->shared_cpu_map); 702 + cpumask_set_cpu(j, covered_cpus); 703 + cpumask_set_cpu(j, pr->performance->shared_cpu_map); 707 704 count++; 708 705 } 709 706 ··· 721 718 722 719 match_pr->performance->shared_type = 723 720 pr->performance->shared_type; 724 - match_pr->performance->shared_cpu_map = 725 - pr->performance->shared_cpu_map; 721 + cpumask_copy(match_pr->performance->shared_cpu_map, 722 + pr->performance->shared_cpu_map); 726 723 } 727 724 } 728 725 ··· 734 731 735 732 /* Assume no coordination on any error parsing domain info */ 736 733 if (retval) { 737 - cpus_clear(pr->performance->shared_cpu_map); 738 - cpu_set(i, pr->performance->shared_cpu_map); 734 + cpumask_clear(pr->performance->shared_cpu_map); 735 + cpumask_set_cpu(i, pr->performance->shared_cpu_map); 739 736 pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL; 740 737 } 741 738 pr->performance = NULL; /* Will be set for real in register */ 742 739 } 743 740 744 741 mutex_unlock(&performance_mutex); 742 + free_cpumask_var(covered_cpus); 745 743 return retval; 746 744 } 747 745 EXPORT_SYMBOL(acpi_processor_preregister_performance);
+52 -28
drivers/acpi/processor_throttling.c
··· 61 61 int count, count_target; 62 62 int retval = 0; 63 63 unsigned int i, j; 64 - cpumask_t covered_cpus; 64 + cpumask_var_t covered_cpus; 65 65 struct acpi_processor *pr, *match_pr; 66 66 struct acpi_tsd_package *pdomain, *match_pdomain; 67 67 struct acpi_processor_throttling *pthrottling, *match_pthrottling; 68 + 69 + if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 70 + return -ENOMEM; 68 71 69 72 /* 70 73 * Now that we have _TSD data from all CPUs, lets setup T-state ··· 94 91 if (retval) 95 92 goto err_ret; 96 93 97 - cpus_clear(covered_cpus); 94 + cpumask_clear(covered_cpus); 98 95 for_each_possible_cpu(i) { 99 96 pr = per_cpu(processors, i); 100 97 if (!pr) 101 98 continue; 102 99 103 - if (cpu_isset(i, covered_cpus)) 100 + if (cpumask_test_cpu(i, covered_cpus)) 104 101 continue; 105 102 pthrottling = &pr->throttling; 106 103 107 104 pdomain = &(pthrottling->domain_info); 108 - cpu_set(i, pthrottling->shared_cpu_map); 109 - cpu_set(i, covered_cpus); 105 + cpumask_set_cpu(i, pthrottling->shared_cpu_map); 106 + cpumask_set_cpu(i, covered_cpus); 110 107 /* 111 108 * If the number of processor in the TSD domain is 1, it is 112 109 * unnecessary to parse the coordination for this CPU. ··· 147 144 goto err_ret; 148 145 } 149 146 150 - cpu_set(j, covered_cpus); 151 - cpu_set(j, pthrottling->shared_cpu_map); 147 + cpumask_set_cpu(j, covered_cpus); 148 + cpumask_set_cpu(j, pthrottling->shared_cpu_map); 152 149 count++; 153 150 } 154 151 for_each_possible_cpu(j) { ··· 168 165 * If some CPUS have the same domain, they 169 166 * will have the same shared_cpu_map. 170 167 */ 171 - match_pthrottling->shared_cpu_map = 172 - pthrottling->shared_cpu_map; 168 + cpumask_copy(match_pthrottling->shared_cpu_map, 169 + pthrottling->shared_cpu_map); 173 170 } 174 171 } 175 172 176 173 err_ret: 174 + free_cpumask_var(covered_cpus); 175 + 177 176 for_each_possible_cpu(i) { 178 177 pr = per_cpu(processors, i); 179 178 if (!pr) ··· 187 182 */ 188 183 if (retval) { 189 184 pthrottling = &(pr->throttling); 190 - cpus_clear(pthrottling->shared_cpu_map); 191 - cpu_set(i, pthrottling->shared_cpu_map); 185 + cpumask_clear(pthrottling->shared_cpu_map); 186 + cpumask_set_cpu(i, pthrottling->shared_cpu_map); 192 187 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 193 188 } 194 189 } ··· 572 567 pthrottling = &pr->throttling; 573 568 pthrottling->tsd_valid_flag = 1; 574 569 pthrottling->shared_type = pdomain->coord_type; 575 - cpu_set(pr->id, pthrottling->shared_cpu_map); 570 + cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); 576 571 /* 577 572 * If the coordination type is not defined in ACPI spec, 578 573 * the tsd_valid_flag will be clear and coordination type ··· 831 826 832 827 static int acpi_processor_get_throttling(struct acpi_processor *pr) 833 828 { 834 - cpumask_t saved_mask; 829 + cpumask_var_t saved_mask; 835 830 int ret; 836 831 837 832 if (!pr) ··· 839 834 840 835 if (!pr->flags.throttling) 841 836 return -ENODEV; 837 + 838 + if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) 839 + return -ENOMEM; 840 + 842 841 /* 843 842 * Migrate task to the cpu pointed by pr. 
844 843 */ 845 - saved_mask = current->cpus_allowed; 846 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); 844 + cpumask_copy(saved_mask, &current->cpus_allowed); 845 + /* FIXME: use work_on_cpu() */ 846 + set_cpus_allowed_ptr(current, cpumask_of(pr->id)); 847 847 ret = pr->throttling.acpi_processor_get_throttling(pr); 848 848 /* restore the previous state */ 849 - set_cpus_allowed_ptr(current, &saved_mask); 849 + set_cpus_allowed_ptr(current, saved_mask); 850 + free_cpumask_var(saved_mask); 850 851 851 852 return ret; 852 853 } ··· 997 986 998 987 int acpi_processor_set_throttling(struct acpi_processor *pr, int state) 999 988 { 1000 - cpumask_t saved_mask; 989 + cpumask_var_t saved_mask; 1001 990 int ret = 0; 1002 991 unsigned int i; 1003 992 struct acpi_processor *match_pr; 1004 993 struct acpi_processor_throttling *p_throttling; 1005 994 struct throttling_tstate t_state; 1006 - cpumask_t online_throttling_cpus; 995 + cpumask_var_t online_throttling_cpus; 1007 996 1008 997 if (!pr) 1009 998 return -EINVAL; ··· 1014 1003 if ((state < 0) || (state > (pr->throttling.state_count - 1))) 1015 1004 return -EINVAL; 1016 1005 1017 - saved_mask = current->cpus_allowed; 1006 + if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) 1007 + return -ENOMEM; 1008 + 1009 + if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) { 1010 + free_cpumask_var(saved_mask); 1011 + return -ENOMEM; 1012 + } 1013 + 1014 + cpumask_copy(saved_mask, &current->cpus_allowed); 1018 1015 t_state.target_state = state; 1019 1016 p_throttling = &(pr->throttling); 1020 - cpus_and(online_throttling_cpus, cpu_online_map, 1021 - p_throttling->shared_cpu_map); 1017 + cpumask_and(online_throttling_cpus, cpu_online_mask, 1018 + p_throttling->shared_cpu_map); 1022 1019 /* 1023 1020 * The throttling notifier will be called for every 1024 1021 * affected cpu in order to get one proper T-state. 1025 1022 * The notifier event is THROTTLING_PRECHANGE. 1026 1023 */ 1027 - for_each_cpu_mask_nr(i, online_throttling_cpus) { 1024 + for_each_cpu(i, online_throttling_cpus) { 1028 1025 t_state.cpu = i; 1029 1026 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, 1030 1027 &t_state); ··· 1044 1025 * it can be called only for the cpu pointed by pr. 1045 1026 */ 1046 1027 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1047 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); 1028 + /* FIXME: use work_on_cpu() */ 1029 + set_cpus_allowed_ptr(current, cpumask_of(pr->id)); 1048 1030 ret = p_throttling->acpi_processor_set_throttling(pr, 1049 1031 t_state.target_state); 1050 1032 } else { ··· 1054 1034 * it is necessary to set T-state for every affected 1055 1035 * cpus. 1056 1036 */ 1057 - for_each_cpu_mask_nr(i, online_throttling_cpus) { 1037 + for_each_cpu(i, online_throttling_cpus) { 1058 1038 match_pr = per_cpu(processors, i); 1059 1039 /* 1060 1040 * If the pointer is invalid, we will report the ··· 1076 1056 continue; 1077 1057 } 1078 1058 t_state.cpu = i; 1079 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); 1059 + /* FIXME: use work_on_cpu() */ 1060 + set_cpus_allowed_ptr(current, cpumask_of(i)); 1080 1061 ret = match_pr->throttling. 1081 1062 acpi_processor_set_throttling( 1082 1063 match_pr, t_state.target_state); ··· 1089 1068 * affected cpu to update the T-states. 
1090 1069 * The notifier event is THROTTLING_POSTCHANGE 1091 1070 */ 1092 - for_each_cpu_mask_nr(i, online_throttling_cpus) { 1071 + for_each_cpu(i, online_throttling_cpus) { 1093 1072 t_state.cpu = i; 1094 1073 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, 1095 1074 &t_state); 1096 1075 } 1097 1076 /* restore the previous state */ 1098 - set_cpus_allowed_ptr(current, &saved_mask); 1077 + /* FIXME: use work_on_cpu() */ 1078 + set_cpus_allowed_ptr(current, saved_mask); 1079 + free_cpumask_var(online_throttling_cpus); 1080 + free_cpumask_var(saved_mask); 1099 1081 return ret; 1100 1082 } 1101 1083 ··· 1144 1120 if (acpi_processor_get_tsd(pr)) { 1145 1121 pthrottling = &pr->throttling; 1146 1122 pthrottling->tsd_valid_flag = 0; 1147 - cpu_set(pr->id, pthrottling->shared_cpu_map); 1123 + cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); 1148 1124 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 1149 1125 } 1150 1126
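
The FIXME comments added above point at work_on_cpu() as the eventual replacement for temporarily rewriting current's affinity with set_cpus_allowed_ptr(). A hedged sketch of that pattern (the wrapper is hypothetical and not part of this merge):

#include <linux/workqueue.h>
#include <acpi/processor.h>

/* Hypothetical wrapper: run the throttling read on pr->id without
 * touching current->cpus_allowed. */
static long example_get_throttling_on_cpu(void *data)
{
	struct acpi_processor *pr = data;

	return pr->throttling.acpi_processor_get_throttling(pr);
}

/* The caller would then be roughly:
 *	ret = work_on_cpu(pr->id, example_get_throttling_on_cpu, pr);
 */
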
+44
drivers/base/cpu.c
··· 128 128 print_cpus_func(possible); 129 129 print_cpus_func(present); 130 130 131 + /* 132 + * Print values for NR_CPUS and offlined cpus 133 + */ 134 + static ssize_t print_cpus_kernel_max(struct sysdev_class *class, char *buf) 135 + { 136 + int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); 137 + return n; 138 + } 139 + static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL); 140 + 141 + /* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */ 142 + unsigned int total_cpus; 143 + 144 + static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf) 145 + { 146 + int n = 0, len = PAGE_SIZE-2; 147 + cpumask_var_t offline; 148 + 149 + /* display offline cpus < nr_cpu_ids */ 150 + if (!alloc_cpumask_var(&offline, GFP_KERNEL)) 151 + return -ENOMEM; 152 + cpumask_complement(offline, cpu_online_mask); 153 + n = cpulist_scnprintf(buf, len, offline); 154 + free_cpumask_var(offline); 155 + 156 + /* display offline cpus >= nr_cpu_ids */ 157 + if (total_cpus && nr_cpu_ids < total_cpus) { 158 + if (n && n < len) 159 + buf[n++] = ','; 160 + 161 + if (nr_cpu_ids == total_cpus-1) 162 + n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids); 163 + else 164 + n += snprintf(&buf[n], len - n, "%d-%d", 165 + nr_cpu_ids, total_cpus-1); 166 + } 167 + 168 + n += snprintf(&buf[n], len - n, "\n"); 169 + return n; 170 + } 171 + static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL); 172 + 131 173 static struct sysdev_class_attribute *cpu_state_attr[] = { 132 174 &attr_online_map, 133 175 &attr_possible_map, 134 176 &attr_present_map, 177 + &attr_kernel_max, 178 + &attr_offline, 135 179 }; 136 180 137 181 static int cpu_states_init(void)
+8 -9
drivers/infiniband/hw/ehca/ehca_irq.c
··· 659 659 660 660 WARN_ON_ONCE(!in_interrupt()); 661 661 if (ehca_debug_level >= 3) 662 - ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); 662 + ehca_dmp(cpu_online_mask, cpumask_size(), ""); 663 663 664 664 spin_lock_irqsave(&pool->last_cpu_lock, flags); 665 - cpu = next_cpu_nr(pool->last_cpu, cpu_online_map); 665 + cpu = cpumask_next(pool->last_cpu, cpu_online_mask); 666 666 if (cpu >= nr_cpu_ids) 667 - cpu = first_cpu(cpu_online_map); 667 + cpu = cpumask_first(cpu_online_mask); 668 668 pool->last_cpu = cpu; 669 669 spin_unlock_irqrestore(&pool->last_cpu_lock, flags); 670 670 ··· 855 855 case CPU_UP_CANCELED_FROZEN: 856 856 ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu); 857 857 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); 858 - kthread_bind(cct->task, any_online_cpu(cpu_online_map)); 858 + kthread_bind(cct->task, cpumask_any(cpu_online_mask)); 859 859 destroy_comp_task(pool, cpu); 860 860 break; 861 861 case CPU_ONLINE: ··· 902 902 return -ENOMEM; 903 903 904 904 spin_lock_init(&pool->last_cpu_lock); 905 - pool->last_cpu = any_online_cpu(cpu_online_map); 905 + pool->last_cpu = cpumask_any(cpu_online_mask); 906 906 907 907 pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task); 908 908 if (pool->cpu_comp_tasks == NULL) { ··· 934 934 935 935 unregister_hotcpu_notifier(&comp_pool_callback_nb); 936 936 937 - for (i = 0; i < NR_CPUS; i++) { 938 - if (cpu_online(i)) 939 - destroy_comp_task(pool, i); 940 - } 937 + for_each_online_cpu(i) 938 + destroy_comp_task(pool, i); 939 + 941 940 free_percpu(pool->cpu_comp_tasks); 942 941 kfree(pool); 943 942 }
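
The round-robin selection above is a common idiom with the new API: advance with cpumask_next() and wrap back with cpumask_first() once the end of the mask is reached. A hedged, generic sketch (state variable and function names are illustrative):

#include <linux/cpumask.h>

static int example_last_cpu = -1;

/* Pick the next online cpu, wrapping around at nr_cpu_ids. */
static int example_pick_next_online_cpu(void)
{
	int cpu = cpumask_next(example_last_cpu, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	example_last_cpu = cpu;
	return cpu;
}
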
+4 -4
drivers/infiniband/hw/ipath/ipath_file_ops.c
··· 1679 1679 * InfiniPath chip to that processor (we assume reasonable connectivity, 1680 1680 * for now). This code assumes that if affinity has been set 1681 1681 * before this point, that at most one cpu is set; for now this 1682 - * is reasonable. I check for both cpus_empty() and cpus_full(), 1682 + * is reasonable. I check for both cpumask_empty() and cpumask_full(), 1683 1683 * in case some kernel variant sets none of the bits when no 1684 1684 * affinity is set. 2.6.11 and 12 kernels have all present 1685 1685 * cpus set. Some day we'll have to fix it up further to handle ··· 1688 1688 * information. There may be some issues with dual core numbering 1689 1689 * as well. This needs more work prior to release. 1690 1690 */ 1691 - if (!cpus_empty(current->cpus_allowed) && 1692 - !cpus_full(current->cpus_allowed)) { 1691 + if (!cpumask_empty(&current->cpus_allowed) && 1692 + !cpumask_full(&current->cpus_allowed)) { 1693 1693 int ncpus = num_online_cpus(), curcpu = -1, nset = 0; 1694 1694 for (i = 0; i < ncpus; i++) 1695 - if (cpu_isset(i, current->cpus_allowed)) { 1695 + if (cpumask_test_cpu(i, &current->cpus_allowed)) { 1696 1696 ipath_cdbg(PROC, "%s[%u] affinity set for " 1697 1697 "cpu %d/%d\n", current->comm, 1698 1698 current->pid, i, ncpus);
+1 -1
drivers/pnp/pnpbios/bioscalls.c
··· 481 481 482 482 set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); 483 483 _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); 484 - for (i = 0; i < NR_CPUS; i++) { 484 + for_each_possible_cpu(i) { 485 485 struct desc_struct *gdt = get_cpu_gdt_table(i); 486 486 if (!gdt) 487 487 continue;
+2 -1
fs/seq_file.c
··· 468 468 return -1; 469 469 } 470 470 471 - int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits) 471 + int seq_bitmap(struct seq_file *m, const unsigned long *bits, 472 + unsigned int nr_bits) 472 473 { 473 474 if (m->count < m->size) { 474 475 int len = bitmap_scnprintf(m->buf + m->count,
+2 -2
include/acpi/processor.h
··· 127 127 unsigned int state_count; 128 128 struct acpi_processor_px *states; 129 129 struct acpi_psd_package domain_info; 130 - cpumask_t shared_cpu_map; 130 + cpumask_var_t shared_cpu_map; 131 131 unsigned int shared_type; 132 132 }; 133 133 ··· 172 172 unsigned int state_count; 173 173 struct acpi_processor_tx_tss *states_tss; 174 174 struct acpi_tsd_package domain_info; 175 - cpumask_t shared_cpu_map; 175 + cpumask_var_t shared_cpu_map; 176 176 int (*acpi_processor_get_throttling) (struct acpi_processor * pr); 177 177 int (*acpi_processor_set_throttling) (struct acpi_processor * pr, 178 178 int state);
+13
include/asm-frv/bitops.h
··· 339 339 return 31 - bit; 340 340 } 341 341 342 + /** 343 + * __fls - find last (most-significant) set bit in a long word 344 + * @word: the word to search 345 + * 346 + * Undefined if no set bit exists, so code should check against 0 first. 347 + */ 348 + static inline unsigned long __fls(unsigned long word) 349 + { 350 + unsigned long bit; 351 + asm("scan %1,gr0,%0" : "=r"(bit) : "r"(word)); 352 + return bit; 353 + } 354 + 342 355 /* 343 356 * special slimline version of fls() for calculating ilog2_u32() 344 357 * - note: no protection against n == 0
+1
include/asm-m32r/bitops.h
··· 251 251 #include <asm-generic/bitops/ffz.h> 252 252 #include <asm-generic/bitops/__ffs.h> 253 253 #include <asm-generic/bitops/fls.h> 254 + #include <asm-generic/bitops/__fls.h> 254 255 #include <asm-generic/bitops/fls64.h> 255 256 256 257 #ifdef __KERNEL__
+5
include/asm-m68k/bitops.h
··· 315 315 return 32 - cnt; 316 316 } 317 317 318 + static inline int __fls(int x) 319 + { 320 + return fls(x) - 1; 321 + } 322 + 318 323 #include <asm-generic/bitops/fls64.h> 319 324 #include <asm-generic/bitops/sched.h> 320 325 #include <asm-generic/bitops/hweight.h>
+11
include/asm-mn10300/bitops.h
··· 196 196 } 197 197 198 198 /** 199 + * __fls - find last (most-significant) set bit in a long word 200 + * @word: the word to search 201 + * 202 + * Undefined if no set bit exists, so code should check against 0 first. 203 + */ 204 + static inline unsigned long __fls(unsigned long word) 205 + { 206 + return __ilog2_u32(word); 207 + } 208 + 209 + /** 199 210 * ffs - find first bit set 200 211 * @x: the word to search 201 212 *
+11
include/asm-xtensa/bitops.h
··· 82 82 return 32 - __cntlz(x); 83 83 } 84 84 85 + /** 86 + * __fls - find last (most-significant) set bit in a long word 87 + * @word: the word to search 88 + * 89 + * Undefined if no set bit exists, so code should check against 0 first. 90 + */ 91 + static inline unsigned long __fls(unsigned long word) 92 + { 93 + return 31 - __cntlz(word); 94 + } 85 95 #else 86 96 87 97 /* Use the generic implementation if we don't have the nsa/nsau instructions. */ ··· 100 90 # include <asm-generic/bitops/__ffs.h> 101 91 # include <asm-generic/bitops/ffz.h> 102 92 # include <asm-generic/bitops/fls.h> 93 + # include <asm-generic/bitops/__fls.h> 103 94 104 95 #endif 105 96
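Each of these arch-specific __fls() definitions implements the same contract: return the bit index of the most-significant set bit, with the result undefined when the word is zero. A portable plain-C illustration of that contract (not any particular arch's implementation):

/* Same semantics as __fls(); behaviour is undefined for word == 0. */
static inline unsigned long __fls_sketch(unsigned long word)
{
        unsigned long bit = 0;

        while (word >>= 1)
                bit++;
        return bit;
}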
+19 -16
include/linux/bitmap.h
··· 137 137 (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ 138 138 ) 139 139 140 + #define small_const_nbits(nbits) \ 141 + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) 142 + 140 143 static inline void bitmap_zero(unsigned long *dst, int nbits) 141 144 { 142 - if (nbits <= BITS_PER_LONG) 145 + if (small_const_nbits(nbits)) 143 146 *dst = 0UL; 144 147 else { 145 148 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); ··· 153 150 static inline void bitmap_fill(unsigned long *dst, int nbits) 154 151 { 155 152 size_t nlongs = BITS_TO_LONGS(nbits); 156 - if (nlongs > 1) { 153 + if (!small_const_nbits(nbits)) { 157 154 int len = (nlongs - 1) * sizeof(unsigned long); 158 155 memset(dst, 0xff, len); 159 156 } ··· 163 160 static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, 164 161 int nbits) 165 162 { 166 - if (nbits <= BITS_PER_LONG) 163 + if (small_const_nbits(nbits)) 167 164 *dst = *src; 168 165 else { 169 166 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); ··· 174 171 static inline void bitmap_and(unsigned long *dst, const unsigned long *src1, 175 172 const unsigned long *src2, int nbits) 176 173 { 177 - if (nbits <= BITS_PER_LONG) 174 + if (small_const_nbits(nbits)) 178 175 *dst = *src1 & *src2; 179 176 else 180 177 __bitmap_and(dst, src1, src2, nbits); ··· 183 180 static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, 184 181 const unsigned long *src2, int nbits) 185 182 { 186 - if (nbits <= BITS_PER_LONG) 183 + if (small_const_nbits(nbits)) 187 184 *dst = *src1 | *src2; 188 185 else 189 186 __bitmap_or(dst, src1, src2, nbits); ··· 192 189 static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, 193 190 const unsigned long *src2, int nbits) 194 191 { 195 - if (nbits <= BITS_PER_LONG) 192 + if (small_const_nbits(nbits)) 196 193 *dst = *src1 ^ *src2; 197 194 else 198 195 __bitmap_xor(dst, src1, src2, nbits); ··· 201 198 static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1, 202 199 const unsigned long *src2, int nbits) 203 200 { 204 - if (nbits <= BITS_PER_LONG) 201 + if (small_const_nbits(nbits)) 205 202 *dst = *src1 & ~(*src2); 206 203 else 207 204 __bitmap_andnot(dst, src1, src2, nbits); ··· 210 207 static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, 211 208 int nbits) 212 209 { 213 - if (nbits <= BITS_PER_LONG) 210 + if (small_const_nbits(nbits)) 214 211 *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); 215 212 else 216 213 __bitmap_complement(dst, src, nbits); ··· 219 216 static inline int bitmap_equal(const unsigned long *src1, 220 217 const unsigned long *src2, int nbits) 221 218 { 222 - if (nbits <= BITS_PER_LONG) 219 + if (small_const_nbits(nbits)) 223 220 return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); 224 221 else 225 222 return __bitmap_equal(src1, src2, nbits); ··· 228 225 static inline int bitmap_intersects(const unsigned long *src1, 229 226 const unsigned long *src2, int nbits) 230 227 { 231 - if (nbits <= BITS_PER_LONG) 228 + if (small_const_nbits(nbits)) 232 229 return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; 233 230 else 234 231 return __bitmap_intersects(src1, src2, nbits); ··· 237 234 static inline int bitmap_subset(const unsigned long *src1, 238 235 const unsigned long *src2, int nbits) 239 236 { 240 - if (nbits <= BITS_PER_LONG) 237 + if (small_const_nbits(nbits)) 241 238 return ! 
((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); 242 239 else 243 240 return __bitmap_subset(src1, src2, nbits); ··· 245 242 246 243 static inline int bitmap_empty(const unsigned long *src, int nbits) 247 244 { 248 - if (nbits <= BITS_PER_LONG) 245 + if (small_const_nbits(nbits)) 249 246 return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); 250 247 else 251 248 return __bitmap_empty(src, nbits); ··· 253 250 254 251 static inline int bitmap_full(const unsigned long *src, int nbits) 255 252 { 256 - if (nbits <= BITS_PER_LONG) 253 + if (small_const_nbits(nbits)) 257 254 return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); 258 255 else 259 256 return __bitmap_full(src, nbits); ··· 261 258 262 259 static inline int bitmap_weight(const unsigned long *src, int nbits) 263 260 { 264 - if (nbits <= BITS_PER_LONG) 261 + if (small_const_nbits(nbits)) 265 262 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); 266 263 return __bitmap_weight(src, nbits); 267 264 } ··· 269 266 static inline void bitmap_shift_right(unsigned long *dst, 270 267 const unsigned long *src, int n, int nbits) 271 268 { 272 - if (nbits <= BITS_PER_LONG) 269 + if (small_const_nbits(nbits)) 273 270 *dst = *src >> n; 274 271 else 275 272 __bitmap_shift_right(dst, src, n, nbits); ··· 278 275 static inline void bitmap_shift_left(unsigned long *dst, 279 276 const unsigned long *src, int n, int nbits) 280 277 { 281 - if (nbits <= BITS_PER_LONG) 278 + if (small_const_nbits(nbits)) 282 279 *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits); 283 280 else 284 281 __bitmap_shift_left(dst, src, n, nbits);
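The practical effect of small_const_nbits() is that the inline single-word fast path is now taken only for compile-time-constant sizes that fit in one long; a runtime size, however small, always goes through the out-of-line __bitmap_*() helper. A minimal caller-side sketch:

#include <linux/bitmap.h>

static int weight_constant(const unsigned long *map)
{
        return bitmap_weight(map, 16);          /* constant: hweight_long() on one word */
}

static int weight_runtime(const unsigned long *map, int nbits)
{
        return bitmap_weight(map, nbits);       /* variable: calls __bitmap_weight() */
}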
+12 -1
include/linux/bitops.h
··· 134 134 */ 135 135 extern unsigned long find_first_zero_bit(const unsigned long *addr, 136 136 unsigned long size); 137 - 138 137 #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ 138 + 139 + #ifdef CONFIG_GENERIC_FIND_LAST_BIT 140 + /** 141 + * find_last_bit - find the last set bit in a memory region 142 + * @addr: The address to start the search at 143 + * @size: The maximum size to search 144 + * 145 + * Returns the bit number of the first set bit, or size. 146 + */ 147 + extern unsigned long find_last_bit(const unsigned long *addr, 148 + unsigned long size); 149 + #endif /* CONFIG_GENERIC_FIND_LAST_BIT */ 139 150 140 151 #ifdef CONFIG_GENERIC_FIND_NEXT_BIT 141 152
+91 -130
include/linux/cpumask.h
··· 144 144 typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; 145 145 extern cpumask_t _unused_cpumask_arg_; 146 146 147 + #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 147 148 #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) 148 149 static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) 149 150 { ··· 268 267 { 269 268 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); 270 269 } 270 + #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 271 + 272 + /** 273 + * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * 274 + * @bitmap: the bitmap 275 + * 276 + * There are a few places where cpumask_var_t isn't appropriate and 277 + * static cpumasks must be used (eg. very early boot), yet we don't 278 + * expose the definition of 'struct cpumask'. 279 + * 280 + * This does the conversion, and can be used as a constant initializer. 281 + */ 282 + #define to_cpumask(bitmap) \ 283 + ((struct cpumask *)(1 ? (bitmap) \ 284 + : (void *)sizeof(__check_is_bitmap(bitmap)))) 285 + 286 + static inline int __check_is_bitmap(const unsigned long *bitmap) 287 + { 288 + return 1; 289 + } 271 290 272 291 /* 273 292 * Special-case data structure for "single bit set only" constant CPU masks. ··· 299 278 extern const unsigned long 300 279 cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; 301 280 302 - static inline const cpumask_t *get_cpu_mask(unsigned int cpu) 281 + static inline const struct cpumask *get_cpu_mask(unsigned int cpu) 303 282 { 304 283 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; 305 284 p -= cpu / BITS_PER_LONG; 306 - return (const cpumask_t *)p; 285 + return to_cpumask(p); 307 286 } 308 287 288 + #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 309 289 /* 310 290 * In cases where we take the address of the cpumask immediately, 311 291 * gcc optimizes it out (it's a constant) and there's no huge stack ··· 392 370 { 393 371 bitmap_fold(dstp->bits, origp->bits, sz, nbits); 394 372 } 373 + #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 395 374 396 375 #if NR_CPUS == 1 397 376 398 377 #define nr_cpu_ids 1 378 + #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 399 379 #define first_cpu(src) ({ (void)(src); 0; }) 400 380 #define next_cpu(n, src) ({ (void)(src); 1; }) 401 381 #define any_online_cpu(mask) 0 402 382 #define for_each_cpu_mask(cpu, mask) \ 403 383 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 404 - 384 + #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 405 385 #else /* NR_CPUS > 1 */ 406 386 407 387 extern int nr_cpu_ids; 388 + #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 408 389 int __first_cpu(const cpumask_t *srcp); 409 390 int __next_cpu(int n, const cpumask_t *srcp); 410 391 int __any_online_cpu(const cpumask_t *mask); ··· 419 394 for ((cpu) = -1; \ 420 395 (cpu) = next_cpu((cpu), (mask)), \ 421 396 (cpu) < NR_CPUS; ) 397 + #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 422 398 #endif 423 399 400 + #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 424 401 #if NR_CPUS <= 64 425 402 426 403 #define next_cpu_nr(n, src) next_cpu(n, src) ··· 440 413 (cpu) < nr_cpu_ids; ) 441 414 442 415 #endif /* NR_CPUS > 64 */ 416 + #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 443 417 444 418 /* 445 419 * The following particular system cpumasks and operations manage 446 - * possible, present, active and online cpus. Each of them is a fixed size 447 - * bitmap of size NR_CPUS. 420 + * possible, present, active and online cpus. 
448 421 * 449 - * #ifdef CONFIG_HOTPLUG_CPU 450 - * cpu_possible_map - has bit 'cpu' set iff cpu is populatable 451 - * cpu_present_map - has bit 'cpu' set iff cpu is populated 452 - * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler 453 - * cpu_active_map - has bit 'cpu' set iff cpu available to migration 454 - * #else 455 - * cpu_possible_map - has bit 'cpu' set iff cpu is populated 456 - * cpu_present_map - copy of cpu_possible_map 457 - * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler 458 - * #endif 422 + * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable 423 + * cpu_present_mask - has bit 'cpu' set iff cpu is populated 424 + * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler 425 + * cpu_active_mask - has bit 'cpu' set iff cpu available to migration 459 426 * 460 - * In either case, NR_CPUS is fixed at compile time, as the static 461 - * size of these bitmaps. The cpu_possible_map is fixed at boot 462 - * time, as the set of CPU id's that it is possible might ever 463 - * be plugged in at anytime during the life of that system boot. 464 - * The cpu_present_map is dynamic(*), representing which CPUs 465 - * are currently plugged in. And cpu_online_map is the dynamic 466 - * subset of cpu_present_map, indicating those CPUs available 467 - * for scheduling. 427 + * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online. 468 428 * 469 - * If HOTPLUG is enabled, then cpu_possible_map is forced to have 429 + * The cpu_possible_mask is fixed at boot time, as the set of CPU id's 430 + * that it is possible might ever be plugged in at anytime during the 431 + * life of that system boot. The cpu_present_mask is dynamic(*), 432 + * representing which CPUs are currently plugged in. And 433 + * cpu_online_mask is the dynamic subset of cpu_present_mask, 434 + * indicating those CPUs available for scheduling. 435 + * 436 + * If HOTPLUG is enabled, then cpu_possible_mask is forced to have 470 437 * all NR_CPUS bits set, otherwise it is just the set of CPUs that 471 438 * ACPI reports present at boot. 472 439 * 473 - * If HOTPLUG is enabled, then cpu_present_map varies dynamically, 440 + * If HOTPLUG is enabled, then cpu_present_mask varies dynamically, 474 441 * depending on what ACPI reports as currently plugged in, otherwise 475 - * cpu_present_map is just a copy of cpu_possible_map. 442 + * cpu_present_mask is just a copy of cpu_possible_mask. 476 443 * 477 - * (*) Well, cpu_present_map is dynamic in the hotplug case. If not 478 - * hotplug, it's a copy of cpu_possible_map, hence fixed at boot. 444 + * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not 445 + * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot. 479 446 * 480 447 * Subtleties: 481 448 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode 482 449 * assumption that their single CPU is online. The UP 483 - * cpu_{online,possible,present}_maps are placebos. Changing them 450 + * cpu_{online,possible,present}_masks are placebos. Changing them 484 451 * will have no useful affect on the following num_*_cpus() 485 452 * and cpu_*() macros in the UP case. This ugliness is a UP 486 453 * optimization - don't waste any instructions or memory references 487 454 * asking if you're online or how many CPUs there are if there is 488 455 * only one CPU. 489 - * 2) Most SMP arch's #define some of these maps to be some 490 - * other map specific to that arch. Therefore, the following 491 - * must be #define macros, not inlines. 
To see why, examine 492 - * the assembly code produced by the following. Note that 493 - * set1() writes phys_x_map, but set2() writes x_map: 494 - * int x_map, phys_x_map; 495 - * #define set1(a) x_map = a 496 - * inline void set2(int a) { x_map = a; } 497 - * #define x_map phys_x_map 498 - * main(){ set1(3); set2(5); } 499 456 */ 500 457 501 - extern cpumask_t cpu_possible_map; 502 - extern cpumask_t cpu_online_map; 503 - extern cpumask_t cpu_present_map; 504 - extern cpumask_t cpu_active_map; 458 + extern const struct cpumask *const cpu_possible_mask; 459 + extern const struct cpumask *const cpu_online_mask; 460 + extern const struct cpumask *const cpu_present_mask; 461 + extern const struct cpumask *const cpu_active_mask; 462 + 463 + /* These strip const, as traditionally they weren't const. */ 464 + #define cpu_possible_map (*(cpumask_t *)cpu_possible_mask) 465 + #define cpu_online_map (*(cpumask_t *)cpu_online_mask) 466 + #define cpu_present_map (*(cpumask_t *)cpu_present_mask) 467 + #define cpu_active_map (*(cpumask_t *)cpu_active_mask) 505 468 506 469 #if NR_CPUS > 1 507 - #define num_online_cpus() cpus_weight_nr(cpu_online_map) 508 - #define num_possible_cpus() cpus_weight_nr(cpu_possible_map) 509 - #define num_present_cpus() cpus_weight_nr(cpu_present_map) 510 - #define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) 511 - #define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) 512 - #define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) 513 - #define cpu_active(cpu) cpu_isset((cpu), cpu_active_map) 470 + #define num_online_cpus() cpumask_weight(cpu_online_mask) 471 + #define num_possible_cpus() cpumask_weight(cpu_possible_mask) 472 + #define num_present_cpus() cpumask_weight(cpu_present_mask) 473 + #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) 474 + #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) 475 + #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) 476 + #define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) 514 477 #else 515 478 #define num_online_cpus() 1 516 479 #define num_possible_cpus() 1 ··· 512 495 #endif 513 496 514 497 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) 515 - 516 - #define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map) 517 - #define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map) 518 - #define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) 519 498 520 499 /* These are the new versions of the cpumask operators: passed by pointer. 521 500 * The older versions will be implemented in terms of these, then deleted. */ ··· 700 687 * No static inline type checking - see Subtlety (1) above. 
701 688 */ 702 689 #define cpumask_test_cpu(cpu, cpumask) \ 703 - test_bit(cpumask_check(cpu), (cpumask)->bits) 690 + test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) 704 691 705 692 /** 706 693 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask ··· 943 930 static inline int cpumask_scnprintf(char *buf, int len, 944 931 const struct cpumask *srcp) 945 932 { 946 - return bitmap_scnprintf(buf, len, srcp->bits, nr_cpumask_bits); 933 + return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits); 947 934 } 948 935 949 936 /** ··· 957 944 static inline int cpumask_parse_user(const char __user *buf, int len, 958 945 struct cpumask *dstp) 959 946 { 960 - return bitmap_parse_user(buf, len, dstp->bits, nr_cpumask_bits); 947 + return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); 961 948 } 962 949 963 950 /** ··· 972 959 static inline int cpulist_scnprintf(char *buf, int len, 973 960 const struct cpumask *srcp) 974 961 { 975 - return bitmap_scnlistprintf(buf, len, srcp->bits, nr_cpumask_bits); 962 + return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp), 963 + nr_cpumask_bits); 976 964 } 977 965 978 966 /** ··· 986 972 */ 987 973 static inline int cpulist_parse(const char *buf, struct cpumask *dstp) 988 974 { 989 - return bitmap_parselist(buf, dstp->bits, nr_cpumask_bits); 990 - } 991 - 992 - /** 993 - * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * 994 - * @bitmap: the bitmap 995 - * 996 - * There are a few places where cpumask_var_t isn't appropriate and 997 - * static cpumasks must be used (eg. very early boot), yet we don't 998 - * expose the definition of 'struct cpumask'. 999 - * 1000 - * This does the conversion, and can be used as a constant initializer. 1001 - */ 1002 - #define to_cpumask(bitmap) \ 1003 - ((struct cpumask *)(1 ? (bitmap) \ 1004 - : (void *)sizeof(__check_is_bitmap(bitmap)))) 1005 - 1006 - static inline int __check_is_bitmap(const unsigned long *bitmap) 1007 - { 1008 - return 1; 975 + return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); 1009 976 } 1010 977 1011 978 /** ··· 1020 1025 #ifdef CONFIG_CPUMASK_OFFSTACK 1021 1026 typedef struct cpumask *cpumask_var_t; 1022 1027 1028 + bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); 1023 1029 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); 1024 1030 void alloc_bootmem_cpumask_var(cpumask_var_t *mask); 1025 1031 void free_cpumask_var(cpumask_var_t mask); ··· 1030 1034 typedef struct cpumask cpumask_var_t[1]; 1031 1035 1032 1036 static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 1037 + { 1038 + return true; 1039 + } 1040 + 1041 + static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, 1042 + int node) 1033 1043 { 1034 1044 return true; 1035 1045 } ··· 1053 1051 } 1054 1052 #endif /* CONFIG_CPUMASK_OFFSTACK */ 1055 1053 1056 - /* The pointer versions of the maps, these will become the primary versions. */ 1057 - #define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map) 1058 - #define cpu_online_mask ((const struct cpumask *)&cpu_online_map) 1059 - #define cpu_present_mask ((const struct cpumask *)&cpu_present_map) 1060 - #define cpu_active_mask ((const struct cpumask *)&cpu_active_map) 1061 - 1062 1054 /* It's common to want to use cpu_all_mask in struct member initializers, 1063 1055 * so it has to refer to an address rather than a pointer. 
*/ 1064 1056 extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); ··· 1061 1065 /* First bits of cpu_bit_bitmap are in fact unset. */ 1062 1066 #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) 1063 1067 1068 + #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) 1069 + #define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) 1070 + #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) 1071 + 1064 1072 /* Wrappers for arch boot code to manipulate normally-constant masks */ 1065 - static inline void set_cpu_possible(unsigned int cpu, bool possible) 1066 - { 1067 - if (possible) 1068 - cpumask_set_cpu(cpu, &cpu_possible_map); 1069 - else 1070 - cpumask_clear_cpu(cpu, &cpu_possible_map); 1071 - } 1072 - 1073 - static inline void set_cpu_present(unsigned int cpu, bool present) 1074 - { 1075 - if (present) 1076 - cpumask_set_cpu(cpu, &cpu_present_map); 1077 - else 1078 - cpumask_clear_cpu(cpu, &cpu_present_map); 1079 - } 1080 - 1081 - static inline void set_cpu_online(unsigned int cpu, bool online) 1082 - { 1083 - if (online) 1084 - cpumask_set_cpu(cpu, &cpu_online_map); 1085 - else 1086 - cpumask_clear_cpu(cpu, &cpu_online_map); 1087 - } 1088 - 1089 - static inline void set_cpu_active(unsigned int cpu, bool active) 1090 - { 1091 - if (active) 1092 - cpumask_set_cpu(cpu, &cpu_active_map); 1093 - else 1094 - cpumask_clear_cpu(cpu, &cpu_active_map); 1095 - } 1096 - 1097 - static inline void init_cpu_present(const struct cpumask *src) 1098 - { 1099 - cpumask_copy(&cpu_present_map, src); 1100 - } 1101 - 1102 - static inline void init_cpu_possible(const struct cpumask *src) 1103 - { 1104 - cpumask_copy(&cpu_possible_map, src); 1105 - } 1106 - 1107 - static inline void init_cpu_online(const struct cpumask *src) 1108 - { 1109 - cpumask_copy(&cpu_online_map, src); 1110 - } 1073 + void set_cpu_possible(unsigned int cpu, bool possible); 1074 + void set_cpu_present(unsigned int cpu, bool present); 1075 + void set_cpu_online(unsigned int cpu, bool online); 1076 + void set_cpu_active(unsigned int cpu, bool active); 1077 + void init_cpu_present(const struct cpumask *src); 1078 + void init_cpu_possible(const struct cpumask *src); 1079 + void init_cpu_online(const struct cpumask *src); 1111 1080 #endif /* __LINUX_CPUMASK_H */
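With the old by-value operators now guarded by CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS and the system masks exported as const pointers, temporary masks are expected to be cpumask_var_t rather than on-stack cpumask_t. A minimal sketch of the allocate/use/free pattern, which behaves the same with or without CONFIG_CPUMASK_OFFSTACK:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int count_present_but_offline(void)
{
        cpumask_var_t tmp;
        int n;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        /* derived mask: present CPUs that are not currently online */
        cpumask_andnot(tmp, cpu_present_mask, cpu_online_mask);
        n = cpumask_weight(tmp);

        free_cpumask_var(tmp);
        return n;
}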
+1 -1
include/linux/interrupt.h
··· 109 109 110 110 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 111 111 112 - extern cpumask_t irq_default_affinity; 112 + extern cpumask_var_t irq_default_affinity; 113 113 114 114 extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); 115 115 extern int irq_can_set_affinity(unsigned int irq);
+2 -2
include/linux/rcuclassic.h
··· 59 59 int signaled; 60 60 61 61 spinlock_t lock ____cacheline_internodealigned_in_smp; 62 - cpumask_t cpumask; /* CPUs that need to switch in order */ 63 - /* for current batch to proceed. */ 62 + DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */ 63 + /* current batch to proceed. */ 64 64 } ____cacheline_internodealigned_in_smp; 65 65 66 66 /* Is batch a before batch b ? */
+4 -3
include/linux/seq_file.h
··· 50 50 int seq_dentry(struct seq_file *, struct dentry *, char *); 51 51 int seq_path_root(struct seq_file *m, struct path *path, struct path *root, 52 52 char *esc); 53 - int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); 54 - static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) 53 + int seq_bitmap(struct seq_file *m, const unsigned long *bits, 54 + unsigned int nr_bits); 55 + static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask) 55 56 { 56 - return seq_bitmap(m, mask->bits, NR_CPUS); 57 + return seq_bitmap(m, mask->bits, nr_cpu_ids); 57 58 } 58 59 59 60 static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
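Since seq_cpumask() now takes a const struct cpumask * and prints only nr_cpu_ids bits, the const system masks can be handed to it directly. A small illustrative seq_file show routine (not from the patch):

#include <linux/seq_file.h>
#include <linux/cpumask.h>

static int online_show(struct seq_file *m, void *v)
{
        seq_cpumask(m, cpu_online_mask);
        seq_putc(m, '\n');
        return 0;
}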
+11 -7
include/linux/smp.h
··· 21 21 u16 priv; 22 22 }; 23 23 24 + /* total number of cpus in this system (may exceed NR_CPUS) */ 25 + extern unsigned int total_cpus; 26 + 24 27 #ifdef CONFIG_SMP 25 28 26 29 #include <linux/preempt.h> ··· 67 64 * Call a function on all other processors 68 65 */ 69 66 int smp_call_function(void(*func)(void *info), void *info, int wait); 70 - /* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */ 71 - int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, 72 - int wait); 67 + void smp_call_function_many(const struct cpumask *mask, 68 + void (*func)(void *info), void *info, bool wait); 73 69 74 - static inline void smp_call_function_many(const struct cpumask *mask, 75 - void (*func)(void *info), void *info, 76 - int wait) 70 + /* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */ 71 + static inline int 72 + smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, 73 + int wait) 77 74 { 78 - smp_call_function_mask(*mask, func, info, wait); 75 + smp_call_function_many(&mask, func, info, wait); 76 + return 0; 79 77 } 80 78 81 79 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
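smp_call_function_many() is now the primary interface; the old by-value smp_call_function_mask() is kept only as a thin wrapper around it. A minimal caller sketch (the callback and its purpose are made up for illustration):

#include <linux/smp.h>
#include <linux/preempt.h>

static void poke_cpu(void *info)
{
        /* runs on each targeted remote CPU */
}

static void poke_all_other_cpus(void)
{
        /* keep preemption off so the calling CPU cannot change underneath;
         * the other online CPUs in the mask run poke_cpu() */
        preempt_disable();
        smp_call_function_many(cpu_online_mask, poke_cpu, NULL, true);
        preempt_enable();
}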
+3 -3
include/linux/stop_machine.h
··· 23 23 * 24 24 * This can be thought of as a very heavy write lock, equivalent to 25 25 * grabbing every spinlock in the kernel. */ 26 - int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); 26 + int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); 27 27 28 28 /** 29 29 * __stop_machine: freeze the machine on all CPUs and run this function ··· 34 34 * Description: This is a special version of the above, which assumes cpus 35 35 * won't come or go while it's being called. Used by hotplug cpu. 36 36 */ 37 - int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); 37 + int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); 38 38 #else 39 39 40 40 static inline int stop_machine(int (*fn)(void *), void *data, 41 - const cpumask_t *cpus) 41 + const struct cpumask *cpus) 42 42 { 43 43 int ret; 44 44 local_irq_disable();
+8 -8
include/linux/threads.h
··· 8 8 */ 9 9 10 10 /* 11 - * Maximum supported processors that can run under SMP. This value is 12 - * set via configure setting. The maximum is equal to the size of the 13 - * bitmasks used on that platform, i.e. 32 or 64. Setting this smaller 14 - * saves quite a bit of memory. 11 + * Maximum supported processors. Setting this smaller saves quite a 12 + * bit of memory. Use nr_cpu_ids instead of this except for static bitmaps. 15 13 */ 16 - #ifdef CONFIG_SMP 17 - #define NR_CPUS CONFIG_NR_CPUS 18 - #else 19 - #define NR_CPUS 1 14 + #ifndef CONFIG_NR_CPUS 15 + /* FIXME: This should be fixed in the arch's Kconfig */ 16 + #define CONFIG_NR_CPUS 1 20 17 #endif 18 + 19 + /* Places which use this should consider cpumask_var_t. */ 20 + #define NR_CPUS CONFIG_NR_CPUS 21 21 22 22 #define MIN_THREADS_LEFT_FOR_ROOT 4 23 23
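The guidance in the new comment is that NR_CPUS remains appropriate only for sizing static bitmaps, while iteration and bounds checks should use nr_cpu_ids. A small sketch of that split, for illustration:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>

static DECLARE_BITMAP(seen_cpus, NR_CPUS);      /* static storage: sized by NR_CPUS */

static void mark_possible_cpus(void)
{
        int cpu;

        for_each_possible_cpu(cpu)              /* iteration: bounded by nr_cpu_ids */
                set_bit(cpu, seen_cpus);
}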
+2 -2
include/linux/tick.h
··· 84 84 85 85 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 86 86 extern struct tick_device *tick_get_broadcast_device(void); 87 - extern cpumask_t *tick_get_broadcast_mask(void); 87 + extern struct cpumask *tick_get_broadcast_mask(void); 88 88 89 89 # ifdef CONFIG_TICK_ONESHOT 90 - extern cpumask_t *tick_get_broadcast_oneshot_mask(void); 90 + extern struct cpumask *tick_get_broadcast_oneshot_mask(void); 91 91 # endif 92 92 93 93 # endif /* BROADCAST */
+4 -9
init/main.c
··· 371 371 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */ 372 372 static void __init setup_nr_cpu_ids(void) 373 373 { 374 - int cpu, highest_cpu = 0; 375 - 376 - for_each_possible_cpu(cpu) 377 - highest_cpu = cpu; 378 - 379 - nr_cpu_ids = highest_cpu + 1; 374 + nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; 380 375 } 381 376 382 377 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA ··· 513 518 { 514 519 int cpu = smp_processor_id(); 515 520 /* Mark the boot cpu "present", "online" etc for SMP and UP case */ 516 - cpu_set(cpu, cpu_online_map); 517 - cpu_set(cpu, cpu_present_map); 518 - cpu_set(cpu, cpu_possible_map); 521 + set_cpu_online(cpu, true); 522 + set_cpu_present(cpu, true); 523 + set_cpu_possible(cpu, true); 519 524 } 520 525 521 526 void __init __weak smp_setup_processor_id(void)
+31 -20
kernel/compat.c
··· 454 454 } 455 455 456 456 static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr, 457 - unsigned len, cpumask_t *new_mask) 457 + unsigned len, struct cpumask *new_mask) 458 458 { 459 459 unsigned long *k; 460 460 461 - if (len < sizeof(cpumask_t)) 462 - memset(new_mask, 0, sizeof(cpumask_t)); 463 - else if (len > sizeof(cpumask_t)) 464 - len = sizeof(cpumask_t); 461 + if (len < cpumask_size()) 462 + memset(new_mask, 0, cpumask_size()); 463 + else if (len > cpumask_size()) 464 + len = cpumask_size(); 465 465 466 - k = cpus_addr(*new_mask); 466 + k = cpumask_bits(new_mask); 467 467 return compat_get_bitmap(k, user_mask_ptr, len * 8); 468 468 } 469 469 ··· 471 471 unsigned int len, 472 472 compat_ulong_t __user *user_mask_ptr) 473 473 { 474 - cpumask_t new_mask; 474 + cpumask_var_t new_mask; 475 475 int retval; 476 476 477 - retval = compat_get_user_cpu_mask(user_mask_ptr, len, &new_mask); 478 - if (retval) 479 - return retval; 477 + if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 478 + return -ENOMEM; 480 479 481 - return sched_setaffinity(pid, &new_mask); 480 + retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask); 481 + if (retval) 482 + goto out; 483 + 484 + retval = sched_setaffinity(pid, new_mask); 485 + out: 486 + free_cpumask_var(new_mask); 487 + return retval; 482 488 } 483 489 484 490 asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, 485 491 compat_ulong_t __user *user_mask_ptr) 486 492 { 487 493 int ret; 488 - cpumask_t mask; 494 + cpumask_var_t mask; 489 495 unsigned long *k; 490 - unsigned int min_length = sizeof(cpumask_t); 496 + unsigned int min_length = cpumask_size(); 491 497 492 - if (NR_CPUS <= BITS_PER_COMPAT_LONG) 498 + if (nr_cpu_ids <= BITS_PER_COMPAT_LONG) 493 499 min_length = sizeof(compat_ulong_t); 494 500 495 501 if (len < min_length) 496 502 return -EINVAL; 497 503 498 - ret = sched_getaffinity(pid, &mask); 504 + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 505 + return -ENOMEM; 506 + 507 + ret = sched_getaffinity(pid, mask); 499 508 if (ret < 0) 500 - return ret; 509 + goto out; 501 510 502 - k = cpus_addr(mask); 511 + k = cpumask_bits(mask); 503 512 ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8); 504 - if (ret) 505 - return ret; 513 + if (ret == 0) 514 + ret = min_length; 506 515 507 - return min_length; 516 + out: 517 + free_cpumask_var(mask); 518 + return ret; 508 519 } 509 520 510 521 int get_compat_itimerspec(struct itimerspec *dst,
+99 -45
kernel/cpu.c
··· 15 15 #include <linux/stop_machine.h> 16 16 #include <linux/mutex.h> 17 17 18 - /* 19 - * Represents all cpu's present in the system 20 - * In systems capable of hotplug, this map could dynamically grow 21 - * as new cpu's are detected in the system via any platform specific 22 - * method, such as ACPI for e.g. 23 - */ 24 - cpumask_t cpu_present_map __read_mostly; 25 - EXPORT_SYMBOL(cpu_present_map); 26 - 27 - /* 28 - * Represents all cpu's that are currently online. 29 - */ 30 - cpumask_t cpu_online_map __read_mostly; 31 - EXPORT_SYMBOL(cpu_online_map); 32 - 33 - #ifdef CONFIG_INIT_ALL_POSSIBLE 34 - cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; 35 - #else 36 - cpumask_t cpu_possible_map __read_mostly; 37 - #endif 38 - EXPORT_SYMBOL(cpu_possible_map); 39 - 40 18 #ifdef CONFIG_SMP 41 - /* Serializes the updates to cpu_online_map, cpu_present_map */ 19 + /* Serializes the updates to cpu_online_mask, cpu_present_mask */ 42 20 static DEFINE_MUTEX(cpu_add_remove_lock); 43 21 44 22 static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); ··· 42 64 mutex_init(&cpu_hotplug.lock); 43 65 cpu_hotplug.refcount = 0; 44 66 } 45 - 46 - cpumask_t cpu_active_map; 47 67 48 68 #ifdef CONFIG_HOTPLUG_CPU 49 69 ··· 73 97 74 98 /* 75 99 * The following two API's must be used when attempting 76 - * to serialize the updates to cpu_online_map, cpu_present_map. 100 + * to serialize the updates to cpu_online_mask, cpu_present_mask. 77 101 */ 78 102 void cpu_maps_update_begin(void) 79 103 { ··· 194 218 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) 195 219 { 196 220 int err, nr_calls = 0; 197 - cpumask_t old_allowed, tmp; 221 + cpumask_var_t old_allowed; 198 222 void *hcpu = (void *)(long)cpu; 199 223 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 200 224 struct take_cpu_down_param tcd_param = { ··· 207 231 208 232 if (!cpu_online(cpu)) 209 233 return -EINVAL; 234 + 235 + if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL)) 236 + return -ENOMEM; 210 237 211 238 cpu_hotplug_begin(); 212 239 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, ··· 225 246 } 226 247 227 248 /* Ensure that we are not runnable on dying cpu */ 228 - old_allowed = current->cpus_allowed; 229 - cpus_setall(tmp); 230 - cpu_clear(cpu, tmp); 231 - set_cpus_allowed_ptr(current, &tmp); 232 - tmp = cpumask_of_cpu(cpu); 249 + cpumask_copy(old_allowed, &current->cpus_allowed); 250 + set_cpus_allowed_ptr(current, 251 + cpumask_of(cpumask_any_but(cpu_online_mask, cpu))); 233 252 234 - err = __stop_machine(take_cpu_down, &tcd_param, &tmp); 253 + err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); 235 254 if (err) { 236 255 /* CPU didn't die: tell everyone. Can't complain. */ 237 256 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, ··· 255 278 check_for_tasks(cpu); 256 279 257 280 out_allowed: 258 - set_cpus_allowed_ptr(current, &old_allowed); 281 + set_cpus_allowed_ptr(current, old_allowed); 259 282 out_release: 260 283 cpu_hotplug_done(); 261 284 if (!err) { ··· 263 286 hcpu) == NOTIFY_BAD) 264 287 BUG(); 265 288 } 289 + free_cpumask_var(old_allowed); 266 290 return err; 267 291 } 268 292 ··· 282 304 283 305 /* 284 306 * Make sure the all cpus did the reschedule and are not 285 - * using stale version of the cpu_active_map. 307 + * using stale version of the cpu_active_mask. 286 308 * This is not strictly necessary becuase stop_machine() 287 309 * that we run down the line already provides the required 288 310 * synchronization. 
But it's really a side effect and we do not ··· 346 368 int __cpuinit cpu_up(unsigned int cpu) 347 369 { 348 370 int err = 0; 349 - if (!cpu_isset(cpu, cpu_possible_map)) { 371 + if (!cpu_possible(cpu)) { 350 372 printk(KERN_ERR "can't online cpu %d because it is not " 351 373 "configured as may-hotadd at boot time\n", cpu); 352 374 #if defined(CONFIG_IA64) || defined(CONFIG_X86_64) ··· 371 393 } 372 394 373 395 #ifdef CONFIG_PM_SLEEP_SMP 374 - static cpumask_t frozen_cpus; 396 + static cpumask_var_t frozen_cpus; 375 397 376 398 int disable_nonboot_cpus(void) 377 399 { 378 400 int cpu, first_cpu, error = 0; 379 401 380 402 cpu_maps_update_begin(); 381 - first_cpu = first_cpu(cpu_online_map); 403 + first_cpu = cpumask_first(cpu_online_mask); 382 404 /* We take down all of the non-boot CPUs in one shot to avoid races 383 405 * with the userspace trying to use the CPU hotplug at the same time 384 406 */ 385 - cpus_clear(frozen_cpus); 407 + cpumask_clear(frozen_cpus); 386 408 printk("Disabling non-boot CPUs ...\n"); 387 409 for_each_online_cpu(cpu) { 388 410 if (cpu == first_cpu) 389 411 continue; 390 412 error = _cpu_down(cpu, 1); 391 413 if (!error) { 392 - cpu_set(cpu, frozen_cpus); 414 + cpumask_set_cpu(cpu, frozen_cpus); 393 415 printk("CPU%d is down\n", cpu); 394 416 } else { 395 417 printk(KERN_ERR "Error taking CPU%d down: %d\n", ··· 415 437 /* Allow everyone to use the CPU hotplug again */ 416 438 cpu_maps_update_begin(); 417 439 cpu_hotplug_disabled = 0; 418 - if (cpus_empty(frozen_cpus)) 440 + if (cpumask_empty(frozen_cpus)) 419 441 goto out; 420 442 421 443 printk("Enabling non-boot CPUs ...\n"); 422 - for_each_cpu_mask_nr(cpu, frozen_cpus) { 444 + for_each_cpu(cpu, frozen_cpus) { 423 445 error = _cpu_up(cpu, 1); 424 446 if (!error) { 425 447 printk("CPU%d is up\n", cpu); ··· 427 449 } 428 450 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); 429 451 } 430 - cpus_clear(frozen_cpus); 452 + cpumask_clear(frozen_cpus); 431 453 out: 432 454 cpu_maps_update_done(); 433 455 } 456 + 457 + static int alloc_frozen_cpus(void) 458 + { 459 + if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO)) 460 + return -ENOMEM; 461 + return 0; 462 + } 463 + core_initcall(alloc_frozen_cpus); 434 464 #endif /* CONFIG_PM_SLEEP_SMP */ 435 465 436 466 /** ··· 454 468 unsigned long val = CPU_STARTING; 455 469 456 470 #ifdef CONFIG_PM_SLEEP_SMP 457 - if (cpu_isset(cpu, frozen_cpus)) 471 + if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus)) 458 472 val = CPU_STARTING_FROZEN; 459 473 #endif /* CONFIG_PM_SLEEP_SMP */ 460 474 raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu); ··· 466 480 * cpu_bit_bitmap[] is a special, "compressed" data structure that 467 481 * represents all NR_CPUS bits binary values of 1<<nr. 468 482 * 469 - * It is used by cpumask_of_cpu() to get a constant address to a CPU 483 + * It is used by cpumask_of() to get a constant address to a CPU 470 484 * mask value that has a single bit set only. 
471 485 */ 472 486 ··· 489 503 490 504 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 491 505 EXPORT_SYMBOL(cpu_all_bits); 506 + 507 + #ifdef CONFIG_INIT_ALL_POSSIBLE 508 + static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly 509 + = CPU_BITS_ALL; 510 + #else 511 + static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; 512 + #endif 513 + const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); 514 + EXPORT_SYMBOL(cpu_possible_mask); 515 + 516 + static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly; 517 + const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits); 518 + EXPORT_SYMBOL(cpu_online_mask); 519 + 520 + static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly; 521 + const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits); 522 + EXPORT_SYMBOL(cpu_present_mask); 523 + 524 + static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly; 525 + const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits); 526 + EXPORT_SYMBOL(cpu_active_mask); 527 + 528 + void set_cpu_possible(unsigned int cpu, bool possible) 529 + { 530 + if (possible) 531 + cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits)); 532 + else 533 + cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits)); 534 + } 535 + 536 + void set_cpu_present(unsigned int cpu, bool present) 537 + { 538 + if (present) 539 + cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits)); 540 + else 541 + cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits)); 542 + } 543 + 544 + void set_cpu_online(unsigned int cpu, bool online) 545 + { 546 + if (online) 547 + cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); 548 + else 549 + cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); 550 + } 551 + 552 + void set_cpu_active(unsigned int cpu, bool active) 553 + { 554 + if (active) 555 + cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); 556 + else 557 + cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits)); 558 + } 559 + 560 + void init_cpu_present(const struct cpumask *src) 561 + { 562 + cpumask_copy(to_cpumask(cpu_present_bits), src); 563 + } 564 + 565 + void init_cpu_possible(const struct cpumask *src) 566 + { 567 + cpumask_copy(to_cpumask(cpu_possible_bits), src); 568 + } 569 + 570 + void init_cpu_online(const struct cpumask *src) 571 + { 572 + cpumask_copy(to_cpumask(cpu_online_bits), src); 573 + }
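With the underlying bitmaps now private to kernel/cpu.c, arch boot code can only touch the system masks through these accessors. A sketch of how an arch's CPU enumeration might use them; firmware_cpu_present() is a made-up placeholder, not a real kernel interface:

#include <linux/init.h>
#include <linux/types.h>
#include <linux/cpumask.h>

/* Hypothetical stand-in for whatever the platform uses to discover CPUs. */
extern bool firmware_cpu_present(unsigned int cpu);

void __init example_smp_init_cpus(void)
{
        unsigned int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!firmware_cpu_present(cpu))
                        continue;
                set_cpu_possible(cpu, true);
                set_cpu_present(cpu, true);
        }
}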
+9 -2
kernel/irq/manage.c
··· 16 16 #include "internals.h" 17 17 18 18 #ifdef CONFIG_SMP 19 + cpumask_var_t irq_default_affinity; 19 20 20 - cpumask_t irq_default_affinity = CPU_MASK_ALL; 21 + static int init_irq_default_affinity(void) 22 + { 23 + alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL); 24 + cpumask_setall(irq_default_affinity); 25 + return 0; 26 + } 27 + core_initcall(init_irq_default_affinity); 21 28 22 29 /** 23 30 * synchronize_irq - wait for pending IRQ handlers (on other CPUs) ··· 134 127 desc->status &= ~IRQ_AFFINITY_SET; 135 128 } 136 129 137 - cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity); 130 + cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity); 138 131 set_affinity: 139 132 desc->chip->set_affinity(irq, &desc->affinity); 140 133
+23 -13
kernel/irq/proc.c
··· 20 20 static int irq_affinity_proc_show(struct seq_file *m, void *v) 21 21 { 22 22 struct irq_desc *desc = irq_to_desc((long)m->private); 23 - cpumask_t *mask = &desc->affinity; 23 + const struct cpumask *mask = &desc->affinity; 24 24 25 25 #ifdef CONFIG_GENERIC_PENDING_IRQ 26 26 if (desc->status & IRQ_MOVE_PENDING) ··· 54 54 if (err) 55 55 goto free_cpumask; 56 56 57 - if (!is_affinity_mask_valid(*new_value)) { 57 + if (!is_affinity_mask_valid(new_value)) { 58 58 err = -EINVAL; 59 59 goto free_cpumask; 60 60 } ··· 93 93 94 94 static int default_affinity_show(struct seq_file *m, void *v) 95 95 { 96 - seq_cpumask(m, &irq_default_affinity); 96 + seq_cpumask(m, irq_default_affinity); 97 97 seq_putc(m, '\n'); 98 98 return 0; 99 99 } ··· 101 101 static ssize_t default_affinity_write(struct file *file, 102 102 const char __user *buffer, size_t count, loff_t *ppos) 103 103 { 104 - cpumask_t new_value; 104 + cpumask_var_t new_value; 105 105 int err; 106 106 107 - err = cpumask_parse_user(buffer, count, &new_value); 108 - if (err) 109 - return err; 107 + if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) 108 + return -ENOMEM; 110 109 111 - if (!is_affinity_mask_valid(new_value)) 112 - return -EINVAL; 110 + err = cpumask_parse_user(buffer, count, new_value); 111 + if (err) 112 + goto out; 113 + 114 + if (!is_affinity_mask_valid(new_value)) { 115 + err = -EINVAL; 116 + goto out; 117 + } 113 118 114 119 /* 115 120 * Do not allow disabling IRQs completely - it's a too easy 116 121 * way to make the system unusable accidentally :-) At least 117 122 * one online CPU still has to be targeted. 118 123 */ 119 - if (!cpus_intersects(new_value, cpu_online_map)) 120 - return -EINVAL; 124 + if (!cpumask_intersects(new_value, cpu_online_mask)) { 125 + err = -EINVAL; 126 + goto out; 127 + } 121 128 122 - irq_default_affinity = new_value; 129 + cpumask_copy(irq_default_affinity, new_value); 130 + err = count; 123 131 124 - return count; 132 + out: 133 + free_cpumask_var(new_value); 134 + return err; 125 135 } 126 136 127 137 static int default_affinity_open(struct inode *inode, struct file *file)
+1 -1
kernel/kexec.c
··· 1116 1116 struct elf_prstatus prstatus; 1117 1117 u32 *buf; 1118 1118 1119 - if ((cpu < 0) || (cpu >= NR_CPUS)) 1119 + if ((cpu < 0) || (cpu >= nr_cpu_ids)) 1120 1120 return; 1121 1121 1122 1122 /* Using ELF notes here is opportunistic.
+1 -1
kernel/power/poweroff.c
··· 27 27 static void handle_poweroff(int key, struct tty_struct *tty) 28 28 { 29 29 /* run sysrq poweroff on boot cpu */ 30 - schedule_work_on(first_cpu(cpu_online_map), &poweroff_work); 30 + schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work); 31 31 } 32 32 33 33 static struct sysrq_key_op sysrq_poweroff_op = {
+25 -13
kernel/profile.c
··· 45 45 int prof_on __read_mostly; 46 46 EXPORT_SYMBOL_GPL(prof_on); 47 47 48 - static cpumask_t prof_cpu_mask = CPU_MASK_ALL; 48 + static cpumask_var_t prof_cpu_mask; 49 49 #ifdef CONFIG_SMP 50 50 static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); 51 51 static DEFINE_PER_CPU(int, cpu_profile_flip); ··· 113 113 buffer_bytes = prof_len*sizeof(atomic_t); 114 114 if (!slab_is_available()) { 115 115 prof_buffer = alloc_bootmem(buffer_bytes); 116 + alloc_bootmem_cpumask_var(&prof_cpu_mask); 116 117 return 0; 117 118 } 119 + 120 + if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL)) 121 + return -ENOMEM; 118 122 119 123 prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); 120 124 if (prof_buffer) ··· 132 128 if (prof_buffer) 133 129 return 0; 134 130 131 + free_cpumask_var(prof_cpu_mask); 135 132 return -ENOMEM; 136 133 } 137 134 ··· 391 386 return NOTIFY_BAD; 392 387 case CPU_ONLINE: 393 388 case CPU_ONLINE_FROZEN: 394 - cpu_set(cpu, prof_cpu_mask); 389 + if (prof_cpu_mask != NULL) 390 + cpumask_set_cpu(cpu, prof_cpu_mask); 395 391 break; 396 392 case CPU_UP_CANCELED: 397 393 case CPU_UP_CANCELED_FROZEN: 398 394 case CPU_DEAD: 399 395 case CPU_DEAD_FROZEN: 400 - cpu_clear(cpu, prof_cpu_mask); 396 + if (prof_cpu_mask != NULL) 397 + cpumask_clear_cpu(cpu, prof_cpu_mask); 401 398 if (per_cpu(cpu_profile_hits, cpu)[0]) { 402 399 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); 403 400 per_cpu(cpu_profile_hits, cpu)[0] = NULL; ··· 437 430 438 431 if (type == CPU_PROFILING && timer_hook) 439 432 timer_hook(regs); 440 - if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask)) 433 + if (!user_mode(regs) && prof_cpu_mask != NULL && 434 + cpumask_test_cpu(smp_processor_id(), prof_cpu_mask)) 441 435 profile_hit(type, (void *)profile_pc(regs)); 442 436 } 443 437 ··· 450 442 static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, 451 443 int count, int *eof, void *data) 452 444 { 453 - int len = cpumask_scnprintf(page, count, (cpumask_t *)data); 445 + int len = cpumask_scnprintf(page, count, data); 454 446 if (count - len < 2) 455 447 return -EINVAL; 456 448 len += sprintf(page + len, "\n"); ··· 460 452 static int prof_cpu_mask_write_proc(struct file *file, 461 453 const char __user *buffer, unsigned long count, void *data) 462 454 { 463 - cpumask_t *mask = (cpumask_t *)data; 455 + struct cpumask *mask = data; 464 456 unsigned long full_count = count, err; 465 - cpumask_t new_value; 457 + cpumask_var_t new_value; 466 458 467 - err = cpumask_parse_user(buffer, count, &new_value); 468 - if (err) 469 - return err; 459 + if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) 460 + return -ENOMEM; 470 461 471 - *mask = new_value; 472 - return full_count; 462 + err = cpumask_parse_user(buffer, count, new_value); 463 + if (!err) { 464 + cpumask_copy(mask, new_value); 465 + err = full_count; 466 + } 467 + free_cpumask_var(new_value); 468 + return err; 473 469 } 474 470 475 471 void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir) ··· 484 472 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); 485 473 if (!entry) 486 474 return; 487 - entry->data = (void *)&prof_cpu_mask; 475 + entry->data = prof_cpu_mask; 488 476 entry->read_proc = prof_cpu_mask_read_proc; 489 477 entry->write_proc = prof_cpu_mask_write_proc; 490 478 }
+17 -15
kernel/rcuclassic.c
··· 63 63 .completed = -300, 64 64 .pending = -300, 65 65 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), 66 - .cpumask = CPU_MASK_NONE, 66 + .cpumask = CPU_BITS_NONE, 67 67 }; 68 68 static struct rcu_ctrlblk rcu_bh_ctrlblk = { 69 69 .cur = -300, 70 70 .completed = -300, 71 71 .pending = -300, 72 72 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), 73 - .cpumask = CPU_MASK_NONE, 73 + .cpumask = CPU_BITS_NONE, 74 74 }; 75 75 76 76 DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L }; ··· 85 85 struct rcu_ctrlblk *rcp) 86 86 { 87 87 int cpu; 88 - cpumask_t cpumask; 89 88 unsigned long flags; 90 89 91 90 set_need_resched(); ··· 95 96 * Don't send IPI to itself. With irqs disabled, 96 97 * rdp->cpu is the current cpu. 97 98 * 98 - * cpu_online_map is updated by the _cpu_down() 99 + * cpu_online_mask is updated by the _cpu_down() 99 100 * using __stop_machine(). Since we're in irqs disabled 100 101 * section, __stop_machine() is not exectuting, hence 101 - * the cpu_online_map is stable. 102 + * the cpu_online_mask is stable. 102 103 * 103 104 * However, a cpu might have been offlined _just_ before 104 105 * we disabled irqs while entering here. ··· 106 107 * notification, leading to the offlined cpu's bit 107 108 * being set in the rcp->cpumask. 108 109 * 109 - * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent 110 + * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent 110 111 * sending smp_reschedule() to an offlined CPU. 111 112 */ 112 - cpus_and(cpumask, rcp->cpumask, cpu_online_map); 113 - cpu_clear(rdp->cpu, cpumask); 114 - for_each_cpu_mask_nr(cpu, cpumask) 115 - smp_send_reschedule(cpu); 113 + for_each_cpu_and(cpu, 114 + to_cpumask(rcp->cpumask), cpu_online_mask) { 115 + if (cpu != rdp->cpu) 116 + smp_send_reschedule(cpu); 117 + } 116 118 } 117 119 spin_unlock_irqrestore(&rcp->lock, flags); 118 120 } ··· 193 193 194 194 printk(KERN_ERR "INFO: RCU detected CPU stalls:"); 195 195 for_each_possible_cpu(cpu) { 196 - if (cpu_isset(cpu, rcp->cpumask)) 196 + if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask))) 197 197 printk(" %d", cpu); 198 198 } 199 199 printk(" (detected by %d, t=%ld jiffies)\n", ··· 221 221 long delta; 222 222 223 223 delta = jiffies - rcp->jiffies_stall; 224 - if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0) { 224 + if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) && 225 + delta >= 0) { 225 226 226 227 /* We haven't checked in, so go dump stack. */ 227 228 print_cpu_stall(rcp); ··· 394 393 * unnecessarily. 395 394 */ 396 395 smp_mb(); 397 - cpumask_andnot(&rcp->cpumask, cpu_online_mask, nohz_cpu_mask); 396 + cpumask_andnot(to_cpumask(rcp->cpumask), 397 + cpu_online_mask, nohz_cpu_mask); 398 398 399 399 rcp->signaled = 0; 400 400 } ··· 408 406 */ 409 407 static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) 410 408 { 411 - cpu_clear(cpu, rcp->cpumask); 412 - if (cpus_empty(rcp->cpumask)) { 409 + cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask)); 410 + if (cpumask_empty(to_cpumask(rcp->cpumask))) { 413 411 /* batch completed ! */ 414 412 rcp->completed = rcp->cur; 415 413 rcu_start_batch(rcp);
+10 -9
kernel/rcupreempt.c
··· 164 164 { "idle", "waitack", "waitzero", "waitmb" }; 165 165 #endif /* #ifdef CONFIG_RCU_TRACE */ 166 166 167 - static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE; 167 + static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly 168 + = CPU_BITS_NONE; 168 169 169 170 /* 170 171 * Enum and per-CPU flag to determine when each CPU has seen ··· 759 758 760 759 /* Now ask each CPU for acknowledgement of the flip. */ 761 760 762 - for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { 761 + for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) { 763 762 per_cpu(rcu_flip_flag, cpu) = rcu_flipped; 764 763 dyntick_save_progress_counter(cpu); 765 764 } ··· 777 776 int cpu; 778 777 779 778 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); 780 - for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) 779 + for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) 781 780 if (rcu_try_flip_waitack_needed(cpu) && 782 781 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { 783 782 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); ··· 809 808 /* Check to see if the sum of the "last" counters is zero. */ 810 809 811 810 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); 812 - for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) 811 + for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) 813 812 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; 814 813 if (sum != 0) { 815 814 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); ··· 824 823 smp_mb(); /* ^^^^^^^^^^^^ */ 825 824 826 825 /* Call for a memory barrier from each CPU. */ 827 - for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { 826 + for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) { 828 827 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; 829 828 dyntick_save_progress_counter(cpu); 830 829 } ··· 844 843 int cpu; 845 844 846 845 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); 847 - for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) 846 + for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) 848 847 if (rcu_try_flip_waitmb_needed(cpu) && 849 848 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { 850 849 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); ··· 1033 1032 RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0; 1034 1033 RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0; 1035 1034 1036 - cpu_clear(cpu, rcu_cpu_online_map); 1035 + cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map)); 1037 1036 1038 1037 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); 1039 1038 ··· 1073 1072 struct rcu_data *rdp; 1074 1073 1075 1074 spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); 1076 - cpu_set(cpu, rcu_cpu_online_map); 1075 + cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map)); 1077 1076 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); 1078 1077 1079 1078 /* ··· 1431 1430 * We don't need protection against CPU-Hotplug here 1432 1431 * since 1433 1432 * a) If a CPU comes online while we are iterating over the 1434 - * cpu_online_map below, we would only end up making a 1433 + * cpu_online_mask below, we would only end up making a 1435 1434 * duplicate call to rcu_online_cpu() which sets the corresponding 1436 1435 * CPU's mask in the rcu_cpu_online_map. 1437 1436 *
+15 -12
kernel/rcutorture.c
··· 868 868 */ 869 869 static void rcu_torture_shuffle_tasks(void) 870 870 { 871 - cpumask_t tmp_mask; 871 + cpumask_var_t tmp_mask; 872 872 int i; 873 873 874 - cpus_setall(tmp_mask); 874 + if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)) 875 + BUG(); 876 + 877 + cpumask_setall(tmp_mask); 875 878 get_online_cpus(); 876 879 877 880 /* No point in shuffling if there is only one online CPU (ex: UP) */ 878 - if (num_online_cpus() == 1) { 879 - put_online_cpus(); 880 - return; 881 - } 881 + if (num_online_cpus() == 1) 882 + goto out; 882 883 883 884 if (rcu_idle_cpu != -1) 884 - cpu_clear(rcu_idle_cpu, tmp_mask); 885 + cpumask_clear_cpu(rcu_idle_cpu, tmp_mask); 885 886 886 - set_cpus_allowed_ptr(current, &tmp_mask); 887 + set_cpus_allowed_ptr(current, tmp_mask); 887 888 888 889 if (reader_tasks) { 889 890 for (i = 0; i < nrealreaders; i++) 890 891 if (reader_tasks[i]) 891 892 set_cpus_allowed_ptr(reader_tasks[i], 892 - &tmp_mask); 893 + tmp_mask); 893 894 } 894 895 895 896 if (fakewriter_tasks) { 896 897 for (i = 0; i < nfakewriters; i++) 897 898 if (fakewriter_tasks[i]) 898 899 set_cpus_allowed_ptr(fakewriter_tasks[i], 899 - &tmp_mask); 900 + tmp_mask); 900 901 } 901 902 902 903 if (writer_task) 903 - set_cpus_allowed_ptr(writer_task, &tmp_mask); 904 + set_cpus_allowed_ptr(writer_task, tmp_mask); 904 905 905 906 if (stats_task) 906 - set_cpus_allowed_ptr(stats_task, &tmp_mask); 907 + set_cpus_allowed_ptr(stats_task, tmp_mask); 907 908 908 909 if (rcu_idle_cpu == -1) 909 910 rcu_idle_cpu = num_online_cpus() - 1; 910 911 else 911 912 rcu_idle_cpu--; 912 913 914 + out: 913 915 put_online_cpus(); 916 + free_cpumask_var(tmp_mask); 914 917 } 915 918 916 919 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
+15 -38
kernel/sched.c
··· 3715 3715 * don't kick the migration_thread, if the curr 3716 3716 * task on busiest cpu can't be moved to this_cpu 3717 3717 */ 3718 - if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { 3718 + if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { 3719 3719 double_unlock_balance(this_rq, busiest); 3720 3720 all_pinned = 1; 3721 3721 return ld_moved; ··· 6257 6257 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 6258 6258 { 6259 6259 int dest_cpu; 6260 - /* FIXME: Use cpumask_of_node here. */ 6261 - cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu)); 6262 - const struct cpumask *nodemask = &_nodemask; 6260 + const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu)); 6263 6261 6264 6262 again: 6265 6263 /* Look for allowed, online CPU in same node. */ ··· 7168 7170 static void sched_domain_node_span(int node, struct cpumask *span) 7169 7171 { 7170 7172 nodemask_t used_nodes; 7171 - /* FIXME: use cpumask_of_node() */ 7172 - node_to_cpumask_ptr(nodemask, node); 7173 7173 int i; 7174 7174 7175 - cpus_clear(*span); 7175 + cpumask_clear(span); 7176 7176 nodes_clear(used_nodes); 7177 7177 7178 - cpus_or(*span, *span, *nodemask); 7178 + cpumask_or(span, span, cpumask_of_node(node)); 7179 7179 node_set(node, used_nodes); 7180 7180 7181 7181 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { 7182 7182 int next_node = find_next_best_node(node, &used_nodes); 7183 7183 7184 - node_to_cpumask_ptr_next(nodemask, next_node); 7185 - cpus_or(*span, *span, *nodemask); 7184 + cpumask_or(span, span, cpumask_of_node(next_node)); 7186 7185 } 7187 7186 } 7188 7187 #endif /* CONFIG_NUMA */ ··· 7259 7264 { 7260 7265 int group; 7261 7266 #ifdef CONFIG_SCHED_MC 7262 - /* FIXME: Use cpu_coregroup_mask. */ 7263 - *mask = cpu_coregroup_map(cpu); 7264 - cpus_and(*mask, *mask, *cpu_map); 7267 + cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); 7265 7268 group = cpumask_first(mask); 7266 7269 #elif defined(CONFIG_SCHED_SMT) 7267 7270 cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); ··· 7289 7296 struct cpumask *nodemask) 7290 7297 { 7291 7298 int group; 7292 - /* FIXME: use cpumask_of_node */ 7293 - node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu)); 7294 7299 7295 - cpumask_and(nodemask, pnodemask, cpu_map); 7300 + cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); 7296 7301 group = cpumask_first(nodemask); 7297 7302 7298 7303 if (sg) ··· 7341 7350 7342 7351 for (i = 0; i < nr_node_ids; i++) { 7343 7352 struct sched_group *oldsg, *sg = sched_group_nodes[i]; 7344 - /* FIXME: Use cpumask_of_node */ 7345 - node_to_cpumask_ptr(pnodemask, i); 7346 7353 7347 - cpus_and(*nodemask, *pnodemask, *cpu_map); 7354 + cpumask_and(nodemask, cpumask_of_node(i), cpu_map); 7348 7355 if (cpumask_empty(nodemask)) 7349 7356 continue; 7350 7357 ··· 7551 7562 for_each_cpu(i, cpu_map) { 7552 7563 struct sched_domain *sd = NULL, *p; 7553 7564 7554 - /* FIXME: use cpumask_of_node */ 7555 - *nodemask = node_to_cpumask(cpu_to_node(i)); 7556 - cpus_and(*nodemask, *nodemask, *cpu_map); 7565 + cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map); 7557 7566 7558 7567 #ifdef CONFIG_NUMA 7559 7568 if (cpumask_weight(cpu_map) > ··· 7592 7605 sd = &per_cpu(core_domains, i).sd; 7593 7606 SD_INIT(sd, MC); 7594 7607 set_domain_attribute(sd, attr); 7595 - *sched_domain_span(sd) = cpu_coregroup_map(i); 7596 - cpumask_and(sched_domain_span(sd), 7597 - sched_domain_span(sd), cpu_map); 7608 + cpumask_and(sched_domain_span(sd), cpu_map, 7609 + cpu_coregroup_mask(i)); 
7598 7610 sd->parent = p; 7599 7611 p->child = sd; 7600 7612 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); ··· 7629 7643 #ifdef CONFIG_SCHED_MC 7630 7644 /* Set up multi-core groups */ 7631 7645 for_each_cpu(i, cpu_map) { 7632 - /* FIXME: Use cpu_coregroup_mask */ 7633 - *this_core_map = cpu_coregroup_map(i); 7634 - cpus_and(*this_core_map, *this_core_map, *cpu_map); 7646 + cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map); 7635 7647 if (i != cpumask_first(this_core_map)) 7636 7648 continue; 7637 7649 ··· 7641 7657 7642 7658 /* Set up physical groups */ 7643 7659 for (i = 0; i < nr_node_ids; i++) { 7644 - /* FIXME: Use cpumask_of_node */ 7645 - *nodemask = node_to_cpumask(i); 7646 - cpus_and(*nodemask, *nodemask, *cpu_map); 7660 + cpumask_and(nodemask, cpumask_of_node(i), cpu_map); 7647 7661 if (cpumask_empty(nodemask)) 7648 7662 continue; 7649 7663 ··· 7663 7681 struct sched_group *sg, *prev; 7664 7682 int j; 7665 7683 7666 - /* FIXME: Use cpumask_of_node */ 7667 - *nodemask = node_to_cpumask(i); 7668 7684 cpumask_clear(covered); 7669 - 7670 - cpus_and(*nodemask, *nodemask, *cpu_map); 7685 + cpumask_and(nodemask, cpumask_of_node(i), cpu_map); 7671 7686 if (cpumask_empty(nodemask)) { 7672 7687 sched_group_nodes[i] = NULL; 7673 7688 continue; ··· 7695 7716 7696 7717 for (j = 0; j < nr_node_ids; j++) { 7697 7718 int n = (i + j) % nr_node_ids; 7698 - /* FIXME: Use cpumask_of_node */ 7699 - node_to_cpumask_ptr(pnodemask, n); 7700 7719 7701 7720 cpumask_complement(notcovered, covered); 7702 7721 cpumask_and(tmpmask, notcovered, cpu_map); ··· 7702 7725 if (cpumask_empty(tmpmask)) 7703 7726 break; 7704 7727 7705 - cpumask_and(tmpmask, tmpmask, pnodemask); 7728 + cpumask_and(tmpmask, tmpmask, cpumask_of_node(n)); 7706 7729 if (cpumask_empty(tmpmask)) 7707 7730 continue; 7708 7731
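
Most of the sched.c hunks collapse the old copy-then-mask sequence (*nodemask = node_to_cpumask(n); cpus_and(...)) into a single cpumask_and() against cpumask_of_node(), which never copies an NR_CPUS-bit mask onto the stack. A sketch of that shape, using a hypothetical helper:

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    /* CPUs of @node that are also present in @cpu_map, written into @span. */
    static void node_span_in(int node, const struct cpumask *cpu_map,
                             struct cpumask *span)
    {
            cpumask_and(span, cpumask_of_node(node), cpu_map);
    }
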
+2 -1
kernel/sched_rt.c
··· 1383 1383 unsigned int i; 1384 1384 1385 1385 for_each_possible_cpu(i) 1386 - alloc_cpumask_var(&per_cpu(local_cpu_mask, i), GFP_KERNEL); 1386 + alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 1387 + GFP_KERNEL, cpu_to_node(i)); 1387 1388 } 1388 1389 #endif /* CONFIG_SMP */ 1389 1390
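
alloc_cpumask_var_node() (added by this series, see lib/cpumask.c further down) lets per-cpu masks be allocated on the NUMA node of the CPU that will use them. A sketch with a hypothetical per-cpu scratch mask:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/slab.h>
    #include <linux/topology.h>

    static DEFINE_PER_CPU(cpumask_var_t, scratch_mask);

    static void alloc_scratch_masks(void)
    {
            int cpu;

            for_each_possible_cpu(cpu)
                    /* Place each mask on the memory node of its owning CPU. */
                    alloc_cpumask_var_node(&per_cpu(scratch_mask, cpu),
                                           GFP_KERNEL, cpu_to_node(cpu));
    }
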
+54 -89
kernel/smp.c
··· 24 24 struct call_single_data csd; 25 25 spinlock_t lock; 26 26 unsigned int refs; 27 - cpumask_t cpumask; 28 27 struct rcu_head rcu_head; 28 + unsigned long cpumask_bits[]; 29 29 }; 30 30 31 31 struct call_single_queue { ··· 110 110 list_for_each_entry_rcu(data, &call_function_queue, csd.list) { 111 111 int refs; 112 112 113 - if (!cpu_isset(cpu, data->cpumask)) 113 + if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits))) 114 114 continue; 115 115 116 116 data->csd.func(data->csd.info); 117 117 118 118 spin_lock(&data->lock); 119 - cpu_clear(cpu, data->cpumask); 119 + cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits)); 120 120 WARN_ON(data->refs == 0); 121 121 data->refs--; 122 122 refs = data->refs; ··· 223 223 local_irq_save(flags); 224 224 func(info); 225 225 local_irq_restore(flags); 226 - } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) { 226 + } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { 227 227 struct call_single_data *data = NULL; 228 228 229 229 if (!wait) { ··· 266 266 generic_exec_single(cpu, data); 267 267 } 268 268 269 - /* Dummy function */ 270 - static void quiesce_dummy(void *unused) 271 - { 272 - } 273 - 274 - /* 275 - * Ensure stack based data used in call function mask is safe to free. 276 - * 277 - * This is needed by smp_call_function_mask when using on-stack data, because 278 - * a single call function queue is shared by all CPUs, and any CPU may pick up 279 - * the data item on the queue at any time before it is deleted. So we need to 280 - * ensure that all CPUs have transitioned through a quiescent state after 281 - * this call. 282 - * 283 - * This is a very slow function, implemented by sending synchronous IPIs to 284 - * all possible CPUs. For this reason, we have to alloc data rather than use 285 - * stack based data even in the case of synchronous calls. The stack based 286 - * data is then just used for deadlock/oom fallback which will be very rare. 287 - * 288 - * If a faster scheme can be made, we could go back to preferring stack based 289 - * data -- the data allocation/free is non-zero cost. 290 - */ 291 - static void smp_call_function_mask_quiesce_stack(cpumask_t mask) 292 - { 293 - struct call_single_data data; 294 - int cpu; 295 - 296 - data.func = quiesce_dummy; 297 - data.info = NULL; 298 - 299 - for_each_cpu_mask(cpu, mask) { 300 - data.flags = CSD_FLAG_WAIT; 301 - generic_exec_single(cpu, &data); 302 - } 303 - } 269 + /* FIXME: Shim for archs using old arch_send_call_function_ipi API. */ 270 + #ifndef arch_send_call_function_ipi_mask 271 + #define arch_send_call_function_ipi_mask(maskp) \ 272 + arch_send_call_function_ipi(*(maskp)) 273 + #endif 304 274 305 275 /** 306 - * smp_call_function_mask(): Run a function on a set of other CPUs. 307 - * @mask: The set of cpus to run on. 276 + * smp_call_function_many(): Run a function on a set of other CPUs. 277 + * @mask: The set of cpus to run on (only runs on online subset). 308 278 * @func: The function to run. This must be fast and non-blocking. 309 279 * @info: An arbitrary pointer to pass to the function. 310 280 * @wait: If true, wait (atomically) until function has completed on other CPUs. 311 - * 312 - * Returns 0 on success, else a negative status code. 313 281 * 314 282 * If @wait is true, then returns once @func has returned. Note that @wait 315 283 * will be implicitly turned on in case of allocation failures, since ··· 287 319 * hardware interrupt handler or from a bottom half handler. Preemption 288 320 * must be disabled when calling this function. 
289 321 */ 290 - int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, 291 - int wait) 322 + void smp_call_function_many(const struct cpumask *mask, 323 + void (*func)(void *), void *info, 324 + bool wait) 292 325 { 293 - struct call_function_data d; 294 - struct call_function_data *data = NULL; 295 - cpumask_t allbutself; 326 + struct call_function_data *data; 296 327 unsigned long flags; 297 - int cpu, num_cpus; 298 - int slowpath = 0; 328 + int cpu, next_cpu; 299 329 300 330 /* Can deadlock when called with interrupts disabled */ 301 331 WARN_ON(irqs_disabled()); 302 332 303 - cpu = smp_processor_id(); 304 - allbutself = cpu_online_map; 305 - cpu_clear(cpu, allbutself); 306 - cpus_and(mask, mask, allbutself); 307 - num_cpus = cpus_weight(mask); 333 + /* So, what's a CPU they want? Ignoring this one. */ 334 + cpu = cpumask_first_and(mask, cpu_online_mask); 335 + if (cpu == smp_processor_id()) 336 + cpu = cpumask_next_and(cpu, mask, cpu_online_mask); 337 + /* No online cpus? We're done. */ 338 + if (cpu >= nr_cpu_ids) 339 + return; 308 340 309 - /* 310 - * If zero CPUs, return. If just a single CPU, turn this request 311 - * into a targetted single call instead since it's faster. 312 - */ 313 - if (!num_cpus) 314 - return 0; 315 - else if (num_cpus == 1) { 316 - cpu = first_cpu(mask); 317 - return smp_call_function_single(cpu, func, info, wait); 341 + /* Do we have another CPU which isn't us? */ 342 + next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask); 343 + if (next_cpu == smp_processor_id()) 344 + next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask); 345 + 346 + /* Fastpath: do that cpu by itself. */ 347 + if (next_cpu >= nr_cpu_ids) { 348 + smp_call_function_single(cpu, func, info, wait); 349 + return; 318 350 } 319 351 320 - data = kmalloc(sizeof(*data), GFP_ATOMIC); 321 - if (data) { 322 - data->csd.flags = CSD_FLAG_ALLOC; 323 - if (wait) 324 - data->csd.flags |= CSD_FLAG_WAIT; 325 - } else { 326 - data = &d; 327 - data->csd.flags = CSD_FLAG_WAIT; 328 - wait = 1; 329 - slowpath = 1; 352 + data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC); 353 + if (unlikely(!data)) { 354 + /* Slow path. 
*/ 355 + for_each_online_cpu(cpu) { 356 + if (cpu == smp_processor_id()) 357 + continue; 358 + if (cpumask_test_cpu(cpu, mask)) 359 + smp_call_function_single(cpu, func, info, wait); 360 + } 361 + return; 330 362 } 331 363 332 364 spin_lock_init(&data->lock); 365 + data->csd.flags = CSD_FLAG_ALLOC; 366 + if (wait) 367 + data->csd.flags |= CSD_FLAG_WAIT; 333 368 data->csd.func = func; 334 369 data->csd.info = info; 335 - data->refs = num_cpus; 336 - data->cpumask = mask; 370 + cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask); 371 + cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits)); 372 + data->refs = cpumask_weight(to_cpumask(data->cpumask_bits)); 337 373 338 374 spin_lock_irqsave(&call_function_lock, flags); 339 375 list_add_tail_rcu(&data->csd.list, &call_function_queue); ··· 349 377 smp_mb(); 350 378 351 379 /* Send a message to all CPUs in the map */ 352 - arch_send_call_function_ipi(mask); 380 + arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits)); 353 381 354 382 /* optionally wait for the CPUs to complete */ 355 - if (wait) { 383 + if (wait) 356 384 csd_flag_wait(&data->csd); 357 - if (unlikely(slowpath)) 358 - smp_call_function_mask_quiesce_stack(mask); 359 - } 360 - 361 - return 0; 362 385 } 363 - EXPORT_SYMBOL(smp_call_function_mask); 386 + EXPORT_SYMBOL(smp_call_function_many); 364 387 365 388 /** 366 389 * smp_call_function(): Run a function on all other CPUs. ··· 363 396 * @info: An arbitrary pointer to pass to the function. 364 397 * @wait: If true, wait (atomically) until function has completed on other CPUs. 365 398 * 366 - * Returns 0 on success, else a negative status code. 399 + * Returns 0. 367 400 * 368 401 * If @wait is true, then returns once @func has returned; otherwise 369 402 * it returns just before the target cpu calls @func. In case of allocation ··· 374 407 */ 375 408 int smp_call_function(void (*func)(void *), void *info, int wait) 376 409 { 377 - int ret; 378 - 379 410 preempt_disable(); 380 - ret = smp_call_function_mask(cpu_online_map, func, info, wait); 411 + smp_call_function_many(cpu_online_mask, func, info, wait); 381 412 preempt_enable(); 382 - return ret; 413 + return 0; 383 414 } 384 415 EXPORT_SYMBOL(smp_call_function); 385 416
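
For callers, smp_call_function_many() takes a const struct cpumask * and does the online/current-CPU filtering itself; it only requires that preemption be disabled. A hedged usage sketch (flush_local_state() is an invented callback):

    #include <linux/cpumask.h>
    #include <linux/preempt.h>
    #include <linux/smp.h>

    static void flush_local_state(void *info)
    {
            /* Runs on each targeted CPU in IPI context; keep it short. */
    }

    static void flush_state_on(const struct cpumask *cpus)
    {
            preempt_disable();      /* required by smp_call_function_many() */
            smp_call_function_many(cpus, flush_local_state, NULL, true);
            preempt_enable();
    }
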
+1 -1
kernel/softirq.c
··· 733 733 break; 734 734 /* Unbind so it can run. Fall thru. */ 735 735 kthread_bind(per_cpu(ksoftirqd, hotcpu), 736 - any_online_cpu(cpu_online_map)); 736 + cpumask_any(cpu_online_mask)); 737 737 case CPU_DEAD: 738 738 case CPU_DEAD_FROZEN: { 739 739 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+4 -6
kernel/softlockup.c
··· 303 303 break; 304 304 case CPU_ONLINE: 305 305 case CPU_ONLINE_FROZEN: 306 - check_cpu = any_online_cpu(cpu_online_map); 306 + check_cpu = cpumask_any(cpu_online_mask); 307 307 wake_up_process(per_cpu(watchdog_task, hotcpu)); 308 308 break; 309 309 #ifdef CONFIG_HOTPLUG_CPU 310 310 case CPU_DOWN_PREPARE: 311 311 case CPU_DOWN_PREPARE_FROZEN: 312 312 if (hotcpu == check_cpu) { 313 - cpumask_t temp_cpu_online_map = cpu_online_map; 314 - 315 - cpu_clear(hotcpu, temp_cpu_online_map); 316 - check_cpu = any_online_cpu(temp_cpu_online_map); 313 + /* Pick any other online cpu. */ 314 + check_cpu = cpumask_any_but(cpu_online_mask, hotcpu); 317 315 } 318 316 break; 319 317 ··· 321 323 break; 322 324 /* Unbind so it can run. Fall thru. */ 323 325 kthread_bind(per_cpu(watchdog_task, hotcpu), 324 - any_online_cpu(cpu_online_map)); 326 + cpumask_any(cpu_online_mask)); 325 327 case CPU_DEAD: 326 328 case CPU_DEAD_FROZEN: 327 329 p = per_cpu(watchdog_task, hotcpu);
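
cpumask_any_but() gives "any online CPU except this one" directly, which previously needed a full temporary copy of cpu_online_map. Sketch of the pattern (pick_successor() is hypothetical):

    #include <linux/cpumask.h>

    /* Pick a CPU to take over a per-cpu duty when @dying_cpu goes away. */
    static int pick_successor(int dying_cpu)
    {
            int cpu = cpumask_any_but(cpu_online_mask, dying_cpu);

            /* A return value >= nr_cpu_ids means no other CPU is online. */
            return cpu < nr_cpu_ids ? cpu : -1;
    }
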
+4 -4
kernel/stop_machine.c
··· 69 69 int err; 70 70 71 71 if (!active_cpus) { 72 - if (cpu == first_cpu(cpu_online_map)) 72 + if (cpu == cpumask_first(cpu_online_mask)) 73 73 smdata = &active; 74 74 } else { 75 - if (cpu_isset(cpu, *active_cpus)) 75 + if (cpumask_test_cpu(cpu, active_cpus)) 76 76 smdata = &active; 77 77 } 78 78 /* Simple state machine */ ··· 109 109 return 0; 110 110 } 111 111 112 - int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) 112 + int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) 113 113 { 114 114 struct work_struct *sm_work; 115 115 int i, ret; ··· 142 142 return ret; 143 143 } 144 144 145 - int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) 145 + int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) 146 146 { 147 147 int ret; 148 148
+25 -16
kernel/taskstats.c
··· 290 290 return; 291 291 } 292 292 293 - static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) 293 + static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) 294 294 { 295 295 struct listener_list *listeners; 296 296 struct listener *s, *tmp; 297 297 unsigned int cpu; 298 - cpumask_t mask = *maskp; 299 298 300 - if (!cpus_subset(mask, cpu_possible_map)) 299 + if (!cpumask_subset(mask, cpu_possible_mask)) 301 300 return -EINVAL; 302 301 303 302 if (isadd == REGISTER) { 304 - for_each_cpu_mask_nr(cpu, mask) { 303 + for_each_cpu(cpu, mask) { 305 304 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, 306 305 cpu_to_node(cpu)); 307 306 if (!s) ··· 319 320 320 321 /* Deregister or cleanup */ 321 322 cleanup: 322 - for_each_cpu_mask_nr(cpu, mask) { 323 + for_each_cpu(cpu, mask) { 323 324 listeners = &per_cpu(listener_array, cpu); 324 325 down_write(&listeners->sem); 325 326 list_for_each_entry_safe(s, tmp, &listeners->list, list) { ··· 334 335 return 0; 335 336 } 336 337 337 - static int parse(struct nlattr *na, cpumask_t *mask) 338 + static int parse(struct nlattr *na, struct cpumask *mask) 338 339 { 339 340 char *data; 340 341 int len; ··· 427 428 428 429 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) 429 430 { 430 - int rc = 0; 431 + int rc; 431 432 struct sk_buff *rep_skb; 432 433 struct taskstats *stats; 433 434 size_t size; 434 - cpumask_t mask; 435 + cpumask_var_t mask; 435 436 436 - rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask); 437 - if (rc < 0) 438 - return rc; 439 - if (rc == 0) 440 - return add_del_listener(info->snd_pid, &mask, REGISTER); 437 + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 438 + return -ENOMEM; 441 439 442 - rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask); 440 + rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask); 443 441 if (rc < 0) 442 + goto free_return_rc; 443 + if (rc == 0) { 444 + rc = add_del_listener(info->snd_pid, mask, REGISTER); 445 + goto free_return_rc; 446 + } 447 + 448 + rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask); 449 + if (rc < 0) 450 + goto free_return_rc; 451 + if (rc == 0) { 452 + rc = add_del_listener(info->snd_pid, mask, DEREGISTER); 453 + free_return_rc: 454 + free_cpumask_var(mask); 444 455 return rc; 445 - if (rc == 0) 446 - return add_del_listener(info->snd_pid, &mask, DEREGISTER); 456 + } 457 + free_cpumask_var(mask); 447 458 448 459 /* 449 460 * Size includes space for nested attributes
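
When a mask comes from userspace, the new code validates it with cpumask_subset() against cpu_possible_mask before walking it. A small sketch of that check (apply_user_mask() is made up):

    #include <linux/cpumask.h>

    static int apply_user_mask(const struct cpumask *mask)
    {
            int cpu;

            /* Reject masks naming CPUs this kernel can never have. */
            if (!cpumask_subset(mask, cpu_possible_mask))
                    return -EINVAL;

            for_each_cpu(cpu, mask) {
                    /* per-cpu registration work would go here */
            }
            return 0;
    }
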
+5 -4
kernel/time/clocksource.c
··· 145 145 * Cycle through CPUs to check if the CPUs stay 146 146 * synchronized to each other. 147 147 */ 148 - int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map); 148 + int next_cpu = cpumask_next(raw_smp_processor_id(), 149 + cpu_online_mask); 149 150 150 151 if (next_cpu >= nr_cpu_ids) 151 - next_cpu = first_cpu(cpu_online_map); 152 + next_cpu = cpumask_first(cpu_online_mask); 152 153 watchdog_timer.expires += WATCHDOG_INTERVAL; 153 154 add_timer_on(&watchdog_timer, next_cpu); 154 155 } ··· 174 173 watchdog_last = watchdog->read(); 175 174 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; 176 175 add_timer_on(&watchdog_timer, 177 - first_cpu(cpu_online_map)); 176 + cpumask_first(cpu_online_mask)); 178 177 } 179 178 } else { 180 179 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) ··· 196 195 watchdog_timer.expires = 197 196 jiffies + WATCHDOG_INTERVAL; 198 197 add_timer_on(&watchdog_timer, 199 - first_cpu(cpu_online_map)); 198 + cpumask_first(cpu_online_mask)); 200 199 } 201 200 } 202 201 }
+58 -55
kernel/time/tick-broadcast.c
··· 28 28 */ 29 29 30 30 struct tick_device tick_broadcast_device; 31 - static cpumask_t tick_broadcast_mask; 31 + /* FIXME: Use cpumask_var_t. */ 32 + static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS); 33 + static DECLARE_BITMAP(tmpmask, NR_CPUS); 32 34 static DEFINE_SPINLOCK(tick_broadcast_lock); 33 35 static int tick_broadcast_force; 34 36 ··· 48 46 return &tick_broadcast_device; 49 47 } 50 48 51 - cpumask_t *tick_get_broadcast_mask(void) 49 + struct cpumask *tick_get_broadcast_mask(void) 52 50 { 53 - return &tick_broadcast_mask; 51 + return to_cpumask(tick_broadcast_mask); 54 52 } 55 53 56 54 /* ··· 74 72 75 73 clockevents_exchange_device(NULL, dev); 76 74 tick_broadcast_device.evtdev = dev; 77 - if (!cpus_empty(tick_broadcast_mask)) 75 + if (!cpumask_empty(tick_get_broadcast_mask())) 78 76 tick_broadcast_start_periodic(dev); 79 77 return 1; 80 78 } ··· 106 104 */ 107 105 if (!tick_device_is_functional(dev)) { 108 106 dev->event_handler = tick_handle_periodic; 109 - cpu_set(cpu, tick_broadcast_mask); 107 + cpumask_set_cpu(cpu, tick_get_broadcast_mask()); 110 108 tick_broadcast_start_periodic(tick_broadcast_device.evtdev); 111 109 ret = 1; 112 110 } else { ··· 118 116 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { 119 117 int cpu = smp_processor_id(); 120 118 121 - cpu_clear(cpu, tick_broadcast_mask); 119 + cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); 122 120 tick_broadcast_clear_oneshot(cpu); 123 121 } 124 122 } ··· 127 125 } 128 126 129 127 /* 130 - * Broadcast the event to the cpus, which are set in the mask 128 + * Broadcast the event to the cpus, which are set in the mask (mangled). 131 129 */ 132 - static void tick_do_broadcast(cpumask_t mask) 130 + static void tick_do_broadcast(struct cpumask *mask) 133 131 { 134 132 int cpu = smp_processor_id(); 135 133 struct tick_device *td; ··· 137 135 /* 138 136 * Check, if the current cpu is in the mask 139 137 */ 140 - if (cpu_isset(cpu, mask)) { 141 - cpu_clear(cpu, mask); 138 + if (cpumask_test_cpu(cpu, mask)) { 139 + cpumask_clear_cpu(cpu, mask); 142 140 td = &per_cpu(tick_cpu_device, cpu); 143 141 td->evtdev->event_handler(td->evtdev); 144 142 } 145 143 146 - if (!cpus_empty(mask)) { 144 + if (!cpumask_empty(mask)) { 147 145 /* 148 146 * It might be necessary to actually check whether the devices 149 147 * have different broadcast functions. For now, just use the 150 148 * one of the first device. 
This works as long as we have this 151 149 * misfeature only on x86 (lapic) 152 150 */ 153 - cpu = first_cpu(mask); 154 - td = &per_cpu(tick_cpu_device, cpu); 155 - td->evtdev->broadcast(&mask); 151 + td = &per_cpu(tick_cpu_device, cpumask_first(mask)); 152 + td->evtdev->broadcast(mask); 156 153 } 157 154 } 158 155 ··· 161 160 */ 162 161 static void tick_do_periodic_broadcast(void) 163 162 { 164 - cpumask_t mask; 165 - 166 163 spin_lock(&tick_broadcast_lock); 167 164 168 - cpus_and(mask, cpu_online_map, tick_broadcast_mask); 169 - tick_do_broadcast(mask); 165 + cpumask_and(to_cpumask(tmpmask), 166 + cpu_online_mask, tick_get_broadcast_mask()); 167 + tick_do_broadcast(to_cpumask(tmpmask)); 170 168 171 169 spin_unlock(&tick_broadcast_lock); 172 170 } ··· 228 228 if (!tick_device_is_functional(dev)) 229 229 goto out; 230 230 231 - bc_stopped = cpus_empty(tick_broadcast_mask); 231 + bc_stopped = cpumask_empty(tick_get_broadcast_mask()); 232 232 233 233 switch (*reason) { 234 234 case CLOCK_EVT_NOTIFY_BROADCAST_ON: 235 235 case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: 236 - if (!cpu_isset(cpu, tick_broadcast_mask)) { 237 - cpu_set(cpu, tick_broadcast_mask); 236 + if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) { 237 + cpumask_set_cpu(cpu, tick_get_broadcast_mask()); 238 238 if (tick_broadcast_device.mode == 239 239 TICKDEV_MODE_PERIODIC) 240 240 clockevents_shutdown(dev); ··· 244 244 break; 245 245 case CLOCK_EVT_NOTIFY_BROADCAST_OFF: 246 246 if (!tick_broadcast_force && 247 - cpu_isset(cpu, tick_broadcast_mask)) { 248 - cpu_clear(cpu, tick_broadcast_mask); 247 + cpumask_test_cpu(cpu, tick_get_broadcast_mask())) { 248 + cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); 249 249 if (tick_broadcast_device.mode == 250 250 TICKDEV_MODE_PERIODIC) 251 251 tick_setup_periodic(dev, 0); ··· 253 253 break; 254 254 } 255 255 256 - if (cpus_empty(tick_broadcast_mask)) { 256 + if (cpumask_empty(tick_get_broadcast_mask())) { 257 257 if (!bc_stopped) 258 258 clockevents_shutdown(bc); 259 259 } else if (bc_stopped) { ··· 272 272 */ 273 273 void tick_broadcast_on_off(unsigned long reason, int *oncpu) 274 274 { 275 - if (!cpu_isset(*oncpu, cpu_online_map)) 275 + if (!cpumask_test_cpu(*oncpu, cpu_online_mask)) 276 276 printk(KERN_ERR "tick-broadcast: ignoring broadcast for " 277 277 "offline CPU #%d\n", *oncpu); 278 278 else ··· 303 303 spin_lock_irqsave(&tick_broadcast_lock, flags); 304 304 305 305 bc = tick_broadcast_device.evtdev; 306 - cpu_clear(cpu, tick_broadcast_mask); 306 + cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); 307 307 308 308 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { 309 - if (bc && cpus_empty(tick_broadcast_mask)) 309 + if (bc && cpumask_empty(tick_get_broadcast_mask())) 310 310 clockevents_shutdown(bc); 311 311 } 312 312 ··· 342 342 343 343 switch (tick_broadcast_device.mode) { 344 344 case TICKDEV_MODE_PERIODIC: 345 - if(!cpus_empty(tick_broadcast_mask)) 345 + if (!cpumask_empty(tick_get_broadcast_mask())) 346 346 tick_broadcast_start_periodic(bc); 347 - broadcast = cpu_isset(smp_processor_id(), 348 - tick_broadcast_mask); 347 + broadcast = cpumask_test_cpu(smp_processor_id(), 348 + tick_get_broadcast_mask()); 349 349 break; 350 350 case TICKDEV_MODE_ONESHOT: 351 351 broadcast = tick_resume_broadcast_oneshot(bc); ··· 360 360 361 361 #ifdef CONFIG_TICK_ONESHOT 362 362 363 - static cpumask_t tick_broadcast_oneshot_mask; 363 + /* FIXME: use cpumask_var_t. 
*/ 364 + static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS); 364 365 365 366 /* 366 - * Debugging: see timer_list.c 367 + * Exposed for debugging: see timer_list.c 367 368 */ 368 - cpumask_t *tick_get_broadcast_oneshot_mask(void) 369 + struct cpumask *tick_get_broadcast_oneshot_mask(void) 369 370 { 370 - return &tick_broadcast_oneshot_mask; 371 + return to_cpumask(tick_broadcast_oneshot_mask); 371 372 } 372 373 373 374 static int tick_broadcast_set_event(ktime_t expires, int force) ··· 390 389 */ 391 390 void tick_check_oneshot_broadcast(int cpu) 392 391 { 393 - if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) { 392 + if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) { 394 393 struct tick_device *td = &per_cpu(tick_cpu_device, cpu); 395 394 396 395 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT); ··· 403 402 static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) 404 403 { 405 404 struct tick_device *td; 406 - cpumask_t mask; 407 405 ktime_t now, next_event; 408 406 int cpu; 409 407 ··· 410 410 again: 411 411 dev->next_event.tv64 = KTIME_MAX; 412 412 next_event.tv64 = KTIME_MAX; 413 - mask = CPU_MASK_NONE; 413 + cpumask_clear(to_cpumask(tmpmask)); 414 414 now = ktime_get(); 415 415 /* Find all expired events */ 416 - for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) { 416 + for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) { 417 417 td = &per_cpu(tick_cpu_device, cpu); 418 418 if (td->evtdev->next_event.tv64 <= now.tv64) 419 - cpu_set(cpu, mask); 419 + cpumask_set_cpu(cpu, to_cpumask(tmpmask)); 420 420 else if (td->evtdev->next_event.tv64 < next_event.tv64) 421 421 next_event.tv64 = td->evtdev->next_event.tv64; 422 422 } ··· 424 424 /* 425 425 * Wakeup the cpus which have an expired event. 
426 426 */ 427 - tick_do_broadcast(mask); 427 + tick_do_broadcast(to_cpumask(tmpmask)); 428 428 429 429 /* 430 430 * Two reasons for reprogram: ··· 476 476 goto out; 477 477 478 478 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { 479 - if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) { 480 - cpu_set(cpu, tick_broadcast_oneshot_mask); 479 + if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) { 480 + cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask()); 481 481 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); 482 482 if (dev->next_event.tv64 < bc->next_event.tv64) 483 483 tick_broadcast_set_event(dev->next_event, 1); 484 484 } 485 485 } else { 486 - if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) { 487 - cpu_clear(cpu, tick_broadcast_oneshot_mask); 486 + if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) { 487 + cpumask_clear_cpu(cpu, 488 + tick_get_broadcast_oneshot_mask()); 488 489 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); 489 490 if (dev->next_event.tv64 != KTIME_MAX) 490 491 tick_program_event(dev->next_event, 1); ··· 503 502 */ 504 503 static void tick_broadcast_clear_oneshot(int cpu) 505 504 { 506 - cpu_clear(cpu, tick_broadcast_oneshot_mask); 505 + cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); 507 506 } 508 507 509 - static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires) 508 + static void tick_broadcast_init_next_event(struct cpumask *mask, 509 + ktime_t expires) 510 510 { 511 511 struct tick_device *td; 512 512 int cpu; 513 513 514 - for_each_cpu_mask_nr(cpu, *mask) { 514 + for_each_cpu(cpu, mask) { 515 515 td = &per_cpu(tick_cpu_device, cpu); 516 516 if (td->evtdev) 517 517 td->evtdev->next_event = expires; ··· 528 526 if (bc->event_handler != tick_handle_oneshot_broadcast) { 529 527 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; 530 528 int cpu = smp_processor_id(); 531 - cpumask_t mask; 532 529 533 530 bc->event_handler = tick_handle_oneshot_broadcast; 534 531 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); ··· 541 540 * oneshot_mask bits for those and program the 542 541 * broadcast device to fire. 543 542 */ 544 - mask = tick_broadcast_mask; 545 - cpu_clear(cpu, mask); 546 - cpus_or(tick_broadcast_oneshot_mask, 547 - tick_broadcast_oneshot_mask, mask); 543 + cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask()); 544 + cpumask_clear_cpu(cpu, to_cpumask(tmpmask)); 545 + cpumask_or(tick_get_broadcast_oneshot_mask(), 546 + tick_get_broadcast_oneshot_mask(), 547 + to_cpumask(tmpmask)); 548 548 549 - if (was_periodic && !cpus_empty(mask)) { 550 - tick_broadcast_init_next_event(&mask, tick_next_period); 549 + if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) { 550 + tick_broadcast_init_next_event(to_cpumask(tmpmask), 551 + tick_next_period); 551 552 tick_broadcast_set_event(tick_next_period, 1); 552 553 } else 553 554 bc->next_event.tv64 = KTIME_MAX; ··· 588 585 * Clear the broadcast mask flag for the dead cpu, but do not 589 586 * stop the broadcast device! 590 587 */ 591 - cpu_clear(cpu, tick_broadcast_oneshot_mask); 588 + cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); 592 589 593 590 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 594 591 }
+3 -3
kernel/time/tick-common.c
··· 254 254 curdev = NULL; 255 255 } 256 256 clockevents_exchange_device(curdev, newdev); 257 - tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu)); 257 + tick_setup_device(td, newdev, cpu, cpumask_of(cpu)); 258 258 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) 259 259 tick_oneshot_notify(); 260 260 ··· 299 299 } 300 300 /* Transfer the do_timer job away from this cpu */ 301 301 if (*cpup == tick_do_timer_cpu) { 302 - int cpu = first_cpu(cpu_online_map); 302 + int cpu = cpumask_first(cpu_online_mask); 303 303 304 - tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : 304 + tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : 305 305 TICK_DO_TIMER_NONE; 306 306 } 307 307 spin_unlock_irqrestore(&tick_device_lock, flags);
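
Two small idioms show up here: cpumask_of(cpu) hands out a const pointer where &cpumask_of_cpu(cpu) used to build a value, and "no CPU found" is now tested against nr_cpu_ids rather than NR_CPUS. Sketch (first_online_cpu_or() is hypothetical):

    #include <linux/cpumask.h>

    static int first_online_cpu_or(int fallback)
    {
            int cpu = cpumask_first(cpu_online_mask);

            /* Iterators report "none left" as a value >= nr_cpu_ids. */
            return (cpu < nr_cpu_ids) ? cpu : fallback;
    }
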
+25 -17
kernel/trace/ring_buffer.c
··· 195 195 EXPORT_SYMBOL_GPL(ring_buffer_event_data); 196 196 197 197 #define for_each_buffer_cpu(buffer, cpu) \ 198 - for_each_cpu_mask(cpu, buffer->cpumask) 198 + for_each_cpu(cpu, buffer->cpumask) 199 199 200 200 #define TS_SHIFT 27 201 201 #define TS_MASK ((1ULL << TS_SHIFT) - 1) ··· 267 267 unsigned pages; 268 268 unsigned flags; 269 269 int cpus; 270 - cpumask_t cpumask; 270 + cpumask_var_t cpumask; 271 271 atomic_t record_disabled; 272 272 273 273 struct mutex mutex; ··· 458 458 if (!buffer) 459 459 return NULL; 460 460 461 + if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) 462 + goto fail_free_buffer; 463 + 461 464 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 462 465 buffer->flags = flags; 463 466 ··· 468 465 if (buffer->pages == 1) 469 466 buffer->pages++; 470 467 471 - buffer->cpumask = cpu_possible_map; 468 + cpumask_copy(buffer->cpumask, cpu_possible_mask); 472 469 buffer->cpus = nr_cpu_ids; 473 470 474 471 bsize = sizeof(void *) * nr_cpu_ids; 475 472 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 476 473 GFP_KERNEL); 477 474 if (!buffer->buffers) 478 - goto fail_free_buffer; 475 + goto fail_free_cpumask; 479 476 480 477 for_each_buffer_cpu(buffer, cpu) { 481 478 buffer->buffers[cpu] = ··· 495 492 } 496 493 kfree(buffer->buffers); 497 494 495 + fail_free_cpumask: 496 + free_cpumask_var(buffer->cpumask); 497 + 498 498 fail_free_buffer: 499 499 kfree(buffer); 500 500 return NULL; ··· 515 509 516 510 for_each_buffer_cpu(buffer, cpu) 517 511 rb_free_cpu_buffer(buffer->buffers[cpu]); 512 + 513 + free_cpumask_var(buffer->cpumask); 518 514 519 515 kfree(buffer); 520 516 } ··· 1291 1283 1292 1284 cpu = raw_smp_processor_id(); 1293 1285 1294 - if (!cpu_isset(cpu, buffer->cpumask)) 1286 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1295 1287 goto out; 1296 1288 1297 1289 cpu_buffer = buffer->buffers[cpu]; ··· 1404 1396 1405 1397 cpu = raw_smp_processor_id(); 1406 1398 1407 - if (!cpu_isset(cpu, buffer->cpumask)) 1399 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1408 1400 goto out; 1409 1401 1410 1402 cpu_buffer = buffer->buffers[cpu]; ··· 1486 1478 { 1487 1479 struct ring_buffer_per_cpu *cpu_buffer; 1488 1480 1489 - if (!cpu_isset(cpu, buffer->cpumask)) 1481 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1490 1482 return; 1491 1483 1492 1484 cpu_buffer = buffer->buffers[cpu]; ··· 1506 1498 { 1507 1499 struct ring_buffer_per_cpu *cpu_buffer; 1508 1500 1509 - if (!cpu_isset(cpu, buffer->cpumask)) 1501 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1510 1502 return; 1511 1503 1512 1504 cpu_buffer = buffer->buffers[cpu]; ··· 1523 1515 { 1524 1516 struct ring_buffer_per_cpu *cpu_buffer; 1525 1517 1526 - if (!cpu_isset(cpu, buffer->cpumask)) 1518 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1527 1519 return 0; 1528 1520 1529 1521 cpu_buffer = buffer->buffers[cpu]; ··· 1540 1532 { 1541 1533 struct ring_buffer_per_cpu *cpu_buffer; 1542 1534 1543 - if (!cpu_isset(cpu, buffer->cpumask)) 1535 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1544 1536 return 0; 1545 1537 1546 1538 cpu_buffer = buffer->buffers[cpu]; ··· 1858 1850 struct buffer_page *reader; 1859 1851 int nr_loops = 0; 1860 1852 1861 - if (!cpu_isset(cpu, buffer->cpumask)) 1853 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1862 1854 return NULL; 1863 1855 1864 1856 cpu_buffer = buffer->buffers[cpu]; ··· 2033 2025 struct ring_buffer_event *event; 2034 2026 unsigned long flags; 2035 2027 2036 - if (!cpu_isset(cpu, buffer->cpumask)) 2028 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2037 2029 return NULL; 
2038 2030 2039 2031 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ··· 2070 2062 struct ring_buffer_iter *iter; 2071 2063 unsigned long flags; 2072 2064 2073 - if (!cpu_isset(cpu, buffer->cpumask)) 2065 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2074 2066 return NULL; 2075 2067 2076 2068 iter = kmalloc(sizeof(*iter), GFP_KERNEL); ··· 2180 2172 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 2181 2173 unsigned long flags; 2182 2174 2183 - if (!cpu_isset(cpu, buffer->cpumask)) 2175 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2184 2176 return; 2185 2177 2186 2178 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ··· 2236 2228 { 2237 2229 struct ring_buffer_per_cpu *cpu_buffer; 2238 2230 2239 - if (!cpu_isset(cpu, buffer->cpumask)) 2231 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2240 2232 return 1; 2241 2233 2242 2234 cpu_buffer = buffer->buffers[cpu]; ··· 2260 2252 struct ring_buffer_per_cpu *cpu_buffer_a; 2261 2253 struct ring_buffer_per_cpu *cpu_buffer_b; 2262 2254 2263 - if (!cpu_isset(cpu, buffer_a->cpumask) || 2264 - !cpu_isset(cpu, buffer_b->cpumask)) 2255 + if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 2256 + !cpumask_test_cpu(cpu, buffer_b->cpumask)) 2265 2257 return -EINVAL; 2266 2258 2267 2259 /* At least make sure the two buffers are somewhat the same */
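
ring_buffer shows the pattern for a cpumask embedded in a heap object: the field becomes cpumask_var_t, the constructor gains an alloc_cpumask_var() step with its own error unwind, and the destructor frees it. A compact sketch with a made-up struct:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    struct my_buffer {
            cpumask_var_t   cpumask;        /* a pointer when CPUMASK_OFFSTACK=y */
            /* ... other fields ... */
    };

    static struct my_buffer *my_buffer_alloc(void)
    {
            struct my_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);

            if (!b)
                    return NULL;
            if (!alloc_cpumask_var(&b->cpumask, GFP_KERNEL)) {
                    kfree(b);
                    return NULL;
            }
            cpumask_copy(b->cpumask, cpu_possible_mask);
            return b;
    }

    static void my_buffer_free(struct my_buffer *b)
    {
            free_cpumask_var(b->cpumask);
            kfree(b);
    }
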
+45 -27
kernel/trace/trace.c
··· 89 89 preempt_enable(); 90 90 } 91 91 92 - static cpumask_t __read_mostly tracing_buffer_mask; 92 + static cpumask_var_t __read_mostly tracing_buffer_mask; 93 93 94 94 #define for_each_tracing_cpu(cpu) \ 95 - for_each_cpu_mask(cpu, tracing_buffer_mask) 95 + for_each_cpu(cpu, tracing_buffer_mask) 96 96 97 97 /* 98 98 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops ··· 1811 1811 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 1812 1812 return; 1813 1813 1814 - if (cpu_isset(iter->cpu, iter->started)) 1814 + if (cpumask_test_cpu(iter->cpu, iter->started)) 1815 1815 return; 1816 1816 1817 - cpu_set(iter->cpu, iter->started); 1817 + cpumask_set_cpu(iter->cpu, iter->started); 1818 1818 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); 1819 1819 } 1820 1820 ··· 2646 2646 /* 2647 2647 * Only trace on a CPU if the bitmask is set: 2648 2648 */ 2649 - static cpumask_t tracing_cpumask = CPU_MASK_ALL; 2650 - 2651 - /* 2652 - * When tracing/tracing_cpu_mask is modified then this holds 2653 - * the new bitmask we are about to install: 2654 - */ 2655 - static cpumask_t tracing_cpumask_new; 2649 + static cpumask_var_t tracing_cpumask; 2656 2650 2657 2651 /* 2658 2652 * The tracer itself will not take this lock, but still we want ··· 2668 2674 2669 2675 mutex_lock(&tracing_cpumask_update_lock); 2670 2676 2671 - len = cpumask_scnprintf(mask_str, count, &tracing_cpumask); 2677 + len = cpumask_scnprintf(mask_str, count, tracing_cpumask); 2672 2678 if (count - len < 2) { 2673 2679 count = -EINVAL; 2674 2680 goto out_err; ··· 2687 2693 size_t count, loff_t *ppos) 2688 2694 { 2689 2695 int err, cpu; 2696 + cpumask_var_t tracing_cpumask_new; 2697 + 2698 + if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 2699 + return -ENOMEM; 2690 2700 2691 2701 mutex_lock(&tracing_cpumask_update_lock); 2692 - err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new); 2702 + err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 2693 2703 if (err) 2694 2704 goto err_unlock; 2695 2705 ··· 2704 2706 * Increase/decrease the disabled counter if we are 2705 2707 * about to flip a bit in the cpumask: 2706 2708 */ 2707 - if (cpu_isset(cpu, tracing_cpumask) && 2708 - !cpu_isset(cpu, tracing_cpumask_new)) { 2709 + if (cpumask_test_cpu(cpu, tracing_cpumask) && 2710 + !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 2709 2711 atomic_inc(&global_trace.data[cpu]->disabled); 2710 2712 } 2711 - if (!cpu_isset(cpu, tracing_cpumask) && 2712 - cpu_isset(cpu, tracing_cpumask_new)) { 2713 + if (!cpumask_test_cpu(cpu, tracing_cpumask) && 2714 + cpumask_test_cpu(cpu, tracing_cpumask_new)) { 2713 2715 atomic_dec(&global_trace.data[cpu]->disabled); 2714 2716 } 2715 2717 } 2716 2718 __raw_spin_unlock(&ftrace_max_lock); 2717 2719 local_irq_enable(); 2718 2720 2719 - tracing_cpumask = tracing_cpumask_new; 2721 + cpumask_copy(tracing_cpumask, tracing_cpumask_new); 2720 2722 2721 2723 mutex_unlock(&tracing_cpumask_update_lock); 2724 + free_cpumask_var(tracing_cpumask_new); 2722 2725 2723 2726 return count; 2724 2727 2725 2728 err_unlock: 2726 2729 mutex_unlock(&tracing_cpumask_update_lock); 2730 + free_cpumask_var(tracing_cpumask); 2727 2731 2728 2732 return err; 2729 2733 } ··· 3114 3114 if (!iter) 3115 3115 return -ENOMEM; 3116 3116 3117 + if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 3118 + kfree(iter); 3119 + return -ENOMEM; 3120 + } 3121 + 3117 3122 mutex_lock(&trace_types_lock); 3118 3123 3119 3124 /* trace pipe does not show start of buffer */ 3120 - cpus_setall(iter->started); 3125 + 
cpumask_setall(iter->started); 3121 3126 3122 3127 iter->tr = &global_trace; 3123 3128 iter->trace = current_trace; ··· 3139 3134 { 3140 3135 struct trace_iterator *iter = file->private_data; 3141 3136 3137 + free_cpumask_var(iter->started); 3142 3138 kfree(iter); 3143 3139 atomic_dec(&tracing_reader); 3144 3140 ··· 3758 3752 static DEFINE_SPINLOCK(ftrace_dump_lock); 3759 3753 /* use static because iter can be a bit big for the stack */ 3760 3754 static struct trace_iterator iter; 3761 - static cpumask_t mask; 3762 3755 static int dump_ran; 3763 3756 unsigned long flags; 3764 3757 int cnt = 0, cpu; ··· 3790 3785 * not done often. We fill all what we can read, 3791 3786 * and then release the locks again. 3792 3787 */ 3793 - 3794 - cpus_clear(mask); 3795 3788 3796 3789 while (!trace_empty(&iter)) { 3797 3790 ··· 3826 3823 { 3827 3824 struct trace_array_cpu *data; 3828 3825 int i; 3826 + int ret = -ENOMEM; 3827 + 3828 + if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) 3829 + goto out; 3830 + 3831 + if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) 3832 + goto out_free_buffer_mask; 3833 + 3834 + cpumask_copy(tracing_buffer_mask, cpu_possible_mask); 3835 + cpumask_copy(tracing_cpumask, cpu_all_mask); 3829 3836 3830 3837 /* TODO: make the number of buffers hot pluggable with CPUS */ 3831 - tracing_buffer_mask = cpu_possible_map; 3832 - 3833 3838 global_trace.buffer = ring_buffer_alloc(trace_buf_size, 3834 3839 TRACE_BUFFER_FLAGS); 3835 3840 if (!global_trace.buffer) { 3836 3841 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); 3837 3842 WARN_ON(1); 3838 - return 0; 3843 + goto out_free_cpumask; 3839 3844 } 3840 3845 global_trace.entries = ring_buffer_size(global_trace.buffer); 3846 + 3841 3847 3842 3848 #ifdef CONFIG_TRACER_MAX_TRACE 3843 3849 max_tr.buffer = ring_buffer_alloc(trace_buf_size, ··· 3855 3843 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); 3856 3844 WARN_ON(1); 3857 3845 ring_buffer_free(global_trace.buffer); 3858 - return 0; 3846 + goto out_free_cpumask; 3859 3847 } 3860 3848 max_tr.entries = ring_buffer_size(max_tr.buffer); 3861 3849 WARN_ON(max_tr.entries != global_trace.entries); ··· 3885 3873 &trace_panic_notifier); 3886 3874 3887 3875 register_die_notifier(&trace_die_notifier); 3876 + ret = 0; 3888 3877 3889 - return 0; 3878 + out_free_cpumask: 3879 + free_cpumask_var(tracing_cpumask); 3880 + out_free_buffer_mask: 3881 + free_cpumask_var(tracing_buffer_mask); 3882 + out: 3883 + return ret; 3890 3884 } 3891 3885 early_initcall(tracer_alloc_buffers); 3892 3886 fs_initcall(tracer_init_debugfs);
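
The tracing cpumask write path above is the general recipe for a writable mask file: parse the user buffer into a temporary cpumask_var_t, commit it with cpumask_copy(), and free the temporary on every path. A condensed sketch that leaves locking to the caller (store_cpumask() is not from the patch):

    #include <linux/cpumask.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    static ssize_t store_cpumask(const char __user *ubuf, size_t count,
                                 struct cpumask *dst)
    {
            cpumask_var_t new;
            int err;

            if (!alloc_cpumask_var(&new, GFP_KERNEL))
                    return -ENOMEM;

            err = cpumask_parse_user(ubuf, count, new);
            if (!err)
                    cpumask_copy(dst, new);

            free_cpumask_var(new);
            return err ? err : count;
    }
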
+1 -1
kernel/trace/trace.h
··· 368 368 loff_t pos; 369 369 long idx; 370 370 371 - cpumask_t started; 371 + cpumask_var_t started; 372 372 }; 373 373 374 374 int tracing_is_enabled(void);
+1 -1
kernel/trace/trace_boot.c
··· 42 42 int cpu; 43 43 boot_trace = tr; 44 44 45 - for_each_cpu_mask(cpu, cpu_possible_map) 45 + for_each_cpu(cpu, cpu_possible_mask) 46 46 tracing_reset(tr, cpu); 47 47 48 48 tracing_sched_switch_assign_trace(tr);
+1 -1
kernel/trace/trace_functions_graph.c
··· 79 79 int i; 80 80 int ret; 81 81 int log10_this = log10_cpu(cpu); 82 - int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map)); 82 + int log10_all = log10_cpu(cpumask_weight(cpu_online_mask)); 83 83 84 84 85 85 /*
+3 -3
kernel/trace/trace_hw_branches.c
··· 46 46 47 47 tracing_reset_online_cpus(tr); 48 48 49 - for_each_cpu_mask(cpu, cpu_possible_map) 49 + for_each_cpu(cpu, cpu_possible_mask) 50 50 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); 51 51 } 52 52 ··· 62 62 { 63 63 int cpu; 64 64 65 - for_each_cpu_mask(cpu, cpu_possible_map) 65 + for_each_cpu(cpu, cpu_possible_mask) 66 66 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); 67 67 } 68 68 ··· 172 172 { 173 173 int cpu; 174 174 175 - for_each_cpu_mask(cpu, cpu_possible_map) 175 + for_each_cpu(cpu, cpu_possible_mask) 176 176 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); 177 177 } 178 178
+1 -1
kernel/trace/trace_power.c
··· 39 39 40 40 trace_power_enabled = 1; 41 41 42 - for_each_cpu_mask(cpu, cpu_possible_map) 42 + for_each_cpu(cpu, cpu_possible_mask) 43 43 tracing_reset(tr, cpu); 44 44 return 0; 45 45 }
+3 -10
kernel/trace/trace_sysprof.c
··· 196 196 return HRTIMER_RESTART; 197 197 } 198 198 199 - static void start_stack_timer(int cpu) 199 + static void start_stack_timer(void *unused) 200 200 { 201 - struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu); 201 + struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer); 202 202 203 203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 204 204 hrtimer->function = stack_trace_timer_fn; ··· 208 208 209 209 static void start_stack_timers(void) 210 210 { 211 - cpumask_t saved_mask = current->cpus_allowed; 212 - int cpu; 213 - 214 - for_each_online_cpu(cpu) { 215 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 216 - start_stack_timer(cpu); 217 - } 218 - set_cpus_allowed_ptr(current, &saved_mask); 211 + on_each_cpu(start_stack_timer, NULL, 1); 219 212 } 220 213 221 214 static void stop_stack_timer(int cpu)
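
Instead of temporarily rewriting current->cpus_allowed to visit each CPU, the sysprof tracer now uses on_each_cpu(), which runs the callback on every online CPU via IPI and can wait for completion. Usage sketch (arm_local_timer() is invented):

    #include <linux/smp.h>

    static void arm_local_timer(void *unused)
    {
            /* Per-CPU setup; runs with interrupts disabled on each CPU. */
    }

    static void arm_all_timers(void)
    {
            /* Third argument 1 = wait until every CPU has run the callback. */
            on_each_cpu(arm_local_timer, NULL, 1);
    }
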
+14 -12
kernel/workqueue.c
··· 73 73 static LIST_HEAD(workqueues); 74 74 75 75 static int singlethread_cpu __read_mostly; 76 - static cpumask_t cpu_singlethread_map __read_mostly; 76 + static const struct cpumask *cpu_singlethread_map __read_mostly; 77 77 /* 78 78 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD 79 79 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work ··· 81 81 * use cpu_possible_map, the cpumask below is more a documentation 82 82 * than optimization. 83 83 */ 84 - static cpumask_t cpu_populated_map __read_mostly; 84 + static cpumask_var_t cpu_populated_map __read_mostly; 85 85 86 86 /* If it's single threaded, it isn't in the list of workqueues. */ 87 87 static inline int is_wq_single_threaded(struct workqueue_struct *wq) ··· 89 89 return wq->singlethread; 90 90 } 91 91 92 - static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq) 92 + static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) 93 93 { 94 94 return is_wq_single_threaded(wq) 95 - ? &cpu_singlethread_map : &cpu_populated_map; 95 + ? cpu_singlethread_map : cpu_populated_map; 96 96 } 97 97 98 98 static ··· 410 410 */ 411 411 void flush_workqueue(struct workqueue_struct *wq) 412 412 { 413 - const cpumask_t *cpu_map = wq_cpu_map(wq); 413 + const struct cpumask *cpu_map = wq_cpu_map(wq); 414 414 int cpu; 415 415 416 416 might_sleep(); ··· 532 532 { 533 533 struct cpu_workqueue_struct *cwq; 534 534 struct workqueue_struct *wq; 535 - const cpumask_t *cpu_map; 535 + const struct cpumask *cpu_map; 536 536 int cpu; 537 537 538 538 might_sleep(); ··· 903 903 */ 904 904 void destroy_workqueue(struct workqueue_struct *wq) 905 905 { 906 - const cpumask_t *cpu_map = wq_cpu_map(wq); 906 + const struct cpumask *cpu_map = wq_cpu_map(wq); 907 907 int cpu; 908 908 909 909 cpu_maps_update_begin(); ··· 933 933 934 934 switch (action) { 935 935 case CPU_UP_PREPARE: 936 - cpu_set(cpu, cpu_populated_map); 936 + cpumask_set_cpu(cpu, cpu_populated_map); 937 937 } 938 938 undo: 939 939 list_for_each_entry(wq, &workqueues, list) { ··· 964 964 switch (action) { 965 965 case CPU_UP_CANCELED: 966 966 case CPU_POST_DEAD: 967 - cpu_clear(cpu, cpu_populated_map); 967 + cpumask_clear_cpu(cpu, cpu_populated_map); 968 968 } 969 969 970 970 return ret; ··· 1017 1017 1018 1018 void __init init_workqueues(void) 1019 1019 { 1020 - cpu_populated_map = cpu_online_map; 1021 - singlethread_cpu = first_cpu(cpu_possible_map); 1022 - cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu); 1020 + alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL); 1021 + 1022 + cpumask_copy(cpu_populated_map, cpu_online_mask); 1023 + singlethread_cpu = cpumask_first(cpu_possible_mask); 1024 + cpu_singlethread_map = cpumask_of(singlethread_cpu); 1023 1025 hotcpu_notifier(workqueue_cpu_callback, 0); 1024 1026 keventd_wq = create_workqueue("events"); 1025 1027 BUG_ON(!keventd_wq);
+8
lib/Kconfig
··· 13 13 config GENERIC_FIND_NEXT_BIT 14 14 bool 15 15 16 + config GENERIC_FIND_LAST_BIT 17 + bool 18 + default y 19 + 16 20 config CRC_CCITT 17 21 tristate "CRC-CCITT functions" 18 22 help ··· 169 165 Use dynamic allocation for cpumask_var_t, instead of putting 170 166 them on the stack. This is a bit more expensive, but avoids 171 167 stack overflow. 168 + 169 + config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 170 + bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS 171 + depends on EXPERIMENTAL && BROKEN 172 172 173 173 endmenu
+1
lib/Makefile
··· 37 37 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 38 38 lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o 39 39 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o 40 + lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o 40 41 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 41 42 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 42 43 obj-$(CONFIG_PLIST) += plist.o
+59 -3
lib/cpumask.c
··· 76 76 77 77 /* These are not inline because of header tangles. */ 78 78 #ifdef CONFIG_CPUMASK_OFFSTACK 79 - bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 79 + /** 80 + * alloc_cpumask_var_node - allocate a struct cpumask on a given node 81 + * @mask: pointer to cpumask_var_t where the cpumask is returned 82 + * @flags: GFP_ flags 83 + * 84 + * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is 85 + * a nop returning a constant 1 (in <linux/cpumask.h>) 86 + * Returns TRUE if memory allocation succeeded, FALSE otherwise. 87 + * 88 + * In addition, mask will be NULL if this fails. Note that gcc is 89 + * usually smart enough to know that mask can never be NULL if 90 + * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case 91 + * too. 92 + */ 93 + bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) 80 94 { 81 95 if (likely(slab_is_available())) 82 - *mask = kmalloc(cpumask_size(), flags); 96 + *mask = kmalloc_node(cpumask_size(), flags, node); 83 97 else { 84 98 #ifdef CONFIG_DEBUG_PER_CPU_MAPS 85 99 printk(KERN_ERR 86 100 "=> alloc_cpumask_var: kmalloc not available!\n"); 87 - dump_stack(); 88 101 #endif 89 102 *mask = NULL; 90 103 } ··· 107 94 dump_stack(); 108 95 } 109 96 #endif 97 + /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */ 98 + if (*mask) { 99 + unsigned int tail; 100 + tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long); 101 + memset(cpumask_bits(*mask) + cpumask_size() - tail, 102 + 0, tail); 103 + } 104 + 110 105 return *mask != NULL; 106 + } 107 + EXPORT_SYMBOL(alloc_cpumask_var_node); 108 + 109 + /** 110 + * alloc_cpumask_var - allocate a struct cpumask 111 + * @mask: pointer to cpumask_var_t where the cpumask is returned 112 + * @flags: GFP_ flags 113 + * 114 + * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is 115 + * a nop returning a constant 1 (in <linux/cpumask.h>). 116 + * 117 + * See alloc_cpumask_var_node. 118 + */ 119 + bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 120 + { 121 + return alloc_cpumask_var_node(mask, flags, numa_node_id()); 111 122 } 112 123 EXPORT_SYMBOL(alloc_cpumask_var); 113 124 125 + /** 126 + * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena. 127 + * @mask: pointer to cpumask_var_t where the cpumask is returned 128 + * 129 + * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is 130 + * a nop (in <linux/cpumask.h>). 131 + * Either returns an allocated (zero-filled) cpumask, or causes the 132 + * system to panic. 133 + */ 114 134 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) 115 135 { 116 136 *mask = alloc_bootmem(cpumask_size()); 117 137 } 118 138 139 + /** 140 + * free_cpumask_var - frees memory allocated for a struct cpumask. 141 + * @mask: cpumask to free 142 + * 143 + * This is safe on a NULL mask. 144 + */ 119 145 void free_cpumask_var(cpumask_var_t mask) 120 146 { 121 147 kfree(mask); 122 148 } 123 149 EXPORT_SYMBOL(free_cpumask_var); 124 150 151 + /** 152 + * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var 153 + * @mask: cpumask to free 154 + */ 125 155 void __init free_bootmem_cpumask_var(cpumask_var_t mask) 126 156 { 127 157 free_bootmem((unsigned long)mask, cpumask_size());
+45
lib/find_last_bit.c
··· 1 + /* find_last_bit.c: fallback find next bit implementation 2 + * 3 + * Copyright (C) 2008 IBM Corporation 4 + * Written by Rusty Russell <rusty@rustcorp.com.au> 5 + * (Inspired by David Howell's find_next_bit implementation) 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License 9 + * as published by the Free Software Foundation; either version 10 + * 2 of the License, or (at your option) any later version. 11 + */ 12 + 13 + #include <linux/bitops.h> 14 + #include <linux/module.h> 15 + #include <asm/types.h> 16 + #include <asm/byteorder.h> 17 + 18 + unsigned long find_last_bit(const unsigned long *addr, unsigned long size) 19 + { 20 + unsigned long words; 21 + unsigned long tmp; 22 + 23 + /* Start at final word. */ 24 + words = size / BITS_PER_LONG; 25 + 26 + /* Partial final word? */ 27 + if (size & (BITS_PER_LONG-1)) { 28 + tmp = (addr[words] & (~0UL >> (BITS_PER_LONG 29 + - (size & (BITS_PER_LONG-1))))); 30 + if (tmp) 31 + goto found; 32 + } 33 + 34 + while (words) { 35 + tmp = addr[--words]; 36 + if (tmp) { 37 + found: 38 + return words * BITS_PER_LONG + __fls(tmp); 39 + } 40 + } 41 + 42 + /* Not found */ 43 + return size; 44 + } 45 + EXPORT_SYMBOL(find_last_bit);
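
find_last_bit() returns the index of the highest set bit, or @size when no bit is set, using __fls() on the final non-zero word. A small usage sketch (my_cpumask_last() is hypothetical):

    #include <linux/bitops.h>
    #include <linux/cpumask.h>

    /* Highest CPU set in @mask, or nr_cpumask_bits if the mask is empty. */
    static unsigned int my_cpumask_last(const struct cpumask *mask)
    {
            return find_last_bit(cpumask_bits(mask), nr_cpumask_bits);
    }
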
+13 -3
mm/pdflush.c
··· 172 172 static int pdflush(void *dummy) 173 173 { 174 174 struct pdflush_work my_work; 175 - cpumask_t cpus_allowed; 175 + cpumask_var_t cpus_allowed; 176 + 177 + /* 178 + * Since the caller doesn't even check kthread_run() worked, let's not 179 + * freak out too much if this fails. 180 + */ 181 + if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 182 + printk(KERN_WARNING "pdflush failed to allocate cpumask\n"); 183 + return 0; 184 + } 176 185 177 186 /* 178 187 * pdflush can spend a lot of time doing encryption via dm-crypt. We ··· 196 187 * This is needed as pdflush's are dynamically created and destroyed. 197 188 * The boottime pdflush's are easily placed w/o these 2 lines. 198 189 */ 199 - cpuset_cpus_allowed(current, &cpus_allowed); 200 - set_cpus_allowed_ptr(current, &cpus_allowed); 190 + cpuset_cpus_allowed(current, cpus_allowed); 191 + set_cpus_allowed_ptr(current, cpus_allowed); 192 + free_cpumask_var(cpus_allowed); 201 193 202 194 return __pdflush(&my_work); 203 195 }
+1 -1
mm/slab.c
··· 2157 2157 2158 2158 /* 2159 2159 * We use cache_chain_mutex to ensure a consistent view of 2160 - * cpu_online_map as well. Please see cpuup_callback 2160 + * cpu_online_mask as well. Please see cpuup_callback 2161 2161 */ 2162 2162 get_online_cpus(); 2163 2163 mutex_lock(&cache_chain_mutex);
+11 -9
mm/slub.c
··· 1970 1970 kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; 1971 1971 1972 1972 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); 1973 - static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE; 1973 + static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS); 1974 1974 1975 1975 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s, 1976 1976 int cpu, gfp_t flags) ··· 2045 2045 { 2046 2046 int i; 2047 2047 2048 - if (cpu_isset(cpu, kmem_cach_cpu_free_init_once)) 2048 + if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once))) 2049 2049 return; 2050 2050 2051 2051 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--) 2052 2052 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu); 2053 2053 2054 - cpu_set(cpu, kmem_cach_cpu_free_init_once); 2054 + cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)); 2055 2055 } 2056 2056 2057 2057 static void __init init_alloc_cpu(void) ··· 3451 3451 long max_time; 3452 3452 long min_pid; 3453 3453 long max_pid; 3454 - cpumask_t cpus; 3454 + DECLARE_BITMAP(cpus, NR_CPUS); 3455 3455 nodemask_t nodes; 3456 3456 }; 3457 3457 ··· 3526 3526 if (track->pid > l->max_pid) 3527 3527 l->max_pid = track->pid; 3528 3528 3529 - cpu_set(track->cpu, l->cpus); 3529 + cpumask_set_cpu(track->cpu, 3530 + to_cpumask(l->cpus)); 3530 3531 } 3531 3532 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3532 3533 return 1; ··· 3557 3556 l->max_time = age; 3558 3557 l->min_pid = track->pid; 3559 3558 l->max_pid = track->pid; 3560 - cpus_clear(l->cpus); 3561 - cpu_set(track->cpu, l->cpus); 3559 + cpumask_clear(to_cpumask(l->cpus)); 3560 + cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 3562 3561 nodes_clear(l->nodes); 3563 3562 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3564 3563 return 1; ··· 3639 3638 len += sprintf(buf + len, " pid=%ld", 3640 3639 l->min_pid); 3641 3640 3642 - if (num_online_cpus() > 1 && !cpus_empty(l->cpus) && 3641 + if (num_online_cpus() > 1 && 3642 + !cpumask_empty(to_cpumask(l->cpus)) && 3643 3643 len < PAGE_SIZE - 60) { 3644 3644 len += sprintf(buf + len, " cpus="); 3645 3645 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3646 - &l->cpus); 3646 + to_cpumask(l->cpus)); 3647 3647 } 3648 3648 3649 3649 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
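
slub's struct location keeps a fixed DECLARE_BITMAP() inside the structure (rather than a cpumask_var_t) and prints it with cpulist_scnprintf(). Sketch of that combination with a made-up stats struct:

    #include <linux/cpumask.h>

    struct my_stats {
            DECLARE_BITMAP(cpus, NR_CPUS);  /* fixed size, embedded in the object */
    };

    static void my_stats_record(struct my_stats *s, int cpu)
    {
            cpumask_set_cpu(cpu, to_cpumask(s->cpus));
    }

    static int my_stats_show(struct my_stats *s, char *buf, int len)
    {
            /* Emits a "0-3,8"-style CPU list. */
            return cpulist_scnprintf(buf, len, to_cpumask(s->cpus));
    }
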
+2 -2
mm/vmscan.c
··· 1902 1902 }; 1903 1903 node_to_cpumask_ptr(cpumask, pgdat->node_id); 1904 1904 1905 - if (!cpus_empty(*cpumask)) 1905 + if (!cpumask_empty(cpumask)) 1906 1906 set_cpus_allowed_ptr(tsk, cpumask); 1907 1907 current->reclaim_state = &reclaim_state; 1908 1908 ··· 2141 2141 pg_data_t *pgdat = NODE_DATA(nid); 2142 2142 node_to_cpumask_ptr(mask, pgdat->node_id); 2143 2143 2144 - if (any_online_cpu(*mask) < nr_cpu_ids) 2144 + if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 2145 2145 /* One of our CPUs online: restore mask */ 2146 2146 set_cpus_allowed_ptr(pgdat->kswapd, mask); 2147 2147 }
+2 -2
mm/vmstat.c
··· 20 20 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; 21 21 EXPORT_PER_CPU_SYMBOL(vm_event_states); 22 22 23 - static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask) 23 + static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask) 24 24 { 25 25 int cpu; 26 26 int i; ··· 43 43 void all_vm_events(unsigned long *ret) 44 44 { 45 45 get_online_cpus(); 46 - sum_vm_events(ret, &cpu_online_map); 46 + sum_vm_events(ret, cpu_online_mask); 47 47 put_online_cpus(); 48 48 } 49 49 EXPORT_SYMBOL_GPL(all_vm_events);
+1 -1
security/selinux/selinuxfs.c
··· 1211 1211 { 1212 1212 int cpu; 1213 1213 1214 - for (cpu = *idx; cpu < NR_CPUS; ++cpu) { 1214 + for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) { 1215 1215 if (!cpu_possible(cpu)) 1216 1216 continue; 1217 1217 *idx = cpu + 1;