Merge branch 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (77 commits)
x86: setup_per_cpu_areas() cleanup
cpumask: fix compile error when CONFIG_NR_CPUS is not defined
cpumask: use alloc_cpumask_var_node where appropriate
cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t
x86: use cpumask_var_t in acpi/boot.c
x86: cleanup some remaining usages of NR_CPUS where s/b nr_cpu_ids
sched: put back some stack hog changes that were undone in kernel/sched.c
x86: enable cpus display of kernel_max and offlined cpus
ia64: cpumask fix for is_affinity_mask_valid()
cpumask: convert RCU implementations, fix
xtensa: define __fls
mn10300: define __fls
m32r: define __fls
h8300: define __fls
frv: define __fls
cris: define __fls
cpumask: CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
cpumask: zero extra bits in alloc_cpumask_var_node
cpumask: replace for_each_cpu_mask_nr with for_each_cpu in kernel/time/
cpumask: convert mm/
...

+1286 -879
+48
Documentation/cputopology.txt
···
 31  2) core_id: 0
 32  3) thread_siblings: just the given CPU
 33  4) core_siblings: just the given CPU
···
 31  2) core_id: 0
 32  3) thread_siblings: just the given CPU
 33  4) core_siblings: just the given CPU
 34  +
 35  + Additionally, cpu topology information is provided under
 36  + /sys/devices/system/cpu and includes these files. The internal
 37  + source for the output is in brackets ("[]").
 38  +
 39  + kernel_max: the maximum cpu index allowed by the kernel configuration.
 40  +     [NR_CPUS-1]
 41  +
 42  + offline: cpus that are not online because they have been
 43  +     HOTPLUGGED off (see cpu-hotplug.txt) or exceed the limit
 44  +     of cpus allowed by the kernel configuration (kernel_max
 45  +     above). [~cpu_online_mask + cpus >= NR_CPUS]
 46  +
 47  + online: cpus that are online and being scheduled [cpu_online_mask]
 48  +
 49  + possible: cpus that have been allocated resources and can be
 50  +     brought online if they are present. [cpu_possible_mask]
 51  +
 52  + present: cpus that have been identified as being present in the
 53  +     system. [cpu_present_mask]
 54  +
 55  + The format for the above output is compatible with cpulist_parse()
 56  + [see <linux/cpumask.h>]. Some examples follow.
 57  +
 58  + In this example, there are 64 cpus in the system but cpus 32-63 exceed
 59  + the kernel max which is limited to 0..31 by the NR_CPUS config option
 60  + being 32. Note also that cpus 2 and 4-31 are not online but could be
 61  + brought online as they are both present and possible.
 62  +
 63  +     kernel_max: 31
 64  +     offline: 2,4-31,32-63
 65  +     online: 0-1,3
 66  +     possible: 0-31
 67  +     present: 0-31
 68  +
 69  + In this example, the NR_CPUS config option is 128, but the kernel was
 70  + started with possible_cpus=144. There are 4 cpus in the system and cpu2
 71  + was manually taken offline (and is the only cpu that can be brought
 72  + online.)
 73  +
 74  +     kernel_max: 127
 75  +     offline: 2,4-127,128-143
 76  +     online: 0-1,3
 77  +     possible: 0-127
 78  +     present: 0-3
 79  +
 80  + See cpu-hotplug.txt for the possible_cpus=NUM kernel start parameter
 81  + as well as more information on the various cpumask's.
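
For a quick look at the files documented above, a minimal userspace sketch (not part of this merge; it only assumes the /sys/devices/system/cpu paths described in the text) can dump each mask:

    /* Minimal userspace sketch: dump the cpu topology masks listed above. */
    #include <stdio.h>

    static void show(const char *name)
    {
        char path[128], buf[256];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/devices/system/cpu/%s", name);
        f = fopen(path, "r");
        if (!f)
            return;    /* file absent on kernels without this support */
        if (fgets(buf, sizeof(buf), f))
            printf("%-10s %s", name, buf);    /* buf keeps its newline */
        fclose(f);
    }

    int main(void)
    {
        show("kernel_max");
        show("offline");
        show("online");
        show("possible");
        show("present");
        return 0;
    }

The values print in the same cpulist format used in the examples above.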
+17
arch/alpha/include/asm/topology.h
··· 39 return node_cpu_mask; 40 } 41 42 #define pcibus_to_cpumask(bus) (cpu_online_map) 43 44 #endif /* !CONFIG_NUMA */ 45 # include <asm-generic/topology.h>
··· 39 return node_cpu_mask; 40 } 41 42 + extern struct cpumask node_to_cpumask_map[]; 43 + /* FIXME: This is dumb, recalculating every time. But simple. */ 44 + static const struct cpumask *cpumask_of_node(int node) 45 + { 46 + int cpu; 47 + 48 + cpumask_clear(&node_to_cpumask_map[node]); 49 + 50 + for_each_online_cpu(cpu) { 51 + if (cpu_to_node(cpu) == node) 52 + cpumask_set_cpu(cpu, node_to_cpumask_map[node]); 53 + } 54 + 55 + return &node_to_cpumask_map[node]; 56 + } 57 + 58 #define pcibus_to_cpumask(bus) (cpu_online_map) 59 + #define cpumask_of_pcibus(bus) (cpu_online_mask) 60 61 #endif /* !CONFIG_NUMA */ 62 # include <asm-generic/topology.h>
+2 -1
arch/alpha/kernel/irq.c
··· 50 if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq]) 51 return 1; 52 53 - while (!cpu_possible(cpu) || !cpu_isset(cpu, irq_default_affinity)) 54 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); 55 last_cpu = cpu; 56
··· 50 if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq]) 51 return 1; 52 53 + while (!cpu_possible(cpu) || 54 + !cpumask_test_cpu(cpu, irq_default_affinity)) 55 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); 56 last_cpu = cpu; 57
+5
arch/alpha/kernel/setup.c
··· 79 unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON; 80 #endif 81 82 /* Which processor we booted from. */ 83 int boot_cpuid; 84
··· 79 unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON; 80 #endif 81 82 + #ifdef CONFIG_NUMA 83 + struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly; 84 + EXPORT_SYMBOL(node_to_cpumask_map); 85 + #endif 86 + 87 /* Which processor we booted from. */ 88 int boot_cpuid; 89
+5
arch/avr32/include/asm/bitops.h
··· 263 return 32 - result; 264 } 265 266 unsigned long find_first_zero_bit(const unsigned long *addr, 267 unsigned long size); 268 unsigned long find_next_zero_bit(const unsigned long *addr,
··· 263 return 32 - result; 264 } 265 266 + static inline int __fls(unsigned long word) 267 + { 268 + return fls(word) - 1; 269 + } 270 + 271 unsigned long find_first_zero_bit(const unsigned long *addr, 272 unsigned long size); 273 unsigned long find_next_zero_bit(const unsigned long *addr,
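
Since several architectures gain __fls() in this series (avr32 above, plus cris, frv, h8300, m32r, mn10300, xtensa and others via the generic header), a small hosted sketch of the expected semantics may help; sketch_fls() is an invented stand-in, and __fls() itself is undefined for a zero argument:

    /* sketch_fls() is a hypothetical stand-in with __fls() semantics:
     * the bit number of the last (most significant) set bit. */
    #include <assert.h>

    static int sketch_fls(unsigned long word)
    {
        int bit = -1;

        while (word) {
            word >>= 1;
            bit++;
        }
        return bit;    /* matches __fls() for any non-zero input */
    }

    int main(void)
    {
        assert(sketch_fls(0x00000001UL) == 0);
        assert(sketch_fls(0x00008000UL) == 15);
        assert(sketch_fls(0x80000000UL) == 31);    /* i.e. fls(word) - 1 */
        return 0;
    }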
+1
arch/blackfin/include/asm/bitops.h
··· 213 #endif /* __KERNEL__ */ 214 215 #include <asm-generic/bitops/fls.h> 216 #include <asm-generic/bitops/fls64.h> 217 218 #endif /* _BLACKFIN_BITOPS_H */
··· 213 #endif /* __KERNEL__ */ 214 215 #include <asm-generic/bitops/fls.h> 216 + #include <asm-generic/bitops/__fls.h> 217 #include <asm-generic/bitops/fls64.h> 218 219 #endif /* _BLACKFIN_BITOPS_H */
+1
arch/cris/include/asm/bitops.h
··· 148 #define ffs kernel_ffs 149 150 #include <asm-generic/bitops/fls.h> 151 #include <asm-generic/bitops/fls64.h> 152 #include <asm-generic/bitops/hweight.h> 153 #include <asm-generic/bitops/find.h>
··· 148 #define ffs kernel_ffs 149 150 #include <asm-generic/bitops/fls.h> 151 + #include <asm-generic/bitops/__fls.h> 152 #include <asm-generic/bitops/fls64.h> 153 #include <asm-generic/bitops/hweight.h> 154 #include <asm-generic/bitops/find.h>
+1
arch/h8300/include/asm/bitops.h
··· 207 #endif /* __KERNEL__ */ 208 209 #include <asm-generic/bitops/fls.h> 210 #include <asm-generic/bitops/fls64.h> 211 212 #endif /* _H8300_BITOPS_H */
··· 207 #endif /* __KERNEL__ */ 208 209 #include <asm-generic/bitops/fls.h> 210 + #include <asm-generic/bitops/__fls.h> 211 #include <asm-generic/bitops/fls64.h> 212 213 #endif /* _H8300_BITOPS_H */
+1 -1
arch/ia64/include/asm/irq.h
··· 27 } 28 29 extern void set_irq_affinity_info (unsigned int irq, int dest, int redir); 30 - bool is_affinity_mask_valid(cpumask_t cpumask); 31 32 #define is_affinity_mask_valid is_affinity_mask_valid 33
··· 27 } 28 29 extern void set_irq_affinity_info (unsigned int irq, int dest, int redir); 30 + bool is_affinity_mask_valid(cpumask_var_t cpumask); 31 32 #define is_affinity_mask_valid is_affinity_mask_valid 33
+8 -1
arch/ia64/include/asm/topology.h
··· 34 * Returns a bitmask of CPUs on Node 'node'. 35 */ 36 #define node_to_cpumask(node) (node_to_cpu_mask[node]) 37 38 /* 39 * Returns the number of the node containing Node 'nid'. ··· 46 /* 47 * Returns the number of the first CPU on Node 'node'. 48 */ 49 - #define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node))) 50 51 /* 52 * Determines the node for a given pci bus ··· 110 #define topology_core_id(cpu) (cpu_data(cpu)->core_id) 111 #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 112 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 113 #define smt_capable() (smp_num_siblings > 1) 114 #endif 115 ··· 121 CPU_MASK_ALL : \ 122 node_to_cpumask(pcibus_to_node(bus)) \ 123 ) 124 125 #include <asm-generic/topology.h> 126
··· 34 * Returns a bitmask of CPUs on Node 'node'. 35 */ 36 #define node_to_cpumask(node) (node_to_cpu_mask[node]) 37 + #define cpumask_of_node(node) (&node_to_cpu_mask[node]) 38 39 /* 40 * Returns the number of the node containing Node 'nid'. ··· 45 /* 46 * Returns the number of the first CPU on Node 'node'. 47 */ 48 + #define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node))) 49 50 /* 51 * Determines the node for a given pci bus ··· 109 #define topology_core_id(cpu) (cpu_data(cpu)->core_id) 110 #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 111 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 112 + #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 113 + #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 114 #define smt_capable() (smp_num_siblings > 1) 115 #endif 116 ··· 118 CPU_MASK_ALL : \ 119 node_to_cpumask(pcibus_to_node(bus)) \ 120 ) 121 + 122 + #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ 123 + cpu_all_mask : \ 124 + cpumask_from_node(pcibus_to_node(bus))) 125 126 #include <asm-generic/topology.h> 127
+1 -2
arch/ia64/kernel/acpi.c
··· 202 Boot-time Table Parsing 203 -------------------------------------------------------------------------- */ 204 205 - static int total_cpus __initdata; 206 static int available_cpus __initdata; 207 struct acpi_table_madt *acpi_madt __initdata; 208 static u8 has_8259; ··· 1000 node = pxm_to_node(pxm); 1001 1002 if (node >= MAX_NUMNODES || !node_online(node) || 1003 - cpus_empty(node_to_cpumask(node))) 1004 return AE_OK; 1005 1006 /* We know a gsi to node mapping! */
··· 202 Boot-time Table Parsing 203 -------------------------------------------------------------------------- */ 204 205 static int available_cpus __initdata; 206 struct acpi_table_madt *acpi_madt __initdata; 207 static u8 has_8259; ··· 1001 node = pxm_to_node(pxm); 1002 1003 if (node >= MAX_NUMNODES || !node_online(node) || 1004 + cpumask_empty(cpumask_of_node(node))) 1005 return AE_OK; 1006 1007 /* We know a gsi to node mapping! */
+11 -12
arch/ia64/kernel/iosapic.c
··· 695 #ifdef CONFIG_NUMA 696 { 697 int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0; 698 - cpumask_t cpu_mask; 699 700 iosapic_index = find_iosapic(gsi); 701 if (iosapic_index < 0 || 702 iosapic_lists[iosapic_index].node == MAX_NUMNODES) 703 goto skip_numa_setup; 704 705 - cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node); 706 - cpus_and(cpu_mask, cpu_mask, domain); 707 - for_each_cpu_mask(numa_cpu, cpu_mask) { 708 - if (!cpu_online(numa_cpu)) 709 - cpu_clear(numa_cpu, cpu_mask); 710 } 711 - 712 - num_cpus = cpus_weight(cpu_mask); 713 714 if (!num_cpus) 715 goto skip_numa_setup; ··· 715 /* Use irq assignment to distribute across cpus in node */ 716 cpu_index = irq % num_cpus; 717 718 - for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++) 719 - numa_cpu = next_cpu(numa_cpu, cpu_mask); 720 721 - if (numa_cpu != NR_CPUS) 722 return cpu_physical_id(numa_cpu); 723 } 724 skip_numa_setup: ··· 730 * case of NUMA.) 731 */ 732 do { 733 - if (++cpu >= NR_CPUS) 734 cpu = 0; 735 } while (!cpu_online(cpu) || !cpu_isset(cpu, domain)); 736
··· 695 #ifdef CONFIG_NUMA 696 { 697 int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0; 698 + const struct cpumask *cpu_mask; 699 700 iosapic_index = find_iosapic(gsi); 701 if (iosapic_index < 0 || 702 iosapic_lists[iosapic_index].node == MAX_NUMNODES) 703 goto skip_numa_setup; 704 705 + cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node); 706 + num_cpus = 0; 707 + for_each_cpu_and(numa_cpu, cpu_mask, &domain) { 708 + if (cpu_online(numa_cpu)) 709 + num_cpus++; 710 } 711 712 if (!num_cpus) 713 goto skip_numa_setup; ··· 717 /* Use irq assignment to distribute across cpus in node */ 718 cpu_index = irq % num_cpus; 719 720 + for_each_cpu_and(numa_cpu, cpu_mask, &domain) 721 + if (cpu_online(numa_cpu) && i++ >= cpu_index) 722 + break; 723 724 + if (numa_cpu < nr_cpu_ids) 725 return cpu_physical_id(numa_cpu); 726 } 727 skip_numa_setup: ··· 731 * case of NUMA.) 732 */ 733 do { 734 + if (++cpu >= nr_cpu_ids) 735 cpu = 0; 736 } while (!cpu_online(cpu) || !cpu_isset(cpu, domain)); 737
+2 -2
arch/ia64/kernel/irq.c
··· 112 } 113 } 114 115 - bool is_affinity_mask_valid(cpumask_t cpumask) 116 { 117 if (ia64_platform_is("sn2")) { 118 /* Only allow one CPU to be specified in the smp_affinity mask */ 119 - if (cpus_weight(cpumask) != 1) 120 return false; 121 } 122 return true;
··· 112 } 113 } 114 115 + bool is_affinity_mask_valid(cpumask_var_t cpumask) 116 { 117 if (ia64_platform_is("sn2")) { 118 /* Only allow one CPU to be specified in the smp_affinity mask */ 119 + if (cpumask_weight(cpumask) != 1) 120 return false; 121 } 122 return true;
+12 -15
arch/ia64/sn/kernel/sn2/sn_hwperf.c
··· 385 int j; 386 const char *slabname; 387 int ordinal; 388 - cpumask_t cpumask; 389 char slice; 390 struct cpuinfo_ia64 *c; 391 struct sn_hwperf_port_info *ptdata; ··· 472 * CPUs on this node, if any 473 */ 474 if (!SN_HWPERF_IS_IONODE(obj)) { 475 - cpumask = node_to_cpumask(ordinal); 476 - for_each_online_cpu(i) { 477 - if (cpu_isset(i, cpumask)) { 478 - slice = 'a' + cpuid_to_slice(i); 479 - c = cpu_data(i); 480 - seq_printf(s, "cpu %d %s%c local" 481 - " freq %luMHz, arch ia64", 482 - i, obj->location, slice, 483 - c->proc_freq / 1000000); 484 - for_each_online_cpu(j) { 485 - seq_printf(s, j ? ":%d" : ", dist %d", 486 - node_distance( 487 cpu_to_node(i), 488 cpu_to_node(j))); 489 - } 490 - seq_putc(s, '\n'); 491 } 492 } 493 } 494 }
··· 385 int j; 386 const char *slabname; 387 int ordinal; 388 char slice; 389 struct cpuinfo_ia64 *c; 390 struct sn_hwperf_port_info *ptdata; ··· 473 * CPUs on this node, if any 474 */ 475 if (!SN_HWPERF_IS_IONODE(obj)) { 476 + for_each_cpu_and(i, cpu_online_mask, 477 + cpumask_of_node(ordinal)) { 478 + slice = 'a' + cpuid_to_slice(i); 479 + c = cpu_data(i); 480 + seq_printf(s, "cpu %d %s%c local" 481 + " freq %luMHz, arch ia64", 482 + i, obj->location, slice, 483 + c->proc_freq / 1000000); 484 + for_each_online_cpu(j) { 485 + seq_printf(s, j ? ":%d" : ", dist %d", 486 + node_distance( 487 cpu_to_node(i), 488 cpu_to_node(j))); 489 } 490 + seq_putc(s, '\n'); 491 } 492 } 493 }
+1 -1
arch/m32r/kernel/smpboot.c
··· 592 * accounting. At that time they also adjust their APIC timers 593 * accordingly. 594 */ 595 - for (i = 0; i < NR_CPUS; ++i) 596 per_cpu(prof_multiplier, i) = multiplier; 597 598 return 0;
··· 592 * accounting. At that time they also adjust their APIC timers 593 * accordingly. 594 */ 595 + for_each_possible_cpu(i) 596 per_cpu(prof_multiplier, i) = multiplier; 597 598 return 0;
+1
arch/m68knommu/include/asm/bitops.h
··· 331 #endif /* __KERNEL__ */ 332 333 #include <asm-generic/bitops/fls.h> 334 #include <asm-generic/bitops/fls64.h> 335 336 #endif /* _M68KNOMMU_BITOPS_H */
··· 331 #endif /* __KERNEL__ */ 332 333 #include <asm-generic/bitops/fls.h> 334 + #include <asm-generic/bitops/__fls.h> 335 #include <asm-generic/bitops/fls64.h> 336 337 #endif /* _M68KNOMMU_BITOPS_H */
+3 -1
arch/mips/include/asm/mach-ip27/topology.h
··· 25 #define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid) 26 #define parent_node(node) (node) 27 #define node_to_cpumask(node) (hub_data(node)->h_cpus) 28 - #define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node))) 29 struct pci_bus; 30 extern int pcibus_to_node(struct pci_bus *); 31 32 #define pcibus_to_cpumask(bus) (cpu_online_map) 33 34 extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; 35
··· 25 #define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid) 26 #define parent_node(node) (node) 27 #define node_to_cpumask(node) (hub_data(node)->h_cpus) 28 + #define cpumask_of_node(node) (&hub_data(node)->h_cpus) 29 + #define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node))) 30 struct pci_bus; 31 extern int pcibus_to_node(struct pci_bus *); 32 33 #define pcibus_to_cpumask(bus) (cpu_online_map) 34 + #define cpumask_of_pcibus(bus) (cpu_online_mask) 35 36 extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; 37
-2
arch/parisc/include/asm/smp.h
··· 16 #include <linux/cpumask.h> 17 typedef unsigned long address_t; 18 19 - extern cpumask_t cpu_online_map; 20 - 21 22 /* 23 * Private routines/data
··· 16 #include <linux/cpumask.h> 17 typedef unsigned long address_t; 18 19 20 /* 21 * Private routines/data
+9 -3
arch/powerpc/include/asm/topology.h
··· 22 return numa_cpumask_lookup_table[node]; 23 } 24 25 static inline int node_to_first_cpu(int node) 26 { 27 - cpumask_t tmp; 28 - tmp = node_to_cpumask(node); 29 - return first_cpu(tmp); 30 } 31 32 int of_node_to_nid(struct device_node *device); ··· 45 CPU_MASK_ALL : \ 46 node_to_cpumask(pcibus_to_node(bus)) \ 47 ) 48 49 /* sched_domains SD_NODE_INIT for PPC64 machines */ 50 #define SD_NODE_INIT (struct sched_domain) { \ ··· 112 113 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 114 #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) 115 #define topology_core_id(cpu) (cpu_to_core_id(cpu)) 116 #endif 117 #endif
··· 22 return numa_cpumask_lookup_table[node]; 23 } 24 25 + #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) 26 + 27 static inline int node_to_first_cpu(int node) 28 { 29 + return cpumask_first(cpumask_of_node(node)); 30 } 31 32 int of_node_to_nid(struct device_node *device); ··· 45 CPU_MASK_ALL : \ 46 node_to_cpumask(pcibus_to_node(bus)) \ 47 ) 48 + 49 + #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ 50 + cpu_all_mask : \ 51 + cpumask_of_node(pcibus_to_node(bus))) 52 53 /* sched_domains SD_NODE_INIT for PPC64 machines */ 54 #define SD_NODE_INIT (struct sched_domain) { \ ··· 108 109 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 110 #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) 111 + #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 112 + #define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu)) 113 #define topology_core_id(cpu) (cpu_to_core_id(cpu)) 114 #endif 115 #endif
+3 -3
arch/powerpc/platforms/cell/spu_priv1_mmio.c
··· 80 u64 route; 81 82 if (nr_cpus_node(spu->node)) { 83 - cpumask_t spumask = node_to_cpumask(spu->node); 84 - cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu)); 85 86 - if (!cpus_intersects(spumask, cpumask)) 87 return; 88 } 89
··· 80 u64 route; 81 82 if (nr_cpus_node(spu->node)) { 83 + const struct cpumask *spumask = cpumask_of_node(spu->node), 84 + *cpumask = cpumask_of_node(cpu_to_node(cpu)); 85 86 + if (!cpumask_intersects(spumask, cpumask)) 87 return; 88 } 89
+2 -2
arch/powerpc/platforms/cell/spufs/sched.c
··· 166 static int __node_allowed(struct spu_context *ctx, int node) 167 { 168 if (nr_cpus_node(node)) { 169 - cpumask_t mask = node_to_cpumask(node); 170 171 - if (cpus_intersects(mask, ctx->cpus_allowed)) 172 return 1; 173 } 174
··· 166 static int __node_allowed(struct spu_context *ctx, int node) 167 { 168 if (nr_cpus_node(node)) { 169 + const struct cpumask *mask = cpumask_of_node(node); 170 171 + if (cpumask_intersects(mask, &ctx->cpus_allowed)) 172 return 1; 173 } 174
+2
arch/s390/include/asm/topology.h
··· 6 #define mc_capable() (1) 7 8 cpumask_t cpu_coregroup_map(unsigned int cpu); 9 10 extern cpumask_t cpu_core_map[NR_CPUS]; 11 12 #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 13 14 int topology_set_cpu_management(int fc); 15 void topology_schedule_update(void);
··· 6 #define mc_capable() (1) 7 8 cpumask_t cpu_coregroup_map(unsigned int cpu); 9 + const struct cpumask *cpu_coregroup_mask(unsigned int cpu); 10 11 extern cpumask_t cpu_core_map[NR_CPUS]; 12 13 #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 14 + #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 15 16 int topology_set_cpu_management(int fc); 17 void topology_schedule_update(void);
+5
arch/s390/kernel/topology.c
··· 97 return mask; 98 } 99 100 static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core) 101 { 102 unsigned int cpu;
··· 97 return mask; 98 } 99 100 + const struct cpumask *cpu_coregroup_mask(unsigned int cpu) 101 + { 102 + return &cpu_core_map[cpu]; 103 + } 104 + 105 static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core) 106 { 107 unsigned int cpu;
+1
arch/sh/include/asm/topology.h
··· 32 #define parent_node(node) ((void)(node),0) 33 34 #define node_to_cpumask(node) ((void)node, cpu_online_map) 35 #define node_to_first_cpu(node) ((void)(node),0) 36 37 #define pcibus_to_node(bus) ((void)(bus), -1)
··· 32 #define parent_node(node) ((void)(node),0) 33 34 #define node_to_cpumask(node) ((void)node, cpu_online_map) 35 + #define cpumask_of_node(node) ((void)node, cpu_online_mask) 36 #define node_to_first_cpu(node) ((void)(node),0) 37 38 #define pcibus_to_node(bus) ((void)(bus), -1)
+9 -4
arch/sparc/include/asm/topology_64.h
··· 16 { 17 return numa_cpumask_lookup_table[node]; 18 } 19 20 - /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ 21 #define node_to_cpumask_ptr(v, node) \ 22 cpumask_t *v = &(numa_cpumask_lookup_table[node]) 23 ··· 30 31 static inline int node_to_first_cpu(int node) 32 { 33 - cpumask_t tmp; 34 - tmp = node_to_cpumask(node); 35 - return first_cpu(tmp); 36 } 37 38 struct pci_bus; ··· 79 #define topology_core_id(cpu) (cpu_data(cpu).core_id) 80 #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 81 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 82 #define mc_capable() (sparc64_multi_core) 83 #define smt_capable() (sparc64_multi_core) 84 #endif /* CONFIG_SMP */ 85 86 #define cpu_coregroup_map(cpu) (cpu_core_map[cpu]) 87 88 #endif /* _ASM_SPARC64_TOPOLOGY_H */
··· 16 { 17 return numa_cpumask_lookup_table[node]; 18 } 19 + #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) 20 21 + /* 22 + * Returns a pointer to the cpumask of CPUs on Node 'node'. 23 + * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 24 + */ 25 #define node_to_cpumask_ptr(v, node) \ 26 cpumask_t *v = &(numa_cpumask_lookup_table[node]) 27 ··· 26 27 static inline int node_to_first_cpu(int node) 28 { 29 + return cpumask_first(cpumask_of_node(node)); 30 } 31 32 struct pci_bus; ··· 77 #define topology_core_id(cpu) (cpu_data(cpu).core_id) 78 #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 79 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 80 + #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 81 + #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 82 #define mc_capable() (sparc64_multi_core) 83 #define smt_capable() (sparc64_multi_core) 84 #endif /* CONFIG_SMP */ 85 86 #define cpu_coregroup_map(cpu) (cpu_core_map[cpu]) 87 + #define cpu_coregroup_mask(cpu) (&cpu_core_map[cpu]) 88 89 #endif /* _ASM_SPARC64_TOPOLOGY_H */
+1 -1
arch/sparc/kernel/of_device_64.c
··· 778 out: 779 nid = of_node_to_nid(dp); 780 if (nid != -1) { 781 - cpumask_t numa_mask = node_to_cpumask(nid); 782 783 irq_set_affinity(irq, &numa_mask); 784 }
··· 778 out: 779 nid = of_node_to_nid(dp); 780 if (nid != -1) { 781 + cpumask_t numa_mask = *cpumask_of_node(nid); 782 783 irq_set_affinity(irq, &numa_mask); 784 }
+1 -1
arch/sparc/kernel/pci_msi.c
··· 286 287 nid = pbm->numa_node; 288 if (nid != -1) { 289 - cpumask_t numa_mask = node_to_cpumask(nid); 290 291 irq_set_affinity(irq, &numa_mask); 292 }
··· 286 287 nid = pbm->numa_node; 288 if (nid != -1) { 289 + cpumask_t numa_mask = *cpumask_of_node(nid); 290 291 irq_set_affinity(irq, &numa_mask); 292 }
+3 -29
arch/x86/include/asm/es7000/apic.h
··· 157 158 num_bits_set = cpumask_weight(cpumask); 159 /* Return id to all */ 160 - if (num_bits_set == NR_CPUS) 161 return 0xFF; 162 /* 163 * The cpus in the mask must all be on the apic cluster. If are not ··· 190 191 num_bits_set = cpus_weight(*cpumask); 192 /* Return id to all */ 193 - if (num_bits_set == NR_CPUS) 194 return cpu_to_logical_apicid(0); 195 /* 196 * The cpus in the mask must all be on the apic cluster. If are not ··· 218 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, 219 const struct cpumask *andmask) 220 { 221 - int num_bits_set; 222 - int cpus_found = 0; 223 - int cpu; 224 int apicid = cpu_to_logical_apicid(0); 225 cpumask_var_t cpumask; 226 ··· 226 227 cpumask_and(cpumask, inmask, andmask); 228 cpumask_and(cpumask, cpumask, cpu_online_mask); 229 230 - num_bits_set = cpumask_weight(cpumask); 231 - /* Return id to all */ 232 - if (num_bits_set == NR_CPUS) 233 - goto exit; 234 - /* 235 - * The cpus in the mask must all be on the apic cluster. If are not 236 - * on the same apicid cluster return default value of TARGET_CPUS. 237 - */ 238 - cpu = cpumask_first(cpumask); 239 - apicid = cpu_to_logical_apicid(cpu); 240 - while (cpus_found < num_bits_set) { 241 - if (cpumask_test_cpu(cpu, cpumask)) { 242 - int new_apicid = cpu_to_logical_apicid(cpu); 243 - if (apicid_cluster(apicid) != 244 - apicid_cluster(new_apicid)){ 245 - printk ("%s: Not a valid mask!\n", __func__); 246 - return cpu_to_logical_apicid(0); 247 - } 248 - apicid = new_apicid; 249 - cpus_found++; 250 - } 251 - cpu++; 252 - } 253 - exit: 254 free_cpumask_var(cpumask); 255 return apicid; 256 }
··· 157 158 num_bits_set = cpumask_weight(cpumask); 159 /* Return id to all */ 160 + if (num_bits_set == nr_cpu_ids) 161 return 0xFF; 162 /* 163 * The cpus in the mask must all be on the apic cluster. If are not ··· 190 191 num_bits_set = cpus_weight(*cpumask); 192 /* Return id to all */ 193 + if (num_bits_set == nr_cpu_ids) 194 return cpu_to_logical_apicid(0); 195 /* 196 * The cpus in the mask must all be on the apic cluster. If are not ··· 218 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, 219 const struct cpumask *andmask) 220 { 221 int apicid = cpu_to_logical_apicid(0); 222 cpumask_var_t cpumask; 223 ··· 229 230 cpumask_and(cpumask, inmask, andmask); 231 cpumask_and(cpumask, cpumask, cpu_online_mask); 232 + apicid = cpu_mask_to_apicid(cpumask); 233 234 free_cpumask_var(cpumask); 235 return apicid; 236 }
+1 -1
arch/x86/include/asm/lguest.h
··· 15 #define SHARED_SWITCHER_PAGES \ 16 DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE) 17 /* Pages for switcher itself, then two pages per cpu */ 18 - #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS) 19 20 /* We map at -4M for ease of mapping into the guest (one PTE page). */ 21 #define SWITCHER_ADDR 0xFFC00000
··· 15 #define SHARED_SWITCHER_PAGES \ 16 DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE) 17 /* Pages for switcher itself, then two pages per cpu */ 18 + #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids) 19 20 /* We map at -4M for ease of mapping into the guest (one PTE page). */ 21 #define SWITCHER_ADDR 0xFFC00000
+2 -2
arch/x86/include/asm/numaq/apic.h
··· 63 extern u8 cpu_2_logical_apicid[]; 64 static inline int cpu_to_logical_apicid(int cpu) 65 { 66 - if (cpu >= NR_CPUS) 67 - return BAD_APICID; 68 return (int)cpu_2_logical_apicid[cpu]; 69 } 70
··· 63 extern u8 cpu_2_logical_apicid[]; 64 static inline int cpu_to_logical_apicid(int cpu) 65 { 66 + if (cpu >= nr_cpu_ids) 67 + return BAD_APICID; 68 return (int)cpu_2_logical_apicid[cpu]; 69 } 70
+8 -2
arch/x86/include/asm/pci.h
··· 102 103 #ifdef CONFIG_NUMA 104 /* Returns the node based on pci bus */ 105 - static inline int __pcibus_to_node(struct pci_bus *bus) 106 { 107 - struct pci_sysdata *sd = bus->sysdata; 108 109 return sd->node; 110 } ··· 112 static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus) 113 { 114 return node_to_cpumask(__pcibus_to_node(bus)); 115 } 116 #endif 117
··· 102 103 #ifdef CONFIG_NUMA 104 /* Returns the node based on pci bus */ 105 + static inline int __pcibus_to_node(const struct pci_bus *bus) 106 { 107 + const struct pci_sysdata *sd = bus->sysdata; 108 109 return sd->node; 110 } ··· 112 static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus) 113 { 114 return node_to_cpumask(__pcibus_to_node(bus)); 115 + } 116 + 117 + static inline const struct cpumask * 118 + cpumask_of_pcibus(const struct pci_bus *bus) 119 + { 120 + return cpumask_of_node(__pcibus_to_node(bus)); 121 } 122 #endif 123
+8 -34
arch/x86/include/asm/summit/apic.h
··· 52 int i; 53 54 /* Create logical APIC IDs by counting CPUs already in cluster. */ 55 - for (count = 0, i = NR_CPUS; --i >= 0; ) { 56 lid = cpu_2_logical_apicid[i]; 57 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) 58 ++count; ··· 97 static inline int cpu_to_logical_apicid(int cpu) 98 { 99 #ifdef CONFIG_SMP 100 - if (cpu >= NR_CPUS) 101 - return BAD_APICID; 102 return (int)cpu_2_logical_apicid[cpu]; 103 #else 104 return logical_smp_processor_id(); ··· 107 108 static inline int cpu_present_to_apicid(int mps_cpu) 109 { 110 - if (mps_cpu < NR_CPUS) 111 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); 112 else 113 return BAD_APICID; ··· 146 147 num_bits_set = cpus_weight(*cpumask); 148 /* Return id to all */ 149 - if (num_bits_set == NR_CPUS) 150 return (int) 0xFF; 151 /* 152 * The cpus in the mask must all be on the apic cluster. If are not ··· 173 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, 174 const struct cpumask *andmask) 175 { 176 - int num_bits_set; 177 - int cpus_found = 0; 178 - int cpu; 179 - int apicid = 0xFF; 180 cpumask_var_t cpumask; 181 182 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) 183 - return (int) 0xFF; 184 185 cpumask_and(cpumask, inmask, andmask); 186 cpumask_and(cpumask, cpumask, cpu_online_mask); 187 188 - num_bits_set = cpumask_weight(cpumask); 189 - /* Return id to all */ 190 - if (num_bits_set == nr_cpu_ids) 191 - goto exit; 192 - /* 193 - * The cpus in the mask must all be on the apic cluster. If are not 194 - * on the same apicid cluster return default value of TARGET_CPUS. 195 - */ 196 - cpu = cpumask_first(cpumask); 197 - apicid = cpu_to_logical_apicid(cpu); 198 - while (cpus_found < num_bits_set) { 199 - if (cpumask_test_cpu(cpu, cpumask)) { 200 - int new_apicid = cpu_to_logical_apicid(cpu); 201 - if (apicid_cluster(apicid) != 202 - apicid_cluster(new_apicid)){ 203 - printk ("%s: Not a valid mask!\n", __func__); 204 - return 0xFF; 205 - } 206 - apicid = apicid | new_apicid; 207 - cpus_found++; 208 - } 209 - cpu++; 210 - } 211 - exit: 212 free_cpumask_var(cpumask); 213 return apicid; 214 }
··· 52 int i; 53 54 /* Create logical APIC IDs by counting CPUs already in cluster. */ 55 + for (count = 0, i = nr_cpu_ids; --i >= 0; ) { 56 lid = cpu_2_logical_apicid[i]; 57 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) 58 ++count; ··· 97 static inline int cpu_to_logical_apicid(int cpu) 98 { 99 #ifdef CONFIG_SMP 100 + if (cpu >= nr_cpu_ids) 101 + return BAD_APICID; 102 return (int)cpu_2_logical_apicid[cpu]; 103 #else 104 return logical_smp_processor_id(); ··· 107 108 static inline int cpu_present_to_apicid(int mps_cpu) 109 { 110 + if (mps_cpu < nr_cpu_ids) 111 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); 112 else 113 return BAD_APICID; ··· 146 147 num_bits_set = cpus_weight(*cpumask); 148 /* Return id to all */ 149 + if (num_bits_set >= nr_cpu_ids) 150 return (int) 0xFF; 151 /* 152 * The cpus in the mask must all be on the apic cluster. If are not ··· 173 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, 174 const struct cpumask *andmask) 175 { 176 + int apicid = cpu_to_logical_apicid(0); 177 cpumask_var_t cpumask; 178 179 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) 180 + return apicid; 181 182 cpumask_and(cpumask, inmask, andmask); 183 cpumask_and(cpumask, cpumask, cpu_online_mask); 184 + apicid = cpu_mask_to_apicid(cpumask); 185 186 free_cpumask_var(cpumask); 187 return apicid; 188 }
+24 -12
arch/x86/include/asm/topology.h
··· 61 * 62 * Side note: this function creates the returned cpumask on the stack 63 * so with a high NR_CPUS count, excessive stack space is used. The 64 - * node_to_cpumask_ptr function should be used whenever possible. 65 */ 66 static inline cpumask_t node_to_cpumask(int node) 67 { 68 return node_to_cpumask_map[node]; 69 } 70 71 #else /* CONFIG_X86_64 */ ··· 88 #ifdef CONFIG_DEBUG_PER_CPU_MAPS 89 extern int cpu_to_node(int cpu); 90 extern int early_cpu_to_node(int cpu); 91 - extern const cpumask_t *_node_to_cpumask_ptr(int node); 92 extern cpumask_t node_to_cpumask(int node); 93 94 #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ ··· 109 } 110 111 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ 112 - static inline const cpumask_t *_node_to_cpumask_ptr(int node) 113 { 114 return &node_to_cpumask_map[node]; 115 } ··· 122 123 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ 124 125 - /* Replace default node_to_cpumask_ptr with optimized version */ 126 #define node_to_cpumask_ptr(v, node) \ 127 - const cpumask_t *v = _node_to_cpumask_ptr(node) 128 129 #define node_to_cpumask_ptr_next(v, node) \ 130 - v = _node_to_cpumask_ptr(node) 131 132 #endif /* CONFIG_X86_64 */ 133 ··· 196 #define cpu_to_node(cpu) 0 197 #define early_cpu_to_node(cpu) 0 198 199 - static inline const cpumask_t *_node_to_cpumask_ptr(int node) 200 { 201 return &cpu_online_map; 202 } ··· 209 return first_cpu(cpu_online_map); 210 } 211 212 - /* Replace default node_to_cpumask_ptr with optimized version */ 213 #define node_to_cpumask_ptr(v, node) \ 214 - const cpumask_t *v = _node_to_cpumask_ptr(node) 215 216 #define node_to_cpumask_ptr_next(v, node) \ 217 - v = _node_to_cpumask_ptr(node) 218 #endif 219 220 #include <asm-generic/topology.h> ··· 226 /* Returns the number of the first CPU on Node 'node'. */ 227 static inline int node_to_first_cpu(int node) 228 { 229 - node_to_cpumask_ptr(mask, node); 230 - return first_cpu(*mask); 231 } 232 #endif 233 234 extern cpumask_t cpu_coregroup_map(int cpu); 235 236 #ifdef ENABLE_TOPO_DEFINES 237 #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
··· 61 * 62 * Side note: this function creates the returned cpumask on the stack 63 * so with a high NR_CPUS count, excessive stack space is used. The 64 + * cpumask_of_node function should be used whenever possible. 65 */ 66 static inline cpumask_t node_to_cpumask(int node) 67 { 68 return node_to_cpumask_map[node]; 69 + } 70 + 71 + /* Returns a bitmask of CPUs on Node 'node'. */ 72 + static inline const struct cpumask *cpumask_of_node(int node) 73 + { 74 + return &node_to_cpumask_map[node]; 75 } 76 77 #else /* CONFIG_X86_64 */ ··· 82 #ifdef CONFIG_DEBUG_PER_CPU_MAPS 83 extern int cpu_to_node(int cpu); 84 extern int early_cpu_to_node(int cpu); 85 + extern const cpumask_t *cpumask_of_node(int node); 86 extern cpumask_t node_to_cpumask(int node); 87 88 #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ ··· 103 } 104 105 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ 106 + static inline const cpumask_t *cpumask_of_node(int node) 107 { 108 return &node_to_cpumask_map[node]; 109 } ··· 116 117 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ 118 119 + /* 120 + * Replace default node_to_cpumask_ptr with optimized version 121 + * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 122 + */ 123 #define node_to_cpumask_ptr(v, node) \ 124 + const cpumask_t *v = cpumask_of_node(node) 125 126 #define node_to_cpumask_ptr_next(v, node) \ 127 + v = cpumask_of_node(node) 128 129 #endif /* CONFIG_X86_64 */ 130 ··· 187 #define cpu_to_node(cpu) 0 188 #define early_cpu_to_node(cpu) 0 189 190 + static inline const cpumask_t *cpumask_of_node(int node) 191 { 192 return &cpu_online_map; 193 } ··· 200 return first_cpu(cpu_online_map); 201 } 202 203 + /* 204 + * Replace default node_to_cpumask_ptr with optimized version 205 + * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 206 + */ 207 #define node_to_cpumask_ptr(v, node) \ 208 + const cpumask_t *v = cpumask_of_node(node) 209 210 #define node_to_cpumask_ptr_next(v, node) \ 211 + v = cpumask_of_node(node) 212 #endif 213 214 #include <asm-generic/topology.h> ··· 214 /* Returns the number of the first CPU on Node 'node'. */ 215 static inline int node_to_first_cpu(int node) 216 { 217 + return cpumask_first(cpumask_of_node(node)); 218 } 219 #endif 220 221 extern cpumask_t cpu_coregroup_map(int cpu); 222 + extern const struct cpumask *cpu_coregroup_mask(int cpu); 223 224 #ifdef ENABLE_TOPO_DEFINES 225 #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
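
The deprecation notes above describe the conversion applied throughout this merge: take a pointer from cpumask_of_node() instead of copying a cpumask_t onto the stack or using node_to_cpumask_ptr(). A hedged sketch of the before/after idiom (print_node_cpus_old/new are invented names, not functions from the tree):

    /* Old idiom: per-node mask referenced via the deprecated helper macro. */
    static void print_node_cpus_old(int node)
    {
        node_to_cpumask_ptr(mask, node);    /* deprecated */
        int cpu;

        for_each_cpu_mask_nr(cpu, *mask)
            printk(KERN_DEBUG "node %d has cpu %d\n", node, cpu);
    }

    /* New idiom: plain pointer, no NR_CPUS-sized copy on the stack. */
    static void print_node_cpus_new(int node)
    {
        const struct cpumask *mask = cpumask_of_node(node);
        int cpu;

        for_each_cpu(cpu, mask)
            printk(KERN_DEBUG "node %d has cpu %d\n", node, cpu);
    }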
+23 -8
arch/x86/kernel/acpi/boot.c
··· 538 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 539 union acpi_object *obj; 540 struct acpi_madt_local_apic *lapic; 541 - cpumask_t tmp_map, new_map; 542 u8 physid; 543 int cpu; 544 545 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) 546 return -EINVAL; ··· 570 buffer.length = ACPI_ALLOCATE_BUFFER; 571 buffer.pointer = NULL; 572 573 - tmp_map = cpu_present_map; 574 acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); 575 576 /* 577 * If mp_register_lapic successfully generates a new logical cpu 578 * number, then the following will get us exactly what was mapped 579 */ 580 - cpus_andnot(new_map, cpu_present_map, tmp_map); 581 - if (cpus_empty(new_map)) { 582 printk ("Unable to map lapic to logical cpu number\n"); 583 - return -EINVAL; 584 } 585 586 - cpu = first_cpu(new_map); 587 588 *pcpu = cpu; 589 - return 0; 590 } 591 592 /* wrapper to silence section mismatch warning */ ··· 613 int acpi_unmap_lsapic(int cpu) 614 { 615 per_cpu(x86_cpu_to_apicid, cpu) = -1; 616 - cpu_clear(cpu, cpu_present_map); 617 num_processors--; 618 619 return (0);
··· 538 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 539 union acpi_object *obj; 540 struct acpi_madt_local_apic *lapic; 541 + cpumask_var_t tmp_map, new_map; 542 u8 physid; 543 int cpu; 544 + int retval = -ENOMEM; 545 546 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) 547 return -EINVAL; ··· 569 buffer.length = ACPI_ALLOCATE_BUFFER; 570 buffer.pointer = NULL; 571 572 + if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL)) 573 + goto out; 574 + 575 + if (!alloc_cpumask_var(&new_map, GFP_KERNEL)) 576 + goto free_tmp_map; 577 + 578 + cpumask_copy(tmp_map, cpu_present_mask); 579 acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); 580 581 /* 582 * If mp_register_lapic successfully generates a new logical cpu 583 * number, then the following will get us exactly what was mapped 584 */ 585 + cpumask_andnot(new_map, cpu_present_mask, tmp_map); 586 + if (cpumask_empty(new_map)) { 587 printk ("Unable to map lapic to logical cpu number\n"); 588 + retval = -EINVAL; 589 + goto free_new_map; 590 } 591 592 + cpu = cpumask_first(new_map); 593 594 *pcpu = cpu; 595 + retval = 0; 596 + 597 + free_new_map: 598 + free_cpumask_var(new_map); 599 + free_tmp_map: 600 + free_cpumask_var(tmp_map); 601 + out: 602 + return retval; 603 } 604 605 /* wrapper to silence section mismatch warning */ ··· 598 int acpi_unmap_lsapic(int cpu) 599 { 600 per_cpu(x86_cpu_to_apicid, cpu) = -1; 601 + set_cpu_present(cpu, false); 602 num_processors--; 603 604 return (0);
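
The hunk above follows the cpumask_var_t pattern used across this series: the mask is explicitly allocated, used, and freed rather than declared on the stack. A minimal sketch of that pattern, with demo_pick_cpu() as an invented example (when CONFIG_CPUMASK_OFFSTACK is not set, alloc_cpumask_var() cannot fail and free_cpumask_var() is a no-op):

    /* demo_pick_cpu() is an invented example of the cpumask_var_t pattern. */
    static int demo_pick_cpu(void)
    {
        cpumask_var_t mask;
        int cpu = -ENODEV;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;    /* only possible with CONFIG_CPUMASK_OFFSTACK */

        cpumask_and(mask, cpu_online_mask, cpu_present_mask);
        if (!cpumask_empty(mask))
            cpu = cpumask_first(mask);

        free_cpumask_var(mask);    /* no-op unless CONFIG_CPUMASK_OFFSTACK */
        return cpu;
    }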
+2 -2
arch/x86/kernel/apic.c
··· 140 struct clock_event_device *evt); 141 static void lapic_timer_setup(enum clock_event_mode mode, 142 struct clock_event_device *evt); 143 - static void lapic_timer_broadcast(const cpumask_t *mask); 144 static void apic_pm_activate(void); 145 146 /* ··· 453 /* 454 * Local APIC timer broadcast function 455 */ 456 - static void lapic_timer_broadcast(const cpumask_t *mask) 457 { 458 #ifdef CONFIG_SMP 459 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
··· 140 struct clock_event_device *evt); 141 static void lapic_timer_setup(enum clock_event_mode mode, 142 struct clock_event_device *evt); 143 + static void lapic_timer_broadcast(const struct cpumask *mask); 144 static void apic_pm_activate(void); 145 146 /* ··· 453 /* 454 * Local APIC timer broadcast function 455 */ 456 + static void lapic_timer_broadcast(const struct cpumask *mask) 457 { 458 #ifdef CONFIG_SMP 459 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+1 -1
arch/x86/kernel/cpu/common.c
··· 355 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); 356 } else if (smp_num_siblings > 1) { 357 358 - if (smp_num_siblings > NR_CPUS) { 359 printk(KERN_WARNING "CPU: Unsupported number of siblings %d", 360 smp_num_siblings); 361 smp_num_siblings = 1;
··· 355 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); 356 } else if (smp_num_siblings > 1) { 357 358 + if (smp_num_siblings > nr_cpu_ids) { 359 printk(KERN_WARNING "CPU: Unsupported number of siblings %d", 360 smp_num_siblings); 361 smp_num_siblings = 1;
+25 -3
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
··· 517 } 518 } 519 520 /* 521 * acpi_cpufreq_early_init - initialize ACPI P-States library 522 * ··· 538 */ 539 static int __init acpi_cpufreq_early_init(void) 540 { 541 dprintk("acpi_cpufreq_early_init\n"); 542 543 acpi_perf_data = alloc_percpu(struct acpi_processor_performance); 544 if (!acpi_perf_data) { 545 dprintk("Memory allocation error for acpi_perf_data.\n"); 546 return -ENOMEM; 547 } 548 549 /* Do initialization in ACPI core */ ··· 626 */ 627 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || 628 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { 629 - policy->cpus = perf->shared_cpu_map; 630 } 631 - policy->related_cpus = perf->shared_cpu_map; 632 633 #ifdef CONFIG_SMP 634 dmi_check_system(sw_any_bug_dmi_table); ··· 817 818 ret = cpufreq_register_driver(&acpi_cpufreq_driver); 819 if (ret) 820 - free_percpu(acpi_perf_data); 821 822 return ret; 823 }
··· 517 } 518 } 519 520 + static void free_acpi_perf_data(void) 521 + { 522 + unsigned int i; 523 + 524 + /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ 525 + for_each_possible_cpu(i) 526 + free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) 527 + ->shared_cpu_map); 528 + free_percpu(acpi_perf_data); 529 + } 530 + 531 /* 532 * acpi_cpufreq_early_init - initialize ACPI P-States library 533 * ··· 527 */ 528 static int __init acpi_cpufreq_early_init(void) 529 { 530 + unsigned int i; 531 dprintk("acpi_cpufreq_early_init\n"); 532 533 acpi_perf_data = alloc_percpu(struct acpi_processor_performance); 534 if (!acpi_perf_data) { 535 dprintk("Memory allocation error for acpi_perf_data.\n"); 536 return -ENOMEM; 537 + } 538 + for_each_possible_cpu(i) { 539 + if (!alloc_cpumask_var_node( 540 + &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, 541 + GFP_KERNEL, cpu_to_node(i))) { 542 + 543 + /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ 544 + free_acpi_perf_data(); 545 + return -ENOMEM; 546 + } 547 } 548 549 /* Do initialization in ACPI core */ ··· 604 */ 605 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || 606 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { 607 + cpumask_copy(&policy->cpus, perf->shared_cpu_map); 608 } 609 + cpumask_copy(&policy->related_cpus, perf->shared_cpu_map); 610 611 #ifdef CONFIG_SMP 612 dmi_check_system(sw_any_bug_dmi_table); ··· 795 796 ret = cpufreq_register_driver(&acpi_cpufreq_driver); 797 if (ret) 798 + free_acpi_perf_data(); 799 800 return ret; 801 }
+9
arch/x86/kernel/cpu/cpufreq/powernow-k7.c
··· 310 goto err0; 311 } 312 313 if (acpi_processor_register_performance(acpi_processor_perf, 0)) { 314 retval = -EIO; 315 goto err1; ··· 418 err2: 419 acpi_processor_unregister_performance(acpi_processor_perf, 0); 420 err1: 421 kfree(acpi_processor_perf); 422 err0: 423 printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n"); ··· 660 #ifdef CONFIG_X86_POWERNOW_K7_ACPI 661 if (acpi_processor_perf) { 662 acpi_processor_unregister_performance(acpi_processor_perf, 0); 663 kfree(acpi_processor_perf); 664 } 665 #endif
··· 310 goto err0; 311 } 312 313 + if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, 314 + GFP_KERNEL)) { 315 + retval = -ENOMEM; 316 + goto err05; 317 + } 318 + 319 if (acpi_processor_register_performance(acpi_processor_perf, 0)) { 320 retval = -EIO; 321 goto err1; ··· 412 err2: 413 acpi_processor_unregister_performance(acpi_processor_perf, 0); 414 err1: 415 + free_cpumask_var(acpi_processor_perf->shared_cpu_map); 416 + err05: 417 kfree(acpi_processor_perf); 418 err0: 419 printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n"); ··· 652 #ifdef CONFIG_X86_POWERNOW_K7_ACPI 653 if (acpi_processor_perf) { 654 acpi_processor_unregister_performance(acpi_processor_perf, 0); 655 + free_cpumask_var(acpi_processor_perf->shared_cpu_map); 656 kfree(acpi_processor_perf); 657 } 658 #endif
+15 -9
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
··· 766 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) 767 { 768 struct cpufreq_frequency_table *powernow_table; 769 - int ret_val; 770 771 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { 772 dprintk("register performance failed: bad ACPI data\n"); ··· 815 /* notify BIOS that we exist */ 816 acpi_processor_notify_smm(THIS_MODULE); 817 818 return 0; 819 820 err_out_mem: ··· 833 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ 834 data->acpi_data.state_count = 0; 835 836 - return -ENODEV; 837 } 838 839 static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) ··· 936 { 937 if (data->acpi_data.state_count) 938 acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 939 } 940 941 #else ··· 1142 data->cpu = pol->cpu; 1143 data->currpstate = HW_PSTATE_INVALID; 1144 1145 - if (powernow_k8_cpu_init_acpi(data)) { 1146 /* 1147 * Use the PSB BIOS structure. This is only availabe on 1148 * an UP version, and is deprecated by AMD. ··· 1161 "ACPI maintainers and complain to your BIOS " 1162 "vendor.\n"); 1163 #endif 1164 - kfree(data); 1165 - return -ENODEV; 1166 } 1167 if (pol->cpu != 0) { 1168 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " 1169 "CPU other than CPU0. Complain to your BIOS " 1170 "vendor.\n"); 1171 - kfree(data); 1172 - return -ENODEV; 1173 } 1174 rc = find_psb_table(data); 1175 if (rc) { 1176 - kfree(data); 1177 - return -ENODEV; 1178 } 1179 } 1180
··· 766 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) 767 { 768 struct cpufreq_frequency_table *powernow_table; 769 + int ret_val = -ENODEV; 770 771 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { 772 dprintk("register performance failed: bad ACPI data\n"); ··· 815 /* notify BIOS that we exist */ 816 acpi_processor_notify_smm(THIS_MODULE); 817 818 + if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { 819 + printk(KERN_ERR PFX 820 + "unable to alloc powernow_k8_data cpumask\n"); 821 + ret_val = -ENOMEM; 822 + goto err_out_mem; 823 + } 824 + 825 return 0; 826 827 err_out_mem: ··· 826 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ 827 data->acpi_data.state_count = 0; 828 829 + return ret_val; 830 } 831 832 static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) ··· 929 { 930 if (data->acpi_data.state_count) 931 acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 932 + free_cpumask_var(data->acpi_data.shared_cpu_map); 933 } 934 935 #else ··· 1134 data->cpu = pol->cpu; 1135 data->currpstate = HW_PSTATE_INVALID; 1136 1137 + rc = powernow_k8_cpu_init_acpi(data); 1138 + if (rc) { 1139 /* 1140 * Use the PSB BIOS structure. This is only availabe on 1141 * an UP version, and is deprecated by AMD. ··· 1152 "ACPI maintainers and complain to your BIOS " 1153 "vendor.\n"); 1154 #endif 1155 + goto err_out; 1156 } 1157 if (pol->cpu != 0) { 1158 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " 1159 "CPU other than CPU0. Complain to your BIOS " 1160 "vendor.\n"); 1161 + goto err_out; 1162 } 1163 rc = find_psb_table(data); 1164 if (rc) { 1165 + goto err_out; 1166 } 1167 } 1168
+1 -1
arch/x86/kernel/cpu/intel_cacheinfo.c
··· 534 per_cpu(cpuid4_info, cpu) = NULL; 535 } 536 537 - static void get_cpu_leaves(void *_retval) 538 { 539 int j, *retval = _retval, cpu = smp_processor_id(); 540
··· 534 per_cpu(cpuid4_info, cpu) = NULL; 535 } 536 537 + static void __cpuinit get_cpu_leaves(void *_retval) 538 { 539 int j, *retval = _retval, cpu = smp_processor_id(); 540
+1 -1
arch/x86/kernel/cpuid.c
··· 121 lock_kernel(); 122 123 cpu = iminor(file->f_path.dentry->d_inode); 124 - if (cpu >= NR_CPUS || !cpu_online(cpu)) { 125 ret = -ENXIO; /* No such CPU */ 126 goto out; 127 }
··· 121 lock_kernel(); 122 123 cpu = iminor(file->f_path.dentry->d_inode); 124 + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { 125 ret = -ENXIO; /* No such CPU */ 126 goto out; 127 }
+3 -3
arch/x86/kernel/io_apic.c
··· 214 215 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 216 if (cfg) { 217 - /* FIXME: needs alloc_cpumask_var_node() */ 218 - if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) { 219 kfree(cfg); 220 cfg = NULL; 221 - } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) { 222 free_cpumask_var(cfg->domain); 223 kfree(cfg); 224 cfg = NULL;
··· 214 215 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 216 if (cfg) { 217 + if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { 218 kfree(cfg); 219 cfg = NULL; 220 + } else if (!alloc_cpumask_var_node(&cfg->old_domain, 221 + GFP_ATOMIC, node)) { 222 free_cpumask_var(cfg->domain); 223 kfree(cfg); 224 cfg = NULL;
+1 -1
arch/x86/kernel/msr.c
··· 136 lock_kernel(); 137 cpu = iminor(file->f_path.dentry->d_inode); 138 139 - if (cpu >= NR_CPUS || !cpu_online(cpu)) { 140 ret = -ENXIO; /* No such CPU */ 141 goto out; 142 }
··· 136 lock_kernel(); 137 cpu = iminor(file->f_path.dentry->d_inode); 138 139 + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { 140 ret = -ENXIO; /* No such CPU */ 141 goto out; 142 }
+2 -2
arch/x86/kernel/reboot.c
··· 501 502 #ifdef CONFIG_X86_32 503 /* See if there has been given a command line override */ 504 - if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && 505 cpu_online(reboot_cpu)) 506 reboot_cpu_id = reboot_cpu; 507 #endif ··· 511 reboot_cpu_id = smp_processor_id(); 512 513 /* Make certain I only run on the appropriate processor */ 514 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)); 515 516 /* O.K Now that I'm on the appropriate processor, 517 * stop all of the others.
··· 501 502 #ifdef CONFIG_X86_32 503 /* See if there has been given a command line override */ 504 + if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) && 505 cpu_online(reboot_cpu)) 506 reboot_cpu_id = reboot_cpu; 507 #endif ··· 511 reboot_cpu_id = smp_processor_id(); 512 513 /* Make certain I only run on the appropriate processor */ 514 + set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); 515 516 /* O.K Now that I'm on the appropriate processor, 517 * stop all of the others.
+12 -21
arch/x86/kernel/setup_percpu.c
··· 153 align = max_t(unsigned long, PAGE_SIZE, align); 154 size = roundup(old_size, align); 155 156 - printk(KERN_INFO 157 - "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", 158 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); 159 160 - printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", 161 - size); 162 163 for_each_possible_cpu(cpu) { 164 #ifndef CONFIG_NEED_MULTIPLE_NODES ··· 167 if (!node_online(node) || !NODE_DATA(node)) { 168 ptr = __alloc_bootmem(size, align, 169 __pa(MAX_DMA_ADDRESS)); 170 - printk(KERN_INFO 171 - "cpu %d has no node %d or node-local memory\n", 172 cpu, node); 173 - if (ptr) 174 - printk(KERN_DEBUG 175 - "per cpu data for cpu%d at %016lx\n", 176 - cpu, __pa(ptr)); 177 - } 178 - else { 179 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, 180 __pa(MAX_DMA_ADDRESS)); 181 - if (ptr) 182 - printk(KERN_DEBUG 183 - "per cpu data for cpu%d on node%d " 184 - "at %016lx\n", 185 - cpu, node, __pa(ptr)); 186 } 187 #endif 188 per_cpu_offset(cpu) = ptr - __per_cpu_start; ··· 330 /* 331 * Returns a pointer to the bitmask of CPUs on Node 'node'. 332 */ 333 - const cpumask_t *_node_to_cpumask_ptr(int node) 334 { 335 if (node_to_cpumask_map == NULL) { 336 printk(KERN_WARNING 337 - "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n", 338 node); 339 dump_stack(); 340 return (const cpumask_t *)&cpu_online_map; 341 } 342 if (node >= nr_node_ids) { 343 printk(KERN_WARNING 344 - "_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n", 345 node, nr_node_ids); 346 dump_stack(); 347 return &cpu_mask_none; 348 } 349 return &node_to_cpumask_map[node]; 350 } 351 - EXPORT_SYMBOL(_node_to_cpumask_ptr); 352 353 /* 354 * Returns a bitmask of CPUs on Node 'node'.
··· 153 align = max_t(unsigned long, PAGE_SIZE, align); 154 size = roundup(old_size, align); 155 156 + pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", 157 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); 158 159 + pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size); 160 161 for_each_possible_cpu(cpu) { 162 #ifndef CONFIG_NEED_MULTIPLE_NODES ··· 169 if (!node_online(node) || !NODE_DATA(node)) { 170 ptr = __alloc_bootmem(size, align, 171 __pa(MAX_DMA_ADDRESS)); 172 + pr_info("cpu %d has no node %d or node-local memory\n", 173 cpu, node); 174 + pr_debug("per cpu data for cpu%d at %016lx\n", 175 + cpu, __pa(ptr)); 176 + } else { 177 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, 178 __pa(MAX_DMA_ADDRESS)); 179 + pr_debug("per cpu data for cpu%d on node%d at %016lx\n", 180 + cpu, node, __pa(ptr)); 181 } 182 #endif 183 per_cpu_offset(cpu) = ptr - __per_cpu_start; ··· 339 /* 340 * Returns a pointer to the bitmask of CPUs on Node 'node'. 341 */ 342 + const cpumask_t *cpumask_of_node(int node) 343 { 344 if (node_to_cpumask_map == NULL) { 345 printk(KERN_WARNING 346 + "cpumask_of_node(%d): no node_to_cpumask_map!\n", 347 node); 348 dump_stack(); 349 return (const cpumask_t *)&cpu_online_map; 350 } 351 if (node >= nr_node_ids) { 352 printk(KERN_WARNING 353 + "cpumask_of_node(%d): node > nr_node_ids(%d)\n", 354 node, nr_node_ids); 355 dump_stack(); 356 return &cpu_mask_none; 357 } 358 return &node_to_cpumask_map[node]; 359 } 360 + EXPORT_SYMBOL(cpumask_of_node); 361 362 /* 363 * Returns a bitmask of CPUs on Node 'node'.
+11 -4
arch/x86/kernel/smpboot.c
··· 496 } 497 498 /* maps the cpu to the sched domain representing multi-core */ 499 - cpumask_t cpu_coregroup_map(int cpu) 500 { 501 struct cpuinfo_x86 *c = &cpu_data(cpu); 502 /* ··· 504 * And for power savings, we return cpu_core_map 505 */ 506 if (sched_mc_power_savings || sched_smt_power_savings) 507 - return per_cpu(cpu_core_map, cpu); 508 else 509 - return c->llc_shared_map; 510 } 511 512 static void impress_friends(void) ··· 1154 for_each_possible_cpu(i) { 1155 c = &cpu_data(i); 1156 /* mark all to hotplug */ 1157 - c->cpu_index = NR_CPUS; 1158 } 1159 } 1160 ··· 1297 possible = num_processors + disabled_cpus; 1298 else 1299 possible = setup_possible_cpus; 1300 1301 if (possible > CONFIG_NR_CPUS) { 1302 printk(KERN_WARNING
··· 496 } 497 498 /* maps the cpu to the sched domain representing multi-core */ 499 + const struct cpumask *cpu_coregroup_mask(int cpu) 500 { 501 struct cpuinfo_x86 *c = &cpu_data(cpu); 502 /* ··· 504 * And for power savings, we return cpu_core_map 505 */ 506 if (sched_mc_power_savings || sched_smt_power_savings) 507 + return &per_cpu(cpu_core_map, cpu); 508 else 509 + return &c->llc_shared_map; 510 + } 511 + 512 + cpumask_t cpu_coregroup_map(int cpu) 513 + { 514 + return *cpu_coregroup_mask(cpu); 515 } 516 517 static void impress_friends(void) ··· 1149 for_each_possible_cpu(i) { 1150 c = &cpu_data(i); 1151 /* mark all to hotplug */ 1152 + c->cpu_index = nr_cpu_ids; 1153 } 1154 } 1155 ··· 1292 possible = num_processors + disabled_cpus; 1293 else 1294 possible = setup_possible_cpus; 1295 + 1296 + total_cpus = max_t(int, possible, num_processors + disabled_cpus); 1297 1298 if (possible > CONFIG_NR_CPUS) { 1299 printk(KERN_WARNING
+3 -4
arch/x86/mach-voyager/voyager_smp.c
··· 357 printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); 358 359 /* initialize the CPU structures (moved from smp_boot_cpus) */ 360 - for (i = 0; i < NR_CPUS; i++) { 361 cpu_irq_affinity[i] = ~0; 362 - } 363 cpu_online_map = cpumask_of_cpu(boot_cpu_id); 364 365 /* The boot CPU must be extended */ ··· 1226 * new values until the next timer interrupt in which they do process 1227 * accounting. 1228 */ 1229 - for (i = 0; i < NR_CPUS; ++i) 1230 per_cpu(prof_multiplier, i) = multiplier; 1231 1232 return 0; ··· 1256 int i; 1257 1258 /* initialize the per cpu irq mask to all disabled */ 1259 - for (i = 0; i < NR_CPUS; i++) 1260 vic_irq_mask[i] = 0xFFFF; 1261 1262 VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);
··· 357 printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); 358 359 /* initialize the CPU structures (moved from smp_boot_cpus) */ 360 + for (i = 0; i < nr_cpu_ids; i++) 361 cpu_irq_affinity[i] = ~0; 362 cpu_online_map = cpumask_of_cpu(boot_cpu_id); 363 364 /* The boot CPU must be extended */ ··· 1227 * new values until the next timer interrupt in which they do process 1228 * accounting. 1229 */ 1230 + for (i = 0; i < nr_cpu_ids; ++i) 1231 per_cpu(prof_multiplier, i) = multiplier; 1232 1233 return 0; ··· 1257 int i; 1258 1259 /* initialize the per cpu irq mask to all disabled */ 1260 + for (i = 0; i < nr_cpu_ids; i++) 1261 vic_irq_mask[i] = 0xFFFF; 1262 1263 VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);
+2 -2
block/blk.h
··· 99 static inline int blk_cpu_to_group(int cpu) 100 { 101 #ifdef CONFIG_SCHED_MC 102 - cpumask_t mask = cpu_coregroup_map(cpu); 103 - return first_cpu(mask); 104 #elif defined(CONFIG_SCHED_SMT) 105 return first_cpu(per_cpu(cpu_sibling_map, cpu)); 106 #else
··· 99 static inline int blk_cpu_to_group(int cpu) 100 { 101 #ifdef CONFIG_SCHED_MC 102 + const struct cpumask *mask = cpu_coregroup_mask(cpu); 103 + return cpumask_first(mask); 104 #elif defined(CONFIG_SCHED_SMT) 105 return first_cpu(per_cpu(cpu_sibling_map, cpu)); 106 #else
+10 -4
drivers/acpi/processor_core.c
··· 826 if (!pr) 827 return -ENOMEM; 828 829 pr->handle = device->handle; 830 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); 831 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); ··· 850 851 pr = acpi_driver_data(device); 852 853 - if (pr->id >= nr_cpu_ids) { 854 - kfree(pr); 855 - return 0; 856 - } 857 858 if (type == ACPI_BUS_REMOVAL_EJECT) { 859 if (acpi_processor_handle_eject(pr)) ··· 876 877 per_cpu(processors, pr->id) = NULL; 878 per_cpu(processor_device_array, pr->id) = NULL; 879 kfree(pr); 880 881 return 0;
··· 826 if (!pr) 827 return -ENOMEM; 828 829 + if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 830 + kfree(pr); 831 + return -ENOMEM; 832 + } 833 + 834 pr->handle = device->handle; 835 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); 836 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); ··· 845 846 pr = acpi_driver_data(device); 847 848 + if (pr->id >= nr_cpu_ids) 849 + goto free; 850 851 if (type == ACPI_BUS_REMOVAL_EJECT) { 852 if (acpi_processor_handle_eject(pr)) ··· 873 874 per_cpu(processors, pr->id) = NULL; 875 per_cpu(processor_device_array, pr->id) = NULL; 876 + 877 + free: 878 + free_cpumask_var(pr->throttling.shared_cpu_map); 879 kfree(pr); 880 881 return 0;
+16 -12
drivers/acpi/processor_perflib.c
··· 588 int count, count_target; 589 int retval = 0; 590 unsigned int i, j; 591 - cpumask_t covered_cpus; 592 struct acpi_processor *pr; 593 struct acpi_psd_package *pdomain; 594 struct acpi_processor *match_pr; 595 struct acpi_psd_package *match_pdomain; 596 597 mutex_lock(&performance_mutex); 598 ··· 620 } 621 622 pr->performance = percpu_ptr(performance, i); 623 - cpu_set(i, pr->performance->shared_cpu_map); 624 if (acpi_processor_get_psd(pr)) { 625 retval = -EINVAL; 626 continue; ··· 653 } 654 } 655 656 - cpus_clear(covered_cpus); 657 for_each_possible_cpu(i) { 658 pr = per_cpu(processors, i); 659 if (!pr) 660 continue; 661 662 - if (cpu_isset(i, covered_cpus)) 663 continue; 664 665 pdomain = &(pr->performance->domain_info); 666 - cpu_set(i, pr->performance->shared_cpu_map); 667 - cpu_set(i, covered_cpus); 668 if (pdomain->num_processors <= 1) 669 continue; 670 ··· 702 goto err_ret; 703 } 704 705 - cpu_set(j, covered_cpus); 706 - cpu_set(j, pr->performance->shared_cpu_map); 707 count++; 708 } 709 ··· 721 722 match_pr->performance->shared_type = 723 pr->performance->shared_type; 724 - match_pr->performance->shared_cpu_map = 725 - pr->performance->shared_cpu_map; 726 } 727 } 728 ··· 734 735 /* Assume no coordination on any error parsing domain info */ 736 if (retval) { 737 - cpus_clear(pr->performance->shared_cpu_map); 738 - cpu_set(i, pr->performance->shared_cpu_map); 739 pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL; 740 } 741 pr->performance = NULL; /* Will be set for real in register */ 742 } 743 744 mutex_unlock(&performance_mutex); 745 return retval; 746 } 747 EXPORT_SYMBOL(acpi_processor_preregister_performance);
··· 588 int count, count_target; 589 int retval = 0; 590 unsigned int i, j; 591 + cpumask_var_t covered_cpus; 592 struct acpi_processor *pr; 593 struct acpi_psd_package *pdomain; 594 struct acpi_processor *match_pr; 595 struct acpi_psd_package *match_pdomain; 596 + 597 + if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 598 + return -ENOMEM; 599 600 mutex_lock(&performance_mutex); 601 ··· 617 } 618 619 pr->performance = percpu_ptr(performance, i); 620 + cpumask_set_cpu(i, pr->performance->shared_cpu_map); 621 if (acpi_processor_get_psd(pr)) { 622 retval = -EINVAL; 623 continue; ··· 650 } 651 } 652 653 + cpumask_clear(covered_cpus); 654 for_each_possible_cpu(i) { 655 pr = per_cpu(processors, i); 656 if (!pr) 657 continue; 658 659 + if (cpumask_test_cpu(i, covered_cpus)) 660 continue; 661 662 pdomain = &(pr->performance->domain_info); 663 + cpumask_set_cpu(i, pr->performance->shared_cpu_map); 664 + cpumask_set_cpu(i, covered_cpus); 665 if (pdomain->num_processors <= 1) 666 continue; 667 ··· 699 goto err_ret; 700 } 701 702 + cpumask_set_cpu(j, covered_cpus); 703 + cpumask_set_cpu(j, pr->performance->shared_cpu_map); 704 count++; 705 } 706 ··· 718 719 match_pr->performance->shared_type = 720 pr->performance->shared_type; 721 + cpumask_copy(match_pr->performance->shared_cpu_map, 722 + pr->performance->shared_cpu_map); 723 } 724 } 725 ··· 731 732 /* Assume no coordination on any error parsing domain info */ 733 if (retval) { 734 + cpumask_clear(pr->performance->shared_cpu_map); 735 + cpumask_set_cpu(i, pr->performance->shared_cpu_map); 736 pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL; 737 } 738 pr->performance = NULL; /* Will be set for real in register */ 739 } 740 741 mutex_unlock(&performance_mutex); 742 + free_cpumask_var(covered_cpus); 743 return retval; 744 } 745 EXPORT_SYMBOL(acpi_processor_preregister_performance);
+52 -28
drivers/acpi/processor_throttling.c
··· 61 int count, count_target; 62 int retval = 0; 63 unsigned int i, j; 64 - cpumask_t covered_cpus; 65 struct acpi_processor *pr, *match_pr; 66 struct acpi_tsd_package *pdomain, *match_pdomain; 67 struct acpi_processor_throttling *pthrottling, *match_pthrottling; 68 69 /* 70 * Now that we have _TSD data from all CPUs, lets setup T-state ··· 94 if (retval) 95 goto err_ret; 96 97 - cpus_clear(covered_cpus); 98 for_each_possible_cpu(i) { 99 pr = per_cpu(processors, i); 100 if (!pr) 101 continue; 102 103 - if (cpu_isset(i, covered_cpus)) 104 continue; 105 pthrottling = &pr->throttling; 106 107 pdomain = &(pthrottling->domain_info); 108 - cpu_set(i, pthrottling->shared_cpu_map); 109 - cpu_set(i, covered_cpus); 110 /* 111 * If the number of processor in the TSD domain is 1, it is 112 * unnecessary to parse the coordination for this CPU. ··· 147 goto err_ret; 148 } 149 150 - cpu_set(j, covered_cpus); 151 - cpu_set(j, pthrottling->shared_cpu_map); 152 count++; 153 } 154 for_each_possible_cpu(j) { ··· 168 * If some CPUS have the same domain, they 169 * will have the same shared_cpu_map. 170 */ 171 - match_pthrottling->shared_cpu_map = 172 - pthrottling->shared_cpu_map; 173 } 174 } 175 176 err_ret: 177 for_each_possible_cpu(i) { 178 pr = per_cpu(processors, i); 179 if (!pr) ··· 187 */ 188 if (retval) { 189 pthrottling = &(pr->throttling); 190 - cpus_clear(pthrottling->shared_cpu_map); 191 - cpu_set(i, pthrottling->shared_cpu_map); 192 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 193 } 194 } ··· 572 pthrottling = &pr->throttling; 573 pthrottling->tsd_valid_flag = 1; 574 pthrottling->shared_type = pdomain->coord_type; 575 - cpu_set(pr->id, pthrottling->shared_cpu_map); 576 /* 577 * If the coordination type is not defined in ACPI spec, 578 * the tsd_valid_flag will be clear and coordination type ··· 831 832 static int acpi_processor_get_throttling(struct acpi_processor *pr) 833 { 834 - cpumask_t saved_mask; 835 int ret; 836 837 if (!pr) ··· 839 840 if (!pr->flags.throttling) 841 return -ENODEV; 842 /* 843 * Migrate task to the cpu pointed by pr. 844 */ 845 - saved_mask = current->cpus_allowed; 846 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); 847 ret = pr->throttling.acpi_processor_get_throttling(pr); 848 /* restore the previous state */ 849 - set_cpus_allowed_ptr(current, &saved_mask); 850 851 return ret; 852 } ··· 997 998 int acpi_processor_set_throttling(struct acpi_processor *pr, int state) 999 { 1000 - cpumask_t saved_mask; 1001 int ret = 0; 1002 unsigned int i; 1003 struct acpi_processor *match_pr; 1004 struct acpi_processor_throttling *p_throttling; 1005 struct throttling_tstate t_state; 1006 - cpumask_t online_throttling_cpus; 1007 1008 if (!pr) 1009 return -EINVAL; ··· 1014 if ((state < 0) || (state > (pr->throttling.state_count - 1))) 1015 return -EINVAL; 1016 1017 - saved_mask = current->cpus_allowed; 1018 t_state.target_state = state; 1019 p_throttling = &(pr->throttling); 1020 - cpus_and(online_throttling_cpus, cpu_online_map, 1021 - p_throttling->shared_cpu_map); 1022 /* 1023 * The throttling notifier will be called for every 1024 * affected cpu in order to get one proper T-state. 1025 * The notifier event is THROTTLING_PRECHANGE. 1026 */ 1027 - for_each_cpu_mask_nr(i, online_throttling_cpus) { 1028 t_state.cpu = i; 1029 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, 1030 &t_state); ··· 1044 * it can be called only for the cpu pointed by pr. 
1045 */ 1046 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1047 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); 1048 ret = p_throttling->acpi_processor_set_throttling(pr, 1049 t_state.target_state); 1050 } else { ··· 1054 * it is necessary to set T-state for every affected 1055 * cpus. 1056 */ 1057 - for_each_cpu_mask_nr(i, online_throttling_cpus) { 1058 match_pr = per_cpu(processors, i); 1059 /* 1060 * If the pointer is invalid, we will report the ··· 1076 continue; 1077 } 1078 t_state.cpu = i; 1079 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); 1080 ret = match_pr->throttling. 1081 acpi_processor_set_throttling( 1082 match_pr, t_state.target_state); ··· 1089 * affected cpu to update the T-states. 1090 * The notifier event is THROTTLING_POSTCHANGE 1091 */ 1092 - for_each_cpu_mask_nr(i, online_throttling_cpus) { 1093 t_state.cpu = i; 1094 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, 1095 &t_state); 1096 } 1097 /* restore the previous state */ 1098 - set_cpus_allowed_ptr(current, &saved_mask); 1099 return ret; 1100 } 1101 ··· 1144 if (acpi_processor_get_tsd(pr)) { 1145 pthrottling = &pr->throttling; 1146 pthrottling->tsd_valid_flag = 0; 1147 - cpu_set(pr->id, pthrottling->shared_cpu_map); 1148 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 1149 } 1150
··· 61 int count, count_target; 62 int retval = 0; 63 unsigned int i, j; 64 + cpumask_var_t covered_cpus; 65 struct acpi_processor *pr, *match_pr; 66 struct acpi_tsd_package *pdomain, *match_pdomain; 67 struct acpi_processor_throttling *pthrottling, *match_pthrottling; 68 + 69 + if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 70 + return -ENOMEM; 71 72 /* 73 * Now that we have _TSD data from all CPUs, lets setup T-state ··· 91 if (retval) 92 goto err_ret; 93 94 + cpumask_clear(covered_cpus); 95 for_each_possible_cpu(i) { 96 pr = per_cpu(processors, i); 97 if (!pr) 98 continue; 99 100 + if (cpumask_test_cpu(i, covered_cpus)) 101 continue; 102 pthrottling = &pr->throttling; 103 104 pdomain = &(pthrottling->domain_info); 105 + cpumask_set_cpu(i, pthrottling->shared_cpu_map); 106 + cpumask_set_cpu(i, covered_cpus); 107 /* 108 * If the number of processor in the TSD domain is 1, it is 109 * unnecessary to parse the coordination for this CPU. ··· 144 goto err_ret; 145 } 146 147 + cpumask_set_cpu(j, covered_cpus); 148 + cpumask_set_cpu(j, pthrottling->shared_cpu_map); 149 count++; 150 } 151 for_each_possible_cpu(j) { ··· 165 * If some CPUS have the same domain, they 166 * will have the same shared_cpu_map. 167 */ 168 + cpumask_copy(match_pthrottling->shared_cpu_map, 169 + pthrottling->shared_cpu_map); 170 } 171 } 172 173 err_ret: 174 + free_cpumask_var(covered_cpus); 175 + 176 for_each_possible_cpu(i) { 177 pr = per_cpu(processors, i); 178 if (!pr) ··· 182 */ 183 if (retval) { 184 pthrottling = &(pr->throttling); 185 + cpumask_clear(pthrottling->shared_cpu_map); 186 + cpumask_set_cpu(i, pthrottling->shared_cpu_map); 187 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 188 } 189 } ··· 567 pthrottling = &pr->throttling; 568 pthrottling->tsd_valid_flag = 1; 569 pthrottling->shared_type = pdomain->coord_type; 570 + cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); 571 /* 572 * If the coordination type is not defined in ACPI spec, 573 * the tsd_valid_flag will be clear and coordination type ··· 826 827 static int acpi_processor_get_throttling(struct acpi_processor *pr) 828 { 829 + cpumask_var_t saved_mask; 830 int ret; 831 832 if (!pr) ··· 834 835 if (!pr->flags.throttling) 836 return -ENODEV; 837 + 838 + if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) 839 + return -ENOMEM; 840 + 841 /* 842 * Migrate task to the cpu pointed by pr. 
843 */ 844 + cpumask_copy(saved_mask, &current->cpus_allowed); 845 + /* FIXME: use work_on_cpu() */ 846 + set_cpus_allowed_ptr(current, cpumask_of(pr->id)); 847 ret = pr->throttling.acpi_processor_get_throttling(pr); 848 /* restore the previous state */ 849 + set_cpus_allowed_ptr(current, saved_mask); 850 + free_cpumask_var(saved_mask); 851 852 return ret; 853 } ··· 986 987 int acpi_processor_set_throttling(struct acpi_processor *pr, int state) 988 { 989 + cpumask_var_t saved_mask; 990 int ret = 0; 991 unsigned int i; 992 struct acpi_processor *match_pr; 993 struct acpi_processor_throttling *p_throttling; 994 struct throttling_tstate t_state; 995 + cpumask_var_t online_throttling_cpus; 996 997 if (!pr) 998 return -EINVAL; ··· 1003 if ((state < 0) || (state > (pr->throttling.state_count - 1))) 1004 return -EINVAL; 1005 1006 + if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) 1007 + return -ENOMEM; 1008 + 1009 + if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) { 1010 + free_cpumask_var(saved_mask); 1011 + return -ENOMEM; 1012 + } 1013 + 1014 + cpumask_copy(saved_mask, &current->cpus_allowed); 1015 t_state.target_state = state; 1016 p_throttling = &(pr->throttling); 1017 + cpumask_and(online_throttling_cpus, cpu_online_mask, 1018 + p_throttling->shared_cpu_map); 1019 /* 1020 * The throttling notifier will be called for every 1021 * affected cpu in order to get one proper T-state. 1022 * The notifier event is THROTTLING_PRECHANGE. 1023 */ 1024 + for_each_cpu(i, online_throttling_cpus) { 1025 t_state.cpu = i; 1026 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, 1027 &t_state); ··· 1025 * it can be called only for the cpu pointed by pr. 1026 */ 1027 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1028 + /* FIXME: use work_on_cpu() */ 1029 + set_cpus_allowed_ptr(current, cpumask_of(pr->id)); 1030 ret = p_throttling->acpi_processor_set_throttling(pr, 1031 t_state.target_state); 1032 } else { ··· 1034 * it is necessary to set T-state for every affected 1035 * cpus. 1036 */ 1037 + for_each_cpu(i, online_throttling_cpus) { 1038 match_pr = per_cpu(processors, i); 1039 /* 1040 * If the pointer is invalid, we will report the ··· 1056 continue; 1057 } 1058 t_state.cpu = i; 1059 + /* FIXME: use work_on_cpu() */ 1060 + set_cpus_allowed_ptr(current, cpumask_of(i)); 1061 ret = match_pr->throttling. 1062 acpi_processor_set_throttling( 1063 match_pr, t_state.target_state); ··· 1068 * affected cpu to update the T-states. 1069 * The notifier event is THROTTLING_POSTCHANGE 1070 */ 1071 + for_each_cpu(i, online_throttling_cpus) { 1072 t_state.cpu = i; 1073 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, 1074 &t_state); 1075 } 1076 /* restore the previous state */ 1077 + /* FIXME: use work_on_cpu() */ 1078 + set_cpus_allowed_ptr(current, saved_mask); 1079 + free_cpumask_var(online_throttling_cpus); 1080 + free_cpumask_var(saved_mask); 1081 return ret; 1082 } 1083 ··· 1120 if (acpi_processor_get_tsd(pr)) { 1121 pthrottling = &pr->throttling; 1122 pthrottling->tsd_valid_flag = 0; 1123 + cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); 1124 pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; 1125 } 1126
+44
drivers/base/cpu.c
··· 128 print_cpus_func(possible); 129 print_cpus_func(present); 130 131 static struct sysdev_class_attribute *cpu_state_attr[] = { 132 &attr_online_map, 133 &attr_possible_map, 134 &attr_present_map, 135 }; 136 137 static int cpu_states_init(void)
··· 128 print_cpus_func(possible); 129 print_cpus_func(present); 130 131 + /* 132 + * Print values for NR_CPUS and offlined cpus 133 + */ 134 + static ssize_t print_cpus_kernel_max(struct sysdev_class *class, char *buf) 135 + { 136 + int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); 137 + return n; 138 + } 139 + static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL); 140 + 141 + /* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */ 142 + unsigned int total_cpus; 143 + 144 + static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf) 145 + { 146 + int n = 0, len = PAGE_SIZE-2; 147 + cpumask_var_t offline; 148 + 149 + /* display offline cpus < nr_cpu_ids */ 150 + if (!alloc_cpumask_var(&offline, GFP_KERNEL)) 151 + return -ENOMEM; 152 + cpumask_complement(offline, cpu_online_mask); 153 + n = cpulist_scnprintf(buf, len, offline); 154 + free_cpumask_var(offline); 155 + 156 + /* display offline cpus >= nr_cpu_ids */ 157 + if (total_cpus && nr_cpu_ids < total_cpus) { 158 + if (n && n < len) 159 + buf[n++] = ','; 160 + 161 + if (nr_cpu_ids == total_cpus-1) 162 + n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids); 163 + else 164 + n += snprintf(&buf[n], len - n, "%d-%d", 165 + nr_cpu_ids, total_cpus-1); 166 + } 167 + 168 + n += snprintf(&buf[n], len - n, "\n"); 169 + return n; 170 + } 171 + static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL); 172 + 173 static struct sysdev_class_attribute *cpu_state_attr[] = { 174 &attr_online_map, 175 &attr_possible_map, 176 &attr_present_map, 177 + &attr_kernel_max, 178 + &attr_offline, 179 }; 180 181 static int cpu_states_init(void)
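The new "offline" attribute above emits its ranges in cpulist format. As a rough illustration of that output shape only, here is a small standalone C sketch; cpulist_format() and the byte-per-cpu set are inventions for the demo, not the kernel's cpulist_scnprintf().

    #include <stdio.h>

    /* format set bits as comma-separated ranges, e.g. "2,32-63" */
    static int cpulist_format(char *buf, int len, const unsigned char *set, int nbits)
    {
            int n = 0, cpu = 0;

            buf[0] = '\0';
            while (cpu < nbits && n < len) {
                    int start;

                    while (cpu < nbits && !set[cpu])
                            cpu++;
                    if (cpu >= nbits)
                            break;
                    start = cpu;
                    while (cpu < nbits && set[cpu])
                            cpu++;

                    if (n)
                            n += snprintf(buf + n, len - n, ",");
                    if (start == cpu - 1)
                            n += snprintf(buf + n, len - n, "%d", start);
                    else
                            n += snprintf(buf + n, len - n, "%d-%d", start, cpu - 1);
            }
            return n;
    }

    int main(void)
    {
            unsigned char offline[64] = { 0 };
            char buf[128];
            int i;

            offline[2] = 1;                 /* cpu 2 taken down */
            for (i = 32; i < 64; i++)       /* cpus past kernel_max */
                    offline[i] = 1;

            cpulist_format(buf, sizeof(buf), offline, 64);
            printf("offline: %s\n", buf);   /* prints "2,32-63" */
            return 0;
    }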
+8 -9
drivers/infiniband/hw/ehca/ehca_irq.c
··· 659 660 WARN_ON_ONCE(!in_interrupt()); 661 if (ehca_debug_level >= 3) 662 - ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); 663 664 spin_lock_irqsave(&pool->last_cpu_lock, flags); 665 - cpu = next_cpu_nr(pool->last_cpu, cpu_online_map); 666 if (cpu >= nr_cpu_ids) 667 - cpu = first_cpu(cpu_online_map); 668 pool->last_cpu = cpu; 669 spin_unlock_irqrestore(&pool->last_cpu_lock, flags); 670 ··· 855 case CPU_UP_CANCELED_FROZEN: 856 ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu); 857 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); 858 - kthread_bind(cct->task, any_online_cpu(cpu_online_map)); 859 destroy_comp_task(pool, cpu); 860 break; 861 case CPU_ONLINE: ··· 902 return -ENOMEM; 903 904 spin_lock_init(&pool->last_cpu_lock); 905 - pool->last_cpu = any_online_cpu(cpu_online_map); 906 907 pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task); 908 if (pool->cpu_comp_tasks == NULL) { ··· 934 935 unregister_hotcpu_notifier(&comp_pool_callback_nb); 936 937 - for (i = 0; i < NR_CPUS; i++) { 938 - if (cpu_online(i)) 939 - destroy_comp_task(pool, i); 940 - } 941 free_percpu(pool->cpu_comp_tasks); 942 kfree(pool); 943 }
··· 659 660 WARN_ON_ONCE(!in_interrupt()); 661 if (ehca_debug_level >= 3) 662 + ehca_dmp(cpu_online_mask, cpumask_size(), ""); 663 664 spin_lock_irqsave(&pool->last_cpu_lock, flags); 665 + cpu = cpumask_next(pool->last_cpu, cpu_online_mask); 666 if (cpu >= nr_cpu_ids) 667 + cpu = cpumask_first(cpu_online_mask); 668 pool->last_cpu = cpu; 669 spin_unlock_irqrestore(&pool->last_cpu_lock, flags); 670 ··· 855 case CPU_UP_CANCELED_FROZEN: 856 ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu); 857 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); 858 + kthread_bind(cct->task, cpumask_any(cpu_online_mask)); 859 destroy_comp_task(pool, cpu); 860 break; 861 case CPU_ONLINE: ··· 902 return -ENOMEM; 903 904 spin_lock_init(&pool->last_cpu_lock); 905 + pool->last_cpu = cpumask_any(cpu_online_mask); 906 907 pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task); 908 if (pool->cpu_comp_tasks == NULL) { ··· 934 935 unregister_hotcpu_notifier(&comp_pool_callback_nb); 936 937 + for_each_online_cpu(i) 938 + destroy_comp_task(pool, i); 939 + 940 free_percpu(pool->cpu_comp_tasks); 941 kfree(pool); 942 }
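The comp-pool change above picks CPUs round-robin with cpumask_next()/cpumask_first(), wrapping around once the scan runs past nr_cpu_ids. A minimal sketch of that wrap-around pattern, using a plain 64-bit word in place of a cpumask; next_set_bit() and first_set_bit() are demo helpers, not kernel symbols.

    #include <stdio.h>

    static int next_set_bit(unsigned long long mask, int after, int nbits)
    {
            for (int i = after + 1; i < nbits; i++)
                    if (mask & (1ULL << i))
                            return i;
            return nbits;                   /* nothing set past 'after' */
    }

    static int first_set_bit(unsigned long long mask, int nbits)
    {
            return next_set_bit(mask, -1, nbits);
    }

    int main(void)
    {
            unsigned long long online = 0xBULL;     /* cpus 0, 1 and 3 online */
            int last_cpu = 3, nbits = 64;

            for (int i = 0; i < 5; i++) {
                    int cpu = next_set_bit(online, last_cpu, nbits);

                    if (cpu >= nbits)               /* ran off the end: wrap */
                            cpu = first_set_bit(online, nbits);
                    last_cpu = cpu;
                    printf("picked cpu %d\n", cpu); /* 0 1 3 0 1 */
            }
            return 0;
    }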
+4 -4
drivers/infiniband/hw/ipath/ipath_file_ops.c
··· 1679 * InfiniPath chip to that processor (we assume reasonable connectivity, 1680 * for now). This code assumes that if affinity has been set 1681 * before this point, that at most one cpu is set; for now this 1682 - * is reasonable. I check for both cpus_empty() and cpus_full(), 1683 * in case some kernel variant sets none of the bits when no 1684 * affinity is set. 2.6.11 and 12 kernels have all present 1685 * cpus set. Some day we'll have to fix it up further to handle ··· 1688 * information. There may be some issues with dual core numbering 1689 * as well. This needs more work prior to release. 1690 */ 1691 - if (!cpus_empty(current->cpus_allowed) && 1692 - !cpus_full(current->cpus_allowed)) { 1693 int ncpus = num_online_cpus(), curcpu = -1, nset = 0; 1694 for (i = 0; i < ncpus; i++) 1695 - if (cpu_isset(i, current->cpus_allowed)) { 1696 ipath_cdbg(PROC, "%s[%u] affinity set for " 1697 "cpu %d/%d\n", current->comm, 1698 current->pid, i, ncpus);
··· 1679 * InfiniPath chip to that processor (we assume reasonable connectivity, 1680 * for now). This code assumes that if affinity has been set 1681 * before this point, that at most one cpu is set; for now this 1682 + * is reasonable. I check for both cpumask_empty() and cpumask_full(), 1683 * in case some kernel variant sets none of the bits when no 1684 * affinity is set. 2.6.11 and 12 kernels have all present 1685 * cpus set. Some day we'll have to fix it up further to handle ··· 1688 * information. There may be some issues with dual core numbering 1689 * as well. This needs more work prior to release. 1690 */ 1691 + if (!cpumask_empty(&current->cpus_allowed) && 1692 + !cpumask_full(&current->cpus_allowed)) { 1693 int ncpus = num_online_cpus(), curcpu = -1, nset = 0; 1694 for (i = 0; i < ncpus; i++) 1695 + if (cpumask_test_cpu(i, &current->cpus_allowed)) { 1696 ipath_cdbg(PROC, "%s[%u] affinity set for " 1697 "cpu %d/%d\n", current->comm, 1698 current->pid, i, ncpus);
+1 -1
drivers/pnp/pnpbios/bioscalls.c
··· 481 482 set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); 483 _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); 484 - for (i = 0; i < NR_CPUS; i++) { 485 struct desc_struct *gdt = get_cpu_gdt_table(i); 486 if (!gdt) 487 continue;
··· 481 482 set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); 483 _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); 484 + for_each_possible_cpu(i) { 485 struct desc_struct *gdt = get_cpu_gdt_table(i); 486 if (!gdt) 487 continue;
+2 -1
fs/seq_file.c
··· 468 return -1; 469 } 470 471 - int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits) 472 { 473 if (m->count < m->size) { 474 int len = bitmap_scnprintf(m->buf + m->count,
··· 468 return -1; 469 } 470 471 + int seq_bitmap(struct seq_file *m, const unsigned long *bits, 472 + unsigned int nr_bits) 473 { 474 if (m->count < m->size) { 475 int len = bitmap_scnprintf(m->buf + m->count,
+2 -2
include/acpi/processor.h
··· 127 unsigned int state_count; 128 struct acpi_processor_px *states; 129 struct acpi_psd_package domain_info; 130 - cpumask_t shared_cpu_map; 131 unsigned int shared_type; 132 }; 133 ··· 172 unsigned int state_count; 173 struct acpi_processor_tx_tss *states_tss; 174 struct acpi_tsd_package domain_info; 175 - cpumask_t shared_cpu_map; 176 int (*acpi_processor_get_throttling) (struct acpi_processor * pr); 177 int (*acpi_processor_set_throttling) (struct acpi_processor * pr, 178 int state);
··· 127 unsigned int state_count; 128 struct acpi_processor_px *states; 129 struct acpi_psd_package domain_info; 130 + cpumask_var_t shared_cpu_map; 131 unsigned int shared_type; 132 }; 133 ··· 172 unsigned int state_count; 173 struct acpi_processor_tx_tss *states_tss; 174 struct acpi_tsd_package domain_info; 175 + cpumask_var_t shared_cpu_map; 176 int (*acpi_processor_get_throttling) (struct acpi_processor * pr); 177 int (*acpi_processor_set_throttling) (struct acpi_processor * pr, 178 int state);
+13
include/asm-frv/bitops.h
··· 339 return 31 - bit; 340 } 341 342 /* 343 * special slimline version of fls() for calculating ilog2_u32() 344 * - note: no protection against n == 0
··· 339 return 31 - bit; 340 } 341 342 + /** 343 + * __fls - find last (most-significant) set bit in a long word 344 + * @word: the word to search 345 + * 346 + * Undefined if no set bit exists, so code should check against 0 first. 347 + */ 348 + static inline unsigned long __fls(unsigned long word) 349 + { 350 + unsigned long bit; 351 + asm("scan %1,gr0,%0" : "=r"(bit) : "r"(word)); 352 + return bit; 353 + } 354 + 355 /* 356 * special slimline version of fls() for calculating ilog2_u32() 357 * - note: no protection against n == 0
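Several architectures gain __fls() in this series (frv here; m32r, m68k, mn10300 and xtensa below), all with the same contract: index of the most-significant set bit, undefined for zero. A generic standalone sketch of that contract built on the compiler builtin rather than any arch instruction; generic__fls() is a demo name, not a kernel symbol.

    #include <assert.h>
    #include <limits.h>

    static inline unsigned long generic__fls(unsigned long word)
    {
            /* caller must guarantee word != 0 */
            return (sizeof(long) * CHAR_BIT - 1) - __builtin_clzl(word);
    }

    int main(void)
    {
            assert(generic__fls(1UL) == 0);
            assert(generic__fls(0x80UL) == 7);
            assert(generic__fls(~0UL) == sizeof(long) * CHAR_BIT - 1);
            return 0;
    }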
+1
include/asm-m32r/bitops.h
··· 251 #include <asm-generic/bitops/ffz.h> 252 #include <asm-generic/bitops/__ffs.h> 253 #include <asm-generic/bitops/fls.h> 254 #include <asm-generic/bitops/fls64.h> 255 256 #ifdef __KERNEL__
··· 251 #include <asm-generic/bitops/ffz.h> 252 #include <asm-generic/bitops/__ffs.h> 253 #include <asm-generic/bitops/fls.h> 254 + #include <asm-generic/bitops/__fls.h> 255 #include <asm-generic/bitops/fls64.h> 256 257 #ifdef __KERNEL__
+5
include/asm-m68k/bitops.h
··· 315 return 32 - cnt; 316 } 317 318 #include <asm-generic/bitops/fls64.h> 319 #include <asm-generic/bitops/sched.h> 320 #include <asm-generic/bitops/hweight.h>
··· 315 return 32 - cnt; 316 } 317 318 + static inline int __fls(int x) 319 + { 320 + return fls(x) - 1; 321 + } 322 + 323 #include <asm-generic/bitops/fls64.h> 324 #include <asm-generic/bitops/sched.h> 325 #include <asm-generic/bitops/hweight.h>
+11
include/asm-mn10300/bitops.h
··· 196 } 197 198 /** 199 * ffs - find first bit set 200 * @x: the word to search 201 *
··· 196 } 197 198 /** 199 + * __fls - find last (most-significant) set bit in a long word 200 + * @word: the word to search 201 + * 202 + * Undefined if no set bit exists, so code should check against 0 first. 203 + */ 204 + static inline unsigned long __fls(unsigned long word) 205 + { 206 + return __ilog2_u32(word); 207 + } 208 + 209 + /** 210 * ffs - find first bit set 211 * @x: the word to search 212 *
+11
include/asm-xtensa/bitops.h
··· 82 return 32 - __cntlz(x); 83 } 84 85 #else 86 87 /* Use the generic implementation if we don't have the nsa/nsau instructions. */ ··· 100 # include <asm-generic/bitops/__ffs.h> 101 # include <asm-generic/bitops/ffz.h> 102 # include <asm-generic/bitops/fls.h> 103 104 #endif 105
··· 82 return 32 - __cntlz(x); 83 } 84 85 + /** 86 + * __fls - find last (most-significant) set bit in a long word 87 + * @word: the word to search 88 + * 89 + * Undefined if no set bit exists, so code should check against 0 first. 90 + */ 91 + static inline unsigned long __fls(unsigned long word) 92 + { 93 + return 31 - __cntlz(word); 94 + } 95 #else 96 97 /* Use the generic implementation if we don't have the nsa/nsau instructions. */ ··· 90 # include <asm-generic/bitops/__ffs.h> 91 # include <asm-generic/bitops/ffz.h> 92 # include <asm-generic/bitops/fls.h> 93 + # include <asm-generic/bitops/__fls.h> 94 95 #endif 96
+19 -16
include/linux/bitmap.h
··· 137 (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ 138 ) 139 140 static inline void bitmap_zero(unsigned long *dst, int nbits) 141 { 142 - if (nbits <= BITS_PER_LONG) 143 *dst = 0UL; 144 else { 145 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); ··· 153 static inline void bitmap_fill(unsigned long *dst, int nbits) 154 { 155 size_t nlongs = BITS_TO_LONGS(nbits); 156 - if (nlongs > 1) { 157 int len = (nlongs - 1) * sizeof(unsigned long); 158 memset(dst, 0xff, len); 159 } ··· 163 static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, 164 int nbits) 165 { 166 - if (nbits <= BITS_PER_LONG) 167 *dst = *src; 168 else { 169 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); ··· 174 static inline void bitmap_and(unsigned long *dst, const unsigned long *src1, 175 const unsigned long *src2, int nbits) 176 { 177 - if (nbits <= BITS_PER_LONG) 178 *dst = *src1 & *src2; 179 else 180 __bitmap_and(dst, src1, src2, nbits); ··· 183 static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, 184 const unsigned long *src2, int nbits) 185 { 186 - if (nbits <= BITS_PER_LONG) 187 *dst = *src1 | *src2; 188 else 189 __bitmap_or(dst, src1, src2, nbits); ··· 192 static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, 193 const unsigned long *src2, int nbits) 194 { 195 - if (nbits <= BITS_PER_LONG) 196 *dst = *src1 ^ *src2; 197 else 198 __bitmap_xor(dst, src1, src2, nbits); ··· 201 static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1, 202 const unsigned long *src2, int nbits) 203 { 204 - if (nbits <= BITS_PER_LONG) 205 *dst = *src1 & ~(*src2); 206 else 207 __bitmap_andnot(dst, src1, src2, nbits); ··· 210 static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, 211 int nbits) 212 { 213 - if (nbits <= BITS_PER_LONG) 214 *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); 215 else 216 __bitmap_complement(dst, src, nbits); ··· 219 static inline int bitmap_equal(const unsigned long *src1, 220 const unsigned long *src2, int nbits) 221 { 222 - if (nbits <= BITS_PER_LONG) 223 return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); 224 else 225 return __bitmap_equal(src1, src2, nbits); ··· 228 static inline int bitmap_intersects(const unsigned long *src1, 229 const unsigned long *src2, int nbits) 230 { 231 - if (nbits <= BITS_PER_LONG) 232 return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; 233 else 234 return __bitmap_intersects(src1, src2, nbits); ··· 237 static inline int bitmap_subset(const unsigned long *src1, 238 const unsigned long *src2, int nbits) 239 { 240 - if (nbits <= BITS_PER_LONG) 241 return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); 242 else 243 return __bitmap_subset(src1, src2, nbits); ··· 245 246 static inline int bitmap_empty(const unsigned long *src, int nbits) 247 { 248 - if (nbits <= BITS_PER_LONG) 249 return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); 250 else 251 return __bitmap_empty(src, nbits); ··· 253 254 static inline int bitmap_full(const unsigned long *src, int nbits) 255 { 256 - if (nbits <= BITS_PER_LONG) 257 return ! 
(~(*src) & BITMAP_LAST_WORD_MASK(nbits)); 258 else 259 return __bitmap_full(src, nbits); ··· 261 262 static inline int bitmap_weight(const unsigned long *src, int nbits) 263 { 264 - if (nbits <= BITS_PER_LONG) 265 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); 266 return __bitmap_weight(src, nbits); 267 } ··· 269 static inline void bitmap_shift_right(unsigned long *dst, 270 const unsigned long *src, int n, int nbits) 271 { 272 - if (nbits <= BITS_PER_LONG) 273 *dst = *src >> n; 274 else 275 __bitmap_shift_right(dst, src, n, nbits); ··· 278 static inline void bitmap_shift_left(unsigned long *dst, 279 const unsigned long *src, int n, int nbits) 280 { 281 - if (nbits <= BITS_PER_LONG) 282 *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits); 283 else 284 __bitmap_shift_left(dst, src, n, nbits);
··· 137 (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ 138 ) 139 140 + #define small_const_nbits(nbits) \ 141 + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) 142 + 143 static inline void bitmap_zero(unsigned long *dst, int nbits) 144 { 145 + if (small_const_nbits(nbits)) 146 *dst = 0UL; 147 else { 148 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); ··· 150 static inline void bitmap_fill(unsigned long *dst, int nbits) 151 { 152 size_t nlongs = BITS_TO_LONGS(nbits); 153 + if (!small_const_nbits(nbits)) { 154 int len = (nlongs - 1) * sizeof(unsigned long); 155 memset(dst, 0xff, len); 156 } ··· 160 static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, 161 int nbits) 162 { 163 + if (small_const_nbits(nbits)) 164 *dst = *src; 165 else { 166 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); ··· 171 static inline void bitmap_and(unsigned long *dst, const unsigned long *src1, 172 const unsigned long *src2, int nbits) 173 { 174 + if (small_const_nbits(nbits)) 175 *dst = *src1 & *src2; 176 else 177 __bitmap_and(dst, src1, src2, nbits); ··· 180 static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, 181 const unsigned long *src2, int nbits) 182 { 183 + if (small_const_nbits(nbits)) 184 *dst = *src1 | *src2; 185 else 186 __bitmap_or(dst, src1, src2, nbits); ··· 189 static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, 190 const unsigned long *src2, int nbits) 191 { 192 + if (small_const_nbits(nbits)) 193 *dst = *src1 ^ *src2; 194 else 195 __bitmap_xor(dst, src1, src2, nbits); ··· 198 static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1, 199 const unsigned long *src2, int nbits) 200 { 201 + if (small_const_nbits(nbits)) 202 *dst = *src1 & ~(*src2); 203 else 204 __bitmap_andnot(dst, src1, src2, nbits); ··· 207 static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, 208 int nbits) 209 { 210 + if (small_const_nbits(nbits)) 211 *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); 212 else 213 __bitmap_complement(dst, src, nbits); ··· 216 static inline int bitmap_equal(const unsigned long *src1, 217 const unsigned long *src2, int nbits) 218 { 219 + if (small_const_nbits(nbits)) 220 return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); 221 else 222 return __bitmap_equal(src1, src2, nbits); ··· 225 static inline int bitmap_intersects(const unsigned long *src1, 226 const unsigned long *src2, int nbits) 227 { 228 + if (small_const_nbits(nbits)) 229 return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; 230 else 231 return __bitmap_intersects(src1, src2, nbits); ··· 234 static inline int bitmap_subset(const unsigned long *src1, 235 const unsigned long *src2, int nbits) 236 { 237 + if (small_const_nbits(nbits)) 238 return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); 239 else 240 return __bitmap_subset(src1, src2, nbits); ··· 242 243 static inline int bitmap_empty(const unsigned long *src, int nbits) 244 { 245 + if (small_const_nbits(nbits)) 246 return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); 247 else 248 return __bitmap_empty(src, nbits); ··· 250 251 static inline int bitmap_full(const unsigned long *src, int nbits) 252 { 253 + if (small_const_nbits(nbits)) 254 return ! 
(~(*src) & BITMAP_LAST_WORD_MASK(nbits)); 255 else 256 return __bitmap_full(src, nbits); ··· 258 259 static inline int bitmap_weight(const unsigned long *src, int nbits) 260 { 261 + if (small_const_nbits(nbits)) 262 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); 263 return __bitmap_weight(src, nbits); 264 } ··· 266 static inline void bitmap_shift_right(unsigned long *dst, 267 const unsigned long *src, int n, int nbits) 268 { 269 + if (small_const_nbits(nbits)) 270 *dst = *src >> n; 271 else 272 __bitmap_shift_right(dst, src, n, nbits); ··· 275 static inline void bitmap_shift_left(unsigned long *dst, 276 const unsigned long *src, int n, int nbits) 277 { 278 + if (small_const_nbits(nbits)) 279 *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits); 280 else 281 __bitmap_shift_left(dst, src, n, nbits);
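The small_const_nbits() test above lets single-word operations collapse to one inline instruction when nbits is a compile-time constant no larger than BITS_PER_LONG, while runtime sizes still take the out-of-line helpers. A rough standalone model of that idea with a demo weight operation; the macro and function names here only mimic the shapes in include/linux/bitmap.h and are not the kernel definitions.

    #include <assert.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
    #define SMALL_CONST_NBITS(nbits) \
            (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
    #define LAST_WORD_MASK(nbits) \
            (((nbits) % BITS_PER_LONG) ? (1UL << ((nbits) % BITS_PER_LONG)) - 1 : ~0UL)

    /* looped fallback, standing in for the __bitmap_*() helpers */
    static int slow_weight(const unsigned long *src, int nbits)
    {
            int i, w = 0;

            for (i = 0; i < nbits; i++)
                    if (src[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                            w++;
            return w;
    }

    #define bitmap_weight_demo(src, nbits)                                    \
            (SMALL_CONST_NBITS(nbits)                                         \
                    ? __builtin_popcountl(*(src) & LAST_WORD_MASK(nbits))     \
                    : slow_weight((src), (nbits)))

    int main(void)
    {
            unsigned long one_word = 0xF0UL;
            int runtime_nbits = 8;

            /* constant nbits <= BITS_PER_LONG: single popcount, no loop */
            assert(bitmap_weight_demo(&one_word, 8) == 4);
            /* non-constant nbits: falls back to the looped helper */
            assert(bitmap_weight_demo(&one_word, runtime_nbits) == 4);
            return 0;
    }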
+12 -1
include/linux/bitops.h
··· 134 */ 135 extern unsigned long find_first_zero_bit(const unsigned long *addr, 136 unsigned long size); 137 - 138 #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ 139 140 #ifdef CONFIG_GENERIC_FIND_NEXT_BIT 141
··· 134 */ 135 extern unsigned long find_first_zero_bit(const unsigned long *addr, 136 unsigned long size); 137 #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ 138 + 139 + #ifdef CONFIG_GENERIC_FIND_LAST_BIT 140 + /** 141 + * find_last_bit - find the last set bit in a memory region 142 + * @addr: The address to start the search at 143 + * @size: The maximum size to search 144 + * 145 + * Returns the bit number of the first set bit, or size. 146 + */ 147 + extern unsigned long find_last_bit(const unsigned long *addr, 148 + unsigned long size); 149 + #endif /* CONFIG_GENERIC_FIND_LAST_BIT */ 150 151 #ifdef CONFIG_GENERIC_FIND_NEXT_BIT 152
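find_last_bit() is declared above for CONFIG_GENERIC_FIND_LAST_BIT. A standalone sketch of the documented contract (index of the highest set bit, or size when the region is empty); find_last_bit_demo() is an illustration, not the lib/ implementation the config option selects.

    #include <assert.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long find_last_bit_demo(const unsigned long *addr,
                                            unsigned long size)
    {
            unsigned long words = (size + BITS_PER_LONG - 1) / BITS_PER_LONG;

            while (words--) {
                    unsigned long val = addr[words];

                    /* ignore bits beyond 'size' in a partial topmost word */
                    if ((words + 1) * BITS_PER_LONG > size)
                            val &= (1UL << (size % BITS_PER_LONG)) - 1;
                    if (val)
                            return words * BITS_PER_LONG +
                                   (BITS_PER_LONG - 1 - __builtin_clzl(val));
            }
            return size;            /* no set bit found */
    }

    int main(void)
    {
            unsigned long map[2] = { 0x10UL, 0x1UL };

            assert(find_last_bit_demo(map, 2 * BITS_PER_LONG) == BITS_PER_LONG);
            assert(find_last_bit_demo(map, BITS_PER_LONG) == 4);
            assert(find_last_bit_demo(map, 3) == 3);    /* empty: returns size */
            return 0;
    }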
+91 -130
include/linux/cpumask.h
··· 144 typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; 145 extern cpumask_t _unused_cpumask_arg_; 146 147 #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) 148 static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) 149 { ··· 268 { 269 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); 270 } 271 272 /* 273 * Special-case data structure for "single bit set only" constant CPU masks. ··· 299 extern const unsigned long 300 cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; 301 302 - static inline const cpumask_t *get_cpu_mask(unsigned int cpu) 303 { 304 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; 305 p -= cpu / BITS_PER_LONG; 306 - return (const cpumask_t *)p; 307 } 308 309 /* 310 * In cases where we take the address of the cpumask immediately, 311 * gcc optimizes it out (it's a constant) and there's no huge stack ··· 392 { 393 bitmap_fold(dstp->bits, origp->bits, sz, nbits); 394 } 395 396 #if NR_CPUS == 1 397 398 #define nr_cpu_ids 1 399 #define first_cpu(src) ({ (void)(src); 0; }) 400 #define next_cpu(n, src) ({ (void)(src); 1; }) 401 #define any_online_cpu(mask) 0 402 #define for_each_cpu_mask(cpu, mask) \ 403 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 404 - 405 #else /* NR_CPUS > 1 */ 406 407 extern int nr_cpu_ids; 408 int __first_cpu(const cpumask_t *srcp); 409 int __next_cpu(int n, const cpumask_t *srcp); 410 int __any_online_cpu(const cpumask_t *mask); ··· 419 for ((cpu) = -1; \ 420 (cpu) = next_cpu((cpu), (mask)), \ 421 (cpu) < NR_CPUS; ) 422 #endif 423 424 #if NR_CPUS <= 64 425 426 #define next_cpu_nr(n, src) next_cpu(n, src) ··· 440 (cpu) < nr_cpu_ids; ) 441 442 #endif /* NR_CPUS > 64 */ 443 444 /* 445 * The following particular system cpumasks and operations manage 446 - * possible, present, active and online cpus. Each of them is a fixed size 447 - * bitmap of size NR_CPUS. 448 * 449 - * #ifdef CONFIG_HOTPLUG_CPU 450 - * cpu_possible_map - has bit 'cpu' set iff cpu is populatable 451 - * cpu_present_map - has bit 'cpu' set iff cpu is populated 452 - * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler 453 - * cpu_active_map - has bit 'cpu' set iff cpu available to migration 454 - * #else 455 - * cpu_possible_map - has bit 'cpu' set iff cpu is populated 456 - * cpu_present_map - copy of cpu_possible_map 457 - * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler 458 - * #endif 459 * 460 - * In either case, NR_CPUS is fixed at compile time, as the static 461 - * size of these bitmaps. The cpu_possible_map is fixed at boot 462 - * time, as the set of CPU id's that it is possible might ever 463 - * be plugged in at anytime during the life of that system boot. 464 - * The cpu_present_map is dynamic(*), representing which CPUs 465 - * are currently plugged in. And cpu_online_map is the dynamic 466 - * subset of cpu_present_map, indicating those CPUs available 467 - * for scheduling. 468 * 469 - * If HOTPLUG is enabled, then cpu_possible_map is forced to have 470 * all NR_CPUS bits set, otherwise it is just the set of CPUs that 471 * ACPI reports present at boot. 472 * 473 - * If HOTPLUG is enabled, then cpu_present_map varies dynamically, 474 * depending on what ACPI reports as currently plugged in, otherwise 475 - * cpu_present_map is just a copy of cpu_possible_map. 476 * 477 - * (*) Well, cpu_present_map is dynamic in the hotplug case. If not 478 - * hotplug, it's a copy of cpu_possible_map, hence fixed at boot. 
479 * 480 * Subtleties: 481 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode 482 * assumption that their single CPU is online. The UP 483 - * cpu_{online,possible,present}_maps are placebos. Changing them 484 * will have no useful affect on the following num_*_cpus() 485 * and cpu_*() macros in the UP case. This ugliness is a UP 486 * optimization - don't waste any instructions or memory references 487 * asking if you're online or how many CPUs there are if there is 488 * only one CPU. 489 - * 2) Most SMP arch's #define some of these maps to be some 490 - * other map specific to that arch. Therefore, the following 491 - * must be #define macros, not inlines. To see why, examine 492 - * the assembly code produced by the following. Note that 493 - * set1() writes phys_x_map, but set2() writes x_map: 494 - * int x_map, phys_x_map; 495 - * #define set1(a) x_map = a 496 - * inline void set2(int a) { x_map = a; } 497 - * #define x_map phys_x_map 498 - * main(){ set1(3); set2(5); } 499 */ 500 501 - extern cpumask_t cpu_possible_map; 502 - extern cpumask_t cpu_online_map; 503 - extern cpumask_t cpu_present_map; 504 - extern cpumask_t cpu_active_map; 505 506 #if NR_CPUS > 1 507 - #define num_online_cpus() cpus_weight_nr(cpu_online_map) 508 - #define num_possible_cpus() cpus_weight_nr(cpu_possible_map) 509 - #define num_present_cpus() cpus_weight_nr(cpu_present_map) 510 - #define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) 511 - #define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) 512 - #define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) 513 - #define cpu_active(cpu) cpu_isset((cpu), cpu_active_map) 514 #else 515 #define num_online_cpus() 1 516 #define num_possible_cpus() 1 ··· 512 #endif 513 514 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) 515 - 516 - #define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map) 517 - #define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map) 518 - #define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) 519 520 /* These are the new versions of the cpumask operators: passed by pointer. 521 * The older versions will be implemented in terms of these, then deleted. */ ··· 700 * No static inline type checking - see Subtlety (1) above. 701 */ 702 #define cpumask_test_cpu(cpu, cpumask) \ 703 - test_bit(cpumask_check(cpu), (cpumask)->bits) 704 705 /** 706 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask ··· 943 static inline int cpumask_scnprintf(char *buf, int len, 944 const struct cpumask *srcp) 945 { 946 - return bitmap_scnprintf(buf, len, srcp->bits, nr_cpumask_bits); 947 } 948 949 /** ··· 957 static inline int cpumask_parse_user(const char __user *buf, int len, 958 struct cpumask *dstp) 959 { 960 - return bitmap_parse_user(buf, len, dstp->bits, nr_cpumask_bits); 961 } 962 963 /** ··· 972 static inline int cpulist_scnprintf(char *buf, int len, 973 const struct cpumask *srcp) 974 { 975 - return bitmap_scnlistprintf(buf, len, srcp->bits, nr_cpumask_bits); 976 } 977 978 /** ··· 986 */ 987 static inline int cpulist_parse(const char *buf, struct cpumask *dstp) 988 { 989 - return bitmap_parselist(buf, dstp->bits, nr_cpumask_bits); 990 - } 991 - 992 - /** 993 - * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * 994 - * @bitmap: the bitmap 995 - * 996 - * There are a few places where cpumask_var_t isn't appropriate and 997 - * static cpumasks must be used (eg. very early boot), yet we don't 998 - * expose the definition of 'struct cpumask'. 
999 - * 1000 - * This does the conversion, and can be used as a constant initializer. 1001 - */ 1002 - #define to_cpumask(bitmap) \ 1003 - ((struct cpumask *)(1 ? (bitmap) \ 1004 - : (void *)sizeof(__check_is_bitmap(bitmap)))) 1005 - 1006 - static inline int __check_is_bitmap(const unsigned long *bitmap) 1007 - { 1008 - return 1; 1009 } 1010 1011 /** ··· 1020 #ifdef CONFIG_CPUMASK_OFFSTACK 1021 typedef struct cpumask *cpumask_var_t; 1022 1023 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); 1024 void alloc_bootmem_cpumask_var(cpumask_var_t *mask); 1025 void free_cpumask_var(cpumask_var_t mask); ··· 1030 typedef struct cpumask cpumask_var_t[1]; 1031 1032 static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 1033 { 1034 return true; 1035 } ··· 1053 } 1054 #endif /* CONFIG_CPUMASK_OFFSTACK */ 1055 1056 - /* The pointer versions of the maps, these will become the primary versions. */ 1057 - #define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map) 1058 - #define cpu_online_mask ((const struct cpumask *)&cpu_online_map) 1059 - #define cpu_present_mask ((const struct cpumask *)&cpu_present_map) 1060 - #define cpu_active_mask ((const struct cpumask *)&cpu_active_map) 1061 - 1062 /* It's common to want to use cpu_all_mask in struct member initializers, 1063 * so it has to refer to an address rather than a pointer. */ 1064 extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); ··· 1061 /* First bits of cpu_bit_bitmap are in fact unset. */ 1062 #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) 1063 1064 /* Wrappers for arch boot code to manipulate normally-constant masks */ 1065 - static inline void set_cpu_possible(unsigned int cpu, bool possible) 1066 - { 1067 - if (possible) 1068 - cpumask_set_cpu(cpu, &cpu_possible_map); 1069 - else 1070 - cpumask_clear_cpu(cpu, &cpu_possible_map); 1071 - } 1072 - 1073 - static inline void set_cpu_present(unsigned int cpu, bool present) 1074 - { 1075 - if (present) 1076 - cpumask_set_cpu(cpu, &cpu_present_map); 1077 - else 1078 - cpumask_clear_cpu(cpu, &cpu_present_map); 1079 - } 1080 - 1081 - static inline void set_cpu_online(unsigned int cpu, bool online) 1082 - { 1083 - if (online) 1084 - cpumask_set_cpu(cpu, &cpu_online_map); 1085 - else 1086 - cpumask_clear_cpu(cpu, &cpu_online_map); 1087 - } 1088 - 1089 - static inline void set_cpu_active(unsigned int cpu, bool active) 1090 - { 1091 - if (active) 1092 - cpumask_set_cpu(cpu, &cpu_active_map); 1093 - else 1094 - cpumask_clear_cpu(cpu, &cpu_active_map); 1095 - } 1096 - 1097 - static inline void init_cpu_present(const struct cpumask *src) 1098 - { 1099 - cpumask_copy(&cpu_present_map, src); 1100 - } 1101 - 1102 - static inline void init_cpu_possible(const struct cpumask *src) 1103 - { 1104 - cpumask_copy(&cpu_possible_map, src); 1105 - } 1106 - 1107 - static inline void init_cpu_online(const struct cpumask *src) 1108 - { 1109 - cpumask_copy(&cpu_online_map, src); 1110 - } 1111 #endif /* __LINUX_CPUMASK_H */
··· 144 typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; 145 extern cpumask_t _unused_cpumask_arg_; 146 147 + #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 148 #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) 149 static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) 150 { ··· 267 { 268 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); 269 } 270 + #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 271 + 272 + /** 273 + * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * 274 + * @bitmap: the bitmap 275 + * 276 + * There are a few places where cpumask_var_t isn't appropriate and 277 + * static cpumasks must be used (eg. very early boot), yet we don't 278 + * expose the definition of 'struct cpumask'. 279 + * 280 + * This does the conversion, and can be used as a constant initializer. 281 + */ 282 + #define to_cpumask(bitmap) \ 283 + ((struct cpumask *)(1 ? (bitmap) \ 284 + : (void *)sizeof(__check_is_bitmap(bitmap)))) 285 + 286 + static inline int __check_is_bitmap(const unsigned long *bitmap) 287 + { 288 + return 1; 289 + } 290 291 /* 292 * Special-case data structure for "single bit set only" constant CPU masks. ··· 278 extern const unsigned long 279 cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; 280 281 + static inline const struct cpumask *get_cpu_mask(unsigned int cpu) 282 { 283 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; 284 p -= cpu / BITS_PER_LONG; 285 + return to_cpumask(p); 286 } 287 288 + #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 289 /* 290 * In cases where we take the address of the cpumask immediately, 291 * gcc optimizes it out (it's a constant) and there's no huge stack ··· 370 { 371 bitmap_fold(dstp->bits, origp->bits, sz, nbits); 372 } 373 + #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 374 375 #if NR_CPUS == 1 376 377 #define nr_cpu_ids 1 378 + #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 379 #define first_cpu(src) ({ (void)(src); 0; }) 380 #define next_cpu(n, src) ({ (void)(src); 1; }) 381 #define any_online_cpu(mask) 0 382 #define for_each_cpu_mask(cpu, mask) \ 383 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 384 + #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 385 #else /* NR_CPUS > 1 */ 386 387 extern int nr_cpu_ids; 388 + #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 389 int __first_cpu(const cpumask_t *srcp); 390 int __next_cpu(int n, const cpumask_t *srcp); 391 int __any_online_cpu(const cpumask_t *mask); ··· 394 for ((cpu) = -1; \ 395 (cpu) = next_cpu((cpu), (mask)), \ 396 (cpu) < NR_CPUS; ) 397 + #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 398 #endif 399 400 + #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 401 #if NR_CPUS <= 64 402 403 #define next_cpu_nr(n, src) next_cpu(n, src) ··· 413 (cpu) < nr_cpu_ids; ) 414 415 #endif /* NR_CPUS > 64 */ 416 + #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 417 418 /* 419 * The following particular system cpumasks and operations manage 420 + * possible, present, active and online cpus. 421 * 422 + * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable 423 + * cpu_present_mask - has bit 'cpu' set iff cpu is populated 424 + * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler 425 + * cpu_active_mask - has bit 'cpu' set iff cpu available to migration 426 * 427 + * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online. 
428 * 429 + * The cpu_possible_mask is fixed at boot time, as the set of CPU id's 430 + * that it is possible might ever be plugged in at anytime during the 431 + * life of that system boot. The cpu_present_mask is dynamic(*), 432 + * representing which CPUs are currently plugged in. And 433 + * cpu_online_mask is the dynamic subset of cpu_present_mask, 434 + * indicating those CPUs available for scheduling. 435 + * 436 + * If HOTPLUG is enabled, then cpu_possible_mask is forced to have 437 * all NR_CPUS bits set, otherwise it is just the set of CPUs that 438 * ACPI reports present at boot. 439 * 440 + * If HOTPLUG is enabled, then cpu_present_mask varies dynamically, 441 * depending on what ACPI reports as currently plugged in, otherwise 442 + * cpu_present_mask is just a copy of cpu_possible_mask. 443 * 444 + * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not 445 + * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot. 446 * 447 * Subtleties: 448 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode 449 * assumption that their single CPU is online. The UP 450 + * cpu_{online,possible,present}_masks are placebos. Changing them 451 * will have no useful affect on the following num_*_cpus() 452 * and cpu_*() macros in the UP case. This ugliness is a UP 453 * optimization - don't waste any instructions or memory references 454 * asking if you're online or how many CPUs there are if there is 455 * only one CPU. 456 */ 457 458 + extern const struct cpumask *const cpu_possible_mask; 459 + extern const struct cpumask *const cpu_online_mask; 460 + extern const struct cpumask *const cpu_present_mask; 461 + extern const struct cpumask *const cpu_active_mask; 462 + 463 + /* These strip const, as traditionally they weren't const. */ 464 + #define cpu_possible_map (*(cpumask_t *)cpu_possible_mask) 465 + #define cpu_online_map (*(cpumask_t *)cpu_online_mask) 466 + #define cpu_present_map (*(cpumask_t *)cpu_present_mask) 467 + #define cpu_active_map (*(cpumask_t *)cpu_active_mask) 468 469 #if NR_CPUS > 1 470 + #define num_online_cpus() cpumask_weight(cpu_online_mask) 471 + #define num_possible_cpus() cpumask_weight(cpu_possible_mask) 472 + #define num_present_cpus() cpumask_weight(cpu_present_mask) 473 + #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) 474 + #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) 475 + #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) 476 + #define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) 477 #else 478 #define num_online_cpus() 1 479 #define num_possible_cpus() 1 ··· 495 #endif 496 497 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) 498 499 /* These are the new versions of the cpumask operators: passed by pointer. 500 * The older versions will be implemented in terms of these, then deleted. */ ··· 687 * No static inline type checking - see Subtlety (1) above. 
688 */ 689 #define cpumask_test_cpu(cpu, cpumask) \ 690 + test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) 691 692 /** 693 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask ··· 930 static inline int cpumask_scnprintf(char *buf, int len, 931 const struct cpumask *srcp) 932 { 933 + return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits); 934 } 935 936 /** ··· 944 static inline int cpumask_parse_user(const char __user *buf, int len, 945 struct cpumask *dstp) 946 { 947 + return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); 948 } 949 950 /** ··· 959 static inline int cpulist_scnprintf(char *buf, int len, 960 const struct cpumask *srcp) 961 { 962 + return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp), 963 + nr_cpumask_bits); 964 } 965 966 /** ··· 972 */ 973 static inline int cpulist_parse(const char *buf, struct cpumask *dstp) 974 { 975 + return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); 976 } 977 978 /** ··· 1025 #ifdef CONFIG_CPUMASK_OFFSTACK 1026 typedef struct cpumask *cpumask_var_t; 1027 1028 + bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); 1029 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); 1030 void alloc_bootmem_cpumask_var(cpumask_var_t *mask); 1031 void free_cpumask_var(cpumask_var_t mask); ··· 1034 typedef struct cpumask cpumask_var_t[1]; 1035 1036 static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 1037 + { 1038 + return true; 1039 + } 1040 + 1041 + static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, 1042 + int node) 1043 { 1044 return true; 1045 } ··· 1051 } 1052 #endif /* CONFIG_CPUMASK_OFFSTACK */ 1053 1054 /* It's common to want to use cpu_all_mask in struct member initializers, 1055 * so it has to refer to an address rather than a pointer. */ 1056 extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); ··· 1065 /* First bits of cpu_bit_bitmap are in fact unset. */ 1066 #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) 1067 1068 + #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) 1069 + #define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) 1070 + #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) 1071 + 1072 /* Wrappers for arch boot code to manipulate normally-constant masks */ 1073 + void set_cpu_possible(unsigned int cpu, bool possible); 1074 + void set_cpu_present(unsigned int cpu, bool present); 1075 + void set_cpu_online(unsigned int cpu, bool online); 1076 + void set_cpu_active(unsigned int cpu, bool active); 1077 + void init_cpu_present(const struct cpumask *src); 1078 + void init_cpu_possible(const struct cpumask *src); 1079 + void init_cpu_online(const struct cpumask *src); 1080 #endif /* __LINUX_CPUMASK_H */
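The cpumask_var_t conversions throughout this merge rely on the dual definition above: with CONFIG_CPUMASK_OFFSTACK the mask is heap allocated and alloc_cpumask_var() can fail, otherwise it is a one-element array whose "allocation" always succeeds. A simplified standalone model of that trick follows; the *_demo names and DEMO_OFFSTACK are inventions for the sketch, and real callers (see the ACPI hunks above) still check the allocation and clear the mask themselves.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define DEMO_NR_CPUS 4096

    struct cpumask_demo {
            unsigned long bits[DEMO_NR_CPUS / (8 * sizeof(unsigned long))];
    };

    #ifdef DEMO_OFFSTACK
    /* mask lives on the heap; alloc can fail and must be checked */
    typedef struct cpumask_demo *cpumask_var_demo_t;

    static bool alloc_cpumask_var_demo(cpumask_var_demo_t *mask)
    {
            *mask = malloc(sizeof(struct cpumask_demo));
            return *mask != NULL;
    }
    static void free_cpumask_var_demo(cpumask_var_demo_t mask) { free(mask); }
    #else
    /* mask is a one-element array on the caller's stack; alloc is a no-op */
    typedef struct cpumask_demo cpumask_var_demo_t[1];

    static bool alloc_cpumask_var_demo(cpumask_var_demo_t *mask)
    {
            (void)mask;
            return true;
    }
    static void free_cpumask_var_demo(cpumask_var_demo_t mask) { (void)mask; }
    #endif

    int main(void)
    {
            cpumask_var_demo_t covered;

            if (!alloc_cpumask_var_demo(&covered))
                    return 1;               /* kernel callers return -ENOMEM */

            memset(covered->bits, 0, sizeof(covered->bits)); /* like cpumask_clear() */
            covered->bits[0] |= 1UL << 2;                    /* "cpu 2 covered" */
            printf("word 0 = %#lx\n", covered->bits[0]);

            free_cpumask_var_demo(covered);
            return 0;
    }

Both configurations compile the same caller code because an array of one struct and a pointer to the struct are used identically through the "->" operator; build with and without -DDEMO_OFFSTACK to see the two shapes.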
+1 -1
include/linux/interrupt.h
··· 109 110 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 111 112 - extern cpumask_t irq_default_affinity; 113 114 extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); 115 extern int irq_can_set_affinity(unsigned int irq);
··· 109 110 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 111 112 + extern cpumask_var_t irq_default_affinity; 113 114 extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); 115 extern int irq_can_set_affinity(unsigned int irq);
+2 -2
include/linux/rcuclassic.h
··· 59 int signaled; 60 61 spinlock_t lock ____cacheline_internodealigned_in_smp; 62 - cpumask_t cpumask; /* CPUs that need to switch in order */ 63 - /* for current batch to proceed. */ 64 } ____cacheline_internodealigned_in_smp; 65 66 /* Is batch a before batch b ? */
··· 59 int signaled; 60 61 spinlock_t lock ____cacheline_internodealigned_in_smp; 62 + DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */ 63 + /* current batch to proceed. */ 64 } ____cacheline_internodealigned_in_smp; 65 66 /* Is batch a before batch b ? */
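The rcu_ctrlblk change above embeds a raw DECLARE_BITMAP() instead of a cpumask_t, so the structure carries a fixed NR_CPUS-bit map that accessors can wrap with to_cpumask(). A rough standalone model of how DECLARE_BITMAP() sizes that array; all *_demo/*_DEMO names are placeholders, not the kernel macros.

    #include <assert.h>
    #include <limits.h>

    #define BITS_PER_LONG_DEMO      (sizeof(unsigned long) * CHAR_BIT)
    #define BITS_TO_LONGS_DEMO(n)   (((n) + BITS_PER_LONG_DEMO - 1) / BITS_PER_LONG_DEMO)
    #define DECLARE_BITMAP_DEMO(name, bits) unsigned long name[BITS_TO_LONGS_DEMO(bits)]

    #define DEMO_NR_CPUS 4096

    struct rcu_ctrlblk_demo {
            int signaled;
            DECLARE_BITMAP_DEMO(cpumask, DEMO_NR_CPUS); /* CPUs that need to switch */
    };

    static void set_bit_demo(unsigned long *map, int bit)
    {
            map[bit / BITS_PER_LONG_DEMO] |= 1UL << (bit % BITS_PER_LONG_DEMO);
    }

    static int test_bit_demo(const unsigned long *map, int bit)
    {
            return (map[bit / BITS_PER_LONG_DEMO] >> (bit % BITS_PER_LONG_DEMO)) & 1;
    }

    int main(void)
    {
            struct rcu_ctrlblk_demo rcb = { 0 };

            set_bit_demo(rcb.cpumask, 3);
            assert(test_bit_demo(rcb.cpumask, 3));
            assert(!test_bit_demo(rcb.cpumask, 4));
            return 0;
    }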
+4 -3
include/linux/seq_file.h
··· 50 int seq_dentry(struct seq_file *, struct dentry *, char *); 51 int seq_path_root(struct seq_file *m, struct path *path, struct path *root, 52 char *esc); 53 - int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); 54 - static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) 55 { 56 - return seq_bitmap(m, mask->bits, NR_CPUS); 57 } 58 59 static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
··· 50 int seq_dentry(struct seq_file *, struct dentry *, char *); 51 int seq_path_root(struct seq_file *m, struct path *path, struct path *root, 52 char *esc); 53 + int seq_bitmap(struct seq_file *m, const unsigned long *bits, 54 + unsigned int nr_bits); 55 + static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask) 56 { 57 + return seq_bitmap(m, mask->bits, nr_cpu_ids); 58 } 59 60 static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
+11 -7
include/linux/smp.h
··· 21 u16 priv; 22 }; 23 24 #ifdef CONFIG_SMP 25 26 #include <linux/preempt.h> ··· 67 * Call a function on all other processors 68 */ 69 int smp_call_function(void(*func)(void *info), void *info, int wait); 70 - /* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */ 71 - int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, 72 - int wait); 73 74 - static inline void smp_call_function_many(const struct cpumask *mask, 75 - void (*func)(void *info), void *info, 76 - int wait) 77 { 78 - smp_call_function_mask(*mask, func, info, wait); 79 } 80 81 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
··· 21 u16 priv; 22 }; 23 24 + /* total number of cpus in this system (may exceed NR_CPUS) */ 25 + extern unsigned int total_cpus; 26 + 27 #ifdef CONFIG_SMP 28 29 #include <linux/preempt.h> ··· 64 * Call a function on all other processors 65 */ 66 int smp_call_function(void(*func)(void *info), void *info, int wait); 67 + void smp_call_function_many(const struct cpumask *mask, 68 + void (*func)(void *info), void *info, bool wait); 69 70 + /* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */ 71 + static inline int 72 + smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, 73 + int wait) 74 { 75 + smp_call_function_many(&mask, func, info, wait); 76 + return 0; 77 } 78 79 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
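smp_call_function_many() takes a const struct cpumask * and never copies a cpumask_t by value, so callers can hand it the global masks or a cpumask_var_t directly; the calling CPU is always excluded. A small usage sketch (the callback is made up for illustration):

    #include <linux/smp.h>
    #include <linux/cpumask.h>
    #include <linux/preempt.h>

    static void flush_local_state(void *info)
    {
            /* runs on every targeted CPU, in IPI context */
    }

    static void flush_all_other_cpus(void)
    {
            preempt_disable();
            /* wait=true: return only after all targeted CPUs ran the callback */
            smp_call_function_many(cpu_online_mask, flush_local_state, NULL, true);
            preempt_enable();
    }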
+3 -3
include/linux/stop_machine.h
··· 23 * 24 * This can be thought of as a very heavy write lock, equivalent to 25 * grabbing every spinlock in the kernel. */ 26 - int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); 27 28 /** 29 * __stop_machine: freeze the machine on all CPUs and run this function ··· 34 * Description: This is a special version of the above, which assumes cpus 35 * won't come or go while it's being called. Used by hotplug cpu. 36 */ 37 - int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); 38 #else 39 40 static inline int stop_machine(int (*fn)(void *), void *data, 41 - const cpumask_t *cpus) 42 { 43 int ret; 44 local_irq_disable();
··· 23 * 24 * This can be thought of as a very heavy write lock, equivalent to 25 * grabbing every spinlock in the kernel. */ 26 + int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); 27 28 /** 29 * __stop_machine: freeze the machine on all CPUs and run this function ··· 34 * Description: This is a special version of the above, which assumes cpus 35 * won't come or go while it's being called. Used by hotplug cpu. 36 */ 37 + int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); 38 #else 39 40 static inline int stop_machine(int (*fn)(void *), void *data, 41 + const struct cpumask *cpus) 42 { 43 int ret; 44 local_irq_disable();
+8 -8
include/linux/threads.h
··· 8 */ 9 10 /* 11 - * Maximum supported processors that can run under SMP. This value is 12 - * set via configure setting. The maximum is equal to the size of the 13 - * bitmasks used on that platform, i.e. 32 or 64. Setting this smaller 14 - * saves quite a bit of memory. 15 */ 16 - #ifdef CONFIG_SMP 17 - #define NR_CPUS CONFIG_NR_CPUS 18 - #else 19 - #define NR_CPUS 1 20 #endif 21 22 #define MIN_THREADS_LEFT_FOR_ROOT 4 23
··· 8 */ 9 10 /* 11 + * Maximum supported processors. Setting this smaller saves quite a 12 + * bit of memory. Use nr_cpu_ids instead of this except for static bitmaps. 13 */ 14 + #ifndef CONFIG_NR_CPUS 15 + /* FIXME: This should be fixed in the arch's Kconfig */ 16 + #define CONFIG_NR_CPUS 1 17 #endif 18 + 19 + /* Places which use this should consider cpumask_var_t. */ 20 + #define NR_CPUS CONFIG_NR_CPUS 21 22 #define MIN_THREADS_LEFT_FOR_ROOT 4 23
+2 -2
include/linux/tick.h
··· 84 85 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 86 extern struct tick_device *tick_get_broadcast_device(void); 87 - extern cpumask_t *tick_get_broadcast_mask(void); 88 89 # ifdef CONFIG_TICK_ONESHOT 90 - extern cpumask_t *tick_get_broadcast_oneshot_mask(void); 91 # endif 92 93 # endif /* BROADCAST */
··· 84 85 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 86 extern struct tick_device *tick_get_broadcast_device(void); 87 + extern struct cpumask *tick_get_broadcast_mask(void); 88 89 # ifdef CONFIG_TICK_ONESHOT 90 + extern struct cpumask *tick_get_broadcast_oneshot_mask(void); 91 # endif 92 93 # endif /* BROADCAST */
+4 -9
init/main.c
··· 371 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */ 372 static void __init setup_nr_cpu_ids(void) 373 { 374 - int cpu, highest_cpu = 0; 375 - 376 - for_each_possible_cpu(cpu) 377 - highest_cpu = cpu; 378 - 379 - nr_cpu_ids = highest_cpu + 1; 380 } 381 382 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA ··· 513 { 514 int cpu = smp_processor_id(); 515 /* Mark the boot cpu "present", "online" etc for SMP and UP case */ 516 - cpu_set(cpu, cpu_online_map); 517 - cpu_set(cpu, cpu_present_map); 518 - cpu_set(cpu, cpu_possible_map); 519 } 520 521 void __init __weak smp_setup_processor_id(void)
··· 371 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */ 372 static void __init setup_nr_cpu_ids(void) 373 { 374 + nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; 375 } 376 377 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA ··· 518 { 519 int cpu = smp_processor_id(); 520 /* Mark the boot cpu "present", "online" etc for SMP and UP case */ 521 + set_cpu_online(cpu, true); 522 + set_cpu_present(cpu, true); 523 + set_cpu_possible(cpu, true); 524 } 525 526 void __init __weak smp_setup_processor_id(void)
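The rewritten setup_nr_cpu_ids() derives nr_cpu_ids from the highest set bit of cpu_possible_mask rather than looping over every possible CPU; with CPUs 0-3 possible it yields 4. The same computation in isolation (purely illustrative, and relying on the boot CPU always being possible so the mask is never empty):

    #include <linux/bitops.h>
    #include <linux/cpumask.h>

    static unsigned int compute_nr_cpu_ids(void)
    {
            /* index of the highest possible CPU, plus one to get a count */
            return find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
    }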
+31 -20
kernel/compat.c
··· 454 } 455 456 static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr, 457 - unsigned len, cpumask_t *new_mask) 458 { 459 unsigned long *k; 460 461 - if (len < sizeof(cpumask_t)) 462 - memset(new_mask, 0, sizeof(cpumask_t)); 463 - else if (len > sizeof(cpumask_t)) 464 - len = sizeof(cpumask_t); 465 466 - k = cpus_addr(*new_mask); 467 return compat_get_bitmap(k, user_mask_ptr, len * 8); 468 } 469 ··· 471 unsigned int len, 472 compat_ulong_t __user *user_mask_ptr) 473 { 474 - cpumask_t new_mask; 475 int retval; 476 477 - retval = compat_get_user_cpu_mask(user_mask_ptr, len, &new_mask); 478 - if (retval) 479 - return retval; 480 481 - return sched_setaffinity(pid, &new_mask); 482 } 483 484 asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, 485 compat_ulong_t __user *user_mask_ptr) 486 { 487 int ret; 488 - cpumask_t mask; 489 unsigned long *k; 490 - unsigned int min_length = sizeof(cpumask_t); 491 492 - if (NR_CPUS <= BITS_PER_COMPAT_LONG) 493 min_length = sizeof(compat_ulong_t); 494 495 if (len < min_length) 496 return -EINVAL; 497 498 - ret = sched_getaffinity(pid, &mask); 499 if (ret < 0) 500 - return ret; 501 502 - k = cpus_addr(mask); 503 ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8); 504 - if (ret) 505 - return ret; 506 507 - return min_length; 508 } 509 510 int get_compat_itimerspec(struct itimerspec *dst,
··· 454 } 455 456 static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr, 457 + unsigned len, struct cpumask *new_mask) 458 { 459 unsigned long *k; 460 461 + if (len < cpumask_size()) 462 + memset(new_mask, 0, cpumask_size()); 463 + else if (len > cpumask_size()) 464 + len = cpumask_size(); 465 466 + k = cpumask_bits(new_mask); 467 return compat_get_bitmap(k, user_mask_ptr, len * 8); 468 } 469 ··· 471 unsigned int len, 472 compat_ulong_t __user *user_mask_ptr) 473 { 474 + cpumask_var_t new_mask; 475 int retval; 476 477 + if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 478 + return -ENOMEM; 479 480 + retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask); 481 + if (retval) 482 + goto out; 483 + 484 + retval = sched_setaffinity(pid, new_mask); 485 + out: 486 + free_cpumask_var(new_mask); 487 + return retval; 488 } 489 490 asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, 491 compat_ulong_t __user *user_mask_ptr) 492 { 493 int ret; 494 + cpumask_var_t mask; 495 unsigned long *k; 496 + unsigned int min_length = cpumask_size(); 497 498 + if (nr_cpu_ids <= BITS_PER_COMPAT_LONG) 499 min_length = sizeof(compat_ulong_t); 500 501 if (len < min_length) 502 return -EINVAL; 503 504 + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 505 + return -ENOMEM; 506 + 507 + ret = sched_getaffinity(pid, mask); 508 if (ret < 0) 509 + goto out; 510 511 + k = cpumask_bits(mask); 512 ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8); 513 + if (ret == 0) 514 + ret = min_length; 515 516 + out: 517 + free_cpumask_var(mask); 518 + return ret; 519 } 520 521 int get_compat_itimerspec(struct itimerspec *dst,
+99 -45
kernel/cpu.c
··· 15 #include <linux/stop_machine.h> 16 #include <linux/mutex.h> 17 18 - /* 19 - * Represents all cpu's present in the system 20 - * In systems capable of hotplug, this map could dynamically grow 21 - * as new cpu's are detected in the system via any platform specific 22 - * method, such as ACPI for e.g. 23 - */ 24 - cpumask_t cpu_present_map __read_mostly; 25 - EXPORT_SYMBOL(cpu_present_map); 26 - 27 - /* 28 - * Represents all cpu's that are currently online. 29 - */ 30 - cpumask_t cpu_online_map __read_mostly; 31 - EXPORT_SYMBOL(cpu_online_map); 32 - 33 - #ifdef CONFIG_INIT_ALL_POSSIBLE 34 - cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; 35 - #else 36 - cpumask_t cpu_possible_map __read_mostly; 37 - #endif 38 - EXPORT_SYMBOL(cpu_possible_map); 39 - 40 #ifdef CONFIG_SMP 41 - /* Serializes the updates to cpu_online_map, cpu_present_map */ 42 static DEFINE_MUTEX(cpu_add_remove_lock); 43 44 static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); ··· 42 mutex_init(&cpu_hotplug.lock); 43 cpu_hotplug.refcount = 0; 44 } 45 - 46 - cpumask_t cpu_active_map; 47 48 #ifdef CONFIG_HOTPLUG_CPU 49 ··· 73 74 /* 75 * The following two API's must be used when attempting 76 - * to serialize the updates to cpu_online_map, cpu_present_map. 77 */ 78 void cpu_maps_update_begin(void) 79 { ··· 194 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) 195 { 196 int err, nr_calls = 0; 197 - cpumask_t old_allowed, tmp; 198 void *hcpu = (void *)(long)cpu; 199 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 200 struct take_cpu_down_param tcd_param = { ··· 207 208 if (!cpu_online(cpu)) 209 return -EINVAL; 210 211 cpu_hotplug_begin(); 212 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, ··· 225 } 226 227 /* Ensure that we are not runnable on dying cpu */ 228 - old_allowed = current->cpus_allowed; 229 - cpus_setall(tmp); 230 - cpu_clear(cpu, tmp); 231 - set_cpus_allowed_ptr(current, &tmp); 232 - tmp = cpumask_of_cpu(cpu); 233 234 - err = __stop_machine(take_cpu_down, &tcd_param, &tmp); 235 if (err) { 236 /* CPU didn't die: tell everyone. Can't complain. */ 237 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, ··· 255 check_for_tasks(cpu); 256 257 out_allowed: 258 - set_cpus_allowed_ptr(current, &old_allowed); 259 out_release: 260 cpu_hotplug_done(); 261 if (!err) { ··· 263 hcpu) == NOTIFY_BAD) 264 BUG(); 265 } 266 return err; 267 } 268 ··· 282 283 /* 284 * Make sure the all cpus did the reschedule and are not 285 - * using stale version of the cpu_active_map. 286 * This is not strictly necessary becuase stop_machine() 287 * that we run down the line already provides the required 288 * synchronization. 
But it's really a side effect and we do not ··· 346 int __cpuinit cpu_up(unsigned int cpu) 347 { 348 int err = 0; 349 - if (!cpu_isset(cpu, cpu_possible_map)) { 350 printk(KERN_ERR "can't online cpu %d because it is not " 351 "configured as may-hotadd at boot time\n", cpu); 352 #if defined(CONFIG_IA64) || defined(CONFIG_X86_64) ··· 371 } 372 373 #ifdef CONFIG_PM_SLEEP_SMP 374 - static cpumask_t frozen_cpus; 375 376 int disable_nonboot_cpus(void) 377 { 378 int cpu, first_cpu, error = 0; 379 380 cpu_maps_update_begin(); 381 - first_cpu = first_cpu(cpu_online_map); 382 /* We take down all of the non-boot CPUs in one shot to avoid races 383 * with the userspace trying to use the CPU hotplug at the same time 384 */ 385 - cpus_clear(frozen_cpus); 386 printk("Disabling non-boot CPUs ...\n"); 387 for_each_online_cpu(cpu) { 388 if (cpu == first_cpu) 389 continue; 390 error = _cpu_down(cpu, 1); 391 if (!error) { 392 - cpu_set(cpu, frozen_cpus); 393 printk("CPU%d is down\n", cpu); 394 } else { 395 printk(KERN_ERR "Error taking CPU%d down: %d\n", ··· 415 /* Allow everyone to use the CPU hotplug again */ 416 cpu_maps_update_begin(); 417 cpu_hotplug_disabled = 0; 418 - if (cpus_empty(frozen_cpus)) 419 goto out; 420 421 printk("Enabling non-boot CPUs ...\n"); 422 - for_each_cpu_mask_nr(cpu, frozen_cpus) { 423 error = _cpu_up(cpu, 1); 424 if (!error) { 425 printk("CPU%d is up\n", cpu); ··· 427 } 428 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); 429 } 430 - cpus_clear(frozen_cpus); 431 out: 432 cpu_maps_update_done(); 433 } 434 #endif /* CONFIG_PM_SLEEP_SMP */ 435 436 /** ··· 454 unsigned long val = CPU_STARTING; 455 456 #ifdef CONFIG_PM_SLEEP_SMP 457 - if (cpu_isset(cpu, frozen_cpus)) 458 val = CPU_STARTING_FROZEN; 459 #endif /* CONFIG_PM_SLEEP_SMP */ 460 raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu); ··· 466 * cpu_bit_bitmap[] is a special, "compressed" data structure that 467 * represents all NR_CPUS bits binary values of 1<<nr. 468 * 469 - * It is used by cpumask_of_cpu() to get a constant address to a CPU 470 * mask value that has a single bit set only. 471 */ 472 ··· 489 490 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 491 EXPORT_SYMBOL(cpu_all_bits);
··· 15 #include <linux/stop_machine.h> 16 #include <linux/mutex.h> 17 18 #ifdef CONFIG_SMP 19 + /* Serializes the updates to cpu_online_mask, cpu_present_mask */ 20 static DEFINE_MUTEX(cpu_add_remove_lock); 21 22 static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); ··· 64 mutex_init(&cpu_hotplug.lock); 65 cpu_hotplug.refcount = 0; 66 } 67 68 #ifdef CONFIG_HOTPLUG_CPU 69 ··· 97 98 /* 99 * The following two API's must be used when attempting 100 + * to serialize the updates to cpu_online_mask, cpu_present_mask. 101 */ 102 void cpu_maps_update_begin(void) 103 { ··· 218 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) 219 { 220 int err, nr_calls = 0; 221 + cpumask_var_t old_allowed; 222 void *hcpu = (void *)(long)cpu; 223 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 224 struct take_cpu_down_param tcd_param = { ··· 231 232 if (!cpu_online(cpu)) 233 return -EINVAL; 234 + 235 + if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL)) 236 + return -ENOMEM; 237 238 cpu_hotplug_begin(); 239 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, ··· 246 } 247 248 /* Ensure that we are not runnable on dying cpu */ 249 + cpumask_copy(old_allowed, &current->cpus_allowed); 250 + set_cpus_allowed_ptr(current, 251 + cpumask_of(cpumask_any_but(cpu_online_mask, cpu))); 252 253 + err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); 254 if (err) { 255 /* CPU didn't die: tell everyone. Can't complain. */ 256 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, ··· 278 check_for_tasks(cpu); 279 280 out_allowed: 281 + set_cpus_allowed_ptr(current, old_allowed); 282 out_release: 283 cpu_hotplug_done(); 284 if (!err) { ··· 286 hcpu) == NOTIFY_BAD) 287 BUG(); 288 } 289 + free_cpumask_var(old_allowed); 290 return err; 291 } 292 ··· 304 305 /* 306 * Make sure the all cpus did the reschedule and are not 307 + * using stale version of the cpu_active_mask. 308 * This is not strictly necessary becuase stop_machine() 309 * that we run down the line already provides the required 310 * synchronization. 
But it's really a side effect and we do not ··· 368 int __cpuinit cpu_up(unsigned int cpu) 369 { 370 int err = 0; 371 + if (!cpu_possible(cpu)) { 372 printk(KERN_ERR "can't online cpu %d because it is not " 373 "configured as may-hotadd at boot time\n", cpu); 374 #if defined(CONFIG_IA64) || defined(CONFIG_X86_64) ··· 393 } 394 395 #ifdef CONFIG_PM_SLEEP_SMP 396 + static cpumask_var_t frozen_cpus; 397 398 int disable_nonboot_cpus(void) 399 { 400 int cpu, first_cpu, error = 0; 401 402 cpu_maps_update_begin(); 403 + first_cpu = cpumask_first(cpu_online_mask); 404 /* We take down all of the non-boot CPUs in one shot to avoid races 405 * with the userspace trying to use the CPU hotplug at the same time 406 */ 407 + cpumask_clear(frozen_cpus); 408 printk("Disabling non-boot CPUs ...\n"); 409 for_each_online_cpu(cpu) { 410 if (cpu == first_cpu) 411 continue; 412 error = _cpu_down(cpu, 1); 413 if (!error) { 414 + cpumask_set_cpu(cpu, frozen_cpus); 415 printk("CPU%d is down\n", cpu); 416 } else { 417 printk(KERN_ERR "Error taking CPU%d down: %d\n", ··· 437 /* Allow everyone to use the CPU hotplug again */ 438 cpu_maps_update_begin(); 439 cpu_hotplug_disabled = 0; 440 + if (cpumask_empty(frozen_cpus)) 441 goto out; 442 443 printk("Enabling non-boot CPUs ...\n"); 444 + for_each_cpu(cpu, frozen_cpus) { 445 error = _cpu_up(cpu, 1); 446 if (!error) { 447 printk("CPU%d is up\n", cpu); ··· 449 } 450 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); 451 } 452 + cpumask_clear(frozen_cpus); 453 out: 454 cpu_maps_update_done(); 455 } 456 + 457 + static int alloc_frozen_cpus(void) 458 + { 459 + if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO)) 460 + return -ENOMEM; 461 + return 0; 462 + } 463 + core_initcall(alloc_frozen_cpus); 464 #endif /* CONFIG_PM_SLEEP_SMP */ 465 466 /** ··· 468 unsigned long val = CPU_STARTING; 469 470 #ifdef CONFIG_PM_SLEEP_SMP 471 + if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus)) 472 val = CPU_STARTING_FROZEN; 473 #endif /* CONFIG_PM_SLEEP_SMP */ 474 raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu); ··· 480 * cpu_bit_bitmap[] is a special, "compressed" data structure that 481 * represents all NR_CPUS bits binary values of 1<<nr. 482 * 483 + * It is used by cpumask_of() to get a constant address to a CPU 484 * mask value that has a single bit set only. 
485 */ 486 ··· 503 504 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 505 EXPORT_SYMBOL(cpu_all_bits); 506 + 507 + #ifdef CONFIG_INIT_ALL_POSSIBLE 508 + static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly 509 + = CPU_BITS_ALL; 510 + #else 511 + static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; 512 + #endif 513 + const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); 514 + EXPORT_SYMBOL(cpu_possible_mask); 515 + 516 + static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly; 517 + const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits); 518 + EXPORT_SYMBOL(cpu_online_mask); 519 + 520 + static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly; 521 + const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits); 522 + EXPORT_SYMBOL(cpu_present_mask); 523 + 524 + static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly; 525 + const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits); 526 + EXPORT_SYMBOL(cpu_active_mask); 527 + 528 + void set_cpu_possible(unsigned int cpu, bool possible) 529 + { 530 + if (possible) 531 + cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits)); 532 + else 533 + cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits)); 534 + } 535 + 536 + void set_cpu_present(unsigned int cpu, bool present) 537 + { 538 + if (present) 539 + cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits)); 540 + else 541 + cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits)); 542 + } 543 + 544 + void set_cpu_online(unsigned int cpu, bool online) 545 + { 546 + if (online) 547 + cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); 548 + else 549 + cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); 550 + } 551 + 552 + void set_cpu_active(unsigned int cpu, bool active) 553 + { 554 + if (active) 555 + cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); 556 + else 557 + cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits)); 558 + } 559 + 560 + void init_cpu_present(const struct cpumask *src) 561 + { 562 + cpumask_copy(to_cpumask(cpu_present_bits), src); 563 + } 564 + 565 + void init_cpu_possible(const struct cpumask *src) 566 + { 567 + cpumask_copy(to_cpumask(cpu_possible_bits), src); 568 + } 569 + 570 + void init_cpu_online(const struct cpumask *src) 571 + { 572 + cpumask_copy(to_cpumask(cpu_online_bits), src); 573 + }
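With the possible/present/online/active maps now private bitmaps in kernel/cpu.c, architecture boot code is expected to go through the set_cpu_*() accessors above instead of writing cpu_*_map directly. A hedged sketch of what an arch's enumeration path might look like after conversion (the hook is hypothetical):

    #include <linux/cpumask.h>
    #include <linux/init.h>

    /* Hypothetical arch hook: firmware reported 'ncpus' processors. */
    static void __init register_detected_cpus(unsigned int ncpus)
    {
            unsigned int cpu;

            for (cpu = 0; cpu < ncpus && cpu < NR_CPUS; cpu++) {
                    set_cpu_possible(cpu, true);
                    set_cpu_present(cpu, true);
            }
            /* the boot CPU is marked online separately, see boot_cpu_init() */
    }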
+9 -2
kernel/irq/manage.c
··· 16 #include "internals.h" 17 18 #ifdef CONFIG_SMP 19 20 - cpumask_t irq_default_affinity = CPU_MASK_ALL; 21 22 /** 23 * synchronize_irq - wait for pending IRQ handlers (on other CPUs) ··· 134 desc->status &= ~IRQ_AFFINITY_SET; 135 } 136 137 - cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity); 138 set_affinity: 139 desc->chip->set_affinity(irq, &desc->affinity); 140
··· 16 #include "internals.h" 17 18 #ifdef CONFIG_SMP 19 + cpumask_var_t irq_default_affinity; 20 21 + static int init_irq_default_affinity(void) 22 + { 23 + alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL); 24 + cpumask_setall(irq_default_affinity); 25 + return 0; 26 + } 27 + core_initcall(init_irq_default_affinity); 28 29 /** 30 * synchronize_irq - wait for pending IRQ handlers (on other CPUs) ··· 127 desc->status &= ~IRQ_AFFINITY_SET; 128 } 129 130 + cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity); 131 set_affinity: 132 desc->chip->set_affinity(irq, &desc->affinity); 133
+23 -13
kernel/irq/proc.c
··· 20 static int irq_affinity_proc_show(struct seq_file *m, void *v) 21 { 22 struct irq_desc *desc = irq_to_desc((long)m->private); 23 - cpumask_t *mask = &desc->affinity; 24 25 #ifdef CONFIG_GENERIC_PENDING_IRQ 26 if (desc->status & IRQ_MOVE_PENDING) ··· 54 if (err) 55 goto free_cpumask; 56 57 - if (!is_affinity_mask_valid(*new_value)) { 58 err = -EINVAL; 59 goto free_cpumask; 60 } ··· 93 94 static int default_affinity_show(struct seq_file *m, void *v) 95 { 96 - seq_cpumask(m, &irq_default_affinity); 97 seq_putc(m, '\n'); 98 return 0; 99 } ··· 101 static ssize_t default_affinity_write(struct file *file, 102 const char __user *buffer, size_t count, loff_t *ppos) 103 { 104 - cpumask_t new_value; 105 int err; 106 107 - err = cpumask_parse_user(buffer, count, &new_value); 108 - if (err) 109 - return err; 110 111 - if (!is_affinity_mask_valid(new_value)) 112 - return -EINVAL; 113 114 /* 115 * Do not allow disabling IRQs completely - it's a too easy 116 * way to make the system unusable accidentally :-) At least 117 * one online CPU still has to be targeted. 118 */ 119 - if (!cpus_intersects(new_value, cpu_online_map)) 120 - return -EINVAL; 121 122 - irq_default_affinity = new_value; 123 124 - return count; 125 } 126 127 static int default_affinity_open(struct inode *inode, struct file *file)
··· 20 static int irq_affinity_proc_show(struct seq_file *m, void *v) 21 { 22 struct irq_desc *desc = irq_to_desc((long)m->private); 23 + const struct cpumask *mask = &desc->affinity; 24 25 #ifdef CONFIG_GENERIC_PENDING_IRQ 26 if (desc->status & IRQ_MOVE_PENDING) ··· 54 if (err) 55 goto free_cpumask; 56 57 + if (!is_affinity_mask_valid(new_value)) { 58 err = -EINVAL; 59 goto free_cpumask; 60 } ··· 93 94 static int default_affinity_show(struct seq_file *m, void *v) 95 { 96 + seq_cpumask(m, irq_default_affinity); 97 seq_putc(m, '\n'); 98 return 0; 99 } ··· 101 static ssize_t default_affinity_write(struct file *file, 102 const char __user *buffer, size_t count, loff_t *ppos) 103 { 104 + cpumask_var_t new_value; 105 int err; 106 107 + if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) 108 + return -ENOMEM; 109 110 + err = cpumask_parse_user(buffer, count, new_value); 111 + if (err) 112 + goto out; 113 + 114 + if (!is_affinity_mask_valid(new_value)) { 115 + err = -EINVAL; 116 + goto out; 117 + } 118 119 /* 120 * Do not allow disabling IRQs completely - it's a too easy 121 * way to make the system unusable accidentally :-) At least 122 * one online CPU still has to be targeted. 123 */ 124 + if (!cpumask_intersects(new_value, cpu_online_mask)) { 125 + err = -EINVAL; 126 + goto out; 127 + } 128 129 + cpumask_copy(irq_default_affinity, new_value); 130 + err = count; 131 132 + out: 133 + free_cpumask_var(new_value); 134 + return err; 135 } 136 137 static int default_affinity_open(struct inode *inode, struct file *file)
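The rewritten default_affinity_write() is the shape this series uses for every /proc or sysfs writer: allocate a cpumask_var_t, parse into it, validate, copy on success, and free on every exit path. The same pattern reduced to a reusable sketch (the helper name and the online-CPU check are illustrative):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    /* Illustrative: parse a user-supplied mask, accepting only masks that
     * intersect the online CPUs, and store the result in 'result'. */
    static int parse_online_mask(const char __user *buf, unsigned long count,
                                 struct cpumask *result)
    {
            cpumask_var_t new_value;
            int err;

            if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
                    return -ENOMEM;

            err = cpumask_parse_user(buf, count, new_value);
            if (err)
                    goto out;

            if (!cpumask_intersects(new_value, cpu_online_mask)) {
                    err = -EINVAL;
                    goto out;
            }

            cpumask_copy(result, new_value);
    out:
            free_cpumask_var(new_value);
            return err;
    }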
+1 -1
kernel/kexec.c
··· 1116 struct elf_prstatus prstatus; 1117 u32 *buf; 1118 1119 - if ((cpu < 0) || (cpu >= NR_CPUS)) 1120 return; 1121 1122 /* Using ELF notes here is opportunistic.
··· 1116 struct elf_prstatus prstatus; 1117 u32 *buf; 1118 1119 + if ((cpu < 0) || (cpu >= nr_cpu_ids)) 1120 return; 1121 1122 /* Using ELF notes here is opportunistic.
+1 -1
kernel/power/poweroff.c
··· 27 static void handle_poweroff(int key, struct tty_struct *tty) 28 { 29 /* run sysrq poweroff on boot cpu */ 30 - schedule_work_on(first_cpu(cpu_online_map), &poweroff_work); 31 } 32 33 static struct sysrq_key_op sysrq_poweroff_op = {
··· 27 static void handle_poweroff(int key, struct tty_struct *tty) 28 { 29 /* run sysrq poweroff on boot cpu */ 30 + schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work); 31 } 32 33 static struct sysrq_key_op sysrq_poweroff_op = {
+25 -13
kernel/profile.c
··· 45 int prof_on __read_mostly; 46 EXPORT_SYMBOL_GPL(prof_on); 47 48 - static cpumask_t prof_cpu_mask = CPU_MASK_ALL; 49 #ifdef CONFIG_SMP 50 static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); 51 static DEFINE_PER_CPU(int, cpu_profile_flip); ··· 113 buffer_bytes = prof_len*sizeof(atomic_t); 114 if (!slab_is_available()) { 115 prof_buffer = alloc_bootmem(buffer_bytes); 116 return 0; 117 } 118 119 prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); 120 if (prof_buffer) ··· 132 if (prof_buffer) 133 return 0; 134 135 return -ENOMEM; 136 } 137 ··· 391 return NOTIFY_BAD; 392 case CPU_ONLINE: 393 case CPU_ONLINE_FROZEN: 394 - cpu_set(cpu, prof_cpu_mask); 395 break; 396 case CPU_UP_CANCELED: 397 case CPU_UP_CANCELED_FROZEN: 398 case CPU_DEAD: 399 case CPU_DEAD_FROZEN: 400 - cpu_clear(cpu, prof_cpu_mask); 401 if (per_cpu(cpu_profile_hits, cpu)[0]) { 402 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); 403 per_cpu(cpu_profile_hits, cpu)[0] = NULL; ··· 437 438 if (type == CPU_PROFILING && timer_hook) 439 timer_hook(regs); 440 - if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask)) 441 profile_hit(type, (void *)profile_pc(regs)); 442 } 443 ··· 450 static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, 451 int count, int *eof, void *data) 452 { 453 - int len = cpumask_scnprintf(page, count, (cpumask_t *)data); 454 if (count - len < 2) 455 return -EINVAL; 456 len += sprintf(page + len, "\n"); ··· 460 static int prof_cpu_mask_write_proc(struct file *file, 461 const char __user *buffer, unsigned long count, void *data) 462 { 463 - cpumask_t *mask = (cpumask_t *)data; 464 unsigned long full_count = count, err; 465 - cpumask_t new_value; 466 467 - err = cpumask_parse_user(buffer, count, &new_value); 468 - if (err) 469 - return err; 470 471 - *mask = new_value; 472 - return full_count; 473 } 474 475 void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir) ··· 484 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); 485 if (!entry) 486 return; 487 - entry->data = (void *)&prof_cpu_mask; 488 entry->read_proc = prof_cpu_mask_read_proc; 489 entry->write_proc = prof_cpu_mask_write_proc; 490 }
··· 45 int prof_on __read_mostly; 46 EXPORT_SYMBOL_GPL(prof_on); 47 48 + static cpumask_var_t prof_cpu_mask; 49 #ifdef CONFIG_SMP 50 static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); 51 static DEFINE_PER_CPU(int, cpu_profile_flip); ··· 113 buffer_bytes = prof_len*sizeof(atomic_t); 114 if (!slab_is_available()) { 115 prof_buffer = alloc_bootmem(buffer_bytes); 116 + alloc_bootmem_cpumask_var(&prof_cpu_mask); 117 return 0; 118 } 119 + 120 + if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL)) 121 + return -ENOMEM; 122 123 prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); 124 if (prof_buffer) ··· 128 if (prof_buffer) 129 return 0; 130 131 + free_cpumask_var(prof_cpu_mask); 132 return -ENOMEM; 133 } 134 ··· 386 return NOTIFY_BAD; 387 case CPU_ONLINE: 388 case CPU_ONLINE_FROZEN: 389 + if (prof_cpu_mask != NULL) 390 + cpumask_set_cpu(cpu, prof_cpu_mask); 391 break; 392 case CPU_UP_CANCELED: 393 case CPU_UP_CANCELED_FROZEN: 394 case CPU_DEAD: 395 case CPU_DEAD_FROZEN: 396 + if (prof_cpu_mask != NULL) 397 + cpumask_clear_cpu(cpu, prof_cpu_mask); 398 if (per_cpu(cpu_profile_hits, cpu)[0]) { 399 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); 400 per_cpu(cpu_profile_hits, cpu)[0] = NULL; ··· 430 431 if (type == CPU_PROFILING && timer_hook) 432 timer_hook(regs); 433 + if (!user_mode(regs) && prof_cpu_mask != NULL && 434 + cpumask_test_cpu(smp_processor_id(), prof_cpu_mask)) 435 profile_hit(type, (void *)profile_pc(regs)); 436 } 437 ··· 442 static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, 443 int count, int *eof, void *data) 444 { 445 + int len = cpumask_scnprintf(page, count, data); 446 if (count - len < 2) 447 return -EINVAL; 448 len += sprintf(page + len, "\n"); ··· 452 static int prof_cpu_mask_write_proc(struct file *file, 453 const char __user *buffer, unsigned long count, void *data) 454 { 455 + struct cpumask *mask = data; 456 unsigned long full_count = count, err; 457 + cpumask_var_t new_value; 458 459 + if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) 460 + return -ENOMEM; 461 462 + err = cpumask_parse_user(buffer, count, new_value); 463 + if (!err) { 464 + cpumask_copy(mask, new_value); 465 + err = full_count; 466 + } 467 + free_cpumask_var(new_value); 468 + return err; 469 } 470 471 void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir) ··· 472 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); 473 if (!entry) 474 return; 475 + entry->data = prof_cpu_mask; 476 entry->read_proc = prof_cpu_mask_read_proc; 477 entry->write_proc = prof_cpu_mask_write_proc; 478 }
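profile_init() now chooses between alloc_bootmem_cpumask_var() (when slab is not yet up) and alloc_cpumask_var(), and every later user checks prof_cpu_mask against NULL since the late allocation can fail. The early-vs-late choice in isolation (variable and function names illustrative):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    static cpumask_var_t tracked_cpus;

    static int __init tracked_cpus_init(void)
    {
            if (!slab_is_available()) {
                    /* too early for kmalloc: carve the bitmap out of bootmem */
                    alloc_bootmem_cpumask_var(&tracked_cpus);
                    return 0;
            }
            if (!alloc_cpumask_var(&tracked_cpus, GFP_KERNEL))
                    return -ENOMEM;
            return 0;
    }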
+17 -15
kernel/rcuclassic.c
··· 63 .completed = -300, 64 .pending = -300, 65 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), 66 - .cpumask = CPU_MASK_NONE, 67 }; 68 static struct rcu_ctrlblk rcu_bh_ctrlblk = { 69 .cur = -300, 70 .completed = -300, 71 .pending = -300, 72 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), 73 - .cpumask = CPU_MASK_NONE, 74 }; 75 76 DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L }; ··· 85 struct rcu_ctrlblk *rcp) 86 { 87 int cpu; 88 - cpumask_t cpumask; 89 unsigned long flags; 90 91 set_need_resched(); ··· 95 * Don't send IPI to itself. With irqs disabled, 96 * rdp->cpu is the current cpu. 97 * 98 - * cpu_online_map is updated by the _cpu_down() 99 * using __stop_machine(). Since we're in irqs disabled 100 * section, __stop_machine() is not exectuting, hence 101 - * the cpu_online_map is stable. 102 * 103 * However, a cpu might have been offlined _just_ before 104 * we disabled irqs while entering here. ··· 106 * notification, leading to the offlined cpu's bit 107 * being set in the rcp->cpumask. 108 * 109 - * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent 110 * sending smp_reschedule() to an offlined CPU. 111 */ 112 - cpus_and(cpumask, rcp->cpumask, cpu_online_map); 113 - cpu_clear(rdp->cpu, cpumask); 114 - for_each_cpu_mask_nr(cpu, cpumask) 115 - smp_send_reschedule(cpu); 116 } 117 spin_unlock_irqrestore(&rcp->lock, flags); 118 } ··· 193 194 printk(KERN_ERR "INFO: RCU detected CPU stalls:"); 195 for_each_possible_cpu(cpu) { 196 - if (cpu_isset(cpu, rcp->cpumask)) 197 printk(" %d", cpu); 198 } 199 printk(" (detected by %d, t=%ld jiffies)\n", ··· 221 long delta; 222 223 delta = jiffies - rcp->jiffies_stall; 224 - if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0) { 225 226 /* We haven't checked in, so go dump stack. */ 227 print_cpu_stall(rcp); ··· 394 * unnecessarily. 395 */ 396 smp_mb(); 397 - cpumask_andnot(&rcp->cpumask, cpu_online_mask, nohz_cpu_mask); 398 399 rcp->signaled = 0; 400 } ··· 408 */ 409 static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) 410 { 411 - cpu_clear(cpu, rcp->cpumask); 412 - if (cpus_empty(rcp->cpumask)) { 413 /* batch completed ! */ 414 rcp->completed = rcp->cur; 415 rcu_start_batch(rcp);
··· 63 .completed = -300, 64 .pending = -300, 65 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), 66 + .cpumask = CPU_BITS_NONE, 67 }; 68 static struct rcu_ctrlblk rcu_bh_ctrlblk = { 69 .cur = -300, 70 .completed = -300, 71 .pending = -300, 72 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), 73 + .cpumask = CPU_BITS_NONE, 74 }; 75 76 DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L }; ··· 85 struct rcu_ctrlblk *rcp) 86 { 87 int cpu; 88 unsigned long flags; 89 90 set_need_resched(); ··· 96 * Don't send IPI to itself. With irqs disabled, 97 * rdp->cpu is the current cpu. 98 * 99 + * cpu_online_mask is updated by the _cpu_down() 100 * using __stop_machine(). Since we're in irqs disabled 101 * section, __stop_machine() is not exectuting, hence 102 + * the cpu_online_mask is stable. 103 * 104 * However, a cpu might have been offlined _just_ before 105 * we disabled irqs while entering here. ··· 107 * notification, leading to the offlined cpu's bit 108 * being set in the rcp->cpumask. 109 * 110 + * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent 111 * sending smp_reschedule() to an offlined CPU. 112 */ 113 + for_each_cpu_and(cpu, 114 + to_cpumask(rcp->cpumask), cpu_online_mask) { 115 + if (cpu != rdp->cpu) 116 + smp_send_reschedule(cpu); 117 + } 118 } 119 spin_unlock_irqrestore(&rcp->lock, flags); 120 } ··· 193 194 printk(KERN_ERR "INFO: RCU detected CPU stalls:"); 195 for_each_possible_cpu(cpu) { 196 + if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask))) 197 printk(" %d", cpu); 198 } 199 printk(" (detected by %d, t=%ld jiffies)\n", ··· 221 long delta; 222 223 delta = jiffies - rcp->jiffies_stall; 224 + if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) && 225 + delta >= 0) { 226 227 /* We haven't checked in, so go dump stack. */ 228 print_cpu_stall(rcp); ··· 393 * unnecessarily. 394 */ 395 smp_mb(); 396 + cpumask_andnot(to_cpumask(rcp->cpumask), 397 + cpu_online_mask, nohz_cpu_mask); 398 399 rcp->signaled = 0; 400 } ··· 406 */ 407 static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) 408 { 409 + cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask)); 410 + if (cpumask_empty(to_cpumask(rcp->cpumask))) { 411 /* batch completed ! */ 412 rcp->completed = rcp->cur; 413 rcu_start_batch(rcp);
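The force_quiescent_state() change above drops the on-stack cpumask_t temporary; for_each_cpu_and() walks the intersection of two masks without ever materialising it. The same idiom in isolation (names invented; assumed to run with the ctrlblk lock held and irqs off, as in the RCU code):

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /* Kick every online CPU still marked in 'pending', except ourselves. */
    static void kick_pending_cpus(const struct cpumask *pending)
    {
            int cpu;

            for_each_cpu_and(cpu, pending, cpu_online_mask) {
                    if (cpu != smp_processor_id())
                            smp_send_reschedule(cpu);
            }
    }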
+10 -9
kernel/rcupreempt.c
··· 164 { "idle", "waitack", "waitzero", "waitmb" }; 165 #endif /* #ifdef CONFIG_RCU_TRACE */ 166 167 - static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE; 168 169 /* 170 * Enum and per-CPU flag to determine when each CPU has seen ··· 759 760 /* Now ask each CPU for acknowledgement of the flip. */ 761 762 - for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { 763 per_cpu(rcu_flip_flag, cpu) = rcu_flipped; 764 dyntick_save_progress_counter(cpu); 765 } ··· 777 int cpu; 778 779 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); 780 - for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) 781 if (rcu_try_flip_waitack_needed(cpu) && 782 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { 783 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); ··· 809 /* Check to see if the sum of the "last" counters is zero. */ 810 811 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); 812 - for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) 813 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; 814 if (sum != 0) { 815 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); ··· 824 smp_mb(); /* ^^^^^^^^^^^^ */ 825 826 /* Call for a memory barrier from each CPU. */ 827 - for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { 828 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; 829 dyntick_save_progress_counter(cpu); 830 } ··· 844 int cpu; 845 846 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); 847 - for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) 848 if (rcu_try_flip_waitmb_needed(cpu) && 849 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { 850 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); ··· 1033 RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0; 1034 RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0; 1035 1036 - cpu_clear(cpu, rcu_cpu_online_map); 1037 1038 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); 1039 ··· 1073 struct rcu_data *rdp; 1074 1075 spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); 1076 - cpu_set(cpu, rcu_cpu_online_map); 1077 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); 1078 1079 /* ··· 1431 * We don't need protection against CPU-Hotplug here 1432 * since 1433 * a) If a CPU comes online while we are iterating over the 1434 - * cpu_online_map below, we would only end up making a 1435 * duplicate call to rcu_online_cpu() which sets the corresponding 1436 * CPU's mask in the rcu_cpu_online_map. 1437 *
··· 164 { "idle", "waitack", "waitzero", "waitmb" }; 165 #endif /* #ifdef CONFIG_RCU_TRACE */ 166 167 + static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly 168 + = CPU_BITS_NONE; 169 170 /* 171 * Enum and per-CPU flag to determine when each CPU has seen ··· 758 759 /* Now ask each CPU for acknowledgement of the flip. */ 760 761 + for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) { 762 per_cpu(rcu_flip_flag, cpu) = rcu_flipped; 763 dyntick_save_progress_counter(cpu); 764 } ··· 776 int cpu; 777 778 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); 779 + for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) 780 if (rcu_try_flip_waitack_needed(cpu) && 781 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { 782 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); ··· 808 /* Check to see if the sum of the "last" counters is zero. */ 809 810 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); 811 + for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) 812 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; 813 if (sum != 0) { 814 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); ··· 823 smp_mb(); /* ^^^^^^^^^^^^ */ 824 825 /* Call for a memory barrier from each CPU. */ 826 + for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) { 827 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; 828 dyntick_save_progress_counter(cpu); 829 } ··· 843 int cpu; 844 845 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); 846 + for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) 847 if (rcu_try_flip_waitmb_needed(cpu) && 848 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { 849 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); ··· 1032 RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0; 1033 RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0; 1034 1035 + cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map)); 1036 1037 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); 1038 ··· 1072 struct rcu_data *rdp; 1073 1074 spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); 1075 + cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map)); 1076 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); 1077 1078 /* ··· 1430 * We don't need protection against CPU-Hotplug here 1431 * since 1432 * a) If a CPU comes online while we are iterating over the 1433 + * cpu_online_mask below, we would only end up making a 1434 * duplicate call to rcu_online_cpu() which sets the corresponding 1435 * CPU's mask in the rcu_cpu_online_map. 1436 *
+15 -12
kernel/rcutorture.c
··· 868 */ 869 static void rcu_torture_shuffle_tasks(void) 870 { 871 - cpumask_t tmp_mask; 872 int i; 873 874 - cpus_setall(tmp_mask); 875 get_online_cpus(); 876 877 /* No point in shuffling if there is only one online CPU (ex: UP) */ 878 - if (num_online_cpus() == 1) { 879 - put_online_cpus(); 880 - return; 881 - } 882 883 if (rcu_idle_cpu != -1) 884 - cpu_clear(rcu_idle_cpu, tmp_mask); 885 886 - set_cpus_allowed_ptr(current, &tmp_mask); 887 888 if (reader_tasks) { 889 for (i = 0; i < nrealreaders; i++) 890 if (reader_tasks[i]) 891 set_cpus_allowed_ptr(reader_tasks[i], 892 - &tmp_mask); 893 } 894 895 if (fakewriter_tasks) { 896 for (i = 0; i < nfakewriters; i++) 897 if (fakewriter_tasks[i]) 898 set_cpus_allowed_ptr(fakewriter_tasks[i], 899 - &tmp_mask); 900 } 901 902 if (writer_task) 903 - set_cpus_allowed_ptr(writer_task, &tmp_mask); 904 905 if (stats_task) 906 - set_cpus_allowed_ptr(stats_task, &tmp_mask); 907 908 if (rcu_idle_cpu == -1) 909 rcu_idle_cpu = num_online_cpus() - 1; 910 else 911 rcu_idle_cpu--; 912 913 put_online_cpus(); 914 } 915 916 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
··· 868 */ 869 static void rcu_torture_shuffle_tasks(void) 870 { 871 + cpumask_var_t tmp_mask; 872 int i; 873 874 + if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)) 875 + BUG(); 876 + 877 + cpumask_setall(tmp_mask); 878 get_online_cpus(); 879 880 /* No point in shuffling if there is only one online CPU (ex: UP) */ 881 + if (num_online_cpus() == 1) 882 + goto out; 883 884 if (rcu_idle_cpu != -1) 885 + cpumask_clear_cpu(rcu_idle_cpu, tmp_mask); 886 887 + set_cpus_allowed_ptr(current, tmp_mask); 888 889 if (reader_tasks) { 890 for (i = 0; i < nrealreaders; i++) 891 if (reader_tasks[i]) 892 set_cpus_allowed_ptr(reader_tasks[i], 893 + tmp_mask); 894 } 895 896 if (fakewriter_tasks) { 897 for (i = 0; i < nfakewriters; i++) 898 if (fakewriter_tasks[i]) 899 set_cpus_allowed_ptr(fakewriter_tasks[i], 900 + tmp_mask); 901 } 902 903 if (writer_task) 904 + set_cpus_allowed_ptr(writer_task, tmp_mask); 905 906 if (stats_task) 907 + set_cpus_allowed_ptr(stats_task, tmp_mask); 908 909 if (rcu_idle_cpu == -1) 910 rcu_idle_cpu = num_online_cpus() - 1; 911 else 912 rcu_idle_cpu--; 913 914 + out: 915 put_online_cpus(); 916 + free_cpumask_var(tmp_mask); 917 } 918 919 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
+15 -38
kernel/sched.c
··· 3715 * don't kick the migration_thread, if the curr 3716 * task on busiest cpu can't be moved to this_cpu 3717 */ 3718 - if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { 3719 double_unlock_balance(this_rq, busiest); 3720 all_pinned = 1; 3721 return ld_moved; ··· 6257 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 6258 { 6259 int dest_cpu; 6260 - /* FIXME: Use cpumask_of_node here. */ 6261 - cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu)); 6262 - const struct cpumask *nodemask = &_nodemask; 6263 6264 again: 6265 /* Look for allowed, online CPU in same node. */ ··· 7168 static void sched_domain_node_span(int node, struct cpumask *span) 7169 { 7170 nodemask_t used_nodes; 7171 - /* FIXME: use cpumask_of_node() */ 7172 - node_to_cpumask_ptr(nodemask, node); 7173 int i; 7174 7175 - cpus_clear(*span); 7176 nodes_clear(used_nodes); 7177 7178 - cpus_or(*span, *span, *nodemask); 7179 node_set(node, used_nodes); 7180 7181 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { 7182 int next_node = find_next_best_node(node, &used_nodes); 7183 7184 - node_to_cpumask_ptr_next(nodemask, next_node); 7185 - cpus_or(*span, *span, *nodemask); 7186 } 7187 } 7188 #endif /* CONFIG_NUMA */ ··· 7259 { 7260 int group; 7261 #ifdef CONFIG_SCHED_MC 7262 - /* FIXME: Use cpu_coregroup_mask. */ 7263 - *mask = cpu_coregroup_map(cpu); 7264 - cpus_and(*mask, *mask, *cpu_map); 7265 group = cpumask_first(mask); 7266 #elif defined(CONFIG_SCHED_SMT) 7267 cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); ··· 7289 struct cpumask *nodemask) 7290 { 7291 int group; 7292 - /* FIXME: use cpumask_of_node */ 7293 - node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu)); 7294 7295 - cpumask_and(nodemask, pnodemask, cpu_map); 7296 group = cpumask_first(nodemask); 7297 7298 if (sg) ··· 7341 7342 for (i = 0; i < nr_node_ids; i++) { 7343 struct sched_group *oldsg, *sg = sched_group_nodes[i]; 7344 - /* FIXME: Use cpumask_of_node */ 7345 - node_to_cpumask_ptr(pnodemask, i); 7346 7347 - cpus_and(*nodemask, *pnodemask, *cpu_map); 7348 if (cpumask_empty(nodemask)) 7349 continue; 7350 ··· 7551 for_each_cpu(i, cpu_map) { 7552 struct sched_domain *sd = NULL, *p; 7553 7554 - /* FIXME: use cpumask_of_node */ 7555 - *nodemask = node_to_cpumask(cpu_to_node(i)); 7556 - cpus_and(*nodemask, *nodemask, *cpu_map); 7557 7558 #ifdef CONFIG_NUMA 7559 if (cpumask_weight(cpu_map) > ··· 7592 sd = &per_cpu(core_domains, i).sd; 7593 SD_INIT(sd, MC); 7594 set_domain_attribute(sd, attr); 7595 - *sched_domain_span(sd) = cpu_coregroup_map(i); 7596 - cpumask_and(sched_domain_span(sd), 7597 - sched_domain_span(sd), cpu_map); 7598 sd->parent = p; 7599 p->child = sd; 7600 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); ··· 7629 #ifdef CONFIG_SCHED_MC 7630 /* Set up multi-core groups */ 7631 for_each_cpu(i, cpu_map) { 7632 - /* FIXME: Use cpu_coregroup_mask */ 7633 - *this_core_map = cpu_coregroup_map(i); 7634 - cpus_and(*this_core_map, *this_core_map, *cpu_map); 7635 if (i != cpumask_first(this_core_map)) 7636 continue; 7637 ··· 7641 7642 /* Set up physical groups */ 7643 for (i = 0; i < nr_node_ids; i++) { 7644 - /* FIXME: Use cpumask_of_node */ 7645 - *nodemask = node_to_cpumask(i); 7646 - cpus_and(*nodemask, *nodemask, *cpu_map); 7647 if (cpumask_empty(nodemask)) 7648 continue; 7649 ··· 7663 struct sched_group *sg, *prev; 7664 int j; 7665 7666 - /* FIXME: Use cpumask_of_node */ 7667 - *nodemask = node_to_cpumask(i); 7668 cpumask_clear(covered); 7669 - 7670 - cpus_and(*nodemask, *nodemask, *cpu_map); 7671 if 
(cpumask_empty(nodemask)) { 7672 sched_group_nodes[i] = NULL; 7673 continue; ··· 7695 7696 for (j = 0; j < nr_node_ids; j++) { 7697 int n = (i + j) % nr_node_ids; 7698 - /* FIXME: Use cpumask_of_node */ 7699 - node_to_cpumask_ptr(pnodemask, n); 7700 7701 cpumask_complement(notcovered, covered); 7702 cpumask_and(tmpmask, notcovered, cpu_map); ··· 7702 if (cpumask_empty(tmpmask)) 7703 break; 7704 7705 - cpumask_and(tmpmask, tmpmask, pnodemask); 7706 if (cpumask_empty(tmpmask)) 7707 continue; 7708
··· 3715 * don't kick the migration_thread, if the curr 3716 * task on busiest cpu can't be moved to this_cpu 3717 */ 3718 + if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { 3719 double_unlock_balance(this_rq, busiest); 3720 all_pinned = 1; 3721 return ld_moved; ··· 6257 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 6258 { 6259 int dest_cpu; 6260 + const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu)); 6261 6262 again: 6263 /* Look for allowed, online CPU in same node. */ ··· 7170 static void sched_domain_node_span(int node, struct cpumask *span) 7171 { 7172 nodemask_t used_nodes; 7173 int i; 7174 7175 + cpumask_clear(span); 7176 nodes_clear(used_nodes); 7177 7178 + cpumask_or(span, span, cpumask_of_node(node)); 7179 node_set(node, used_nodes); 7180 7181 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { 7182 int next_node = find_next_best_node(node, &used_nodes); 7183 7184 + cpumask_or(span, span, cpumask_of_node(next_node)); 7185 } 7186 } 7187 #endif /* CONFIG_NUMA */ ··· 7264 { 7265 int group; 7266 #ifdef CONFIG_SCHED_MC 7267 + cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); 7268 group = cpumask_first(mask); 7269 #elif defined(CONFIG_SCHED_SMT) 7270 cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); ··· 7296 struct cpumask *nodemask) 7297 { 7298 int group; 7299 7300 + cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); 7301 group = cpumask_first(nodemask); 7302 7303 if (sg) ··· 7350 7351 for (i = 0; i < nr_node_ids; i++) { 7352 struct sched_group *oldsg, *sg = sched_group_nodes[i]; 7353 7354 + cpumask_and(nodemask, cpumask_of_node(i), cpu_map); 7355 if (cpumask_empty(nodemask)) 7356 continue; 7357 ··· 7562 for_each_cpu(i, cpu_map) { 7563 struct sched_domain *sd = NULL, *p; 7564 7565 + cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map); 7566 7567 #ifdef CONFIG_NUMA 7568 if (cpumask_weight(cpu_map) > ··· 7605 sd = &per_cpu(core_domains, i).sd; 7606 SD_INIT(sd, MC); 7607 set_domain_attribute(sd, attr); 7608 + cpumask_and(sched_domain_span(sd), cpu_map, 7609 + cpu_coregroup_mask(i)); 7610 sd->parent = p; 7611 p->child = sd; 7612 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); ··· 7643 #ifdef CONFIG_SCHED_MC 7644 /* Set up multi-core groups */ 7645 for_each_cpu(i, cpu_map) { 7646 + cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map); 7647 if (i != cpumask_first(this_core_map)) 7648 continue; 7649 ··· 7657 7658 /* Set up physical groups */ 7659 for (i = 0; i < nr_node_ids; i++) { 7660 + cpumask_and(nodemask, cpumask_of_node(i), cpu_map); 7661 if (cpumask_empty(nodemask)) 7662 continue; 7663 ··· 7681 struct sched_group *sg, *prev; 7682 int j; 7683 7684 cpumask_clear(covered); 7685 + cpumask_and(nodemask, cpumask_of_node(i), cpu_map); 7686 if (cpumask_empty(nodemask)) { 7687 sched_group_nodes[i] = NULL; 7688 continue; ··· 7716 7717 for (j = 0; j < nr_node_ids; j++) { 7718 int n = (i + j) % nr_node_ids; 7719 7720 cpumask_complement(notcovered, covered); 7721 cpumask_and(tmpmask, notcovered, cpu_map); ··· 7725 if (cpumask_empty(tmpmask)) 7726 break; 7727 7728 + cpumask_and(tmpmask, tmpmask, cpumask_of_node(n)); 7729 if (cpumask_empty(tmpmask)) 7730 continue; 7731
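The scheduler hunks drop the "FIXME: use cpumask_of_node" stack temporaries: rather than copying node_to_cpumask() by value and AND-ing in place, they AND the returned pointer straight into the destination mask. An isolated sketch of that shape (assuming the architecture provides cpumask_of_node(), as the topology patches in this series arrange):

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    /* Collect the online CPUs of 'node' into the caller-provided 'dst'. */
    static void online_cpus_of_node(int node, struct cpumask *dst)
    {
            /* cpumask_of_node() returns a const pointer; no NR_CPUS-bit copy */
            cpumask_and(dst, cpumask_of_node(node), cpu_online_mask);
    }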
+2 -1
kernel/sched_rt.c
··· 1383 unsigned int i; 1384 1385 for_each_possible_cpu(i) 1386 - alloc_cpumask_var(&per_cpu(local_cpu_mask, i), GFP_KERNEL); 1387 } 1388 #endif /* CONFIG_SMP */ 1389
··· 1383 unsigned int i; 1384 1385 for_each_possible_cpu(i) 1386 + alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 1387 + GFP_KERNEL, cpu_to_node(i)); 1388 } 1389 #endif /* CONFIG_SMP */ 1390
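alloc_cpumask_var_node() is the NUMA-aware variant: the per-CPU scratch mask above is now allocated on the node that owns the CPU instead of wherever the boot CPU happens to be. The same pattern in isolation (the per-CPU variable is illustrative):

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/init.h>
    #include <linux/percpu.h>
    #include <linux/topology.h>

    static DEFINE_PER_CPU(cpumask_var_t, scratch_mask);

    static void __init alloc_scratch_masks(void)
    {
            unsigned int cpu;

            for_each_possible_cpu(cpu)
                    /* place each bitmap on the memory node of its CPU */
                    alloc_cpumask_var_node(&per_cpu(scratch_mask, cpu),
                                           GFP_KERNEL, cpu_to_node(cpu));
    }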
+54 -89
kernel/smp.c
··· 24 struct call_single_data csd; 25 spinlock_t lock; 26 unsigned int refs; 27 - cpumask_t cpumask; 28 struct rcu_head rcu_head; 29 }; 30 31 struct call_single_queue { ··· 110 list_for_each_entry_rcu(data, &call_function_queue, csd.list) { 111 int refs; 112 113 - if (!cpu_isset(cpu, data->cpumask)) 114 continue; 115 116 data->csd.func(data->csd.info); 117 118 spin_lock(&data->lock); 119 - cpu_clear(cpu, data->cpumask); 120 WARN_ON(data->refs == 0); 121 data->refs--; 122 refs = data->refs; ··· 223 local_irq_save(flags); 224 func(info); 225 local_irq_restore(flags); 226 - } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) { 227 struct call_single_data *data = NULL; 228 229 if (!wait) { ··· 266 generic_exec_single(cpu, data); 267 } 268 269 - /* Dummy function */ 270 - static void quiesce_dummy(void *unused) 271 - { 272 - } 273 - 274 - /* 275 - * Ensure stack based data used in call function mask is safe to free. 276 - * 277 - * This is needed by smp_call_function_mask when using on-stack data, because 278 - * a single call function queue is shared by all CPUs, and any CPU may pick up 279 - * the data item on the queue at any time before it is deleted. So we need to 280 - * ensure that all CPUs have transitioned through a quiescent state after 281 - * this call. 282 - * 283 - * This is a very slow function, implemented by sending synchronous IPIs to 284 - * all possible CPUs. For this reason, we have to alloc data rather than use 285 - * stack based data even in the case of synchronous calls. The stack based 286 - * data is then just used for deadlock/oom fallback which will be very rare. 287 - * 288 - * If a faster scheme can be made, we could go back to preferring stack based 289 - * data -- the data allocation/free is non-zero cost. 290 - */ 291 - static void smp_call_function_mask_quiesce_stack(cpumask_t mask) 292 - { 293 - struct call_single_data data; 294 - int cpu; 295 - 296 - data.func = quiesce_dummy; 297 - data.info = NULL; 298 - 299 - for_each_cpu_mask(cpu, mask) { 300 - data.flags = CSD_FLAG_WAIT; 301 - generic_exec_single(cpu, &data); 302 - } 303 - } 304 305 /** 306 - * smp_call_function_mask(): Run a function on a set of other CPUs. 307 - * @mask: The set of cpus to run on. 308 * @func: The function to run. This must be fast and non-blocking. 309 * @info: An arbitrary pointer to pass to the function. 310 * @wait: If true, wait (atomically) until function has completed on other CPUs. 311 - * 312 - * Returns 0 on success, else a negative status code. 313 * 314 * If @wait is true, then returns once @func has returned. Note that @wait 315 * will be implicitly turned on in case of allocation failures, since ··· 287 * hardware interrupt handler or from a bottom half handler. Preemption 288 * must be disabled when calling this function. 289 */ 290 - int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, 291 - int wait) 292 { 293 - struct call_function_data d; 294 - struct call_function_data *data = NULL; 295 - cpumask_t allbutself; 296 unsigned long flags; 297 - int cpu, num_cpus; 298 - int slowpath = 0; 299 300 /* Can deadlock when called with interrupts disabled */ 301 WARN_ON(irqs_disabled()); 302 303 - cpu = smp_processor_id(); 304 - allbutself = cpu_online_map; 305 - cpu_clear(cpu, allbutself); 306 - cpus_and(mask, mask, allbutself); 307 - num_cpus = cpus_weight(mask); 308 309 - /* 310 - * If zero CPUs, return. If just a single CPU, turn this request 311 - * into a targetted single call instead since it's faster. 
312 - */ 313 - if (!num_cpus) 314 - return 0; 315 - else if (num_cpus == 1) { 316 - cpu = first_cpu(mask); 317 - return smp_call_function_single(cpu, func, info, wait); 318 } 319 320 - data = kmalloc(sizeof(*data), GFP_ATOMIC); 321 - if (data) { 322 - data->csd.flags = CSD_FLAG_ALLOC; 323 - if (wait) 324 - data->csd.flags |= CSD_FLAG_WAIT; 325 - } else { 326 - data = &d; 327 - data->csd.flags = CSD_FLAG_WAIT; 328 - wait = 1; 329 - slowpath = 1; 330 } 331 332 spin_lock_init(&data->lock); 333 data->csd.func = func; 334 data->csd.info = info; 335 - data->refs = num_cpus; 336 - data->cpumask = mask; 337 338 spin_lock_irqsave(&call_function_lock, flags); 339 list_add_tail_rcu(&data->csd.list, &call_function_queue); ··· 349 smp_mb(); 350 351 /* Send a message to all CPUs in the map */ 352 - arch_send_call_function_ipi(mask); 353 354 /* optionally wait for the CPUs to complete */ 355 - if (wait) { 356 csd_flag_wait(&data->csd); 357 - if (unlikely(slowpath)) 358 - smp_call_function_mask_quiesce_stack(mask); 359 - } 360 - 361 - return 0; 362 } 363 - EXPORT_SYMBOL(smp_call_function_mask); 364 365 /** 366 * smp_call_function(): Run a function on all other CPUs. ··· 363 * @info: An arbitrary pointer to pass to the function. 364 * @wait: If true, wait (atomically) until function has completed on other CPUs. 365 * 366 - * Returns 0 on success, else a negative status code. 367 * 368 * If @wait is true, then returns once @func has returned; otherwise 369 * it returns just before the target cpu calls @func. In case of allocation ··· 374 */ 375 int smp_call_function(void (*func)(void *), void *info, int wait) 376 { 377 - int ret; 378 - 379 preempt_disable(); 380 - ret = smp_call_function_mask(cpu_online_map, func, info, wait); 381 preempt_enable(); 382 - return ret; 383 } 384 EXPORT_SYMBOL(smp_call_function); 385
··· 24 struct call_single_data csd; 25 spinlock_t lock; 26 unsigned int refs; 27 struct rcu_head rcu_head; 28 + unsigned long cpumask_bits[]; 29 }; 30 31 struct call_single_queue { ··· 110 list_for_each_entry_rcu(data, &call_function_queue, csd.list) { 111 int refs; 112 113 + if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits))) 114 continue; 115 116 data->csd.func(data->csd.info); 117 118 spin_lock(&data->lock); 119 + cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits)); 120 WARN_ON(data->refs == 0); 121 data->refs--; 122 refs = data->refs; ··· 223 local_irq_save(flags); 224 func(info); 225 local_irq_restore(flags); 226 + } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { 227 struct call_single_data *data = NULL; 228 229 if (!wait) { ··· 266 generic_exec_single(cpu, data); 267 } 268 269 + /* FIXME: Shim for archs using old arch_send_call_function_ipi API. */ 270 + #ifndef arch_send_call_function_ipi_mask 271 + #define arch_send_call_function_ipi_mask(maskp) \ 272 + arch_send_call_function_ipi(*(maskp)) 273 + #endif 274 275 /** 276 + * smp_call_function_many(): Run a function on a set of other CPUs. 277 + * @mask: The set of cpus to run on (only runs on online subset). 278 * @func: The function to run. This must be fast and non-blocking. 279 * @info: An arbitrary pointer to pass to the function. 280 * @wait: If true, wait (atomically) until function has completed on other CPUs. 281 * 282 * If @wait is true, then returns once @func has returned. Note that @wait 283 * will be implicitly turned on in case of allocation failures, since ··· 319 * hardware interrupt handler or from a bottom half handler. Preemption 320 * must be disabled when calling this function. 321 */ 322 + void smp_call_function_many(const struct cpumask *mask, 323 + void (*func)(void *), void *info, 324 + bool wait) 325 { 326 + struct call_function_data *data; 327 unsigned long flags; 328 + int cpu, next_cpu; 329 330 /* Can deadlock when called with interrupts disabled */ 331 WARN_ON(irqs_disabled()); 332 333 + /* So, what's a CPU they want? Ignoring this one. */ 334 + cpu = cpumask_first_and(mask, cpu_online_mask); 335 + if (cpu == smp_processor_id()) 336 + cpu = cpumask_next_and(cpu, mask, cpu_online_mask); 337 + /* No online cpus? We're done. */ 338 + if (cpu >= nr_cpu_ids) 339 + return; 340 341 + /* Do we have another CPU which isn't us? */ 342 + next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask); 343 + if (next_cpu == smp_processor_id()) 344 + next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask); 345 + 346 + /* Fastpath: do that cpu by itself. */ 347 + if (next_cpu >= nr_cpu_ids) { 348 + smp_call_function_single(cpu, func, info, wait); 349 + return; 350 } 351 352 + data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC); 353 + if (unlikely(!data)) { 354 + /* Slow path. 
*/ 355 + for_each_online_cpu(cpu) { 356 + if (cpu == smp_processor_id()) 357 + continue; 358 + if (cpumask_test_cpu(cpu, mask)) 359 + smp_call_function_single(cpu, func, info, wait); 360 + } 361 + return; 362 } 363 364 spin_lock_init(&data->lock); 365 + data->csd.flags = CSD_FLAG_ALLOC; 366 + if (wait) 367 + data->csd.flags |= CSD_FLAG_WAIT; 368 data->csd.func = func; 369 data->csd.info = info; 370 + cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask); 371 + cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits)); 372 + data->refs = cpumask_weight(to_cpumask(data->cpumask_bits)); 373 374 spin_lock_irqsave(&call_function_lock, flags); 375 list_add_tail_rcu(&data->csd.list, &call_function_queue); ··· 377 smp_mb(); 378 379 /* Send a message to all CPUs in the map */ 380 + arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits)); 381 382 /* optionally wait for the CPUs to complete */ 383 + if (wait) 384 csd_flag_wait(&data->csd); 385 } 386 + EXPORT_SYMBOL(smp_call_function_many); 387 388 /** 389 * smp_call_function(): Run a function on all other CPUs. ··· 396 * @info: An arbitrary pointer to pass to the function. 397 * @wait: If true, wait (atomically) until function has completed on other CPUs. 398 * 399 + * Returns 0. 400 * 401 * If @wait is true, then returns once @func has returned; otherwise 402 * it returns just before the target cpu calls @func. In case of allocation ··· 407 */ 408 int smp_call_function(void (*func)(void *), void *info, int wait) 409 { 410 preempt_disable(); 411 + smp_call_function_many(cpu_online_mask, func, info, wait); 412 preempt_enable(); 413 + return 0; 414 } 415 EXPORT_SYMBOL(smp_call_function); 416
+1 -1
kernel/softirq.c
··· 733 break; 734 /* Unbind so it can run. Fall thru. */ 735 kthread_bind(per_cpu(ksoftirqd, hotcpu), 736 - any_online_cpu(cpu_online_map)); 737 case CPU_DEAD: 738 case CPU_DEAD_FROZEN: { 739 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
··· 733 break; 734 /* Unbind so it can run. Fall thru. */ 735 kthread_bind(per_cpu(ksoftirqd, hotcpu), 736 + cpumask_any(cpu_online_mask)); 737 case CPU_DEAD: 738 case CPU_DEAD_FROZEN: { 739 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+4 -6
kernel/softlockup.c
··· 303 break; 304 case CPU_ONLINE: 305 case CPU_ONLINE_FROZEN: 306 - check_cpu = any_online_cpu(cpu_online_map); 307 wake_up_process(per_cpu(watchdog_task, hotcpu)); 308 break; 309 #ifdef CONFIG_HOTPLUG_CPU 310 case CPU_DOWN_PREPARE: 311 case CPU_DOWN_PREPARE_FROZEN: 312 if (hotcpu == check_cpu) { 313 - cpumask_t temp_cpu_online_map = cpu_online_map; 314 - 315 - cpu_clear(hotcpu, temp_cpu_online_map); 316 - check_cpu = any_online_cpu(temp_cpu_online_map); 317 } 318 break; 319 ··· 321 break; 322 /* Unbind so it can run. Fall thru. */ 323 kthread_bind(per_cpu(watchdog_task, hotcpu), 324 - any_online_cpu(cpu_online_map)); 325 case CPU_DEAD: 326 case CPU_DEAD_FROZEN: 327 p = per_cpu(watchdog_task, hotcpu);
··· 303 break; 304 case CPU_ONLINE: 305 case CPU_ONLINE_FROZEN: 306 + check_cpu = cpumask_any(cpu_online_mask); 307 wake_up_process(per_cpu(watchdog_task, hotcpu)); 308 break; 309 #ifdef CONFIG_HOTPLUG_CPU 310 case CPU_DOWN_PREPARE: 311 case CPU_DOWN_PREPARE_FROZEN: 312 if (hotcpu == check_cpu) { 313 + /* Pick any other online cpu. */ 314 + check_cpu = cpumask_any_but(cpu_online_mask, hotcpu); 315 } 316 break; 317 ··· 323 break; 324 /* Unbind so it can run. Fall thru. */ 325 kthread_bind(per_cpu(watchdog_task, hotcpu), 326 + cpumask_any(cpu_online_mask)); 327 case CPU_DEAD: 328 case CPU_DEAD_FROZEN: 329 p = per_cpu(watchdog_task, hotcpu);
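cpumask_any_but() used above picks an arbitrary cpu from the mask excluding the given one and returns a value >= nr_cpu_ids when no such cpu exists; callers are expected to check for that. A short illustrative helper (not from the patch):

#include <linux/cpumask.h>

/* Return any online cpu other than @avoid, or -1 if there is none. */
static int example_other_online_cpu(unsigned int avoid)
{
        unsigned int cpu = cpumask_any_but(cpu_online_mask, avoid);

        return cpu < nr_cpu_ids ? (int)cpu : -1;
}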
+4 -4
kernel/stop_machine.c
··· 69 int err; 70 71 if (!active_cpus) { 72 - if (cpu == first_cpu(cpu_online_map)) 73 smdata = &active; 74 } else { 75 - if (cpu_isset(cpu, *active_cpus)) 76 smdata = &active; 77 } 78 /* Simple state machine */ ··· 109 return 0; 110 } 111 112 - int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) 113 { 114 struct work_struct *sm_work; 115 int i, ret; ··· 142 return ret; 143 } 144 145 - int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) 146 { 147 int ret; 148
··· 69 int err; 70 71 if (!active_cpus) { 72 + if (cpu == cpumask_first(cpu_online_mask)) 73 smdata = &active; 74 } else { 75 + if (cpumask_test_cpu(cpu, active_cpus)) 76 smdata = &active; 77 } 78 /* Simple state machine */ ··· 109 return 0; 110 } 111 112 + int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) 113 { 114 struct work_struct *sm_work; 115 int i, ret; ··· 142 return ret; 143 } 144 145 + int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) 146 { 147 int ret; 148
+25 -16
kernel/taskstats.c
··· 290 return; 291 } 292 293 - static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) 294 { 295 struct listener_list *listeners; 296 struct listener *s, *tmp; 297 unsigned int cpu; 298 - cpumask_t mask = *maskp; 299 300 - if (!cpus_subset(mask, cpu_possible_map)) 301 return -EINVAL; 302 303 if (isadd == REGISTER) { 304 - for_each_cpu_mask_nr(cpu, mask) { 305 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, 306 cpu_to_node(cpu)); 307 if (!s) ··· 319 320 /* Deregister or cleanup */ 321 cleanup: 322 - for_each_cpu_mask_nr(cpu, mask) { 323 listeners = &per_cpu(listener_array, cpu); 324 down_write(&listeners->sem); 325 list_for_each_entry_safe(s, tmp, &listeners->list, list) { ··· 334 return 0; 335 } 336 337 - static int parse(struct nlattr *na, cpumask_t *mask) 338 { 339 char *data; 340 int len; ··· 427 428 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) 429 { 430 - int rc = 0; 431 struct sk_buff *rep_skb; 432 struct taskstats *stats; 433 size_t size; 434 - cpumask_t mask; 435 436 - rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask); 437 - if (rc < 0) 438 - return rc; 439 - if (rc == 0) 440 - return add_del_listener(info->snd_pid, &mask, REGISTER); 441 442 - rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask); 443 if (rc < 0) 444 return rc; 445 - if (rc == 0) 446 - return add_del_listener(info->snd_pid, &mask, DEREGISTER); 447 448 /* 449 * Size includes space for nested attributes
··· 290 return; 291 } 292 293 + static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) 294 { 295 struct listener_list *listeners; 296 struct listener *s, *tmp; 297 unsigned int cpu; 298 299 + if (!cpumask_subset(mask, cpu_possible_mask)) 300 return -EINVAL; 301 302 if (isadd == REGISTER) { 303 + for_each_cpu(cpu, mask) { 304 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, 305 cpu_to_node(cpu)); 306 if (!s) ··· 320 321 /* Deregister or cleanup */ 322 cleanup: 323 + for_each_cpu(cpu, mask) { 324 listeners = &per_cpu(listener_array, cpu); 325 down_write(&listeners->sem); 326 list_for_each_entry_safe(s, tmp, &listeners->list, list) { ··· 335 return 0; 336 } 337 338 + static int parse(struct nlattr *na, struct cpumask *mask) 339 { 340 char *data; 341 int len; ··· 428 429 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) 430 { 431 + int rc; 432 struct sk_buff *rep_skb; 433 struct taskstats *stats; 434 size_t size; 435 + cpumask_var_t mask; 436 437 + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 438 + return -ENOMEM; 439 440 + rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask); 441 if (rc < 0) 442 + goto free_return_rc; 443 + if (rc == 0) { 444 + rc = add_del_listener(info->snd_pid, mask, REGISTER); 445 + goto free_return_rc; 446 + } 447 + 448 + rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask); 449 + if (rc < 0) 450 + goto free_return_rc; 451 + if (rc == 0) { 452 + rc = add_del_listener(info->snd_pid, mask, DEREGISTER); 453 + free_return_rc: 454 + free_cpumask_var(mask); 455 return rc; 456 + } 457 + free_cpumask_var(mask); 458 459 /* 460 * Size includes space for nested attributes
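The converted taskstats path allocates a temporary cpumask_var_t and releases it on every return path. The same shape, reduced to a self-contained sketch (the helper below is hypothetical):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Count how many cpus of @interesting are currently online. */
static int example_count_online_subset(const struct cpumask *interesting)
{
        cpumask_var_t tmp;
        int n;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        cpumask_and(tmp, interesting, cpu_online_mask);
        n = cpumask_weight(tmp);

        free_cpumask_var(tmp);          /* every exit path frees the mask */
        return n;
}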
+5 -4
kernel/time/clocksource.c
··· 145 * Cycle through CPUs to check if the CPUs stay 146 * synchronized to each other. 147 */ 148 - int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map); 149 150 if (next_cpu >= nr_cpu_ids) 151 - next_cpu = first_cpu(cpu_online_map); 152 watchdog_timer.expires += WATCHDOG_INTERVAL; 153 add_timer_on(&watchdog_timer, next_cpu); 154 } ··· 174 watchdog_last = watchdog->read(); 175 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; 176 add_timer_on(&watchdog_timer, 177 - first_cpu(cpu_online_map)); 178 } 179 } else { 180 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) ··· 196 watchdog_timer.expires = 197 jiffies + WATCHDOG_INTERVAL; 198 add_timer_on(&watchdog_timer, 199 - first_cpu(cpu_online_map)); 200 } 201 } 202 }
··· 145 * Cycle through CPUs to check if the CPUs stay 146 * synchronized to each other. 147 */ 148 + int next_cpu = cpumask_next(raw_smp_processor_id(), 149 + cpu_online_mask); 150 151 if (next_cpu >= nr_cpu_ids) 152 + next_cpu = cpumask_first(cpu_online_mask); 153 watchdog_timer.expires += WATCHDOG_INTERVAL; 154 add_timer_on(&watchdog_timer, next_cpu); 155 } ··· 173 watchdog_last = watchdog->read(); 174 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; 175 add_timer_on(&watchdog_timer, 176 + cpumask_first(cpu_online_mask)); 177 } 178 } else { 179 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) ··· 195 watchdog_timer.expires = 196 jiffies + WATCHDOG_INTERVAL; 197 add_timer_on(&watchdog_timer, 198 + cpumask_first(cpu_online_mask)); 199 } 200 } 201 }
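The cpumask_next()/cpumask_first() pair above is the standard round-robin idiom: walking past the last set bit yields a value >= nr_cpu_ids, at which point the caller wraps back to the first cpu. As a standalone sketch (illustrative only):

#include <linux/cpumask.h>

/* Next online cpu after @cpu, wrapping around to the first one. */
static int example_next_online_cpu_wrap(int cpu)
{
        int next = cpumask_next(cpu, cpu_online_mask);

        if (next >= nr_cpu_ids)
                next = cpumask_first(cpu_online_mask);
        return next;
}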
+58 -55
kernel/time/tick-broadcast.c
··· 28 */ 29 30 struct tick_device tick_broadcast_device; 31 - static cpumask_t tick_broadcast_mask; 32 static DEFINE_SPINLOCK(tick_broadcast_lock); 33 static int tick_broadcast_force; 34 ··· 48 return &tick_broadcast_device; 49 } 50 51 - cpumask_t *tick_get_broadcast_mask(void) 52 { 53 - return &tick_broadcast_mask; 54 } 55 56 /* ··· 74 75 clockevents_exchange_device(NULL, dev); 76 tick_broadcast_device.evtdev = dev; 77 - if (!cpus_empty(tick_broadcast_mask)) 78 tick_broadcast_start_periodic(dev); 79 return 1; 80 } ··· 106 */ 107 if (!tick_device_is_functional(dev)) { 108 dev->event_handler = tick_handle_periodic; 109 - cpu_set(cpu, tick_broadcast_mask); 110 tick_broadcast_start_periodic(tick_broadcast_device.evtdev); 111 ret = 1; 112 } else { ··· 118 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { 119 int cpu = smp_processor_id(); 120 121 - cpu_clear(cpu, tick_broadcast_mask); 122 tick_broadcast_clear_oneshot(cpu); 123 } 124 } ··· 127 } 128 129 /* 130 - * Broadcast the event to the cpus, which are set in the mask 131 */ 132 - static void tick_do_broadcast(cpumask_t mask) 133 { 134 int cpu = smp_processor_id(); 135 struct tick_device *td; ··· 137 /* 138 * Check, if the current cpu is in the mask 139 */ 140 - if (cpu_isset(cpu, mask)) { 141 - cpu_clear(cpu, mask); 142 td = &per_cpu(tick_cpu_device, cpu); 143 td->evtdev->event_handler(td->evtdev); 144 } 145 146 - if (!cpus_empty(mask)) { 147 /* 148 * It might be necessary to actually check whether the devices 149 * have different broadcast functions. For now, just use the 150 * one of the first device. This works as long as we have this 151 * misfeature only on x86 (lapic) 152 */ 153 - cpu = first_cpu(mask); 154 - td = &per_cpu(tick_cpu_device, cpu); 155 - td->evtdev->broadcast(&mask); 156 } 157 } 158 ··· 161 */ 162 static void tick_do_periodic_broadcast(void) 163 { 164 - cpumask_t mask; 165 - 166 spin_lock(&tick_broadcast_lock); 167 168 - cpus_and(mask, cpu_online_map, tick_broadcast_mask); 169 - tick_do_broadcast(mask); 170 171 spin_unlock(&tick_broadcast_lock); 172 } ··· 228 if (!tick_device_is_functional(dev)) 229 goto out; 230 231 - bc_stopped = cpus_empty(tick_broadcast_mask); 232 233 switch (*reason) { 234 case CLOCK_EVT_NOTIFY_BROADCAST_ON: 235 case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: 236 - if (!cpu_isset(cpu, tick_broadcast_mask)) { 237 - cpu_set(cpu, tick_broadcast_mask); 238 if (tick_broadcast_device.mode == 239 TICKDEV_MODE_PERIODIC) 240 clockevents_shutdown(dev); ··· 244 break; 245 case CLOCK_EVT_NOTIFY_BROADCAST_OFF: 246 if (!tick_broadcast_force && 247 - cpu_isset(cpu, tick_broadcast_mask)) { 248 - cpu_clear(cpu, tick_broadcast_mask); 249 if (tick_broadcast_device.mode == 250 TICKDEV_MODE_PERIODIC) 251 tick_setup_periodic(dev, 0); ··· 253 break; 254 } 255 256 - if (cpus_empty(tick_broadcast_mask)) { 257 if (!bc_stopped) 258 clockevents_shutdown(bc); 259 } else if (bc_stopped) { ··· 272 */ 273 void tick_broadcast_on_off(unsigned long reason, int *oncpu) 274 { 275 - if (!cpu_isset(*oncpu, cpu_online_map)) 276 printk(KERN_ERR "tick-broadcast: ignoring broadcast for " 277 "offline CPU #%d\n", *oncpu); 278 else ··· 303 spin_lock_irqsave(&tick_broadcast_lock, flags); 304 305 bc = tick_broadcast_device.evtdev; 306 - cpu_clear(cpu, tick_broadcast_mask); 307 308 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { 309 - if (bc && cpus_empty(tick_broadcast_mask)) 310 clockevents_shutdown(bc); 311 } 312 ··· 342 343 switch (tick_broadcast_device.mode) { 344 case TICKDEV_MODE_PERIODIC: 345 - if(!cpus_empty(tick_broadcast_mask)) 346 
tick_broadcast_start_periodic(bc); 347 - broadcast = cpu_isset(smp_processor_id(), 348 - tick_broadcast_mask); 349 break; 350 case TICKDEV_MODE_ONESHOT: 351 broadcast = tick_resume_broadcast_oneshot(bc); ··· 360 361 #ifdef CONFIG_TICK_ONESHOT 362 363 - static cpumask_t tick_broadcast_oneshot_mask; 364 365 /* 366 - * Debugging: see timer_list.c 367 */ 368 - cpumask_t *tick_get_broadcast_oneshot_mask(void) 369 { 370 - return &tick_broadcast_oneshot_mask; 371 } 372 373 static int tick_broadcast_set_event(ktime_t expires, int force) ··· 390 */ 391 void tick_check_oneshot_broadcast(int cpu) 392 { 393 - if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) { 394 struct tick_device *td = &per_cpu(tick_cpu_device, cpu); 395 396 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT); ··· 403 static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) 404 { 405 struct tick_device *td; 406 - cpumask_t mask; 407 ktime_t now, next_event; 408 int cpu; 409 ··· 410 again: 411 dev->next_event.tv64 = KTIME_MAX; 412 next_event.tv64 = KTIME_MAX; 413 - mask = CPU_MASK_NONE; 414 now = ktime_get(); 415 /* Find all expired events */ 416 - for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) { 417 td = &per_cpu(tick_cpu_device, cpu); 418 if (td->evtdev->next_event.tv64 <= now.tv64) 419 - cpu_set(cpu, mask); 420 else if (td->evtdev->next_event.tv64 < next_event.tv64) 421 next_event.tv64 = td->evtdev->next_event.tv64; 422 } ··· 424 /* 425 * Wakeup the cpus which have an expired event. 426 */ 427 - tick_do_broadcast(mask); 428 429 /* 430 * Two reasons for reprogram: ··· 476 goto out; 477 478 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { 479 - if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) { 480 - cpu_set(cpu, tick_broadcast_oneshot_mask); 481 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); 482 if (dev->next_event.tv64 < bc->next_event.tv64) 483 tick_broadcast_set_event(dev->next_event, 1); 484 } 485 } else { 486 - if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) { 487 - cpu_clear(cpu, tick_broadcast_oneshot_mask); 488 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); 489 if (dev->next_event.tv64 != KTIME_MAX) 490 tick_program_event(dev->next_event, 1); ··· 503 */ 504 static void tick_broadcast_clear_oneshot(int cpu) 505 { 506 - cpu_clear(cpu, tick_broadcast_oneshot_mask); 507 } 508 509 - static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires) 510 { 511 struct tick_device *td; 512 int cpu; 513 514 - for_each_cpu_mask_nr(cpu, *mask) { 515 td = &per_cpu(tick_cpu_device, cpu); 516 if (td->evtdev) 517 td->evtdev->next_event = expires; ··· 528 if (bc->event_handler != tick_handle_oneshot_broadcast) { 529 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; 530 int cpu = smp_processor_id(); 531 - cpumask_t mask; 532 533 bc->event_handler = tick_handle_oneshot_broadcast; 534 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); ··· 541 * oneshot_mask bits for those and program the 542 * broadcast device to fire. 543 */ 544 - mask = tick_broadcast_mask; 545 - cpu_clear(cpu, mask); 546 - cpus_or(tick_broadcast_oneshot_mask, 547 - tick_broadcast_oneshot_mask, mask); 548 549 - if (was_periodic && !cpus_empty(mask)) { 550 - tick_broadcast_init_next_event(&mask, tick_next_period); 551 tick_broadcast_set_event(tick_next_period, 1); 552 } else 553 bc->next_event.tv64 = KTIME_MAX; ··· 588 * Clear the broadcast mask flag for the dead cpu, but do not 589 * stop the broadcast device! 
590 */ 591 - cpu_clear(cpu, tick_broadcast_oneshot_mask); 592 593 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 594 }
··· 28 */ 29 30 struct tick_device tick_broadcast_device; 31 + /* FIXME: Use cpumask_var_t. */ 32 + static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS); 33 + static DECLARE_BITMAP(tmpmask, NR_CPUS); 34 static DEFINE_SPINLOCK(tick_broadcast_lock); 35 static int tick_broadcast_force; 36 ··· 46 return &tick_broadcast_device; 47 } 48 49 + struct cpumask *tick_get_broadcast_mask(void) 50 { 51 + return to_cpumask(tick_broadcast_mask); 52 } 53 54 /* ··· 72 73 clockevents_exchange_device(NULL, dev); 74 tick_broadcast_device.evtdev = dev; 75 + if (!cpumask_empty(tick_get_broadcast_mask())) 76 tick_broadcast_start_periodic(dev); 77 return 1; 78 } ··· 104 */ 105 if (!tick_device_is_functional(dev)) { 106 dev->event_handler = tick_handle_periodic; 107 + cpumask_set_cpu(cpu, tick_get_broadcast_mask()); 108 tick_broadcast_start_periodic(tick_broadcast_device.evtdev); 109 ret = 1; 110 } else { ··· 116 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { 117 int cpu = smp_processor_id(); 118 119 + cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); 120 tick_broadcast_clear_oneshot(cpu); 121 } 122 } ··· 125 } 126 127 /* 128 + * Broadcast the event to the cpus, which are set in the mask (mangled). 129 */ 130 + static void tick_do_broadcast(struct cpumask *mask) 131 { 132 int cpu = smp_processor_id(); 133 struct tick_device *td; ··· 135 /* 136 * Check, if the current cpu is in the mask 137 */ 138 + if (cpumask_test_cpu(cpu, mask)) { 139 + cpumask_clear_cpu(cpu, mask); 140 td = &per_cpu(tick_cpu_device, cpu); 141 td->evtdev->event_handler(td->evtdev); 142 } 143 144 + if (!cpumask_empty(mask)) { 145 /* 146 * It might be necessary to actually check whether the devices 147 * have different broadcast functions. For now, just use the 148 * one of the first device. This works as long as we have this 149 * misfeature only on x86 (lapic) 150 */ 151 + td = &per_cpu(tick_cpu_device, cpumask_first(mask)); 152 + td->evtdev->broadcast(mask); 153 } 154 } 155 ··· 160 */ 161 static void tick_do_periodic_broadcast(void) 162 { 163 spin_lock(&tick_broadcast_lock); 164 165 + cpumask_and(to_cpumask(tmpmask), 166 + cpu_online_mask, tick_get_broadcast_mask()); 167 + tick_do_broadcast(to_cpumask(tmpmask)); 168 169 spin_unlock(&tick_broadcast_lock); 170 } ··· 228 if (!tick_device_is_functional(dev)) 229 goto out; 230 231 + bc_stopped = cpumask_empty(tick_get_broadcast_mask()); 232 233 switch (*reason) { 234 case CLOCK_EVT_NOTIFY_BROADCAST_ON: 235 case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: 236 + if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) { 237 + cpumask_set_cpu(cpu, tick_get_broadcast_mask()); 238 if (tick_broadcast_device.mode == 239 TICKDEV_MODE_PERIODIC) 240 clockevents_shutdown(dev); ··· 244 break; 245 case CLOCK_EVT_NOTIFY_BROADCAST_OFF: 246 if (!tick_broadcast_force && 247 + cpumask_test_cpu(cpu, tick_get_broadcast_mask())) { 248 + cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); 249 if (tick_broadcast_device.mode == 250 TICKDEV_MODE_PERIODIC) 251 tick_setup_periodic(dev, 0); ··· 253 break; 254 } 255 256 + if (cpumask_empty(tick_get_broadcast_mask())) { 257 if (!bc_stopped) 258 clockevents_shutdown(bc); 259 } else if (bc_stopped) { ··· 272 */ 273 void tick_broadcast_on_off(unsigned long reason, int *oncpu) 274 { 275 + if (!cpumask_test_cpu(*oncpu, cpu_online_mask)) 276 printk(KERN_ERR "tick-broadcast: ignoring broadcast for " 277 "offline CPU #%d\n", *oncpu); 278 else ··· 303 spin_lock_irqsave(&tick_broadcast_lock, flags); 304 305 bc = tick_broadcast_device.evtdev; 306 + cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); 307 
308 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { 309 + if (bc && cpumask_empty(tick_get_broadcast_mask())) 310 clockevents_shutdown(bc); 311 } 312 ··· 342 343 switch (tick_broadcast_device.mode) { 344 case TICKDEV_MODE_PERIODIC: 345 + if (!cpumask_empty(tick_get_broadcast_mask())) 346 tick_broadcast_start_periodic(bc); 347 + broadcast = cpumask_test_cpu(smp_processor_id(), 348 + tick_get_broadcast_mask()); 349 break; 350 case TICKDEV_MODE_ONESHOT: 351 broadcast = tick_resume_broadcast_oneshot(bc); ··· 360 361 #ifdef CONFIG_TICK_ONESHOT 362 363 + /* FIXME: use cpumask_var_t. */ 364 + static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS); 365 366 /* 367 + * Exposed for debugging: see timer_list.c 368 */ 369 + struct cpumask *tick_get_broadcast_oneshot_mask(void) 370 { 371 + return to_cpumask(tick_broadcast_oneshot_mask); 372 } 373 374 static int tick_broadcast_set_event(ktime_t expires, int force) ··· 389 */ 390 void tick_check_oneshot_broadcast(int cpu) 391 { 392 + if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) { 393 struct tick_device *td = &per_cpu(tick_cpu_device, cpu); 394 395 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT); ··· 402 static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) 403 { 404 struct tick_device *td; 405 ktime_t now, next_event; 406 int cpu; 407 ··· 410 again: 411 dev->next_event.tv64 = KTIME_MAX; 412 next_event.tv64 = KTIME_MAX; 413 + cpumask_clear(to_cpumask(tmpmask)); 414 now = ktime_get(); 415 /* Find all expired events */ 416 + for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) { 417 td = &per_cpu(tick_cpu_device, cpu); 418 if (td->evtdev->next_event.tv64 <= now.tv64) 419 + cpumask_set_cpu(cpu, to_cpumask(tmpmask)); 420 else if (td->evtdev->next_event.tv64 < next_event.tv64) 421 next_event.tv64 = td->evtdev->next_event.tv64; 422 } ··· 424 /* 425 * Wakeup the cpus which have an expired event. 426 */ 427 + tick_do_broadcast(to_cpumask(tmpmask)); 428 429 /* 430 * Two reasons for reprogram: ··· 476 goto out; 477 478 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { 479 + if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) { 480 + cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask()); 481 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); 482 if (dev->next_event.tv64 < bc->next_event.tv64) 483 tick_broadcast_set_event(dev->next_event, 1); 484 } 485 } else { 486 + if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) { 487 + cpumask_clear_cpu(cpu, 488 + tick_get_broadcast_oneshot_mask()); 489 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); 490 if (dev->next_event.tv64 != KTIME_MAX) 491 tick_program_event(dev->next_event, 1); ··· 502 */ 503 static void tick_broadcast_clear_oneshot(int cpu) 504 { 505 + cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); 506 } 507 508 + static void tick_broadcast_init_next_event(struct cpumask *mask, 509 + ktime_t expires) 510 { 511 struct tick_device *td; 512 int cpu; 513 514 + for_each_cpu(cpu, mask) { 515 td = &per_cpu(tick_cpu_device, cpu); 516 if (td->evtdev) 517 td->evtdev->next_event = expires; ··· 526 if (bc->event_handler != tick_handle_oneshot_broadcast) { 527 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; 528 int cpu = smp_processor_id(); 529 530 bc->event_handler = tick_handle_oneshot_broadcast; 531 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); ··· 540 * oneshot_mask bits for those and program the 541 * broadcast device to fire. 
542 */ 543 + cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask()); 544 + cpumask_clear_cpu(cpu, to_cpumask(tmpmask)); 545 + cpumask_or(tick_get_broadcast_oneshot_mask(), 546 + tick_get_broadcast_oneshot_mask(), 547 + to_cpumask(tmpmask)); 548 549 + if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) { 550 + tick_broadcast_init_next_event(to_cpumask(tmpmask), 551 + tick_next_period); 552 tick_broadcast_set_event(tick_next_period, 1); 553 } else 554 bc->next_event.tv64 = KTIME_MAX; ··· 585 * Clear the broadcast mask flag for the dead cpu, but do not 586 * stop the broadcast device! 587 */ 588 + cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); 589 590 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 591 }
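Until the FIXMEs above are resolved by switching to cpumask_var_t, DECLARE_BITMAP() plus to_cpumask() is the stopgap that lets statically sized bitmap storage be driven through the struct cpumask API. The idiom in isolation (the example_ mask is invented):

#include <linux/cpumask.h>

static DECLARE_BITMAP(example_mask_bits, NR_CPUS);

static void example_mark_cpu(int cpu)
{
        /* to_cpumask() reinterprets the bitmap as a struct cpumask. */
        cpumask_set_cpu(cpu, to_cpumask(example_mask_bits));
}

static int example_cpu_marked(int cpu)
{
        return cpumask_test_cpu(cpu, to_cpumask(example_mask_bits));
}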
+3 -3
kernel/time/tick-common.c
··· 254 curdev = NULL; 255 } 256 clockevents_exchange_device(curdev, newdev); 257 - tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu)); 258 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) 259 tick_oneshot_notify(); 260 ··· 299 } 300 /* Transfer the do_timer job away from this cpu */ 301 if (*cpup == tick_do_timer_cpu) { 302 - int cpu = first_cpu(cpu_online_map); 303 304 - tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : 305 TICK_DO_TIMER_NONE; 306 } 307 spin_unlock_irqrestore(&tick_device_lock, flags);
··· 254 curdev = NULL; 255 } 256 clockevents_exchange_device(curdev, newdev); 257 + tick_setup_device(td, newdev, cpu, cpumask_of(cpu)); 258 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) 259 tick_oneshot_notify(); 260 ··· 299 } 300 /* Transfer the do_timer job away from this cpu */ 301 if (*cpup == tick_do_timer_cpu) { 302 + int cpu = cpumask_first(cpu_online_mask); 303 304 + tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : 305 TICK_DO_TIMER_NONE; 306 } 307 spin_unlock_irqrestore(&tick_device_lock, flags);
+25 -17
kernel/trace/ring_buffer.c
··· 195 EXPORT_SYMBOL_GPL(ring_buffer_event_data); 196 197 #define for_each_buffer_cpu(buffer, cpu) \ 198 - for_each_cpu_mask(cpu, buffer->cpumask) 199 200 #define TS_SHIFT 27 201 #define TS_MASK ((1ULL << TS_SHIFT) - 1) ··· 267 unsigned pages; 268 unsigned flags; 269 int cpus; 270 - cpumask_t cpumask; 271 atomic_t record_disabled; 272 273 struct mutex mutex; ··· 458 if (!buffer) 459 return NULL; 460 461 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 462 buffer->flags = flags; 463 ··· 468 if (buffer->pages == 1) 469 buffer->pages++; 470 471 - buffer->cpumask = cpu_possible_map; 472 buffer->cpus = nr_cpu_ids; 473 474 bsize = sizeof(void *) * nr_cpu_ids; 475 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 476 GFP_KERNEL); 477 if (!buffer->buffers) 478 - goto fail_free_buffer; 479 480 for_each_buffer_cpu(buffer, cpu) { 481 buffer->buffers[cpu] = ··· 495 } 496 kfree(buffer->buffers); 497 498 fail_free_buffer: 499 kfree(buffer); 500 return NULL; ··· 515 516 for_each_buffer_cpu(buffer, cpu) 517 rb_free_cpu_buffer(buffer->buffers[cpu]); 518 519 kfree(buffer); 520 } ··· 1291 1292 cpu = raw_smp_processor_id(); 1293 1294 - if (!cpu_isset(cpu, buffer->cpumask)) 1295 goto out; 1296 1297 cpu_buffer = buffer->buffers[cpu]; ··· 1404 1405 cpu = raw_smp_processor_id(); 1406 1407 - if (!cpu_isset(cpu, buffer->cpumask)) 1408 goto out; 1409 1410 cpu_buffer = buffer->buffers[cpu]; ··· 1486 { 1487 struct ring_buffer_per_cpu *cpu_buffer; 1488 1489 - if (!cpu_isset(cpu, buffer->cpumask)) 1490 return; 1491 1492 cpu_buffer = buffer->buffers[cpu]; ··· 1506 { 1507 struct ring_buffer_per_cpu *cpu_buffer; 1508 1509 - if (!cpu_isset(cpu, buffer->cpumask)) 1510 return; 1511 1512 cpu_buffer = buffer->buffers[cpu]; ··· 1523 { 1524 struct ring_buffer_per_cpu *cpu_buffer; 1525 1526 - if (!cpu_isset(cpu, buffer->cpumask)) 1527 return 0; 1528 1529 cpu_buffer = buffer->buffers[cpu]; ··· 1540 { 1541 struct ring_buffer_per_cpu *cpu_buffer; 1542 1543 - if (!cpu_isset(cpu, buffer->cpumask)) 1544 return 0; 1545 1546 cpu_buffer = buffer->buffers[cpu]; ··· 1858 struct buffer_page *reader; 1859 int nr_loops = 0; 1860 1861 - if (!cpu_isset(cpu, buffer->cpumask)) 1862 return NULL; 1863 1864 cpu_buffer = buffer->buffers[cpu]; ··· 2033 struct ring_buffer_event *event; 2034 unsigned long flags; 2035 2036 - if (!cpu_isset(cpu, buffer->cpumask)) 2037 return NULL; 2038 2039 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ··· 2070 struct ring_buffer_iter *iter; 2071 unsigned long flags; 2072 2073 - if (!cpu_isset(cpu, buffer->cpumask)) 2074 return NULL; 2075 2076 iter = kmalloc(sizeof(*iter), GFP_KERNEL); ··· 2180 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 2181 unsigned long flags; 2182 2183 - if (!cpu_isset(cpu, buffer->cpumask)) 2184 return; 2185 2186 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ··· 2236 { 2237 struct ring_buffer_per_cpu *cpu_buffer; 2238 2239 - if (!cpu_isset(cpu, buffer->cpumask)) 2240 return 1; 2241 2242 cpu_buffer = buffer->buffers[cpu]; ··· 2260 struct ring_buffer_per_cpu *cpu_buffer_a; 2261 struct ring_buffer_per_cpu *cpu_buffer_b; 2262 2263 - if (!cpu_isset(cpu, buffer_a->cpumask) || 2264 - !cpu_isset(cpu, buffer_b->cpumask)) 2265 return -EINVAL; 2266 2267 /* At least make sure the two buffers are somewhat the same */
··· 195 EXPORT_SYMBOL_GPL(ring_buffer_event_data); 196 197 #define for_each_buffer_cpu(buffer, cpu) \ 198 + for_each_cpu(cpu, buffer->cpumask) 199 200 #define TS_SHIFT 27 201 #define TS_MASK ((1ULL << TS_SHIFT) - 1) ··· 267 unsigned pages; 268 unsigned flags; 269 int cpus; 270 + cpumask_var_t cpumask; 271 atomic_t record_disabled; 272 273 struct mutex mutex; ··· 458 if (!buffer) 459 return NULL; 460 461 + if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) 462 + goto fail_free_buffer; 463 + 464 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 465 buffer->flags = flags; 466 ··· 465 if (buffer->pages == 1) 466 buffer->pages++; 467 468 + cpumask_copy(buffer->cpumask, cpu_possible_mask); 469 buffer->cpus = nr_cpu_ids; 470 471 bsize = sizeof(void *) * nr_cpu_ids; 472 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 473 GFP_KERNEL); 474 if (!buffer->buffers) 475 + goto fail_free_cpumask; 476 477 for_each_buffer_cpu(buffer, cpu) { 478 buffer->buffers[cpu] = ··· 492 } 493 kfree(buffer->buffers); 494 495 + fail_free_cpumask: 496 + free_cpumask_var(buffer->cpumask); 497 + 498 fail_free_buffer: 499 kfree(buffer); 500 return NULL; ··· 509 510 for_each_buffer_cpu(buffer, cpu) 511 rb_free_cpu_buffer(buffer->buffers[cpu]); 512 + 513 + free_cpumask_var(buffer->cpumask); 514 515 kfree(buffer); 516 } ··· 1283 1284 cpu = raw_smp_processor_id(); 1285 1286 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1287 goto out; 1288 1289 cpu_buffer = buffer->buffers[cpu]; ··· 1396 1397 cpu = raw_smp_processor_id(); 1398 1399 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1400 goto out; 1401 1402 cpu_buffer = buffer->buffers[cpu]; ··· 1478 { 1479 struct ring_buffer_per_cpu *cpu_buffer; 1480 1481 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1482 return; 1483 1484 cpu_buffer = buffer->buffers[cpu]; ··· 1498 { 1499 struct ring_buffer_per_cpu *cpu_buffer; 1500 1501 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1502 return; 1503 1504 cpu_buffer = buffer->buffers[cpu]; ··· 1515 { 1516 struct ring_buffer_per_cpu *cpu_buffer; 1517 1518 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1519 return 0; 1520 1521 cpu_buffer = buffer->buffers[cpu]; ··· 1532 { 1533 struct ring_buffer_per_cpu *cpu_buffer; 1534 1535 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1536 return 0; 1537 1538 cpu_buffer = buffer->buffers[cpu]; ··· 1850 struct buffer_page *reader; 1851 int nr_loops = 0; 1852 1853 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1854 return NULL; 1855 1856 cpu_buffer = buffer->buffers[cpu]; ··· 2025 struct ring_buffer_event *event; 2026 unsigned long flags; 2027 2028 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2029 return NULL; 2030 2031 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ··· 2062 struct ring_buffer_iter *iter; 2063 unsigned long flags; 2064 2065 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2066 return NULL; 2067 2068 iter = kmalloc(sizeof(*iter), GFP_KERNEL); ··· 2172 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 2173 unsigned long flags; 2174 2175 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2176 return; 2177 2178 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ··· 2228 { 2229 struct ring_buffer_per_cpu *cpu_buffer; 2230 2231 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2232 return 1; 2233 2234 cpu_buffer = buffer->buffers[cpu]; ··· 2252 struct ring_buffer_per_cpu *cpu_buffer_a; 2253 struct ring_buffer_per_cpu *cpu_buffer_b; 2254 2255 + if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 2256 + !cpumask_test_cpu(cpu, buffer_b->cpumask)) 2257 return -EINVAL; 2258 2259 /* 
At least make sure the two buffers are somewhat the same */
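The ring buffer now owns its cpumask_var_t for the lifetime of the structure: allocated in ring_buffer_alloc(), released both on the allocation error path and in ring_buffer_free(). A condensed sketch of that ownership pattern under invented names:

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/gfp.h>

struct example_obj {
        cpumask_var_t cpumask;
        /* ... other fields ... */
};

static struct example_obj *example_alloc(void)
{
        struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        if (!alloc_cpumask_var(&obj->cpumask, GFP_KERNEL))
                goto fail_free_obj;
        cpumask_copy(obj->cpumask, cpu_possible_mask);
        return obj;

fail_free_obj:
        kfree(obj);
        return NULL;
}

static void example_free(struct example_obj *obj)
{
        free_cpumask_var(obj->cpumask);
        kfree(obj);
}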
+45 -27
kernel/trace/trace.c
··· 89 preempt_enable(); 90 } 91 92 - static cpumask_t __read_mostly tracing_buffer_mask; 93 94 #define for_each_tracing_cpu(cpu) \ 95 - for_each_cpu_mask(cpu, tracing_buffer_mask) 96 97 /* 98 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops ··· 1811 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 1812 return; 1813 1814 - if (cpu_isset(iter->cpu, iter->started)) 1815 return; 1816 1817 - cpu_set(iter->cpu, iter->started); 1818 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); 1819 } 1820 ··· 2646 /* 2647 * Only trace on a CPU if the bitmask is set: 2648 */ 2649 - static cpumask_t tracing_cpumask = CPU_MASK_ALL; 2650 - 2651 - /* 2652 - * When tracing/tracing_cpu_mask is modified then this holds 2653 - * the new bitmask we are about to install: 2654 - */ 2655 - static cpumask_t tracing_cpumask_new; 2656 2657 /* 2658 * The tracer itself will not take this lock, but still we want ··· 2668 2669 mutex_lock(&tracing_cpumask_update_lock); 2670 2671 - len = cpumask_scnprintf(mask_str, count, &tracing_cpumask); 2672 if (count - len < 2) { 2673 count = -EINVAL; 2674 goto out_err; ··· 2687 size_t count, loff_t *ppos) 2688 { 2689 int err, cpu; 2690 2691 mutex_lock(&tracing_cpumask_update_lock); 2692 - err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new); 2693 if (err) 2694 goto err_unlock; 2695 ··· 2704 * Increase/decrease the disabled counter if we are 2705 * about to flip a bit in the cpumask: 2706 */ 2707 - if (cpu_isset(cpu, tracing_cpumask) && 2708 - !cpu_isset(cpu, tracing_cpumask_new)) { 2709 atomic_inc(&global_trace.data[cpu]->disabled); 2710 } 2711 - if (!cpu_isset(cpu, tracing_cpumask) && 2712 - cpu_isset(cpu, tracing_cpumask_new)) { 2713 atomic_dec(&global_trace.data[cpu]->disabled); 2714 } 2715 } 2716 __raw_spin_unlock(&ftrace_max_lock); 2717 local_irq_enable(); 2718 2719 - tracing_cpumask = tracing_cpumask_new; 2720 2721 mutex_unlock(&tracing_cpumask_update_lock); 2722 2723 return count; 2724 2725 err_unlock: 2726 mutex_unlock(&tracing_cpumask_update_lock); 2727 2728 return err; 2729 } ··· 3114 if (!iter) 3115 return -ENOMEM; 3116 3117 mutex_lock(&trace_types_lock); 3118 3119 /* trace pipe does not show start of buffer */ 3120 - cpus_setall(iter->started); 3121 3122 iter->tr = &global_trace; 3123 iter->trace = current_trace; ··· 3139 { 3140 struct trace_iterator *iter = file->private_data; 3141 3142 kfree(iter); 3143 atomic_dec(&tracing_reader); 3144 ··· 3758 static DEFINE_SPINLOCK(ftrace_dump_lock); 3759 /* use static because iter can be a bit big for the stack */ 3760 static struct trace_iterator iter; 3761 - static cpumask_t mask; 3762 static int dump_ran; 3763 unsigned long flags; 3764 int cnt = 0, cpu; ··· 3790 * not done often. We fill all what we can read, 3791 * and then release the locks again. 
3792 */ 3793 - 3794 - cpus_clear(mask); 3795 3796 while (!trace_empty(&iter)) { 3797 ··· 3826 { 3827 struct trace_array_cpu *data; 3828 int i; 3829 3830 /* TODO: make the number of buffers hot pluggable with CPUS */ 3831 - tracing_buffer_mask = cpu_possible_map; 3832 - 3833 global_trace.buffer = ring_buffer_alloc(trace_buf_size, 3834 TRACE_BUFFER_FLAGS); 3835 if (!global_trace.buffer) { 3836 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); 3837 WARN_ON(1); 3838 - return 0; 3839 } 3840 global_trace.entries = ring_buffer_size(global_trace.buffer); 3841 3842 #ifdef CONFIG_TRACER_MAX_TRACE 3843 max_tr.buffer = ring_buffer_alloc(trace_buf_size, ··· 3855 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); 3856 WARN_ON(1); 3857 ring_buffer_free(global_trace.buffer); 3858 - return 0; 3859 } 3860 max_tr.entries = ring_buffer_size(max_tr.buffer); 3861 WARN_ON(max_tr.entries != global_trace.entries); ··· 3885 &trace_panic_notifier); 3886 3887 register_die_notifier(&trace_die_notifier); 3888 3889 - return 0; 3890 } 3891 early_initcall(tracer_alloc_buffers); 3892 fs_initcall(tracer_init_debugfs);
··· 89 preempt_enable(); 90 } 91 92 + static cpumask_var_t __read_mostly tracing_buffer_mask; 93 94 #define for_each_tracing_cpu(cpu) \ 95 + for_each_cpu(cpu, tracing_buffer_mask) 96 97 /* 98 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops ··· 1811 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 1812 return; 1813 1814 + if (cpumask_test_cpu(iter->cpu, iter->started)) 1815 return; 1816 1817 + cpumask_set_cpu(iter->cpu, iter->started); 1818 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); 1819 } 1820 ··· 2646 /* 2647 * Only trace on a CPU if the bitmask is set: 2648 */ 2649 + static cpumask_var_t tracing_cpumask; 2650 2651 /* 2652 * The tracer itself will not take this lock, but still we want ··· 2674 2675 mutex_lock(&tracing_cpumask_update_lock); 2676 2677 + len = cpumask_scnprintf(mask_str, count, tracing_cpumask); 2678 if (count - len < 2) { 2679 count = -EINVAL; 2680 goto out_err; ··· 2693 size_t count, loff_t *ppos) 2694 { 2695 int err, cpu; 2696 + cpumask_var_t tracing_cpumask_new; 2697 + 2698 + if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 2699 + return -ENOMEM; 2700 2701 mutex_lock(&tracing_cpumask_update_lock); 2702 + err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 2703 if (err) 2704 goto err_unlock; 2705 ··· 2706 * Increase/decrease the disabled counter if we are 2707 * about to flip a bit in the cpumask: 2708 */ 2709 + if (cpumask_test_cpu(cpu, tracing_cpumask) && 2710 + !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 2711 atomic_inc(&global_trace.data[cpu]->disabled); 2712 } 2713 + if (!cpumask_test_cpu(cpu, tracing_cpumask) && 2714 + cpumask_test_cpu(cpu, tracing_cpumask_new)) { 2715 atomic_dec(&global_trace.data[cpu]->disabled); 2716 } 2717 } 2718 __raw_spin_unlock(&ftrace_max_lock); 2719 local_irq_enable(); 2720 2721 + cpumask_copy(tracing_cpumask, tracing_cpumask_new); 2722 2723 mutex_unlock(&tracing_cpumask_update_lock); 2724 + free_cpumask_var(tracing_cpumask_new); 2725 2726 return count; 2727 2728 err_unlock: 2729 mutex_unlock(&tracing_cpumask_update_lock); 2730 + free_cpumask_var(tracing_cpumask); 2731 2732 return err; 2733 } ··· 3114 if (!iter) 3115 return -ENOMEM; 3116 3117 + if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 3118 + kfree(iter); 3119 + return -ENOMEM; 3120 + } 3121 + 3122 mutex_lock(&trace_types_lock); 3123 3124 /* trace pipe does not show start of buffer */ 3125 + cpumask_setall(iter->started); 3126 3127 iter->tr = &global_trace; 3128 iter->trace = current_trace; ··· 3134 { 3135 struct trace_iterator *iter = file->private_data; 3136 3137 + free_cpumask_var(iter->started); 3138 kfree(iter); 3139 atomic_dec(&tracing_reader); 3140 ··· 3752 static DEFINE_SPINLOCK(ftrace_dump_lock); 3753 /* use static because iter can be a bit big for the stack */ 3754 static struct trace_iterator iter; 3755 static int dump_ran; 3756 unsigned long flags; 3757 int cnt = 0, cpu; ··· 3785 * not done often. We fill all what we can read, 3786 * and then release the locks again. 
3787 */ 3788 3789 while (!trace_empty(&iter)) { 3790 ··· 3823 { 3824 struct trace_array_cpu *data; 3825 int i; 3826 + int ret = -ENOMEM; 3827 + 3828 + if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) 3829 + goto out; 3830 + 3831 + if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) 3832 + goto out_free_buffer_mask; 3833 + 3834 + cpumask_copy(tracing_buffer_mask, cpu_possible_mask); 3835 + cpumask_copy(tracing_cpumask, cpu_all_mask); 3836 3837 /* TODO: make the number of buffers hot pluggable with CPUS */ 3838 global_trace.buffer = ring_buffer_alloc(trace_buf_size, 3839 TRACE_BUFFER_FLAGS); 3840 if (!global_trace.buffer) { 3841 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); 3842 WARN_ON(1); 3843 + goto out_free_cpumask; 3844 } 3845 global_trace.entries = ring_buffer_size(global_trace.buffer); 3846 + 3847 3848 #ifdef CONFIG_TRACER_MAX_TRACE 3849 max_tr.buffer = ring_buffer_alloc(trace_buf_size, ··· 3843 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); 3844 WARN_ON(1); 3845 ring_buffer_free(global_trace.buffer); 3846 + goto out_free_cpumask; 3847 } 3848 max_tr.entries = ring_buffer_size(max_tr.buffer); 3849 WARN_ON(max_tr.entries != global_trace.entries); ··· 3873 &trace_panic_notifier); 3874 3875 register_die_notifier(&trace_die_notifier); 3876 + ret = 0; 3877 3878 + out_free_cpumask: 3879 + free_cpumask_var(tracing_cpumask); 3880 + out_free_buffer_mask: 3881 + free_cpumask_var(tracing_buffer_mask); 3882 + out: 3883 + return ret; 3884 } 3885 early_initcall(tracer_alloc_buffers); 3886 fs_initcall(tracer_init_debugfs);
+1 -1
kernel/trace/trace.h
··· 368 loff_t pos; 369 long idx; 370 371 - cpumask_t started; 372 }; 373 374 int tracing_is_enabled(void);
··· 368 loff_t pos; 369 long idx; 370 371 + cpumask_var_t started; 372 }; 373 374 int tracing_is_enabled(void);
+1 -1
kernel/trace/trace_boot.c
··· 42 int cpu; 43 boot_trace = tr; 44 45 - for_each_cpu_mask(cpu, cpu_possible_map) 46 tracing_reset(tr, cpu); 47 48 tracing_sched_switch_assign_trace(tr);
··· 42 int cpu; 43 boot_trace = tr; 44 45 + for_each_cpu(cpu, cpu_possible_mask) 46 tracing_reset(tr, cpu); 47 48 tracing_sched_switch_assign_trace(tr);
+1 -1
kernel/trace/trace_functions_graph.c
··· 79 int i; 80 int ret; 81 int log10_this = log10_cpu(cpu); 82 - int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map)); 83 84 85 /*
··· 79 int i; 80 int ret; 81 int log10_this = log10_cpu(cpu); 82 + int log10_all = log10_cpu(cpumask_weight(cpu_online_mask)); 83 84 85 /*
+3 -3
kernel/trace/trace_hw_branches.c
··· 46 47 tracing_reset_online_cpus(tr); 48 49 - for_each_cpu_mask(cpu, cpu_possible_map) 50 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); 51 } 52 ··· 62 { 63 int cpu; 64 65 - for_each_cpu_mask(cpu, cpu_possible_map) 66 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); 67 } 68 ··· 172 { 173 int cpu; 174 175 - for_each_cpu_mask(cpu, cpu_possible_map) 176 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); 177 } 178
··· 46 47 tracing_reset_online_cpus(tr); 48 49 + for_each_cpu(cpu, cpu_possible_mask) 50 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); 51 } 52 ··· 62 { 63 int cpu; 64 65 + for_each_cpu(cpu, cpu_possible_mask) 66 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); 67 } 68 ··· 172 { 173 int cpu; 174 175 + for_each_cpu(cpu, cpu_possible_mask) 176 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); 177 } 178
+1 -1
kernel/trace/trace_power.c
··· 39 40 trace_power_enabled = 1; 41 42 - for_each_cpu_mask(cpu, cpu_possible_map) 43 tracing_reset(tr, cpu); 44 return 0; 45 }
··· 39 40 trace_power_enabled = 1; 41 42 + for_each_cpu(cpu, cpu_possible_mask) 43 tracing_reset(tr, cpu); 44 return 0; 45 }
+3 -10
kernel/trace/trace_sysprof.c
··· 196 return HRTIMER_RESTART; 197 } 198 199 - static void start_stack_timer(int cpu) 200 { 201 - struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu); 202 203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 204 hrtimer->function = stack_trace_timer_fn; ··· 208 209 static void start_stack_timers(void) 210 { 211 - cpumask_t saved_mask = current->cpus_allowed; 212 - int cpu; 213 - 214 - for_each_online_cpu(cpu) { 215 - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 216 - start_stack_timer(cpu); 217 - } 218 - set_cpus_allowed_ptr(current, &saved_mask); 219 } 220 221 static void stop_stack_timer(int cpu)
··· 196 return HRTIMER_RESTART; 197 } 198 199 + static void start_stack_timer(void *unused) 200 { 201 + struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer); 202 203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 204 hrtimer->function = stack_trace_timer_fn; ··· 208 209 static void start_stack_timers(void) 210 { 211 + on_each_cpu(start_stack_timer, NULL, 1); 212 } 213 214 static void stop_stack_timer(int cpu)
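on_each_cpu() runs the callback on every online cpu (including the caller) and waits if asked to, which is why the old dance of saving cpus_allowed, binding to each cpu in turn and restoring the mask can be deleted outright. Shape of the call, as a sketch:

#include <linux/smp.h>

/* Runs on each online cpu; keep it short, interrupts are disabled. */
static void example_per_cpu_setup(void *unused)
{
}

static void example_setup_all_cpus(void)
{
        on_each_cpu(example_per_cpu_setup, NULL, 1);    /* 1 == wait */
}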
+14 -12
kernel/workqueue.c
··· 73 static LIST_HEAD(workqueues); 74 75 static int singlethread_cpu __read_mostly; 76 - static cpumask_t cpu_singlethread_map __read_mostly; 77 /* 78 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD 79 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work ··· 81 * use cpu_possible_map, the cpumask below is more a documentation 82 * than optimization. 83 */ 84 - static cpumask_t cpu_populated_map __read_mostly; 85 86 /* If it's single threaded, it isn't in the list of workqueues. */ 87 static inline int is_wq_single_threaded(struct workqueue_struct *wq) ··· 89 return wq->singlethread; 90 } 91 92 - static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq) 93 { 94 return is_wq_single_threaded(wq) 95 - ? &cpu_singlethread_map : &cpu_populated_map; 96 } 97 98 static ··· 410 */ 411 void flush_workqueue(struct workqueue_struct *wq) 412 { 413 - const cpumask_t *cpu_map = wq_cpu_map(wq); 414 int cpu; 415 416 might_sleep(); ··· 532 { 533 struct cpu_workqueue_struct *cwq; 534 struct workqueue_struct *wq; 535 - const cpumask_t *cpu_map; 536 int cpu; 537 538 might_sleep(); ··· 903 */ 904 void destroy_workqueue(struct workqueue_struct *wq) 905 { 906 - const cpumask_t *cpu_map = wq_cpu_map(wq); 907 int cpu; 908 909 cpu_maps_update_begin(); ··· 933 934 switch (action) { 935 case CPU_UP_PREPARE: 936 - cpu_set(cpu, cpu_populated_map); 937 } 938 undo: 939 list_for_each_entry(wq, &workqueues, list) { ··· 964 switch (action) { 965 case CPU_UP_CANCELED: 966 case CPU_POST_DEAD: 967 - cpu_clear(cpu, cpu_populated_map); 968 } 969 970 return ret; ··· 1017 1018 void __init init_workqueues(void) 1019 { 1020 - cpu_populated_map = cpu_online_map; 1021 - singlethread_cpu = first_cpu(cpu_possible_map); 1022 - cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu); 1023 hotcpu_notifier(workqueue_cpu_callback, 0); 1024 keventd_wq = create_workqueue("events"); 1025 BUG_ON(!keventd_wq);
··· 73 static LIST_HEAD(workqueues); 74 75 static int singlethread_cpu __read_mostly; 76 + static const struct cpumask *cpu_singlethread_map __read_mostly; 77 /* 78 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD 79 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work ··· 81 * use cpu_possible_map, the cpumask below is more a documentation 82 * than optimization. 83 */ 84 + static cpumask_var_t cpu_populated_map __read_mostly; 85 86 /* If it's single threaded, it isn't in the list of workqueues. */ 87 static inline int is_wq_single_threaded(struct workqueue_struct *wq) ··· 89 return wq->singlethread; 90 } 91 92 + static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) 93 { 94 return is_wq_single_threaded(wq) 95 + ? cpu_singlethread_map : cpu_populated_map; 96 } 97 98 static ··· 410 */ 411 void flush_workqueue(struct workqueue_struct *wq) 412 { 413 + const struct cpumask *cpu_map = wq_cpu_map(wq); 414 int cpu; 415 416 might_sleep(); ··· 532 { 533 struct cpu_workqueue_struct *cwq; 534 struct workqueue_struct *wq; 535 + const struct cpumask *cpu_map; 536 int cpu; 537 538 might_sleep(); ··· 903 */ 904 void destroy_workqueue(struct workqueue_struct *wq) 905 { 906 + const struct cpumask *cpu_map = wq_cpu_map(wq); 907 int cpu; 908 909 cpu_maps_update_begin(); ··· 933 934 switch (action) { 935 case CPU_UP_PREPARE: 936 + cpumask_set_cpu(cpu, cpu_populated_map); 937 } 938 undo: 939 list_for_each_entry(wq, &workqueues, list) { ··· 964 switch (action) { 965 case CPU_UP_CANCELED: 966 case CPU_POST_DEAD: 967 + cpumask_clear_cpu(cpu, cpu_populated_map); 968 } 969 970 return ret; ··· 1017 1018 void __init init_workqueues(void) 1019 { 1020 + alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL); 1021 + 1022 + cpumask_copy(cpu_populated_map, cpu_online_mask); 1023 + singlethread_cpu = cpumask_first(cpu_possible_mask); 1024 + cpu_singlethread_map = cpumask_of(singlethread_cpu); 1025 hotcpu_notifier(workqueue_cpu_callback, 0); 1026 keventd_wq = create_workqueue("events"); 1027 BUG_ON(!keventd_wq);
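cpumask_of(cpu) used above returns a const pointer to static single-cpu mask storage, so cpu_singlethread_map can simply alias it instead of keeping a private copy. Illustratively:

#include <linux/cpumask.h>

static const struct cpumask *example_single_cpu_map;

static void example_pick_cpu(void)
{
        /* No copy needed: cpumask_of() points at constant storage. */
        example_single_cpu_map = cpumask_of(cpumask_first(cpu_possible_mask));
}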
+8
lib/Kconfig
··· 13 config GENERIC_FIND_NEXT_BIT 14 bool 15 16 config CRC_CCITT 17 tristate "CRC-CCITT functions" 18 help ··· 169 Use dynamic allocation for cpumask_var_t, instead of putting 170 them on the stack. This is a bit more expensive, but avoids 171 stack overflow. 172 173 endmenu
··· 13 config GENERIC_FIND_NEXT_BIT 14 bool 15 16 + config GENERIC_FIND_LAST_BIT 17 + bool 18 + default y 19 + 20 config CRC_CCITT 21 tristate "CRC-CCITT functions" 22 help ··· 165 Use dynamic allocation for cpumask_var_t, instead of putting 166 them on the stack. This is a bit more expensive, but avoids 167 stack overflow. 168 + 169 + config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 170 + bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS 171 + depends on EXPERIMENTAL && BROKEN 172 173 endmenu
+1
lib/Makefile
··· 37 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 38 lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o 39 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o 40 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 41 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 42 obj-$(CONFIG_PLIST) += plist.o
··· 37 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 38 lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o 39 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o 40 + lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o 41 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 42 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 43 obj-$(CONFIG_PLIST) += plist.o
+59 -3
lib/cpumask.c
··· 76 77 /* These are not inline because of header tangles. */ 78 #ifdef CONFIG_CPUMASK_OFFSTACK 79 - bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 80 { 81 if (likely(slab_is_available())) 82 - *mask = kmalloc(cpumask_size(), flags); 83 else { 84 #ifdef CONFIG_DEBUG_PER_CPU_MAPS 85 printk(KERN_ERR 86 "=> alloc_cpumask_var: kmalloc not available!\n"); 87 - dump_stack(); 88 #endif 89 *mask = NULL; 90 } ··· 107 dump_stack(); 108 } 109 #endif 110 return *mask != NULL; 111 } 112 EXPORT_SYMBOL(alloc_cpumask_var); 113 114 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) 115 { 116 *mask = alloc_bootmem(cpumask_size()); 117 } 118 119 void free_cpumask_var(cpumask_var_t mask) 120 { 121 kfree(mask); 122 } 123 EXPORT_SYMBOL(free_cpumask_var); 124 125 void __init free_bootmem_cpumask_var(cpumask_var_t mask) 126 { 127 free_bootmem((unsigned long)mask, cpumask_size());
··· 76 77 /* These are not inline because of header tangles. */ 78 #ifdef CONFIG_CPUMASK_OFFSTACK 79 + /** 80 + * alloc_cpumask_var_node - allocate a struct cpumask on a given node 81 + * @mask: pointer to cpumask_var_t where the cpumask is returned 82 + * @flags: GFP_ flags 83 + * 84 + * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is 85 + * a nop returning a constant 1 (in <linux/cpumask.h>) 86 + * Returns TRUE if memory allocation succeeded, FALSE otherwise. 87 + * 88 + * In addition, mask will be NULL if this fails. Note that gcc is 89 + * usually smart enough to know that mask can never be NULL if 90 + * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case 91 + * too. 92 + */ 93 + bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) 94 { 95 if (likely(slab_is_available())) 96 + *mask = kmalloc_node(cpumask_size(), flags, node); 97 else { 98 #ifdef CONFIG_DEBUG_PER_CPU_MAPS 99 printk(KERN_ERR 100 "=> alloc_cpumask_var: kmalloc not available!\n"); 101 #endif 102 *mask = NULL; 103 } ··· 94 dump_stack(); 95 } 96 #endif 97 + /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */ 98 + if (*mask) { 99 + unsigned int tail; 100 + tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long); 101 + memset(cpumask_bits(*mask) + cpumask_size() - tail, 102 + 0, tail); 103 + } 104 + 105 return *mask != NULL; 106 + } 107 + EXPORT_SYMBOL(alloc_cpumask_var_node); 108 + 109 + /** 110 + * alloc_cpumask_var - allocate a struct cpumask 111 + * @mask: pointer to cpumask_var_t where the cpumask is returned 112 + * @flags: GFP_ flags 113 + * 114 + * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is 115 + * a nop returning a constant 1 (in <linux/cpumask.h>). 116 + * 117 + * See alloc_cpumask_var_node. 118 + */ 119 + bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 120 + { 121 + return alloc_cpumask_var_node(mask, flags, numa_node_id()); 122 } 123 EXPORT_SYMBOL(alloc_cpumask_var); 124 125 + /** 126 + * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena. 127 + * @mask: pointer to cpumask_var_t where the cpumask is returned 128 + * 129 + * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is 130 + * a nop (in <linux/cpumask.h>). 131 + * Either returns an allocated (zero-filled) cpumask, or causes the 132 + * system to panic. 133 + */ 134 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) 135 { 136 *mask = alloc_bootmem(cpumask_size()); 137 } 138 139 + /** 140 + * free_cpumask_var - frees memory allocated for a struct cpumask. 141 + * @mask: cpumask to free 142 + * 143 + * This is safe on a NULL mask. 144 + */ 145 void free_cpumask_var(cpumask_var_t mask) 146 { 147 kfree(mask); 148 } 149 EXPORT_SYMBOL(free_cpumask_var); 150 151 + /** 152 + * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var 153 + * @mask: cpumask to free 154 + */ 155 void __init free_bootmem_cpumask_var(cpumask_var_t mask) 156 { 157 free_bootmem((unsigned long)mask, cpumask_size());
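alloc_cpumask_var_node() is the NUMA-aware entry point added above; plain alloc_cpumask_var() now simply forwards to it with numa_node_id(). A usage sketch for a caller that knows the node it cares about (the surrounding helper is hypothetical):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int example_alloc_mask_on_node(int node, cpumask_var_t *out)
{
        /* kmalloc_node() falls back to other nodes if @node has no memory. */
        if (!alloc_cpumask_var_node(out, GFP_KERNEL, node))
                return -ENOMEM;
        cpumask_clear(*out);            /* the allocation is not zeroed */
        return 0;
}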
+45
lib/find_last_bit.c
···
··· 1 + /* find_last_bit.c: fallback find next bit implementation 2 + * 3 + * Copyright (C) 2008 IBM Corporation 4 + * Written by Rusty Russell <rusty@rustcorp.com.au> 5 + * (Inspired by David Howell's find_next_bit implementation) 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License 9 + * as published by the Free Software Foundation; either version 10 + * 2 of the License, or (at your option) any later version. 11 + */ 12 + 13 + #include <linux/bitops.h> 14 + #include <linux/module.h> 15 + #include <asm/types.h> 16 + #include <asm/byteorder.h> 17 + 18 + unsigned long find_last_bit(const unsigned long *addr, unsigned long size) 19 + { 20 + unsigned long words; 21 + unsigned long tmp; 22 + 23 + /* Start at final word. */ 24 + words = size / BITS_PER_LONG; 25 + 26 + /* Partial final word? */ 27 + if (size & (BITS_PER_LONG-1)) { 28 + tmp = (addr[words] & (~0UL >> (BITS_PER_LONG 29 + - (size & (BITS_PER_LONG-1))))); 30 + if (tmp) 31 + goto found; 32 + } 33 + 34 + while (words) { 35 + tmp = addr[--words]; 36 + if (tmp) { 37 + found: 38 + return words * BITS_PER_LONG + __fls(tmp); 39 + } 40 + } 41 + 42 + /* Not found */ 43 + return size; 44 + } 45 + EXPORT_SYMBOL(find_last_bit);
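find_last_bit() returns the index of the highest set bit, or @size when no bit is set, so callers must treat the size value as "not found". A small usage sketch (not from this patch):

#include <linux/bitops.h>

/* Index of the highest set bit in @bits, or -1 if the bitmap is empty. */
static int example_highest_bit(const unsigned long *bits, unsigned long nbits)
{
        unsigned long last = find_last_bit(bits, nbits);

        return last == nbits ? -1 : (int)last;
}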
+13 -3
mm/pdflush.c
··· 172 static int pdflush(void *dummy) 173 { 174 struct pdflush_work my_work; 175 - cpumask_t cpus_allowed; 176 177 /* 178 * pdflush can spend a lot of time doing encryption via dm-crypt. We ··· 196 * This is needed as pdflush's are dynamically created and destroyed. 197 * The boottime pdflush's are easily placed w/o these 2 lines. 198 */ 199 - cpuset_cpus_allowed(current, &cpus_allowed); 200 - set_cpus_allowed_ptr(current, &cpus_allowed); 201 202 return __pdflush(&my_work); 203 }
··· 172 static int pdflush(void *dummy) 173 { 174 struct pdflush_work my_work; 175 + cpumask_var_t cpus_allowed; 176 + 177 + /* 178 + * Since the caller doesn't even check kthread_run() worked, let's not 179 + * freak out too much if this fails. 180 + */ 181 + if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 182 + printk(KERN_WARNING "pdflush failed to allocate cpumask\n"); 183 + return 0; 184 + } 185 186 /* 187 * pdflush can spend a lot of time doing encryption via dm-crypt. We ··· 187 * This is needed as pdflush's are dynamically created and destroyed. 188 * The boottime pdflush's are easily placed w/o these 2 lines. 189 */ 190 + cpuset_cpus_allowed(current, cpus_allowed); 191 + set_cpus_allowed_ptr(current, cpus_allowed); 192 + free_cpumask_var(cpus_allowed); 193 194 return __pdflush(&my_work); 195 }
+1 -1
mm/slab.c
··· 2157 2158 /* 2159 * We use cache_chain_mutex to ensure a consistent view of 2160 - * cpu_online_map as well. Please see cpuup_callback 2161 */ 2162 get_online_cpus(); 2163 mutex_lock(&cache_chain_mutex);
··· 2157 2158 /* 2159 * We use cache_chain_mutex to ensure a consistent view of 2160 + * cpu_online_mask as well. Please see cpuup_callback 2161 */ 2162 get_online_cpus(); 2163 mutex_lock(&cache_chain_mutex);
+11 -9
mm/slub.c
··· 1970 kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; 1971 1972 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); 1973 - static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE; 1974 1975 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s, 1976 int cpu, gfp_t flags) ··· 2045 { 2046 int i; 2047 2048 - if (cpu_isset(cpu, kmem_cach_cpu_free_init_once)) 2049 return; 2050 2051 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--) 2052 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu); 2053 2054 - cpu_set(cpu, kmem_cach_cpu_free_init_once); 2055 } 2056 2057 static void __init init_alloc_cpu(void) ··· 3451 long max_time; 3452 long min_pid; 3453 long max_pid; 3454 - cpumask_t cpus; 3455 nodemask_t nodes; 3456 }; 3457 ··· 3526 if (track->pid > l->max_pid) 3527 l->max_pid = track->pid; 3528 3529 - cpu_set(track->cpu, l->cpus); 3530 } 3531 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3532 return 1; ··· 3557 l->max_time = age; 3558 l->min_pid = track->pid; 3559 l->max_pid = track->pid; 3560 - cpus_clear(l->cpus); 3561 - cpu_set(track->cpu, l->cpus); 3562 nodes_clear(l->nodes); 3563 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3564 return 1; ··· 3639 len += sprintf(buf + len, " pid=%ld", 3640 l->min_pid); 3641 3642 - if (num_online_cpus() > 1 && !cpus_empty(l->cpus) && 3643 len < PAGE_SIZE - 60) { 3644 len += sprintf(buf + len, " cpus="); 3645 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3646 - &l->cpus); 3647 } 3648 3649 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
··· 1970 kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; 1971 1972 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); 1973 + static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS); 1974 1975 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s, 1976 int cpu, gfp_t flags) ··· 2045 { 2046 int i; 2047 2048 + if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once))) 2049 return; 2050 2051 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--) 2052 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu); 2053 2054 + cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)); 2055 } 2056 2057 static void __init init_alloc_cpu(void) ··· 3451 long max_time; 3452 long min_pid; 3453 long max_pid; 3454 + DECLARE_BITMAP(cpus, NR_CPUS); 3455 nodemask_t nodes; 3456 }; 3457 ··· 3526 if (track->pid > l->max_pid) 3527 l->max_pid = track->pid; 3528 3529 + cpumask_set_cpu(track->cpu, 3530 + to_cpumask(l->cpus)); 3531 } 3532 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3533 return 1; ··· 3556 l->max_time = age; 3557 l->min_pid = track->pid; 3558 l->max_pid = track->pid; 3559 + cpumask_clear(to_cpumask(l->cpus)); 3560 + cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 3561 nodes_clear(l->nodes); 3562 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3563 return 1; ··· 3638 len += sprintf(buf + len, " pid=%ld", 3639 l->min_pid); 3640 3641 + if (num_online_cpus() > 1 && 3642 + !cpumask_empty(to_cpumask(l->cpus)) && 3643 len < PAGE_SIZE - 60) { 3644 len += sprintf(buf + len, " cpus="); 3645 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3646 + to_cpumask(l->cpus)); 3647 } 3648 3649 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
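
Instead of embedding a cpumask_t, the records above keep a raw bitmap and convert it with to_cpumask() wherever a struct cpumask pointer is needed. A hypothetical struct using the same pattern (names invented):

#include <linux/cpumask.h>
#include <linux/types.h>

/* Hypothetical stats record: a plain bitmap member instead of a
 * cpumask_t, converted on use with to_cpumask(). */
struct example_loc {
	unsigned long		count;
	DECLARE_BITMAP(cpus, NR_CPUS);
};

static void example_note_cpu(struct example_loc *l, int cpu)
{
	cpumask_set_cpu(cpu, to_cpumask(l->cpus));
	l->count++;
}

static bool example_any_cpu_seen(struct example_loc *l)
{
	return !cpumask_empty(to_cpumask(l->cpus));
}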
+2 -2
mm/vmscan.c
··· 1902 }; 1903 node_to_cpumask_ptr(cpumask, pgdat->node_id); 1904 1905 - if (!cpus_empty(*cpumask)) 1906 set_cpus_allowed_ptr(tsk, cpumask); 1907 current->reclaim_state = &reclaim_state; 1908 ··· 2141 pg_data_t *pgdat = NODE_DATA(nid); 2142 node_to_cpumask_ptr(mask, pgdat->node_id); 2143 2144 - if (any_online_cpu(*mask) < nr_cpu_ids) 2145 /* One of our CPUs online: restore mask */ 2146 set_cpus_allowed_ptr(pgdat->kswapd, mask); 2147 }
··· 1902 }; 1903 node_to_cpumask_ptr(cpumask, pgdat->node_id); 1904 1905 + if (!cpumask_empty(cpumask)) 1906 set_cpus_allowed_ptr(tsk, cpumask); 1907 current->reclaim_state = &reclaim_state; 1908 ··· 2141 pg_data_t *pgdat = NODE_DATA(nid); 2142 node_to_cpumask_ptr(mask, pgdat->node_id); 2143 2144 + if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 2145 /* One of our CPUs online: restore mask */ 2146 set_cpus_allowed_ptr(pgdat->kswapd, mask); 2147 }
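
any_online_cpu(*mask) is replaced by cpumask_any_and(cpu_online_mask, mask), which takes pointers and returns a value >= nr_cpu_ids when the two masks do not intersect. Wrapped as a hypothetical helper:

#include <linux/cpumask.h>
#include <linux/types.h>

/* Hypothetical test: does @mask contain at least one online CPU? */
static bool example_has_online_cpu(const struct cpumask *mask)
{
	/* cpumask_any_and() returns >= nr_cpu_ids if the masks do not
	 * intersect, i.e. no CPU in @mask is online. */
	return cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids;
}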
+2 -2
mm/vmstat.c
··· 20 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; 21 EXPORT_PER_CPU_SYMBOL(vm_event_states); 22 23 - static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask) 24 { 25 int cpu; 26 int i; ··· 43 void all_vm_events(unsigned long *ret) 44 { 45 get_online_cpus(); 46 - sum_vm_events(ret, &cpu_online_map); 47 put_online_cpus(); 48 } 49 EXPORT_SYMBOL_GPL(all_vm_events);
··· 20 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; 21 EXPORT_PER_CPU_SYMBOL(vm_event_states); 22 23 + static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask) 24 { 25 int cpu; 26 int i; ··· 43 void all_vm_events(unsigned long *ret) 44 { 45 get_online_cpus(); 46 + sum_vm_events(ret, cpu_online_mask); 47 put_online_cpus(); 48 } 49 EXPORT_SYMBOL_GPL(all_vm_events);
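
Taking const struct cpumask * lets callers hand in the global masks directly, as all_vm_events() now does with cpu_online_mask. A hypothetical summing helper in the same shape (the per-cpu variable is invented):

#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_events);

/* Hypothetical per-cpu sum mirroring the new sum_vm_events() signature. */
static unsigned long example_sum_events(const struct cpumask *cpumask)
{
	unsigned long sum = 0;
	int cpu;

	for_each_cpu(cpu, cpumask)
		sum += per_cpu(example_events, cpu);

	return sum;
}

A caller would then write, e.g., example_sum_events(cpu_online_mask) while holding get_online_cpus().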
+1 -1
security/selinux/selinuxfs.c
··· 1211 { 1212 int cpu; 1213 1214 - for (cpu = *idx; cpu < NR_CPUS; ++cpu) { 1215 if (!cpu_possible(cpu)) 1216 continue; 1217 *idx = cpu + 1;
··· 1211 { 1212 int cpu; 1213 1214 + for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) { 1215 if (!cpu_possible(cpu)) 1216 continue; 1217 *idx = cpu + 1;
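
Bounding the scan with nr_cpu_ids (the runtime limit, one past the highest possible CPU) instead of the compile-time NR_CPUS skips the unused tail of the range. A hypothetical equivalent scan:

#include <linux/cpumask.h>

/* Hypothetical scan: first possible CPU at or after @start, bounded by
 * nr_cpu_ids rather than NR_CPUS. */
static int example_next_possible_cpu(int start)
{
	int cpu;

	for (cpu = start; cpu < nr_cpu_ids; ++cpu) {
		if (cpu_possible(cpu))
			return cpu;
	}
	return -1;	/* no possible CPU at or after @start */
}

The same scan could presumably also be written as cpumask_next(start - 1, cpu_possible_mask).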