Merge branch 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (66 commits)
x86: export vector_used_by_percpu_irq
x86: use logical apicid in x2apic_cluster's x2apic_cpu_mask_to_apicid_and()
sched: nominate preferred wakeup cpu, fix
x86: fix lguest used_vectors breakage, -v2
x86: fix warning in arch/x86/kernel/io_apic.c
sched: fix warning in kernel/sched.c
sched: move test_sd_parent() to an SMP section of sched.h
sched: add SD_BALANCE_NEWIDLE at MC and CPU level for sched_mc>0
sched: activate active load balancing in new idle cpus
sched: bias task wakeups to preferred semi-idle packages
sched: nominate preferred wakeup cpu
sched: favour lower logical cpu number for sched_mc balance
sched: framework for sched_mc/smt_power_savings=N
sched: convert BALANCE_FOR_xx_POWER to inline functions
x86: use possible_cpus=NUM to extend the possible cpus allowed
x86: fix cpu_mask_to_apicid_and to include cpu_online_mask
x86: update io_apic.c to the new cpumask code
x86: Introduce topology_core_cpumask()/topology_thread_cpumask()
x86: xen: use smp_call_function_many()
x86: use work_on_cpu in x86/kernel/cpu/mcheck/mce_amd_64.c
...

Fixed up trivial conflict in kernel/time/tick-sched.c manually
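
The common thread in this series is the cpumask conversion: interfaces that
used to take (and copy) a cpumask_t by value now take a const struct cpumask *
pointer, and helpers like cpumask_of_cpu()/first_cpu() give way to
cpumask_of()/cpumask_first(), so NR_CPUS=4096 builds avoid copying 512-byte
masks around by value.  A minimal sketch of the recurring ->set_affinity
pattern seen in the hunks below (illustrative only, not taken from the patch;
my_chip_set_affinity and my_hw_route_irq are hypothetical names):

    #include <linux/cpumask.h>

    /* Hypothetical hardware hook standing in for the per-driver routing code. */
    static void my_hw_route_irq(unsigned int irq, unsigned int cpu)
    {
            /* program the interrupt controller for this cpu... */
    }

    /* Before: the mask is passed by value (512 bytes with NR_CPUS=4096). */
    static void my_chip_set_affinity_old(unsigned int irq, cpumask_t mask)
    {
            my_hw_route_irq(irq, first_cpu(mask));
    }

    /* After: only a pointer is handed down; the caller owns the mask. */
    static void my_chip_set_affinity(unsigned int irq, const struct cpumask *mask)
    {
            my_hw_route_irq(irq, cpumask_first(mask));
    }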

+2018 -1423
+9 -8
Documentation/cpu-hotplug.txt
··· 50 50 cpu_possible_map = cpu_present_map + additional_cpus 51 51 52 52 (*) Option valid only for following architectures 53 - - x86_64, ia64 53 + - ia64 54 54 55 - ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT 56 - to determine the number of potentially hot-pluggable cpus. The implementation 57 - should only rely on this to count the # of cpus, but *MUST* not rely on the 58 - apicid values in those tables for disabled apics. In the event BIOS doesn't 59 - mark such hot-pluggable cpus as disabled entries, one could use this 60 - parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map. 55 + ia64 uses the number of disabled local apics in ACPI tables MADT to 56 + determine the number of potentially hot-pluggable cpus. The implementation 57 + should only rely on this to count the # of cpus, but *MUST* not rely 58 + on the apicid values in those tables for disabled apics. In the event 59 + BIOS doesn't mark such hot-pluggable cpus as disabled entries, one could 60 + use this parameter "additional_cpus=x" to represent those cpus in the 61 + cpu_possible_map. 61 62 62 - possible_cpus=n [s390 only] use this to set hotpluggable cpus. 63 + possible_cpus=n [s390,x86_64] use this to set hotpluggable cpus. 63 64 This option sets possible_cpus bits in 64 65 cpu_possible_map. Thus keeping the numbers of bits set 65 66 constant even if the machine gets rebooted.
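
With this change possible_cpus= stops being s390-only.  For example (the value
is arbitrary, purely illustrative), booting an x86_64 machine with

        possible_cpus=8

keeps eight bits set in cpu_possible_map regardless of how many CPUs were
actually detected, so up to eight CPUs can later be brought online via hotplug
and the count stays constant across reboots; additional_cpus=x remains the
ia64-only knob for the case where the BIOS does not mark hot-pluggable cpus as
disabled entries in the MADT.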
-1
arch/alpha/include/asm/smp.h
··· 45 45 #define raw_smp_processor_id() (current_thread_info()->cpu) 46 46 47 47 extern int smp_num_cpus; 48 - #define cpu_possible_map cpu_present_map 49 48 50 49 extern void arch_send_call_function_single_ipi(int cpu); 51 50 extern void arch_send_call_function_ipi(cpumask_t mask);
+1 -1
arch/alpha/kernel/irq.c
··· 55 55 last_cpu = cpu; 56 56 57 57 irq_desc[irq].affinity = cpumask_of_cpu(cpu); 58 - irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu)); 58 + irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu)); 59 59 return 0; 60 60 } 61 61 #endif /* CONFIG_SMP */
+2
arch/alpha/kernel/process.c
··· 94 94 flags |= 0x00040000UL; /* "remain halted" */ 95 95 *pflags = flags; 96 96 cpu_clear(cpuid, cpu_present_map); 97 + cpu_clear(cpuid, cpu_possible_map); 97 98 halt(); 98 99 } 99 100 #endif ··· 121 120 #ifdef CONFIG_SMP 122 121 /* Wait for the secondaries to halt. */ 123 122 cpu_clear(boot_cpuid, cpu_present_map); 123 + cpu_clear(boot_cpuid, cpu_possible_map); 124 124 while (cpus_weight(cpu_present_map)) 125 125 barrier(); 126 126 #endif
+2 -5
arch/alpha/kernel/smp.c
··· 70 70 /* Set to a secondary's cpuid when it comes online. */ 71 71 static int smp_secondary_alive __devinitdata = 0; 72 72 73 - /* Which cpus ids came online. */ 74 - cpumask_t cpu_online_map; 75 - 76 - EXPORT_SYMBOL(cpu_online_map); 77 - 78 73 int smp_num_probed; /* Internal processor count */ 79 74 int smp_num_cpus = 1; /* Number that came online. */ 80 75 EXPORT_SYMBOL(smp_num_cpus); ··· 435 440 ((char *)cpubase + i*hwrpb->processor_size); 436 441 if ((cpu->flags & 0x1cc) == 0x1cc) { 437 442 smp_num_probed++; 443 + cpu_set(i, cpu_possible_map); 438 444 cpu_set(i, cpu_present_map); 439 445 cpu->pal_revision = boot_cpu_palrev; 440 446 } ··· 469 473 470 474 /* Nothing to do on a UP box, or when told not to. */ 471 475 if (smp_num_probed == 1 || max_cpus == 0) { 476 + cpu_possible_map = cpumask_of_cpu(boot_cpuid); 472 477 cpu_present_map = cpumask_of_cpu(boot_cpuid); 473 478 printk(KERN_INFO "SMP mode deactivated.\n"); 474 479 return;
+4 -4
arch/alpha/kernel/sys_dp264.c
··· 177 177 } 178 178 179 179 static void 180 - dp264_set_affinity(unsigned int irq, cpumask_t affinity) 180 + dp264_set_affinity(unsigned int irq, const struct cpumask *affinity) 181 181 { 182 182 spin_lock(&dp264_irq_lock); 183 - cpu_set_irq_affinity(irq, affinity); 183 + cpu_set_irq_affinity(irq, *affinity); 184 184 tsunami_update_irq_hw(cached_irq_mask); 185 185 spin_unlock(&dp264_irq_lock); 186 186 } 187 187 188 188 static void 189 - clipper_set_affinity(unsigned int irq, cpumask_t affinity) 189 + clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) 190 190 { 191 191 spin_lock(&dp264_irq_lock); 192 - cpu_set_irq_affinity(irq - 16, affinity); 192 + cpu_set_irq_affinity(irq - 16, *affinity); 193 193 tsunami_update_irq_hw(cached_irq_mask); 194 194 spin_unlock(&dp264_irq_lock); 195 195 }
+2 -2
arch/alpha/kernel/sys_titan.c
··· 158 158 } 159 159 160 160 static void 161 - titan_set_irq_affinity(unsigned int irq, cpumask_t affinity) 161 + titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) 162 162 { 163 163 spin_lock(&titan_irq_lock); 164 - titan_cpu_set_irq_affinity(irq - 16, affinity); 164 + titan_cpu_set_irq_affinity(irq - 16, *affinity); 165 165 titan_update_irq_hw(titan_cached_irq_mask); 166 166 spin_unlock(&titan_irq_lock); 167 167 }
+2 -2
arch/arm/common/gic.c
··· 109 109 } 110 110 111 111 #ifdef CONFIG_SMP 112 - static void gic_set_cpu(unsigned int irq, cpumask_t mask_val) 112 + static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val) 113 113 { 114 114 void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3); 115 115 unsigned int shift = (irq % 4) * 8; 116 - unsigned int cpu = first_cpu(mask_val); 116 + unsigned int cpu = cpumask_first(mask_val); 117 117 u32 val; 118 118 119 119 spin_lock(&irq_controller_lock);
+1 -1
arch/arm/kernel/irq.c
··· 174 174 pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu); 175 175 176 176 spin_lock_irq(&desc->lock); 177 - desc->chip->set_affinity(irq, cpumask_of_cpu(cpu)); 177 + desc->chip->set_affinity(irq, cpumask_of(cpu)); 178 178 spin_unlock_irq(&desc->lock); 179 179 } 180 180
-10
arch/arm/kernel/smp.c
··· 34 34 #include <asm/ptrace.h> 35 35 36 36 /* 37 - * bitmask of present and online CPUs. 38 - * The present bitmask indicates that the CPU is physically present. 39 - * The online bitmask indicates that the CPU is up and running. 40 - */ 41 - cpumask_t cpu_possible_map; 42 - EXPORT_SYMBOL(cpu_possible_map); 43 - cpumask_t cpu_online_map; 44 - EXPORT_SYMBOL(cpu_online_map); 45 - 46 - /* 47 37 * as from 2.5, kernels no longer have an init_tasks structure 48 38 * so we need some other way of telling a new secondary core 49 39 * where to place its SVC stack
+1 -2
arch/arm/mach-at91/at91rm9200_time.c
··· 178 178 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 179 179 .shift = 32, 180 180 .rating = 150, 181 - .cpumask = CPU_MASK_CPU0, 182 181 .set_next_event = clkevt32k_next_event, 183 182 .set_mode = clkevt32k_mode, 184 183 }; ··· 205 206 clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift); 206 207 clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt); 207 208 clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1; 208 - clkevt.cpumask = cpumask_of_cpu(0); 209 + clkevt.cpumask = cpumask_of(0); 209 210 clockevents_register_device(&clkevt); 210 211 211 212 /* register clocksource */
+1 -1
arch/arm/mach-at91/at91sam926x_time.c
··· 91 91 .features = CLOCK_EVT_FEAT_PERIODIC, 92 92 .shift = 32, 93 93 .rating = 100, 94 - .cpumask = CPU_MASK_CPU0, 95 94 .set_mode = pit_clkevt_mode, 96 95 }; 97 96 ··· 172 173 173 174 /* Set up and register clockevents */ 174 175 pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift); 176 + pit_clkevt.cpumask = cpumask_of(0); 175 177 clockevents_register_device(&pit_clkevt); 176 178 } 177 179
+1 -1
arch/arm/mach-davinci/time.c
··· 322 322 clockevent_davinci.min_delta_ns = 323 323 clockevent_delta2ns(1, &clockevent_davinci); 324 324 325 - clockevent_davinci.cpumask = cpumask_of_cpu(0); 325 + clockevent_davinci.cpumask = cpumask_of(0); 326 326 clockevents_register_device(&clockevent_davinci); 327 327 } 328 328
+1 -1
arch/arm/mach-imx/time.c
··· 184 184 clockevent_imx.min_delta_ns = 185 185 clockevent_delta2ns(0xf, &clockevent_imx); 186 186 187 - clockevent_imx.cpumask = cpumask_of_cpu(0); 187 + clockevent_imx.cpumask = cpumask_of(0); 188 188 189 189 clockevents_register_device(&clockevent_imx); 190 190
+1 -1
arch/arm/mach-ixp4xx/common.c
··· 487 487 clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx); 488 488 clockevent_ixp4xx.min_delta_ns = 489 489 clockevent_delta2ns(0xf, &clockevent_ixp4xx); 490 - clockevent_ixp4xx.cpumask = cpumask_of_cpu(0); 490 + clockevent_ixp4xx.cpumask = cpumask_of(0); 491 491 492 492 clockevents_register_device(&clockevent_ixp4xx); 493 493 return 0;
+1 -1
arch/arm/mach-msm/timer.c
··· 182 182 clockevent_delta2ns(0xf0000000 >> clock->shift, ce); 183 183 /* 4 gets rounded down to 3 */ 184 184 ce->min_delta_ns = clockevent_delta2ns(4, ce); 185 - ce->cpumask = cpumask_of_cpu(0); 185 + ce->cpumask = cpumask_of(0); 186 186 187 187 cs->mult = clocksource_hz2mult(clock->freq, cs->shift); 188 188 res = clocksource_register(cs);
+1 -1
arch/arm/mach-ns9xxx/time-ns9360.c
··· 173 173 ns9360_clockevent_device.min_delta_ns = 174 174 clockevent_delta2ns(1, &ns9360_clockevent_device); 175 175 176 - ns9360_clockevent_device.cpumask = cpumask_of_cpu(0); 176 + ns9360_clockevent_device.cpumask = cpumask_of(0); 177 177 clockevents_register_device(&ns9360_clockevent_device); 178 178 179 179 setup_irq(IRQ_NS9360_TIMER0 + TIMER_CLOCKEVENT,
+1 -1
arch/arm/mach-omap1/time.c
··· 173 173 clockevent_mpu_timer1.min_delta_ns = 174 174 clockevent_delta2ns(1, &clockevent_mpu_timer1); 175 175 176 - clockevent_mpu_timer1.cpumask = cpumask_of_cpu(0); 176 + clockevent_mpu_timer1.cpumask = cpumask_of(0); 177 177 clockevents_register_device(&clockevent_mpu_timer1); 178 178 } 179 179
+1 -1
arch/arm/mach-omap1/timer32k.c
··· 187 187 clockevent_32k_timer.min_delta_ns = 188 188 clockevent_delta2ns(1, &clockevent_32k_timer); 189 189 190 - clockevent_32k_timer.cpumask = cpumask_of_cpu(0); 190 + clockevent_32k_timer.cpumask = cpumask_of(0); 191 191 clockevents_register_device(&clockevent_32k_timer); 192 192 } 193 193
+1 -1
arch/arm/mach-omap2/timer-gp.c
··· 120 120 clockevent_gpt.min_delta_ns = 121 121 clockevent_delta2ns(1, &clockevent_gpt); 122 122 123 - clockevent_gpt.cpumask = cpumask_of_cpu(0); 123 + clockevent_gpt.cpumask = cpumask_of(0); 124 124 clockevents_register_device(&clockevent_gpt); 125 125 } 126 126
+1 -1
arch/arm/mach-pxa/time.c
··· 122 122 .features = CLOCK_EVT_FEAT_ONESHOT, 123 123 .shift = 32, 124 124 .rating = 200, 125 - .cpumask = CPU_MASK_CPU0, 126 125 .set_next_event = pxa_osmr0_set_next_event, 127 126 .set_mode = pxa_osmr0_set_mode, 128 127 }; ··· 162 163 clockevent_delta2ns(0x7fffffff, &ckevt_pxa_osmr0); 163 164 ckevt_pxa_osmr0.min_delta_ns = 164 165 clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_pxa_osmr0) + 1; 166 + ckevt_pxa_osmr0.cpumask = cpumask_of(0); 165 167 166 168 cksrc_pxa_oscr0.mult = 167 169 clocksource_hz2mult(clock_tick_rate, cksrc_pxa_oscr0.shift);
+1 -1
arch/arm/mach-realview/core.c
··· 624 624 .set_mode = timer_set_mode, 625 625 .set_next_event = timer_set_next_event, 626 626 .rating = 300, 627 - .cpumask = CPU_MASK_ALL, 627 + .cpumask = cpu_all_mask, 628 628 }; 629 629 630 630 static void __init realview_clockevents_init(unsigned int timer_irq)
+2 -2
arch/arm/mach-realview/localtimer.c
··· 154 154 clk->set_mode = local_timer_set_mode; 155 155 clk->set_next_event = local_timer_set_next_event; 156 156 clk->irq = IRQ_LOCALTIMER; 157 - clk->cpumask = cpumask_of_cpu(cpu); 157 + clk->cpumask = cpumask_of(cpu); 158 158 clk->shift = 20; 159 159 clk->mult = div_sc(mpcore_timer_rate, NSEC_PER_SEC, clk->shift); 160 160 clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk); ··· 193 193 clk->rating = 200; 194 194 clk->set_mode = dummy_timer_set_mode; 195 195 clk->broadcast = smp_timer_broadcast; 196 - clk->cpumask = cpumask_of_cpu(cpu); 196 + clk->cpumask = cpumask_of(cpu); 197 197 198 198 clockevents_register_device(clk); 199 199 }
+1 -1
arch/arm/mach-sa1100/time.c
··· 73 73 .features = CLOCK_EVT_FEAT_ONESHOT, 74 74 .shift = 32, 75 75 .rating = 200, 76 - .cpumask = CPU_MASK_CPU0, 77 76 .set_next_event = sa1100_osmr0_set_next_event, 78 77 .set_mode = sa1100_osmr0_set_mode, 79 78 }; ··· 109 110 clockevent_delta2ns(0x7fffffff, &ckevt_sa1100_osmr0); 110 111 ckevt_sa1100_osmr0.min_delta_ns = 111 112 clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_sa1100_osmr0) + 1; 113 + ckevt_sa1100_osmr0.cpumask = cpumask_of(0); 112 114 113 115 cksrc_sa1100_oscr.mult = 114 116 clocksource_hz2mult(CLOCK_TICK_RATE, cksrc_sa1100_oscr.shift);
+1 -1
arch/arm/mach-versatile/core.c
··· 1005 1005 timer0_clockevent.min_delta_ns = 1006 1006 clockevent_delta2ns(0xf, &timer0_clockevent); 1007 1007 1008 - timer0_clockevent.cpumask = cpumask_of_cpu(0); 1008 + timer0_clockevent.cpumask = cpumask_of(0); 1009 1009 clockevents_register_device(&timer0_clockevent); 1010 1010 } 1011 1011
+2 -2
arch/arm/oprofile/op_model_mpcore.c
··· 260 260 static void em_route_irq(int irq, unsigned int cpu) 261 261 { 262 262 struct irq_desc *desc = irq_desc + irq; 263 - cpumask_t mask = cpumask_of_cpu(cpu); 263 + const struct cpumask *mask = cpumask_of(cpu); 264 264 265 265 spin_lock_irq(&desc->lock); 266 - desc->affinity = mask; 266 + desc->affinity = *mask; 267 267 desc->chip->set_affinity(irq, mask); 268 268 spin_unlock_irq(&desc->lock); 269 269 }
+1 -1
arch/arm/plat-mxc/time.c
··· 190 190 clockevent_mxc.min_delta_ns = 191 191 clockevent_delta2ns(0xff, &clockevent_mxc); 192 192 193 - clockevent_mxc.cpumask = cpumask_of_cpu(0); 193 + clockevent_mxc.cpumask = cpumask_of(0); 194 194 195 195 clockevents_register_device(&clockevent_mxc); 196 196
+1 -1
arch/arm/plat-orion/time.c
··· 149 149 .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, 150 150 .shift = 32, 151 151 .rating = 300, 152 - .cpumask = CPU_MASK_CPU0, 153 152 .set_next_event = orion_clkevt_next_event, 154 153 .set_mode = orion_clkevt_mode, 155 154 }; ··· 198 199 orion_clkevt.mult = div_sc(tclk, NSEC_PER_SEC, orion_clkevt.shift); 199 200 orion_clkevt.max_delta_ns = clockevent_delta2ns(0xfffffffe, &orion_clkevt); 200 201 orion_clkevt.min_delta_ns = clockevent_delta2ns(1, &orion_clkevt); 202 + orion_clkevt.cpumask = cpumask_of(0); 201 203 clockevents_register_device(&orion_clkevt); 202 204 }
+1 -1
arch/avr32/kernel/time.c
··· 106 106 .features = CLOCK_EVT_FEAT_ONESHOT, 107 107 .shift = 16, 108 108 .rating = 50, 109 - .cpumask = CPU_MASK_CPU0, 110 109 .set_next_event = comparator_next_event, 111 110 .set_mode = comparator_mode, 112 111 }; ··· 133 134 comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift); 134 135 comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator); 135 136 comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1; 137 + comparator.cpumask = cpumask_of(0); 136 138 137 139 sysreg_write(COMPARE, 0); 138 140 timer_irqaction.dev_id = &comparator;
+1 -1
arch/blackfin/kernel/time-ts.c
··· 162 162 .name = "bfin_core_timer", 163 163 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 164 164 .shift = 32, 165 - .cpumask = CPU_MASK_CPU0, 166 165 .set_next_event = bfin_timer_set_next_event, 167 166 .set_mode = bfin_timer_set_mode, 168 167 }; ··· 192 193 clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); 193 194 clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin); 194 195 clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin); 196 + clockevent_bfin.cpumask = cpumask_of(0); 195 197 clockevents_register_device(&clockevent_bfin); 196 198 197 199 return 0;
+2 -2
arch/cris/arch-v32/kernel/irq.c
··· 325 325 { 326 326 } 327 327 328 - void set_affinity_crisv32_irq(unsigned int irq, cpumask_t dest) 328 + void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest) 329 329 { 330 330 unsigned long flags; 331 331 spin_lock_irqsave(&irq_lock, flags); 332 - irq_allocations[irq - FIRST_IRQ].mask = dest; 332 + irq_allocations[irq - FIRST_IRQ].mask = *dest; 333 333 spin_unlock_irqrestore(&irq_lock, flags); 334 334 } 335 335
-4
arch/cris/arch-v32/kernel/smp.c
··· 29 29 spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED}; 30 30 31 31 /* CPU masks */ 32 - cpumask_t cpu_online_map = CPU_MASK_NONE; 33 - EXPORT_SYMBOL(cpu_online_map); 34 32 cpumask_t phys_cpu_present_map = CPU_MASK_NONE; 35 - cpumask_t cpu_possible_map; 36 - EXPORT_SYMBOL(cpu_possible_map); 37 33 EXPORT_SYMBOL(phys_cpu_present_map); 38 34 39 35 /* Variables used during SMP boot */
-1
arch/cris/include/asm/smp.h
··· 4 4 #include <linux/cpumask.h> 5 5 6 6 extern cpumask_t phys_cpu_present_map; 7 - extern cpumask_t cpu_possible_map; 8 7 9 8 #define raw_smp_processor_id() (current_thread_info()->cpu) 10 9
+1 -1
arch/ia64/hp/sim/hpsim_irq.c
··· 22 22 } 23 23 24 24 static void 25 - hpsim_set_affinity_noop (unsigned int a, cpumask_t b) 25 + hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b) 26 26 { 27 27 } 28 28
-1
arch/ia64/include/asm/smp.h
··· 57 57 58 58 extern char no_int_routing __devinitdata; 59 59 60 - extern cpumask_t cpu_online_map; 61 60 extern cpumask_t cpu_core_map[NR_CPUS]; 62 61 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 63 62 extern int smp_num_siblings;
-2
arch/ia64/include/asm/topology.h
··· 55 55 void build_cpu_to_node_map(void); 56 56 57 57 #define SD_CPU_INIT (struct sched_domain) { \ 58 - .span = CPU_MASK_NONE, \ 59 58 .parent = NULL, \ 60 59 .child = NULL, \ 61 60 .groups = NULL, \ ··· 79 80 80 81 /* sched_domains SD_NODE_INIT for IA64 NUMA machines */ 81 82 #define SD_NODE_INIT (struct sched_domain) { \ 82 - .span = CPU_MASK_NONE, \ 83 83 .parent = NULL, \ 84 84 .child = NULL, \ 85 85 .groups = NULL, \
+6 -6
arch/ia64/kernel/iosapic.c
··· 330 330 331 331 332 332 static void 333 - iosapic_set_affinity (unsigned int irq, cpumask_t mask) 333 + iosapic_set_affinity(unsigned int irq, const struct cpumask *mask) 334 334 { 335 335 #ifdef CONFIG_SMP 336 336 u32 high32, low32; 337 - int dest, rte_index; 337 + int cpu, dest, rte_index; 338 338 int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; 339 339 struct iosapic_rte_info *rte; 340 340 struct iosapic *iosapic; 341 341 342 342 irq &= (~IA64_IRQ_REDIRECTED); 343 343 344 - cpus_and(mask, mask, cpu_online_map); 345 - if (cpus_empty(mask)) 344 + cpu = cpumask_first_and(cpu_online_mask, mask); 345 + if (cpu >= nr_cpu_ids) 346 346 return; 347 347 348 - if (irq_prepare_move(irq, first_cpu(mask))) 348 + if (irq_prepare_move(irq, cpu)) 349 349 return; 350 350 351 - dest = cpu_physical_id(first_cpu(mask)); 351 + dest = cpu_physical_id(cpu); 352 352 353 353 if (!iosapic_intr_info[irq].count) 354 354 return; /* not an IOSAPIC interrupt */
+4 -5
arch/ia64/kernel/irq.c
··· 133 133 */ 134 134 static void migrate_irqs(void) 135 135 { 136 - cpumask_t mask; 137 136 irq_desc_t *desc; 138 137 int irq, new_cpu; 139 138 ··· 151 152 if (desc->status == IRQ_PER_CPU) 152 153 continue; 153 154 154 - cpus_and(mask, irq_desc[irq].affinity, cpu_online_map); 155 - if (any_online_cpu(mask) == NR_CPUS) { 155 + if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask) 156 + >= nr_cpu_ids) { 156 157 /* 157 158 * Save it for phase 2 processing 158 159 */ 159 160 vectors_in_migration[irq] = irq; 160 161 161 162 new_cpu = any_online_cpu(cpu_online_map); 162 - mask = cpumask_of_cpu(new_cpu); 163 163 164 164 /* 165 165 * Al three are essential, currently WARN_ON.. maybe panic? ··· 166 168 if (desc->chip && desc->chip->disable && 167 169 desc->chip->enable && desc->chip->set_affinity) { 168 170 desc->chip->disable(irq); 169 - desc->chip->set_affinity(irq, mask); 171 + desc->chip->set_affinity(irq, 172 + cpumask_of(new_cpu)); 170 173 desc->chip->enable(irq); 171 174 } else { 172 175 WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
+6 -6
arch/ia64/kernel/msi_ia64.c
··· 49 49 static struct irq_chip ia64_msi_chip; 50 50 51 51 #ifdef CONFIG_SMP 52 - static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) 52 + static void ia64_set_msi_irq_affinity(unsigned int irq, 53 + const cpumask_t *cpu_mask) 53 54 { 54 55 struct msi_msg msg; 55 56 u32 addr, data; 56 - int cpu = first_cpu(cpu_mask); 57 + int cpu = first_cpu(*cpu_mask); 57 58 58 59 if (!cpu_online(cpu)) 59 60 return; ··· 167 166 168 167 #ifdef CONFIG_DMAR 169 168 #ifdef CONFIG_SMP 170 - static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) 169 + static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) 171 170 { 172 171 struct irq_cfg *cfg = irq_cfg + irq; 173 172 struct msi_msg msg; 174 - int cpu = first_cpu(mask); 175 - 173 + int cpu = cpumask_first(mask); 176 174 177 175 if (!cpu_online(cpu)) 178 176 return; ··· 187 187 msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); 188 188 189 189 dmar_msi_write(irq, &msg); 190 - irq_desc[irq].affinity = mask; 190 + irq_desc[irq].affinity = *mask; 191 191 } 192 192 #endif /* CONFIG_SMP */ 193 193
+2 -8
arch/ia64/kernel/smpboot.c
··· 131 131 */ 132 132 DEFINE_PER_CPU(int, cpu_state); 133 133 134 - /* Bitmasks of currently online, and possible CPUs */ 135 - cpumask_t cpu_online_map; 136 - EXPORT_SYMBOL(cpu_online_map); 137 - cpumask_t cpu_possible_map = CPU_MASK_NONE; 138 - EXPORT_SYMBOL(cpu_possible_map); 139 - 140 134 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; 141 135 EXPORT_SYMBOL(cpu_core_map); 142 136 DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map); ··· 682 688 { 683 689 int new_cpei_cpu; 684 690 irq_desc_t *desc = NULL; 685 - cpumask_t mask; 691 + const struct cpumask *mask; 686 692 int retval = 0; 687 693 688 694 /* ··· 695 701 * Now re-target the CPEI to a different processor 696 702 */ 697 703 new_cpei_cpu = any_online_cpu(cpu_online_map); 698 - mask = cpumask_of_cpu(new_cpei_cpu); 704 + mask = cpumask_of(new_cpei_cpu); 699 705 set_cpei_target_cpu(new_cpei_cpu); 700 706 desc = irq_desc + ia64_cpe_irq; 701 707 /*
+1 -1
arch/ia64/kernel/topology.c
··· 219 219 cpumask_t shared_cpu_map; 220 220 221 221 cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map); 222 - len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map); 222 + len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map); 223 223 len += sprintf(buf+len, "\n"); 224 224 return len; 225 225 }
+3 -3
arch/ia64/sn/kernel/irq.c
··· 227 227 return new_irq_info; 228 228 } 229 229 230 - static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) 230 + static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask) 231 231 { 232 232 struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; 233 233 nasid_t nasid; 234 234 int slice; 235 235 236 - nasid = cpuid_to_nasid(first_cpu(mask)); 237 - slice = cpuid_to_slice(first_cpu(mask)); 236 + nasid = cpuid_to_nasid(cpumask_first(mask)); 237 + slice = cpuid_to_slice(cpumask_first(mask)); 238 238 239 239 list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, 240 240 sn_irq_lh[irq], list)
+4 -3
arch/ia64/sn/kernel/msi_sn.c
··· 151 151 } 152 152 153 153 #ifdef CONFIG_SMP 154 - static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) 154 + static void sn_set_msi_irq_affinity(unsigned int irq, 155 + const struct cpumask *cpu_mask) 155 156 { 156 157 struct msi_msg msg; 157 158 int slice; ··· 165 164 struct sn_pcibus_provider *provider; 166 165 unsigned int cpu; 167 166 168 - cpu = first_cpu(cpu_mask); 167 + cpu = cpumask_first(cpu_mask); 169 168 sn_irq_info = sn_msi_info[irq].sn_irq_info; 170 169 if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) 171 170 return; ··· 205 204 msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); 206 205 207 206 write_msi_msg(irq, &msg); 208 - irq_desc[irq].affinity = cpu_mask; 207 + irq_desc[irq].affinity = *cpu_mask; 209 208 } 210 209 #endif /* CONFIG_SMP */ 211 210
+1
arch/m32r/Kconfig
··· 10 10 default y 11 11 select HAVE_IDE 12 12 select HAVE_OPROFILE 13 + select INIT_ALL_POSSIBLE 13 14 14 15 config SBUS 15 16 bool
-6
arch/m32r/kernel/smpboot.c
··· 73 73 /* Bitmask of physically existing CPUs */ 74 74 physid_mask_t phys_cpu_present_map; 75 75 76 - /* Bitmask of currently online CPUs */ 77 - cpumask_t cpu_online_map; 78 - EXPORT_SYMBOL(cpu_online_map); 79 - 80 76 cpumask_t cpu_bootout_map; 81 77 cpumask_t cpu_bootin_map; 82 78 static cpumask_t cpu_callin_map; 83 79 cpumask_t cpu_callout_map; 84 80 EXPORT_SYMBOL(cpu_callout_map); 85 - cpumask_t cpu_possible_map = CPU_MASK_ALL; 86 - EXPORT_SYMBOL(cpu_possible_map); 87 81 88 82 /* Per CPU bogomips and other parameters */ 89 83 struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;
+1 -1
arch/m68knommu/platform/coldfire/pit.c
··· 156 156 { 157 157 u32 imr; 158 158 159 - cf_pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); 159 + cf_pit_clockevent.cpumask = cpumask_of(smp_processor_id()); 160 160 cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32); 161 161 cf_pit_clockevent.max_delta_ns = 162 162 clockevent_delta2ns(0xFFFF, &cf_pit_clockevent);
+2 -1
arch/mips/include/asm/irq.h
··· 49 49 #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 50 50 #include <linux/cpumask.h> 51 51 52 - extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity); 52 + extern void plat_set_irq_affinity(unsigned int irq, 53 + const struct cpumask *affinity); 53 54 extern void smtc_forward_irq(unsigned int irq); 54 55 55 56 /*
-1
arch/mips/include/asm/mach-ip27/topology.h
··· 37 37 38 38 /* sched_domains SD_NODE_INIT for SGI IP27 machines */ 39 39 #define SD_NODE_INIT (struct sched_domain) { \ 40 - .span = CPU_MASK_NONE, \ 41 40 .parent = NULL, \ 42 41 .child = NULL, \ 43 42 .groups = NULL, \
-3
arch/mips/include/asm/smp.h
··· 38 38 #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ 39 39 #define SMP_CALL_FUNCTION 0x2 40 40 41 - extern cpumask_t phys_cpu_present_map; 42 - #define cpu_possible_map phys_cpu_present_map 43 - 44 41 extern void asmlinkage smp_bootstrap(void); 45 42 46 43 /*
+1 -1
arch/mips/jazz/irq.c
··· 146 146 147 147 BUG_ON(HZ != 100); 148 148 149 - cd->cpumask = cpumask_of_cpu(cpu); 149 + cd->cpumask = cpumask_of(cpu); 150 150 clockevents_register_device(cd); 151 151 action->dev_id = cd; 152 152 setup_irq(JAZZ_TIMER_IRQ, action);
+2 -2
arch/mips/kernel/cevt-bcm1480.c
··· 126 126 cd->min_delta_ns = clockevent_delta2ns(2, cd); 127 127 cd->rating = 200; 128 128 cd->irq = irq; 129 - cd->cpumask = cpumask_of_cpu(cpu); 129 + cd->cpumask = cpumask_of(cpu); 130 130 cd->set_next_event = sibyte_next_event; 131 131 cd->set_mode = sibyte_set_mode; 132 132 clockevents_register_device(cd); ··· 148 148 action->name = name; 149 149 action->dev_id = cd; 150 150 151 - irq_set_affinity(irq, cpumask_of_cpu(cpu)); 151 + irq_set_affinity(irq, cpumask_of(cpu)); 152 152 setup_irq(irq, action); 153 153 }
+1 -1
arch/mips/kernel/cevt-ds1287.c
··· 88 88 static struct clock_event_device ds1287_clockevent = { 89 89 .name = "ds1287", 90 90 .features = CLOCK_EVT_FEAT_PERIODIC, 91 - .cpumask = CPU_MASK_CPU0, 92 91 .set_next_event = ds1287_set_next_event, 93 92 .set_mode = ds1287_set_mode, 94 93 .event_handler = ds1287_event_handler, ··· 121 122 clockevent_set_clock(cd, 32768); 122 123 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 123 124 cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 125 + cd->cpumask = cpumask_of(0); 124 126 125 127 clockevents_register_device(&ds1287_clockevent); 126 128
+1 -1
arch/mips/kernel/cevt-gt641xx.c
··· 96 96 static struct clock_event_device gt641xx_timer0_clockevent = { 97 97 .name = "gt641xx-timer0", 98 98 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 99 - .cpumask = CPU_MASK_CPU0, 100 99 .irq = GT641XX_TIMER0_IRQ, 101 100 .set_next_event = gt641xx_timer0_set_next_event, 102 101 .set_mode = gt641xx_timer0_set_mode, ··· 131 132 clockevent_set_clock(cd, gt641xx_base_clock); 132 133 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 133 134 cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 135 + cd->cpumask = cpumask_of(0); 134 136 135 137 clockevents_register_device(&gt641xx_timer0_clockevent); 136 138
+1 -1
arch/mips/kernel/cevt-r4k.c
··· 195 195 196 196 cd->rating = 300; 197 197 cd->irq = irq; 198 - cd->cpumask = cpumask_of_cpu(cpu); 198 + cd->cpumask = cpumask_of(cpu); 199 199 cd->set_next_event = mips_next_event; 200 200 cd->set_mode = mips_set_clock_mode; 201 201 cd->event_handler = mips_event_handler;
+2 -2
arch/mips/kernel/cevt-sb1250.c
··· 125 125 cd->min_delta_ns = clockevent_delta2ns(2, cd); 126 126 cd->rating = 200; 127 127 cd->irq = irq; 128 - cd->cpumask = cpumask_of_cpu(cpu); 128 + cd->cpumask = cpumask_of(cpu); 129 129 cd->set_next_event = sibyte_next_event; 130 130 cd->set_mode = sibyte_set_mode; 131 131 clockevents_register_device(cd); ··· 147 147 action->name = name; 148 148 action->dev_id = cd; 149 149 150 - irq_set_affinity(irq, cpumask_of_cpu(cpu)); 150 + irq_set_affinity(irq, cpumask_of(cpu)); 151 151 setup_irq(irq, action); 152 152 }
+1 -1
arch/mips/kernel/cevt-smtc.c
··· 292 292 293 293 cd->rating = 300; 294 294 cd->irq = irq; 295 - cd->cpumask = cpumask_of_cpu(cpu); 295 + cd->cpumask = cpumask_of(cpu); 296 296 cd->set_next_event = mips_next_event; 297 297 cd->set_mode = mips_set_clock_mode; 298 298 cd->event_handler = mips_event_handler;
+1 -1
arch/mips/kernel/cevt-txx9.c
··· 112 112 .name = "TXx9", 113 113 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 114 114 .rating = 200, 115 - .cpumask = CPU_MASK_CPU0, 116 115 .set_mode = txx9tmr_set_mode, 117 116 .set_next_event = txx9tmr_set_next_event, 118 117 }; ··· 149 150 clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd); 150 151 cd->min_delta_ns = clockevent_delta2ns(0xf, cd); 151 152 cd->irq = irq; 153 + cd->cpumask = cpumask_of(0); 152 154 clockevents_register_device(cd); 153 155 setup_irq(irq, &txx9tmr_irq); 154 156 printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",
+1 -1
arch/mips/kernel/i8253.c
··· 115 115 * Start pit with the boot cpu mask and make it global after the 116 116 * IO_APIC has been initialized. 117 117 */ 118 - cd->cpumask = cpumask_of_cpu(cpu); 118 + cd->cpumask = cpumask_of(cpu); 119 119 clockevent_set_clock(cd, CLOCK_TICK_RATE); 120 120 cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd); 121 121 cd->min_delta_ns = clockevent_delta2ns(0xF, cd);
+3 -3
arch/mips/kernel/irq-gic.c
··· 155 155 156 156 static DEFINE_SPINLOCK(gic_lock); 157 157 158 - static void gic_set_affinity(unsigned int irq, cpumask_t cpumask) 158 + static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) 159 159 { 160 160 cpumask_t tmp = CPU_MASK_NONE; 161 161 unsigned long flags; ··· 164 164 pr_debug(KERN_DEBUG "%s called\n", __func__); 165 165 irq -= _irqbase; 166 166 167 - cpus_and(tmp, cpumask, cpu_online_map); 167 + cpumask_and(&tmp, cpumask, cpu_online_mask); 168 168 if (cpus_empty(tmp)) 169 169 return; 170 170 ··· 187 187 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); 188 188 189 189 } 190 - irq_desc[irq].affinity = cpumask; 190 + irq_desc[irq].affinity = *cpumask; 191 191 spin_unlock_irqrestore(&gic_lock, flags); 192 192 193 193 }
+3 -3
arch/mips/kernel/smp-cmp.c
··· 51 51 int len; 52 52 53 53 cpus_clear(cpu_allow_map); 54 - if (cpulist_parse(str, cpu_allow_map) == 0) { 54 + if (cpulist_parse(str, &cpu_allow_map) == 0) { 55 55 cpu_set(0, cpu_allow_map); 56 56 cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map); 57 - len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map); 57 + len = cpulist_scnprintf(buf, sizeof(buf)-1, &cpu_possible_map); 58 58 buf[len] = '\0'; 59 59 pr_debug("Allowable CPUs: %s\n", buf); 60 60 return 1; ··· 226 226 227 227 for (i = 1; i < NR_CPUS; i++) { 228 228 if (amon_cpu_avail(i)) { 229 - cpu_set(i, phys_cpu_present_map); 229 + cpu_set(i, cpu_possible_map); 230 230 __cpu_number_map[i] = ++ncpu; 231 231 __cpu_logical_map[ncpu] = i; 232 232 }
+1 -1
arch/mips/kernel/smp-mt.c
··· 70 70 write_vpe_c0_vpeconf0(tmp); 71 71 72 72 /* Record this as available CPU */ 73 - cpu_set(tc, phys_cpu_present_map); 73 + cpu_set(tc, cpu_possible_map); 74 74 __cpu_number_map[tc] = ++ncpu; 75 75 __cpu_logical_map[ncpu] = tc; 76 76 }
+1 -6
arch/mips/kernel/smp.c
··· 44 44 #include <asm/mipsmtregs.h> 45 45 #endif /* CONFIG_MIPS_MT_SMTC */ 46 46 47 - cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */ 48 47 volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 49 - cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */ 50 48 int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 51 49 int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ 52 - 53 - EXPORT_SYMBOL(phys_cpu_present_map); 54 - EXPORT_SYMBOL(cpu_online_map); 55 50 56 51 extern void cpu_idle(void); 57 52 ··· 190 195 /* preload SMP state for boot cpu */ 191 196 void __devinit smp_prepare_boot_cpu(void) 192 197 { 193 - cpu_set(0, phys_cpu_present_map); 198 + cpu_set(0, cpu_possible_map); 194 199 cpu_set(0, cpu_online_map); 195 200 cpu_set(0, cpu_callin_map); 196 201 }
+3 -3
arch/mips/kernel/smtc.c
··· 290 290 * possibly leave some TCs/VPEs as "slave" processors. 291 291 * 292 292 * Use c0_MVPConf0 to find out how many TCs are available, setting up 293 - * phys_cpu_present_map and the logical/physical mappings. 293 + * cpu_possible_map and the logical/physical mappings. 294 294 */ 295 295 296 296 int __init smtc_build_cpu_map(int start_cpu_slot) ··· 304 304 */ 305 305 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; 306 306 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { 307 - cpu_set(i, phys_cpu_present_map); 307 + cpu_set(i, cpu_possible_map); 308 308 __cpu_number_map[i] = i; 309 309 __cpu_logical_map[i] = i; 310 310 } ··· 521 521 * Pull any physically present but unused TCs out of circulation. 522 522 */ 523 523 while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { 524 - cpu_clear(tc, phys_cpu_present_map); 524 + cpu_clear(tc, cpu_possible_map); 525 525 cpu_clear(tc, cpu_present_map); 526 526 tc++; 527 527 }
+3 -3
arch/mips/mti-malta/malta-smtc.c
··· 114 114 */ 115 115 116 116 117 - void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity) 117 + void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) 118 118 { 119 - cpumask_t tmask = affinity; 119 + cpumask_t tmask = *affinity; 120 120 int cpu = 0; 121 121 void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); 122 122 ··· 139 139 * be made to forward to an offline "CPU". 140 140 */ 141 141 142 - for_each_cpu_mask(cpu, affinity) { 142 + for_each_cpu(cpu, affinity) { 143 143 if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) 144 144 cpu_clear(cpu, tmask); 145 145 }
+1
arch/mips/nxp/pnx8550/common/time.c
··· 102 102 unsigned int p; 103 103 unsigned int pow2p; 104 104 105 + pnx8xxx_clockevent.cpumask = cpu_none_mask; 105 106 clockevents_register_device(&pnx8xxx_clockevent); 106 107 clocksource_register(&pnx_clocksource); 107 108
+3 -3
arch/mips/pmc-sierra/yosemite/smp.c
··· 141 141 } 142 142 143 143 /* 144 - * Detect available CPUs, populate phys_cpu_present_map before smp_init 144 + * Detect available CPUs, populate cpu_possible_map before smp_init 145 145 * 146 146 * We don't want to start the secondary CPU yet nor do we have a nice probing 147 147 * feature in PMON so we just assume presence of the secondary core. ··· 150 150 { 151 151 int i; 152 152 153 - cpus_clear(phys_cpu_present_map); 153 + cpus_clear(cpu_possible_map); 154 154 155 155 for (i = 0; i < 2; i++) { 156 - cpu_set(i, phys_cpu_present_map); 156 + cpu_set(i, cpu_possible_map); 157 157 __cpu_number_map[i] = i; 158 158 __cpu_logical_map[i] = i; 159 159 }
+1 -1
arch/mips/sgi-ip27/ip27-smp.c
··· 76 76 /* Only let it join in if it's marked enabled */ 77 77 if ((acpu->cpu_info.flags & KLINFO_ENABLE) && 78 78 (tot_cpus_found != NR_CPUS)) { 79 - cpu_set(cpuid, phys_cpu_present_map); 79 + cpu_set(cpuid, cpu_possible_map); 80 80 alloc_cpupda(cpuid, tot_cpus_found); 81 81 cpus_found++; 82 82 tot_cpus_found++;
+1 -1
arch/mips/sgi-ip27/ip27-timer.c
··· 134 134 cd->min_delta_ns = clockevent_delta2ns(0x300, cd); 135 135 cd->rating = 200; 136 136 cd->irq = irq; 137 - cd->cpumask = cpumask_of_cpu(cpu); 137 + cd->cpumask = cpumask_of(cpu); 138 138 cd->set_next_event = rt_next_event; 139 139 cd->set_mode = rt_set_mode; 140 140 clockevents_register_device(cd);
+4 -4
arch/mips/sibyte/bcm1480/irq.c
··· 50 50 static void disable_bcm1480_irq(unsigned int irq); 51 51 static void ack_bcm1480_irq(unsigned int irq); 52 52 #ifdef CONFIG_SMP 53 - static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask); 53 + static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask); 54 54 #endif 55 55 56 56 #ifdef CONFIG_PCI ··· 109 109 } 110 110 111 111 #ifdef CONFIG_SMP 112 - static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask) 112 + static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask) 113 113 { 114 114 int i = 0, old_cpu, cpu, int_on, k; 115 115 u64 cur_ints; ··· 117 117 unsigned long flags; 118 118 unsigned int irq_dirty; 119 119 120 - if (cpus_weight(mask) != 1) { 120 + if (cpumask_weight(mask) != 1) { 121 121 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); 122 122 return; 123 123 } 124 - i = first_cpu(mask); 124 + i = cpumask_first(mask); 125 125 126 126 /* Convert logical CPU to physical CPU */ 127 127 cpu = cpu_logical_map(i);
+4 -4
arch/mips/sibyte/bcm1480/smp.c
··· 136 136 137 137 /* 138 138 * Use CFE to find out how many CPUs are available, setting up 139 - * phys_cpu_present_map and the logical/physical mappings. 139 + * cpu_possible_map and the logical/physical mappings. 140 140 * XXXKW will the boot CPU ever not be physical 0? 141 141 * 142 142 * Common setup before any secondaries are started ··· 145 145 { 146 146 int i, num; 147 147 148 - cpus_clear(phys_cpu_present_map); 149 - cpu_set(0, phys_cpu_present_map); 148 + cpus_clear(cpu_possible_map); 149 + cpu_set(0, cpu_possible_map); 150 150 __cpu_number_map[0] = 0; 151 151 __cpu_logical_map[0] = 0; 152 152 153 153 for (i = 1, num = 0; i < NR_CPUS; i++) { 154 154 if (cfe_cpu_stop(i) == 0) { 155 - cpu_set(i, phys_cpu_present_map); 155 + cpu_set(i, cpu_possible_map); 156 156 __cpu_number_map[i] = ++num; 157 157 __cpu_logical_map[num] = i; 158 158 }
+4 -4
arch/mips/sibyte/sb1250/irq.c
··· 50 50 static void disable_sb1250_irq(unsigned int irq); 51 51 static void ack_sb1250_irq(unsigned int irq); 52 52 #ifdef CONFIG_SMP 53 - static void sb1250_set_affinity(unsigned int irq, cpumask_t mask); 53 + static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask); 54 54 #endif 55 55 56 56 #ifdef CONFIG_SIBYTE_HAS_LDT ··· 103 103 } 104 104 105 105 #ifdef CONFIG_SMP 106 - static void sb1250_set_affinity(unsigned int irq, cpumask_t mask) 106 + static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) 107 107 { 108 108 int i = 0, old_cpu, cpu, int_on; 109 109 u64 cur_ints; 110 110 struct irq_desc *desc = irq_desc + irq; 111 111 unsigned long flags; 112 112 113 - i = first_cpu(mask); 113 + i = cpumask_first(mask); 114 114 115 - if (cpus_weight(mask) > 1) { 115 + if (cpumask_weight(mask) > 1) { 116 116 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); 117 117 return; 118 118 }
+4 -4
arch/mips/sibyte/sb1250/smp.c
··· 124 124 125 125 /* 126 126 * Use CFE to find out how many CPUs are available, setting up 127 - * phys_cpu_present_map and the logical/physical mappings. 127 + * cpu_possible_map and the logical/physical mappings. 128 128 * XXXKW will the boot CPU ever not be physical 0? 129 129 * 130 130 * Common setup before any secondaries are started ··· 133 133 { 134 134 int i, num; 135 135 136 - cpus_clear(phys_cpu_present_map); 137 - cpu_set(0, phys_cpu_present_map); 136 + cpus_clear(cpu_possible_map); 137 + cpu_set(0, cpu_possible_map); 138 138 __cpu_number_map[0] = 0; 139 139 __cpu_logical_map[0] = 0; 140 140 141 141 for (i = 1, num = 0; i < NR_CPUS; i++) { 142 142 if (cfe_cpu_stop(i) == 0) { 143 - cpu_set(i, phys_cpu_present_map); 143 + cpu_set(i, cpu_possible_map); 144 144 __cpu_number_map[i] = ++num; 145 145 __cpu_logical_map[num] = i; 146 146 }
+1 -1
arch/mips/sni/time.c
··· 80 80 struct irqaction *action = &a20r_irqaction; 81 81 unsigned int cpu = smp_processor_id(); 82 82 83 - cd->cpumask = cpumask_of_cpu(cpu); 83 + cd->cpumask = cpumask_of(cpu); 84 84 clockevents_register_device(cd); 85 85 action->dev_id = cd; 86 86 setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction);
+1
arch/parisc/Kconfig
··· 11 11 select HAVE_OPROFILE 12 12 select RTC_CLASS 13 13 select RTC_DRV_PARISC 14 + select INIT_ALL_POSSIBLE 14 15 help 15 16 The PA-RISC microprocessor is designed by Hewlett-Packard and used 16 17 in many of their workstations & servers (HP9000 700 and 800 series,
+3 -3
arch/parisc/kernel/irq.c
··· 131 131 return 0; 132 132 } 133 133 134 - static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest) 134 + static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest) 135 135 { 136 - if (cpu_check_affinity(irq, &dest)) 136 + if (cpu_check_affinity(irq, dest)) 137 137 return; 138 138 139 - irq_desc[irq].affinity = dest; 139 + irq_desc[irq].affinity = *dest; 140 140 } 141 141 #endif 142 142
-15
arch/parisc/kernel/smp.c
··· 67 67 68 68 static int parisc_max_cpus __read_mostly = 1; 69 69 70 - /* online cpus are ones that we've managed to bring up completely 71 - * possible cpus are all valid cpu 72 - * present cpus are all detected cpu 73 - * 74 - * On startup we bring up the "possible" cpus. Since we discover 75 - * CPUs later, we add them as hotplug, so the possible cpu mask is 76 - * empty in the beginning. 77 - */ 78 - 79 - cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */ 80 - cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CPUs */ 81 - 82 - EXPORT_SYMBOL(cpu_online_map); 83 - EXPORT_SYMBOL(cpu_possible_map); 84 - 85 70 DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; 86 71 87 72 enum ipi_message_type {
-1
arch/powerpc/include/asm/topology.h
··· 48 48 49 49 /* sched_domains SD_NODE_INIT for PPC64 machines */ 50 50 #define SD_NODE_INIT (struct sched_domain) { \ 51 - .span = CPU_MASK_NONE, \ 52 51 .parent = NULL, \ 53 52 .child = NULL, \ 54 53 .groups = NULL, \
+1 -1
arch/powerpc/kernel/irq.c
··· 237 237 mask = map; 238 238 } 239 239 if (irq_desc[irq].chip->set_affinity) 240 - irq_desc[irq].chip->set_affinity(irq, mask); 240 + irq_desc[irq].chip->set_affinity(irq, &mask); 241 241 else if (irq_desc[irq].action && !(warned++)) 242 242 printk("Cannot set affinity for irq %i\n", irq); 243 243 }
-4
arch/powerpc/kernel/smp.c
··· 59 59 60 60 struct thread_info *secondary_ti; 61 61 62 - cpumask_t cpu_possible_map = CPU_MASK_NONE; 63 - cpumask_t cpu_online_map = CPU_MASK_NONE; 64 62 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 65 63 DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; 66 64 67 - EXPORT_SYMBOL(cpu_online_map); 68 - EXPORT_SYMBOL(cpu_possible_map); 69 65 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 70 66 EXPORT_PER_CPU_SYMBOL(cpu_core_map); 71 67
+1 -1
arch/powerpc/kernel/time.c
··· 844 844 struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; 845 845 846 846 *dec = decrementer_clockevent; 847 - dec->cpumask = cpumask_of_cpu(cpu); 847 + dec->cpumask = cpumask_of(cpu); 848 848 849 849 printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", 850 850 dec->name, dec->mult, dec->shift, cpu);
+2 -2
arch/powerpc/platforms/pseries/xics.c
··· 332 332 lpar_xirr_info_set((0xff << 24) | irq); 333 333 } 334 334 335 - static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) 335 + static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) 336 336 { 337 337 unsigned int irq; 338 338 int status; ··· 870 870 871 871 /* Reset affinity to all cpus */ 872 872 irq_desc[virq].affinity = CPU_MASK_ALL; 873 - desc->chip->set_affinity(virq, CPU_MASK_ALL); 873 + desc->chip->set_affinity(virq, cpu_all_mask); 874 874 unlock: 875 875 spin_unlock_irqrestore(&desc->lock, flags); 876 876 }
+2 -2
arch/powerpc/sysdev/mpic.c
··· 806 806 807 807 #endif /* CONFIG_SMP */ 808 808 809 - void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) 809 + void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask) 810 810 { 811 811 struct mpic *mpic = mpic_from_irq(irq); 812 812 unsigned int src = mpic_irq_to_hw(irq); ··· 818 818 } else { 819 819 cpumask_t tmp; 820 820 821 - cpus_and(tmp, cpumask, cpu_online_map); 821 + cpumask_and(&tmp, cpumask, cpu_online_mask); 822 822 823 823 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 824 824 mpic_physmask(cpus_addr(tmp)[0]));
+1 -1
arch/powerpc/sysdev/mpic.h
··· 36 36 37 37 extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); 38 38 extern void mpic_set_vector(unsigned int virq, unsigned int vector); 39 - extern void mpic_set_affinity(unsigned int irq, cpumask_t cpumask); 39 + extern void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask); 40 40 41 41 #endif /* _POWERPC_SYSDEV_MPIC_H */
+1
arch/s390/Kconfig
··· 83 83 select HAVE_KRETPROBES 84 84 select HAVE_KVM if 64BIT 85 85 select HAVE_ARCH_TRACEHOOK 86 + select INIT_ALL_POSSIBLE 86 87 87 88 source "init/Kconfig" 88 89
-6
arch/s390/kernel/smp.c
··· 55 55 struct _lowcore *lowcore_ptr[NR_CPUS]; 56 56 EXPORT_SYMBOL(lowcore_ptr); 57 57 58 - cpumask_t cpu_online_map = CPU_MASK_NONE; 59 - EXPORT_SYMBOL(cpu_online_map); 60 - 61 - cpumask_t cpu_possible_map = CPU_MASK_ALL; 62 - EXPORT_SYMBOL(cpu_possible_map); 63 - 64 58 static struct task_struct *current_set[NR_CPUS]; 65 59 66 60 static u8 smp_cpu_type;
+1 -1
arch/s390/kernel/time.c
··· 160 160 cd->min_delta_ns = 1; 161 161 cd->max_delta_ns = LONG_MAX; 162 162 cd->rating = 400; 163 - cd->cpumask = cpumask_of_cpu(cpu); 163 + cd->cpumask = cpumask_of(cpu); 164 164 cd->set_next_event = s390_next_event; 165 165 cd->set_mode = s390_set_mode; 166 166
+1 -1
arch/sh/include/asm/smp.h
··· 31 31 }; 32 32 33 33 void smp_message_recv(unsigned int msg); 34 - void smp_timer_broadcast(cpumask_t mask); 34 + void smp_timer_broadcast(const struct cpumask *mask); 35 35 36 36 void local_timer_interrupt(void); 37 37 void local_timer_setup(unsigned int cpu);
-1
arch/sh/include/asm/topology.h
··· 5 5 6 6 /* sched_domains SD_NODE_INIT for sh machines */ 7 7 #define SD_NODE_INIT (struct sched_domain) { \ 8 - .span = CPU_MASK_NONE, \ 9 8 .parent = NULL, \ 10 9 .child = NULL, \ 11 10 .groups = NULL, \
+2 -8
arch/sh/kernel/smp.c
··· 31 31 int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 32 32 int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ 33 33 34 - cpumask_t cpu_possible_map; 35 - EXPORT_SYMBOL(cpu_possible_map); 36 - 37 - cpumask_t cpu_online_map; 38 - EXPORT_SYMBOL(cpu_online_map); 39 - 40 34 static inline void __init smp_store_cpu_info(unsigned int cpu) 41 35 { 42 36 struct sh_cpuinfo *c = cpu_data + cpu; ··· 184 190 plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); 185 191 } 186 192 187 - void smp_timer_broadcast(cpumask_t mask) 193 + void smp_timer_broadcast(const struct cpumask *mask) 188 194 { 189 195 int cpu; 190 196 191 - for_each_cpu_mask(cpu, mask) 197 + for_each_cpu(cpu, mask) 192 198 plat_send_ipi(cpu, SMP_MSG_TIMER); 193 199 } 194 200
+1 -1
arch/sh/kernel/timers/timer-broadcast.c
··· 51 51 clk->mult = 1; 52 52 clk->set_mode = dummy_timer_set_mode; 53 53 clk->broadcast = smp_timer_broadcast; 54 - clk->cpumask = cpumask_of_cpu(cpu); 54 + clk->cpumask = cpumask_of(cpu); 55 55 56 56 clockevents_register_device(clk); 57 57 }
+1 -1
arch/sh/kernel/timers/timer-tmu.c
··· 263 263 tmu0_clockevent.min_delta_ns = 264 264 clockevent_delta2ns(1, &tmu0_clockevent); 265 265 266 - tmu0_clockevent.cpumask = cpumask_of_cpu(0); 266 + tmu0_clockevent.cpumask = cpumask_of(0); 267 267 268 268 clockevents_register_device(&tmu0_clockevent); 269 269
-2
arch/sparc/include/asm/smp_32.h
··· 29 29 */ 30 30 31 31 extern unsigned char boot_cpu_id; 32 - extern cpumask_t phys_cpu_present_map; 33 - #define cpu_possible_map phys_cpu_present_map 34 32 35 33 typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long, 36 34 unsigned long, unsigned long);
+7 -4
arch/sparc/kernel/irq_64.c
··· 312 312 } 313 313 } 314 314 315 - static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask) 315 + static void sun4u_set_affinity(unsigned int virt_irq, 316 + const struct cpumask *mask) 316 317 { 317 318 sun4u_irq_enable(virt_irq); 318 319 } ··· 363 362 ino, err); 364 363 } 365 364 366 - static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask) 365 + static void sun4v_set_affinity(unsigned int virt_irq, 366 + const struct cpumask *mask) 367 367 { 368 368 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 369 369 unsigned long cpuid = irq_choose_cpu(virt_irq); ··· 431 429 dev_handle, dev_ino, err); 432 430 } 433 431 434 - static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask) 432 + static void sun4v_virt_set_affinity(unsigned int virt_irq, 433 + const struct cpumask *mask) 435 434 { 436 435 unsigned long cpuid, dev_handle, dev_ino; 437 436 int err; ··· 854 851 !(irq_desc[irq].status & IRQ_PER_CPU)) { 855 852 if (irq_desc[irq].chip->set_affinity) 856 853 irq_desc[irq].chip->set_affinity(irq, 857 - irq_desc[irq].affinity); 854 + &irq_desc[irq].affinity); 858 855 } 859 856 spin_unlock_irqrestore(&irq_desc[irq].lock, flags); 860 857 }
+1 -1
arch/sparc/kernel/of_device_64.c
··· 780 780 if (nid != -1) { 781 781 cpumask_t numa_mask = node_to_cpumask(nid); 782 782 783 - irq_set_affinity(irq, numa_mask); 783 + irq_set_affinity(irq, &numa_mask); 784 784 } 785 785 786 786 return irq;
+1 -1
arch/sparc/kernel/pci_msi.c
··· 288 288 if (nid != -1) { 289 289 cpumask_t numa_mask = node_to_cpumask(nid); 290 290 291 - irq_set_affinity(irq, numa_mask); 291 + irq_set_affinity(irq, &numa_mask); 292 292 } 293 293 err = request_irq(irq, sparc64_msiq_interrupt, 0, 294 294 "MSIQ",
+2 -4
arch/sparc/kernel/smp_32.c
··· 39 39 unsigned char boot_cpu_id = 0; 40 40 unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */ 41 41 42 - cpumask_t cpu_online_map = CPU_MASK_NONE; 43 - cpumask_t phys_cpu_present_map = CPU_MASK_NONE; 44 42 cpumask_t smp_commenced_mask = CPU_MASK_NONE; 45 43 46 44 /* The only guaranteed locking primitive available on all Sparc ··· 332 334 instance = 0; 333 335 while (!cpu_find_by_instance(instance, NULL, &mid)) { 334 336 if (mid < NR_CPUS) { 335 - cpu_set(mid, phys_cpu_present_map); 337 + cpu_set(mid, cpu_possible_map); 336 338 cpu_set(mid, cpu_present_map); 337 339 } 338 340 instance++; ··· 352 354 353 355 current_thread_info()->cpu = cpuid; 354 356 cpu_set(cpuid, cpu_online_map); 355 - cpu_set(cpuid, phys_cpu_present_map); 357 + cpu_set(cpuid, cpu_possible_map); 356 358 } 357 359 358 360 int __cpuinit __cpu_up(unsigned int cpu)
-4
arch/sparc/kernel/smp_64.c
··· 49 49 50 50 int sparc64_multi_core __read_mostly; 51 51 52 - cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE; 53 - cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; 54 52 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 55 53 cpumask_t cpu_core_map[NR_CPUS] __read_mostly = 56 54 { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; 57 55 58 - EXPORT_SYMBOL(cpu_possible_map); 59 - EXPORT_SYMBOL(cpu_online_map); 60 56 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 61 57 EXPORT_SYMBOL(cpu_core_map); 62 58
-4
arch/sparc/kernel/sparc_ksyms_32.c
··· 112 112 #ifdef CONFIG_SMP 113 113 /* IRQ implementation. */ 114 114 EXPORT_SYMBOL(synchronize_irq); 115 - 116 - /* CPU online map and active count. */ 117 - EXPORT_SYMBOL(cpu_online_map); 118 - EXPORT_SYMBOL(phys_cpu_present_map); 119 115 #endif 120 116 121 117 EXPORT_SYMBOL(__udelay);
+1 -1
arch/sparc/kernel/time_64.c
··· 763 763 sevt = &__get_cpu_var(sparc64_events); 764 764 765 765 memcpy(sevt, &sparc64_clockevent, sizeof(*sevt)); 766 - sevt->cpumask = cpumask_of_cpu(smp_processor_id()); 766 + sevt->cpumask = cpumask_of(smp_processor_id()); 767 767 768 768 clockevents_register_device(sevt); 769 769 }
-7
arch/um/kernel/smp.c
··· 25 25 #include "irq_user.h" 26 26 #include "os.h" 27 27 28 - /* CPU online map, set by smp_boot_cpus */ 29 - cpumask_t cpu_online_map = CPU_MASK_NONE; 30 - cpumask_t cpu_possible_map = CPU_MASK_NONE; 31 - 32 - EXPORT_SYMBOL(cpu_online_map); 33 - EXPORT_SYMBOL(cpu_possible_map); 34 - 35 28 /* Per CPU bogomips and other parameters 36 29 * The only piece used here is the ipi pipe, which is set before SMP is 37 30 * started and never changed.
+1 -1
arch/um/kernel/time.c
··· 50 50 static struct clock_event_device itimer_clockevent = { 51 51 .name = "itimer", 52 52 .rating = 250, 53 - .cpumask = CPU_MASK_ALL, 53 + .cpumask = cpu_all_mask, 54 54 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 55 55 .set_mode = itimer_set_mode, 56 56 .set_next_event = itimer_next_event,
+7 -6
arch/x86/Kconfig
··· 601 601 602 602 config MAXSMP 603 603 bool "Configure Maximum number of SMP Processors and NUMA Nodes" 604 - depends on X86_64 && SMP && BROKEN 604 + depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL 605 + select CPUMASK_OFFSTACK 605 606 default n 606 607 help 607 608 Configure maximum number of CPUS and NUMA Nodes for this architecture. 608 609 If unsure, say N. 609 610 610 611 config NR_CPUS 611 - int "Maximum number of CPUs (2-512)" if !MAXSMP 612 - range 2 512 613 - depends on SMP 612 + int "Maximum number of CPUs" if SMP && !MAXSMP 613 + range 2 512 if SMP && !MAXSMP 614 + default "1" if !SMP 614 615 default "4096" if MAXSMP 615 - default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000 616 - default "8" 616 + default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000) 617 + default "8" if SMP 617 618 help 618 619 This allows you to specify the maximum number of CPUs which this 619 620 kernel will support. The maximum supported value is 512 and the
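
Since MAXSMP now selects CPUMASK_OFFSTACK, a cpumask_var_t becomes a real
pointer that has to be allocated instead of an on-stack array, which is what
keeps 4096-bit masks off the kernel stack.  A minimal sketch of the pattern
such configurations expect (not from this patch; my_count_online_in is a
hypothetical helper):

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    /* Hypothetical helper: count the online CPUs inside "allowed". */
    static int my_count_online_in(const struct cpumask *allowed)
    {
            cpumask_var_t tmp;      /* struct cpumask * when CPUMASK_OFFSTACK=y */
            int n;

            if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_and(tmp, allowed, cpu_online_mask);
            n = cpumask_weight(tmp);

            free_cpumask_var(tmp);
            return n;
    }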
+25 -7
arch/x86/include/asm/bigsmp/apic.h
··· 9 9 return (1); 10 10 } 11 11 12 - static inline cpumask_t target_cpus(void) 12 + static inline const cpumask_t *target_cpus(void) 13 13 { 14 14 #ifdef CONFIG_SMP 15 - return cpu_online_map; 15 + return &cpu_online_map; 16 16 #else 17 - return cpumask_of_cpu(0); 17 + return &cpumask_of_cpu(0); 18 18 #endif 19 19 } 20 20 ··· 79 79 80 80 static inline int cpu_present_to_apicid(int mps_cpu) 81 81 { 82 - if (mps_cpu < NR_CPUS) 82 + if (mps_cpu < nr_cpu_ids) 83 83 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); 84 84 85 85 return BAD_APICID; ··· 94 94 /* Mapping from cpu number to logical apicid */ 95 95 static inline int cpu_to_logical_apicid(int cpu) 96 96 { 97 - if (cpu >= NR_CPUS) 97 + if (cpu >= nr_cpu_ids) 98 98 return BAD_APICID; 99 99 return cpu_physical_id(cpu); 100 100 } ··· 119 119 } 120 120 121 121 /* As we are using single CPU as destination, pick only one CPU here */ 122 - static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) 122 + static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) 123 123 { 124 124 int cpu; 125 125 int apicid; 126 126 127 - cpu = first_cpu(cpumask); 127 + cpu = first_cpu(*cpumask); 128 128 apicid = cpu_to_logical_apicid(cpu); 129 129 return apicid; 130 + } 131 + 132 + static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, 133 + const struct cpumask *andmask) 134 + { 135 + int cpu; 136 + 137 + /* 138 + * We're using fixed IRQ delivery, can only return one phys APIC ID. 139 + * May as well be the first. 140 + */ 141 + for_each_cpu_and(cpu, cpumask, andmask) 142 + if (cpumask_test_cpu(cpu, cpu_online_mask)) 143 + break; 144 + if (cpu < nr_cpu_ids) 145 + return cpu_to_logical_apicid(cpu); 146 + 147 + return BAD_APICID; 130 148 } 131 149 132 150 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
+5 -8
arch/x86/include/asm/bigsmp/ipi.h
··· 1 1 #ifndef __ASM_MACH_IPI_H 2 2 #define __ASM_MACH_IPI_H 3 3 4 - void send_IPI_mask_sequence(cpumask_t mask, int vector); 4 + void send_IPI_mask_sequence(const struct cpumask *mask, int vector); 5 + void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); 5 6 6 - static inline void send_IPI_mask(cpumask_t mask, int vector) 7 + static inline void send_IPI_mask(const struct cpumask *mask, int vector) 7 8 { 8 9 send_IPI_mask_sequence(mask, vector); 9 10 } 10 11 11 12 static inline void send_IPI_allbutself(int vector) 12 13 { 13 - cpumask_t mask = cpu_online_map; 14 - cpu_clear(smp_processor_id(), mask); 15 - 16 - if (!cpus_empty(mask)) 17 - send_IPI_mask(mask, vector); 14 + send_IPI_mask_allbutself(cpu_online_mask, vector); 18 15 } 19 16 20 17 static inline void send_IPI_all(int vector) 21 18 { 22 - send_IPI_mask(cpu_online_map, vector); 19 + send_IPI_mask(cpu_online_mask, vector); 23 20 } 24 21 25 22 #endif /* __ASM_MACH_IPI_H */
+4 -6
arch/x86/include/asm/desc.h
··· 320 320 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); 321 321 } 322 322 323 - #define SYS_VECTOR_FREE 0 324 - #define SYS_VECTOR_ALLOCED 1 325 - 326 323 extern int first_system_vector; 327 - extern char system_vectors[]; 324 + /* used_vectors is BITMAP for irq is not managed by percpu vector_irq */ 325 + extern unsigned long used_vectors[]; 328 326 329 327 static inline void alloc_system_vector(int vector) 330 328 { 331 - if (system_vectors[vector] == SYS_VECTOR_FREE) { 332 - system_vectors[vector] = SYS_VECTOR_ALLOCED; 329 + if (!test_bit(vector, used_vectors)) { 330 + set_bit(vector, used_vectors); 333 331 if (first_system_vector > vector) 334 332 first_system_vector = vector; 335 333 } else
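system_vectors[] (one char per vector) is folded into the used_vectors bitmap that asm/irq.h already declares, so alloc_system_vector() becomes a test_bit()/set_bit() pair. The same idiom on a hypothetical private bitmap, as a sketch:

    static DECLARE_BITMAP(my_vectors, NR_VECTORS);  /* hypothetical bitmap */

    static int claim_vector(int vector)
    {
            if (test_bit(vector, my_vectors))
                    return -EBUSY;                  /* already claimed */
            set_bit(vector, my_vectors);            /* atomic set on the underlying longs */
            return 0;
    }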
+64 -18
arch/x86/include/asm/es7000/apic.h
··· 9 9 return (1); 10 10 } 11 11 12 - static inline cpumask_t target_cpus_cluster(void) 12 + static inline const cpumask_t *target_cpus_cluster(void) 13 13 { 14 - return CPU_MASK_ALL; 14 + return &CPU_MASK_ALL; 15 15 } 16 16 17 - static inline cpumask_t target_cpus(void) 17 + static inline const cpumask_t *target_cpus(void) 18 18 { 19 - return cpumask_of_cpu(smp_processor_id()); 19 + return &cpumask_of_cpu(smp_processor_id()); 20 20 } 21 21 22 22 #define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) ··· 80 80 static inline void setup_apic_routing(void) 81 81 { 82 82 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); 83 - printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", 83 + printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", 84 84 (apic_version[apic] == 0x14) ? 85 - "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]); 85 + "Physical Cluster" : "Logical Cluster", 86 + nr_ioapics, cpus_addr(*target_cpus())[0]); 86 87 } 87 88 88 89 static inline int multi_timer_check(int apic, int irq) ··· 101 100 { 102 101 if (!mps_cpu) 103 102 return boot_cpu_physical_apicid; 104 - else if (mps_cpu < NR_CPUS) 103 + else if (mps_cpu < nr_cpu_ids) 105 104 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); 106 105 else 107 106 return BAD_APICID; ··· 121 120 static inline int cpu_to_logical_apicid(int cpu) 122 121 { 123 122 #ifdef CONFIG_SMP 124 - if (cpu >= NR_CPUS) 125 - return BAD_APICID; 126 - return (int)cpu_2_logical_apicid[cpu]; 123 + if (cpu >= nr_cpu_ids) 124 + return BAD_APICID; 125 + return (int)cpu_2_logical_apicid[cpu]; 127 126 #else 128 127 return logical_smp_processor_id(); 129 128 #endif ··· 147 146 return (1); 148 147 } 149 148 150 - static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask) 149 + static inline unsigned int 150 + cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) 151 151 { 152 152 int num_bits_set; 153 153 int cpus_found = 0; 154 154 int cpu; 155 155 int apicid; 156 156 157 - num_bits_set = cpus_weight(cpumask); 157 + num_bits_set = cpumask_weight(cpumask); 158 158 /* Return id to all */ 159 159 if (num_bits_set == NR_CPUS) 160 160 return 0xFF; ··· 163 161 * The cpus in the mask must all be on the apic cluster. If are not 164 162 * on the same apicid cluster return default value of TARGET_CPUS. 165 163 */ 166 - cpu = first_cpu(cpumask); 164 + cpu = cpumask_first(cpumask); 167 165 apicid = cpu_to_logical_apicid(cpu); 168 166 while (cpus_found < num_bits_set) { 169 - if (cpu_isset(cpu, cpumask)) { 167 + if (cpumask_test_cpu(cpu, cpumask)) { 170 168 int new_apicid = cpu_to_logical_apicid(cpu); 171 169 if (apicid_cluster(apicid) != 172 170 apicid_cluster(new_apicid)){ ··· 181 179 return apicid; 182 180 } 183 181 184 - static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) 182 + static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) 185 183 { 186 184 int num_bits_set; 187 185 int cpus_found = 0; 188 186 int cpu; 189 187 int apicid; 190 188 191 - num_bits_set = cpus_weight(cpumask); 189 + num_bits_set = cpus_weight(*cpumask); 192 190 /* Return id to all */ 193 191 if (num_bits_set == NR_CPUS) 194 192 return cpu_to_logical_apicid(0); ··· 196 194 * The cpus in the mask must all be on the apic cluster. If are not 197 195 * on the same apicid cluster return default value of TARGET_CPUS. 
198 196 */ 199 - cpu = first_cpu(cpumask); 197 + cpu = first_cpu(*cpumask); 200 198 apicid = cpu_to_logical_apicid(cpu); 201 199 while (cpus_found < num_bits_set) { 202 - if (cpu_isset(cpu, cpumask)) { 200 + if (cpu_isset(cpu, *cpumask)) { 203 201 int new_apicid = cpu_to_logical_apicid(cpu); 204 202 if (apicid_cluster(apicid) != 205 203 apicid_cluster(new_apicid)){ ··· 211 209 } 212 210 cpu++; 213 211 } 212 + return apicid; 213 + } 214 + 215 + 216 + static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, 217 + const struct cpumask *andmask) 218 + { 219 + int num_bits_set; 220 + int cpus_found = 0; 221 + int cpu; 222 + int apicid = cpu_to_logical_apicid(0); 223 + cpumask_var_t cpumask; 224 + 225 + if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) 226 + return apicid; 227 + 228 + cpumask_and(cpumask, inmask, andmask); 229 + cpumask_and(cpumask, cpumask, cpu_online_mask); 230 + 231 + num_bits_set = cpumask_weight(cpumask); 232 + /* Return id to all */ 233 + if (num_bits_set == NR_CPUS) 234 + goto exit; 235 + /* 236 + * The cpus in the mask must all be on the apic cluster. If are not 237 + * on the same apicid cluster return default value of TARGET_CPUS. 238 + */ 239 + cpu = cpumask_first(cpumask); 240 + apicid = cpu_to_logical_apicid(cpu); 241 + while (cpus_found < num_bits_set) { 242 + if (cpumask_test_cpu(cpu, cpumask)) { 243 + int new_apicid = cpu_to_logical_apicid(cpu); 244 + if (apicid_cluster(apicid) != 245 + apicid_cluster(new_apicid)){ 246 + printk ("%s: Not a valid mask!\n", __func__); 247 + return cpu_to_logical_apicid(0); 248 + } 249 + apicid = new_apicid; 250 + cpus_found++; 251 + } 252 + cpu++; 253 + } 254 + exit: 255 + free_cpumask_var(cpumask); 214 256 return apicid; 215 257 } 216 258
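Because this path needs the AND of its two arguments, it allocates a temporary cpumask_var_t with GFP_ATOMIC, falls back to cpu_to_logical_apicid(0) if that fails, and frees the mask on exit; with CONFIG_CPUMASK_OFFSTACK=y this keeps a 4096-bit mask off the stack. The skeleton of that pattern, as a hedged sketch:

    static int masked_weight(const struct cpumask *a, const struct cpumask *b)
    {
            cpumask_var_t tmp;
            int w;

            if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
                    return -ENOMEM;         /* caller must pick a safe default */

            cpumask_and(tmp, a, b);
            cpumask_and(tmp, tmp, cpu_online_mask);
            w = cpumask_weight(tmp);

            free_cpumask_var(tmp);
            return w;
    }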
+5 -7
arch/x86/include/asm/es7000/ipi.h
··· 1 1 #ifndef __ASM_ES7000_IPI_H 2 2 #define __ASM_ES7000_IPI_H 3 3 4 - void send_IPI_mask_sequence(cpumask_t mask, int vector); 4 + void send_IPI_mask_sequence(const struct cpumask *mask, int vector); 5 + void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); 5 6 6 - static inline void send_IPI_mask(cpumask_t mask, int vector) 7 + static inline void send_IPI_mask(const struct cpumask *mask, int vector) 7 8 { 8 9 send_IPI_mask_sequence(mask, vector); 9 10 } 10 11 11 12 static inline void send_IPI_allbutself(int vector) 12 13 { 13 - cpumask_t mask = cpu_online_map; 14 - cpu_clear(smp_processor_id(), mask); 15 - if (!cpus_empty(mask)) 16 - send_IPI_mask(mask, vector); 14 + send_IPI_mask_allbutself(cpu_online_mask, vector); 17 15 } 18 16 19 17 static inline void send_IPI_all(int vector) 20 18 { 21 - send_IPI_mask(cpu_online_map, vector); 19 + send_IPI_mask(cpu_online_mask, vector); 22 20 } 23 21 24 22 #endif /* __ASM_ES7000_IPI_H */
+9 -4
arch/x86/include/asm/genapic_32.h
··· 24 24 int (*probe)(void); 25 25 26 26 int (*apic_id_registered)(void); 27 - cpumask_t (*target_cpus)(void); 27 + const struct cpumask *(*target_cpus)(void); 28 28 int int_delivery_mode; 29 29 int int_dest_mode; 30 30 int ESR_DISABLE; ··· 57 57 58 58 unsigned (*get_apic_id)(unsigned long x); 59 59 unsigned long apic_id_mask; 60 - unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); 61 - cpumask_t (*vector_allocation_domain)(int cpu); 60 + unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); 61 + unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, 62 + const struct cpumask *andmask); 63 + void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); 62 64 63 65 #ifdef CONFIG_SMP 64 66 /* ipi */ 65 - void (*send_IPI_mask)(cpumask_t mask, int vector); 67 + void (*send_IPI_mask)(const struct cpumask *mask, int vector); 68 + void (*send_IPI_mask_allbutself)(const struct cpumask *mask, 69 + int vector); 66 70 void (*send_IPI_allbutself)(int vector); 67 71 void (*send_IPI_all)(int vector); 68 72 #endif ··· 118 114 APICFUNC(get_apic_id) \ 119 115 .apic_id_mask = APIC_ID_MASK, \ 120 116 APICFUNC(cpu_mask_to_apicid) \ 117 + APICFUNC(cpu_mask_to_apicid_and) \ 121 118 APICFUNC(vector_allocation_domain) \ 122 119 APICFUNC(acpi_madt_oem_check) \ 123 120 IPIFUNC(send_IPI_mask) \
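vector_allocation_domain used to return a whole cpumask_t by value (512 bytes at NR_CPUS=4096); it now fills a caller-supplied mask and returns void. The minimal implementation of the new shape, mirroring the one-CPU-per-domain policy the physflat and x2apic drivers below adopt (sketch, hypothetical name):

    static void one_cpu_domain(int cpu, struct cpumask *retmask)
    {
            cpumask_clear(retmask);
            cpumask_set_cpu(cpu, retmask);  /* each CPU is its own domain */
    }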
+10 -4
arch/x86/include/asm/genapic_64.h
··· 1 1 #ifndef _ASM_X86_GENAPIC_64_H 2 2 #define _ASM_X86_GENAPIC_64_H 3 3 4 + #include <linux/cpumask.h> 5 + 4 6 /* 5 7 * Copyright 2004 James Cleverdon, IBM. 6 8 * Subject to the GNU Public License, v.2 ··· 20 18 u32 int_delivery_mode; 21 19 u32 int_dest_mode; 22 20 int (*apic_id_registered)(void); 23 - cpumask_t (*target_cpus)(void); 24 - cpumask_t (*vector_allocation_domain)(int cpu); 21 + const struct cpumask *(*target_cpus)(void); 22 + void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); 25 23 void (*init_apic_ldr)(void); 26 24 /* ipi */ 27 - void (*send_IPI_mask)(cpumask_t mask, int vector); 25 + void (*send_IPI_mask)(const struct cpumask *mask, int vector); 26 + void (*send_IPI_mask_allbutself)(const struct cpumask *mask, 27 + int vector); 28 28 void (*send_IPI_allbutself)(int vector); 29 29 void (*send_IPI_all)(int vector); 30 30 void (*send_IPI_self)(int vector); 31 31 /* */ 32 - unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); 32 + unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); 33 + unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, 34 + const struct cpumask *andmask); 33 35 unsigned int (*phys_pkg_id)(int index_msb); 34 36 unsigned int (*get_apic_id)(unsigned long x); 35 37 unsigned long (*set_apic_id)(unsigned int id);
+21 -2
arch/x86/include/asm/ipi.h
··· 117 117 native_apic_mem_write(APIC_ICR, cfg); 118 118 } 119 119 120 - static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) 120 + static inline void send_IPI_mask_sequence(const struct cpumask *mask, 121 + int vector) 121 122 { 122 123 unsigned long flags; 123 124 unsigned long query_cpu; ··· 129 128 * - mbligh 130 129 */ 131 130 local_irq_save(flags); 132 - for_each_cpu_mask_nr(query_cpu, mask) { 131 + for_each_cpu(query_cpu, mask) { 133 132 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), 134 133 vector, APIC_DEST_PHYSICAL); 135 134 } 135 + local_irq_restore(flags); 136 + } 137 + 138 + static inline void send_IPI_mask_allbutself(const struct cpumask *mask, 139 + int vector) 140 + { 141 + unsigned long flags; 142 + unsigned int query_cpu; 143 + unsigned int this_cpu = smp_processor_id(); 144 + 145 + /* See Hack comment above */ 146 + 147 + local_irq_save(flags); 148 + for_each_cpu(query_cpu, mask) 149 + if (query_cpu != this_cpu) 150 + __send_IPI_dest_field( 151 + per_cpu(x86_cpu_to_apicid, query_cpu), 152 + vector, APIC_DEST_PHYSICAL); 136 153 local_irq_restore(flags); 137 154 } 138 155
+2 -1
arch/x86/include/asm/irq.h
··· 33 33 34 34 #ifdef CONFIG_HOTPLUG_CPU 35 35 #include <linux/cpumask.h> 36 - extern void fixup_irqs(cpumask_t map); 36 + extern void fixup_irqs(void); 37 37 #endif 38 38 39 39 extern unsigned int do_IRQ(struct pt_regs *regs); ··· 42 42 43 43 /* Interrupt vector management */ 44 44 extern DECLARE_BITMAP(used_vectors, NR_VECTORS); 45 + extern int vector_used_by_percpu_irq(unsigned int vector); 45 46 46 47 #endif /* _ASM_X86_IRQ_H */
+19 -9
arch/x86/include/asm/mach-default/mach_apic.h
··· 8 8 9 9 #define APIC_DFR_VALUE (APIC_DFR_FLAT) 10 10 11 - static inline cpumask_t target_cpus(void) 11 + static inline const struct cpumask *target_cpus(void) 12 12 { 13 13 #ifdef CONFIG_SMP 14 - return cpu_online_map; 14 + return cpu_online_mask; 15 15 #else 16 - return cpumask_of_cpu(0); 16 + return cpumask_of(0); 17 17 #endif 18 18 } 19 19 ··· 28 28 #define apic_id_registered (genapic->apic_id_registered) 29 29 #define init_apic_ldr (genapic->init_apic_ldr) 30 30 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) 31 + #define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and) 31 32 #define phys_pkg_id (genapic->phys_pkg_id) 32 33 #define vector_allocation_domain (genapic->vector_allocation_domain) 33 34 #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID))) ··· 62 61 return physid_isset(read_apic_id(), phys_cpu_present_map); 63 62 } 64 63 65 - static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) 64 + static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask) 66 65 { 67 - return cpus_addr(cpumask)[0]; 66 + return cpumask_bits(cpumask)[0]; 67 + } 68 + 69 + static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, 70 + const struct cpumask *andmask) 71 + { 72 + unsigned long mask1 = cpumask_bits(cpumask)[0]; 73 + unsigned long mask2 = cpumask_bits(andmask)[0]; 74 + unsigned long mask3 = cpumask_bits(cpu_online_mask)[0]; 75 + 76 + return (unsigned int)(mask1 & mask2 & mask3); 68 77 } 69 78 70 79 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) ··· 99 88 #endif 100 89 } 101 90 102 - static inline cpumask_t vector_allocation_domain(int cpu) 91 + static inline void vector_allocation_domain(int cpu, struct cpumask *retmask) 103 92 { 104 93 /* Careful. Some cpus do not strictly honor the set of cpus 105 94 * specified in the interrupt destination when using lowest ··· 109 98 * deliver interrupts to the wrong hyperthread when only one 110 99 * hyperthread was specified in the interrupt desitination. 111 100 */ 112 - cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 113 - return domain; 101 + *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } }; 114 102 } 115 103 #endif 116 104 ··· 141 131 142 132 static inline int cpu_present_to_apicid(int mps_cpu) 143 133 { 144 - if (mps_cpu < NR_CPUS && cpu_present(mps_cpu)) 134 + if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) 145 135 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); 146 136 else 147 137 return BAD_APICID;
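In the default flat logical mode each CPU's logical APIC ID is a single bit, so cpu_mask_to_apicid_and() reduces to the low word of affinity & andmask & online. A standalone demonstration of that arithmetic (user-space sketch with made-up mask values):

    #include <stdio.h>

    int main(void)
    {
            unsigned long affinity = 0x0fUL;        /* CPUs 0-3 requested */
            unsigned long andmask  = 0xffUL;        /* e.g. an 8-CPU domain */
            unsigned long online   = 0x05UL;        /* CPUs 0 and 2 online */

            printf("dest = %#lx\n", affinity & andmask & online);   /* 0x5 */
            return 0;
    }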
+9 -9
arch/x86/include/asm/mach-default/mach_ipi.h
··· 4 4 /* Avoid include hell */ 5 5 #define NMI_VECTOR 0x02 6 6 7 - void send_IPI_mask_bitmask(cpumask_t mask, int vector); 7 + void send_IPI_mask_bitmask(const struct cpumask *mask, int vector); 8 + void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); 8 9 void __send_IPI_shortcut(unsigned int shortcut, int vector); 9 10 10 11 extern int no_broadcast; ··· 13 12 #ifdef CONFIG_X86_64 14 13 #include <asm/genapic.h> 15 14 #define send_IPI_mask (genapic->send_IPI_mask) 15 + #define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself) 16 16 #else 17 - static inline void send_IPI_mask(cpumask_t mask, int vector) 17 + static inline void send_IPI_mask(const struct cpumask *mask, int vector) 18 18 { 19 19 send_IPI_mask_bitmask(mask, vector); 20 20 } 21 + void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); 21 22 #endif 22 23 23 24 static inline void __local_send_IPI_allbutself(int vector) 24 25 { 25 - if (no_broadcast || vector == NMI_VECTOR) { 26 - cpumask_t mask = cpu_online_map; 27 - 28 - cpu_clear(smp_processor_id(), mask); 29 - send_IPI_mask(mask, vector); 30 - } else 26 + if (no_broadcast || vector == NMI_VECTOR) 27 + send_IPI_mask_allbutself(cpu_online_mask, vector); 28 + else 31 29 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); 32 30 } 33 31 34 32 static inline void __local_send_IPI_all(int vector) 35 33 { 36 34 if (no_broadcast || vector == NMI_VECTOR) 37 - send_IPI_mask(cpu_online_map, vector); 35 + send_IPI_mask(cpu_online_mask, vector); 38 36 else 39 37 __send_IPI_shortcut(APIC_DEST_ALLINC, vector); 40 38 }
+1
arch/x86/include/asm/mach-generic/mach_apic.h
··· 24 24 #define check_phys_apicid_present (genapic->check_phys_apicid_present) 25 25 #define check_apicid_used (genapic->check_apicid_used) 26 26 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) 27 + #define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and) 27 28 #define vector_allocation_domain (genapic->vector_allocation_domain) 28 29 #define enable_apic_mode (genapic->enable_apic_mode) 29 30 #define phys_pkg_id (genapic->phys_pkg_id)
+9 -3
arch/x86/include/asm/numaq/apic.h
··· 7 7 8 8 #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) 9 9 10 - static inline cpumask_t target_cpus(void) 10 + static inline const cpumask_t *target_cpus(void) 11 11 { 12 - return CPU_MASK_ALL; 12 + return &CPU_MASK_ALL; 13 13 } 14 14 15 15 #define NO_BALANCE_IRQ (1) ··· 122 122 * We use physical apicids here, not logical, so just return the default 123 123 * physical broadcast to stop people from breaking us 124 124 */ 125 - static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) 125 + static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) 126 + { 127 + return (int) 0xF; 128 + } 129 + 130 + static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, 131 + const struct cpumask *andmask) 126 132 { 127 133 return (int) 0xF; 128 134 }
+5 -8
arch/x86/include/asm/numaq/ipi.h
··· 1 1 #ifndef __ASM_NUMAQ_IPI_H 2 2 #define __ASM_NUMAQ_IPI_H 3 3 4 - void send_IPI_mask_sequence(cpumask_t, int vector); 4 + void send_IPI_mask_sequence(const struct cpumask *mask, int vector); 5 + void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); 5 6 6 - static inline void send_IPI_mask(cpumask_t mask, int vector) 7 + static inline void send_IPI_mask(const struct cpumask *mask, int vector) 7 8 { 8 9 send_IPI_mask_sequence(mask, vector); 9 10 } 10 11 11 12 static inline void send_IPI_allbutself(int vector) 12 13 { 13 - cpumask_t mask = cpu_online_map; 14 - cpu_clear(smp_processor_id(), mask); 15 - 16 - if (!cpus_empty(mask)) 17 - send_IPI_mask(mask, vector); 14 + send_IPI_mask_allbutself(cpu_online_mask, vector); 18 15 } 19 16 20 17 static inline void send_IPI_all(int vector) 21 18 { 22 - send_IPI_mask(cpu_online_map, vector); 19 + send_IPI_mask(cpu_online_mask, vector); 23 20 } 24 21 25 22 #endif /* __ASM_NUMAQ_IPI_H */
+3 -3
arch/x86/include/asm/smp.h
··· 60 60 void (*cpu_die)(unsigned int cpu); 61 61 void (*play_dead)(void); 62 62 63 - void (*send_call_func_ipi)(cpumask_t mask); 63 + void (*send_call_func_ipi)(const struct cpumask *mask); 64 64 void (*send_call_func_single_ipi)(int cpu); 65 65 }; 66 66 ··· 125 125 126 126 static inline void arch_send_call_function_ipi(cpumask_t mask) 127 127 { 128 - smp_ops.send_call_func_ipi(mask); 128 + smp_ops.send_call_func_ipi(&mask); 129 129 } 130 130 131 131 void cpu_disable_common(void); ··· 138 138 void native_play_dead(void); 139 139 void play_dead_common(void); 140 140 141 - void native_send_call_func_ipi(cpumask_t mask); 141 + void native_send_call_func_ipi(const struct cpumask *mask); 142 142 void native_send_call_func_single_ipi(int cpu); 143 143 144 144 extern void prefill_possible_map(void);
+49 -6
arch/x86/include/asm/summit/apic.h
··· 14 14 15 15 #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) 16 16 17 - static inline cpumask_t target_cpus(void) 17 + static inline const cpumask_t *target_cpus(void) 18 18 { 19 19 /* CPU_MASK_ALL (0xff) has undefined behaviour with 20 20 * dest_LowestPrio mode logical clustered apic interrupt routing 21 21 * Just start on cpu 0. IRQ balancing will spread load 22 22 */ 23 - return cpumask_of_cpu(0); 23 + return &cpumask_of_cpu(0); 24 24 } 25 25 26 26 #define INT_DELIVERY_MODE (dest_LowestPrio) ··· 137 137 { 138 138 } 139 139 140 - static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) 140 + static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) 141 141 { 142 142 int num_bits_set; 143 143 int cpus_found = 0; 144 144 int cpu; 145 145 int apicid; 146 146 147 - num_bits_set = cpus_weight(cpumask); 147 + num_bits_set = cpus_weight(*cpumask); 148 148 /* Return id to all */ 149 149 if (num_bits_set == NR_CPUS) 150 150 return (int) 0xFF; ··· 152 152 * The cpus in the mask must all be on the apic cluster. If are not 153 153 * on the same apicid cluster return default value of TARGET_CPUS. 154 154 */ 155 - cpu = first_cpu(cpumask); 155 + cpu = first_cpu(*cpumask); 156 156 apicid = cpu_to_logical_apicid(cpu); 157 157 while (cpus_found < num_bits_set) { 158 - if (cpu_isset(cpu, cpumask)) { 158 + if (cpu_isset(cpu, *cpumask)) { 159 159 int new_apicid = cpu_to_logical_apicid(cpu); 160 160 if (apicid_cluster(apicid) != 161 161 apicid_cluster(new_apicid)){ ··· 167 167 } 168 168 cpu++; 169 169 } 170 + return apicid; 171 + } 172 + 173 + static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, 174 + const struct cpumask *andmask) 175 + { 176 + int num_bits_set; 177 + int cpus_found = 0; 178 + int cpu; 179 + int apicid = 0xFF; 180 + cpumask_var_t cpumask; 181 + 182 + if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) 183 + return (int) 0xFF; 184 + 185 + cpumask_and(cpumask, inmask, andmask); 186 + cpumask_and(cpumask, cpumask, cpu_online_mask); 187 + 188 + num_bits_set = cpumask_weight(cpumask); 189 + /* Return id to all */ 190 + if (num_bits_set == nr_cpu_ids) 191 + goto exit; 192 + /* 193 + * The cpus in the mask must all be on the apic cluster. If are not 194 + * on the same apicid cluster return default value of TARGET_CPUS. 195 + */ 196 + cpu = cpumask_first(cpumask); 197 + apicid = cpu_to_logical_apicid(cpu); 198 + while (cpus_found < num_bits_set) { 199 + if (cpumask_test_cpu(cpu, cpumask)) { 200 + int new_apicid = cpu_to_logical_apicid(cpu); 201 + if (apicid_cluster(apicid) != 202 + apicid_cluster(new_apicid)){ 203 + printk ("%s: Not a valid mask!\n", __func__); 204 + return 0xFF; 205 + } 206 + apicid = apicid | new_apicid; 207 + cpus_found++; 208 + } 209 + cpu++; 210 + } 211 + exit: 212 + free_cpumask_var(cpumask); 170 213 return apicid; 171 214 } 172 215
+5 -4
arch/x86/include/asm/summit/ipi.h
··· 1 1 #ifndef __ASM_SUMMIT_IPI_H 2 2 #define __ASM_SUMMIT_IPI_H 3 3 4 - void send_IPI_mask_sequence(cpumask_t mask, int vector); 4 + void send_IPI_mask_sequence(const cpumask_t *mask, int vector); 5 + void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); 5 6 6 - static inline void send_IPI_mask(cpumask_t mask, int vector) 7 + static inline void send_IPI_mask(const cpumask_t *mask, int vector) 7 8 { 8 9 send_IPI_mask_sequence(mask, vector); 9 10 } ··· 15 14 cpu_clear(smp_processor_id(), mask); 16 15 17 16 if (!cpus_empty(mask)) 18 - send_IPI_mask(mask, vector); 17 + send_IPI_mask(&mask, vector); 19 18 } 20 19 21 20 static inline void send_IPI_all(int vector) 22 21 { 23 - send_IPI_mask(cpu_online_map, vector); 22 + send_IPI_mask(&cpu_online_map, vector); 24 23 } 25 24 26 25 #endif /* __ASM_SUMMIT_IPI_H */
+2
arch/x86/include/asm/topology.h
··· 226 226 #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) 227 227 #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) 228 228 #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 229 + #define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu)) 230 + #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 229 231 230 232 /* indicates that pointers to the topology cpumask_t maps are valid */ 231 233 #define arch_provides_topology_pointers yes
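topology_core_cpumask()/topology_thread_cpumask() expose the existing per-cpu sibling maps as pointers, so callers can iterate them without copying a cpumask_t. Illustrative sketch:

    static void print_core_siblings(int cpu)
    {
            int sibling;

            for_each_cpu(sibling, topology_core_cpumask(cpu))
                    pr_info("cpu%d shares a package with cpu%d\n", cpu, sibling);
    }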
+18 -16
arch/x86/kernel/apic.c
··· 119 119 120 120 int first_system_vector = 0xfe; 121 121 122 - char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE}; 123 - 124 122 /* 125 123 * Debug level, exported for io_apic.c 126 124 */ ··· 140 142 struct clock_event_device *evt); 141 143 static void lapic_timer_setup(enum clock_event_mode mode, 142 144 struct clock_event_device *evt); 143 - static void lapic_timer_broadcast(cpumask_t mask); 145 + static void lapic_timer_broadcast(const cpumask_t *mask); 144 146 static void apic_pm_activate(void); 145 147 146 148 /* ··· 453 455 /* 454 456 * Local APIC timer broadcast function 455 457 */ 456 - static void lapic_timer_broadcast(cpumask_t mask) 458 + static void lapic_timer_broadcast(const cpumask_t *mask) 457 459 { 458 460 #ifdef CONFIG_SMP 459 461 send_IPI_mask(mask, LOCAL_TIMER_VECTOR); ··· 469 471 struct clock_event_device *levt = &__get_cpu_var(lapic_events); 470 472 471 473 memcpy(levt, &lapic_clockevent, sizeof(*levt)); 472 - levt->cpumask = cpumask_of_cpu(smp_processor_id()); 474 + levt->cpumask = cpumask_of(smp_processor_id()); 473 475 474 476 clockevents_register_device(levt); 475 477 } ··· 1805 1807 void __cpuinit generic_processor_info(int apicid, int version) 1806 1808 { 1807 1809 int cpu; 1808 - cpumask_t tmp_map; 1809 1810 1810 1811 /* 1811 1812 * Validate version 1812 1813 */ 1813 1814 if (version == 0x0) { 1814 1815 pr_warning("BIOS bug, APIC version is 0 for CPU#%d! " 1815 - "fixing up to 0x10. (tell your hw vendor)\n", 1816 - version); 1816 + "fixing up to 0x10. (tell your hw vendor)\n", 1817 + version); 1817 1818 version = 0x10; 1818 1819 } 1819 1820 apic_version[apicid] = version; 1820 1821 1821 - if (num_processors >= NR_CPUS) { 1822 - pr_warning("WARNING: NR_CPUS limit of %i reached." 1823 - " Processor ignored.\n", NR_CPUS); 1822 + if (num_processors >= nr_cpu_ids) { 1823 + int max = nr_cpu_ids; 1824 + int thiscpu = max + disabled_cpus; 1825 + 1826 + pr_warning( 1827 + "ACPI: NR_CPUS/possible_cpus limit of %i reached." 1828 + " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); 1829 + 1830 + disabled_cpus++; 1824 1831 return; 1825 1832 } 1826 1833 1827 1834 num_processors++; 1828 - cpus_complement(tmp_map, cpu_present_map); 1829 - cpu = first_cpu(tmp_map); 1835 + cpu = cpumask_next_zero(-1, cpu_present_mask); 1830 1836 1831 1837 physid_set(apicid, phys_cpu_present_map); 1832 1838 if (apicid == boot_cpu_physical_apicid) { ··· 1880 1878 } 1881 1879 #endif 1882 1880 1883 - cpu_set(cpu, cpu_possible_map); 1884 - cpu_set(cpu, cpu_present_map); 1881 + set_cpu_possible(cpu, true); 1882 + set_cpu_present(cpu, true); 1885 1883 } 1886 1884 1887 1885 #ifdef CONFIG_X86_64 ··· 2083 2081 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); 2084 2082 bitmap_zero(clustermap, NUM_APIC_CLUSTERS); 2085 2083 2086 - for (i = 0; i < NR_CPUS; i++) { 2084 + for (i = 0; i < nr_cpu_ids; i++) { 2087 2085 /* are we being called early in kernel startup? */ 2088 2086 if (bios_cpu_apicid) { 2089 2087 id = bios_cpu_apicid[i];
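generic_processor_info() now finds the first unused logical CPU with cpumask_next_zero() and registers it through set_cpu_possible()/set_cpu_present(), instead of complementing cpu_present_map into a stack temporary; past the nr_cpu_ids limit the APIC is counted in disabled_cpus and skipped. The selection step, as a sketch with a hypothetical wrapper:

    static int claim_next_cpu(void)
    {
            int cpu = cpumask_next_zero(-1, cpu_present_mask);

            if (cpu >= nr_cpu_ids)
                    return -ENOSPC;         /* NR_CPUS/possible_cpus limit hit */

            set_cpu_possible(cpu, true);
            set_cpu_present(cpu, true);
            return cpu;
    }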
+23 -26
arch/x86/kernel/cpu/intel_cacheinfo.c
··· 534 534 per_cpu(cpuid4_info, cpu) = NULL; 535 535 } 536 536 537 + static void get_cpu_leaves(void *_retval) 538 + { 539 + int j, *retval = _retval, cpu = smp_processor_id(); 540 + 541 + /* Do cpuid and store the results */ 542 + for (j = 0; j < num_cache_leaves; j++) { 543 + struct _cpuid4_info *this_leaf; 544 + this_leaf = CPUID4_INFO_IDX(cpu, j); 545 + *retval = cpuid4_cache_lookup(j, this_leaf); 546 + if (unlikely(*retval < 0)) { 547 + int i; 548 + 549 + for (i = 0; i < j; i++) 550 + cache_remove_shared_cpu_map(cpu, i); 551 + break; 552 + } 553 + cache_shared_cpu_map_setup(cpu, j); 554 + } 555 + } 556 + 537 557 static int __cpuinit detect_cache_attributes(unsigned int cpu) 538 558 { 539 - struct _cpuid4_info *this_leaf; 540 - unsigned long j; 541 559 int retval; 542 - cpumask_t oldmask; 543 560 544 561 if (num_cache_leaves == 0) 545 562 return -ENOENT; ··· 566 549 if (per_cpu(cpuid4_info, cpu) == NULL) 567 550 return -ENOMEM; 568 551 569 - oldmask = current->cpus_allowed; 570 - retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 571 - if (retval) 572 - goto out; 573 - 574 - /* Do cpuid and store the results */ 575 - for (j = 0; j < num_cache_leaves; j++) { 576 - this_leaf = CPUID4_INFO_IDX(cpu, j); 577 - retval = cpuid4_cache_lookup(j, this_leaf); 578 - if (unlikely(retval < 0)) { 579 - int i; 580 - 581 - for (i = 0; i < j; i++) 582 - cache_remove_shared_cpu_map(cpu, i); 583 - break; 584 - } 585 - cache_shared_cpu_map_setup(cpu, j); 586 - } 587 - set_cpus_allowed_ptr(current, &oldmask); 588 - 589 - out: 552 + smp_call_function_single(cpu, get_cpu_leaves, &retval, true); 590 553 if (retval) { 591 554 kfree(per_cpu(cpuid4_info, cpu)); 592 555 per_cpu(cpuid4_info, cpu) = NULL; ··· 623 626 cpumask_t *mask = &this_leaf->shared_cpu_map; 624 627 625 628 n = type? 626 - cpulist_scnprintf(buf, len-2, *mask): 627 - cpumask_scnprintf(buf, len-2, *mask); 629 + cpulist_scnprintf(buf, len-2, mask) : 630 + cpumask_scnprintf(buf, len-2, mask); 628 631 buf[n++] = '\n'; 629 632 buf[n] = '\0'; 630 633 }
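detect_cache_attributes() no longer rebinds current with set_cpus_allowed_ptr(); the CPUID probe runs on the target CPU via smp_call_function_single() with wait=1. The shape of that call, reduced to a sketch (hypothetical names):

    static void probe_fn(void *info)
    {
            int *ret = info;

            *ret = smp_processor_id();      /* executes on the target CPU */
    }

    static int probe_on(int cpu)
    {
            int ret = -1;

            smp_call_function_single(cpu, probe_fn, &ret, 1);   /* 1 == wait */
            return ret;
    }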
+55 -53
arch/x86/kernel/cpu/mcheck/mce_amd_64.c
··· 83 83 * CPU Initialization 84 84 */ 85 85 86 + struct thresh_restart { 87 + struct threshold_block *b; 88 + int reset; 89 + u16 old_limit; 90 + }; 91 + 86 92 /* must be called with correct cpu affinity */ 87 - static void threshold_restart_bank(struct threshold_block *b, 88 - int reset, u16 old_limit) 93 + static long threshold_restart_bank(void *_tr) 89 94 { 95 + struct thresh_restart *tr = _tr; 90 96 u32 mci_misc_hi, mci_misc_lo; 91 97 92 - rdmsr(b->address, mci_misc_lo, mci_misc_hi); 98 + rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi); 93 99 94 - if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) 95 - reset = 1; /* limit cannot be lower than err count */ 100 + if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) 101 + tr->reset = 1; /* limit cannot be lower than err count */ 96 102 97 - if (reset) { /* reset err count and overflow bit */ 103 + if (tr->reset) { /* reset err count and overflow bit */ 98 104 mci_misc_hi = 99 105 (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | 100 - (THRESHOLD_MAX - b->threshold_limit); 101 - } else if (old_limit) { /* change limit w/o reset */ 106 + (THRESHOLD_MAX - tr->b->threshold_limit); 107 + } else if (tr->old_limit) { /* change limit w/o reset */ 102 108 int new_count = (mci_misc_hi & THRESHOLD_MAX) + 103 - (old_limit - b->threshold_limit); 109 + (tr->old_limit - tr->b->threshold_limit); 104 110 mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) | 105 111 (new_count & THRESHOLD_MAX); 106 112 } 107 113 108 - b->interrupt_enable ? 114 + tr->b->interrupt_enable ? 109 115 (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) : 110 116 (mci_misc_hi &= ~MASK_INT_TYPE_HI); 111 117 112 118 mci_misc_hi |= MASK_COUNT_EN_HI; 113 - wrmsr(b->address, mci_misc_lo, mci_misc_hi); 119 + wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi); 120 + return 0; 114 121 } 115 122 116 123 /* cpu init entry point, called from mce.c with preempt off */ ··· 127 120 unsigned int cpu = smp_processor_id(); 128 121 u8 lvt_off; 129 122 u32 low = 0, high = 0, address = 0; 123 + struct thresh_restart tr; 130 124 131 125 for (bank = 0; bank < NR_BANKS; ++bank) { 132 126 for (block = 0; block < NR_BLOCKS; ++block) { ··· 170 162 wrmsr(address, low, high); 171 163 172 164 threshold_defaults.address = address; 173 - threshold_restart_bank(&threshold_defaults, 0, 0); 165 + tr.b = &threshold_defaults; 166 + tr.reset = 0; 167 + tr.old_limit = 0; 168 + threshold_restart_bank(&tr); 174 169 } 175 170 } 176 171 } ··· 262 251 ssize_t(*store) (struct threshold_block *, const char *, size_t count); 263 252 }; 264 253 265 - static void affinity_set(unsigned int cpu, cpumask_t *oldmask, 266 - cpumask_t *newmask) 267 - { 268 - *oldmask = current->cpus_allowed; 269 - cpus_clear(*newmask); 270 - cpu_set(cpu, *newmask); 271 - set_cpus_allowed_ptr(current, newmask); 272 - } 273 - 274 - static void affinity_restore(const cpumask_t *oldmask) 275 - { 276 - set_cpus_allowed_ptr(current, oldmask); 277 - } 278 - 279 254 #define SHOW_FIELDS(name) \ 280 255 static ssize_t show_ ## name(struct threshold_block * b, char *buf) \ 281 256 { \ ··· 274 277 const char *buf, size_t count) 275 278 { 276 279 char *end; 277 - cpumask_t oldmask, newmask; 280 + struct thresh_restart tr; 278 281 unsigned long new = simple_strtoul(buf, &end, 0); 279 282 if (end == buf) 280 283 return -EINVAL; 281 284 b->interrupt_enable = !!new; 282 285 283 - affinity_set(b->cpu, &oldmask, &newmask); 284 - threshold_restart_bank(b, 0, 0); 285 - affinity_restore(&oldmask); 286 + tr.b = b; 287 + tr.reset = 0; 288 + 
tr.old_limit = 0; 289 + work_on_cpu(b->cpu, threshold_restart_bank, &tr); 286 290 287 291 return end - buf; 288 292 } ··· 292 294 const char *buf, size_t count) 293 295 { 294 296 char *end; 295 - cpumask_t oldmask, newmask; 296 - u16 old; 297 + struct thresh_restart tr; 297 298 unsigned long new = simple_strtoul(buf, &end, 0); 298 299 if (end == buf) 299 300 return -EINVAL; ··· 300 303 new = THRESHOLD_MAX; 301 304 if (new < 1) 302 305 new = 1; 303 - old = b->threshold_limit; 306 + tr.old_limit = b->threshold_limit; 304 307 b->threshold_limit = new; 308 + tr.b = b; 309 + tr.reset = 0; 305 310 306 - affinity_set(b->cpu, &oldmask, &newmask); 307 - threshold_restart_bank(b, 0, old); 308 - affinity_restore(&oldmask); 311 + work_on_cpu(b->cpu, threshold_restart_bank, &tr); 309 312 310 313 return end - buf; 311 314 } 312 315 316 + static long local_error_count(void *_b) 317 + { 318 + struct threshold_block *b = _b; 319 + u32 low, high; 320 + 321 + rdmsr(b->address, low, high); 322 + return (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit); 323 + } 324 + 313 325 static ssize_t show_error_count(struct threshold_block *b, char *buf) 314 326 { 315 - u32 high, low; 316 - cpumask_t oldmask, newmask; 317 - affinity_set(b->cpu, &oldmask, &newmask); 318 - rdmsr(b->address, low, high); 319 - affinity_restore(&oldmask); 320 - return sprintf(buf, "%x\n", 321 - (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit)); 327 + return sprintf(buf, "%lx\n", work_on_cpu(b->cpu, local_error_count, b)); 322 328 } 323 329 324 330 static ssize_t store_error_count(struct threshold_block *b, 325 331 const char *buf, size_t count) 326 332 { 327 - cpumask_t oldmask, newmask; 328 - affinity_set(b->cpu, &oldmask, &newmask); 329 - threshold_restart_bank(b, 1, 0); 330 - affinity_restore(&oldmask); 333 + struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 }; 334 + 335 + work_on_cpu(b->cpu, threshold_restart_bank, &tr); 331 336 return 1; 332 337 } 333 338 ··· 462 463 return err; 463 464 } 464 465 466 + static long local_allocate_threshold_blocks(void *_bank) 467 + { 468 + unsigned int *bank = _bank; 469 + 470 + return allocate_threshold_blocks(smp_processor_id(), *bank, 0, 471 + MSR_IA32_MC0_MISC + *bank * 4); 472 + } 473 + 465 474 /* symlinks sibling shared banks to first core. first core owns dir/files. */ 466 475 static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) 467 476 { 468 477 int i, err = 0; 469 478 struct threshold_bank *b = NULL; 470 - cpumask_t oldmask, newmask; 471 479 char name[32]; 472 480 473 481 sprintf(name, "threshold_bank%i", bank); ··· 525 519 526 520 per_cpu(threshold_banks, cpu)[bank] = b; 527 521 528 - affinity_set(cpu, &oldmask, &newmask); 529 - err = allocate_threshold_blocks(cpu, bank, 0, 530 - MSR_IA32_MC0_MISC + bank * 4); 531 - affinity_restore(&oldmask); 532 - 522 + err = work_on_cpu(cpu, local_allocate_threshold_blocks, &bank); 533 523 if (err) 534 524 goto out_free; 535 525
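The MCE threshold code now packs its arguments into struct thresh_restart and hands the MSR work to work_on_cpu(), which runs a long-returning callback on the target CPU and waits for its result, so no affinity juggling of current is needed (and, since it waits, it must not be called from atomic context). A hedged usage sketch:

    static long where_am_i(void *unused)
    {
            return (long)smp_processor_id();        /* runs on the chosen CPU */
    }

    static void report_cpu(int cpu)
    {
            long ran_on = work_on_cpu(cpu, where_am_i, NULL);

            pr_info("callback ran on cpu %ld\n", ran_on);
    }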
+81 -26
arch/x86/kernel/genapic_flat_64.c
··· 30 30 return 1; 31 31 } 32 32 33 - static cpumask_t flat_target_cpus(void) 33 + static const struct cpumask *flat_target_cpus(void) 34 34 { 35 - return cpu_online_map; 35 + return cpu_online_mask; 36 36 } 37 37 38 - static cpumask_t flat_vector_allocation_domain(int cpu) 38 + static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask) 39 39 { 40 40 /* Careful. Some cpus do not strictly honor the set of cpus 41 41 * specified in the interrupt destination when using lowest ··· 45 45 * deliver interrupts to the wrong hyperthread when only one 46 46 * hyperthread was specified in the interrupt desitination. 47 47 */ 48 - cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 49 - return domain; 48 + cpumask_clear(retmask); 49 + cpumask_bits(retmask)[0] = APIC_ALL_CPUS; 50 50 } 51 51 52 52 /* ··· 69 69 apic_write(APIC_LDR, val); 70 70 } 71 71 72 - static void flat_send_IPI_mask(cpumask_t cpumask, int vector) 72 + static inline void _flat_send_IPI_mask(unsigned long mask, int vector) 73 73 { 74 - unsigned long mask = cpus_addr(cpumask)[0]; 75 74 unsigned long flags; 76 75 77 76 local_irq_save(flags); ··· 78 79 local_irq_restore(flags); 79 80 } 80 81 82 + static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) 83 + { 84 + unsigned long mask = cpumask_bits(cpumask)[0]; 85 + 86 + _flat_send_IPI_mask(mask, vector); 87 + } 88 + 89 + static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, 90 + int vector) 91 + { 92 + unsigned long mask = cpumask_bits(cpumask)[0]; 93 + int cpu = smp_processor_id(); 94 + 95 + if (cpu < BITS_PER_LONG) 96 + clear_bit(cpu, &mask); 97 + _flat_send_IPI_mask(mask, vector); 98 + } 99 + 81 100 static void flat_send_IPI_allbutself(int vector) 82 101 { 102 + int cpu = smp_processor_id(); 83 103 #ifdef CONFIG_HOTPLUG_CPU 84 104 int hotplug = 1; 85 105 #else 86 106 int hotplug = 0; 87 107 #endif 88 108 if (hotplug || vector == NMI_VECTOR) { 89 - cpumask_t allbutme = cpu_online_map; 109 + if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) { 110 + unsigned long mask = cpumask_bits(cpu_online_mask)[0]; 90 111 91 - cpu_clear(smp_processor_id(), allbutme); 112 + if (cpu < BITS_PER_LONG) 113 + clear_bit(cpu, &mask); 92 114 93 - if (!cpus_empty(allbutme)) 94 - flat_send_IPI_mask(allbutme, vector); 115 + _flat_send_IPI_mask(mask, vector); 116 + } 95 117 } else if (num_online_cpus() > 1) { 96 118 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); 97 119 } ··· 121 101 static void flat_send_IPI_all(int vector) 122 102 { 123 103 if (vector == NMI_VECTOR) 124 - flat_send_IPI_mask(cpu_online_map, vector); 104 + flat_send_IPI_mask(cpu_online_mask, vector); 125 105 else 126 106 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); 127 107 } ··· 155 135 return physid_isset(read_xapic_id(), phys_cpu_present_map); 156 136 } 157 137 158 - static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) 138 + static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask) 159 139 { 160 - return cpus_addr(cpumask)[0] & APIC_ALL_CPUS; 140 + return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; 141 + } 142 + 143 + static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 144 + const struct cpumask *andmask) 145 + { 146 + unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; 147 + unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS; 148 + 149 + return mask1 & mask2; 161 150 } 162 151 163 152 static unsigned int phys_pkg_id(int index_msb) ··· 186 157 .send_IPI_all = flat_send_IPI_all, 
187 158 .send_IPI_allbutself = flat_send_IPI_allbutself, 188 159 .send_IPI_mask = flat_send_IPI_mask, 160 + .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, 189 161 .send_IPI_self = apic_send_IPI_self, 190 162 .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, 163 + .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, 191 164 .phys_pkg_id = phys_pkg_id, 192 165 .get_apic_id = get_apic_id, 193 166 .set_apic_id = set_apic_id, ··· 219 188 return 0; 220 189 } 221 190 222 - static cpumask_t physflat_target_cpus(void) 191 + static const struct cpumask *physflat_target_cpus(void) 223 192 { 224 - return cpu_online_map; 193 + return cpu_online_mask; 225 194 } 226 195 227 - static cpumask_t physflat_vector_allocation_domain(int cpu) 196 + static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask) 228 197 { 229 - return cpumask_of_cpu(cpu); 198 + cpumask_clear(retmask); 199 + cpumask_set_cpu(cpu, retmask); 230 200 } 231 201 232 - static void physflat_send_IPI_mask(cpumask_t cpumask, int vector) 202 + static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) 233 203 { 234 204 send_IPI_mask_sequence(cpumask, vector); 235 205 } 236 206 207 + static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, 208 + int vector) 209 + { 210 + send_IPI_mask_allbutself(cpumask, vector); 211 + } 212 + 237 213 static void physflat_send_IPI_allbutself(int vector) 238 214 { 239 - cpumask_t allbutme = cpu_online_map; 240 - 241 - cpu_clear(smp_processor_id(), allbutme); 242 - physflat_send_IPI_mask(allbutme, vector); 215 + send_IPI_mask_allbutself(cpu_online_mask, vector); 243 216 } 244 217 245 218 static void physflat_send_IPI_all(int vector) 246 219 { 247 - physflat_send_IPI_mask(cpu_online_map, vector); 220 + physflat_send_IPI_mask(cpu_online_mask, vector); 248 221 } 249 222 250 - static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) 223 + static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask) 251 224 { 252 225 int cpu; 253 226 ··· 259 224 * We're using fixed IRQ delivery, can only return one phys APIC ID. 260 225 * May as well be the first. 261 226 */ 262 - cpu = first_cpu(cpumask); 227 + cpu = cpumask_first(cpumask); 263 228 if ((unsigned)cpu < nr_cpu_ids) 264 229 return per_cpu(x86_cpu_to_apicid, cpu); 265 230 else 266 231 return BAD_APICID; 232 + } 233 + 234 + static unsigned int 235 + physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 236 + const struct cpumask *andmask) 237 + { 238 + int cpu; 239 + 240 + /* 241 + * We're using fixed IRQ delivery, can only return one phys APIC ID. 242 + * May as well be the first. 243 + */ 244 + for_each_cpu_and(cpu, cpumask, andmask) 245 + if (cpumask_test_cpu(cpu, cpu_online_mask)) 246 + break; 247 + if (cpu < nr_cpu_ids) 248 + return per_cpu(x86_cpu_to_apicid, cpu); 249 + return BAD_APICID; 267 250 } 268 251 269 252 struct genapic apic_physflat = { ··· 296 243 .send_IPI_all = physflat_send_IPI_all, 297 244 .send_IPI_allbutself = physflat_send_IPI_allbutself, 298 245 .send_IPI_mask = physflat_send_IPI_mask, 246 + .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, 299 247 .send_IPI_self = apic_send_IPI_self, 300 248 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, 249 + .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, 301 250 .phys_pkg_id = phys_pkg_id, 302 251 .get_apic_id = get_apic_id, 303 252 .set_apic_id = set_apic_id,
+60 -21
arch/x86/kernel/genx2apic_cluster.c
··· 22 22 23 23 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 24 24 25 - static cpumask_t x2apic_target_cpus(void) 25 + static const struct cpumask *x2apic_target_cpus(void) 26 26 { 27 - return cpumask_of_cpu(0); 27 + return cpumask_of(0); 28 28 } 29 29 30 30 /* 31 31 * for now each logical cpu is in its own vector allocation domain. 32 32 */ 33 - static cpumask_t x2apic_vector_allocation_domain(int cpu) 33 + static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) 34 34 { 35 - cpumask_t domain = CPU_MASK_NONE; 36 - cpu_set(cpu, domain); 37 - return domain; 35 + cpumask_clear(retmask); 36 + cpumask_set_cpu(cpu, retmask); 38 37 } 39 38 40 39 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, ··· 55 56 * at once. We have 16 cpu's in a cluster. This will minimize IPI register 56 57 * writes. 57 58 */ 58 - static void x2apic_send_IPI_mask(cpumask_t mask, int vector) 59 + static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) 59 60 { 60 61 unsigned long flags; 61 62 unsigned long query_cpu; 62 63 63 64 local_irq_save(flags); 64 - for_each_cpu_mask(query_cpu, mask) { 65 - __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu), 66 - vector, APIC_DEST_LOGICAL); 67 - } 65 + for_each_cpu(query_cpu, mask) 66 + __x2apic_send_IPI_dest( 67 + per_cpu(x86_cpu_to_logical_apicid, query_cpu), 68 + vector, APIC_DEST_LOGICAL); 69 + local_irq_restore(flags); 70 + } 71 + 72 + static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, 73 + int vector) 74 + { 75 + unsigned long flags; 76 + unsigned long query_cpu; 77 + unsigned long this_cpu = smp_processor_id(); 78 + 79 + local_irq_save(flags); 80 + for_each_cpu(query_cpu, mask) 81 + if (query_cpu != this_cpu) 82 + __x2apic_send_IPI_dest( 83 + per_cpu(x86_cpu_to_logical_apicid, query_cpu), 84 + vector, APIC_DEST_LOGICAL); 68 85 local_irq_restore(flags); 69 86 } 70 87 71 88 static void x2apic_send_IPI_allbutself(int vector) 72 89 { 73 - cpumask_t mask = cpu_online_map; 90 + unsigned long flags; 91 + unsigned long query_cpu; 92 + unsigned long this_cpu = smp_processor_id(); 74 93 75 - cpu_clear(smp_processor_id(), mask); 76 - 77 - if (!cpus_empty(mask)) 78 - x2apic_send_IPI_mask(mask, vector); 94 + local_irq_save(flags); 95 + for_each_online_cpu(query_cpu) 96 + if (query_cpu != this_cpu) 97 + __x2apic_send_IPI_dest( 98 + per_cpu(x86_cpu_to_logical_apicid, query_cpu), 99 + vector, APIC_DEST_LOGICAL); 100 + local_irq_restore(flags); 79 101 } 80 102 81 103 static void x2apic_send_IPI_all(int vector) 82 104 { 83 - x2apic_send_IPI_mask(cpu_online_map, vector); 105 + x2apic_send_IPI_mask(cpu_online_mask, vector); 84 106 } 85 107 86 108 static int x2apic_apic_id_registered(void) ··· 109 89 return 1; 110 90 } 111 91 112 - static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) 92 + static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) 113 93 { 114 94 int cpu; 115 95 116 96 /* 117 - * We're using fixed IRQ delivery, can only return one phys APIC ID. 97 + * We're using fixed IRQ delivery, can only return one logical APIC ID. 118 98 * May as well be the first. 
119 99 */ 120 - cpu = first_cpu(cpumask); 121 - if ((unsigned)cpu < NR_CPUS) 100 + cpu = cpumask_first(cpumask); 101 + if ((unsigned)cpu < nr_cpu_ids) 122 102 return per_cpu(x86_cpu_to_logical_apicid, cpu); 123 103 else 124 104 return BAD_APICID; 105 + } 106 + 107 + static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 108 + const struct cpumask *andmask) 109 + { 110 + int cpu; 111 + 112 + /* 113 + * We're using fixed IRQ delivery, can only return one logical APIC ID. 114 + * May as well be the first. 115 + */ 116 + for_each_cpu_and(cpu, cpumask, andmask) 117 + if (cpumask_test_cpu(cpu, cpu_online_mask)) 118 + break; 119 + if (cpu < nr_cpu_ids) 120 + return per_cpu(x86_cpu_to_logical_apicid, cpu); 121 + return BAD_APICID; 125 122 } 126 123 127 124 static unsigned int get_apic_id(unsigned long x) ··· 187 150 .send_IPI_all = x2apic_send_IPI_all, 188 151 .send_IPI_allbutself = x2apic_send_IPI_allbutself, 189 152 .send_IPI_mask = x2apic_send_IPI_mask, 153 + .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, 190 154 .send_IPI_self = x2apic_send_IPI_self, 191 155 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 156 + .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, 192 157 .phys_pkg_id = phys_pkg_id, 193 158 .get_apic_id = get_apic_id, 194 159 .set_apic_id = set_apic_id,
+57 -17
arch/x86/kernel/genx2apic_phys.c
··· 29 29 30 30 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 31 31 32 - static cpumask_t x2apic_target_cpus(void) 32 + static const struct cpumask *x2apic_target_cpus(void) 33 33 { 34 - return cpumask_of_cpu(0); 34 + return cpumask_of(0); 35 35 } 36 36 37 - static cpumask_t x2apic_vector_allocation_domain(int cpu) 37 + static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) 38 38 { 39 - cpumask_t domain = CPU_MASK_NONE; 40 - cpu_set(cpu, domain); 41 - return domain; 39 + cpumask_clear(retmask); 40 + cpumask_set_cpu(cpu, retmask); 42 41 } 43 42 44 43 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, ··· 53 54 x2apic_icr_write(cfg, apicid); 54 55 } 55 56 56 - static void x2apic_send_IPI_mask(cpumask_t mask, int vector) 57 + static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) 57 58 { 58 59 unsigned long flags; 59 60 unsigned long query_cpu; 60 61 61 62 local_irq_save(flags); 62 - for_each_cpu_mask(query_cpu, mask) { 63 + for_each_cpu(query_cpu, mask) { 63 64 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), 64 65 vector, APIC_DEST_PHYSICAL); 65 66 } 66 67 local_irq_restore(flags); 67 68 } 68 69 70 + static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, 71 + int vector) 72 + { 73 + unsigned long flags; 74 + unsigned long query_cpu; 75 + unsigned long this_cpu = smp_processor_id(); 76 + 77 + local_irq_save(flags); 78 + for_each_cpu(query_cpu, mask) { 79 + if (query_cpu != this_cpu) 80 + __x2apic_send_IPI_dest( 81 + per_cpu(x86_cpu_to_apicid, query_cpu), 82 + vector, APIC_DEST_PHYSICAL); 83 + } 84 + local_irq_restore(flags); 85 + } 86 + 69 87 static void x2apic_send_IPI_allbutself(int vector) 70 88 { 71 - cpumask_t mask = cpu_online_map; 89 + unsigned long flags; 90 + unsigned long query_cpu; 91 + unsigned long this_cpu = smp_processor_id(); 72 92 73 - cpu_clear(smp_processor_id(), mask); 74 - 75 - if (!cpus_empty(mask)) 76 - x2apic_send_IPI_mask(mask, vector); 93 + local_irq_save(flags); 94 + for_each_online_cpu(query_cpu) 95 + if (query_cpu != this_cpu) 96 + __x2apic_send_IPI_dest( 97 + per_cpu(x86_cpu_to_apicid, query_cpu), 98 + vector, APIC_DEST_PHYSICAL); 99 + local_irq_restore(flags); 77 100 } 78 101 79 102 static void x2apic_send_IPI_all(int vector) 80 103 { 81 - x2apic_send_IPI_mask(cpu_online_map, vector); 104 + x2apic_send_IPI_mask(cpu_online_mask, vector); 82 105 } 83 106 84 107 static int x2apic_apic_id_registered(void) ··· 108 87 return 1; 109 88 } 110 89 111 - static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) 90 + static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) 112 91 { 113 92 int cpu; 114 93 ··· 116 95 * We're using fixed IRQ delivery, can only return one phys APIC ID. 117 96 * May as well be the first. 118 97 */ 119 - cpu = first_cpu(cpumask); 120 - if ((unsigned)cpu < NR_CPUS) 98 + cpu = cpumask_first(cpumask); 99 + if ((unsigned)cpu < nr_cpu_ids) 121 100 return per_cpu(x86_cpu_to_apicid, cpu); 122 101 else 123 102 return BAD_APICID; 103 + } 104 + 105 + static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 106 + const struct cpumask *andmask) 107 + { 108 + int cpu; 109 + 110 + /* 111 + * We're using fixed IRQ delivery, can only return one phys APIC ID. 112 + * May as well be the first. 
113 + */ 114 + for_each_cpu_and(cpu, cpumask, andmask) 115 + if (cpumask_test_cpu(cpu, cpu_online_mask)) 116 + break; 117 + if (cpu < nr_cpu_ids) 118 + return per_cpu(x86_cpu_to_apicid, cpu); 119 + return BAD_APICID; 124 120 } 125 121 126 122 static unsigned int get_apic_id(unsigned long x) ··· 183 145 .send_IPI_all = x2apic_send_IPI_all, 184 146 .send_IPI_allbutself = x2apic_send_IPI_allbutself, 185 147 .send_IPI_mask = x2apic_send_IPI_mask, 148 + .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, 186 149 .send_IPI_self = x2apic_send_IPI_self, 187 150 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 151 + .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, 188 152 .phys_pkg_id = phys_pkg_id, 189 153 .get_apic_id = get_apic_id, 190 154 .set_apic_id = set_apic_id,
+44 -17
arch/x86/kernel/genx2apic_uv_x.c
··· 79 79 80 80 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 81 81 82 - static cpumask_t uv_target_cpus(void) 82 + static const struct cpumask *uv_target_cpus(void) 83 83 { 84 - return cpumask_of_cpu(0); 84 + return cpumask_of(0); 85 85 } 86 86 87 - static cpumask_t uv_vector_allocation_domain(int cpu) 87 + static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) 88 88 { 89 - cpumask_t domain = CPU_MASK_NONE; 90 - cpu_set(cpu, domain); 91 - return domain; 89 + cpumask_clear(retmask); 90 + cpumask_set_cpu(cpu, retmask); 92 91 } 93 92 94 93 int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) ··· 126 127 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 127 128 } 128 129 129 - static void uv_send_IPI_mask(cpumask_t mask, int vector) 130 + static void uv_send_IPI_mask(const struct cpumask *mask, int vector) 130 131 { 131 132 unsigned int cpu; 132 133 133 - for_each_possible_cpu(cpu) 134 - if (cpu_isset(cpu, mask)) 134 + for_each_cpu(cpu, mask) 135 + uv_send_IPI_one(cpu, vector); 136 + } 137 + 138 + static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) 139 + { 140 + unsigned int cpu; 141 + unsigned int this_cpu = smp_processor_id(); 142 + 143 + for_each_cpu(cpu, mask) 144 + if (cpu != this_cpu) 135 145 uv_send_IPI_one(cpu, vector); 136 146 } 137 147 138 148 static void uv_send_IPI_allbutself(int vector) 139 149 { 140 - cpumask_t mask = cpu_online_map; 150 + unsigned int cpu; 151 + unsigned int this_cpu = smp_processor_id(); 141 152 142 - cpu_clear(smp_processor_id(), mask); 143 - 144 - if (!cpus_empty(mask)) 145 - uv_send_IPI_mask(mask, vector); 153 + for_each_online_cpu(cpu) 154 + if (cpu != this_cpu) 155 + uv_send_IPI_one(cpu, vector); 146 156 } 147 157 148 158 static void uv_send_IPI_all(int vector) 149 159 { 150 - uv_send_IPI_mask(cpu_online_map, vector); 160 + uv_send_IPI_mask(cpu_online_mask, vector); 151 161 } 152 162 153 163 static int uv_apic_id_registered(void) ··· 168 160 { 169 161 } 170 162 171 - static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) 163 + static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) 172 164 { 173 165 int cpu; 174 166 ··· 176 168 * We're using fixed IRQ delivery, can only return one phys APIC ID. 177 169 * May as well be the first. 178 170 */ 179 - cpu = first_cpu(cpumask); 171 + cpu = cpumask_first(cpumask); 180 172 if ((unsigned)cpu < nr_cpu_ids) 181 173 return per_cpu(x86_cpu_to_apicid, cpu); 182 174 else 183 175 return BAD_APICID; 176 + } 177 + 178 + static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 179 + const struct cpumask *andmask) 180 + { 181 + int cpu; 182 + 183 + /* 184 + * We're using fixed IRQ delivery, can only return one phys APIC ID. 185 + * May as well be the first. 
186 + */ 187 + for_each_cpu_and(cpu, cpumask, andmask) 188 + if (cpumask_test_cpu(cpu, cpu_online_mask)) 189 + break; 190 + if (cpu < nr_cpu_ids) 191 + return per_cpu(x86_cpu_to_apicid, cpu); 192 + return BAD_APICID; 184 193 } 185 194 186 195 static unsigned int get_apic_id(unsigned long x) ··· 247 222 .send_IPI_all = uv_send_IPI_all, 248 223 .send_IPI_allbutself = uv_send_IPI_allbutself, 249 224 .send_IPI_mask = uv_send_IPI_mask, 225 + .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, 250 226 .send_IPI_self = uv_send_IPI_self, 251 227 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, 228 + .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and, 252 229 .phys_pkg_id = phys_pkg_id, 253 230 .get_apic_id = get_apic_id, 254 231 .set_apic_id = set_apic_id,
+4 -4
arch/x86/kernel/hpet.c
··· 248 248 * Start hpet with the boot cpu mask and make it 249 249 * global after the IO_APIC has been initialized. 250 250 */ 251 - hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); 251 + hpet_clockevent.cpumask = cpumask_of(smp_processor_id()); 252 252 clockevents_register_device(&hpet_clockevent); 253 253 global_clock_event = &hpet_clockevent; 254 254 printk(KERN_DEBUG "hpet clockevent registered\n"); ··· 303 303 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); 304 304 hpet_setup_msi_irq(hdev->irq); 305 305 disable_irq(hdev->irq); 306 - irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu)); 306 + irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); 307 307 enable_irq(hdev->irq); 308 308 } 309 309 break; ··· 451 451 return -1; 452 452 453 453 disable_irq(dev->irq); 454 - irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu)); 454 + irq_set_affinity(dev->irq, cpumask_of(dev->cpu)); 455 455 enable_irq(dev->irq); 456 456 457 457 printk(KERN_DEBUG "hpet: %s irq %d for MSI\n", ··· 502 502 /* 5 usec minimum reprogramming delta. */ 503 503 evt->min_delta_ns = 5000; 504 504 505 - evt->cpumask = cpumask_of_cpu(hdev->cpu); 505 + evt->cpumask = cpumask_of(hdev->cpu); 506 506 clockevents_register_device(evt); 507 507 } 508 508
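cpumask_of(cpu) returns a const struct cpumask * into a shared constant table, so no NR_CPUS-sized value is copied; the clockevent ->cpumask field takes that pointer directly in this series. A brief sketch, assuming a clock_event_device whose cpumask member is already the pointer type:

    static void register_percpu_evt(struct clock_event_device *evt)
    {
            evt->cpumask = cpumask_of(smp_processor_id());  /* no copy */
            clockevents_register_device(evt);
    }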
+1 -1
arch/x86/kernel/i8253.c
··· 114 114 * Start pit with the boot cpu mask and make it global after the 115 115 * IO_APIC has been initialized. 116 116 */ 117 - pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); 117 + pit_clockevent.cpumask = cpumask_of(smp_processor_id()); 118 118 pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 119 119 pit_clockevent.shift); 120 120 pit_clockevent.max_delta_ns =
+178 -192
arch/x86/kernel/io_apic.c
··· 136 136 137 137 struct irq_cfg { 138 138 struct irq_pin_list *irq_2_pin; 139 - cpumask_t domain; 140 - cpumask_t old_domain; 139 + cpumask_var_t domain; 140 + cpumask_var_t old_domain; 141 141 unsigned move_cleanup_count; 142 142 u8 vector; 143 143 u8 move_in_progress : 1; ··· 152 152 #else 153 153 static struct irq_cfg irq_cfgx[NR_IRQS] = { 154 154 #endif 155 - [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, 156 - [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, 157 - [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, 158 - [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, 159 - [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, 160 - [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, 161 - [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, 162 - [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, 163 - [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, 164 - [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, 165 - [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, 166 - [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, 167 - [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, 168 - [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, 169 - [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, 170 - [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, 155 + [0] = { .vector = IRQ0_VECTOR, }, 156 + [1] = { .vector = IRQ1_VECTOR, }, 157 + [2] = { .vector = IRQ2_VECTOR, }, 158 + [3] = { .vector = IRQ3_VECTOR, }, 159 + [4] = { .vector = IRQ4_VECTOR, }, 160 + [5] = { .vector = IRQ5_VECTOR, }, 161 + [6] = { .vector = IRQ6_VECTOR, }, 162 + [7] = { .vector = IRQ7_VECTOR, }, 163 + [8] = { .vector = IRQ8_VECTOR, }, 164 + [9] = { .vector = IRQ9_VECTOR, }, 165 + [10] = { .vector = IRQ10_VECTOR, }, 166 + [11] = { .vector = IRQ11_VECTOR, }, 167 + [12] = { .vector = IRQ12_VECTOR, }, 168 + [13] = { .vector = IRQ13_VECTOR, }, 169 + [14] = { .vector = IRQ14_VECTOR, }, 170 + [15] = { .vector = IRQ15_VECTOR, }, 171 171 }; 172 172 173 173 int __init arch_early_irq_init(void) ··· 183 183 for (i = 0; i < count; i++) { 184 184 desc = irq_to_desc(i); 185 185 desc->chip_data = &cfg[i]; 186 + alloc_bootmem_cpumask_var(&cfg[i].domain); 187 + alloc_bootmem_cpumask_var(&cfg[i].old_domain); 188 + if (i < NR_IRQS_LEGACY) 189 + cpumask_setall(cfg[i].domain); 186 190 } 187 191 188 192 return 0; ··· 213 209 node = cpu_to_node(cpu); 214 210 215 211 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 212 + if (cfg) { 213 + /* FIXME: needs alloc_cpumask_var_node() */ 214 + if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) { 215 + kfree(cfg); 216 + cfg = NULL; 217 + } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) { 218 + free_cpumask_var(cfg->domain); 219 + kfree(cfg); 220 + cfg = NULL; 221 + } else { 222 + cpumask_clear(cfg->domain); 223 + cpumask_clear(cfg->old_domain); 224 + } 225 + } 216 226 printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node); 217 227 218 228 return cfg; ··· 351 333 } 352 334 } 353 335 354 - static void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask) 336 + static void 337 + set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) 355 338 { 356 339 struct irq_cfg *cfg = desc->chip_data; 357 340 358 341 if (!cfg->move_in_progress) { 359 342 /* it means that domain is not changed */ 360 - if (!cpus_intersects(desc->affinity, mask)) 343 + if (!cpumask_intersects(&desc->affinity, mask)) 361 344 cfg->move_desc_pending = 1; 362 345 } 363 
346 } ··· 373 354 #endif 374 355 375 356 #ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC 376 - static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask) 357 + static inline void 358 + set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) 377 359 { 378 360 } 379 361 #endif ··· 505 485 } 506 486 507 487 #ifdef CONFIG_SMP 488 + static void send_cleanup_vector(struct irq_cfg *cfg) 489 + { 490 + cpumask_var_t cleanup_mask; 491 + 492 + if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { 493 + unsigned int i; 494 + cfg->move_cleanup_count = 0; 495 + for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 496 + cfg->move_cleanup_count++; 497 + for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 498 + send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); 499 + } else { 500 + cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); 501 + cfg->move_cleanup_count = cpumask_weight(cleanup_mask); 502 + send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 503 + free_cpumask_var(cleanup_mask); 504 + } 505 + cfg->move_in_progress = 0; 506 + } 507 + 508 508 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) 509 509 { 510 510 int apic, pin; ··· 560 520 } 561 521 } 562 522 563 - static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask); 523 + static int 524 + assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); 564 525 565 - static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask) 526 + /* 527 + * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid 528 + * of that, or returns BAD_APICID and leaves desc->affinity untouched. 529 + */ 530 + static unsigned int 531 + set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) 566 532 { 567 533 struct irq_cfg *cfg; 568 - unsigned long flags; 569 - unsigned int dest; 570 - cpumask_t tmp; 571 534 unsigned int irq; 572 535 573 - cpus_and(tmp, mask, cpu_online_map); 574 - if (cpus_empty(tmp)) 575 - return; 536 + if (!cpumask_intersects(mask, cpu_online_mask)) 537 + return BAD_APICID; 576 538 577 539 irq = desc->irq; 578 540 cfg = desc->chip_data; 579 541 if (assign_irq_vector(irq, cfg, mask)) 580 - return; 542 + return BAD_APICID; 581 543 544 + cpumask_and(&desc->affinity, cfg->domain, mask); 582 545 set_extra_move_desc(desc, mask); 546 + return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask); 547 + } 583 548 584 - cpus_and(tmp, cfg->domain, mask); 585 - dest = cpu_mask_to_apicid(tmp); 586 - /* 587 - * Only the high 8 bits are valid. 588 - */ 589 - dest = SET_APIC_LOGICAL_ID(dest); 549 + static void 550 + set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) 551 + { 552 + struct irq_cfg *cfg; 553 + unsigned long flags; 554 + unsigned int dest; 555 + unsigned int irq; 556 + 557 + irq = desc->irq; 558 + cfg = desc->chip_data; 590 559 591 560 spin_lock_irqsave(&ioapic_lock, flags); 592 - __target_IO_APIC_irq(irq, dest, cfg); 593 - desc->affinity = mask; 561 + dest = set_desc_affinity(desc, mask); 562 + if (dest != BAD_APICID) { 563 + /* Only the high 8 bits are valid. 
*/ 564 + dest = SET_APIC_LOGICAL_ID(dest); 565 + __target_IO_APIC_irq(irq, dest, cfg); 566 + } 594 567 spin_unlock_irqrestore(&ioapic_lock, flags); 595 568 } 596 569 597 - static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) 570 + static void 571 + set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) 598 572 { 599 573 struct irq_desc *desc; 600 574 ··· 1276 1222 spin_unlock(&vector_lock); 1277 1223 } 1278 1224 1279 - static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) 1225 + static int 1226 + __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) 1280 1227 { 1281 1228 /* 1282 1229 * NOTE! The local APIC isn't very good at handling ··· 1292 1237 */ 1293 1238 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; 1294 1239 unsigned int old_vector; 1295 - int cpu; 1240 + int cpu, err; 1241 + cpumask_var_t tmp_mask; 1296 1242 1297 1243 if ((cfg->move_in_progress) || cfg->move_cleanup_count) 1298 1244 return -EBUSY; 1299 1245 1300 - /* Only try and allocate irqs on cpus that are present */ 1301 - cpus_and(mask, mask, cpu_online_map); 1246 + if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) 1247 + return -ENOMEM; 1302 1248 1303 1249 old_vector = cfg->vector; 1304 1250 if (old_vector) { 1305 - cpumask_t tmp; 1306 - cpus_and(tmp, cfg->domain, mask); 1307 - if (!cpus_empty(tmp)) 1251 + cpumask_and(tmp_mask, mask, cpu_online_mask); 1252 + cpumask_and(tmp_mask, cfg->domain, tmp_mask); 1253 + if (!cpumask_empty(tmp_mask)) { 1254 + free_cpumask_var(tmp_mask); 1308 1255 return 0; 1256 + } 1309 1257 } 1310 1258 1311 - for_each_cpu_mask_nr(cpu, mask) { 1312 - cpumask_t domain, new_mask; 1259 + /* Only try and allocate irqs on cpus that are present */ 1260 + err = -ENOSPC; 1261 + for_each_cpu_and(cpu, mask, cpu_online_mask) { 1313 1262 int new_cpu; 1314 1263 int vector, offset; 1315 1264 1316 - domain = vector_allocation_domain(cpu); 1317 - cpus_and(new_mask, domain, cpu_online_map); 1265 + vector_allocation_domain(cpu, tmp_mask); 1318 1266 1319 1267 vector = current_vector; 1320 1268 offset = current_offset; 1321 1269 next: 1322 1270 vector += 8; 1323 1271 if (vector >= first_system_vector) { 1324 - /* If we run out of vectors on large boxen, must share them. */ 1272 + /* If out of vectors on large boxen, must share them. */ 1325 1273 offset = (offset + 1) % 8; 1326 1274 vector = FIRST_DEVICE_VECTOR + offset; 1327 1275 } 1328 1276 if (unlikely(current_vector == vector)) 1329 1277 continue; 1330 - #ifdef CONFIG_X86_64 1331 - if (vector == IA32_SYSCALL_VECTOR) 1278 + 1279 + if (test_bit(vector, used_vectors)) 1332 1280 goto next; 1333 - #else 1334 - if (vector == SYSCALL_VECTOR) 1335 - goto next; 1336 - #endif 1337 - for_each_cpu_mask_nr(new_cpu, new_mask) 1281 + 1282 + for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) 1338 1283 if (per_cpu(vector_irq, new_cpu)[vector] != -1) 1339 1284 goto next; 1340 1285 /* Found one! 
*/ ··· 1342 1287 current_offset = offset; 1343 1288 if (old_vector) { 1344 1289 cfg->move_in_progress = 1; 1345 - cfg->old_domain = cfg->domain; 1290 + cpumask_copy(cfg->old_domain, cfg->domain); 1346 1291 } 1347 - for_each_cpu_mask_nr(new_cpu, new_mask) 1292 + for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) 1348 1293 per_cpu(vector_irq, new_cpu)[vector] = irq; 1349 1294 cfg->vector = vector; 1350 - cfg->domain = domain; 1351 - return 0; 1295 + cpumask_copy(cfg->domain, tmp_mask); 1296 + err = 0; 1297 + break; 1352 1298 } 1353 - return -ENOSPC; 1299 + free_cpumask_var(tmp_mask); 1300 + return err; 1354 1301 } 1355 1302 1356 - static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) 1303 + static int 1304 + assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) 1357 1305 { 1358 1306 int err; 1359 1307 unsigned long flags; ··· 1369 1311 1370 1312 static void __clear_irq_vector(int irq, struct irq_cfg *cfg) 1371 1313 { 1372 - cpumask_t mask; 1373 1314 int cpu, vector; 1374 1315 1375 1316 BUG_ON(!cfg->vector); 1376 1317 1377 1318 vector = cfg->vector; 1378 - cpus_and(mask, cfg->domain, cpu_online_map); 1379 - for_each_cpu_mask_nr(cpu, mask) 1319 + for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) 1380 1320 per_cpu(vector_irq, cpu)[vector] = -1; 1381 1321 1382 1322 cfg->vector = 0; 1383 - cpus_clear(cfg->domain); 1323 + cpumask_clear(cfg->domain); 1384 1324 1385 1325 if (likely(!cfg->move_in_progress)) 1386 1326 return; 1387 - cpus_and(mask, cfg->old_domain, cpu_online_map); 1388 - for_each_cpu_mask_nr(cpu, mask) { 1327 + for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { 1389 1328 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; 1390 1329 vector++) { 1391 1330 if (per_cpu(vector_irq, cpu)[vector] != irq) ··· 1405 1350 /* Mark the inuse vectors */ 1406 1351 for_each_irq_desc(irq, desc) { 1407 1352 cfg = desc->chip_data; 1408 - if (!cpu_isset(cpu, cfg->domain)) 1353 + if (!cpumask_test_cpu(cpu, cfg->domain)) 1409 1354 continue; 1410 1355 vector = cfg->vector; 1411 1356 per_cpu(vector_irq, cpu)[vector] = irq; ··· 1417 1362 continue; 1418 1363 1419 1364 cfg = irq_cfg(irq); 1420 - if (!cpu_isset(cpu, cfg->domain)) 1365 + if (!cpumask_test_cpu(cpu, cfg->domain)) 1421 1366 per_cpu(vector_irq, cpu)[vector] = -1; 1422 1367 } 1423 1368 } ··· 1553 1498 { 1554 1499 struct irq_cfg *cfg; 1555 1500 struct IO_APIC_route_entry entry; 1556 - cpumask_t mask; 1501 + unsigned int dest; 1557 1502 1558 1503 if (!IO_APIC_IRQ(irq)) 1559 1504 return; 1560 1505 1561 1506 cfg = desc->chip_data; 1562 1507 1563 - mask = TARGET_CPUS; 1564 - if (assign_irq_vector(irq, cfg, mask)) 1508 + if (assign_irq_vector(irq, cfg, TARGET_CPUS)) 1565 1509 return; 1566 1510 1567 - cpus_and(mask, cfg->domain, mask); 1511 + dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); 1568 1512 1569 1513 apic_printk(APIC_VERBOSE,KERN_DEBUG 1570 1514 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " ··· 1573 1519 1574 1520 1575 1521 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, 1576 - cpu_mask_to_apicid(mask), trigger, polarity, 1577 - cfg->vector)) { 1522 + dest, trigger, polarity, cfg->vector)) { 1578 1523 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1579 1524 mp_ioapics[apic].mp_apicid, pin); 1580 1525 __clear_irq_vector(irq, cfg); ··· 2293 2240 unsigned long flags; 2294 2241 2295 2242 spin_lock_irqsave(&vector_lock, flags); 2296 - send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); 2243 + 
send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2297 2244 spin_unlock_irqrestore(&vector_lock, flags); 2298 2245 2299 2246 return 1; ··· 2342 2289 * as simple as edge triggered migration and we can do the irq migration 2343 2290 * with a simple atomic update to IO-APIC RTE. 2344 2291 */ 2345 - static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) 2292 + static void 2293 + migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) 2346 2294 { 2347 2295 struct irq_cfg *cfg; 2348 - cpumask_t tmp, cleanup_mask; 2349 2296 struct irte irte; 2350 2297 int modify_ioapic_rte; 2351 2298 unsigned int dest; 2352 2299 unsigned long flags; 2353 2300 unsigned int irq; 2354 2301 2355 - cpus_and(tmp, mask, cpu_online_map); 2356 - if (cpus_empty(tmp)) 2302 + if (!cpumask_intersects(mask, cpu_online_mask)) 2357 2303 return; 2358 2304 2359 2305 irq = desc->irq; ··· 2365 2313 2366 2314 set_extra_move_desc(desc, mask); 2367 2315 2368 - cpus_and(tmp, cfg->domain, mask); 2369 - dest = cpu_mask_to_apicid(tmp); 2316 + dest = cpu_mask_to_apicid_and(cfg->domain, mask); 2370 2317 2371 2318 modify_ioapic_rte = desc->status & IRQ_LEVEL; 2372 2319 if (modify_ioapic_rte) { ··· 2382 2331 */ 2383 2332 modify_irte(irq, &irte); 2384 2333 2385 - if (cfg->move_in_progress) { 2386 - cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); 2387 - cfg->move_cleanup_count = cpus_weight(cleanup_mask); 2388 - send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 2389 - cfg->move_in_progress = 0; 2390 - } 2334 + if (cfg->move_in_progress) 2335 + send_cleanup_vector(cfg); 2391 2336 2392 - desc->affinity = mask; 2337 + cpumask_copy(&desc->affinity, mask); 2393 2338 } 2394 2339 2395 2340 static int migrate_irq_remapped_level_desc(struct irq_desc *desc) ··· 2407 2360 } 2408 2361 2409 2362 /* everthing is clear. we have right of way */ 2410 - migrate_ioapic_irq_desc(desc, desc->pending_mask); 2363 + migrate_ioapic_irq_desc(desc, &desc->pending_mask); 2411 2364 2412 2365 ret = 0; 2413 2366 desc->status &= ~IRQ_MOVE_PENDING; 2414 - cpus_clear(desc->pending_mask); 2367 + cpumask_clear(&desc->pending_mask); 2415 2368 2416 2369 unmask: 2417 2370 unmask_IO_APIC_irq_desc(desc); ··· 2436 2389 continue; 2437 2390 } 2438 2391 2439 - desc->chip->set_affinity(irq, desc->pending_mask); 2392 + desc->chip->set_affinity(irq, &desc->pending_mask); 2440 2393 spin_unlock_irqrestore(&desc->lock, flags); 2441 2394 } 2442 2395 } ··· 2445 2398 /* 2446 2399 * Migrates the IRQ destination in the process context. 
2447 2400 */ 2448 - static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask) 2401 + static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, 2402 + const struct cpumask *mask) 2449 2403 { 2450 2404 if (desc->status & IRQ_LEVEL) { 2451 2405 desc->status |= IRQ_MOVE_PENDING; 2452 - desc->pending_mask = mask; 2406 + cpumask_copy(&desc->pending_mask, mask); 2453 2407 migrate_irq_remapped_level_desc(desc); 2454 2408 return; 2455 2409 } 2456 2410 2457 2411 migrate_ioapic_irq_desc(desc, mask); 2458 2412 } 2459 - static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) 2413 + static void set_ir_ioapic_affinity_irq(unsigned int irq, 2414 + const struct cpumask *mask) 2460 2415 { 2461 2416 struct irq_desc *desc = irq_to_desc(irq); 2462 2417 ··· 2493 2444 if (!cfg->move_cleanup_count) 2494 2445 goto unlock; 2495 2446 2496 - if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) 2447 + if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2497 2448 goto unlock; 2498 2449 2499 2450 __get_cpu_var(vector_irq)[vector] = -1; ··· 2530 2481 2531 2482 vector = ~get_irq_regs()->orig_ax; 2532 2483 me = smp_processor_id(); 2533 - if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) { 2534 - cpumask_t cleanup_mask; 2535 - 2536 2484 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC 2537 2485 *descp = desc = move_irq_desc(desc, me); 2538 2486 /* get the new one */ 2539 2487 cfg = desc->chip_data; 2540 2488 #endif 2541 2489 2542 - cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); 2543 - cfg->move_cleanup_count = cpus_weight(cleanup_mask); 2544 - send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 2545 - cfg->move_in_progress = 0; 2546 - } 2490 + if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2491 + send_cleanup_vector(cfg); 2547 2492 } 2548 2493 #else 2549 2494 static inline void irq_complete_move(struct irq_desc **descp) {} ··· 3259 3216 struct irq_cfg *cfg; 3260 3217 int err; 3261 3218 unsigned dest; 3262 - cpumask_t tmp; 3263 3219 3264 3220 cfg = irq_cfg(irq); 3265 - tmp = TARGET_CPUS; 3266 - err = assign_irq_vector(irq, cfg, tmp); 3221 + err = assign_irq_vector(irq, cfg, TARGET_CPUS); 3267 3222 if (err) 3268 3223 return err; 3269 3224 3270 - cpus_and(tmp, cfg->domain, tmp); 3271 - dest = cpu_mask_to_apicid(tmp); 3225 + dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); 3272 3226 3273 3227 #ifdef CONFIG_INTR_REMAP 3274 3228 if (irq_remapped(irq)) { ··· 3319 3279 } 3320 3280 3321 3281 #ifdef CONFIG_SMP 3322 - static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) 3282 + static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) 3323 3283 { 3324 3284 struct irq_desc *desc = irq_to_desc(irq); 3325 3285 struct irq_cfg *cfg; 3326 3286 struct msi_msg msg; 3327 3287 unsigned int dest; 3328 - cpumask_t tmp; 3329 3288 3330 - cpus_and(tmp, mask, cpu_online_map); 3331 - if (cpus_empty(tmp)) 3289 + dest = set_desc_affinity(desc, mask); 3290 + if (dest == BAD_APICID) 3332 3291 return; 3333 3292 3334 3293 cfg = desc->chip_data; 3335 - if (assign_irq_vector(irq, cfg, mask)) 3336 - return; 3337 - 3338 - set_extra_move_desc(desc, mask); 3339 - 3340 - cpus_and(tmp, cfg->domain, mask); 3341 - dest = cpu_mask_to_apicid(tmp); 3342 3294 3343 3295 read_msi_msg_desc(desc, &msg); 3344 3296 ··· 3340 3308 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3341 3309 3342 3310 write_msi_msg_desc(desc, &msg); 3343 - desc->affinity = mask; 3344 3311 } 3345 3312 #ifdef CONFIG_INTR_REMAP 3346 3313 /* 3347 3314 * Migrate the MSI irq to 
another cpumask. This migration is 3348 3315 * done in the process context using interrupt-remapping hardware. 3349 3316 */ 3350 - static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) 3317 + static void 3318 + ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) 3351 3319 { 3352 3320 struct irq_desc *desc = irq_to_desc(irq); 3353 - struct irq_cfg *cfg; 3321 + struct irq_cfg *cfg = desc->chip_data; 3354 3322 unsigned int dest; 3355 - cpumask_t tmp, cleanup_mask; 3356 3323 struct irte irte; 3357 - 3358 - cpus_and(tmp, mask, cpu_online_map); 3359 - if (cpus_empty(tmp)) 3360 - return; 3361 3324 3362 3325 if (get_irte(irq, &irte)) 3363 3326 return; 3364 3327 3365 - cfg = desc->chip_data; 3366 - if (assign_irq_vector(irq, cfg, mask)) 3328 + dest = set_desc_affinity(desc, mask); 3329 + if (dest == BAD_APICID) 3367 3330 return; 3368 - 3369 - set_extra_move_desc(desc, mask); 3370 - 3371 - cpus_and(tmp, cfg->domain, mask); 3372 - dest = cpu_mask_to_apicid(tmp); 3373 3331 3374 3332 irte.vector = cfg->vector; 3375 3333 irte.dest_id = IRTE_DEST(dest); ··· 3374 3352 * at the new destination. So, time to cleanup the previous 3375 3353 * vector allocation. 3376 3354 */ 3377 - if (cfg->move_in_progress) { 3378 - cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); 3379 - cfg->move_cleanup_count = cpus_weight(cleanup_mask); 3380 - send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 3381 - cfg->move_in_progress = 0; 3382 - } 3383 - 3384 - desc->affinity = mask; 3355 + if (cfg->move_in_progress) 3356 + send_cleanup_vector(cfg); 3385 3357 } 3386 3358 3387 3359 #endif ··· 3566 3550 3567 3551 #ifdef CONFIG_DMAR 3568 3552 #ifdef CONFIG_SMP 3569 - static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) 3553 + static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) 3570 3554 { 3571 3555 struct irq_desc *desc = irq_to_desc(irq); 3572 3556 struct irq_cfg *cfg; 3573 3557 struct msi_msg msg; 3574 3558 unsigned int dest; 3575 - cpumask_t tmp; 3576 3559 3577 - cpus_and(tmp, mask, cpu_online_map); 3578 - if (cpus_empty(tmp)) 3560 + dest = set_desc_affinity(desc, mask); 3561 + if (dest == BAD_APICID) 3579 3562 return; 3580 3563 3581 3564 cfg = desc->chip_data; 3582 - if (assign_irq_vector(irq, cfg, mask)) 3583 - return; 3584 - 3585 - set_extra_move_desc(desc, mask); 3586 - 3587 - cpus_and(tmp, cfg->domain, mask); 3588 - dest = cpu_mask_to_apicid(tmp); 3589 3565 3590 3566 dmar_msi_read(irq, &msg); 3591 3567 ··· 3587 3579 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3588 3580 3589 3581 dmar_msi_write(irq, &msg); 3590 - desc->affinity = mask; 3591 3582 } 3592 3583 3593 3584 #endif /* CONFIG_SMP */ ··· 3620 3613 #ifdef CONFIG_HPET_TIMER 3621 3614 3622 3615 #ifdef CONFIG_SMP 3623 - static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) 3616 + static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) 3624 3617 { 3625 3618 struct irq_desc *desc = irq_to_desc(irq); 3626 3619 struct irq_cfg *cfg; 3627 3620 struct msi_msg msg; 3628 3621 unsigned int dest; 3629 - cpumask_t tmp; 3630 3622 3631 - cpus_and(tmp, mask, cpu_online_map); 3632 - if (cpus_empty(tmp)) 3623 + dest = set_desc_affinity(desc, mask); 3624 + if (dest == BAD_APICID) 3633 3625 return; 3634 3626 3635 3627 cfg = desc->chip_data; 3636 - if (assign_irq_vector(irq, cfg, mask)) 3637 - return; 3638 - 3639 - set_extra_move_desc(desc, mask); 3640 - 3641 - cpus_and(tmp, cfg->domain, mask); 3642 - dest = cpu_mask_to_apicid(tmp); 3643 3628 3644 3629 hpet_msi_read(irq, 
&msg); 3645 3630 ··· 3641 3642 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3642 3643 3643 3644 hpet_msi_write(irq, &msg); 3644 - desc->affinity = mask; 3645 3645 } 3646 3646 3647 3647 #endif /* CONFIG_SMP */ ··· 3695 3697 write_ht_irq_msg(irq, &msg); 3696 3698 } 3697 3699 3698 - static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) 3700 + static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) 3699 3701 { 3700 3702 struct irq_desc *desc = irq_to_desc(irq); 3701 3703 struct irq_cfg *cfg; 3702 3704 unsigned int dest; 3703 - cpumask_t tmp; 3704 3705 3705 - cpus_and(tmp, mask, cpu_online_map); 3706 - if (cpus_empty(tmp)) 3706 + dest = set_desc_affinity(desc, mask); 3707 + if (dest == BAD_APICID) 3707 3708 return; 3708 3709 3709 3710 cfg = desc->chip_data; 3710 - if (assign_irq_vector(irq, cfg, mask)) 3711 - return; 3712 - 3713 - set_extra_move_desc(desc, mask); 3714 - 3715 - cpus_and(tmp, cfg->domain, mask); 3716 - dest = cpu_mask_to_apicid(tmp); 3717 3711 3718 3712 target_ht_irq(irq, dest, cfg->vector); 3719 - desc->affinity = mask; 3720 3713 } 3721 3714 3722 3715 #endif ··· 3727 3738 { 3728 3739 struct irq_cfg *cfg; 3729 3740 int err; 3730 - cpumask_t tmp; 3731 3741 3732 3742 cfg = irq_cfg(irq); 3733 - tmp = TARGET_CPUS; 3734 - err = assign_irq_vector(irq, cfg, tmp); 3743 + err = assign_irq_vector(irq, cfg, TARGET_CPUS); 3735 3744 if (!err) { 3736 3745 struct ht_irq_msg msg; 3737 3746 unsigned dest; 3738 3747 3739 - cpus_and(tmp, cfg->domain, tmp); 3740 - dest = cpu_mask_to_apicid(tmp); 3748 + dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); 3741 3749 3742 3750 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3743 3751 ··· 3770 3784 int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, 3771 3785 unsigned long mmr_offset) 3772 3786 { 3773 - const cpumask_t *eligible_cpu = get_cpu_mask(cpu); 3787 + const struct cpumask *eligible_cpu = cpumask_of(cpu); 3774 3788 struct irq_cfg *cfg; 3775 3789 int mmr_pnode; 3776 3790 unsigned long mmr_value; ··· 3780 3794 3781 3795 cfg = irq_cfg(irq); 3782 3796 3783 - err = assign_irq_vector(irq, cfg, *eligible_cpu); 3797 + err = assign_irq_vector(irq, cfg, eligible_cpu); 3784 3798 if (err != 0) 3785 3799 return err; 3786 3800 ··· 3799 3813 entry->polarity = 0; 3800 3814 entry->trigger = 0; 3801 3815 entry->mask = 0; 3802 - entry->dest = cpu_mask_to_apicid(*eligible_cpu); 3816 + entry->dest = cpu_mask_to_apicid(eligible_cpu); 3803 3817 3804 3818 mmr_pnode = uv_blade_to_pnode(mmr_blade); 3805 3819 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); ··· 4010 4024 int pin, ioapic, irq, irq_entry; 4011 4025 struct irq_desc *desc; 4012 4026 struct irq_cfg *cfg; 4013 - cpumask_t mask; 4027 + const struct cpumask *mask; 4014 4028 4015 4029 if (skip_ioapic_setup == 1) 4016 4030 return; ··· 4041 4055 */ 4042 4056 if (desc->status & 4043 4057 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 4044 - mask = desc->affinity; 4058 + mask = &desc->affinity; 4045 4059 else 4046 4060 mask = TARGET_CPUS; 4047 4061
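The io_apic.c changes above convert struct irq_cfg's domain and old_domain from embedded cpumask_t members to cpumask_var_t, and route every temporary mask through alloc_cpumask_var()/free_cpumask_var(), with an explicit fallback path when the allocation fails (send_cleanup_vector() above). A minimal sketch of that lifecycle, using only calls that already appear in these hunks; the helper name and the GFP_ATOMIC choice are illustrative, not part of the commit:

#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Sketch: count the online cpus in an irq's vector domain without an
 * on-stack cpumask_t.  If no heap mask can be had, iterate directly,
 * mirroring the fallback shape of send_cleanup_vector() above. */
static unsigned int online_cpus_in_domain(const struct cpumask *domain)
{
	cpumask_var_t tmp;
	unsigned int cpu, n = 0;

	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
		for_each_cpu_and(cpu, domain, cpu_online_mask)
			n++;
		return n;
	}
	cpumask_and(tmp, domain, cpu_online_mask);
	n = cpumask_weight(tmp);
	free_cpumask_var(tmp);
	return n;
}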
+20 -8
arch/x86/kernel/ipi.c
··· 116 116 /* 117 117 * This is only used on smaller machines. 118 118 */ 119 - void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) 119 + void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector) 120 120 { 121 - unsigned long mask = cpus_addr(cpumask)[0]; 121 + unsigned long mask = cpumask_bits(cpumask)[0]; 122 122 unsigned long flags; 123 123 124 124 local_irq_save(flags); 125 - WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); 125 + WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); 126 126 __send_IPI_dest_field(mask, vector); 127 127 local_irq_restore(flags); 128 128 } 129 129 130 - void send_IPI_mask_sequence(cpumask_t mask, int vector) 130 + void send_IPI_mask_sequence(const struct cpumask *mask, int vector) 131 131 { 132 132 unsigned long flags; 133 133 unsigned int query_cpu; ··· 139 139 */ 140 140 141 141 local_irq_save(flags); 142 - for_each_possible_cpu(query_cpu) { 143 - if (cpu_isset(query_cpu, mask)) { 142 + for_each_cpu(query_cpu, mask) 143 + __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector); 144 + local_irq_restore(flags); 145 + } 146 + 147 + void send_IPI_mask_allbutself(const struct cpumask *mask, int vector) 148 + { 149 + unsigned long flags; 150 + unsigned int query_cpu; 151 + unsigned int this_cpu = smp_processor_id(); 152 + 153 + /* See Hack comment above */ 154 + 155 + local_irq_save(flags); 156 + for_each_cpu(query_cpu, mask) 157 + if (query_cpu != this_cpu) 144 158 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), 145 159 vector); 146 - } 147 - } 148 160 local_irq_restore(flags); 149 161 } 150 162
+3
arch/x86/kernel/irq.c
··· 9 9 #include <asm/apic.h> 10 10 #include <asm/io_apic.h> 11 11 #include <asm/smp.h> 12 + #include <asm/irq.h> 12 13 13 14 atomic_t irq_err_count; 14 15 ··· 191 190 #endif 192 191 return sum; 193 192 } 193 + 194 + EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
+7 -6
arch/x86/kernel/irq_32.c
··· 233 233 #ifdef CONFIG_HOTPLUG_CPU 234 234 #include <mach_apic.h> 235 235 236 - void fixup_irqs(cpumask_t map) 236 + /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ 237 + void fixup_irqs(void) 237 238 { 238 239 unsigned int irq; 239 240 static int warned; 240 241 struct irq_desc *desc; 241 242 242 243 for_each_irq_desc(irq, desc) { 243 - cpumask_t mask; 244 + const struct cpumask *affinity; 244 245 245 246 if (!desc) 246 247 continue; 247 248 if (irq == 2) 248 249 continue; 249 250 250 - cpus_and(mask, desc->affinity, map); 251 - if (any_online_cpu(mask) == NR_CPUS) { 251 + affinity = &desc->affinity; 252 + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { 252 253 printk("Breaking affinity for irq %i\n", irq); 253 - mask = map; 254 + affinity = cpu_all_mask; 254 255 } 255 256 if (desc->chip->set_affinity) 256 - desc->chip->set_affinity(irq, mask); 257 + desc->chip->set_affinity(irq, affinity); 257 258 else if (desc->action && !(warned++)) 258 259 printk("Cannot set affinity for irq %i\n", irq); 259 260 }
+8 -7
arch/x86/kernel/irq_64.c
··· 80 80 } 81 81 82 82 #ifdef CONFIG_HOTPLUG_CPU 83 - void fixup_irqs(cpumask_t map) 83 + /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ 84 + void fixup_irqs(void) 84 85 { 85 86 unsigned int irq; 86 87 static int warned; 87 88 struct irq_desc *desc; 88 89 89 90 for_each_irq_desc(irq, desc) { 90 - cpumask_t mask; 91 91 int break_affinity = 0; 92 92 int set_affinity = 1; 93 + const struct cpumask *affinity; 93 94 94 95 if (!desc) 95 96 continue; ··· 100 99 /* interrupt's are disabled at this point */ 101 100 spin_lock(&desc->lock); 102 101 102 + affinity = &desc->affinity; 103 103 if (!irq_has_action(irq) || 104 - cpus_equal(desc->affinity, map)) { 104 + cpumask_equal(affinity, cpu_online_mask)) { 105 105 spin_unlock(&desc->lock); 106 106 continue; 107 107 } 108 108 109 - cpus_and(mask, desc->affinity, map); 110 - if (cpus_empty(mask)) { 109 + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { 111 110 break_affinity = 1; 112 - mask = map; 111 + affinity = cpu_all_mask; 113 112 } 114 113 115 114 if (desc->chip->mask) 116 115 desc->chip->mask(irq); 117 116 118 117 if (desc->chip->set_affinity) 119 - desc->chip->set_affinity(irq, mask); 118 + desc->chip->set_affinity(irq, affinity); 120 119 else if (!(warned++)) 121 120 set_affinity = 0; 122 121
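Both fixup_irqs() variants above now take no argument and detect a dead affinity with a single cpumask_any_and() test against cpu_online_mask instead of building an intermediate cpumask_t. A hedged sketch of just that test; the wrapper name is illustrative only:

#include <linux/cpumask.h>

/* Sketch: an irq's affinity must be broken (reset to cpu_all_mask, as
 * fixup_irqs() does above) once none of its cpus is still online.
 * cpumask_any_and() returns nr_cpu_ids or more when the AND is empty. */
static int affinity_needs_break(const struct cpumask *affinity)
{
	return cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids;
}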
+15 -1
arch/x86/kernel/irqinit_32.c
··· 110 110 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 111 111 }; 112 112 113 + int vector_used_by_percpu_irq(unsigned int vector) 114 + { 115 + int cpu; 116 + 117 + for_each_online_cpu(cpu) { 118 + if (per_cpu(vector_irq, cpu)[vector] != -1) 119 + return 1; 120 + } 121 + 122 + return 0; 123 + } 124 + 113 125 /* Overridden in paravirt.c */ 114 126 void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); 115 127 ··· 158 146 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); 159 147 160 148 /* IPI for single call function */ 161 - set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt); 149 + alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, 150 + call_function_single_interrupt); 162 151 163 152 /* Low priority IPI to cleanup after moving an irq */ 164 153 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); 154 + set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); 165 155 #endif 166 156 167 157 #ifdef CONFIG_X86_LOCAL_APIC
+13
arch/x86/kernel/irqinit_64.c
··· 69 69 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 70 70 }; 71 71 72 + int vector_used_by_percpu_irq(unsigned int vector) 73 + { 74 + int cpu; 75 + 76 + for_each_online_cpu(cpu) { 77 + if (per_cpu(vector_irq, cpu)[vector] != -1) 78 + return 1; 79 + } 80 + 81 + return 0; 82 + } 83 + 72 84 void __init init_ISA_irqs(void) 73 85 { 74 86 int i; ··· 133 121 134 122 /* Low priority IPI to cleanup after moving an irq */ 135 123 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); 124 + set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); 136 125 #endif 137 126 } 138 127
+1 -1
arch/x86/kernel/mfgpt_32.c
··· 287 287 .set_mode = mfgpt_set_mode, 288 288 .set_next_event = mfgpt_next_event, 289 289 .rating = 250, 290 - .cpumask = CPU_MASK_ALL, 290 + .cpumask = cpu_all_mask, 291 291 .shift = 32 292 292 }; 293 293
+1 -4
arch/x86/kernel/reboot.c
··· 650 650 651 651 static void smp_send_nmi_allbutself(void) 652 652 { 653 - cpumask_t mask = cpu_online_map; 654 - cpu_clear(safe_smp_processor_id(), mask); 655 - if (!cpus_empty(mask)) 656 - send_IPI_mask(mask, NMI_VECTOR); 653 + send_IPI_allbutself(NMI_VECTOR); 657 654 } 658 655 659 656 static struct notifier_block crash_nmi_nb = {
+12 -7
arch/x86/kernel/setup_percpu.c
··· 152 152 old_size = PERCPU_ENOUGH_ROOM; 153 153 align = max_t(unsigned long, PAGE_SIZE, align); 154 154 size = roundup(old_size, align); 155 + 156 + printk(KERN_INFO 157 + "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", 158 + NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); 159 + 155 160 printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", 156 161 size); 157 162 ··· 173 168 "cpu %d has no node %d or node-local memory\n", 174 169 cpu, node); 175 170 if (ptr) 176 - printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n", 171 + printk(KERN_DEBUG 172 + "per cpu data for cpu%d at %016lx\n", 177 173 cpu, __pa(ptr)); 178 174 } 179 175 else { 180 176 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, 181 177 __pa(MAX_DMA_ADDRESS)); 182 178 if (ptr) 183 - printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n", 184 - cpu, node, __pa(ptr)); 179 + printk(KERN_DEBUG 180 + "per cpu data for cpu%d on node%d " 181 + "at %016lx\n", 182 + cpu, node, __pa(ptr)); 185 183 } 186 184 #endif 187 185 per_cpu_offset(cpu) = ptr - __per_cpu_start; 188 186 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 189 187 } 190 - 191 - printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n", 192 - NR_CPUS, nr_cpu_ids, nr_node_ids); 193 188 194 189 /* Setup percpu data maps */ 195 190 setup_per_cpu_maps(); ··· 287 282 else 288 283 cpu_clear(cpu, *mask); 289 284 290 - cpulist_scnprintf(buf, sizeof(buf), *mask); 285 + cpulist_scnprintf(buf, sizeof(buf), mask); 291 286 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", 292 287 enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf); 293 288 }
+4 -4
arch/x86/kernel/smp.c
··· 118 118 WARN_ON(1); 119 119 return; 120 120 } 121 - send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); 121 + send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); 122 122 } 123 123 124 124 void native_send_call_func_single_ipi(int cpu) 125 125 { 126 - send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); 126 + send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR); 127 127 } 128 128 129 - void native_send_call_func_ipi(cpumask_t mask) 129 + void native_send_call_func_ipi(const struct cpumask *mask) 130 130 { 131 131 cpumask_t allbutself; 132 132 133 133 allbutself = cpu_online_map; 134 134 cpu_clear(smp_processor_id(), allbutself); 135 135 136 - if (cpus_equal(mask, allbutself) && 136 + if (cpus_equal(*mask, allbutself) && 137 137 cpus_equal(cpu_online_map, cpu_callout_map)) 138 138 send_IPI_allbutself(CALL_FUNCTION_VECTOR); 139 139 else
+22 -11
arch/x86/kernel/smpboot.c
··· 102 102 /* Last level cache ID of each logical CPU */ 103 103 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; 104 104 105 - /* bitmap of online cpus */ 106 - cpumask_t cpu_online_map __read_mostly; 107 - EXPORT_SYMBOL(cpu_online_map); 108 - 109 105 cpumask_t cpu_callin_map; 110 106 cpumask_t cpu_callout_map; 111 - cpumask_t cpu_possible_map; 112 - EXPORT_SYMBOL(cpu_possible_map); 113 107 114 108 /* representing HT siblings of each logical CPU */ 115 109 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); ··· 1254 1260 check_nmi_watchdog(); 1255 1261 } 1256 1262 1263 + static int __initdata setup_possible_cpus = -1; 1264 + static int __init _setup_possible_cpus(char *str) 1265 + { 1266 + get_option(&str, &setup_possible_cpus); 1267 + return 0; 1268 + } 1269 + early_param("possible_cpus", _setup_possible_cpus); 1270 + 1271 + 1257 1272 /* 1258 1273 * cpu_possible_map should be static, it cannot change as cpu's 1259 1274 * are onlined, or offlined. The reason is per-cpu data-structures ··· 1275 1272 * 1276 1273 * Three ways to find out the number of additional hotplug CPUs: 1277 1274 * - If the BIOS specified disabled CPUs in ACPI/mptables use that. 1278 - * - The user can overwrite it with additional_cpus=NUM 1275 + * - The user can overwrite it with possible_cpus=NUM 1279 1276 * - Otherwise don't reserve additional CPUs. 1280 1277 * We do this because additional CPUs waste a lot of memory. 1281 1278 * -AK ··· 1288 1285 if (!num_processors) 1289 1286 num_processors = 1; 1290 1287 1291 - possible = num_processors + disabled_cpus; 1292 - if (possible > NR_CPUS) 1293 - possible = NR_CPUS; 1288 + if (setup_possible_cpus == -1) 1289 + possible = num_processors + disabled_cpus; 1290 + else 1291 + possible = setup_possible_cpus; 1292 + 1293 + if (possible > CONFIG_NR_CPUS) { 1294 + printk(KERN_WARNING 1295 + "%d Processors exceeds NR_CPUS limit of %d\n", 1296 + possible, CONFIG_NR_CPUS); 1297 + possible = CONFIG_NR_CPUS; 1298 + } 1294 1299 1295 1300 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", 1296 1301 possible, max_t(int, possible - num_processors, 0)); ··· 1363 1352 lock_vector_lock(); 1364 1353 remove_cpu_from_maps(cpu); 1365 1354 unlock_vector_lock(); 1366 - fixup_irqs(cpu_online_map); 1355 + fixup_irqs(); 1367 1356 } 1368 1357 1369 1358 int native_cpu_disable(void)
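The smpboot.c hunk above also wires up a new possible_cpus= early parameter: when given, it overrides the BIOS-derived processor count when sizing cpu_possible_map, clamped to CONFIG_NR_CPUS with a warning. As a purely illustrative boot line (the value 8 is an arbitrary example), appending the following to the kernel command line fixes the possible map at eight cpus:

	possible_cpus=8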
+1 -1
arch/x86/kernel/tlb_32.c
··· 163 163 * We have to send the IPI only to 164 164 * CPUs affected. 165 165 */ 166 - send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); 166 + send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR); 167 167 168 168 while (!cpus_empty(flush_cpumask)) 169 169 /* nothing. lockup detection does not belong here */
+1 -1
arch/x86/kernel/tlb_64.c
··· 191 191 * We have to send the IPI only to 192 192 * CPUs affected. 193 193 */ 194 - send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender); 194 + send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender); 195 195 196 196 while (!cpus_empty(f->flush_cpumask)) 197 197 cpu_relax();
+7 -5
arch/x86/kernel/traps.c
··· 72 72 73 73 #include "cpu/mcheck/mce.h" 74 74 75 - DECLARE_BITMAP(used_vectors, NR_VECTORS); 76 - EXPORT_SYMBOL_GPL(used_vectors); 77 - 78 75 asmlinkage int system_call(void); 79 76 80 77 /* Do we ignore FPU interrupts ? */ ··· 85 88 gate_desc idt_table[256] 86 89 __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; 87 90 #endif 91 + 92 + DECLARE_BITMAP(used_vectors, NR_VECTORS); 93 + EXPORT_SYMBOL_GPL(used_vectors); 88 94 89 95 static int ignore_nmis; 90 96 ··· 941 941 942 942 void __init trap_init(void) 943 943 { 944 - #ifdef CONFIG_X86_32 945 944 int i; 946 - #endif 947 945 948 946 #ifdef CONFIG_EISA 949 947 void __iomem *p = early_ioremap(0x0FFFD9, 4); ··· 998 1000 } 999 1001 1000 1002 set_system_trap_gate(SYSCALL_VECTOR, &system_call); 1003 + #endif 1001 1004 1002 1005 /* Reserve all the builtin and the syscall vector: */ 1003 1006 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 1004 1007 set_bit(i, used_vectors); 1005 1008 1009 + #ifdef CONFIG_X86_64 1010 + set_bit(IA32_SYSCALL_VECTOR, used_vectors); 1011 + #else 1006 1012 set_bit(SYSCALL_VECTOR, used_vectors); 1007 1013 #endif 1008 1014 /*
+1 -1
arch/x86/kernel/vmiclock_32.c
··· 226 226 /* Upper bound is clockevent's use of ulong for cycle deltas. */ 227 227 evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt); 228 228 evt->min_delta_ns = clockevent_delta2ns(1, evt); 229 - evt->cpumask = cpumask_of_cpu(cpu); 229 + evt->cpumask = cpumask_of(cpu); 230 230 231 231 printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n", 232 232 evt->name, evt->mult, evt->shift);
+1 -1
arch/x86/lguest/boot.c
··· 738 738 739 739 /* We can't set cpumask in the initializer: damn C limitations! Set it 740 740 * here and register our timer device. */ 741 - lguest_clockevent.cpumask = cpumask_of_cpu(0); 741 + lguest_clockevent.cpumask = cpumask_of(0); 742 742 clockevents_register_device(&lguest_clockevent); 743 743 744 744 /* Finally, we unblock the timer interrupt. */
+3 -2
arch/x86/mach-generic/bigsmp.c
··· 42 42 { } 43 43 }; 44 44 45 - static cpumask_t vector_allocation_domain(int cpu) 45 + static void vector_allocation_domain(int cpu, cpumask_t *retmask) 46 46 { 47 - return cpumask_of_cpu(cpu); 47 + cpus_clear(*retmask); 48 + cpu_set(cpu, *retmask); 48 49 } 49 50 50 51 static int probe_bigsmp(void)
+2 -3
arch/x86/mach-generic/es7000.c
··· 87 87 } 88 88 #endif 89 89 90 - static cpumask_t vector_allocation_domain(int cpu) 90 + static void vector_allocation_domain(int cpu, cpumask_t *retmask) 91 91 { 92 92 /* Careful. Some cpus do not strictly honor the set of cpus 93 93 * specified in the interrupt destination when using lowest ··· 97 97 * deliver interrupts to the wrong hyperthread when only one 98 98 * hyperthread was specified in the interrupt desitination. 99 99 */ 100 - cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 101 - return domain; 100 + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; 102 101 } 103 102 104 103 struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
+2 -3
arch/x86/mach-generic/numaq.c
··· 38 38 return 0; 39 39 } 40 40 41 - static cpumask_t vector_allocation_domain(int cpu) 41 + static void vector_allocation_domain(int cpu, cpumask_t *retmask) 42 42 { 43 43 /* Careful. Some cpus do not strictly honor the set of cpus 44 44 * specified in the interrupt destination when using lowest ··· 48 48 * deliver interrupts to the wrong hyperthread when only one 49 49 * hyperthread was specified in the interrupt desitination. 50 50 */ 51 - cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 52 - return domain; 51 + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; 53 52 } 54 53 55 54 struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
+2 -3
arch/x86/mach-generic/summit.c
··· 24 24 return 0; 25 25 } 26 26 27 - static cpumask_t vector_allocation_domain(int cpu) 27 + static void vector_allocation_domain(int cpu, cpumask_t *retmask) 28 28 { 29 29 /* Careful. Some cpus do not strictly honor the set of cpus 30 30 * specified in the interrupt destination when using lowest ··· 34 34 * deliver interrupts to the wrong hyperthread when only one 35 35 * hyperthread was specified in the interrupt desitination. 36 36 */ 37 - cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 38 - return domain; 37 + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; 39 38 } 40 39 41 40 struct genapic apic_summit = APIC_INIT("summit", probe_summit);
+1 -8
arch/x86/mach-voyager/voyager_smp.c
··· 63 63 /* Used for the invalidate map that's also checked in the spinlock */ 64 64 static volatile unsigned long smp_invalidate_needed; 65 65 66 - /* Bitmask of currently online CPUs - used by setup.c for 67 - /proc/cpuinfo, visible externally but still physical */ 68 - cpumask_t cpu_online_map = CPU_MASK_NONE; 69 - EXPORT_SYMBOL(cpu_online_map); 70 - 71 66 /* Bitmask of CPUs present in the system - exported by i386_syms.c, used 72 67 * by scheduler but indexed physically */ 73 68 cpumask_t phys_cpu_present_map = CPU_MASK_NONE; ··· 213 218 /* This is for the new dynamic CPU boot code */ 214 219 cpumask_t cpu_callin_map = CPU_MASK_NONE; 215 220 cpumask_t cpu_callout_map = CPU_MASK_NONE; 216 - cpumask_t cpu_possible_map = CPU_MASK_NONE; 217 - EXPORT_SYMBOL(cpu_possible_map); 218 221 219 222 /* The per processor IRQ masks (these are usually kept in sync) */ 220 223 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; ··· 672 679 673 680 /* loop over all the extended VIC CPUs and boot them. The 674 681 * Quad CPUs must be bootstrapped by their extended VIC cpu */ 675 - for (i = 0; i < NR_CPUS; i++) { 682 + for (i = 0; i < nr_cpu_ids; i++) { 676 683 if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) 677 684 continue; 678 685 do_boot_cpu(i);
+2 -2
arch/x86/mm/numa_64.c
··· 278 278 int rr, i; 279 279 280 280 rr = first_node(node_online_map); 281 - for (i = 0; i < NR_CPUS; i++) { 281 + for (i = 0; i < nr_cpu_ids; i++) { 282 282 if (early_cpu_to_node(i) != NUMA_NO_NODE) 283 283 continue; 284 284 numa_set_node(i, rr); ··· 549 549 memnodemap[0] = 0; 550 550 node_set_online(0); 551 551 node_set(0, node_possible_map); 552 - for (i = 0; i < NR_CPUS; i++) 552 + for (i = 0; i < nr_cpu_ids; i++) 553 553 numa_set_node(i, 0); 554 554 e820_register_active_regions(0, start_pfn, last_pfn); 555 555 setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
+1 -1
arch/x86/mm/srat_64.c
··· 382 382 if (!node_online(i)) 383 383 setup_node_bootmem(i, nodes[i].start, nodes[i].end); 384 384 385 - for (i = 0; i < NR_CPUS; i++) { 385 + for (i = 0; i < nr_cpu_ids; i++) { 386 386 int node = early_cpu_to_node(i); 387 387 388 388 if (node == NUMA_NO_NODE)
+15 -5
arch/x86/xen/mmu.c
··· 1082 1082 1083 1083 static void xen_drop_mm_ref(struct mm_struct *mm) 1084 1084 { 1085 - cpumask_t mask; 1085 + cpumask_var_t mask; 1086 1086 unsigned cpu; 1087 1087 1088 1088 if (current->active_mm == mm) { ··· 1094 1094 } 1095 1095 1096 1096 /* Get the "official" set of cpus referring to our pagetable. */ 1097 - mask = mm->cpu_vm_mask; 1097 + if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) { 1098 + for_each_online_cpu(cpu) { 1099 + if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask) 1100 + && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) 1101 + continue; 1102 + smp_call_function_single(cpu, drop_other_mm_ref, mm, 1); 1103 + } 1104 + return; 1105 + } 1106 + cpumask_copy(mask, &mm->cpu_vm_mask); 1098 1107 1099 1108 /* It's possible that a vcpu may have a stale reference to our 1100 1109 cr3, because its in lazy mode, and it hasn't yet flushed ··· 1112 1103 if needed. */ 1113 1104 for_each_online_cpu(cpu) { 1114 1105 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) 1115 - cpu_set(cpu, mask); 1106 + cpumask_set_cpu(cpu, mask); 1116 1107 } 1117 1108 1118 - if (!cpus_empty(mask)) 1119 - smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); 1109 + if (!cpumask_empty(mask)) 1110 + smp_call_function_many(mask, drop_other_mm_ref, mm, 1); 1111 + free_cpumask_var(mask); 1120 1112 } 1121 1113 #else 1122 1114 static void xen_drop_mm_ref(struct mm_struct *mm)
+15 -12
arch/x86/xen/smp.c
··· 33 33 #include "xen-ops.h" 34 34 #include "mmu.h" 35 35 36 - cpumask_t xen_cpu_initialized_map; 36 + cpumask_var_t xen_cpu_initialized_map; 37 37 38 38 static DEFINE_PER_CPU(int, resched_irq); 39 39 static DEFINE_PER_CPU(int, callfunc_irq); ··· 158 158 { 159 159 int i, rc; 160 160 161 - for (i = 0; i < NR_CPUS; i++) { 161 + for (i = 0; i < nr_cpu_ids; i++) { 162 162 rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); 163 163 if (rc >= 0) { 164 164 num_processors++; ··· 192 192 if (xen_smp_intr_init(0)) 193 193 BUG(); 194 194 195 - xen_cpu_initialized_map = cpumask_of_cpu(0); 195 + if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL)) 196 + panic("could not allocate xen_cpu_initialized_map\n"); 197 + 198 + cpumask_copy(xen_cpu_initialized_map, cpumask_of(0)); 196 199 197 200 /* Restrict the possible_map according to max_cpus. */ 198 201 while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { 199 - for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--) 202 + for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) 200 203 continue; 201 204 cpu_clear(cpu, cpu_possible_map); 202 205 } ··· 224 221 struct vcpu_guest_context *ctxt; 225 222 struct desc_struct *gdt; 226 223 227 - if (cpu_test_and_set(cpu, xen_cpu_initialized_map)) 224 + if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map)) 228 225 return 0; 229 226 230 227 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); ··· 411 408 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); 412 409 } 413 410 414 - static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) 411 + static void xen_send_IPI_mask(const struct cpumask *mask, 412 + enum ipi_vector vector) 415 413 { 416 414 unsigned cpu; 417 415 418 - cpus_and(mask, mask, cpu_online_map); 419 - 420 - for_each_cpu_mask_nr(cpu, mask) 416 + for_each_cpu_and(cpu, mask, cpu_online_mask) 421 417 xen_send_IPI_one(cpu, vector); 422 418 } 423 419 424 - static void xen_smp_send_call_function_ipi(cpumask_t mask) 420 + static void xen_smp_send_call_function_ipi(const struct cpumask *mask) 425 421 { 426 422 int cpu; 427 423 428 424 xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); 429 425 430 426 /* Make sure other vcpus get a chance to run if they need to. */ 431 - for_each_cpu_mask_nr(cpu, mask) { 427 + for_each_cpu(cpu, mask) { 432 428 if (xen_vcpu_stolen(cpu)) { 433 429 HYPERVISOR_sched_op(SCHEDOP_yield, 0); 434 430 break; ··· 437 435 438 436 static void xen_smp_send_call_function_single_ipi(int cpu) 439 437 { 440 - xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR); 438 + xen_send_IPI_mask(cpumask_of(cpu), 439 + XEN_CALL_FUNCTION_SINGLE_VECTOR); 441 440 } 442 441 443 442 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
+2 -1
arch/x86/xen/suspend.c
··· 35 35 pfn_to_mfn(xen_start_info->console.domU.mfn); 36 36 } else { 37 37 #ifdef CONFIG_SMP 38 - xen_cpu_initialized_map = cpu_online_map; 38 + BUG_ON(xen_cpu_initialized_map == NULL); 39 + cpumask_copy(xen_cpu_initialized_map, cpu_online_mask); 39 40 #endif 40 41 xen_vcpu_restore(); 41 42 }
+1 -1
arch/x86/xen/time.c
··· 437 437 evt = &per_cpu(xen_clock_events, cpu); 438 438 memcpy(evt, xen_clockevent, sizeof(*evt)); 439 439 440 - evt->cpumask = cpumask_of_cpu(cpu); 440 + evt->cpumask = cpumask_of(cpu); 441 441 evt->irq = irq; 442 442 443 443 setup_runstate_info(cpu);
+1 -1
arch/x86/xen/xen-ops.h
··· 58 58 __cpuinit void xen_init_lock_cpu(int cpu); 59 59 void xen_uninit_lock_cpu(int cpu); 60 60 61 - extern cpumask_t xen_cpu_initialized_map; 61 + extern cpumask_var_t xen_cpu_initialized_map; 62 62 #else 63 63 static inline void xen_smp_init(void) {} 64 64 #endif
+1 -1
drivers/base/cpu.c
··· 109 109 */ 110 110 static ssize_t print_cpus_map(char *buf, cpumask_t *map) 111 111 { 112 - int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *map); 112 + int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map); 113 113 114 114 buf[n++] = '\n'; 115 115 buf[n] = '\0';
+2 -2
drivers/base/node.c
··· 30 30 BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); 31 31 32 32 len = type? 33 - cpulist_scnprintf(buf, PAGE_SIZE-2, *mask): 34 - cpumask_scnprintf(buf, PAGE_SIZE-2, *mask); 33 + cpulist_scnprintf(buf, PAGE_SIZE-2, mask) : 34 + cpumask_scnprintf(buf, PAGE_SIZE-2, mask); 35 35 buf[len++] = '\n'; 36 36 buf[len] = '\0'; 37 37 return len;
+2 -2
drivers/base/topology.c
··· 49 49 50 50 if (len > 1) { 51 51 n = type? 52 - cpulist_scnprintf(buf, len-2, *mask): 53 - cpumask_scnprintf(buf, len-2, *mask); 52 + cpulist_scnprintf(buf, len-2, mask) : 53 + cpumask_scnprintf(buf, len-2, mask); 54 54 buf[n++] = '\n'; 55 55 buf[n] = '\0'; 56 56 }
+1 -1
drivers/clocksource/tcb_clksrc.c
··· 154 154 .shift = 32, 155 155 /* Should be lower than at91rm9200's system timer */ 156 156 .rating = 125, 157 - .cpumask = CPU_MASK_CPU0, 158 157 .set_next_event = tc_next_event, 159 158 .set_mode = tc_mode, 160 159 }, ··· 194 195 clkevt.clkevt.max_delta_ns 195 196 = clockevent_delta2ns(0xffff, &clkevt.clkevt); 196 197 clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1; 198 + clkevt.clkevt.cpumask = cpumask_of(0); 197 199 198 200 setup_irq(irq, &tc_irqaction); 199 201
+9 -4
drivers/lguest/interrupts_and_traps.c
··· 222 222 int init_interrupts(void) 223 223 { 224 224 /* If they want some strange system call vector, reserve it now */ 225 - if (syscall_vector != SYSCALL_VECTOR 226 - && test_and_set_bit(syscall_vector, used_vectors)) { 227 - printk("lg: couldn't reserve syscall %u\n", syscall_vector); 228 - return -EBUSY; 225 + if (syscall_vector != SYSCALL_VECTOR) { 226 + if (test_bit(syscall_vector, used_vectors) || 227 + vector_used_by_percpu_irq(syscall_vector)) { 228 + printk(KERN_ERR "lg: couldn't reserve syscall %u\n", 229 + syscall_vector); 230 + return -EBUSY; 231 + } 232 + set_bit(syscall_vector, used_vectors); 229 233 } 234 + 230 235 return 0; 231 236 } 232 237
+4 -3
drivers/parisc/iosapic.c
··· 704 704 } 705 705 706 706 #ifdef CONFIG_SMP 707 - static void iosapic_set_affinity_irq(unsigned int irq, cpumask_t dest) 707 + static void iosapic_set_affinity_irq(unsigned int irq, 708 + const struct cpumask *dest) 708 709 { 709 710 struct vector_info *vi = iosapic_get_vector(irq); 710 711 u32 d0, d1, dummy_d0; 711 712 unsigned long flags; 712 713 713 - if (cpu_check_affinity(irq, &dest)) 714 + if (cpu_check_affinity(irq, dest)) 714 715 return; 715 716 716 - vi->txn_addr = txn_affinity_addr(irq, first_cpu(dest)); 717 + vi->txn_addr = txn_affinity_addr(irq, cpumask_first(dest)); 717 718 718 719 spin_lock_irqsave(&iosapic_lock, flags); 719 720 /* d1 contains the destination CPU, so only want to set that
+2 -2
drivers/pci/pci-sysfs.c
··· 74 74 int len; 75 75 76 76 mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); 77 - len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask); 77 + len = cpumask_scnprintf(buf, PAGE_SIZE-2, &mask); 78 78 buf[len++] = '\n'; 79 79 buf[len] = '\0'; 80 80 return len; ··· 88 88 int len; 89 89 90 90 mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); 91 - len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask); 91 + len = cpulist_scnprintf(buf, PAGE_SIZE-2, &mask); 92 92 buf[len++] = '\n'; 93 93 buf[len] = '\0'; 94 94 return len;
+2 -2
drivers/pci/probe.c
··· 55 55 56 56 cpumask = pcibus_to_cpumask(to_pci_bus(dev)); 57 57 ret = type? 58 - cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask): 59 - cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask); 58 + cpulist_scnprintf(buf, PAGE_SIZE-2, &cpumask) : 59 + cpumask_scnprintf(buf, PAGE_SIZE-2, &cpumask); 60 60 buf[ret++] = '\n'; 61 61 buf[ret] = '\0'; 62 62 return ret;
+3 -3
drivers/xen/events.c
··· 585 585 spin_unlock(&irq_mapping_update_lock); 586 586 587 587 /* new event channels are always bound to cpu 0 */ 588 - irq_set_affinity(irq, cpumask_of_cpu(0)); 588 + irq_set_affinity(irq, cpumask_of(0)); 589 589 590 590 /* Unmask the event channel. */ 591 591 enable_irq(irq); ··· 614 614 } 615 615 616 616 617 - static void set_affinity_irq(unsigned irq, cpumask_t dest) 617 + static void set_affinity_irq(unsigned irq, const struct cpumask *dest) 618 618 { 619 - unsigned tcpu = first_cpu(dest); 619 + unsigned tcpu = cpumask_first(dest); 620 620 rebind_irq_to_cpu(irq, tcpu); 621 621 } 622 622
+13 -1
include/asm-generic/topology.h
··· 40 40 #ifndef node_to_cpumask 41 41 #define node_to_cpumask(node) ((void)node, cpu_online_map) 42 42 #endif 43 + #ifndef cpumask_of_node 44 + #define cpumask_of_node(node) ((void)node, cpu_online_mask) 45 + #endif 43 46 #ifndef node_to_first_cpu 44 47 #define node_to_first_cpu(node) ((void)(node),0) 45 48 #endif ··· 57 54 ) 58 55 #endif 59 56 57 + #ifndef cpumask_of_pcibus 58 + #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ 59 + cpu_all_mask : \ 60 + cpumask_of_node(pcibus_to_node(bus))) 61 + #endif 62 + 60 63 #endif /* CONFIG_NUMA */ 61 64 62 - /* returns pointer to cpumask for specified node */ 65 + /* 66 + * returns pointer to cpumask for specified node 67 + * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 68 + */ 63 69 #ifndef node_to_cpumask_ptr 64 70 65 71 #define node_to_cpumask_ptr(v, node) \
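The generic topology fallbacks above gain cpumask_of_node() and cpumask_of_pcibus(), both yielding const pointers, while the in-tree comment now marks node_to_cpumask_ptr() as deprecated in favour of the former. A short usage sketch under that recommendation; the function name is illustrative:

#include <linux/topology.h>
#include <linux/cpumask.h>

/* Sketch: the helper returns a shared const mask, so no copy is made. */
static unsigned int cpus_on_node(int node)
{
	const struct cpumask *mask = cpumask_of_node(node);

	return cpumask_weight(mask);
}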
-2
include/asm-m32r/smp.h
··· 63 63 #define raw_smp_processor_id() (current_thread_info()->cpu) 64 64 65 65 extern cpumask_t cpu_callout_map; 66 - extern cpumask_t cpu_possible_map; 67 - extern cpumask_t cpu_present_map; 68 66 69 67 static __inline__ int hard_smp_processor_id(void) 70 68 {
+2 -2
include/linux/clockchips.h
··· 82 82 int shift; 83 83 int rating; 84 84 int irq; 85 - cpumask_t cpumask; 85 + const struct cpumask *cpumask; 86 86 int (*set_next_event)(unsigned long evt, 87 87 struct clock_event_device *); 88 88 void (*set_mode)(enum clock_event_mode mode, 89 89 struct clock_event_device *); 90 90 void (*event_handler)(struct clock_event_device *); 91 - void (*broadcast)(cpumask_t mask); 91 + void (*broadcast)(const struct cpumask *mask); 92 92 struct list_head list; 93 93 enum clock_event_mode mode; 94 94 ktime_t next_event;
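With clock_event_device.cpumask now a const struct cpumask * (and broadcast taking the same), drivers stop copying a cpumask_t into each device and simply point at a shared constant mask, as the tcb_clksrc, vmiclock, lguest and Xen hunks in this diff do with cpumask_of() and cpu_all_mask. A minimal registration-side sketch (the wrapper function is hypothetical; evt and cpu come from the driver's own setup):

#include <linux/clockchips.h>
#include <linux/cpumask.h>

/* Sketch: bind a per-cpu event device to its cpu via the shared
 * single-cpu constant mask, then register it. */
static void register_percpu_clockevent(struct clock_event_device *evt,
				       unsigned int cpu)
{
	evt->cpumask = cpumask_of(cpu);
	clockevents_register_device(evt);
}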
+65 -35
include/linux/cpumask.h
··· 339 339 #endif 340 340 #define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) 341 341 342 - #define cpumask_scnprintf(buf, len, src) \ 343 - __cpumask_scnprintf((buf), (len), &(src), NR_CPUS) 344 - static inline int __cpumask_scnprintf(char *buf, int len, 345 - const cpumask_t *srcp, int nbits) 346 - { 347 - return bitmap_scnprintf(buf, len, srcp->bits, nbits); 348 - } 349 - 350 - #define cpumask_parse_user(ubuf, ulen, dst) \ 351 - __cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS) 352 - static inline int __cpumask_parse_user(const char __user *buf, int len, 353 - cpumask_t *dstp, int nbits) 354 - { 355 - return bitmap_parse_user(buf, len, dstp->bits, nbits); 356 - } 357 - 358 - #define cpulist_scnprintf(buf, len, src) \ 359 - __cpulist_scnprintf((buf), (len), &(src), NR_CPUS) 360 - static inline int __cpulist_scnprintf(char *buf, int len, 361 - const cpumask_t *srcp, int nbits) 362 - { 363 - return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); 364 - } 365 - 366 - #define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS) 367 - static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits) 368 - { 369 - return bitmap_parselist(buf, dstp->bits, nbits); 370 - } 371 - 372 342 #define cpu_remap(oldbit, old, new) \ 373 343 __cpu_remap((oldbit), &(old), &(new), NR_CPUS) 374 344 static inline int __cpu_remap(int oldbit, ··· 510 540 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 511 541 } 512 542 513 - /* This produces more efficient code. */ 514 - #define nr_cpumask_bits NR_CPUS 515 - 516 543 #else /* NR_CPUS > BITS_PER_LONG */ 517 544 518 545 #define CPU_BITS_ALL \ ··· 517 550 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ 518 551 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 519 552 } 520 - 521 - #define nr_cpumask_bits nr_cpu_ids 522 553 #endif /* NR_CPUS > BITS_PER_LONG */ 554 + 555 + #ifdef CONFIG_CPUMASK_OFFSTACK 556 + /* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, 557 + * not all bits may be allocated. */ 558 + #define nr_cpumask_bits nr_cpu_ids 559 + #else 560 + #define nr_cpumask_bits NR_CPUS 561 + #endif 523 562 524 563 /* verify cpu argument to cpumask_* operators */ 525 564 static inline unsigned int cpumask_check(unsigned int cpu) ··· 917 944 * @cpu: the cpu (<= nr_cpu_ids) 918 945 */ 919 946 #define cpumask_of(cpu) (get_cpu_mask(cpu)) 947 + 948 + /** 949 + * cpumask_scnprintf - print a cpumask into a string as comma-separated hex 950 + * @buf: the buffer to sprintf into 951 + * @len: the length of the buffer 952 + * @srcp: the cpumask to print 953 + * 954 + * If len is zero, returns zero. Otherwise returns the length of the 955 + * (nul-terminated) @buf string. 956 + */ 957 + static inline int cpumask_scnprintf(char *buf, int len, 958 + const struct cpumask *srcp) 959 + { 960 + return bitmap_scnprintf(buf, len, srcp->bits, nr_cpumask_bits); 961 + } 962 + 963 + /** 964 + * cpumask_parse_user - extract a cpumask from a user string 965 + * @buf: the buffer to extract from 966 + * @len: the length of the buffer 967 + * @dstp: the cpumask to set. 968 + * 969 + * Returns -errno, or 0 for success. 
970 + */ 971 + static inline int cpumask_parse_user(const char __user *buf, int len, 972 + struct cpumask *dstp) 973 + { 974 + return bitmap_parse_user(buf, len, dstp->bits, nr_cpumask_bits); 975 + } 976 + 977 + /** 978 + * cpulist_scnprintf - print a cpumask into a string as comma-separated list 979 + * @buf: the buffer to sprintf into 980 + * @len: the length of the buffer 981 + * @srcp: the cpumask to print 982 + * 983 + * If len is zero, returns zero. Otherwise returns the length of the 984 + * (nul-terminated) @buf string. 985 + */ 986 + static inline int cpulist_scnprintf(char *buf, int len, 987 + const struct cpumask *srcp) 988 + { 989 + return bitmap_scnlistprintf(buf, len, srcp->bits, nr_cpumask_bits); 990 + } 991 + 992 + /** 993 + * cpulist_parse_user - extract a cpumask from a user string of ranges 994 + * @buf: the buffer to extract from 995 + * @len: the length of the buffer 996 + * @dstp: the cpumask to set. 997 + * 998 + * Returns -errno, or 0 for success. 999 + */ 1000 + static inline int cpulist_parse(const char *buf, struct cpumask *dstp) 1001 + { 1002 + return bitmap_parselist(buf, dstp->bits, nr_cpumask_bits); 1003 + } 920 1004 921 1005 /** 922 1006 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
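The cpumask.h block above replaces the old macro wrappers with static inlines that take struct cpumask pointers and nr_cpumask_bits, which is why the sysfs, node, topology and PCI callers earlier in this diff now pass &mask or an existing pointer. A brief sketch of formatting a mask with the new signature, following the same trailing-newline convention those callers use; the function name and buffer handling are illustrative:

#include <linux/cpumask.h>

/* Sketch: render the online map as a cpu list (e.g. "0-3") into buf,
 * leaving room for the newline and terminator. */
static int show_online_cpus(char *buf, int len)
{
	int n = cpulist_scnprintf(buf, len - 2, cpu_online_mask);

	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}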
+2 -2
include/linux/interrupt.h
··· 111 111 112 112 extern cpumask_t irq_default_affinity; 113 113 114 - extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask); 114 + extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); 115 115 extern int irq_can_set_affinity(unsigned int irq); 116 116 extern int irq_select_affinity(unsigned int irq); 117 117 118 118 #else /* CONFIG_SMP */ 119 119 120 - static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask) 120 + static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) 121 121 { 122 122 return -EINVAL; 123 123 }
+2 -1
include/linux/irq.h
··· 113 113 void (*eoi)(unsigned int irq); 114 114 115 115 void (*end)(unsigned int irq); 116 - void (*set_affinity)(unsigned int irq, cpumask_t dest); 116 + void (*set_affinity)(unsigned int irq, 117 + const struct cpumask *dest); 117 118 int (*retrigger)(unsigned int irq); 118 119 int (*set_type)(unsigned int irq, unsigned int flow_type); 119 120 int (*set_wake)(unsigned int irq, unsigned int on);
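Irq chip implementations have to be converted to the same prototype. The sketch below is hypothetical (demo_hw_route_irq() stands in for whatever controller-specific register write is needed) and only shows the shape of a .set_affinity callback under the new signature:

#include <linux/irq.h>
#include <linux/cpumask.h>

/* Placeholder for the controller-specific routing write. */
static void demo_hw_route_irq(unsigned int irq, unsigned int cpu);

/* Hypothetical .set_affinity callback matching the new prototype. */
static void demo_chip_set_affinity(unsigned int irq, const struct cpumask *dest)
{
        /* Pick any requested CPU that is actually online. */
        unsigned int cpu = cpumask_any_and(dest, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return;         /* nothing online in the requested mask */

        demo_hw_route_irq(irq, cpu);
}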
+72 -20
include/linux/sched.h
··· 250 250 extern int runqueue_is_locked(void); 251 251 extern void task_rq_unlock_wait(struct task_struct *p); 252 252 253 - extern cpumask_t nohz_cpu_mask; 253 + extern cpumask_var_t nohz_cpu_mask; 254 254 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) 255 255 extern int select_nohz_load_balancer(int cpu); 256 256 #else ··· 758 758 #define SD_SERIALIZE 1024 /* Only a single load balancing instance */ 759 759 #define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */ 760 760 761 - #define BALANCE_FOR_MC_POWER \ 762 - (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0) 761 + enum powersavings_balance_level { 762 + POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ 763 + POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package 764 + * first for long running threads 765 + */ 766 + POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle 767 + * cpu package for power savings 768 + */ 769 + MAX_POWERSAVINGS_BALANCE_LEVELS 770 + }; 763 771 764 - #define BALANCE_FOR_PKG_POWER \ 765 - ((sched_mc_power_savings || sched_smt_power_savings) ? \ 766 - SD_POWERSAVINGS_BALANCE : 0) 772 + extern int sched_mc_power_savings, sched_smt_power_savings; 767 773 768 - #define test_sd_parent(sd, flag) ((sd->parent && \ 769 - (sd->parent->flags & flag)) ? 1 : 0) 774 + static inline int sd_balance_for_mc_power(void) 775 + { 776 + if (sched_smt_power_savings) 777 + return SD_POWERSAVINGS_BALANCE; 770 778 779 + return 0; 780 + } 781 + 782 + static inline int sd_balance_for_package_power(void) 783 + { 784 + if (sched_mc_power_savings | sched_smt_power_savings) 785 + return SD_POWERSAVINGS_BALANCE; 786 + 787 + return 0; 788 + } 789 + 790 + /* 791 + * Optimise SD flags for power savings: 792 + * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings. 
793 + * Keep default SD flags if sched_{smt,mc}_power_saving=0 794 + */ 795 + 796 + static inline int sd_power_saving_flags(void) 797 + { 798 + if (sched_mc_power_savings | sched_smt_power_savings) 799 + return SD_BALANCE_NEWIDLE; 800 + 801 + return 0; 802 + } 771 803 772 804 struct sched_group { 773 805 struct sched_group *next; /* Must be a circular list */ 774 - cpumask_t cpumask; 775 806 776 807 /* 777 808 * CPU power of this group, SCHED_LOAD_SCALE being max power for a ··· 815 784 * (see include/linux/reciprocal_div.h) 816 785 */ 817 786 u32 reciprocal_cpu_power; 787 + 788 + unsigned long cpumask[]; 818 789 }; 790 + 791 + static inline struct cpumask *sched_group_cpus(struct sched_group *sg) 792 + { 793 + return to_cpumask(sg->cpumask); 794 + } 819 795 820 796 enum sched_domain_level { 821 797 SD_LV_NONE = 0, ··· 847 809 struct sched_domain *parent; /* top domain must be null terminated */ 848 810 struct sched_domain *child; /* bottom domain must be null terminated */ 849 811 struct sched_group *groups; /* the balancing groups of the domain */ 850 - cpumask_t span; /* span of all CPUs in this domain */ 851 812 unsigned long min_interval; /* Minimum balance interval ms */ 852 813 unsigned long max_interval; /* Maximum balance interval ms */ 853 814 unsigned int busy_factor; /* less balancing by factor if busy */ ··· 901 864 #ifdef CONFIG_SCHED_DEBUG 902 865 char *name; 903 866 #endif 867 + 868 + /* span of all CPUs in this domain */ 869 + unsigned long span[]; 904 870 }; 905 871 906 - extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, 872 + static inline struct cpumask *sched_domain_span(struct sched_domain *sd) 873 + { 874 + return to_cpumask(sd->span); 875 + } 876 + 877 + extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, 907 878 struct sched_domain_attr *dattr_new); 908 879 extern int arch_reinit_sched_domains(void); 880 + 881 + /* Test a flag in parent sched domain */ 882 + static inline int test_sd_parent(struct sched_domain *sd, int flag) 883 + { 884 + if (sd->parent && (sd->parent->flags & flag)) 885 + return 1; 886 + 887 + return 0; 888 + } 909 889 910 890 #else /* CONFIG_SMP */ 911 891 912 892 struct sched_domain_attr; 913 893 914 894 static inline void 915 - partition_sched_domains(int ndoms_new, cpumask_t *doms_new, 895 + partition_sched_domains(int ndoms_new, struct cpumask *doms_new, 916 896 struct sched_domain_attr *dattr_new) 917 897 { 918 898 } ··· 980 926 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); 981 927 982 928 void (*set_cpus_allowed)(struct task_struct *p, 983 - const cpumask_t *newmask); 929 + const struct cpumask *newmask); 984 930 985 931 void (*rq_online)(struct rq *rq); 986 932 void (*rq_offline)(struct rq *rq); ··· 1633 1579 1634 1580 #ifdef CONFIG_SMP 1635 1581 extern int set_cpus_allowed_ptr(struct task_struct *p, 1636 - const cpumask_t *new_mask); 1582 + const struct cpumask *new_mask); 1637 1583 #else 1638 1584 static inline int set_cpus_allowed_ptr(struct task_struct *p, 1639 - const cpumask_t *new_mask) 1585 + const struct cpumask *new_mask) 1640 1586 { 1641 - if (!cpu_isset(0, *new_mask)) 1587 + if (!cpumask_test_cpu(0, new_mask)) 1642 1588 return -EINVAL; 1643 1589 return 0; 1644 1590 } ··· 2249 2195 } 2250 2196 #endif 2251 2197 2252 - extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask); 2253 - extern long sched_getaffinity(pid_t pid, cpumask_t *mask); 2254 - 2255 - extern int sched_mc_power_savings, sched_smt_power_savings; 2198 + extern long 
sched_setaffinity(pid_t pid, const struct cpumask *new_mask); 2199 + extern long sched_getaffinity(pid_t pid, struct cpumask *mask); 2256 2200 2257 2201 extern void normalize_rt_tasks(void); 2258 2202
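With sched_domain::span and sched_group::cpumask turned into flexible arrays hanging off the end of their structures, code is expected to go through the sched_domain_span()/sched_group_cpus() accessors rather than taking the address of a cpumask_t member. A rough, illustrative-only sketch (demo_print_domain() is not from the patch) of reading a domain that way:

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Illustrative only: dump a domain's span and its first group. */
static void demo_print_domain(struct sched_domain *sd)
{
        char str[256];
        int cpu;

        cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
        printk(KERN_DEBUG "domain spans %s (%u cpus)\n",
               str, cpumask_weight(sched_domain_span(sd)));

        for_each_cpu(cpu, sched_group_cpus(sd->groups))
                printk(KERN_DEBUG "  first group contains cpu%d\n", cpu);
}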
+4 -2
include/linux/topology.h
··· 125 125 | SD_WAKE_AFFINE \ 126 126 | SD_WAKE_BALANCE \ 127 127 | SD_SHARE_PKG_RESOURCES\ 128 - | BALANCE_FOR_MC_POWER, \ 128 + | sd_balance_for_mc_power()\ 129 + | sd_power_saving_flags(),\ 129 130 .last_balance = jiffies, \ 130 131 .balance_interval = 1, \ 131 132 } ··· 151 150 | SD_BALANCE_FORK \ 152 151 | SD_WAKE_AFFINE \ 153 152 | SD_WAKE_BALANCE \ 154 - | BALANCE_FOR_PKG_POWER,\ 153 + | sd_balance_for_package_power()\ 154 + | sd_power_saving_flags(),\ 155 155 .last_balance = jiffies, \ 156 156 .balance_interval = 1, \ 157 157 }
+9
init/Kconfig
··· 924 924 925 925 endif # MODULES 926 926 927 + config INIT_ALL_POSSIBLE 928 + bool 929 + help 930 + Back when each arch used to define their own cpu_online_map and 931 + cpu_possible_map, some of them chose to initialize cpu_possible_map 932 + with all 1s, and others with all 0s. When they were centralised, 933 + it was better to provide this option than to break all the archs 934 + and have several arch maintainers persuing me down dark alleys. 935 + 927 936 config STOP_MACHINE 928 937 bool 929 938 default y
+6 -5
kernel/cpu.c
··· 24 24 cpumask_t cpu_present_map __read_mostly; 25 25 EXPORT_SYMBOL(cpu_present_map); 26 26 27 - #ifndef CONFIG_SMP 28 - 29 27 /* 30 28 * Represents all cpu's that are currently online. 31 29 */ 32 - cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL; 30 + cpumask_t cpu_online_map __read_mostly; 33 31 EXPORT_SYMBOL(cpu_online_map); 34 32 33 + #ifdef CONFIG_INIT_ALL_POSSIBLE 35 34 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; 35 + #else 36 + cpumask_t cpu_possible_map __read_mostly; 37 + #endif 36 38 EXPORT_SYMBOL(cpu_possible_map); 37 39 38 - #else /* CONFIG_SMP */ 39 - 40 + #ifdef CONFIG_SMP 40 41 /* Serializes the updates to cpu_online_map, cpu_present_map */ 41 42 static DEFINE_MUTEX(cpu_add_remove_lock); 42 43
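With the maps centralised, cpu_possible_map now starts out empty unless an architecture selects INIT_ALL_POSSIBLE, so arch SMP setup has to mark each possible CPU explicitly at boot. A hedged sketch of that marking step (demo_detect_ncpus() is only a placeholder for firmware or ACPI probing, not a real interface):

#include <linux/cpumask.h>
#include <linux/init.h>

/* Placeholder for however the platform counts its CPUs. */
static unsigned int __init demo_detect_ncpus(void);

static void __init demo_mark_possible_cpus(void)
{
        unsigned int cpu, ncpus = demo_detect_ncpus();

        for (cpu = 0; cpu < ncpus && cpu < NR_CPUS; cpu++)
                cpu_set(cpu, cpu_possible_map);
}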
+2 -2
kernel/cpuset.c
··· 896 896 if (!*buf) { 897 897 cpus_clear(trialcs.cpus_allowed); 898 898 } else { 899 - retval = cpulist_parse(buf, trialcs.cpus_allowed); 899 + retval = cpulist_parse(buf, &trialcs.cpus_allowed); 900 900 if (retval < 0) 901 901 return retval; 902 902 ··· 1482 1482 mask = cs->cpus_allowed; 1483 1483 mutex_unlock(&callback_mutex); 1484 1484 1485 - return cpulist_scnprintf(page, PAGE_SIZE, mask); 1485 + return cpulist_scnprintf(page, PAGE_SIZE, &mask); 1486 1486 } 1487 1487 1488 1488 static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
+1 -1
kernel/irq/chip.c
··· 46 46 desc->irq_count = 0; 47 47 desc->irqs_unhandled = 0; 48 48 #ifdef CONFIG_SMP 49 - cpus_setall(desc->affinity); 49 + cpumask_setall(&desc->affinity); 50 50 #endif 51 51 spin_unlock_irqrestore(&desc->lock, flags); 52 52 }
+10 -12
kernel/irq/manage.c
··· 79 79 * @cpumask: cpumask 80 80 * 81 81 */ 82 - int irq_set_affinity(unsigned int irq, cpumask_t cpumask) 82 + int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) 83 83 { 84 84 struct irq_desc *desc = irq_to_desc(irq); 85 85 unsigned long flags; ··· 91 91 92 92 #ifdef CONFIG_GENERIC_PENDING_IRQ 93 93 if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { 94 - desc->affinity = cpumask; 94 + cpumask_copy(&desc->affinity, cpumask); 95 95 desc->chip->set_affinity(irq, cpumask); 96 96 } else { 97 97 desc->status |= IRQ_MOVE_PENDING; 98 - desc->pending_mask = cpumask; 98 + cpumask_copy(&desc->pending_mask, cpumask); 99 99 } 100 100 #else 101 - desc->affinity = cpumask; 101 + cpumask_copy(&desc->affinity, cpumask); 102 102 desc->chip->set_affinity(irq, cpumask); 103 103 #endif 104 104 desc->status |= IRQ_AFFINITY_SET; ··· 112 112 */ 113 113 int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) 114 114 { 115 - cpumask_t mask; 116 - 117 115 if (!irq_can_set_affinity(irq)) 118 116 return 0; 119 - 120 - cpus_and(mask, cpu_online_map, irq_default_affinity); 121 117 122 118 /* 123 119 * Preserve an userspace affinity setup, but make sure that 124 120 * one of the targets is online. 125 121 */ 126 122 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { 127 - if (cpus_intersects(desc->affinity, cpu_online_map)) 128 - mask = desc->affinity; 123 + if (cpumask_any_and(&desc->affinity, cpu_online_mask) 124 + < nr_cpu_ids) 125 + goto set_affinity; 129 126 else 130 127 desc->status &= ~IRQ_AFFINITY_SET; 131 128 } 132 129 133 - desc->affinity = mask; 134 - desc->chip->set_affinity(irq, mask); 130 + cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity); 131 + set_affinity: 132 + desc->chip->set_affinity(irq, &desc->affinity); 135 133 136 134 return 0; 137 135 }
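The do_irq_select_affinity() rework above drops the on-stack cpumask_t and instead asks whether the saved affinity still intersects the online map before reusing it. That test needs no temporary mask; a minimal stand-alone form (demo_affinity_still_valid() is illustrative, and cpumask_intersects() would be an equivalent spelling):

#include <linux/cpumask.h>
#include <linux/types.h>

/* True if at least one CPU in @affinity is currently online, checked
 * without building a scratch cpumask on the stack. */
static bool demo_affinity_still_valid(const struct cpumask *affinity)
{
        return cpumask_any_and(affinity, cpu_online_mask) < nr_cpu_ids;
}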
+7 -7
kernel/irq/migration.c
··· 4 4 void move_masked_irq(int irq) 5 5 { 6 6 struct irq_desc *desc = irq_to_desc(irq); 7 - cpumask_t tmp; 8 7 9 8 if (likely(!(desc->status & IRQ_MOVE_PENDING))) 10 9 return; ··· 18 19 19 20 desc->status &= ~IRQ_MOVE_PENDING; 20 21 21 - if (unlikely(cpus_empty(desc->pending_mask))) 22 + if (unlikely(cpumask_empty(&desc->pending_mask))) 22 23 return; 23 24 24 25 if (!desc->chip->set_affinity) 25 26 return; 26 27 27 28 assert_spin_locked(&desc->lock); 28 - 29 - cpus_and(tmp, desc->pending_mask, cpu_online_map); 30 29 31 30 /* 32 31 * If there was a valid mask to work with, please ··· 38 41 * For correct operation this depends on the caller 39 42 * masking the irqs. 40 43 */ 41 - if (likely(!cpus_empty(tmp))) { 42 - desc->chip->set_affinity(irq,tmp); 44 + if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask) 45 + < nr_cpu_ids)) { 46 + cpumask_and(&desc->affinity, 47 + &desc->pending_mask, cpu_online_mask); 48 + desc->chip->set_affinity(irq, &desc->affinity); 43 49 } 44 - cpus_clear(desc->pending_mask); 50 + cpumask_clear(&desc->pending_mask); 45 51 } 46 52 47 53 void move_native_irq(int irq)
+19 -10
kernel/irq/proc.c
··· 40 40 const char __user *buffer, size_t count, loff_t *pos) 41 41 { 42 42 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; 43 - cpumask_t new_value; 43 + cpumask_var_t new_value; 44 44 int err; 45 45 46 46 if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || 47 47 irq_balancing_disabled(irq)) 48 48 return -EIO; 49 49 50 + if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) 51 + return -ENOMEM; 52 + 50 53 err = cpumask_parse_user(buffer, count, new_value); 51 54 if (err) 52 - return err; 55 + goto free_cpumask; 53 56 54 - if (!is_affinity_mask_valid(new_value)) 55 - return -EINVAL; 57 + if (!is_affinity_mask_valid(*new_value)) { 58 + err = -EINVAL; 59 + goto free_cpumask; 60 + } 56 61 57 62 /* 58 63 * Do not allow disabling IRQs completely - it's a too easy 59 64 * way to make the system unusable accidentally :-) At least 60 65 * one online CPU still has to be targeted. 61 66 */ 62 - if (!cpus_intersects(new_value, cpu_online_map)) 67 + if (!cpumask_intersects(new_value, cpu_online_mask)) { 63 68 /* Special case for empty set - allow the architecture 64 69 code to set default SMP affinity. */ 65 - return irq_select_affinity_usr(irq) ? -EINVAL : count; 70 + err = irq_select_affinity_usr(irq) ? -EINVAL : count; 71 + } else { 72 + irq_set_affinity(irq, new_value); 73 + err = count; 74 + } 66 75 67 - irq_set_affinity(irq, new_value); 68 - 69 - return count; 76 + free_cpumask: 77 + free_cpumask_var(new_value); 78 + return err; 70 79 } 71 80 72 81 static int irq_affinity_proc_open(struct inode *inode, struct file *file) ··· 104 95 cpumask_t new_value; 105 96 int err; 106 97 107 - err = cpumask_parse_user(buffer, count, new_value); 98 + err = cpumask_parse_user(buffer, count, &new_value); 108 99 if (err) 109 100 return err; 110 101
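The proc write handler above is a template for the cpumask_var_t pattern this series leans on: allocate the mask, parse into it, and free it on every exit path instead of keeping a cpumask_t on the stack. A condensed, hypothetical version of that shape (demo_write_cpumask() and the "apply the mask" step are placeholders):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

static ssize_t demo_write_cpumask(const char __user *ubuf, size_t count)
{
        cpumask_var_t mask;
        ssize_t err;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        err = cpumask_parse_user(ubuf, count, mask);
        if (err)
                goto out;

        if (!cpumask_intersects(mask, cpu_online_mask)) {
                err = -EINVAL;  /* refuse to leave everything offline */
                goto out;
        }

        /* ... apply the mask to whatever consumes it here ... */
        err = count;
out:
        free_cpumask_var(mask);         /* freed on every path */
        return err;
}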
+2 -2
kernel/profile.c
··· 442 442 static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, 443 443 int count, int *eof, void *data) 444 444 { 445 - int len = cpumask_scnprintf(page, count, *(cpumask_t *)data); 445 + int len = cpumask_scnprintf(page, count, (cpumask_t *)data); 446 446 if (count - len < 2) 447 447 return -EINVAL; 448 448 len += sprintf(page + len, "\n"); ··· 456 456 unsigned long full_count = count, err; 457 457 cpumask_t new_value; 458 458 459 - err = cpumask_parse_user(buffer, count, new_value); 459 + err = cpumask_parse_user(buffer, count, &new_value); 460 460 if (err) 461 461 return err; 462 462
+1 -1
kernel/rcuclassic.c
··· 393 393 * unnecessarily. 394 394 */ 395 395 smp_mb(); 396 - cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask); 396 + cpumask_andnot(&rcp->cpumask, cpu_online_mask, nohz_cpu_mask); 397 397 398 398 rcp->signaled = 0; 399 399 }
+553 -411
kernel/sched.c
··· 498 498 */ 499 499 struct root_domain { 500 500 atomic_t refcount; 501 - cpumask_t span; 502 - cpumask_t online; 501 + cpumask_var_t span; 502 + cpumask_var_t online; 503 503 504 504 /* 505 505 * The "RT overload" flag: it gets set if a CPU has more than 506 506 * one runnable RT task. 507 507 */ 508 - cpumask_t rto_mask; 508 + cpumask_var_t rto_mask; 509 509 atomic_t rto_count; 510 510 #ifdef CONFIG_SMP 511 511 struct cpupri cpupri; 512 + #endif 513 + #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 514 + /* 515 + * Preferred wake up cpu nominated by sched_mc balance that will be 516 + * used when most cpus are idle in the system indicating overall very 517 + * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2) 518 + */ 519 + unsigned int sched_mc_preferred_wakeup_cpu; 512 520 #endif 513 521 }; 514 522 ··· 1522 1514 struct sched_domain *sd = data; 1523 1515 int i; 1524 1516 1525 - for_each_cpu_mask(i, sd->span) { 1517 + for_each_cpu(i, sched_domain_span(sd)) { 1526 1518 /* 1527 1519 * If there are currently no tasks on the cpu pretend there 1528 1520 * is one of average load so that when a new task gets to ··· 1543 1535 if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) 1544 1536 shares = tg->shares; 1545 1537 1546 - for_each_cpu_mask(i, sd->span) 1538 + for_each_cpu(i, sched_domain_span(sd)) 1547 1539 update_group_shares_cpu(tg, i, shares, rq_weight); 1548 1540 1549 1541 return 0; ··· 2109 2101 int i; 2110 2102 2111 2103 /* Skip over this group if it has no CPUs allowed */ 2112 - if (!cpus_intersects(group->cpumask, p->cpus_allowed)) 2104 + if (!cpumask_intersects(sched_group_cpus(group), 2105 + &p->cpus_allowed)) 2113 2106 continue; 2114 2107 2115 - local_group = cpu_isset(this_cpu, group->cpumask); 2108 + local_group = cpumask_test_cpu(this_cpu, 2109 + sched_group_cpus(group)); 2116 2110 2117 2111 /* Tally up the load of all CPUs in the group */ 2118 2112 avg_load = 0; 2119 2113 2120 - for_each_cpu_mask_nr(i, group->cpumask) { 2114 + for_each_cpu(i, sched_group_cpus(group)) { 2121 2115 /* Bias balancing toward cpus of our domain */ 2122 2116 if (local_group) 2123 2117 load = source_load(i, load_idx); ··· 2151 2141 * find_idlest_cpu - find the idlest cpu among the cpus in group. 
2152 2142 */ 2153 2143 static int 2154 - find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, 2155 - cpumask_t *tmp) 2144 + find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) 2156 2145 { 2157 2146 unsigned long load, min_load = ULONG_MAX; 2158 2147 int idlest = -1; 2159 2148 int i; 2160 2149 2161 2150 /* Traverse only the allowed CPUs */ 2162 - cpus_and(*tmp, group->cpumask, p->cpus_allowed); 2163 - 2164 - for_each_cpu_mask_nr(i, *tmp) { 2151 + for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) { 2165 2152 load = weighted_cpuload(i); 2166 2153 2167 2154 if (load < min_load || (load == min_load && i == this_cpu)) { ··· 2200 2193 update_shares(sd); 2201 2194 2202 2195 while (sd) { 2203 - cpumask_t span, tmpmask; 2204 2196 struct sched_group *group; 2205 2197 int new_cpu, weight; 2206 2198 ··· 2208 2202 continue; 2209 2203 } 2210 2204 2211 - span = sd->span; 2212 2205 group = find_idlest_group(sd, t, cpu); 2213 2206 if (!group) { 2214 2207 sd = sd->child; 2215 2208 continue; 2216 2209 } 2217 2210 2218 - new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask); 2211 + new_cpu = find_idlest_cpu(group, t, cpu); 2219 2212 if (new_cpu == -1 || new_cpu == cpu) { 2220 2213 /* Now try balancing at a lower domain level of cpu */ 2221 2214 sd = sd->child; ··· 2223 2218 2224 2219 /* Now try balancing at a lower domain level of new_cpu */ 2225 2220 cpu = new_cpu; 2221 + weight = cpumask_weight(sched_domain_span(sd)); 2226 2222 sd = NULL; 2227 - weight = cpus_weight(span); 2228 2223 for_each_domain(cpu, tmp) { 2229 - if (weight <= cpus_weight(tmp->span)) 2224 + if (weight <= cpumask_weight(sched_domain_span(tmp))) 2230 2225 break; 2231 2226 if (tmp->flags & flag) 2232 2227 sd = tmp; ··· 2271 2266 cpu = task_cpu(p); 2272 2267 2273 2268 for_each_domain(this_cpu, sd) { 2274 - if (cpu_isset(cpu, sd->span)) { 2269 + if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 2275 2270 update_shares(sd); 2276 2271 break; 2277 2272 } ··· 2320 2315 else { 2321 2316 struct sched_domain *sd; 2322 2317 for_each_domain(this_cpu, sd) { 2323 - if (cpu_isset(cpu, sd->span)) { 2318 + if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 2324 2319 schedstat_inc(sd, ttwu_wake_remote); 2325 2320 break; 2326 2321 } ··· 2851 2846 struct rq *rq; 2852 2847 2853 2848 rq = task_rq_lock(p, &flags); 2854 - if (!cpu_isset(dest_cpu, p->cpus_allowed) 2849 + if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) 2855 2850 || unlikely(!cpu_active(dest_cpu))) 2856 2851 goto out; 2857 2852 ··· 2916 2911 * 2) cannot be migrated to this CPU due to cpus_allowed, or 2917 2912 * 3) are cache-hot on their current CPU. 
2918 2913 */ 2919 - if (!cpu_isset(this_cpu, p->cpus_allowed)) { 2914 + if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { 2920 2915 schedstat_inc(p, se.nr_failed_migrations_affine); 2921 2916 return 0; 2922 2917 } ··· 3091 3086 static struct sched_group * 3092 3087 find_busiest_group(struct sched_domain *sd, int this_cpu, 3093 3088 unsigned long *imbalance, enum cpu_idle_type idle, 3094 - int *sd_idle, const cpumask_t *cpus, int *balance) 3089 + int *sd_idle, const struct cpumask *cpus, int *balance) 3095 3090 { 3096 3091 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; 3097 3092 unsigned long max_load, avg_load, total_load, this_load, total_pwr; ··· 3127 3122 unsigned long sum_avg_load_per_task; 3128 3123 unsigned long avg_load_per_task; 3129 3124 3130 - local_group = cpu_isset(this_cpu, group->cpumask); 3125 + local_group = cpumask_test_cpu(this_cpu, 3126 + sched_group_cpus(group)); 3131 3127 3132 3128 if (local_group) 3133 - balance_cpu = first_cpu(group->cpumask); 3129 + balance_cpu = cpumask_first(sched_group_cpus(group)); 3134 3130 3135 3131 /* Tally up the load of all CPUs in the group */ 3136 3132 sum_weighted_load = sum_nr_running = avg_load = 0; ··· 3140 3134 max_cpu_load = 0; 3141 3135 min_cpu_load = ~0UL; 3142 3136 3143 - for_each_cpu_mask_nr(i, group->cpumask) { 3144 - struct rq *rq; 3145 - 3146 - if (!cpu_isset(i, *cpus)) 3147 - continue; 3148 - 3149 - rq = cpu_rq(i); 3137 + for_each_cpu_and(i, sched_group_cpus(group), cpus) { 3138 + struct rq *rq = cpu_rq(i); 3150 3139 3151 3140 if (*sd_idle && rq->nr_running) 3152 3141 *sd_idle = 0; ··· 3252 3251 */ 3253 3252 if ((sum_nr_running < min_nr_running) || 3254 3253 (sum_nr_running == min_nr_running && 3255 - first_cpu(group->cpumask) < 3256 - first_cpu(group_min->cpumask))) { 3254 + cpumask_first(sched_group_cpus(group)) > 3255 + cpumask_first(sched_group_cpus(group_min)))) { 3257 3256 group_min = group; 3258 3257 min_nr_running = sum_nr_running; 3259 3258 min_load_per_task = sum_weighted_load / ··· 3268 3267 if (sum_nr_running <= group_capacity - 1) { 3269 3268 if (sum_nr_running > leader_nr_running || 3270 3269 (sum_nr_running == leader_nr_running && 3271 - first_cpu(group->cpumask) > 3272 - first_cpu(group_leader->cpumask))) { 3270 + cpumask_first(sched_group_cpus(group)) < 3271 + cpumask_first(sched_group_cpus(group_leader)))) { 3273 3272 group_leader = group; 3274 3273 leader_nr_running = sum_nr_running; 3275 3274 } ··· 3395 3394 3396 3395 if (this == group_leader && group_leader != group_min) { 3397 3396 *imbalance = min_load_per_task; 3397 + if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) { 3398 + cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu = 3399 + cpumask_first(sched_group_cpus(group_leader)); 3400 + } 3398 3401 return group_min; 3399 3402 } 3400 3403 #endif ··· 3412 3407 */ 3413 3408 static struct rq * 3414 3409 find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, 3415 - unsigned long imbalance, const cpumask_t *cpus) 3410 + unsigned long imbalance, const struct cpumask *cpus) 3416 3411 { 3417 3412 struct rq *busiest = NULL, *rq; 3418 3413 unsigned long max_load = 0; 3419 3414 int i; 3420 3415 3421 - for_each_cpu_mask_nr(i, group->cpumask) { 3416 + for_each_cpu(i, sched_group_cpus(group)) { 3422 3417 unsigned long wl; 3423 3418 3424 - if (!cpu_isset(i, *cpus)) 3419 + if (!cpumask_test_cpu(i, cpus)) 3425 3420 continue; 3426 3421 3427 3422 rq = cpu_rq(i); ··· 3451 3446 */ 3452 3447 static int load_balance(int this_cpu, struct rq *this_rq, 3453 3448 struct 
sched_domain *sd, enum cpu_idle_type idle, 3454 - int *balance, cpumask_t *cpus) 3449 + int *balance, struct cpumask *cpus) 3455 3450 { 3456 3451 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; 3457 3452 struct sched_group *group; ··· 3459 3454 struct rq *busiest; 3460 3455 unsigned long flags; 3461 3456 3462 - cpus_setall(*cpus); 3457 + cpumask_setall(cpus); 3463 3458 3464 3459 /* 3465 3460 * When power savings policy is enabled for the parent domain, idle ··· 3519 3514 3520 3515 /* All tasks on this runqueue were pinned by CPU affinity */ 3521 3516 if (unlikely(all_pinned)) { 3522 - cpu_clear(cpu_of(busiest), *cpus); 3523 - if (!cpus_empty(*cpus)) 3517 + cpumask_clear_cpu(cpu_of(busiest), cpus); 3518 + if (!cpumask_empty(cpus)) 3524 3519 goto redo; 3525 3520 goto out_balanced; 3526 3521 } ··· 3537 3532 /* don't kick the migration_thread, if the curr 3538 3533 * task on busiest cpu can't be moved to this_cpu 3539 3534 */ 3540 - if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { 3535 + if (!cpumask_test_cpu(this_cpu, 3536 + &busiest->curr->cpus_allowed)) { 3541 3537 spin_unlock_irqrestore(&busiest->lock, flags); 3542 3538 all_pinned = 1; 3543 3539 goto out_one_pinned; ··· 3613 3607 */ 3614 3608 static int 3615 3609 load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, 3616 - cpumask_t *cpus) 3610 + struct cpumask *cpus) 3617 3611 { 3618 3612 struct sched_group *group; 3619 3613 struct rq *busiest = NULL; ··· 3622 3616 int sd_idle = 0; 3623 3617 int all_pinned = 0; 3624 3618 3625 - cpus_setall(*cpus); 3619 + cpumask_setall(cpus); 3626 3620 3627 3621 /* 3628 3622 * When power savings policy is enabled for the parent domain, idle ··· 3666 3660 double_unlock_balance(this_rq, busiest); 3667 3661 3668 3662 if (unlikely(all_pinned)) { 3669 - cpu_clear(cpu_of(busiest), *cpus); 3670 - if (!cpus_empty(*cpus)) 3663 + cpumask_clear_cpu(cpu_of(busiest), cpus); 3664 + if (!cpumask_empty(cpus)) 3671 3665 goto redo; 3672 3666 } 3673 3667 } 3674 3668 3675 3669 if (!ld_moved) { 3670 + int active_balance = 0; 3671 + 3676 3672 schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); 3677 3673 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && 3678 3674 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) 3679 3675 return -1; 3676 + 3677 + if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP) 3678 + return -1; 3679 + 3680 + if (sd->nr_balance_failed++ < 2) 3681 + return -1; 3682 + 3683 + /* 3684 + * The only task running in a non-idle cpu can be moved to this 3685 + * cpu in an attempt to completely freeup the other CPU 3686 + * package. The same method used to move task in load_balance() 3687 + * have been extended for load_balance_newidle() to speedup 3688 + * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2) 3689 + * 3690 + * The package power saving logic comes from 3691 + * find_busiest_group(). If there are no imbalance, then 3692 + * f_b_g() will return NULL. However when sched_mc={1,2} then 3693 + * f_b_g() will select a group from which a running task may be 3694 + * pulled to this cpu in order to make the other package idle. 3695 + * If there is no opportunity to make a package idle and if 3696 + * there are no imbalance, then f_b_g() will return NULL and no 3697 + * action will be taken in load_balance_newidle(). 3698 + * 3699 + * Under normal task pull operation due to imbalance, there 3700 + * will be more than one task in the source run queue and 3701 + * move_tasks() will succeed. 
ld_moved will be true and this 3702 + * active balance code will not be triggered. 3703 + */ 3704 + 3705 + /* Lock busiest in correct order while this_rq is held */ 3706 + double_lock_balance(this_rq, busiest); 3707 + 3708 + /* 3709 + * don't kick the migration_thread, if the curr 3710 + * task on busiest cpu can't be moved to this_cpu 3711 + */ 3712 + if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { 3713 + double_unlock_balance(this_rq, busiest); 3714 + all_pinned = 1; 3715 + return ld_moved; 3716 + } 3717 + 3718 + if (!busiest->active_balance) { 3719 + busiest->active_balance = 1; 3720 + busiest->push_cpu = this_cpu; 3721 + active_balance = 1; 3722 + } 3723 + 3724 + double_unlock_balance(this_rq, busiest); 3725 + if (active_balance) 3726 + wake_up_process(busiest->migration_thread); 3727 + 3680 3728 } else 3681 3729 sd->nr_balance_failed = 0; 3682 3730 ··· 3756 3696 struct sched_domain *sd; 3757 3697 int pulled_task = 0; 3758 3698 unsigned long next_balance = jiffies + HZ; 3759 - cpumask_t tmpmask; 3699 + cpumask_var_t tmpmask; 3700 + 3701 + if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC)) 3702 + return; 3760 3703 3761 3704 for_each_domain(this_cpu, sd) { 3762 3705 unsigned long interval; ··· 3770 3707 if (sd->flags & SD_BALANCE_NEWIDLE) 3771 3708 /* If we've pulled tasks over stop searching: */ 3772 3709 pulled_task = load_balance_newidle(this_cpu, this_rq, 3773 - sd, &tmpmask); 3710 + sd, tmpmask); 3774 3711 3775 3712 interval = msecs_to_jiffies(sd->balance_interval); 3776 3713 if (time_after(next_balance, sd->last_balance + interval)) ··· 3785 3722 */ 3786 3723 this_rq->next_balance = next_balance; 3787 3724 } 3725 + free_cpumask_var(tmpmask); 3788 3726 } 3789 3727 3790 3728 /* ··· 3823 3759 /* Search for an sd spanning us and the target CPU. */ 3824 3760 for_each_domain(target_cpu, sd) { 3825 3761 if ((sd->flags & SD_LOAD_BALANCE) && 3826 - cpu_isset(busiest_cpu, sd->span)) 3762 + cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) 3827 3763 break; 3828 3764 } 3829 3765 ··· 3842 3778 #ifdef CONFIG_NO_HZ 3843 3779 static struct { 3844 3780 atomic_t load_balancer; 3845 - cpumask_t cpu_mask; 3781 + cpumask_var_t cpu_mask; 3846 3782 } nohz ____cacheline_aligned = { 3847 3783 .load_balancer = ATOMIC_INIT(-1), 3848 - .cpu_mask = CPU_MASK_NONE, 3849 3784 }; 3850 3785 3851 3786 /* ··· 3872 3809 int cpu = smp_processor_id(); 3873 3810 3874 3811 if (stop_tick) { 3875 - cpu_set(cpu, nohz.cpu_mask); 3812 + cpumask_set_cpu(cpu, nohz.cpu_mask); 3876 3813 cpu_rq(cpu)->in_nohz_recently = 1; 3877 3814 3878 3815 /* ··· 3886 3823 } 3887 3824 3888 3825 /* time for ilb owner also to sleep */ 3889 - if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) { 3826 + if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { 3890 3827 if (atomic_read(&nohz.load_balancer) == cpu) 3891 3828 atomic_set(&nohz.load_balancer, -1); 3892 3829 return 0; ··· 3899 3836 } else if (atomic_read(&nohz.load_balancer) == cpu) 3900 3837 return 1; 3901 3838 } else { 3902 - if (!cpu_isset(cpu, nohz.cpu_mask)) 3839 + if (!cpumask_test_cpu(cpu, nohz.cpu_mask)) 3903 3840 return 0; 3904 3841 3905 - cpu_clear(cpu, nohz.cpu_mask); 3842 + cpumask_clear_cpu(cpu, nohz.cpu_mask); 3906 3843 3907 3844 if (atomic_read(&nohz.load_balancer) == cpu) 3908 3845 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) ··· 3930 3867 unsigned long next_balance = jiffies + 60*HZ; 3931 3868 int update_next_balance = 0; 3932 3869 int need_serialize; 3933 - cpumask_t tmp; 3870 + cpumask_var_t tmp; 3871 + 3872 + /* Fails alloc? 
Rebalancing probably not a priority right now. */ 3873 + if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) 3874 + return; 3934 3875 3935 3876 for_each_domain(cpu, sd) { 3936 3877 if (!(sd->flags & SD_LOAD_BALANCE)) ··· 3959 3892 } 3960 3893 3961 3894 if (time_after_eq(jiffies, sd->last_balance + interval)) { 3962 - if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) { 3895 + if (load_balance(cpu, rq, sd, idle, &balance, tmp)) { 3963 3896 /* 3964 3897 * We've pulled tasks over so either we're no 3965 3898 * longer idle, or one of our SMT siblings is ··· 3993 3926 */ 3994 3927 if (likely(update_next_balance)) 3995 3928 rq->next_balance = next_balance; 3929 + 3930 + free_cpumask_var(tmp); 3996 3931 } 3997 3932 3998 3933 /* ··· 4019 3950 */ 4020 3951 if (this_rq->idle_at_tick && 4021 3952 atomic_read(&nohz.load_balancer) == this_cpu) { 4022 - cpumask_t cpus = nohz.cpu_mask; 4023 3953 struct rq *rq; 4024 3954 int balance_cpu; 4025 3955 4026 - cpu_clear(this_cpu, cpus); 4027 - for_each_cpu_mask_nr(balance_cpu, cpus) { 3956 + for_each_cpu(balance_cpu, nohz.cpu_mask) { 3957 + if (balance_cpu == this_cpu) 3958 + continue; 3959 + 4028 3960 /* 4029 3961 * If this cpu gets work to do, stop the load balancing 4030 3962 * work being done for other cpus. Next load ··· 4063 3993 rq->in_nohz_recently = 0; 4064 3994 4065 3995 if (atomic_read(&nohz.load_balancer) == cpu) { 4066 - cpu_clear(cpu, nohz.cpu_mask); 3996 + cpumask_clear_cpu(cpu, nohz.cpu_mask); 4067 3997 atomic_set(&nohz.load_balancer, -1); 4068 3998 } 4069 3999 ··· 4076 4006 * TBD: Traverse the sched domains and nominate 4077 4007 * the nearest cpu in the nohz.cpu_mask. 4078 4008 */ 4079 - int ilb = first_cpu(nohz.cpu_mask); 4009 + int ilb = cpumask_first(nohz.cpu_mask); 4080 4010 4081 4011 if (ilb < nr_cpu_ids) 4082 4012 resched_cpu(ilb); ··· 4088 4018 * cpus with ticks stopped, is it time for that to stop? 
4089 4019 */ 4090 4020 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && 4091 - cpus_weight(nohz.cpu_mask) == num_online_cpus()) { 4021 + cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { 4092 4022 resched_cpu(cpu); 4093 4023 return; 4094 4024 } ··· 4098 4028 * someone else, then no need raise the SCHED_SOFTIRQ 4099 4029 */ 4100 4030 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && 4101 - cpu_isset(cpu, nohz.cpu_mask)) 4031 + cpumask_test_cpu(cpu, nohz.cpu_mask)) 4102 4032 return; 4103 4033 #endif 4104 4034 if (time_after_eq(jiffies, rq->next_balance)) ··· 5471 5401 return retval; 5472 5402 } 5473 5403 5474 - long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) 5404 + long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 5475 5405 { 5476 - cpumask_t cpus_allowed; 5477 - cpumask_t new_mask = *in_mask; 5406 + cpumask_var_t cpus_allowed, new_mask; 5478 5407 struct task_struct *p; 5479 5408 int retval; 5480 5409 ··· 5495 5426 get_task_struct(p); 5496 5427 read_unlock(&tasklist_lock); 5497 5428 5429 + if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 5430 + retval = -ENOMEM; 5431 + goto out_put_task; 5432 + } 5433 + if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 5434 + retval = -ENOMEM; 5435 + goto out_free_cpus_allowed; 5436 + } 5498 5437 retval = -EPERM; 5499 5438 if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) 5500 5439 goto out_unlock; ··· 5511 5434 if (retval) 5512 5435 goto out_unlock; 5513 5436 5514 - cpuset_cpus_allowed(p, &cpus_allowed); 5515 - cpus_and(new_mask, new_mask, cpus_allowed); 5437 + cpuset_cpus_allowed(p, cpus_allowed); 5438 + cpumask_and(new_mask, in_mask, cpus_allowed); 5516 5439 again: 5517 - retval = set_cpus_allowed_ptr(p, &new_mask); 5440 + retval = set_cpus_allowed_ptr(p, new_mask); 5518 5441 5519 5442 if (!retval) { 5520 - cpuset_cpus_allowed(p, &cpus_allowed); 5521 - if (!cpus_subset(new_mask, cpus_allowed)) { 5443 + cpuset_cpus_allowed(p, cpus_allowed); 5444 + if (!cpumask_subset(new_mask, cpus_allowed)) { 5522 5445 /* 5523 5446 * We must have raced with a concurrent cpuset 5524 5447 * update. Just reset the cpus_allowed to the 5525 5448 * cpuset's cpus_allowed 5526 5449 */ 5527 - new_mask = cpus_allowed; 5450 + cpumask_copy(new_mask, cpus_allowed); 5528 5451 goto again; 5529 5452 } 5530 5453 } 5531 5454 out_unlock: 5455 + free_cpumask_var(new_mask); 5456 + out_free_cpus_allowed: 5457 + free_cpumask_var(cpus_allowed); 5458 + out_put_task: 5532 5459 put_task_struct(p); 5533 5460 put_online_cpus(); 5534 5461 return retval; 5535 5462 } 5536 5463 5537 5464 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 5538 - cpumask_t *new_mask) 5465 + struct cpumask *new_mask) 5539 5466 { 5540 - if (len < sizeof(cpumask_t)) { 5541 - memset(new_mask, 0, sizeof(cpumask_t)); 5542 - } else if (len > sizeof(cpumask_t)) { 5543 - len = sizeof(cpumask_t); 5544 - } 5467 + if (len < cpumask_size()) 5468 + cpumask_clear(new_mask); 5469 + else if (len > cpumask_size()) 5470 + len = cpumask_size(); 5471 + 5545 5472 return copy_from_user(new_mask, user_mask_ptr, len) ? 
-EFAULT : 0; 5546 5473 } 5547 5474 ··· 5558 5477 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, 5559 5478 unsigned long __user *user_mask_ptr) 5560 5479 { 5561 - cpumask_t new_mask; 5480 + cpumask_var_t new_mask; 5562 5481 int retval; 5563 5482 5564 - retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask); 5565 - if (retval) 5566 - return retval; 5483 + if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 5484 + return -ENOMEM; 5567 5485 5568 - return sched_setaffinity(pid, &new_mask); 5486 + retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 5487 + if (retval == 0) 5488 + retval = sched_setaffinity(pid, new_mask); 5489 + free_cpumask_var(new_mask); 5490 + return retval; 5569 5491 } 5570 5492 5571 - long sched_getaffinity(pid_t pid, cpumask_t *mask) 5493 + long sched_getaffinity(pid_t pid, struct cpumask *mask) 5572 5494 { 5573 5495 struct task_struct *p; 5574 5496 int retval; ··· 5588 5504 if (retval) 5589 5505 goto out_unlock; 5590 5506 5591 - cpus_and(*mask, p->cpus_allowed, cpu_online_map); 5507 + cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); 5592 5508 5593 5509 out_unlock: 5594 5510 read_unlock(&tasklist_lock); ··· 5607 5523 unsigned long __user *user_mask_ptr) 5608 5524 { 5609 5525 int ret; 5610 - cpumask_t mask; 5526 + cpumask_var_t mask; 5611 5527 5612 - if (len < sizeof(cpumask_t)) 5528 + if (len < cpumask_size()) 5613 5529 return -EINVAL; 5614 5530 5615 - ret = sched_getaffinity(pid, &mask); 5616 - if (ret < 0) 5617 - return ret; 5531 + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 5532 + return -ENOMEM; 5618 5533 5619 - if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t))) 5620 - return -EFAULT; 5534 + ret = sched_getaffinity(pid, mask); 5535 + if (ret == 0) { 5536 + if (copy_to_user(user_mask_ptr, mask, cpumask_size())) 5537 + ret = -EFAULT; 5538 + else 5539 + ret = cpumask_size(); 5540 + } 5541 + free_cpumask_var(mask); 5621 5542 5622 - return sizeof(cpumask_t); 5543 + return ret; 5623 5544 } 5624 5545 5625 5546 /** ··· 5966 5877 idle->se.exec_start = sched_clock(); 5967 5878 5968 5879 idle->prio = idle->normal_prio = MAX_PRIO; 5969 - idle->cpus_allowed = cpumask_of_cpu(cpu); 5880 + cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); 5970 5881 __set_task_cpu(idle, cpu); 5971 5882 5972 5883 rq->curr = rq->idle = idle; ··· 5993 5904 * indicates which cpus entered this state. This is used 5994 5905 * in the rcu update to wait only for active cpus. For system 5995 5906 * which do not switch off the HZ timer nohz_cpu_mask should 5996 - * always be CPU_MASK_NONE. 5907 + * always be CPU_BITS_NONE. 5997 5908 */ 5998 - cpumask_t nohz_cpu_mask = CPU_MASK_NONE; 5909 + cpumask_var_t nohz_cpu_mask; 5999 5910 6000 5911 /* 6001 5912 * Increase the granularity value when there are more CPUs, ··· 6050 5961 * task must not exit() & deallocate itself prematurely. The 6051 5962 * call is not atomic; no spinlocks may be held. 
6052 5963 */ 6053 - int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) 5964 + int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 6054 5965 { 6055 5966 struct migration_req req; 6056 5967 unsigned long flags; ··· 6058 5969 int ret = 0; 6059 5970 6060 5971 rq = task_rq_lock(p, &flags); 6061 - if (!cpus_intersects(*new_mask, cpu_online_map)) { 5972 + if (!cpumask_intersects(new_mask, cpu_online_mask)) { 6062 5973 ret = -EINVAL; 6063 5974 goto out; 6064 5975 } 6065 5976 6066 5977 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && 6067 - !cpus_equal(p->cpus_allowed, *new_mask))) { 5978 + !cpumask_equal(&p->cpus_allowed, new_mask))) { 6068 5979 ret = -EINVAL; 6069 5980 goto out; 6070 5981 } ··· 6072 5983 if (p->sched_class->set_cpus_allowed) 6073 5984 p->sched_class->set_cpus_allowed(p, new_mask); 6074 5985 else { 6075 - p->cpus_allowed = *new_mask; 6076 - p->rt.nr_cpus_allowed = cpus_weight(*new_mask); 5986 + cpumask_copy(&p->cpus_allowed, new_mask); 5987 + p->rt.nr_cpus_allowed = cpumask_weight(new_mask); 6077 5988 } 6078 5989 6079 5990 /* Can the task run on the task's current CPU? If so, we're done */ 6080 - if (cpu_isset(task_cpu(p), *new_mask)) 5991 + if (cpumask_test_cpu(task_cpu(p), new_mask)) 6081 5992 goto out; 6082 5993 6083 - if (migrate_task(p, any_online_cpu(*new_mask), &req)) { 5994 + if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { 6084 5995 /* Need help from migration thread: drop lock and wait. */ 6085 5996 task_rq_unlock(rq, &flags); 6086 5997 wake_up_process(rq->migration_thread); ··· 6122 6033 if (task_cpu(p) != src_cpu) 6123 6034 goto done; 6124 6035 /* Affinity changed (again). */ 6125 - if (!cpu_isset(dest_cpu, p->cpus_allowed)) 6036 + if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) 6126 6037 goto fail; 6127 6038 6128 6039 on_rq = p->se.on_rq; ··· 6219 6130 */ 6220 6131 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 6221 6132 { 6222 - unsigned long flags; 6223 - cpumask_t mask; 6224 - struct rq *rq; 6225 6133 int dest_cpu; 6134 + /* FIXME: Use cpumask_of_node here. */ 6135 + cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu)); 6136 + const struct cpumask *nodemask = &_nodemask; 6226 6137 6227 - do { 6228 - /* On same node? */ 6229 - mask = node_to_cpumask(cpu_to_node(dead_cpu)); 6230 - cpus_and(mask, mask, p->cpus_allowed); 6231 - dest_cpu = any_online_cpu(mask); 6138 + again: 6139 + /* Look for allowed, online CPU in same node. */ 6140 + for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) 6141 + if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) 6142 + goto move; 6232 6143 6233 - /* On any allowed CPU? */ 6234 - if (dest_cpu >= nr_cpu_ids) 6235 - dest_cpu = any_online_cpu(p->cpus_allowed); 6144 + /* Any allowed, online CPU? */ 6145 + dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); 6146 + if (dest_cpu < nr_cpu_ids) 6147 + goto move; 6236 6148 6237 - /* No more Mr. Nice Guy. */ 6238 - if (dest_cpu >= nr_cpu_ids) { 6239 - cpumask_t cpus_allowed; 6149 + /* No more Mr. Nice Guy. */ 6150 + if (dest_cpu >= nr_cpu_ids) { 6151 + cpuset_cpus_allowed_locked(p, &p->cpus_allowed); 6152 + dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); 6240 6153 6241 - cpuset_cpus_allowed_locked(p, &cpus_allowed); 6242 - /* 6243 - * Try to stay on the same cpuset, where the 6244 - * current cpuset may be a subset of all cpus. 6245 - * The cpuset_cpus_allowed_locked() variant of 6246 - * cpuset_cpus_allowed() will not block. 
It must be 6247 - * called within calls to cpuset_lock/cpuset_unlock. 6248 - */ 6249 - rq = task_rq_lock(p, &flags); 6250 - p->cpus_allowed = cpus_allowed; 6251 - dest_cpu = any_online_cpu(p->cpus_allowed); 6252 - task_rq_unlock(rq, &flags); 6253 - 6254 - /* 6255 - * Don't tell them about moving exiting tasks or 6256 - * kernel threads (both mm NULL), since they never 6257 - * leave kernel. 6258 - */ 6259 - if (p->mm && printk_ratelimit()) { 6260 - printk(KERN_INFO "process %d (%s) no " 6261 - "longer affine to cpu%d\n", 6262 - task_pid_nr(p), p->comm, dead_cpu); 6263 - } 6154 + /* 6155 + * Don't tell them about moving exiting tasks or 6156 + * kernel threads (both mm NULL), since they never 6157 + * leave kernel. 6158 + */ 6159 + if (p->mm && printk_ratelimit()) { 6160 + printk(KERN_INFO "process %d (%s) no " 6161 + "longer affine to cpu%d\n", 6162 + task_pid_nr(p), p->comm, dead_cpu); 6264 6163 } 6265 - } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); 6164 + } 6165 + 6166 + move: 6167 + /* It can have affinity changed while we were choosing. */ 6168 + if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) 6169 + goto again; 6266 6170 } 6267 6171 6268 6172 /* ··· 6267 6185 */ 6268 6186 static void migrate_nr_uninterruptible(struct rq *rq_src) 6269 6187 { 6270 - struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR)); 6188 + struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); 6271 6189 unsigned long flags; 6272 6190 6273 6191 local_irq_save(flags); ··· 6557 6475 if (!rq->online) { 6558 6476 const struct sched_class *class; 6559 6477 6560 - cpu_set(rq->cpu, rq->rd->online); 6478 + cpumask_set_cpu(rq->cpu, rq->rd->online); 6561 6479 rq->online = 1; 6562 6480 6563 6481 for_each_class(class) { ··· 6577 6495 class->rq_offline(rq); 6578 6496 } 6579 6497 6580 - cpu_clear(rq->cpu, rq->rd->online); 6498 + cpumask_clear_cpu(rq->cpu, rq->rd->online); 6581 6499 rq->online = 0; 6582 6500 } 6583 6501 } ··· 6618 6536 rq = cpu_rq(cpu); 6619 6537 spin_lock_irqsave(&rq->lock, flags); 6620 6538 if (rq->rd) { 6621 - BUG_ON(!cpu_isset(cpu, rq->rd->span)); 6539 + BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 6622 6540 6623 6541 set_rq_online(rq); 6624 6542 } ··· 6632 6550 break; 6633 6551 /* Unbind it from offline cpu so it can run. Fall thru. 
*/ 6634 6552 kthread_bind(cpu_rq(cpu)->migration_thread, 6635 - any_online_cpu(cpu_online_map)); 6553 + cpumask_any(cpu_online_mask)); 6636 6554 kthread_stop(cpu_rq(cpu)->migration_thread); 6637 6555 cpu_rq(cpu)->migration_thread = NULL; 6638 6556 break; ··· 6682 6600 rq = cpu_rq(cpu); 6683 6601 spin_lock_irqsave(&rq->lock, flags); 6684 6602 if (rq->rd) { 6685 - BUG_ON(!cpu_isset(cpu, rq->rd->span)); 6603 + BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 6686 6604 set_rq_offline(rq); 6687 6605 } 6688 6606 spin_unlock_irqrestore(&rq->lock, flags); ··· 6721 6639 #ifdef CONFIG_SCHED_DEBUG 6722 6640 6723 6641 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 6724 - cpumask_t *groupmask) 6642 + struct cpumask *groupmask) 6725 6643 { 6726 6644 struct sched_group *group = sd->groups; 6727 6645 char str[256]; 6728 6646 6729 - cpulist_scnprintf(str, sizeof(str), sd->span); 6730 - cpus_clear(*groupmask); 6647 + cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); 6648 + cpumask_clear(groupmask); 6731 6649 6732 6650 printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 6733 6651 ··· 6741 6659 6742 6660 printk(KERN_CONT "span %s level %s\n", str, sd->name); 6743 6661 6744 - if (!cpu_isset(cpu, sd->span)) { 6662 + if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 6745 6663 printk(KERN_ERR "ERROR: domain->span does not contain " 6746 6664 "CPU%d\n", cpu); 6747 6665 } 6748 - if (!cpu_isset(cpu, group->cpumask)) { 6666 + if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { 6749 6667 printk(KERN_ERR "ERROR: domain->groups does not contain" 6750 6668 " CPU%d\n", cpu); 6751 6669 } ··· 6765 6683 break; 6766 6684 } 6767 6685 6768 - if (!cpus_weight(group->cpumask)) { 6686 + if (!cpumask_weight(sched_group_cpus(group))) { 6769 6687 printk(KERN_CONT "\n"); 6770 6688 printk(KERN_ERR "ERROR: empty group\n"); 6771 6689 break; 6772 6690 } 6773 6691 6774 - if (cpus_intersects(*groupmask, group->cpumask)) { 6692 + if (cpumask_intersects(groupmask, sched_group_cpus(group))) { 6775 6693 printk(KERN_CONT "\n"); 6776 6694 printk(KERN_ERR "ERROR: repeated CPUs\n"); 6777 6695 break; 6778 6696 } 6779 6697 6780 - cpus_or(*groupmask, *groupmask, group->cpumask); 6698 + cpumask_or(groupmask, groupmask, sched_group_cpus(group)); 6781 6699 6782 - cpulist_scnprintf(str, sizeof(str), group->cpumask); 6700 + cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); 6783 6701 printk(KERN_CONT " %s", str); 6784 6702 6785 6703 group = group->next; 6786 6704 } while (group != sd->groups); 6787 6705 printk(KERN_CONT "\n"); 6788 6706 6789 - if (!cpus_equal(sd->span, *groupmask)) 6707 + if (!cpumask_equal(sched_domain_span(sd), groupmask)) 6790 6708 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 6791 6709 6792 - if (sd->parent && !cpus_subset(*groupmask, sd->parent->span)) 6710 + if (sd->parent && 6711 + !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 6793 6712 printk(KERN_ERR "ERROR: parent span is not a superset " 6794 6713 "of domain->span\n"); 6795 6714 return 0; ··· 6798 6715 6799 6716 static void sched_domain_debug(struct sched_domain *sd, int cpu) 6800 6717 { 6801 - cpumask_t *groupmask; 6718 + cpumask_var_t groupmask; 6802 6719 int level = 0; 6803 6720 6804 6721 if (!sd) { ··· 6808 6725 6809 6726 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); 6810 6727 6811 - groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL); 6812 - if (!groupmask) { 6728 + if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { 6813 6729 printk(KERN_DEBUG "Cannot load-balance (out of 
memory)\n"); 6814 6730 return; 6815 6731 } ··· 6821 6739 if (!sd) 6822 6740 break; 6823 6741 } 6824 - kfree(groupmask); 6742 + free_cpumask_var(groupmask); 6825 6743 } 6826 6744 #else /* !CONFIG_SCHED_DEBUG */ 6827 6745 # define sched_domain_debug(sd, cpu) do { } while (0) ··· 6829 6747 6830 6748 static int sd_degenerate(struct sched_domain *sd) 6831 6749 { 6832 - if (cpus_weight(sd->span) == 1) 6750 + if (cpumask_weight(sched_domain_span(sd)) == 1) 6833 6751 return 1; 6834 6752 6835 6753 /* Following flags need at least 2 groups */ ··· 6860 6778 if (sd_degenerate(parent)) 6861 6779 return 1; 6862 6780 6863 - if (!cpus_equal(sd->span, parent->span)) 6781 + if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) 6864 6782 return 0; 6865 6783 6866 6784 /* Does parent contain flags not in child? */ ··· 6884 6802 return 1; 6885 6803 } 6886 6804 6805 + static void free_rootdomain(struct root_domain *rd) 6806 + { 6807 + cpupri_cleanup(&rd->cpupri); 6808 + 6809 + free_cpumask_var(rd->rto_mask); 6810 + free_cpumask_var(rd->online); 6811 + free_cpumask_var(rd->span); 6812 + kfree(rd); 6813 + } 6814 + 6887 6815 static void rq_attach_root(struct rq *rq, struct root_domain *rd) 6888 6816 { 6889 6817 unsigned long flags; ··· 6903 6811 if (rq->rd) { 6904 6812 struct root_domain *old_rd = rq->rd; 6905 6813 6906 - if (cpu_isset(rq->cpu, old_rd->online)) 6814 + if (cpumask_test_cpu(rq->cpu, old_rd->online)) 6907 6815 set_rq_offline(rq); 6908 6816 6909 - cpu_clear(rq->cpu, old_rd->span); 6817 + cpumask_clear_cpu(rq->cpu, old_rd->span); 6910 6818 6911 6819 if (atomic_dec_and_test(&old_rd->refcount)) 6912 - kfree(old_rd); 6820 + free_rootdomain(old_rd); 6913 6821 } 6914 6822 6915 6823 atomic_inc(&rd->refcount); 6916 6824 rq->rd = rd; 6917 6825 6918 - cpu_set(rq->cpu, rd->span); 6919 - if (cpu_isset(rq->cpu, cpu_online_map)) 6826 + cpumask_set_cpu(rq->cpu, rd->span); 6827 + if (cpumask_test_cpu(rq->cpu, cpu_online_mask)) 6920 6828 set_rq_online(rq); 6921 6829 6922 6830 spin_unlock_irqrestore(&rq->lock, flags); 6923 6831 } 6924 6832 6925 - static void init_rootdomain(struct root_domain *rd) 6833 + static int init_rootdomain(struct root_domain *rd, bool bootmem) 6926 6834 { 6927 6835 memset(rd, 0, sizeof(*rd)); 6928 6836 6929 - cpus_clear(rd->span); 6930 - cpus_clear(rd->online); 6837 + if (bootmem) { 6838 + alloc_bootmem_cpumask_var(&def_root_domain.span); 6839 + alloc_bootmem_cpumask_var(&def_root_domain.online); 6840 + alloc_bootmem_cpumask_var(&def_root_domain.rto_mask); 6841 + cpupri_init(&rd->cpupri, true); 6842 + return 0; 6843 + } 6931 6844 6932 - cpupri_init(&rd->cpupri); 6845 + if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) 6846 + goto free_rd; 6847 + if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) 6848 + goto free_span; 6849 + if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 6850 + goto free_online; 6851 + 6852 + if (cpupri_init(&rd->cpupri, false) != 0) 6853 + goto free_rto_mask; 6854 + return 0; 6855 + 6856 + free_rto_mask: 6857 + free_cpumask_var(rd->rto_mask); 6858 + free_online: 6859 + free_cpumask_var(rd->online); 6860 + free_span: 6861 + free_cpumask_var(rd->span); 6862 + free_rd: 6863 + kfree(rd); 6864 + return -ENOMEM; 6933 6865 } 6934 6866 6935 6867 static void init_defrootdomain(void) 6936 6868 { 6937 - init_rootdomain(&def_root_domain); 6869 + init_rootdomain(&def_root_domain, true); 6870 + 6938 6871 atomic_set(&def_root_domain.refcount, 1); 6939 6872 } 6940 6873 ··· 6971 6854 if (!rd) 6972 6855 return NULL; 6973 6856 6974 - init_rootdomain(rd); 6857 + if 
(init_rootdomain(rd, false) != 0) { 6858 + kfree(rd); 6859 + return NULL; 6860 + } 6975 6861 6976 6862 return rd; 6977 6863 } ··· 7016 6896 } 7017 6897 7018 6898 /* cpus with isolated domains */ 7019 - static cpumask_t cpu_isolated_map = CPU_MASK_NONE; 6899 + static cpumask_var_t cpu_isolated_map; 7020 6900 7021 6901 /* Setup the mask of cpus configured for isolated domains */ 7022 6902 static int __init isolated_cpu_setup(char *str) 7023 6903 { 7024 - static int __initdata ints[NR_CPUS]; 7025 - int i; 7026 - 7027 - str = get_options(str, ARRAY_SIZE(ints), ints); 7028 - cpus_clear(cpu_isolated_map); 7029 - for (i = 1; i <= ints[0]; i++) 7030 - if (ints[i] < NR_CPUS) 7031 - cpu_set(ints[i], cpu_isolated_map); 6904 + cpulist_parse(str, cpu_isolated_map); 7032 6905 return 1; 7033 6906 } 7034 6907 ··· 7030 6917 /* 7031 6918 * init_sched_build_groups takes the cpumask we wish to span, and a pointer 7032 6919 * to a function which identifies what group(along with sched group) a CPU 7033 - * belongs to. The return value of group_fn must be a >= 0 and < NR_CPUS 7034 - * (due to the fact that we keep track of groups covered with a cpumask_t). 6920 + * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids 6921 + * (due to the fact that we keep track of groups covered with a struct cpumask). 7035 6922 * 7036 6923 * init_sched_build_groups will build a circular linked list of the groups 7037 6924 * covered by the given span, and will set each group's ->cpumask correctly, 7038 6925 * and ->cpu_power to 0. 7039 6926 */ 7040 6927 static void 7041 - init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, 7042 - int (*group_fn)(int cpu, const cpumask_t *cpu_map, 6928 + init_sched_build_groups(const struct cpumask *span, 6929 + const struct cpumask *cpu_map, 6930 + int (*group_fn)(int cpu, const struct cpumask *cpu_map, 7043 6931 struct sched_group **sg, 7044 - cpumask_t *tmpmask), 7045 - cpumask_t *covered, cpumask_t *tmpmask) 6932 + struct cpumask *tmpmask), 6933 + struct cpumask *covered, struct cpumask *tmpmask) 7046 6934 { 7047 6935 struct sched_group *first = NULL, *last = NULL; 7048 6936 int i; 7049 6937 7050 - cpus_clear(*covered); 6938 + cpumask_clear(covered); 7051 6939 7052 - for_each_cpu_mask_nr(i, *span) { 6940 + for_each_cpu(i, span) { 7053 6941 struct sched_group *sg; 7054 6942 int group = group_fn(i, cpu_map, &sg, tmpmask); 7055 6943 int j; 7056 6944 7057 - if (cpu_isset(i, *covered)) 6945 + if (cpumask_test_cpu(i, covered)) 7058 6946 continue; 7059 6947 7060 - cpus_clear(sg->cpumask); 6948 + cpumask_clear(sched_group_cpus(sg)); 7061 6949 sg->__cpu_power = 0; 7062 6950 7063 - for_each_cpu_mask_nr(j, *span) { 6951 + for_each_cpu(j, span) { 7064 6952 if (group_fn(j, cpu_map, NULL, tmpmask) != group) 7065 6953 continue; 7066 6954 7067 - cpu_set(j, *covered); 7068 - cpu_set(j, sg->cpumask); 6955 + cpumask_set_cpu(j, covered); 6956 + cpumask_set_cpu(j, sched_group_cpus(sg)); 7069 6957 } 7070 6958 if (!first) 7071 6959 first = sg; ··· 7130 7016 * should be one that prevents unnecessary balancing, but also spreads tasks 7131 7017 * out optimally. 
7132 7018 */ 7133 - static void sched_domain_node_span(int node, cpumask_t *span) 7019 + static void sched_domain_node_span(int node, struct cpumask *span) 7134 7020 { 7135 7021 nodemask_t used_nodes; 7022 + /* FIXME: use cpumask_of_node() */ 7136 7023 node_to_cpumask_ptr(nodemask, node); 7137 7024 int i; 7138 7025 ··· 7155 7040 int sched_smt_power_savings = 0, sched_mc_power_savings = 0; 7156 7041 7157 7042 /* 7043 + * The cpus mask in sched_group and sched_domain hangs off the end. 7044 + * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space 7045 + * for nr_cpu_ids < CONFIG_NR_CPUS. 7046 + */ 7047 + struct static_sched_group { 7048 + struct sched_group sg; 7049 + DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); 7050 + }; 7051 + 7052 + struct static_sched_domain { 7053 + struct sched_domain sd; 7054 + DECLARE_BITMAP(span, CONFIG_NR_CPUS); 7055 + }; 7056 + 7057 + /* 7158 7058 * SMT sched-domains: 7159 7059 */ 7160 7060 #ifdef CONFIG_SCHED_SMT 7161 - static DEFINE_PER_CPU(struct sched_domain, cpu_domains); 7162 - static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); 7061 + static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); 7062 + static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); 7163 7063 7164 7064 static int 7165 - cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, 7166 - cpumask_t *unused) 7065 + cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, 7066 + struct sched_group **sg, struct cpumask *unused) 7167 7067 { 7168 7068 if (sg) 7169 - *sg = &per_cpu(sched_group_cpus, cpu); 7069 + *sg = &per_cpu(sched_group_cpus, cpu).sg; 7170 7070 return cpu; 7171 7071 } 7172 7072 #endif /* CONFIG_SCHED_SMT */ ··· 7190 7060 * multi-core sched-domains: 7191 7061 */ 7192 7062 #ifdef CONFIG_SCHED_MC 7193 - static DEFINE_PER_CPU(struct sched_domain, core_domains); 7194 - static DEFINE_PER_CPU(struct sched_group, sched_group_core); 7063 + static DEFINE_PER_CPU(struct static_sched_domain, core_domains); 7064 + static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); 7195 7065 #endif /* CONFIG_SCHED_MC */ 7196 7066 7197 7067 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) 7198 7068 static int 7199 - cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, 7200 - cpumask_t *mask) 7069 + cpu_to_core_group(int cpu, const struct cpumask *cpu_map, 7070 + struct sched_group **sg, struct cpumask *mask) 7201 7071 { 7202 7072 int group; 7203 7073 7204 - *mask = per_cpu(cpu_sibling_map, cpu); 7205 - cpus_and(*mask, *mask, *cpu_map); 7206 - group = first_cpu(*mask); 7074 + cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); 7075 + group = cpumask_first(mask); 7207 7076 if (sg) 7208 - *sg = &per_cpu(sched_group_core, group); 7077 + *sg = &per_cpu(sched_group_core, group).sg; 7209 7078 return group; 7210 7079 } 7211 7080 #elif defined(CONFIG_SCHED_MC) 7212 7081 static int 7213 - cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, 7214 - cpumask_t *unused) 7082 + cpu_to_core_group(int cpu, const struct cpumask *cpu_map, 7083 + struct sched_group **sg, struct cpumask *unused) 7215 7084 { 7216 7085 if (sg) 7217 - *sg = &per_cpu(sched_group_core, cpu); 7086 + *sg = &per_cpu(sched_group_core, cpu).sg; 7218 7087 return cpu; 7219 7088 } 7220 7089 #endif 7221 7090 7222 - static DEFINE_PER_CPU(struct sched_domain, phys_domains); 7223 - static DEFINE_PER_CPU(struct sched_group, sched_group_phys); 7091 + static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); 7092 + 
static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); 7224 7093 7225 7094 static int 7226 - cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, 7227 - cpumask_t *mask) 7095 + cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, 7096 + struct sched_group **sg, struct cpumask *mask) 7228 7097 { 7229 7098 int group; 7230 7099 #ifdef CONFIG_SCHED_MC 7100 + /* FIXME: Use cpu_coregroup_mask. */ 7231 7101 *mask = cpu_coregroup_map(cpu); 7232 7102 cpus_and(*mask, *mask, *cpu_map); 7233 - group = first_cpu(*mask); 7103 + group = cpumask_first(mask); 7234 7104 #elif defined(CONFIG_SCHED_SMT) 7235 - *mask = per_cpu(cpu_sibling_map, cpu); 7236 - cpus_and(*mask, *mask, *cpu_map); 7237 - group = first_cpu(*mask); 7105 + cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); 7106 + group = cpumask_first(mask); 7238 7107 #else 7239 7108 group = cpu; 7240 7109 #endif 7241 7110 if (sg) 7242 - *sg = &per_cpu(sched_group_phys, group); 7111 + *sg = &per_cpu(sched_group_phys, group).sg; 7243 7112 return group; 7244 7113 } 7245 7114 ··· 7252 7123 static struct sched_group ***sched_group_nodes_bycpu; 7253 7124 7254 7125 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); 7255 - static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes); 7126 + static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); 7256 7127 7257 - static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, 7258 - struct sched_group **sg, cpumask_t *nodemask) 7128 + static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, 7129 + struct sched_group **sg, 7130 + struct cpumask *nodemask) 7259 7131 { 7260 7132 int group; 7133 + /* FIXME: use cpumask_of_node */ 7134 + node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu)); 7261 7135 7262 - *nodemask = node_to_cpumask(cpu_to_node(cpu)); 7263 - cpus_and(*nodemask, *nodemask, *cpu_map); 7264 - group = first_cpu(*nodemask); 7136 + cpumask_and(nodemask, pnodemask, cpu_map); 7137 + group = cpumask_first(nodemask); 7265 7138 7266 7139 if (sg) 7267 - *sg = &per_cpu(sched_group_allnodes, group); 7140 + *sg = &per_cpu(sched_group_allnodes, group).sg; 7268 7141 return group; 7269 7142 } 7270 7143 ··· 7278 7147 if (!sg) 7279 7148 return; 7280 7149 do { 7281 - for_each_cpu_mask_nr(j, sg->cpumask) { 7150 + for_each_cpu(j, sched_group_cpus(sg)) { 7282 7151 struct sched_domain *sd; 7283 7152 7284 - sd = &per_cpu(phys_domains, j); 7285 - if (j != first_cpu(sd->groups->cpumask)) { 7153 + sd = &per_cpu(phys_domains, j).sd; 7154 + if (j != cpumask_first(sched_group_cpus(sd->groups))) { 7286 7155 /* 7287 7156 * Only add "power" once for each 7288 7157 * physical package. 
··· 7299 7168 7300 7169 #ifdef CONFIG_NUMA 7301 7170 /* Free memory allocated for various sched_group structures */ 7302 - static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) 7171 + static void free_sched_groups(const struct cpumask *cpu_map, 7172 + struct cpumask *nodemask) 7303 7173 { 7304 7174 int cpu, i; 7305 7175 7306 - for_each_cpu_mask_nr(cpu, *cpu_map) { 7176 + for_each_cpu(cpu, cpu_map) { 7307 7177 struct sched_group **sched_group_nodes 7308 7178 = sched_group_nodes_bycpu[cpu]; 7309 7179 ··· 7313 7181 7314 7182 for (i = 0; i < nr_node_ids; i++) { 7315 7183 struct sched_group *oldsg, *sg = sched_group_nodes[i]; 7184 + /* FIXME: Use cpumask_of_node */ 7185 + node_to_cpumask_ptr(pnodemask, i); 7316 7186 7317 - *nodemask = node_to_cpumask(i); 7318 - cpus_and(*nodemask, *nodemask, *cpu_map); 7319 - if (cpus_empty(*nodemask)) 7187 + cpus_and(*nodemask, *pnodemask, *cpu_map); 7188 + if (cpumask_empty(nodemask)) 7320 7189 continue; 7321 7190 7322 7191 if (sg == NULL) ··· 7335 7202 } 7336 7203 } 7337 7204 #else /* !CONFIG_NUMA */ 7338 - static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) 7205 + static void free_sched_groups(const struct cpumask *cpu_map, 7206 + struct cpumask *nodemask) 7339 7207 { 7340 7208 } 7341 7209 #endif /* CONFIG_NUMA */ ··· 7362 7228 7363 7229 WARN_ON(!sd || !sd->groups); 7364 7230 7365 - if (cpu != first_cpu(sd->groups->cpumask)) 7231 + if (cpu != cpumask_first(sched_group_cpus(sd->groups))) 7366 7232 return; 7367 7233 7368 7234 child = sd->child; ··· 7427 7293 SD_INIT_FUNC(MC) 7428 7294 #endif 7429 7295 7430 - /* 7431 - * To minimize stack usage kmalloc room for cpumasks and share the 7432 - * space as the usage in build_sched_domains() dictates. Used only 7433 - * if the amount of space is significant. 
7434 - */ 7435 - struct allmasks { 7436 - cpumask_t tmpmask; /* make this one first */ 7437 - union { 7438 - cpumask_t nodemask; 7439 - cpumask_t this_sibling_map; 7440 - cpumask_t this_core_map; 7441 - }; 7442 - cpumask_t send_covered; 7443 - 7444 - #ifdef CONFIG_NUMA 7445 - cpumask_t domainspan; 7446 - cpumask_t covered; 7447 - cpumask_t notcovered; 7448 - #endif 7449 - }; 7450 - 7451 - #if NR_CPUS > 128 7452 - #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v 7453 - static inline void sched_cpumask_alloc(struct allmasks **masks) 7454 - { 7455 - *masks = kmalloc(sizeof(**masks), GFP_KERNEL); 7456 - } 7457 - static inline void sched_cpumask_free(struct allmasks *masks) 7458 - { 7459 - kfree(masks); 7460 - } 7461 - #else 7462 - #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v 7463 - static inline void sched_cpumask_alloc(struct allmasks **masks) 7464 - { } 7465 - static inline void sched_cpumask_free(struct allmasks *masks) 7466 - { } 7467 - #endif 7468 - 7469 - #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ 7470 - ((unsigned long)(a) + offsetof(struct allmasks, v)) 7471 - 7472 7296 static int default_relax_domain_level = -1; 7473 7297 7474 7298 static int __init setup_relax_domain_level(char *str) ··· 7466 7374 * Build sched domains for a given set of cpus and attach the sched domains 7467 7375 * to the individual cpus 7468 7376 */ 7469 - static int __build_sched_domains(const cpumask_t *cpu_map, 7377 + static int __build_sched_domains(const struct cpumask *cpu_map, 7470 7378 struct sched_domain_attr *attr) 7471 7379 { 7472 - int i; 7380 + int i, err = -ENOMEM; 7473 7381 struct root_domain *rd; 7474 - SCHED_CPUMASK_DECLARE(allmasks); 7475 - cpumask_t *tmpmask; 7382 + cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered, 7383 + tmpmask; 7476 7384 #ifdef CONFIG_NUMA 7385 + cpumask_var_t domainspan, covered, notcovered; 7477 7386 struct sched_group **sched_group_nodes = NULL; 7478 7387 int sd_allnodes = 0; 7479 7388 7389 + if (!alloc_cpumask_var(&domainspan, GFP_KERNEL)) 7390 + goto out; 7391 + if (!alloc_cpumask_var(&covered, GFP_KERNEL)) 7392 + goto free_domainspan; 7393 + if (!alloc_cpumask_var(&notcovered, GFP_KERNEL)) 7394 + goto free_covered; 7395 + #endif 7396 + 7397 + if (!alloc_cpumask_var(&nodemask, GFP_KERNEL)) 7398 + goto free_notcovered; 7399 + if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL)) 7400 + goto free_nodemask; 7401 + if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL)) 7402 + goto free_this_sibling_map; 7403 + if (!alloc_cpumask_var(&send_covered, GFP_KERNEL)) 7404 + goto free_this_core_map; 7405 + if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) 7406 + goto free_send_covered; 7407 + 7408 + #ifdef CONFIG_NUMA 7480 7409 /* 7481 7410 * Allocate the per-node list of sched groups 7482 7411 */ ··· 7505 7392 GFP_KERNEL); 7506 7393 if (!sched_group_nodes) { 7507 7394 printk(KERN_WARNING "Can not alloc sched group node list\n"); 7508 - return -ENOMEM; 7395 + goto free_tmpmask; 7509 7396 } 7510 7397 #endif 7511 7398 7512 7399 rd = alloc_rootdomain(); 7513 7400 if (!rd) { 7514 7401 printk(KERN_WARNING "Cannot alloc root domain\n"); 7515 - #ifdef CONFIG_NUMA 7516 - kfree(sched_group_nodes); 7517 - #endif 7518 - return -ENOMEM; 7402 + goto free_sched_groups; 7519 7403 } 7520 7404 7521 - /* get space for all scratch cpumask variables */ 7522 - sched_cpumask_alloc(&allmasks); 7523 - if (!allmasks) { 7524 - printk(KERN_WARNING "Cannot alloc cpumask array\n"); 7525 - kfree(rd); 7526 7405 #ifdef CONFIG_NUMA 7527 - 
kfree(sched_group_nodes); 7528 - #endif 7529 - return -ENOMEM; 7530 - } 7531 - 7532 - tmpmask = (cpumask_t *)allmasks; 7533 - 7534 - 7535 - #ifdef CONFIG_NUMA 7536 - sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; 7406 + sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes; 7537 7407 #endif 7538 7408 7539 7409 /* 7540 7410 * Set up domains for cpus specified by the cpu_map. 7541 7411 */ 7542 - for_each_cpu_mask_nr(i, *cpu_map) { 7412 + for_each_cpu(i, cpu_map) { 7543 7413 struct sched_domain *sd = NULL, *p; 7544 - SCHED_CPUMASK_VAR(nodemask, allmasks); 7545 7414 7415 + /* FIXME: use cpumask_of_node */ 7546 7416 *nodemask = node_to_cpumask(cpu_to_node(i)); 7547 7417 cpus_and(*nodemask, *nodemask, *cpu_map); 7548 7418 7549 7419 #ifdef CONFIG_NUMA 7550 - if (cpus_weight(*cpu_map) > 7551 - SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) { 7420 + if (cpumask_weight(cpu_map) > 7421 + SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) { 7552 7422 sd = &per_cpu(allnodes_domains, i); 7553 7423 SD_INIT(sd, ALLNODES); 7554 7424 set_domain_attribute(sd, attr); 7555 - sd->span = *cpu_map; 7425 + cpumask_copy(sched_domain_span(sd), cpu_map); 7556 7426 cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); 7557 7427 p = sd; 7558 7428 sd_allnodes = 1; ··· 7545 7449 sd = &per_cpu(node_domains, i); 7546 7450 SD_INIT(sd, NODE); 7547 7451 set_domain_attribute(sd, attr); 7548 - sched_domain_node_span(cpu_to_node(i), &sd->span); 7452 + sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); 7549 7453 sd->parent = p; 7550 7454 if (p) 7551 7455 p->child = sd; 7552 - cpus_and(sd->span, sd->span, *cpu_map); 7456 + cpumask_and(sched_domain_span(sd), 7457 + sched_domain_span(sd), cpu_map); 7553 7458 #endif 7554 7459 7555 7460 p = sd; 7556 - sd = &per_cpu(phys_domains, i); 7461 + sd = &per_cpu(phys_domains, i).sd; 7557 7462 SD_INIT(sd, CPU); 7558 7463 set_domain_attribute(sd, attr); 7559 - sd->span = *nodemask; 7464 + cpumask_copy(sched_domain_span(sd), nodemask); 7560 7465 sd->parent = p; 7561 7466 if (p) 7562 7467 p->child = sd; ··· 7565 7468 7566 7469 #ifdef CONFIG_SCHED_MC 7567 7470 p = sd; 7568 - sd = &per_cpu(core_domains, i); 7471 + sd = &per_cpu(core_domains, i).sd; 7569 7472 SD_INIT(sd, MC); 7570 7473 set_domain_attribute(sd, attr); 7571 - sd->span = cpu_coregroup_map(i); 7572 - cpus_and(sd->span, sd->span, *cpu_map); 7474 + *sched_domain_span(sd) = cpu_coregroup_map(i); 7475 + cpumask_and(sched_domain_span(sd), 7476 + sched_domain_span(sd), cpu_map); 7573 7477 sd->parent = p; 7574 7478 p->child = sd; 7575 7479 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); ··· 7578 7480 7579 7481 #ifdef CONFIG_SCHED_SMT 7580 7482 p = sd; 7581 - sd = &per_cpu(cpu_domains, i); 7483 + sd = &per_cpu(cpu_domains, i).sd; 7582 7484 SD_INIT(sd, SIBLING); 7583 7485 set_domain_attribute(sd, attr); 7584 - sd->span = per_cpu(cpu_sibling_map, i); 7585 - cpus_and(sd->span, sd->span, *cpu_map); 7486 + cpumask_and(sched_domain_span(sd), 7487 + &per_cpu(cpu_sibling_map, i), cpu_map); 7586 7488 sd->parent = p; 7587 7489 p->child = sd; 7588 7490 cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); ··· 7591 7493 7592 7494 #ifdef CONFIG_SCHED_SMT 7593 7495 /* Set up CPU (sibling) groups */ 7594 - for_each_cpu_mask_nr(i, *cpu_map) { 7595 - SCHED_CPUMASK_VAR(this_sibling_map, allmasks); 7596 - SCHED_CPUMASK_VAR(send_covered, allmasks); 7597 - 7598 - *this_sibling_map = per_cpu(cpu_sibling_map, i); 7599 - cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map); 7600 - if (i != first_cpu(*this_sibling_map)) 
7496 + for_each_cpu(i, cpu_map) { 7497 + cpumask_and(this_sibling_map, 7498 + &per_cpu(cpu_sibling_map, i), cpu_map); 7499 + if (i != cpumask_first(this_sibling_map)) 7601 7500 continue; 7602 7501 7603 7502 init_sched_build_groups(this_sibling_map, cpu_map, ··· 7605 7510 7606 7511 #ifdef CONFIG_SCHED_MC 7607 7512 /* Set up multi-core groups */ 7608 - for_each_cpu_mask_nr(i, *cpu_map) { 7609 - SCHED_CPUMASK_VAR(this_core_map, allmasks); 7610 - SCHED_CPUMASK_VAR(send_covered, allmasks); 7611 - 7513 + for_each_cpu(i, cpu_map) { 7514 + /* FIXME: Use cpu_coregroup_mask */ 7612 7515 *this_core_map = cpu_coregroup_map(i); 7613 7516 cpus_and(*this_core_map, *this_core_map, *cpu_map); 7614 - if (i != first_cpu(*this_core_map)) 7517 + if (i != cpumask_first(this_core_map)) 7615 7518 continue; 7616 7519 7617 7520 init_sched_build_groups(this_core_map, cpu_map, ··· 7620 7527 7621 7528 /* Set up physical groups */ 7622 7529 for (i = 0; i < nr_node_ids; i++) { 7623 - SCHED_CPUMASK_VAR(nodemask, allmasks); 7624 - SCHED_CPUMASK_VAR(send_covered, allmasks); 7625 - 7530 + /* FIXME: Use cpumask_of_node */ 7626 7531 *nodemask = node_to_cpumask(i); 7627 7532 cpus_and(*nodemask, *nodemask, *cpu_map); 7628 - if (cpus_empty(*nodemask)) 7533 + if (cpumask_empty(nodemask)) 7629 7534 continue; 7630 7535 7631 7536 init_sched_build_groups(nodemask, cpu_map, ··· 7634 7543 #ifdef CONFIG_NUMA 7635 7544 /* Set up node groups */ 7636 7545 if (sd_allnodes) { 7637 - SCHED_CPUMASK_VAR(send_covered, allmasks); 7638 - 7639 7546 init_sched_build_groups(cpu_map, cpu_map, 7640 7547 &cpu_to_allnodes_group, 7641 7548 send_covered, tmpmask); ··· 7642 7553 for (i = 0; i < nr_node_ids; i++) { 7643 7554 /* Set up node groups */ 7644 7555 struct sched_group *sg, *prev; 7645 - SCHED_CPUMASK_VAR(nodemask, allmasks); 7646 - SCHED_CPUMASK_VAR(domainspan, allmasks); 7647 - SCHED_CPUMASK_VAR(covered, allmasks); 7648 7556 int j; 7649 7557 7558 + /* FIXME: Use cpumask_of_node */ 7650 7559 *nodemask = node_to_cpumask(i); 7651 - cpus_clear(*covered); 7560 + cpumask_clear(covered); 7652 7561 7653 7562 cpus_and(*nodemask, *nodemask, *cpu_map); 7654 - if (cpus_empty(*nodemask)) { 7563 + if (cpumask_empty(nodemask)) { 7655 7564 sched_group_nodes[i] = NULL; 7656 7565 continue; 7657 7566 } 7658 7567 7659 7568 sched_domain_node_span(i, domainspan); 7660 - cpus_and(*domainspan, *domainspan, *cpu_map); 7569 + cpumask_and(domainspan, domainspan, cpu_map); 7661 7570 7662 - sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); 7571 + sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), 7572 + GFP_KERNEL, i); 7663 7573 if (!sg) { 7664 7574 printk(KERN_WARNING "Can not alloc domain group for " 7665 7575 "node %d\n", i); 7666 7576 goto error; 7667 7577 } 7668 7578 sched_group_nodes[i] = sg; 7669 - for_each_cpu_mask_nr(j, *nodemask) { 7579 + for_each_cpu(j, nodemask) { 7670 7580 struct sched_domain *sd; 7671 7581 7672 7582 sd = &per_cpu(node_domains, j); 7673 7583 sd->groups = sg; 7674 7584 } 7675 7585 sg->__cpu_power = 0; 7676 - sg->cpumask = *nodemask; 7586 + cpumask_copy(sched_group_cpus(sg), nodemask); 7677 7587 sg->next = sg; 7678 - cpus_or(*covered, *covered, *nodemask); 7588 + cpumask_or(covered, covered, nodemask); 7679 7589 prev = sg; 7680 7590 7681 7591 for (j = 0; j < nr_node_ids; j++) { 7682 - SCHED_CPUMASK_VAR(notcovered, allmasks); 7683 7592 int n = (i + j) % nr_node_ids; 7593 + /* FIXME: Use cpumask_of_node */ 7684 7594 node_to_cpumask_ptr(pnodemask, n); 7685 7595 7686 - cpus_complement(*notcovered, *covered); 7687 - 
cpus_and(*tmpmask, *notcovered, *cpu_map); 7688 - cpus_and(*tmpmask, *tmpmask, *domainspan); 7689 - if (cpus_empty(*tmpmask)) 7596 + cpumask_complement(notcovered, covered); 7597 + cpumask_and(tmpmask, notcovered, cpu_map); 7598 + cpumask_and(tmpmask, tmpmask, domainspan); 7599 + if (cpumask_empty(tmpmask)) 7690 7600 break; 7691 7601 7692 - cpus_and(*tmpmask, *tmpmask, *pnodemask); 7693 - if (cpus_empty(*tmpmask)) 7602 + cpumask_and(tmpmask, tmpmask, pnodemask); 7603 + if (cpumask_empty(tmpmask)) 7694 7604 continue; 7695 7605 7696 - sg = kmalloc_node(sizeof(struct sched_group), 7606 + sg = kmalloc_node(sizeof(struct sched_group) + 7607 + cpumask_size(), 7697 7608 GFP_KERNEL, i); 7698 7609 if (!sg) { 7699 7610 printk(KERN_WARNING ··· 7701 7612 goto error; 7702 7613 } 7703 7614 sg->__cpu_power = 0; 7704 - sg->cpumask = *tmpmask; 7615 + cpumask_copy(sched_group_cpus(sg), tmpmask); 7705 7616 sg->next = prev->next; 7706 - cpus_or(*covered, *covered, *tmpmask); 7617 + cpumask_or(covered, covered, tmpmask); 7707 7618 prev->next = sg; 7708 7619 prev = sg; 7709 7620 } ··· 7712 7623 7713 7624 /* Calculate CPU power for physical packages and nodes */ 7714 7625 #ifdef CONFIG_SCHED_SMT 7715 - for_each_cpu_mask_nr(i, *cpu_map) { 7716 - struct sched_domain *sd = &per_cpu(cpu_domains, i); 7626 + for_each_cpu(i, cpu_map) { 7627 + struct sched_domain *sd = &per_cpu(cpu_domains, i).sd; 7717 7628 7718 7629 init_sched_groups_power(i, sd); 7719 7630 } 7720 7631 #endif 7721 7632 #ifdef CONFIG_SCHED_MC 7722 - for_each_cpu_mask_nr(i, *cpu_map) { 7723 - struct sched_domain *sd = &per_cpu(core_domains, i); 7633 + for_each_cpu(i, cpu_map) { 7634 + struct sched_domain *sd = &per_cpu(core_domains, i).sd; 7724 7635 7725 7636 init_sched_groups_power(i, sd); 7726 7637 } 7727 7638 #endif 7728 7639 7729 - for_each_cpu_mask_nr(i, *cpu_map) { 7730 - struct sched_domain *sd = &per_cpu(phys_domains, i); 7640 + for_each_cpu(i, cpu_map) { 7641 + struct sched_domain *sd = &per_cpu(phys_domains, i).sd; 7731 7642 7732 7643 init_sched_groups_power(i, sd); 7733 7644 } ··· 7739 7650 if (sd_allnodes) { 7740 7651 struct sched_group *sg; 7741 7652 7742 - cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg, 7653 + cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, 7743 7654 tmpmask); 7744 7655 init_numa_sched_groups_power(sg); 7745 7656 } 7746 7657 #endif 7747 7658 7748 7659 /* Attach the domains */ 7749 - for_each_cpu_mask_nr(i, *cpu_map) { 7660 + for_each_cpu(i, cpu_map) { 7750 7661 struct sched_domain *sd; 7751 7662 #ifdef CONFIG_SCHED_SMT 7752 - sd = &per_cpu(cpu_domains, i); 7663 + sd = &per_cpu(cpu_domains, i).sd; 7753 7664 #elif defined(CONFIG_SCHED_MC) 7754 - sd = &per_cpu(core_domains, i); 7665 + sd = &per_cpu(core_domains, i).sd; 7755 7666 #else 7756 - sd = &per_cpu(phys_domains, i); 7667 + sd = &per_cpu(phys_domains, i).sd; 7757 7668 #endif 7758 7669 cpu_attach_domain(sd, rd, i); 7759 7670 } 7760 7671 7761 - sched_cpumask_free(allmasks); 7762 - return 0; 7672 + err = 0; 7673 + 7674 + free_tmpmask: 7675 + free_cpumask_var(tmpmask); 7676 + free_send_covered: 7677 + free_cpumask_var(send_covered); 7678 + free_this_core_map: 7679 + free_cpumask_var(this_core_map); 7680 + free_this_sibling_map: 7681 + free_cpumask_var(this_sibling_map); 7682 + free_nodemask: 7683 + free_cpumask_var(nodemask); 7684 + free_notcovered: 7685 + #ifdef CONFIG_NUMA 7686 + free_cpumask_var(notcovered); 7687 + free_covered: 7688 + free_cpumask_var(covered); 7689 + free_domainspan: 7690 + free_cpumask_var(domainspan); 7691 + out: 7692 + #endif 
7693 + return err; 7694 + 7695 + free_sched_groups: 7696 + #ifdef CONFIG_NUMA 7697 + kfree(sched_group_nodes); 7698 + #endif 7699 + goto free_tmpmask; 7763 7700 7764 7701 #ifdef CONFIG_NUMA 7765 7702 error: 7766 7703 free_sched_groups(cpu_map, tmpmask); 7767 - sched_cpumask_free(allmasks); 7768 - kfree(rd); 7769 - return -ENOMEM; 7704 + free_rootdomain(rd); 7705 + goto free_tmpmask; 7770 7706 #endif 7771 7707 } 7772 7708 7773 - static int build_sched_domains(const cpumask_t *cpu_map) 7709 + static int build_sched_domains(const struct cpumask *cpu_map) 7774 7710 { 7775 7711 return __build_sched_domains(cpu_map, NULL); 7776 7712 } 7777 7713 7778 - static cpumask_t *doms_cur; /* current sched domains */ 7714 + static struct cpumask *doms_cur; /* current sched domains */ 7779 7715 static int ndoms_cur; /* number of sched domains in 'doms_cur' */ 7780 7716 static struct sched_domain_attr *dattr_cur; 7781 7717 /* attribues of custom domains in 'doms_cur' */ 7782 7718 7783 7719 /* 7784 7720 * Special case: If a kmalloc of a doms_cur partition (array of 7785 - * cpumask_t) fails, then fallback to a single sched domain, 7786 - * as determined by the single cpumask_t fallback_doms. 7721 + * cpumask) fails, then fallback to a single sched domain, 7722 + * as determined by the single cpumask fallback_doms. 7787 7723 */ 7788 - static cpumask_t fallback_doms; 7724 + static cpumask_var_t fallback_doms; 7789 7725 7790 7726 /* 7791 7727 * arch_update_cpu_topology lets virtualized architectures update the ··· 7827 7713 * For now this just excludes isolated cpus, but could be used to 7828 7714 * exclude other special cases in the future. 7829 7715 */ 7830 - static int arch_init_sched_domains(const cpumask_t *cpu_map) 7716 + static int arch_init_sched_domains(const struct cpumask *cpu_map) 7831 7717 { 7832 7718 int err; 7833 7719 7834 7720 arch_update_cpu_topology(); 7835 7721 ndoms_cur = 1; 7836 - doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); 7722 + doms_cur = kmalloc(cpumask_size(), GFP_KERNEL); 7837 7723 if (!doms_cur) 7838 - doms_cur = &fallback_doms; 7839 - cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); 7724 + doms_cur = fallback_doms; 7725 + cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); 7840 7726 dattr_cur = NULL; 7841 7727 err = build_sched_domains(doms_cur); 7842 7728 register_sched_domain_sysctl(); ··· 7844 7730 return err; 7845 7731 } 7846 7732 7847 - static void arch_destroy_sched_domains(const cpumask_t *cpu_map, 7848 - cpumask_t *tmpmask) 7733 + static void arch_destroy_sched_domains(const struct cpumask *cpu_map, 7734 + struct cpumask *tmpmask) 7849 7735 { 7850 7736 free_sched_groups(cpu_map, tmpmask); 7851 7737 } ··· 7854 7740 * Detach sched domains from a group of cpus specified in cpu_map 7855 7741 * These cpus will now be attached to the NULL domain 7856 7742 */ 7857 - static void detach_destroy_domains(const cpumask_t *cpu_map) 7743 + static void detach_destroy_domains(const struct cpumask *cpu_map) 7858 7744 { 7859 - cpumask_t tmpmask; 7745 + /* Save because hotplug lock held. 
*/ 7746 + static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); 7860 7747 int i; 7861 7748 7862 - for_each_cpu_mask_nr(i, *cpu_map) 7749 + for_each_cpu(i, cpu_map) 7863 7750 cpu_attach_domain(NULL, &def_root_domain, i); 7864 7751 synchronize_sched(); 7865 - arch_destroy_sched_domains(cpu_map, &tmpmask); 7752 + arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); 7866 7753 } 7867 7754 7868 7755 /* handle null as "default" */ ··· 7888 7773 * doms_new[] to the current sched domain partitioning, doms_cur[]. 7889 7774 * It destroys each deleted domain and builds each new domain. 7890 7775 * 7891 - * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. 7776 + * 'doms_new' is an array of cpumask's of length 'ndoms_new'. 7892 7777 * The masks don't intersect (don't overlap.) We should setup one 7893 7778 * sched domain for each mask. CPUs not in any of the cpumasks will 7894 7779 * not be load balanced. If the same cpumask appears both in the ··· 7902 7787 * the single partition 'fallback_doms', it also forces the domains 7903 7788 * to be rebuilt. 7904 7789 * 7905 - * If doms_new == NULL it will be replaced with cpu_online_map. 7790 + * If doms_new == NULL it will be replaced with cpu_online_mask. 7906 7791 * ndoms_new == 0 is a special case for destroying existing domains, 7907 7792 * and it will not create the default domain. 7908 7793 * 7909 7794 * Call with hotplug lock held 7910 7795 */ 7911 - void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, 7796 + /* FIXME: Change to struct cpumask *doms_new[] */ 7797 + void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, 7912 7798 struct sched_domain_attr *dattr_new) 7913 7799 { 7914 7800 int i, j, n; ··· 7928 7812 /* Destroy deleted domains */ 7929 7813 for (i = 0; i < ndoms_cur; i++) { 7930 7814 for (j = 0; j < n && !new_topology; j++) { 7931 - if (cpus_equal(doms_cur[i], doms_new[j]) 7815 + if (cpumask_equal(&doms_cur[i], &doms_new[j]) 7932 7816 && dattrs_equal(dattr_cur, i, dattr_new, j)) 7933 7817 goto match1; 7934 7818 } ··· 7940 7824 7941 7825 if (doms_new == NULL) { 7942 7826 ndoms_cur = 0; 7943 - doms_new = &fallback_doms; 7944 - cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); 7827 + doms_new = fallback_doms; 7828 + cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map); 7945 7829 WARN_ON_ONCE(dattr_new); 7946 7830 } 7947 7831 7948 7832 /* Build new domains */ 7949 7833 for (i = 0; i < ndoms_new; i++) { 7950 7834 for (j = 0; j < ndoms_cur && !new_topology; j++) { 7951 - if (cpus_equal(doms_new[i], doms_cur[j]) 7835 + if (cpumask_equal(&doms_new[i], &doms_cur[j]) 7952 7836 && dattrs_equal(dattr_new, i, dattr_cur, j)) 7953 7837 goto match2; 7954 7838 } ··· 7960 7844 } 7961 7845 7962 7846 /* Remember the new sched domains */ 7963 - if (doms_cur != &fallback_doms) 7847 + if (doms_cur != fallback_doms) 7964 7848 kfree(doms_cur); 7965 7849 kfree(dattr_cur); /* kfree(NULL) is safe */ 7966 7850 doms_cur = doms_new; ··· 7989 7873 static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) 7990 7874 { 7991 7875 int ret; 7876 + unsigned int level = 0; 7992 7877 7993 - if (buf[0] != '0' && buf[0] != '1') 7878 + if (sscanf(buf, "%u", &level) != 1) 7879 + return -EINVAL; 7880 + 7881 + /* 7882 + * level is always be positive so don't check for 7883 + * level < POWERSAVINGS_BALANCE_NONE which is 0 7884 + * What happens on 0 or 1 byte write, 7885 + * need to check for count as well? 
7886 + */ 7887 + 7888 + if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS) 7994 7889 return -EINVAL; 7995 7890 7996 7891 if (smt) 7997 - sched_smt_power_savings = (buf[0] == '1'); 7892 + sched_smt_power_savings = level; 7998 7893 else 7999 - sched_mc_power_savings = (buf[0] == '1'); 7894 + sched_mc_power_savings = level; 8000 7895 8001 7896 ret = arch_reinit_sched_domains(); 8002 7897 ··· 8111 7984 8112 7985 void __init sched_init_smp(void) 8113 7986 { 8114 - cpumask_t non_isolated_cpus; 7987 + cpumask_var_t non_isolated_cpus; 7988 + 7989 + alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); 8115 7990 8116 7991 #if defined(CONFIG_NUMA) 8117 7992 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), ··· 8122 7993 #endif 8123 7994 get_online_cpus(); 8124 7995 mutex_lock(&sched_domains_mutex); 8125 - arch_init_sched_domains(&cpu_online_map); 8126 - cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); 8127 - if (cpus_empty(non_isolated_cpus)) 8128 - cpu_set(smp_processor_id(), non_isolated_cpus); 7996 + arch_init_sched_domains(cpu_online_mask); 7997 + cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); 7998 + if (cpumask_empty(non_isolated_cpus)) 7999 + cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); 8129 8000 mutex_unlock(&sched_domains_mutex); 8130 8001 put_online_cpus(); 8131 8002 ··· 8140 8011 init_hrtick(); 8141 8012 8142 8013 /* Move init over to a non-isolated CPU */ 8143 - if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0) 8014 + if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) 8144 8015 BUG(); 8145 8016 sched_init_granularity(); 8017 + free_cpumask_var(non_isolated_cpus); 8018 + 8019 + alloc_cpumask_var(&fallback_doms, GFP_KERNEL); 8020 + init_sched_rt_class(); 8146 8021 } 8147 8022 #else 8148 8023 void __init sched_init_smp(void) ··· 8460 8327 * During early bootup we pretend to be a normal task: 8461 8328 */ 8462 8329 current->sched_class = &fair_sched_class; 8330 + 8331 + /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ 8332 + alloc_bootmem_cpumask_var(&nohz_cpu_mask); 8333 + #ifdef CONFIG_SMP 8334 + #ifdef CONFIG_NO_HZ 8335 + alloc_bootmem_cpumask_var(&nohz.cpu_mask); 8336 + #endif 8337 + alloc_bootmem_cpumask_var(&cpu_isolated_map); 8338 + #endif /* SMP */ 8463 8339 8464 8340 scheduler_running = 1; 8465 8341 }
+28 -11
kernel/sched_cpupri.c
··· 67 67 * Returns: (int)bool - CPUs were found 68 68 */ 69 69 int cpupri_find(struct cpupri *cp, struct task_struct *p, 70 - cpumask_t *lowest_mask) 70 + struct cpumask *lowest_mask) 71 71 { 72 72 int idx = 0; 73 73 int task_pri = convert_prio(p->prio); 74 74 75 75 for_each_cpupri_active(cp->pri_active, idx) { 76 76 struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; 77 - cpumask_t mask; 78 77 79 78 if (idx >= task_pri) 80 79 break; 81 80 82 - cpus_and(mask, p->cpus_allowed, vec->mask); 83 - 84 - if (cpus_empty(mask)) 81 + if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) 85 82 continue; 86 83 87 - *lowest_mask = mask; 84 + cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); 88 85 return 1; 89 86 } 90 87 ··· 123 126 vec->count--; 124 127 if (!vec->count) 125 128 clear_bit(oldpri, cp->pri_active); 126 - cpu_clear(cpu, vec->mask); 129 + cpumask_clear_cpu(cpu, vec->mask); 127 130 128 131 spin_unlock_irqrestore(&vec->lock, flags); 129 132 } ··· 133 136 134 137 spin_lock_irqsave(&vec->lock, flags); 135 138 136 - cpu_set(cpu, vec->mask); 139 + cpumask_set_cpu(cpu, vec->mask); 137 140 vec->count++; 138 141 if (vec->count == 1) 139 142 set_bit(newpri, cp->pri_active); ··· 147 150 /** 148 151 * cpupri_init - initialize the cpupri structure 149 152 * @cp: The cpupri context 153 + * @bootmem: true if allocations need to use bootmem 150 154 * 151 - * Returns: (void) 155 + * Returns: -ENOMEM if memory fails. 152 156 */ 153 - void cpupri_init(struct cpupri *cp) 157 + int cpupri_init(struct cpupri *cp, bool bootmem) 154 158 { 155 159 int i; 156 160 ··· 162 164 163 165 spin_lock_init(&vec->lock); 164 166 vec->count = 0; 165 - cpus_clear(vec->mask); 167 + if (bootmem) 168 + alloc_bootmem_cpumask_var(&vec->mask); 169 + else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL)) 170 + goto cleanup; 166 171 } 167 172 168 173 for_each_possible_cpu(i) 169 174 cp->cpu_to_pri[i] = CPUPRI_INVALID; 175 + return 0; 176 + 177 + cleanup: 178 + for (i--; i >= 0; i--) 179 + free_cpumask_var(cp->pri_to_cpu[i].mask); 180 + return -ENOMEM; 170 181 } 171 182 183 + /** 184 + * cpupri_cleanup - clean up the cpupri structure 185 + * @cp: The cpupri context 186 + */ 187 + void cpupri_cleanup(struct cpupri *cp) 188 + { 189 + int i; 172 190 191 + for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) 192 + free_cpumask_var(cp->pri_to_cpu[i].mask); 193 + }
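
Annotation: cpupri_find() above no longer builds an intersection in an on-stack cpumask_t just to test it; it first asks whether the two masks share any CPU and only then fills the caller-supplied buffer. A rough illustration of that idiom follows; the helper is invented for this note, not taken from the patch.

#include <linux/cpumask.h>
#include <linux/types.h>

/*
 * Only compute the intersection of @a and @b into @result when it is
 * known to be non-empty; otherwise leave @result untouched.
 */
static bool example_and_if_nonempty(struct cpumask *result,
				    const struct cpumask *a,
				    const struct cpumask *b)
{
	if (cpumask_any_and(a, b) >= nr_cpu_ids)
		return false;	/* no CPU in common */

	cpumask_and(result, a, b);
	return true;
}
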
+3 -2
kernel/sched_cpupri.h
··· 14 14 struct cpupri_vec { 15 15 spinlock_t lock; 16 16 int count; 17 - cpumask_t mask; 17 + cpumask_var_t mask; 18 18 }; 19 19 20 20 struct cpupri { ··· 27 27 int cpupri_find(struct cpupri *cp, 28 28 struct task_struct *p, cpumask_t *lowest_mask); 29 29 void cpupri_set(struct cpupri *cp, int cpu, int pri); 30 - void cpupri_init(struct cpupri *cp); 30 + int cpupri_init(struct cpupri *cp, bool bootmem); 31 + void cpupri_cleanup(struct cpupri *cp); 31 32 #else 32 33 #define cpupri_set(cp, cpu, pri) do { } while (0) 33 34 #define cpupri_init() do { } while (0)
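
Annotation: with cpupri_init() now able to fail (it allocates one cpumask per priority vector) and cpupri_cleanup() added for teardown, callers are expected to check the result. The sketch below shows that call pattern with made-up helper names; it assumes it is compiled next to the scheduler so the local header is visible.

#include <linux/types.h>
#include "sched_cpupri.h"

/* Placeholder for whatever else the caller has to set up. */
static bool example_rest_of_setup_fails(void)
{
	return false;
}

static int example_setup_cpupri(struct cpupri *cp, bool bootmem)
{
	int err = cpupri_init(cp, bootmem);

	if (err)
		return err;		/* typically -ENOMEM */

	if (example_rest_of_setup_fails()) {
		cpupri_cleanup(cp);	/* new teardown hook */
		return -ENOMEM;
	}
	return 0;
}
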
+24 -8
kernel/sched_fair.c
··· 1019 1019 * search starts with cpus closest then further out as needed, 1020 1020 * so we always favor a closer, idle cpu. 1021 1021 * Domains may include CPUs that are not usable for migration, 1022 - * hence we need to mask them out (cpu_active_map) 1022 + * hence we need to mask them out (cpu_active_mask) 1023 1023 * 1024 1024 * Returns the CPU we should wake onto. 1025 1025 */ 1026 1026 #if defined(ARCH_HAS_SCHED_WAKE_IDLE) 1027 1027 static int wake_idle(int cpu, struct task_struct *p) 1028 1028 { 1029 - cpumask_t tmp; 1030 1029 struct sched_domain *sd; 1031 1030 int i; 1031 + unsigned int chosen_wakeup_cpu; 1032 + int this_cpu; 1033 + 1034 + /* 1035 + * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu 1036 + * are idle and this is not a kernel thread and this task's affinity 1037 + * allows it to be moved to preferred cpu, then just move! 1038 + */ 1039 + 1040 + this_cpu = smp_processor_id(); 1041 + chosen_wakeup_cpu = 1042 + cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu; 1043 + 1044 + if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP && 1045 + idle_cpu(cpu) && idle_cpu(this_cpu) && 1046 + p->mm && !(p->flags & PF_KTHREAD) && 1047 + cpu_isset(chosen_wakeup_cpu, p->cpus_allowed)) 1048 + return chosen_wakeup_cpu; 1032 1049 1033 1050 /* 1034 1051 * If it is idle, then it is the best cpu to run this task. ··· 1063 1046 if ((sd->flags & SD_WAKE_IDLE) 1064 1047 || ((sd->flags & SD_WAKE_IDLE_FAR) 1065 1048 && !task_hot(p, task_rq(p)->clock, sd))) { 1066 - cpus_and(tmp, sd->span, p->cpus_allowed); 1067 - cpus_and(tmp, tmp, cpu_active_map); 1068 - for_each_cpu_mask_nr(i, tmp) { 1069 - if (idle_cpu(i)) { 1049 + for_each_cpu_and(i, sched_domain_span(sd), 1050 + &p->cpus_allowed) { 1051 + if (cpu_active(i) && idle_cpu(i)) { 1070 1052 if (i != task_cpu(p)) { 1071 1053 schedstat_inc(p, 1072 1054 se.nr_wakeups_idle); ··· 1258 1242 * this_cpu and prev_cpu are present in: 1259 1243 */ 1260 1244 for_each_domain(this_cpu, sd) { 1261 - if (cpu_isset(prev_cpu, sd->span)) { 1245 + if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) { 1262 1246 this_sd = sd; 1263 1247 break; 1264 1248 } 1265 1249 } 1266 1250 1267 - if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) 1251 + if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed))) 1268 1252 goto out; 1269 1253 1270 1254 /*
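
Annotation: wake_idle() above loses its temporary cpumask_t by walking the intersection of the domain span and the task's affinity directly with for_each_cpu_and(). A minimal sketch of that iteration style, with placeholder masks, not from the patch:

#include <linux/cpumask.h>

/*
 * Visit CPUs that are both online and set in @allowed without
 * materialising the intersection in a temporary mask.
 */
static int example_first_allowed_online(const struct cpumask *allowed)
{
	int cpu;

	for_each_cpu_and(cpu, cpu_online_mask, allowed)
		return cpu;	/* first match is enough for this sketch */

	return nr_cpu_ids;	/* nothing suitable */
}
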
+44 -29
kernel/sched_rt.c
··· 15 15 if (!rq->online) 16 16 return; 17 17 18 - cpu_set(rq->cpu, rq->rd->rto_mask); 18 + cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); 19 19 /* 20 20 * Make sure the mask is visible before we set 21 21 * the overload count. That is checked to determine ··· 34 34 35 35 /* the order here really doesn't matter */ 36 36 atomic_dec(&rq->rd->rto_count); 37 - cpu_clear(rq->cpu, rq->rd->rto_mask); 37 + cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); 38 38 } 39 39 40 40 static void update_rt_migration(struct rq *rq) ··· 139 139 } 140 140 141 141 #ifdef CONFIG_SMP 142 - static inline cpumask_t sched_rt_period_mask(void) 142 + static inline const struct cpumask *sched_rt_period_mask(void) 143 143 { 144 144 return cpu_rq(smp_processor_id())->rd->span; 145 145 } 146 146 #else 147 - static inline cpumask_t sched_rt_period_mask(void) 147 + static inline const struct cpumask *sched_rt_period_mask(void) 148 148 { 149 - return cpu_online_map; 149 + return cpu_online_mask; 150 150 } 151 151 #endif 152 152 ··· 212 212 return rt_rq->rt_throttled; 213 213 } 214 214 215 - static inline cpumask_t sched_rt_period_mask(void) 215 + static inline const struct cpumask *sched_rt_period_mask(void) 216 216 { 217 - return cpu_online_map; 217 + return cpu_online_mask; 218 218 } 219 219 220 220 static inline ··· 241 241 int i, weight, more = 0; 242 242 u64 rt_period; 243 243 244 - weight = cpus_weight(rd->span); 244 + weight = cpumask_weight(rd->span); 245 245 246 246 spin_lock(&rt_b->rt_runtime_lock); 247 247 rt_period = ktime_to_ns(rt_b->rt_period); 248 - for_each_cpu_mask_nr(i, rd->span) { 248 + for_each_cpu(i, rd->span) { 249 249 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 250 250 s64 diff; 251 251 ··· 324 324 /* 325 325 * Greedy reclaim, take back as much as we can. 326 326 */ 327 - for_each_cpu_mask(i, rd->span) { 327 + for_each_cpu(i, rd->span) { 328 328 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 329 329 s64 diff; 330 330 ··· 429 429 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) 430 430 { 431 431 int i, idle = 1; 432 - cpumask_t span; 432 + const struct cpumask *span; 433 433 434 434 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) 435 435 return 1; 436 436 437 437 span = sched_rt_period_mask(); 438 - for_each_cpu_mask(i, span) { 438 + for_each_cpu(i, span) { 439 439 int enqueue = 0; 440 440 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); 441 441 struct rq *rq = rq_of_rt_rq(rt_rq); ··· 805 805 806 806 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) 807 807 { 808 - cpumask_t mask; 808 + cpumask_var_t mask; 809 809 810 810 if (rq->curr->rt.nr_cpus_allowed == 1) 811 811 return; 812 812 813 - if (p->rt.nr_cpus_allowed != 1 814 - && cpupri_find(&rq->rd->cpupri, p, &mask)) 813 + if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) 815 814 return; 816 815 817 - if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask)) 818 - return; 816 + if (p->rt.nr_cpus_allowed != 1 817 + && cpupri_find(&rq->rd->cpupri, p, mask)) 818 + goto free; 819 + 820 + if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask)) 821 + goto free; 819 822 820 823 /* 821 824 * There appears to be other cpus that can accept ··· 827 824 */ 828 825 requeue_task_rt(rq, p, 1); 829 826 resched_task(rq->curr); 827 + free: 828 + free_cpumask_var(mask); 830 829 } 831 830 832 831 #endif /* CONFIG_SMP */ ··· 919 914 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) 920 915 { 921 916 if (!task_running(rq, p) && 922 - (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) && 917 + 
(cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) && 923 918 (p->rt.nr_cpus_allowed > 1)) 924 919 return 1; 925 920 return 0; ··· 958 953 return next; 959 954 } 960 955 961 - static DEFINE_PER_CPU(cpumask_t, local_cpu_mask); 956 + static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); 962 957 963 958 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask) 964 959 { ··· 978 973 static int find_lowest_rq(struct task_struct *task) 979 974 { 980 975 struct sched_domain *sd; 981 - cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask); 976 + struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); 982 977 int this_cpu = smp_processor_id(); 983 978 int cpu = task_cpu(task); 984 979 ··· 993 988 * I guess we might want to change cpupri_find() to ignore those 994 989 * in the first place. 995 990 */ 996 - cpus_and(*lowest_mask, *lowest_mask, cpu_active_map); 991 + cpumask_and(lowest_mask, lowest_mask, cpu_active_mask); 997 992 998 993 /* 999 994 * At this point we have built a mask of cpus representing the ··· 1003 998 * We prioritize the last cpu that the task executed on since 1004 999 * it is most likely cache-hot in that location. 1005 1000 */ 1006 - if (cpu_isset(cpu, *lowest_mask)) 1001 + if (cpumask_test_cpu(cpu, lowest_mask)) 1007 1002 return cpu; 1008 1003 1009 1004 /* ··· 1018 1013 cpumask_t domain_mask; 1019 1014 int best_cpu; 1020 1015 1021 - cpus_and(domain_mask, sd->span, *lowest_mask); 1016 + cpumask_and(&domain_mask, sched_domain_span(sd), 1017 + lowest_mask); 1022 1018 1023 1019 best_cpu = pick_optimal_cpu(this_cpu, 1024 1020 &domain_mask); ··· 1060 1054 * Also make sure that it wasn't scheduled on its rq. 1061 1055 */ 1062 1056 if (unlikely(task_rq(task) != rq || 1063 - !cpu_isset(lowest_rq->cpu, 1064 - task->cpus_allowed) || 1057 + !cpumask_test_cpu(lowest_rq->cpu, 1058 + &task->cpus_allowed) || 1065 1059 task_running(rq, task) || 1066 1060 !task->se.on_rq)) { 1067 1061 ··· 1182 1176 1183 1177 next = pick_next_task_rt(this_rq); 1184 1178 1185 - for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) { 1179 + for_each_cpu(cpu, this_rq->rd->rto_mask) { 1186 1180 if (this_cpu == cpu) 1187 1181 continue; 1188 1182 ··· 1311 1305 } 1312 1306 1313 1307 static void set_cpus_allowed_rt(struct task_struct *p, 1314 - const cpumask_t *new_mask) 1308 + const struct cpumask *new_mask) 1315 1309 { 1316 - int weight = cpus_weight(*new_mask); 1310 + int weight = cpumask_weight(new_mask); 1317 1311 1318 1312 BUG_ON(!rt_task(p)); 1319 1313 ··· 1334 1328 update_rt_migration(rq); 1335 1329 } 1336 1330 1337 - p->cpus_allowed = *new_mask; 1331 + cpumask_copy(&p->cpus_allowed, new_mask); 1338 1332 p->rt.nr_cpus_allowed = weight; 1339 1333 } 1340 1334 ··· 1376 1370 */ 1377 1371 if (!rq->rt.rt_nr_running) 1378 1372 pull_rt_task(rq); 1373 + } 1374 + 1375 + static inline void init_sched_rt_class(void) 1376 + { 1377 + unsigned int i; 1378 + 1379 + for_each_possible_cpu(i) 1380 + alloc_cpumask_var(&per_cpu(local_cpu_mask, i), GFP_KERNEL); 1379 1381 } 1380 1382 #endif /* CONFIG_SMP */ 1381 1383 ··· 1555 1541 rcu_read_unlock(); 1556 1542 } 1557 1543 #endif /* CONFIG_SCHED_DEBUG */ 1544 +
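
Annotation: two patterns recur in the sched_rt.c hunks above: a GFP_ATOMIC cpumask_var_t allocated and freed inside a scheduling path, and per-CPU scratch masks (local_cpu_mask) set up once by the new init_sched_rt_class(). The sketch below shows the per-CPU variant with hypothetical names.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(cpumask_var_t, example_scratch_mask);

/*
 * Give every possible CPU its own scratch mask once at boot.  With
 * CONFIG_CPUMASK_OFFSTACK=n the allocation is a no-op and the mask
 * lives inside the per-CPU area itself.
 */
static void __init example_init_scratch_masks(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		alloc_cpumask_var(&per_cpu(example_scratch_mask, cpu),
				  GFP_KERNEL);
}
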
+2 -1
kernel/sched_stats.h
··· 42 42 for_each_domain(cpu, sd) { 43 43 enum cpu_idle_type itype; 44 44 45 - cpumask_scnprintf(mask_str, mask_len, sd->span); 45 + cpumask_scnprintf(mask_str, mask_len, 46 + sched_domain_span(sd)); 46 47 seq_printf(seq, "domain%d %s", dcount++, mask_str); 47 48 for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; 48 49 itype++) {
+1 -1
kernel/taskstats.c
··· 352 352 if (!data) 353 353 return -ENOMEM; 354 354 nla_strlcpy(data, na, len); 355 - ret = cpulist_parse(data, *mask); 355 + ret = cpulist_parse(data, mask); 356 356 kfree(data); 357 357 return ret; 358 358 }
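
Annotation: the taskstats change reflects the pointer-based cpulist_parse() calling convention: the destination is passed as a struct cpumask pointer instead of being dereferenced by the caller. A small, purely illustrative usage sketch:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Parse a "0-3,8" style CPU list into a freshly allocated mask. */
static int example_parse_cpulist(const char *buf, cpumask_var_t *out)
{
	int err;

	if (!alloc_cpumask_var(out, GFP_KERNEL))
		return -ENOMEM;

	err = cpulist_parse(buf, *out);
	if (err)
		free_cpumask_var(*out);
	return err;
}
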
+2
kernel/time/clockevents.c
··· 166 166 void clockevents_register_device(struct clock_event_device *dev) 167 167 { 168 168 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 169 + BUG_ON(!dev->cpumask); 170 + 169 171 /* 170 172 * A nsec2cyc multiplicator of 0 is invalid and we'd crash 171 173 * on it, so fix it up and emit a warning:
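
Annotation: clockevents_register_device() now refuses a device whose ->cpumask is unset, since the field has become a pointer rather than an embedded mask. The sketch below shows roughly what a per-CPU tick driver is expected to do before registering; the other required device fields are assumed to be filled in elsewhere, and the helper name is invented.

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

/* Bind a (partially initialised) clock event device to this CPU. */
static void example_register_local_clockevent(struct clock_event_device *dev)
{
	dev->cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(dev);
}
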
+1 -1
kernel/time/tick-broadcast.c
··· 150 150 */ 151 151 cpu = first_cpu(mask); 152 152 td = &per_cpu(tick_cpu_device, cpu); 153 - td->evtdev->broadcast(mask); 153 + td->evtdev->broadcast(&mask); 154 154 } 155 155 } 156 156
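
Annotation: the broadcast hook is now handed the target CPUs by pointer rather than by value, which is why the caller above passes &mask. A stub implementation under that signature might look like the following; the body is a placeholder, a real driver would raise a timer IPI here.

#include <linux/cpumask.h>
#include <linux/kernel.h>

static void example_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pr_debug("would send timer IPI to cpu %d\n", cpu);
}
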
+6 -6
kernel/time/tick-common.c
··· 136 136 */ 137 137 static void tick_setup_device(struct tick_device *td, 138 138 struct clock_event_device *newdev, int cpu, 139 - const cpumask_t *cpumask) 139 + const struct cpumask *cpumask) 140 140 { 141 141 ktime_t next_event; 142 142 void (*handler)(struct clock_event_device *) = NULL; ··· 171 171 * When the device is not per cpu, pin the interrupt to the 172 172 * current cpu: 173 173 */ 174 - if (!cpus_equal(newdev->cpumask, *cpumask)) 175 - irq_set_affinity(newdev->irq, *cpumask); 174 + if (!cpumask_equal(newdev->cpumask, cpumask)) 175 + irq_set_affinity(newdev->irq, cpumask); 176 176 177 177 /* 178 178 * When global broadcasting is active, check if the current ··· 202 202 spin_lock_irqsave(&tick_device_lock, flags); 203 203 204 204 cpu = smp_processor_id(); 205 - if (!cpu_isset(cpu, newdev->cpumask)) 205 + if (!cpumask_test_cpu(cpu, newdev->cpumask)) 206 206 goto out_bc; 207 207 208 208 td = &per_cpu(tick_cpu_device, cpu); 209 209 curdev = td->evtdev; 210 210 211 211 /* cpu local device ? */ 212 - if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) { 212 + if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) { 213 213 214 214 /* 215 215 * If the cpu affinity of the device interrupt can not ··· 222 222 * If we have a cpu local device already, do not replace it 223 223 * by a non cpu local device 224 224 */ 225 - if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu))) 225 + if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu))) 226 226 goto out_bc; 227 227 } 228 228
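
Annotation: the tick-common.c hunks replace cpus_equal(x, cpumask_of_cpu(cpu)) with cpumask_equal() against cpumask_of(). The trivial wrapper below only spells out that idiom; it is not part of the patch.

#include <linux/cpumask.h>
#include <linux/types.h>

/* True when @mask covers exactly the single CPU @cpu. */
static bool example_is_local_to(const struct cpumask *mask, int cpu)
{
	return cpumask_equal(mask, cpumask_of(cpu));
}
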
+5 -5
kernel/time/tick-sched.c
··· 144 144 if (!ts->tick_stopped) 145 145 return; 146 146 147 - cpu_clear(cpu, nohz_cpu_mask); 147 + cpumask_clear_cpu(cpu, nohz_cpu_mask); 148 148 now = ktime_get(); 149 149 ts->idle_waketime = now; 150 150 ··· 301 301 tick_do_timer_cpu = TICK_DO_TIMER_NONE; 302 302 303 303 if (delta_jiffies > 1) 304 - cpu_set(cpu, nohz_cpu_mask); 304 + cpumask_set_cpu(cpu, nohz_cpu_mask); 305 305 306 306 /* Skip reprogram of event if its not changed */ 307 307 if (ts->tick_stopped && ktime_equal(expires, dev->next_event)) ··· 319 319 /* 320 320 * sched tick not stopped! 321 321 */ 322 - cpu_clear(cpu, nohz_cpu_mask); 322 + cpumask_clear_cpu(cpu, nohz_cpu_mask); 323 323 goto out; 324 324 } 325 325 ··· 361 361 * softirq. 362 362 */ 363 363 tick_do_update_jiffies64(ktime_get()); 364 - cpu_clear(cpu, nohz_cpu_mask); 364 + cpumask_clear_cpu(cpu, nohz_cpu_mask); 365 365 } 366 366 raise_softirq_irqoff(TIMER_SOFTIRQ); 367 367 out: ··· 439 439 select_nohz_load_balancer(0); 440 440 now = ktime_get(); 441 441 tick_do_update_jiffies64(now); 442 - cpu_clear(cpu, nohz_cpu_mask); 442 + cpumask_clear_cpu(cpu, nohz_cpu_mask); 443 443 444 444 /* 445 445 * We stopped the tick in idle. Update process times would miss the
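
Annotation: nohz_cpu_mask is now updated through cpumask_set_cpu()/cpumask_clear_cpu(); the mask itself becomes a cpumask_var_t allocated with alloc_bootmem_cpumask_var() from the scheduler's early init code (see the kernel/sched.c hunk above), because it is needed before the slab allocator is up. A sketch of that lifecycle with a made-up mask name:

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/types.h>

static cpumask_var_t example_idle_mask;	/* stand-in for nohz_cpu_mask */

/* Early boot: slab is not available yet, so use the bootmem variant. */
static void __init example_idle_mask_init(void)
{
	alloc_bootmem_cpumask_var(&example_idle_mask);
}

/* Later, from the tick code: */
static void example_mark_idle(int cpu, bool idle)
{
	if (idle)
		cpumask_set_cpu(cpu, example_idle_mask);
	else
		cpumask_clear_cpu(cpu, example_idle_mask);
}
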
+2 -2
kernel/trace/trace.c
··· 2674 2674 2675 2675 mutex_lock(&tracing_cpumask_update_lock); 2676 2676 2677 - len = cpumask_scnprintf(mask_str, count, tracing_cpumask); 2677 + len = cpumask_scnprintf(mask_str, count, &tracing_cpumask); 2678 2678 if (count - len < 2) { 2679 2679 count = -EINVAL; 2680 2680 goto out_err; ··· 2695 2695 int err, cpu; 2696 2696 2697 2697 mutex_lock(&tracing_cpumask_update_lock); 2698 - err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 2698 + err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new); 2699 2699 if (err) 2700 2700 goto err_unlock; 2701 2701
+7
lib/Kconfig
··· 159 159 config HAVE_LMB 160 160 boolean 161 161 162 + config CPUMASK_OFFSTACK 163 + bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS 164 + help 165 + Use dynamic allocation for cpumask_var_t, instead of putting 166 + them on the stack. This is a bit more expensive, but avoids 167 + stack overflow. 168 + 162 169 endmenu
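
Annotation: the new CPUMASK_OFFSTACK option is what gives cpumask_var_t its two shapes. Roughly, and simplified from memory rather than quoted from this patch:

#ifdef CONFIG_CPUMASK_OFFSTACK
/* A real pointer: alloc_cpumask_var() kmallocs cpumask_size() bytes
 * and can fail. */
typedef struct cpumask *cpumask_var_t;
#else
/* A one-element array: "allocation" is a no-op that always succeeds
 * and the mask lives wherever the variable does. */
typedef struct cpumask cpumask_var_t[1];
#endif

Either way the variable can be passed wherever a struct cpumask pointer is expected, which is why the conversions in this merge compile identically under both settings.
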
+1 -1
mm/slub.c
··· 3642 3642 len < PAGE_SIZE - 60) { 3643 3643 len += sprintf(buf + len, " cpus="); 3644 3644 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3645 - l->cpus); 3645 + &l->cpus); 3646 3646 } 3647 3647 3648 3648 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&