Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus: (39 commits)
cpumask: Move deprecated functions to end of header.
cpumask: remove unused deprecated functions, avoid accusations of insanity
cpumask: use new-style cpumask ops in mm/quicklist.
cpumask: use mm_cpumask() wrapper: x86
cpumask: use mm_cpumask() wrapper: um
cpumask: use mm_cpumask() wrapper: mips
cpumask: use mm_cpumask() wrapper: mn10300
cpumask: use mm_cpumask() wrapper: m32r
cpumask: use mm_cpumask() wrapper: arm
cpumask: Use accessors for cpu_*_mask: um
cpumask: Use accessors for cpu_*_mask: powerpc
cpumask: Use accessors for cpu_*_mask: mips
cpumask: Use accessors for cpu_*_mask: m32r
cpumask: remove arch_send_call_function_ipi
cpumask: arch_send_call_function_ipi_mask: s390
cpumask: arch_send_call_function_ipi_mask: powerpc
cpumask: arch_send_call_function_ipi_mask: mips
cpumask: arch_send_call_function_ipi_mask: m32r
cpumask: arch_send_call_function_ipi_mask: alpha
cpumask: remove obsolete topology_core_siblings and topology_thread_siblings: ia64
...
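
The common thread in these commits is the move from the old value-style cpumask operations (cpu_set(), cpu_isset(), cpu_clear(), for_each_cpu_mask(), cpumask_of_cpu()) to the new pointer-based API (cpumask_set_cpu(), cpumask_test_cpu(), cpumask_clear_cpu(), for_each_cpu(), cpumask_of()). As rough orientation only (not code from this merge, just a sketch assuming a kernel build context, with made-up helper names), the two styles compare like this:

    #include <linux/cpumask.h>

    /* Old style (deprecated by this series): bits poked via cpu_* macros,
     * masks frequently passed around as full cpumask_t values. */
    static void old_style(cpumask_t *mask, int cpu)
    {
            cpu_set(cpu, *mask);            /* set bit 'cpu' */
            if (cpu_isset(cpu, *mask))      /* test bit 'cpu' */
                    cpu_clear(cpu, *mask);  /* clear bit 'cpu' */
    }

    /* New style: masks handled only through struct cpumask pointers. */
    static void new_style(struct cpumask *mask, int cpu)
    {
            cpumask_set_cpu(cpu, mask);
            if (cpumask_test_cpu(cpu, mask))
                    cpumask_clear_cpu(cpu, mask);
    }

Iteration follows the same pattern: for_each_cpu_mask(cpu, mask), which scans NR_CPUS bits, becomes for_each_cpu(cpu, &mask), which stops at nr_cpu_ids.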

+403 -724
+1 -1
arch/alpha/include/asm/smp.h
··· 47 47 extern int smp_num_cpus; 48 48 49 49 extern void arch_send_call_function_single_ipi(int cpu); 50 - extern void arch_send_call_function_ipi(cpumask_t mask); 50 + extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 51 51 52 52 #else /* CONFIG_SMP */ 53 53
-18
arch/alpha/include/asm/topology.h
··· 22 22 return node; 23 23 } 24 24 25 - static inline cpumask_t node_to_cpumask(int node) 26 - { 27 - cpumask_t node_cpu_mask = CPU_MASK_NONE; 28 - int cpu; 29 - 30 - for_each_online_cpu(cpu) { 31 - if (cpu_to_node(cpu) == node) 32 - cpu_set(cpu, node_cpu_mask); 33 - } 34 - 35 - #ifdef DEBUG_NUMA 36 - printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask); 37 - #endif 38 - 39 - return node_cpu_mask; 40 - } 41 - 42 25 extern struct cpumask node_to_cpumask_map[]; 43 26 /* FIXME: This is dumb, recalculating every time. But simple. */ 44 27 static const struct cpumask *cpumask_of_node(int node) ··· 38 55 return &node_to_cpumask_map[node]; 39 56 } 40 57 41 - #define pcibus_to_cpumask(bus) (cpu_online_map) 42 58 #define cpumask_of_pcibus(bus) (cpu_online_mask) 43 59 44 60 #endif /* !CONFIG_NUMA */
+7 -7
arch/alpha/kernel/smp.c
··· 548 548 549 549 550 550 static void 551 - send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation) 551 + send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) 552 552 { 553 553 int i; 554 554 555 555 mb(); 556 - for_each_cpu_mask(i, to_whom) 556 + for_each_cpu(i, to_whom) 557 557 set_bit(operation, &ipi_data[i].bits); 558 558 559 559 mb(); 560 - for_each_cpu_mask(i, to_whom) 560 + for_each_cpu(i, to_whom) 561 561 wripir(i); 562 562 } 563 563 ··· 624 624 printk(KERN_WARNING 625 625 "smp_send_reschedule: Sending IPI to self.\n"); 626 626 #endif 627 - send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE); 627 + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); 628 628 } 629 629 630 630 void ··· 636 636 if (hard_smp_processor_id() != boot_cpu_id) 637 637 printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n"); 638 638 #endif 639 - send_ipi_message(to_whom, IPI_CPU_STOP); 639 + send_ipi_message(&to_whom, IPI_CPU_STOP); 640 640 } 641 641 642 - void arch_send_call_function_ipi(cpumask_t mask) 642 + void arch_send_call_function_ipi_mask(const struct cpumask *mask) 643 643 { 644 644 send_ipi_message(mask, IPI_CALL_FUNC); 645 645 } 646 646 647 647 void arch_send_call_function_single_ipi(int cpu) 648 648 { 649 - send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE); 649 + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); 650 650 } 651 651 652 652 static void
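
The alpha conversion above is the template for the arch_send_call_function_ipi_mask() commits: the sender now takes a const struct cpumask *, and single-CPU callers pass cpumask_of(cpu), a pointer to a constant per-CPU mask, instead of materialising a cpumask_t on the stack via cpumask_of_cpu(). A minimal sketch of the calling convention, assuming a kernel build context (send_example_ipi() is a made-up name, not a function from these patches):

    #include <linux/kernel.h>
    #include <linux/cpumask.h>

    /* Takes a pointer, so no cpumask_t copy is made at the call site. */
    static void send_example_ipi(const struct cpumask *to_whom, int op)
    {
            int cpu;

            for_each_cpu(cpu, to_whom)
                    pr_debug("would raise IPI %d on cpu %d\n", op, cpu);
    }

    static void send_example_ipi_to_one(int cpu)
    {
            send_example_ipi(cpumask_of(cpu), 0);   /* was: cpumask_of_cpu(cpu) */
    }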
+4 -4
arch/arm/include/asm/cacheflush.h
··· 334 334 #ifndef CONFIG_CPU_CACHE_VIPT 335 335 static inline void flush_cache_mm(struct mm_struct *mm) 336 336 { 337 - if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) 337 + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) 338 338 __cpuc_flush_user_all(); 339 339 } 340 340 341 341 static inline void 342 342 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) 343 343 { 344 - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) 344 + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) 345 345 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), 346 346 vma->vm_flags); 347 347 } ··· 349 349 static inline void 350 350 flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) 351 351 { 352 - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { 352 + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { 353 353 unsigned long addr = user_addr & PAGE_MASK; 354 354 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); 355 355 } ··· 360 360 unsigned long uaddr, void *kaddr, 361 361 unsigned long len, int write) 362 362 { 363 - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { 363 + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { 364 364 unsigned long addr = (unsigned long)kaddr; 365 365 __cpuc_coherent_kern_range(addr, addr + len); 366 366 }
+4 -3
arch/arm/include/asm/mmu_context.h
··· 103 103 104 104 #ifdef CONFIG_SMP 105 105 /* check for possible thread migration */ 106 - if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask)) 106 + if (!cpumask_empty(mm_cpumask(next)) && 107 + !cpumask_test_cpu(cpu, mm_cpumask(next))) 107 108 __flush_icache_all(); 108 109 #endif 109 - if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) { 110 + if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { 110 111 check_context(next); 111 112 cpu_switch_mm(next->pgd, next); 112 113 if (cache_is_vivt()) 113 - cpu_clear(cpu, prev->cpu_vm_mask); 114 + cpumask_clear_cpu(cpu, mm_cpumask(prev)); 114 115 } 115 116 #endif 116 117 }
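
The arm mmu_context.h hunk shows the mm_cpumask() accessor that the "use mm_cpumask() wrapper" commits introduce across architectures: instead of touching mm->cpu_vm_mask directly with cpu_set()/cpu_clear()/cpu_test_and_set(), code applies the cpumask_* operations to mm_cpumask(mm). A minimal sketch of the pattern, assuming a kernel build context (track_mm_on_this_cpu() is an illustrative name, not a kernel function):

    #include <linux/mm_types.h>
    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /* Roughly what the converted switch_mm() implementations do with the mask. */
    static void track_mm_on_this_cpu(struct mm_struct *prev, struct mm_struct *next)
    {
            unsigned int cpu = smp_processor_id();

            /* was: cpu_test_and_set(cpu, next->cpu_vm_mask) */
            if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
                    /* arch-specific page-table switch would go here */
                    /* was: cpu_clear(cpu, prev->cpu_vm_mask) */
                    cpumask_clear_cpu(cpu, mm_cpumask(prev));
            }
    }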
-1
arch/arm/include/asm/smp.h
··· 93 93 94 94 extern void arch_send_call_function_single_ipi(int cpu); 95 95 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 96 - #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask 97 96 98 97 /* 99 98 * show local interrupt info
+2 -2
arch/arm/include/asm/tlbflush.h
··· 350 350 if (tlb_flag(TLB_WB)) 351 351 dsb(); 352 352 353 - if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) { 353 + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { 354 354 if (tlb_flag(TLB_V3_FULL)) 355 355 asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); 356 356 if (tlb_flag(TLB_V4_U_FULL)) ··· 388 388 if (tlb_flag(TLB_WB)) 389 389 dsb(); 390 390 391 - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { 391 + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { 392 392 if (tlb_flag(TLB_V3_PAGE)) 393 393 asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc"); 394 394 if (tlb_flag(TLB_V4_U_PAGE))
+5 -5
arch/arm/kernel/smp.c
··· 189 189 read_lock(&tasklist_lock); 190 190 for_each_process(p) { 191 191 if (p->mm) 192 - cpu_clear(cpu, p->mm->cpu_vm_mask); 192 + cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); 193 193 } 194 194 read_unlock(&tasklist_lock); 195 195 ··· 257 257 atomic_inc(&mm->mm_users); 258 258 atomic_inc(&mm->mm_count); 259 259 current->active_mm = mm; 260 - cpu_set(cpu, mm->cpu_vm_mask); 260 + cpumask_set_cpu(cpu, mm_cpumask(mm)); 261 261 cpu_switch_mm(mm->pgd, mm); 262 262 enter_lazy_tlb(mm, current); 263 263 local_flush_tlb_all(); ··· 643 643 void flush_tlb_mm(struct mm_struct *mm) 644 644 { 645 645 if (tlb_ops_need_broadcast()) 646 - on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask); 646 + on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm)); 647 647 else 648 648 local_flush_tlb_mm(mm); 649 649 } ··· 654 654 struct tlb_args ta; 655 655 ta.ta_vma = vma; 656 656 ta.ta_start = uaddr; 657 - on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask); 657 + on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm)); 658 658 } else 659 659 local_flush_tlb_page(vma, uaddr); 660 660 } ··· 677 677 ta.ta_vma = vma; 678 678 ta.ta_start = start; 679 679 ta.ta_end = end; 680 - on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask); 680 + on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm)); 681 681 } else 682 682 local_flush_tlb_range(vma, start, end); 683 683 }
+1 -1
arch/arm/mm/context.c
··· 59 59 } 60 60 spin_unlock(&cpu_asid_lock); 61 61 62 - mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id()); 62 + cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); 63 63 mm->context.id = asid; 64 64 }
+5 -5
arch/arm/mm/flush.c
··· 50 50 void flush_cache_mm(struct mm_struct *mm) 51 51 { 52 52 if (cache_is_vivt()) { 53 - if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) 53 + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) 54 54 __cpuc_flush_user_all(); 55 55 return; 56 56 } ··· 73 73 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) 74 74 { 75 75 if (cache_is_vivt()) { 76 - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) 76 + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) 77 77 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), 78 78 vma->vm_flags); 79 79 return; ··· 97 97 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) 98 98 { 99 99 if (cache_is_vivt()) { 100 - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { 100 + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { 101 101 unsigned long addr = user_addr & PAGE_MASK; 102 102 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); 103 103 } ··· 113 113 unsigned long len, int write) 114 114 { 115 115 if (cache_is_vivt()) { 116 - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { 116 + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { 117 117 unsigned long addr = (unsigned long)kaddr; 118 118 __cpuc_coherent_kern_range(addr, addr + len); 119 119 } ··· 126 126 } 127 127 128 128 /* VIPT non-aliasing cache */ 129 - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) && 129 + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) && 130 130 vma->vm_flags & VM_EXEC) { 131 131 unsigned long addr = (unsigned long)kaddr; 132 132 /* only flushing the kernel mapping on non-aliasing VIPT */
-1
arch/ia64/include/asm/smp.h
··· 127 127 128 128 extern void arch_send_call_function_single_ipi(int cpu); 129 129 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 130 - #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask 131 130 132 131 #else /* CONFIG_SMP */ 133 132
-3
arch/ia64/include/asm/topology.h
··· 33 33 /* 34 34 * Returns a bitmask of CPUs on Node 'node'. 35 35 */ 36 - #define node_to_cpumask(node) (node_to_cpu_mask[node]) 37 36 #define cpumask_of_node(node) (&node_to_cpu_mask[node]) 38 37 39 38 /* ··· 103 104 #ifdef CONFIG_SMP 104 105 #define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id) 105 106 #define topology_core_id(cpu) (cpu_data(cpu)->core_id) 106 - #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 107 - #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 108 107 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 109 108 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 110 109 #define smt_capable() (smp_num_siblings > 1)
+1 -1
arch/ia64/kernel/smp.c
··· 302 302 return; 303 303 } 304 304 305 - smp_call_function_mask(mm->cpu_vm_mask, 305 + smp_call_function_many(mm_cpumask(mm), 306 306 (void (*)(void *))local_finish_flush_tlb_mm, mm, 1); 307 307 local_irq_disable(); 308 308 local_finish_flush_tlb_mm(mm);
+2 -2
arch/m32r/include/asm/mmu_context.h
··· 127 127 128 128 if (prev != next) { 129 129 #ifdef CONFIG_SMP 130 - cpu_set(cpu, next->cpu_vm_mask); 130 + cpumask_set_cpu(cpu, mm_cpumask(next)); 131 131 #endif /* CONFIG_SMP */ 132 132 /* Set MPTB = next->pgd */ 133 133 *(volatile unsigned long *)MPTB = (unsigned long)next->pgd; ··· 135 135 } 136 136 #ifdef CONFIG_SMP 137 137 else 138 - if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) 138 + if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) 139 139 activate_context(next); 140 140 #endif /* CONFIG_SMP */ 141 141 }
+1 -1
arch/m32r/include/asm/smp.h
··· 88 88 extern unsigned long send_IPI_mask_phys(cpumask_t, int, int); 89 89 90 90 extern void arch_send_call_function_single_ipi(int cpu); 91 - extern void arch_send_call_function_ipi(cpumask_t mask); 91 + extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 92 92 93 93 #endif /* not __ASSEMBLY__ */ 94 94
+15 -15
arch/m32r/kernel/smp.c
··· 85 85 void smp_local_timer_interrupt(void); 86 86 87 87 static void send_IPI_allbutself(int, int); 88 - static void send_IPI_mask(cpumask_t, int, int); 88 + static void send_IPI_mask(const struct cpumask *, int, int); 89 89 unsigned long send_IPI_mask_phys(cpumask_t, int, int); 90 90 91 91 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ ··· 113 113 void smp_send_reschedule(int cpu_id) 114 114 { 115 115 WARN_ON(cpu_is_offline(cpu_id)); 116 - send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1); 116 + send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1); 117 117 } 118 118 119 119 /*==========================================================================* ··· 168 168 spin_lock(&flushcache_lock); 169 169 mask=cpus_addr(cpumask); 170 170 atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); 171 - send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0); 171 + send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); 172 172 _flush_cache_copyback_all(); 173 173 while (flushcache_cpumask) 174 174 mb(); ··· 264 264 preempt_disable(); 265 265 cpu_id = smp_processor_id(); 266 266 mmc = &mm->context[cpu_id]; 267 - cpu_mask = mm->cpu_vm_mask; 267 + cpu_mask = *mm_cpumask(mm); 268 268 cpu_clear(cpu_id, cpu_mask); 269 269 270 270 if (*mmc != NO_CONTEXT) { ··· 273 273 if (mm == current->mm) 274 274 activate_context(mm); 275 275 else 276 - cpu_clear(cpu_id, mm->cpu_vm_mask); 276 + cpumask_clear_cpu(cpu_id, mm_cpumask(mm)); 277 277 local_irq_restore(flags); 278 278 } 279 279 if (!cpus_empty(cpu_mask)) ··· 334 334 preempt_disable(); 335 335 cpu_id = smp_processor_id(); 336 336 mmc = &mm->context[cpu_id]; 337 - cpu_mask = mm->cpu_vm_mask; 337 + cpu_mask = *mm_cpumask(mm); 338 338 cpu_clear(cpu_id, cpu_mask); 339 339 340 340 #ifdef DEBUG_SMP ··· 424 424 * We have to send the IPI only to 425 425 * CPUs affected. 426 426 */ 427 - send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0); 427 + send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); 428 428 429 429 while (!cpus_empty(flush_cpumask)) { 430 430 /* nothing. 
lockup detection does not belong here */ ··· 469 469 if (flush_mm == current->active_mm) 470 470 activate_context(flush_mm); 471 471 else 472 - cpu_clear(cpu_id, flush_mm->cpu_vm_mask); 472 + cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm)); 473 473 } else { 474 474 unsigned long va = flush_va; 475 475 ··· 546 546 for ( ; ; ); 547 547 } 548 548 549 - void arch_send_call_function_ipi(cpumask_t mask) 549 + void arch_send_call_function_ipi_mask(const struct cpumask *mask) 550 550 { 551 551 send_IPI_mask(mask, CALL_FUNCTION_IPI, 0); 552 552 } 553 553 554 554 void arch_send_call_function_single_ipi(int cpu) 555 555 { 556 - send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0); 556 + send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0); 557 557 } 558 558 559 559 /*==========================================================================* ··· 729 729 cpumask = cpu_online_map; 730 730 cpu_clear(smp_processor_id(), cpumask); 731 731 732 - send_IPI_mask(cpumask, ipi_num, try); 732 + send_IPI_mask(&cpumask, ipi_num, try); 733 733 } 734 734 735 735 /*==========================================================================* ··· 752 752 * ---------- --- -------------------------------------------------------- 753 753 * 754 754 *==========================================================================*/ 755 - static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try) 755 + static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try) 756 756 { 757 757 cpumask_t physid_mask, tmp; 758 758 int cpu_id, phys_id; ··· 761 761 if (num_cpus <= 1) /* NO MP */ 762 762 return; 763 763 764 - cpus_and(tmp, cpumask, cpu_online_map); 765 - BUG_ON(!cpus_equal(cpumask, tmp)); 764 + cpumask_and(&tmp, cpumask, cpu_online_mask); 765 + BUG_ON(!cpumask_equal(cpumask, &tmp)); 766 766 767 767 physid_mask = CPU_MASK_NONE; 768 - for_each_cpu_mask(cpu_id, cpumask){ 768 + for_each_cpu(cpu_id, cpumask) { 769 769 if ((phys_id = cpu_to_physid(cpu_id)) != -1) 770 770 cpu_set(phys_id, physid_mask); 771 771 }
+1 -1
arch/m32r/kernel/smpboot.c
··· 178 178 for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++) 179 179 physid_set(phys_id, phys_cpu_present_map); 180 180 #ifndef CONFIG_HOTPLUG_CPU 181 - cpu_present_map = cpu_possible_map; 181 + init_cpu_present(&cpu_possible_map); 182 182 #endif 183 183 184 184 show_mp_info(nr_cpu);
+1 -1
arch/mips/alchemy/common/time.c
··· 88 88 .irq = AU1000_RTC_MATCH2_INT, 89 89 .set_next_event = au1x_rtcmatch2_set_next_event, 90 90 .set_mode = au1x_rtcmatch2_set_mode, 91 - .cpumask = CPU_MASK_ALL_PTR, 91 + .cpumask = cpu_all_mask, 92 92 }; 93 93 94 94 static struct irqaction au1x_rtcmatch2_irqaction = {
-2
arch/mips/include/asm/mach-ip27/topology.h
··· 24 24 25 25 #define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid) 26 26 #define parent_node(node) (node) 27 - #define node_to_cpumask(node) (hub_data(node)->h_cpus) 28 27 #define cpumask_of_node(node) (&hub_data(node)->h_cpus) 29 28 struct pci_bus; 30 29 extern int pcibus_to_node(struct pci_bus *); 31 30 32 - #define pcibus_to_cpumask(bus) (cpu_online_map) 33 31 #define cpumask_of_pcibus(bus) (cpu_online_mask) 34 32 35 33 extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
+5 -5
arch/mips/include/asm/mmu_context.h
··· 178 178 * Mark current->active_mm as not "active" anymore. 179 179 * We don't want to mislead possible IPI tlb flush routines. 180 180 */ 181 - cpu_clear(cpu, prev->cpu_vm_mask); 182 - cpu_set(cpu, next->cpu_vm_mask); 181 + cpumask_clear_cpu(cpu, mm_cpumask(prev)); 182 + cpumask_set_cpu(cpu, mm_cpumask(next)); 183 183 184 184 local_irq_restore(flags); 185 185 } ··· 235 235 TLBMISS_HANDLER_SETUP_PGD(next->pgd); 236 236 237 237 /* mark mmu ownership change */ 238 - cpu_clear(cpu, prev->cpu_vm_mask); 239 - cpu_set(cpu, next->cpu_vm_mask); 238 + cpumask_clear_cpu(cpu, mm_cpumask(prev)); 239 + cpumask_set_cpu(cpu, mm_cpumask(next)); 240 240 241 241 local_irq_restore(flags); 242 242 } ··· 258 258 259 259 local_irq_save(flags); 260 260 261 - if (cpu_isset(cpu, mm->cpu_vm_mask)) { 261 + if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { 262 262 get_new_mmu_context(mm, cpu); 263 263 #ifdef CONFIG_MIPS_MT_SMTC 264 264 /* See comments for similar code above */
+1 -1
arch/mips/include/asm/smp-ops.h
··· 19 19 20 20 struct plat_smp_ops { 21 21 void (*send_ipi_single)(int cpu, unsigned int action); 22 - void (*send_ipi_mask)(cpumask_t mask, unsigned int action); 22 + void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action); 23 23 void (*init_secondary)(void); 24 24 void (*smp_finish)(void); 25 25 void (*cpus_done)(void);
+1 -1
arch/mips/include/asm/smp.h
··· 78 78 extern asmlinkage void smp_call_function_interrupt(void); 79 79 80 80 extern void arch_send_call_function_single_ipi(int cpu); 81 - extern void arch_send_call_function_ipi(cpumask_t mask); 81 + extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 82 82 83 83 #endif /* __ASM_SMP_H */
+3 -3
arch/mips/kernel/smp-cmp.c
··· 80 80 local_irq_restore(flags); 81 81 } 82 82 83 - static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action) 83 + static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action) 84 84 { 85 85 unsigned int i; 86 86 87 - for_each_cpu_mask(i, mask) 87 + for_each_cpu(i, mask) 88 88 cmp_send_ipi_single(i, action); 89 89 } 90 90 ··· 171 171 172 172 for (i = 1; i < NR_CPUS; i++) { 173 173 if (amon_cpu_avail(i)) { 174 - cpu_set(i, cpu_possible_map); 174 + set_cpu_possible(i, true); 175 175 __cpu_number_map[i] = ++ncpu; 176 176 __cpu_logical_map[ncpu] = i; 177 177 }
+3 -3
arch/mips/kernel/smp-mt.c
··· 70 70 write_vpe_c0_vpeconf0(tmp); 71 71 72 72 /* Record this as available CPU */ 73 - cpu_set(tc, cpu_possible_map); 73 + set_cpu_possible(tc, true); 74 74 __cpu_number_map[tc] = ++ncpu; 75 75 __cpu_logical_map[ncpu] = tc; 76 76 } ··· 141 141 local_irq_restore(flags); 142 142 } 143 143 144 - static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action) 144 + static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action) 145 145 { 146 146 unsigned int i; 147 147 148 - for_each_cpu_mask(i, mask) 148 + for_each_cpu(i, mask) 149 149 vsmp_send_ipi_single(i, action); 150 150 } 151 151
+2 -1
arch/mips/kernel/smp-up.c
··· 18 18 panic(KERN_ERR "%s called", __func__); 19 19 } 20 20 21 - static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action) 21 + static inline void up_send_ipi_mask(const struct cpumask *mask, 22 + unsigned int action) 22 23 { 23 24 panic(KERN_ERR "%s called", __func__); 24 25 }
+4 -4
arch/mips/kernel/smp.c
··· 128 128 cpu_idle(); 129 129 } 130 130 131 - void arch_send_call_function_ipi(cpumask_t mask) 131 + void arch_send_call_function_ipi_mask(const struct cpumask *mask) 132 132 { 133 133 mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); 134 134 } ··· 183 183 mp_ops->prepare_cpus(max_cpus); 184 184 set_cpu_sibling_map(0); 185 185 #ifndef CONFIG_HOTPLUG_CPU 186 - cpu_present_map = cpu_possible_map; 186 + init_cpu_present(&cpu_possible_map); 187 187 #endif 188 188 } 189 189 190 190 /* preload SMP state for boot cpu */ 191 191 void __devinit smp_prepare_boot_cpu(void) 192 192 { 193 - cpu_set(0, cpu_possible_map); 194 - cpu_set(0, cpu_online_map); 193 + set_cpu_possible(0, true); 194 + set_cpu_online(0, true); 195 195 cpu_set(0, cpu_callin_map); 196 196 } 197 197
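
The hunk above is typical of the "Use accessors for cpu_*_mask" commits: direct writes to cpu_possible_map, cpu_present_map and cpu_online_map give way to set_cpu_possible(), set_cpu_present(), set_cpu_online() and init_cpu_present(). A short sketch of the boot-time idiom, assuming a kernel build context (register_secondary_cpus() is a made-up helper):

    #include <linux/cpumask.h>

    static void register_secondary_cpus(unsigned int ncpus)
    {
            unsigned int cpu;

            /* was: cpu_set(cpu, cpu_possible_map) */
            for (cpu = 1; cpu < ncpus && cpu < nr_cpu_ids; cpu++)
                    set_cpu_possible(cpu, true);

            /* was: cpu_present_map = cpu_possible_map */
            init_cpu_present(cpu_possible_mask);
    }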
+3 -3
arch/mips/kernel/smtc.c
··· 305 305 */ 306 306 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; 307 307 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { 308 - cpu_set(i, cpu_possible_map); 308 + set_cpu_possible(i, true); 309 309 __cpu_number_map[i] = i; 310 310 __cpu_logical_map[i] = i; 311 311 } ··· 525 525 * Pull any physically present but unused TCs out of circulation. 526 526 */ 527 527 while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { 528 - cpu_clear(tc, cpu_possible_map); 529 - cpu_clear(tc, cpu_present_map); 528 + set_cpu_possible(tc, false); 529 + set_cpu_present(tc, false); 530 530 tc++; 531 531 } 532 532
+3 -2
arch/mips/mipssim/sim_smtc.c
··· 43 43 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 44 44 } 45 45 46 - static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action) 46 + static inline void ssmtc_send_ipi_mask(const struct cpumask *mask, 47 + unsigned int action) 47 48 { 48 49 unsigned int i; 49 50 50 - for_each_cpu_mask(i, mask) 51 + for_each_cpu(i, mask) 51 52 ssmtc_send_ipi_single(i, action); 52 53 } 53 54
+1 -1
arch/mips/mm/c-octeon.c
··· 79 79 * cores it has been used on 80 80 */ 81 81 if (vma) 82 - mask = vma->vm_mm->cpu_vm_mask; 82 + mask = *mm_cpumask(vma->vm_mm); 83 83 else 84 84 mask = cpu_online_map; 85 85 cpu_clear(cpu, mask);
+2 -2
arch/mips/mti-malta/malta-smtc.c
··· 21 21 smtc_send_ipi(cpu, LINUX_SMP_IPI, action); 22 22 } 23 23 24 - static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action) 24 + static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action) 25 25 { 26 26 unsigned int i; 27 27 28 - for_each_cpu_mask(i, mask) 28 + for_each_cpu(i, mask) 29 29 msmtc_send_ipi_single(i, action); 30 30 } 31 31
+2 -2
arch/mips/pmc-sierra/yosemite/smp.c
··· 97 97 } 98 98 } 99 99 100 - static void yos_send_ipi_mask(cpumask_t mask, unsigned int action) 100 + static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action) 101 101 { 102 102 unsigned int i; 103 103 104 - for_each_cpu_mask(i, mask) 104 + for_each_cpu(i, mask) 105 105 yos_send_ipi_single(i, action); 106 106 } 107 107
+1 -1
arch/mips/sgi-ip27/ip27-memory.c
··· 421 421 422 422 /* 423 423 * A node with nothing. We use it to avoid any special casing in 424 - * node_to_cpumask 424 + * cpumask_of_node 425 425 */ 426 426 static struct node_data null_node = { 427 427 .hub = {
+2 -2
arch/mips/sgi-ip27/ip27-smp.c
··· 165 165 REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq); 166 166 } 167 167 168 - static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action) 168 + static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action) 169 169 { 170 170 unsigned int i; 171 171 172 - for_each_cpu_mask(i, mask) 172 + for_each_cpu(i, mask) 173 173 ip27_send_ipi_single(i, action); 174 174 } 175 175
+3 -2
arch/mips/sibyte/bcm1480/smp.c
··· 82 82 __raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]); 83 83 } 84 84 85 - static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action) 85 + static void bcm1480_send_ipi_mask(const struct cpumask *mask, 86 + unsigned int action) 86 87 { 87 88 unsigned int i; 88 89 89 - for_each_cpu_mask(i, mask) 90 + for_each_cpu(i, mask) 90 91 bcm1480_send_ipi_single(i, action); 91 92 } 92 93
+3 -2
arch/mips/sibyte/sb1250/smp.c
··· 70 70 __raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]); 71 71 } 72 72 73 - static inline void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action) 73 + static inline void sb1250_send_ipi_mask(const struct cpumask *mask, 74 + unsigned int action) 74 75 { 75 76 unsigned int i; 76 77 77 - for_each_cpu_mask(i, mask) 78 + for_each_cpu(i, mask) 78 79 sb1250_send_ipi_single(i, action); 79 80 } 80 81
+6 -6
arch/mn10300/include/asm/mmu_context.h
··· 38 38 #define enter_lazy_tlb(mm, tsk) do {} while (0) 39 39 40 40 #ifdef CONFIG_SMP 41 - #define cpu_ran_vm(cpu, task) \ 42 - cpu_set((cpu), (task)->cpu_vm_mask) 43 - #define cpu_maybe_ran_vm(cpu, task) \ 44 - cpu_test_and_set((cpu), (task)->cpu_vm_mask) 41 + #define cpu_ran_vm(cpu, mm) \ 42 + cpumask_set_cpu((cpu), mm_cpumask(mm)) 43 + #define cpu_maybe_ran_vm(cpu, mm) \ 44 + cpumask_test_and_set_cpu((cpu), mm_cpumask(mm)) 45 45 #else 46 - #define cpu_ran_vm(cpu, task) do {} while (0) 47 - #define cpu_maybe_ran_vm(cpu, task) true 46 + #define cpu_ran_vm(cpu, mm) do {} while (0) 47 + #define cpu_maybe_ran_vm(cpu, mm) true 48 48 #endif /* CONFIG_SMP */ 49 49 50 50 /*
-1
arch/parisc/include/asm/smp.h
··· 30 30 31 31 extern void arch_send_call_function_single_ipi(int cpu); 32 32 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 33 - #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask 34 33 35 34 #endif /* !ASSEMBLY */ 36 35
+1 -1
arch/powerpc/include/asm/smp.h
··· 146 146 extern struct smp_ops_t *smp_ops; 147 147 148 148 extern void arch_send_call_function_single_ipi(int cpu); 149 - extern void arch_send_call_function_ipi(cpumask_t mask); 149 + extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 150 150 151 151 /* Definitions relative to the secondary CPU spin loop 152 152 * and entry point. Not all of them exist on both 32 and
-12
arch/powerpc/include/asm/topology.h
··· 17 17 18 18 #define parent_node(node) (node) 19 19 20 - static inline cpumask_t node_to_cpumask(int node) 21 - { 22 - return numa_cpumask_lookup_table[node]; 23 - } 24 - 25 20 #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) 26 21 27 22 int of_node_to_nid(struct device_node *device); ··· 30 35 return -1; 31 36 } 32 37 #endif 33 - 34 - #define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \ 35 - CPU_MASK_ALL : \ 36 - node_to_cpumask(pcibus_to_node(bus)) \ 37 - ) 38 38 39 39 #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ 40 40 cpu_all_mask : \ ··· 94 104 #ifdef CONFIG_PPC64 95 105 #include <asm/smp.h> 96 106 97 - #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 98 - #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) 99 107 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 100 108 #define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu)) 101 109 #define topology_core_id(cpu) (cpu_to_core_id(cpu))
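
The topology.h hunks here and elsewhere in this merge (alpha, ia64, mips, sh, sparc, asm-generic) retire the value-returning node_to_cpumask() and the node_to_cpumask_ptr() helpers in favour of cpumask_of_node(), which returns a const struct cpumask *. A one-line sketch of the replacement idiom, assuming a kernel build context (count_node_cpus() is a made-up helper):

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    /* Weigh a node's CPU mask without copying a cpumask_t by value. */
    static unsigned int count_node_cpus(int node)
    {
            /* was: cpumask_t mask = node_to_cpumask(node); cpus_weight(mask); */
            return cpumask_weight(cpumask_of_node(node));
    }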
+3 -3
arch/powerpc/kernel/setup-common.c
··· 431 431 for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { 432 432 DBG(" thread %d -> cpu %d (hard id %d)\n", 433 433 j, cpu, intserv[j]); 434 - cpu_set(cpu, cpu_present_map); 434 + set_cpu_present(cpu, true); 435 435 set_hard_smp_processor_id(cpu, intserv[j]); 436 - cpu_set(cpu, cpu_possible_map); 436 + set_cpu_possible(cpu, true); 437 437 cpu++; 438 438 } 439 439 } ··· 479 479 maxcpus); 480 480 481 481 for (cpu = 0; cpu < maxcpus; cpu++) 482 - cpu_set(cpu, cpu_possible_map); 482 + set_cpu_possible(cpu, true); 483 483 out: 484 484 of_node_put(dn); 485 485 }
+6 -6
arch/powerpc/kernel/smp.c
··· 189 189 smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE); 190 190 } 191 191 192 - void arch_send_call_function_ipi(cpumask_t mask) 192 + void arch_send_call_function_ipi_mask(const struct cpumask *mask) 193 193 { 194 194 unsigned int cpu; 195 195 196 - for_each_cpu_mask(cpu, mask) 196 + for_each_cpu(cpu, mask) 197 197 smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); 198 198 } 199 199 ··· 287 287 { 288 288 BUG_ON(smp_processor_id() != boot_cpuid); 289 289 290 - cpu_set(boot_cpuid, cpu_online_map); 290 + set_cpu_online(boot_cpuid, true); 291 291 cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid)); 292 292 cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid)); 293 293 #ifdef CONFIG_PPC64 ··· 307 307 if (cpu == boot_cpuid) 308 308 return -EBUSY; 309 309 310 - cpu_clear(cpu, cpu_online_map); 310 + set_cpu_online(cpu, false); 311 311 #ifdef CONFIG_PPC64 312 312 vdso_data->processorCount--; 313 313 fixup_irqs(cpu_online_map); ··· 361 361 smp_wmb(); 362 362 while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) 363 363 cpu_relax(); 364 - cpu_set(cpu, cpu_online_map); 364 + set_cpu_online(cpu, true); 365 365 local_irq_enable(); 366 366 } 367 367 #endif ··· 508 508 509 509 ipi_call_lock(); 510 510 notify_cpu_starting(cpu); 511 - cpu_set(cpu, cpu_online_map); 511 + set_cpu_online(cpu, true); 512 512 /* Update sibling maps */ 513 513 base = cpu_first_thread_in_core(cpu); 514 514 for (i = 0; i < threads_per_core; i++) {
+3 -3
arch/powerpc/platforms/powermac/smp.c
··· 320 320 if (ncpus > NR_CPUS) 321 321 ncpus = NR_CPUS; 322 322 for (i = 1; i < ncpus ; ++i) 323 - cpu_set(i, cpu_present_map); 323 + set_cpu_present(i, true); 324 324 325 325 if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352); 326 326 ··· 867 867 868 868 int smp_core99_cpu_disable(void) 869 869 { 870 - cpu_clear(smp_processor_id(), cpu_online_map); 870 + set_cpu_online(smp_processor_id(), false); 871 871 872 872 /* XXX reset cpu affinity here */ 873 873 mpic_cpu_set_priority(0xf); ··· 952 952 int cpu; 953 953 954 954 for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu) 955 - cpu_set(cpu, cpu_possible_map); 955 + set_cpu_possible(cpu, true); 956 956 smp_ops = &psurge_smp_ops; 957 957 } 958 958 #endif /* CONFIG_PPC32 */
+3 -3
arch/powerpc/platforms/pseries/hotplug-cpu.c
··· 94 94 { 95 95 int cpu = smp_processor_id(); 96 96 97 - cpu_clear(cpu, cpu_online_map); 97 + set_cpu_online(cpu, false); 98 98 vdso_data->processorCount--; 99 99 100 100 /*fix boot_cpuid here*/ ··· 185 185 186 186 for_each_cpu_mask(cpu, tmp) { 187 187 BUG_ON(cpu_isset(cpu, cpu_present_map)); 188 - cpu_set(cpu, cpu_present_map); 188 + set_cpu_present(cpu, true); 189 189 set_hard_smp_processor_id(cpu, *intserv++); 190 190 } 191 191 err = 0; ··· 217 217 if (get_hard_smp_processor_id(cpu) != intserv[i]) 218 218 continue; 219 219 BUG_ON(cpu_online(cpu)); 220 - cpu_clear(cpu, cpu_present_map); 220 + set_cpu_present(cpu, false); 221 221 set_hard_smp_processor_id(cpu, -1); 222 222 break; 223 223 }
+1 -1
arch/s390/include/asm/smp.h
··· 62 62 extern int smp_cpu_polarization[]; 63 63 64 64 extern void arch_send_call_function_single_ipi(int cpu); 65 - extern void arch_send_call_function_ipi(cpumask_t mask); 65 + extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 66 66 67 67 #endif 68 68
-1
arch/s390/include/asm/topology.h
··· 9 9 10 10 extern cpumask_t cpu_core_map[NR_CPUS]; 11 11 12 - #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 13 12 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 14 13 15 14 int topology_set_cpu_management(int fc);
+2 -2
arch/s390/kernel/smp.c
··· 147 147 udelay(10); 148 148 } 149 149 150 - void arch_send_call_function_ipi(cpumask_t mask) 150 + void arch_send_call_function_ipi_mask(const struct cpumask *mask) 151 151 { 152 152 int cpu; 153 153 154 - for_each_cpu_mask(cpu, mask) 154 + for_each_cpu(cpu, mask) 155 155 smp_ext_bitcall(cpu, ec_call_function); 156 156 } 157 157
-1
arch/sh/include/asm/smp.h
··· 44 44 45 45 void arch_send_call_function_single_ipi(int cpu); 46 46 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 47 - #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask 48 47 49 48 #else 50 49
-1
arch/sh/include/asm/topology.h
··· 31 31 #define cpu_to_node(cpu) ((void)(cpu),0) 32 32 #define parent_node(node) ((void)(node),0) 33 33 34 - #define node_to_cpumask(node) ((void)node, cpu_online_map) 35 34 #define cpumask_of_node(node) ((void)node, cpu_online_mask) 36 35 37 36 #define pcibus_to_node(bus) ((void)(bus), -1)
-1
arch/sparc/include/asm/smp_64.h
··· 36 36 37 37 extern void arch_send_call_function_single_ipi(int cpu); 38 38 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 39 - #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask 40 39 41 40 /* 42 41 * General functions that each host system must provide.
-16
arch/sparc/include/asm/topology_64.h
··· 12 12 13 13 #define parent_node(node) (node) 14 14 15 - static inline cpumask_t node_to_cpumask(int node) 16 - { 17 - return numa_cpumask_lookup_table[node]; 18 - } 19 15 #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) 20 - 21 - /* 22 - * Returns a pointer to the cpumask of CPUs on Node 'node'. 23 - * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 24 - */ 25 - #define node_to_cpumask_ptr(v, node) \ 26 - cpumask_t *v = &(numa_cpumask_lookup_table[node]) 27 - 28 - #define node_to_cpumask_ptr_next(v, node) \ 29 - v = &(numa_cpumask_lookup_table[node]) 30 16 31 17 struct pci_bus; 32 18 #ifdef CONFIG_PCI ··· 57 71 #ifdef CONFIG_SMP 58 72 #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) 59 73 #define topology_core_id(cpu) (cpu_data(cpu).core_id) 60 - #define topology_core_siblings(cpu) (cpu_core_map[cpu]) 61 - #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 62 74 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 63 75 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 64 76 #define mc_capable() (sparc64_multi_core)
+2 -2
arch/um/include/asm/mmu_context.h
··· 35 35 unsigned cpu = smp_processor_id(); 36 36 37 37 if(prev != next){ 38 - cpu_clear(cpu, prev->cpu_vm_mask); 39 - cpu_set(cpu, next->cpu_vm_mask); 38 + cpumask_clear_cpu(cpu, mm_cpumask(prev)); 39 + cpumask_set_cpu(cpu, mm_cpumask(next)); 40 40 if(next != &init_mm) 41 41 __switch_mm(&next->context.id); 42 42 }
+1 -1
arch/um/kernel/smp.c
··· 111 111 int i; 112 112 113 113 for (i = 0; i < ncpus; ++i) 114 - cpu_set(i, cpu_possible_map); 114 + set_cpu_possible(i, true); 115 115 116 116 cpu_clear(me, cpu_online_map); 117 117 cpu_set(me, cpu_online_map);
+3 -3
arch/x86/include/asm/mmu_context.h
··· 37 37 38 38 if (likely(prev != next)) { 39 39 /* stop flush ipis for the previous mm */ 40 - cpu_clear(cpu, prev->cpu_vm_mask); 40 + cpumask_clear_cpu(cpu, mm_cpumask(prev)); 41 41 #ifdef CONFIG_SMP 42 42 percpu_write(cpu_tlbstate.state, TLBSTATE_OK); 43 43 percpu_write(cpu_tlbstate.active_mm, next); 44 44 #endif 45 - cpu_set(cpu, next->cpu_vm_mask); 45 + cpumask_set_cpu(cpu, mm_cpumask(next)); 46 46 47 47 /* Re-load page tables */ 48 48 load_cr3(next->pgd); ··· 58 58 percpu_write(cpu_tlbstate.state, TLBSTATE_OK); 59 59 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); 60 60 61 - if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { 61 + if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) { 62 62 /* We were in lazy tlb mode and leave_mm disabled 63 63 * tlb flush IPI delivery. We must reload CR3 64 64 * to make sure to use no freed page tables.
-1
arch/x86/include/asm/smp.h
··· 121 121 smp_ops.send_call_func_single_ipi(cpu); 122 122 } 123 123 124 - #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask 125 124 static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) 126 125 { 127 126 smp_ops.send_call_func_ipi(mask);
+2 -5
arch/x86/kernel/apic/io_apic.c
··· 227 227 228 228 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 229 229 if (cfg) { 230 - if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { 230 + if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { 231 231 kfree(cfg); 232 232 cfg = NULL; 233 - } else if (!alloc_cpumask_var_node(&cfg->old_domain, 233 + } else if (!zalloc_cpumask_var_node(&cfg->old_domain, 234 234 GFP_ATOMIC, node)) { 235 235 free_cpumask_var(cfg->domain); 236 236 kfree(cfg); 237 237 cfg = NULL; 238 - } else { 239 - cpumask_clear(cfg->domain); 240 - cpumask_clear(cfg->old_domain); 241 238 } 242 239 } 243 240
+2 -2
arch/x86/kernel/ldt.c
··· 67 67 #ifdef CONFIG_SMP 68 68 preempt_disable(); 69 69 load_LDT(pc); 70 - if (!cpus_equal(current->mm->cpu_vm_mask, 71 - cpumask_of_cpu(smp_processor_id()))) 70 + if (!cpumask_equal(mm_cpumask(current->mm), 71 + cpumask_of(smp_processor_id()))) 72 72 smp_call_function(flush_ldt, current->mm, 1); 73 73 preempt_enable(); 74 74 #else
+2 -4
arch/x86/kernel/process.c
··· 555 555 void __init init_c1e_mask(void) 556 556 { 557 557 /* If we're using c1e_idle, we need to allocate c1e_mask. */ 558 - if (pm_idle == c1e_idle) { 559 - alloc_cpumask_var(&c1e_mask, GFP_KERNEL); 560 - cpumask_clear(c1e_mask); 561 - } 558 + if (pm_idle == c1e_idle) 559 + zalloc_cpumask_var(&c1e_mask, GFP_KERNEL); 562 560 } 563 561 564 562 static int __init idle_setup(char *str)
+3 -6
arch/x86/kernel/smpboot.c
··· 1059 1059 #endif 1060 1060 current_thread_info()->cpu = 0; /* needed? */ 1061 1061 for_each_possible_cpu(i) { 1062 - alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); 1063 - alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); 1064 - alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL); 1065 - cpumask_clear(per_cpu(cpu_core_map, i)); 1066 - cpumask_clear(per_cpu(cpu_sibling_map, i)); 1067 - cpumask_clear(cpu_data(i).llc_shared_map); 1062 + zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); 1063 + zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); 1064 + zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL); 1068 1065 } 1069 1066 set_cpu_sibling_map(0); 1070 1067
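
The io_apic.c, process.c and smpboot.c hunks above, and the ACPI, sfc and oprofile hunks below, all collapse alloc_cpumask_var() followed by cpumask_clear() into zalloc_cpumask_var(), which returns an already-zeroed mask. A minimal usage sketch, assuming a kernel build context (build_online_snapshot() is an illustrative name):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static int build_online_snapshot(void)
    {
            cpumask_var_t mask;

            /* was: alloc_cpumask_var(&mask, GFP_KERNEL); cpumask_clear(mask); */
            if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_copy(mask, cpu_online_mask);
            /* ... use the snapshot ... */
            free_cpumask_var(mask);
            return 0;
    }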
-1
arch/x86/kernel/time.c
··· 93 93 94 94 void __init setup_default_timer_irq(void) 95 95 { 96 - irq0.mask = cpumask_of_cpu(0); 97 96 setup_irq(0, &irq0); 98 97 } 99 98
+8 -7
arch/x86/mm/tlb.c
··· 59 59 { 60 60 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) 61 61 BUG(); 62 - cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask); 62 + cpumask_clear_cpu(cpu, 63 + mm_cpumask(percpu_read(cpu_tlbstate.active_mm))); 63 64 load_cr3(swapper_pg_dir); 64 65 } 65 66 EXPORT_SYMBOL_GPL(leave_mm); ··· 235 234 preempt_disable(); 236 235 237 236 local_flush_tlb(); 238 - if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) 239 - flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); 237 + if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) 238 + flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL); 240 239 preempt_enable(); 241 240 } 242 241 ··· 250 249 else 251 250 leave_mm(smp_processor_id()); 252 251 } 253 - if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) 254 - flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL); 252 + if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) 253 + flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL); 255 254 256 255 preempt_enable(); 257 256 } ··· 269 268 leave_mm(smp_processor_id()); 270 269 } 271 270 272 - if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids) 273 - flush_tlb_others(&mm->cpu_vm_mask, mm, va); 271 + if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) 272 + flush_tlb_others(mm_cpumask(mm), mm, va); 274 273 275 274 preempt_enable(); 276 275 }
+2 -2
arch/x86/xen/mmu.c
··· 1165 1165 /* Get the "official" set of cpus referring to our pagetable. */ 1166 1166 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) { 1167 1167 for_each_online_cpu(cpu) { 1168 - if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask) 1168 + if (!cpumask_test_cpu(cpu, mm_cpumask(mm)) 1169 1169 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) 1170 1170 continue; 1171 1171 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1); 1172 1172 } 1173 1173 return; 1174 1174 } 1175 - cpumask_copy(mask, &mm->cpu_vm_mask); 1175 + cpumask_copy(mask, mm_cpumask(mm)); 1176 1176 1177 1177 /* It's possible that a vcpu may have a stale reference to our 1178 1178 cr3, because its in lazy mode, and it hasn't yet flushed
+1 -1
drivers/acpi/osl.c
··· 193 193 194 194 static void bind_to_cpu0(struct work_struct *work) 195 195 { 196 - set_cpus_allowed(current, cpumask_of_cpu(0)); 196 + set_cpus_allowed_ptr(current, cpumask_of(0)); 197 197 kfree(work); 198 198 } 199 199
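
drivers/acpi/osl.c shows the related set_cpus_allowed() to set_cpus_allowed_ptr() change: the affinity mask is passed by pointer, so cpumask_of(0) replaces the on-stack value built by cpumask_of_cpu(0). A minimal sketch, assuming a kernel build context (pin_current_to_boot_cpu() is a made-up name):

    #include <linux/sched.h>
    #include <linux/cpumask.h>

    static int pin_current_to_boot_cpu(void)
    {
            /* was: set_cpus_allowed(current, cpumask_of_cpu(0)); */
            return set_cpus_allowed_ptr(current, cpumask_of(0));
    }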
+1 -2
drivers/acpi/processor_perflib.c
··· 511 511 struct acpi_processor *match_pr; 512 512 struct acpi_psd_package *match_pdomain; 513 513 514 - if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 514 + if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 515 515 return -ENOMEM; 516 516 517 517 mutex_lock(&performance_mutex); ··· 558 558 * Now that we have _PSD data from all CPUs, lets setup P-state 559 559 * domain info. 560 560 */ 561 - cpumask_clear(covered_cpus); 562 561 for_each_possible_cpu(i) { 563 562 pr = per_cpu(processors, i); 564 563 if (!pr)
+1 -2
drivers/acpi/processor_throttling.c
··· 77 77 struct acpi_tsd_package *pdomain, *match_pdomain; 78 78 struct acpi_processor_throttling *pthrottling, *match_pthrottling; 79 79 80 - if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 80 + if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 81 81 return -ENOMEM; 82 82 83 83 /* ··· 105 105 if (retval) 106 106 goto err_ret; 107 107 108 - cpumask_clear(covered_cpus); 109 108 for_each_possible_cpu(i) { 110 109 pr = per_cpu(processors, i); 111 110 if (!pr)
+1 -2
drivers/net/sfc/efx.c
··· 884 884 int count; 885 885 int cpu; 886 886 887 - if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) { 887 + if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { 888 888 printk(KERN_WARNING 889 889 "sfc: RSS disabled due to allocation failure\n"); 890 890 return 1; 891 891 } 892 892 893 - cpumask_clear(core_mask); 894 893 count = 0; 895 894 for_each_online_cpu(cpu) { 896 895 if (!cpumask_test_cpu(cpu, core_mask)) {
+1 -2
drivers/oprofile/buffer_sync.c
··· 154 154 { 155 155 int err; 156 156 157 - if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL)) 157 + if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) 158 158 return -ENOMEM; 159 - cpumask_clear(marked_cpus); 160 159 161 160 start_cpu_work(); 162 161
-17
include/asm-generic/topology.h
··· 37 37 #ifndef parent_node 38 38 #define parent_node(node) ((void)(node),0) 39 39 #endif 40 - #ifndef node_to_cpumask 41 - #define node_to_cpumask(node) ((void)node, cpu_online_map) 42 - #endif 43 40 #ifndef cpumask_of_node 44 41 #define cpumask_of_node(node) ((void)node, cpu_online_mask) 45 42 #endif ··· 51 54 #endif 52 55 53 56 #endif /* CONFIG_NUMA */ 54 - 55 - /* 56 - * returns pointer to cpumask for specified node 57 - * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 58 - */ 59 - #ifndef node_to_cpumask_ptr 60 - 61 - #define node_to_cpumask_ptr(v, node) \ 62 - cpumask_t _##v = node_to_cpumask(node); \ 63 - const cpumask_t *v = &_##v 64 - 65 - #define node_to_cpumask_ptr_next(v, node) \ 66 - _##v = node_to_cpumask(node) 67 - #endif 68 57 69 58 #endif /* _ASM_GENERIC_TOPOLOGY_H */
+252 -457
include/linux/cpumask.h
··· 3 3 4 4 /* 5 5 * Cpumasks provide a bitmap suitable for representing the 6 - * set of CPU's in a system, one bit position per CPU number. 7 - * 8 - * The new cpumask_ ops take a "struct cpumask *"; the old ones 9 - * use cpumask_t. 10 - * 11 - * See detailed comments in the file linux/bitmap.h describing the 12 - * data type on which these cpumasks are based. 13 - * 14 - * For details of cpumask_scnprintf() and cpumask_parse_user(), 15 - * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c. 16 - * For details of cpulist_scnprintf() and cpulist_parse(), see 17 - * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. 18 - * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c 19 - * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c. 20 - * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. 21 - * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. 22 - * 23 - * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 24 - * Note: The alternate operations with the suffix "_nr" are used 25 - * to limit the range of the loop to nr_cpu_ids instead of 26 - * NR_CPUS when NR_CPUS > 64 for performance reasons. 27 - * If NR_CPUS is <= 64 then most assembler bitmask 28 - * operators execute faster with a constant range, so 29 - * the operator will continue to use NR_CPUS. 30 - * 31 - * Another consideration is that nr_cpu_ids is initialized 32 - * to NR_CPUS and isn't lowered until the possible cpus are 33 - * discovered (including any disabled cpus). So early uses 34 - * will span the entire range of NR_CPUS. 35 - * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 36 - * 37 - * The obsolescent cpumask operations are: 38 - * 39 - * void cpu_set(cpu, mask) turn on bit 'cpu' in mask 40 - * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask 41 - * void cpus_setall(mask) set all bits 42 - * void cpus_clear(mask) clear all bits 43 - * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask 44 - * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask 45 - * 46 - * int cpus_and(dst, src1, src2) dst = src1 & src2 [intersection] 47 - * void cpus_or(dst, src1, src2) dst = src1 | src2 [union] 48 - * void cpus_xor(dst, src1, src2) dst = src1 ^ src2 49 - * int cpus_andnot(dst, src1, src2) dst = src1 & ~src2 50 - * void cpus_complement(dst, src) dst = ~src 51 - * 52 - * int cpus_equal(mask1, mask2) Does mask1 == mask2? 53 - * int cpus_intersects(mask1, mask2) Do mask1 and mask2 intersect? 54 - * int cpus_subset(mask1, mask2) Is mask1 a subset of mask2? 55 - * int cpus_empty(mask) Is mask empty (no bits sets)? 56 - * int cpus_full(mask) Is mask full (all bits sets)? 
57 - * int cpus_weight(mask) Hamming weigh - number of set bits 58 - * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS 59 - * 60 - * void cpus_shift_right(dst, src, n) Shift right 61 - * void cpus_shift_left(dst, src, n) Shift left 62 - * 63 - * int first_cpu(mask) Number lowest set bit, or NR_CPUS 64 - * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS 65 - * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids 66 - * 67 - * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set 68 - * (can be used as an lvalue) 69 - * CPU_MASK_ALL Initializer - all bits set 70 - * CPU_MASK_NONE Initializer - no bits set 71 - * unsigned long *cpus_addr(mask) Array of unsigned long's in mask 72 - * 73 - * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t 74 - * variables, and CPUMASK_PTR provides pointers to each field. 75 - * 76 - * The structure should be defined something like this: 77 - * struct my_cpumasks { 78 - * cpumask_t mask1; 79 - * cpumask_t mask2; 80 - * }; 81 - * 82 - * Usage is then: 83 - * CPUMASK_ALLOC(my_cpumasks); 84 - * CPUMASK_PTR(mask1, my_cpumasks); 85 - * CPUMASK_PTR(mask2, my_cpumasks); 86 - * 87 - * --- DO NOT reference cpumask_t pointers until this check --- 88 - * if (my_cpumasks == NULL) 89 - * "kmalloc failed"... 90 - * 91 - * References are now pointers to the cpumask_t variables (*mask1, ...) 92 - * 93 - *if NR_CPUS > BITS_PER_LONG 94 - * CPUMASK_ALLOC(m) Declares and allocates struct m *m = 95 - * kmalloc(sizeof(*m), GFP_KERNEL) 96 - * CPUMASK_FREE(m) Macro for kfree(m) 97 - *else 98 - * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m 99 - * CPUMASK_FREE(m) Nop 100 - *endif 101 - * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v) 102 - * ------------------------------------------------------------------------ 103 - * 104 - * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing 105 - * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask 106 - * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing 107 - * int cpulist_parse(buf, map) Parse ascii string as cpulist 108 - * int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit) 109 - * void cpus_remap(dst, src, old, new) *dst = map(old, new)(src) 110 - * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap 111 - * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz 112 - * 113 - * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS 114 - * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids 115 - * 116 - * int num_online_cpus() Number of online CPUs 117 - * int num_possible_cpus() Number of all possible CPUs 118 - * int num_present_cpus() Number of present CPUs 119 - * 120 - * int cpu_online(cpu) Is some cpu online? 121 - * int cpu_possible(cpu) Is some cpu possible? 122 - * int cpu_present(cpu) Is some cpu present (can schedule)? 123 - * 124 - * int any_online_cpu(mask) First online cpu in mask 125 - * 126 - * for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map 127 - * for_each_online_cpu(cpu) for-loop cpu over cpu_online_map 128 - * for_each_present_cpu(cpu) for-loop cpu over cpu_present_map 129 - * 130 - * Subtlety: 131 - * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway) 132 - * to generate slightly worse code. 
Note for example the additional 133 - * 40 lines of assembly code compiling the "for each possible cpu" 134 - * loops buried in the disk_stat_read() macros calls when compiling 135 - * drivers/block/genhd.c (arch i386, CONFIG_SMP=y). So use a simple 136 - * one-line #define for cpu_isset(), instead of wrapping an inline 137 - * inside a macro, the way we do the other calls. 6 + * set of CPU's in a system, one bit position per CPU number. In general, 7 + * only nr_cpu_ids (<= NR_CPUS) bits are valid. 138 8 */ 139 - 140 9 #include <linux/kernel.h> 141 10 #include <linux/threads.h> 142 11 #include <linux/bitmap.h> 143 12 144 13 typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; 145 - extern cpumask_t _unused_cpumask_arg_; 146 - 147 - #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 148 - #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) 149 - static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) 150 - { 151 - set_bit(cpu, dstp->bits); 152 - } 153 - 154 - #define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst)) 155 - static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp) 156 - { 157 - clear_bit(cpu, dstp->bits); 158 - } 159 - 160 - #define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS) 161 - static inline void __cpus_setall(cpumask_t *dstp, int nbits) 162 - { 163 - bitmap_fill(dstp->bits, nbits); 164 - } 165 - 166 - #define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS) 167 - static inline void __cpus_clear(cpumask_t *dstp, int nbits) 168 - { 169 - bitmap_zero(dstp->bits, nbits); 170 - } 171 - 172 - /* No static inline type checking - see Subtlety (1) above. */ 173 - #define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits) 174 - 175 - #define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask)) 176 - static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) 177 - { 178 - return test_and_set_bit(cpu, addr->bits); 179 - } 180 - 181 - #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) 182 - static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, 183 - const cpumask_t *src2p, int nbits) 184 - { 185 - return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); 186 - } 187 - 188 - #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) 189 - static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, 190 - const cpumask_t *src2p, int nbits) 191 - { 192 - bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); 193 - } 194 - 195 - #define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS) 196 - static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, 197 - const cpumask_t *src2p, int nbits) 198 - { 199 - bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); 200 - } 201 - 202 - #define cpus_andnot(dst, src1, src2) \ 203 - __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) 204 - static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, 205 - const cpumask_t *src2p, int nbits) 206 - { 207 - return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); 208 - } 209 - 210 - #define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS) 211 - static inline void __cpus_complement(cpumask_t *dstp, 212 - const cpumask_t *srcp, int nbits) 213 - { 214 - bitmap_complement(dstp->bits, srcp->bits, nbits); 215 - } 216 - 217 - #define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) 218 - static inline int __cpus_equal(const cpumask_t *src1p, 219 - const cpumask_t *src2p, int nbits) 220 - { 
221 - return bitmap_equal(src1p->bits, src2p->bits, nbits); 222 - } 223 - 224 - #define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS) 225 - static inline int __cpus_intersects(const cpumask_t *src1p, 226 - const cpumask_t *src2p, int nbits) 227 - { 228 - return bitmap_intersects(src1p->bits, src2p->bits, nbits); 229 - } 230 - 231 - #define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS) 232 - static inline int __cpus_subset(const cpumask_t *src1p, 233 - const cpumask_t *src2p, int nbits) 234 - { 235 - return bitmap_subset(src1p->bits, src2p->bits, nbits); 236 - } 237 - 238 - #define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) 239 - static inline int __cpus_empty(const cpumask_t *srcp, int nbits) 240 - { 241 - return bitmap_empty(srcp->bits, nbits); 242 - } 243 - 244 - #define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS) 245 - static inline int __cpus_full(const cpumask_t *srcp, int nbits) 246 - { 247 - return bitmap_full(srcp->bits, nbits); 248 - } 249 - 250 - #define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS) 251 - static inline int __cpus_weight(const cpumask_t *srcp, int nbits) 252 - { 253 - return bitmap_weight(srcp->bits, nbits); 254 - } 255 - 256 - #define cpus_shift_right(dst, src, n) \ 257 - __cpus_shift_right(&(dst), &(src), (n), NR_CPUS) 258 - static inline void __cpus_shift_right(cpumask_t *dstp, 259 - const cpumask_t *srcp, int n, int nbits) 260 - { 261 - bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); 262 - } 263 - 264 - #define cpus_shift_left(dst, src, n) \ 265 - __cpus_shift_left(&(dst), &(src), (n), NR_CPUS) 266 - static inline void __cpus_shift_left(cpumask_t *dstp, 267 - const cpumask_t *srcp, int n, int nbits) 268 - { 269 - bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); 270 - } 271 - #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 272 14 273 15 /** 274 - * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * 275 - * @bitmap: the bitmap 16 + * cpumask_bits - get the bits in a cpumask 17 + * @maskp: the struct cpumask * 276 18 * 277 - * There are a few places where cpumask_var_t isn't appropriate and 278 - * static cpumasks must be used (eg. very early boot), yet we don't 279 - * expose the definition of 'struct cpumask'. 280 - * 281 - * This does the conversion, and can be used as a constant initializer. 19 + * You should only assume nr_cpu_ids bits of this mask are valid. This is 20 + * a macro so it's const-correct. 282 21 */ 283 - #define to_cpumask(bitmap) \ 284 - ((struct cpumask *)(1 ? (bitmap) \ 285 - : (void *)sizeof(__check_is_bitmap(bitmap)))) 286 - 287 - static inline int __check_is_bitmap(const unsigned long *bitmap) 288 - { 289 - return 1; 290 - } 291 - 292 - /* 293 - * Special-case data structure for "single bit set only" constant CPU masks. 294 - * 295 - * We pre-generate all the 64 (or 32) possible bit positions, with enough 296 - * padding to the left and the right, and return the constant pointer 297 - * appropriately offset. 
298 - */ 299 - extern const unsigned long 300 - cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; 301 - 302 - static inline const struct cpumask *get_cpu_mask(unsigned int cpu) 303 - { 304 - const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; 305 - p -= cpu / BITS_PER_LONG; 306 - return to_cpumask(p); 307 - } 308 - 309 - #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 310 - /* 311 - * In cases where we take the address of the cpumask immediately, 312 - * gcc optimizes it out (it's a constant) and there's no huge stack 313 - * variable created: 314 - */ 315 - #define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) 316 - 317 - 318 - #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) 319 - 320 - #if NR_CPUS <= BITS_PER_LONG 321 - 322 - #define CPU_MASK_ALL \ 323 - (cpumask_t) { { \ 324 - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 325 - } } 326 - 327 - #define CPU_MASK_ALL_PTR (&CPU_MASK_ALL) 328 - 329 - #else 330 - 331 - #define CPU_MASK_ALL \ 332 - (cpumask_t) { { \ 333 - [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ 334 - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 335 - } } 336 - 337 - /* cpu_mask_all is in init/main.c */ 338 - extern cpumask_t cpu_mask_all; 339 - #define CPU_MASK_ALL_PTR (&cpu_mask_all) 340 - 341 - #endif 342 - 343 - #define CPU_MASK_NONE \ 344 - (cpumask_t) { { \ 345 - [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ 346 - } } 347 - 348 - #define CPU_MASK_CPU0 \ 349 - (cpumask_t) { { \ 350 - [0] = 1UL \ 351 - } } 352 - 353 - #define cpus_addr(src) ((src).bits) 354 - 355 - #if NR_CPUS > BITS_PER_LONG 356 - #define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL) 357 - #define CPUMASK_FREE(m) kfree(m) 358 - #else 359 - #define CPUMASK_ALLOC(m) struct m _m, *m = &_m 360 - #define CPUMASK_FREE(m) 361 - #endif 362 - #define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) 363 - 364 - #define cpu_remap(oldbit, old, new) \ 365 - __cpu_remap((oldbit), &(old), &(new), NR_CPUS) 366 - static inline int __cpu_remap(int oldbit, 367 - const cpumask_t *oldp, const cpumask_t *newp, int nbits) 368 - { 369 - return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); 370 - } 371 - 372 - #define cpus_remap(dst, src, old, new) \ 373 - __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS) 374 - static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp, 375 - const cpumask_t *oldp, const cpumask_t *newp, int nbits) 376 - { 377 - bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); 378 - } 379 - 380 - #define cpus_onto(dst, orig, relmap) \ 381 - __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS) 382 - static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp, 383 - const cpumask_t *relmapp, int nbits) 384 - { 385 - bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); 386 - } 387 - 388 - #define cpus_fold(dst, orig, sz) \ 389 - __cpus_fold(&(dst), &(orig), sz, NR_CPUS) 390 - static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, 391 - int sz, int nbits) 392 - { 393 - bitmap_fold(dstp->bits, origp->bits, sz, nbits); 394 - } 395 - #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 22 + #define cpumask_bits(maskp) ((maskp)->bits) 396 23 397 24 #if NR_CPUS == 1 398 - 399 25 #define nr_cpu_ids 1 400 - #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 401 - #define first_cpu(src) ({ (void)(src); 0; }) 402 - #define next_cpu(n, src) ({ (void)(src); 1; }) 403 - #define any_online_cpu(mask) 0 404 - #define for_each_cpu_mask(cpu, mask) \ 405 - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 406 - #endif 
/* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 407 - #else /* NR_CPUS > 1 */ 408 - 26 + #else 409 27 extern int nr_cpu_ids; 410 - #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 411 - int __first_cpu(const cpumask_t *srcp); 412 - int __next_cpu(int n, const cpumask_t *srcp); 413 - int __any_online_cpu(const cpumask_t *mask); 414 - 415 - #define first_cpu(src) __first_cpu(&(src)) 416 - #define next_cpu(n, src) __next_cpu((n), &(src)) 417 - #define any_online_cpu(mask) __any_online_cpu(&(mask)) 418 - #define for_each_cpu_mask(cpu, mask) \ 419 - for ((cpu) = -1; \ 420 - (cpu) = next_cpu((cpu), (mask)), \ 421 - (cpu) < NR_CPUS; ) 422 - #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 423 28 #endif 424 29 425 - #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 426 - #if NR_CPUS <= 64 427 - 428 - #define next_cpu_nr(n, src) next_cpu(n, src) 429 - #define cpus_weight_nr(cpumask) cpus_weight(cpumask) 430 - #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) 431 - 432 - #else /* NR_CPUS > 64 */ 433 - 434 - int __next_cpu_nr(int n, const cpumask_t *srcp); 435 - #define next_cpu_nr(n, src) __next_cpu_nr((n), &(src)) 436 - #define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids) 437 - #define for_each_cpu_mask_nr(cpu, mask) \ 438 - for ((cpu) = -1; \ 439 - (cpu) = next_cpu_nr((cpu), (mask)), \ 440 - (cpu) < nr_cpu_ids; ) 441 - 442 - #endif /* NR_CPUS > 64 */ 443 - #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 30 + #ifdef CONFIG_CPUMASK_OFFSTACK 31 + /* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, 32 + * not all bits may be allocated. */ 33 + #define nr_cpumask_bits nr_cpu_ids 34 + #else 35 + #define nr_cpumask_bits NR_CPUS 36 + #endif 444 37 445 38 /* 446 39 * The following particular system cpumasks and operations manage ··· 80 487 extern const struct cpumask *const cpu_present_mask; 81 488 extern const struct cpumask *const cpu_active_mask; 82 489 83 - /* These strip const, as traditionally they weren't const. */ 84 - #define cpu_possible_map (*(cpumask_t *)cpu_possible_mask) 85 - #define cpu_online_map (*(cpumask_t *)cpu_online_mask) 86 - #define cpu_present_map (*(cpumask_t *)cpu_present_mask) 87 - #define cpu_active_map (*(cpumask_t *)cpu_active_mask) 88 - 89 490 #if NR_CPUS > 1 90 491 #define num_online_cpus() cpumask_weight(cpu_online_mask) 91 492 #define num_possible_cpus() cpumask_weight(cpu_possible_mask) ··· 96 509 #define cpu_possible(cpu) ((cpu) == 0) 97 510 #define cpu_present(cpu) ((cpu) == 0) 98 511 #define cpu_active(cpu) ((cpu) == 0) 99 - #endif 100 - 101 - #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) 102 - 103 - /* These are the new versions of the cpumask operators: passed by pointer. 104 - * The older versions will be implemented in terms of these, then deleted. */ 105 - #define cpumask_bits(maskp) ((maskp)->bits) 106 - 107 - #if NR_CPUS <= BITS_PER_LONG 108 - #define CPU_BITS_ALL \ 109 - { \ 110 - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 111 - } 112 - 113 - #else /* NR_CPUS > BITS_PER_LONG */ 114 - 115 - #define CPU_BITS_ALL \ 116 - { \ 117 - [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ 118 - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 119 - } 120 - #endif /* NR_CPUS > BITS_PER_LONG */ 121 - 122 - #ifdef CONFIG_CPUMASK_OFFSTACK 123 - /* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, 124 - * not all bits may be allocated. 
*/ 125 - #define nr_cpumask_bits nr_cpu_ids 126 - #else 127 - #define nr_cpumask_bits NR_CPUS 128 512 #endif 129 513 130 514 /* verify cpu argument to cpumask_* operators */ ··· 658 1100 void init_cpu_present(const struct cpumask *src); 659 1101 void init_cpu_possible(const struct cpumask *src); 660 1102 void init_cpu_online(const struct cpumask *src); 1103 + 1104 + /** 1105 + * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * 1106 + * @bitmap: the bitmap 1107 + * 1108 + * There are a few places where cpumask_var_t isn't appropriate and 1109 + * static cpumasks must be used (eg. very early boot), yet we don't 1110 + * expose the definition of 'struct cpumask'. 1111 + * 1112 + * This does the conversion, and can be used as a constant initializer. 1113 + */ 1114 + #define to_cpumask(bitmap) \ 1115 + ((struct cpumask *)(1 ? (bitmap) \ 1116 + : (void *)sizeof(__check_is_bitmap(bitmap)))) 1117 + 1118 + static inline int __check_is_bitmap(const unsigned long *bitmap) 1119 + { 1120 + return 1; 1121 + } 1122 + 1123 + /* 1124 + * Special-case data structure for "single bit set only" constant CPU masks. 1125 + * 1126 + * We pre-generate all the 64 (or 32) possible bit positions, with enough 1127 + * padding to the left and the right, and return the constant pointer 1128 + * appropriately offset. 1129 + */ 1130 + extern const unsigned long 1131 + cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; 1132 + 1133 + static inline const struct cpumask *get_cpu_mask(unsigned int cpu) 1134 + { 1135 + const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; 1136 + p -= cpu / BITS_PER_LONG; 1137 + return to_cpumask(p); 1138 + } 1139 + 1140 + #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) 1141 + 1142 + #if NR_CPUS <= BITS_PER_LONG 1143 + #define CPU_BITS_ALL \ 1144 + { \ 1145 + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 1146 + } 1147 + 1148 + #else /* NR_CPUS > BITS_PER_LONG */ 1149 + 1150 + #define CPU_BITS_ALL \ 1151 + { \ 1152 + [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ 1153 + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 1154 + } 1155 + #endif /* NR_CPUS > BITS_PER_LONG */ 1156 + 1157 + /* 1158 + * 1159 + * From here down, all obsolete. Use cpumask_ variants! 1160 + * 1161 + */ 1162 + #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 1163 + /* These strip const, as traditionally they weren't const. */ 1164 + #define cpu_possible_map (*(cpumask_t *)cpu_possible_mask) 1165 + #define cpu_online_map (*(cpumask_t *)cpu_online_mask) 1166 + #define cpu_present_map (*(cpumask_t *)cpu_present_mask) 1167 + #define cpu_active_map (*(cpumask_t *)cpu_active_mask) 1168 + 1169 + #define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) 1170 + 1171 + #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) 1172 + 1173 + #if NR_CPUS <= BITS_PER_LONG 1174 + 1175 + #define CPU_MASK_ALL \ 1176 + (cpumask_t) { { \ 1177 + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 1178 + } } 1179 + 1180 + #else 1181 + 1182 + #define CPU_MASK_ALL \ 1183 + (cpumask_t) { { \ 1184 + [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ 1185 + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 1186 + } } 1187 + 1188 + #endif 1189 + 1190 + #define CPU_MASK_NONE \ 1191 + (cpumask_t) { { \ 1192 + [0 ... 
BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ 1193 + } } 1194 + 1195 + #define CPU_MASK_CPU0 \ 1196 + (cpumask_t) { { \ 1197 + [0] = 1UL \ 1198 + } } 1199 + 1200 + #if NR_CPUS == 1 1201 + #define first_cpu(src) ({ (void)(src); 0; }) 1202 + #define next_cpu(n, src) ({ (void)(src); 1; }) 1203 + #define any_online_cpu(mask) 0 1204 + #define for_each_cpu_mask(cpu, mask) \ 1205 + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 1206 + #else /* NR_CPUS > 1 */ 1207 + int __first_cpu(const cpumask_t *srcp); 1208 + int __next_cpu(int n, const cpumask_t *srcp); 1209 + int __any_online_cpu(const cpumask_t *mask); 1210 + 1211 + #define first_cpu(src) __first_cpu(&(src)) 1212 + #define next_cpu(n, src) __next_cpu((n), &(src)) 1213 + #define any_online_cpu(mask) __any_online_cpu(&(mask)) 1214 + #define for_each_cpu_mask(cpu, mask) \ 1215 + for ((cpu) = -1; \ 1216 + (cpu) = next_cpu((cpu), (mask)), \ 1217 + (cpu) < NR_CPUS; ) 1218 + #endif /* SMP */ 1219 + 1220 + #if NR_CPUS <= 64 1221 + 1222 + #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) 1223 + 1224 + #else /* NR_CPUS > 64 */ 1225 + 1226 + int __next_cpu_nr(int n, const cpumask_t *srcp); 1227 + #define for_each_cpu_mask_nr(cpu, mask) \ 1228 + for ((cpu) = -1; \ 1229 + (cpu) = __next_cpu_nr((cpu), &(mask)), \ 1230 + (cpu) < nr_cpu_ids; ) 1231 + 1232 + #endif /* NR_CPUS > 64 */ 1233 + 1234 + #define cpus_addr(src) ((src).bits) 1235 + 1236 + #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) 1237 + static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) 1238 + { 1239 + set_bit(cpu, dstp->bits); 1240 + } 1241 + 1242 + #define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst)) 1243 + static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp) 1244 + { 1245 + clear_bit(cpu, dstp->bits); 1246 + } 1247 + 1248 + #define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS) 1249 + static inline void __cpus_setall(cpumask_t *dstp, int nbits) 1250 + { 1251 + bitmap_fill(dstp->bits, nbits); 1252 + } 1253 + 1254 + #define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS) 1255 + static inline void __cpus_clear(cpumask_t *dstp, int nbits) 1256 + { 1257 + bitmap_zero(dstp->bits, nbits); 1258 + } 1259 + 1260 + /* No static inline type checking - see Subtlety (1) above. 
*/ 1261 + #define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits) 1262 + 1263 + #define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask)) 1264 + static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) 1265 + { 1266 + return test_and_set_bit(cpu, addr->bits); 1267 + } 1268 + 1269 + #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) 1270 + static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, 1271 + const cpumask_t *src2p, int nbits) 1272 + { 1273 + return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); 1274 + } 1275 + 1276 + #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) 1277 + static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, 1278 + const cpumask_t *src2p, int nbits) 1279 + { 1280 + bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); 1281 + } 1282 + 1283 + #define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS) 1284 + static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, 1285 + const cpumask_t *src2p, int nbits) 1286 + { 1287 + bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); 1288 + } 1289 + 1290 + #define cpus_andnot(dst, src1, src2) \ 1291 + __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) 1292 + static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, 1293 + const cpumask_t *src2p, int nbits) 1294 + { 1295 + return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); 1296 + } 1297 + 1298 + #define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) 1299 + static inline int __cpus_equal(const cpumask_t *src1p, 1300 + const cpumask_t *src2p, int nbits) 1301 + { 1302 + return bitmap_equal(src1p->bits, src2p->bits, nbits); 1303 + } 1304 + 1305 + #define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS) 1306 + static inline int __cpus_intersects(const cpumask_t *src1p, 1307 + const cpumask_t *src2p, int nbits) 1308 + { 1309 + return bitmap_intersects(src1p->bits, src2p->bits, nbits); 1310 + } 1311 + 1312 + #define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS) 1313 + static inline int __cpus_subset(const cpumask_t *src1p, 1314 + const cpumask_t *src2p, int nbits) 1315 + { 1316 + return bitmap_subset(src1p->bits, src2p->bits, nbits); 1317 + } 1318 + 1319 + #define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) 1320 + static inline int __cpus_empty(const cpumask_t *srcp, int nbits) 1321 + { 1322 + return bitmap_empty(srcp->bits, nbits); 1323 + } 1324 + 1325 + #define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS) 1326 + static inline int __cpus_weight(const cpumask_t *srcp, int nbits) 1327 + { 1328 + return bitmap_weight(srcp->bits, nbits); 1329 + } 1330 + 1331 + #define cpus_shift_left(dst, src, n) \ 1332 + __cpus_shift_left(&(dst), &(src), (n), NR_CPUS) 1333 + static inline void __cpus_shift_left(cpumask_t *dstp, 1334 + const cpumask_t *srcp, int n, int nbits) 1335 + { 1336 + bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); 1337 + } 1338 + #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 1339 + 661 1340 #endif /* __LINUX_CPUMASK_H */
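For reference (not part of this merge): callers coming off the value-based helpers that the hunks above relegate to the obsolete section typically switch to the pointer-based cpumask_*() ops, usually with a cpumask_var_t temporary so large NR_CPUS configurations do not blow the stack. The sketch below is illustrative only; demo_new_style_ops() is an invented name.

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Old style:  cpumask_t tmp; cpus_and(tmp, *allowed, cpu_online_map);
     * New style:  pointer ops on a cpumask_var_t, which lives off-stack when
     * CONFIG_CPUMASK_OFFSTACK=y and is a plain one-element array otherwise. */
    static int demo_new_style_ops(const struct cpumask *allowed)
    {
            cpumask_var_t tmp;
            int cpu, count = 0;

            if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_and(tmp, allowed, cpu_online_mask);

            /* for_each_cpu() scans only nr_cpu_ids bits, unlike the old
             * NR_CPUS-wide for_each_cpu_mask() */
            for_each_cpu(cpu, tmp)
                    count++;

            free_cpumask_var(tmp);
            return count;
    }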
-2
include/linux/interrupt.h
··· 84 84 * struct irqaction - per interrupt action descriptor 85 85 * @handler: interrupt handler function 86 86 * @flags: flags (see IRQF_* above) 87 - * @mask: no comment as it is useless and about to be removed 88 87 * @name: name of the device 89 88 * @dev_id: cookie to identify the device 90 89 * @next: pointer to the next irqaction for shared interrupts ··· 96 97 struct irqaction { 97 98 irq_handler_t handler; 98 99 unsigned long flags; 99 - cpumask_t mask; 100 100 const char *name; 101 101 void *dev_id; 102 102 struct irqaction *next;
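A hedged aside, not code from this merge: the only fallout from dropping the unused field is in drivers that build a struct irqaction by hand, which simply lose the initializer. The names demo_timer_handler and demo_timer_irqaction below are invented.

    #include <linux/interrupt.h>

    static irqreturn_t demo_timer_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    /* Older initializers sometimes carried ".mask = CPU_MASK_NONE";
     * the field no longer exists, so it is simply omitted. */
    static struct irqaction demo_timer_irqaction = {
            .handler = demo_timer_handler,
            .flags   = IRQF_DISABLED | IRQF_TIMER,
            .name    = "demo-timer",
    };

Such an action would still be registered the usual way, e.g. via setup_irq() from early arch code.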
+3
include/linux/sched.h
··· 1817 1817 return 0; 1818 1818 } 1819 1819 #endif 1820 + 1821 + #ifndef CONFIG_CPUMASK_OFFSTACK 1820 1822 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) 1821 1823 { 1822 1824 return set_cpus_allowed_ptr(p, &new_mask); 1823 1825 } 1826 + #endif 1824 1827 1825 1828 /* 1826 1829 * Architectures can set this to 1 if they have specified
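Illustrative sketch (invented name, not from this merge): since the by-value set_cpus_allowed() wrapper is now compiled out on CONFIG_CPUMASK_OFFSTACK=y builds, new callers are expected to use set_cpus_allowed_ptr() directly.

    #include <linux/sched.h>
    #include <linux/cpumask.h>

    /* was: set_cpus_allowed(p, cpumask_of_cpu(cpu)) */
    static int bind_to_cpu(struct task_struct *p, int cpu)
    {
            return set_cpus_allowed_ptr(p, cpumask_of(cpu));
    }

cpumask_of() resolves to a pointer into the constant cpu_bit_bitmap[] table, so no cpumask is copied onto the stack regardless of NR_CPUS.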
-11
include/linux/smp.h
··· 73 73 void smp_call_function_many(const struct cpumask *mask, 74 74 void (*func)(void *info), void *info, bool wait); 75 75 76 - /* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */ 77 - static inline int 78 - smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, 79 - int wait) 80 - { 81 - smp_call_function_many(&mask, func, info, wait); 82 - return 0; 83 - } 84 - 85 76 void __smp_call_function_single(int cpuid, struct call_single_data *data, 86 77 int wait); 87 78 ··· 135 144 static inline void smp_send_reschedule(int cpu) { } 136 145 #define num_booting_cpus() 1 137 146 #define smp_prepare_boot_cpu() do {} while (0) 138 - #define smp_call_function_mask(mask, func, info, wait) \ 139 - (up_smp_call_function(func, info)) 140 147 #define smp_call_function_many(mask, func, info, wait) \ 141 148 (up_smp_call_function(func, info)) 142 149 static inline void init_call_single_data(void)
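Sketch of the caller-side migration (invented names, not from this merge): with the by-value smp_call_function_mask() shim gone, callers hand smp_call_function_many() the mask by pointer.

    #include <linux/smp.h>
    #include <linux/cpumask.h>

    static void do_flush(void *info)
    {
            /* per-CPU work runs here in IPI context */
    }

    /* was: smp_call_function_mask(*mask, do_flush, info, 1) */
    static void flush_on_cpus(const struct cpumask *mask, void *info)
    {
            /* runs do_flush on the online CPUs in *mask (excluding the
             * calling CPU) and waits for completion */
            smp_call_function_many(mask, do_flush, info, true);
    }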
-6
include/linux/topology.h
··· 211 211 #ifndef topology_core_id 212 212 #define topology_core_id(cpu) ((void)(cpu), 0) 213 213 #endif 214 - #ifndef topology_thread_siblings 215 - #define topology_thread_siblings(cpu) cpumask_of_cpu(cpu) 216 - #endif 217 - #ifndef topology_core_siblings 218 - #define topology_core_siblings(cpu) cpumask_of_cpu(cpu) 219 - #endif 220 214 #ifndef topology_thread_cpumask 221 215 #define topology_thread_cpumask(cpu) cpumask_of(cpu) 222 216 #endif
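Illustrative only: users of the removed *_siblings macros move to the *_cpumask accessors, which return const struct cpumask * rather than a cpumask_t by value; count_core_siblings() is an invented wrapper.

    #include <linux/topology.h>
    #include <linux/cpumask.h>

    /* was: cpus_weight(topology_core_siblings(cpu)) */
    static int count_core_siblings(int cpu)
    {
            return cpumask_weight(topology_core_cpumask(cpu));
    }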
-5
init/main.c
··· 359 359 360 360 #else 361 361 362 - #if NR_CPUS > BITS_PER_LONG 363 - cpumask_t cpu_mask_all __read_mostly = CPU_MASK_ALL; 364 - EXPORT_SYMBOL(cpu_mask_all); 365 - #endif 366 - 367 362 /* Setup number of possible processor ids */ 368 363 int nr_cpu_ids __read_mostly = NR_CPUS; 369 364 EXPORT_SYMBOL(nr_cpu_ids);
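A small hedged sketch, not from this merge: with cpu_mask_all and CPU_MASK_ALL_PTR gone, code that needs an "all CPUs" source mask uses the constant cpu_all_mask pointer, as the trace.c hunk further below does.

    #include <linux/cpumask.h>

    /* was: cpumask_copy(dst, CPU_MASK_ALL_PTR) */
    static void demo_copy_all(struct cpumask *dst)
    {
            cpumask_copy(dst, cpu_all_mask);
    }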
-7
kernel/smp.c
··· 347 347 generic_exec_single(cpu, data, wait); 348 348 } 349 349 350 - /* Deprecated: shim for archs using old arch_send_call_function_ipi API. */ 351 - 352 - #ifndef arch_send_call_function_ipi_mask 353 - # define arch_send_call_function_ipi_mask(maskp) \ 354 - arch_send_call_function_ipi(*(maskp)) 355 - #endif 356 - 357 350 /** 358 351 * smp_call_function_many(): Run a function on a set of other CPUs. 359 352 * @mask: The set of cpus to run on (only runs on online subset).
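Generic sketch, not any particular port and not code from this merge: each architecture now implements arch_send_call_function_ipi_mask() itself, taking the mask by const pointer and kicking every CPU named in it. hw_send_ipi() and DEMO_CALL_FUNC_VECTOR are invented placeholders for whatever low-level IPI primitive a platform provides.

    #include <linux/cpumask.h>

    /* hypothetical platform primitive */
    extern void hw_send_ipi(int cpu, int vector);
    #define DEMO_CALL_FUNC_VECTOR 1

    void arch_send_call_function_ipi_mask(const struct cpumask *mask)
    {
            int cpu;

            /* the generic code passes a pointer, so no cpumask_t copy
             * lands on the stack even with NR_CPUS=4096 */
            for_each_cpu(cpu, mask)
                    hw_send_ipi(cpu, DEMO_CALL_FUNC_VECTOR);
    }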
+2 -5
kernel/trace/trace.c
··· 1984 1984 if (current_trace) 1985 1985 *iter->trace = *current_trace; 1986 1986 1987 - if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) 1987 + if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) 1988 1988 goto fail; 1989 - 1990 - cpumask_clear(iter->started); 1991 1989 1992 1990 if (current_trace && current_trace->print_max) 1993 1991 iter->tr = &max_tr; ··· 4387 4389 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) 4388 4390 goto out_free_buffer_mask; 4389 4391 4390 - if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) 4392 + if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) 4391 4393 goto out_free_tracing_cpumask; 4392 4394 4393 4395 /* To save memory, keep the ring buffer size to its minimum */ ··· 4398 4400 4399 4401 cpumask_copy(tracing_buffer_mask, cpu_possible_mask); 4400 4402 cpumask_copy(tracing_cpumask, cpu_all_mask); 4401 - cpumask_clear(tracing_reader_cpumask); 4402 4403 4403 4404 /* TODO: make the number of buffers hot pluggable with CPUS */ 4404 4405 global_trace.buffer = ring_buffer_alloc(ring_buf_size,
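Sketch of the pattern used above (invented names): zalloc_cpumask_var() hands back an already-cleared mask, replacing the alloc_cpumask_var() + cpumask_clear() pair.

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    static int demo_zalloc(void)
    {
            cpumask_var_t seen;

            /* was: alloc_cpumask_var(&seen, GFP_KERNEL);
             *      cpumask_clear(seen);                   */
            if (!zalloc_cpumask_var(&seen, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_set_cpu(0, seen);       /* record CPUs as handled */

            free_cpumask_var(seen);
            return 0;
    }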
+1 -2
mm/quicklist.c
··· 29 29 int node = numa_node_id(); 30 30 struct zone *zones = NODE_DATA(node)->node_zones; 31 31 int num_cpus_on_node; 32 - const struct cpumask *cpumask_on_node = cpumask_of_node(node); 33 32 34 33 node_free_pages = 35 34 #ifdef CONFIG_ZONE_DMA ··· 41 42 42 43 max = node_free_pages / FRACTION_OF_NODE_MEM; 43 44 44 - num_cpus_on_node = cpus_weight_nr(*cpumask_on_node); 45 + num_cpus_on_node = cpumask_weight(cpumask_of_node(node)); 45 46 max /= num_cpus_on_node; 46 47 47 48 return max(max, min_pages);
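Illustrative only: cpumask_of_node() already yields a const pointer, so the weight can be taken directly with cpumask_weight(), which scans only nr_cpu_ids bits; node_cpu_count() is an invented wrapper.

    #include <linux/topology.h>
    #include <linux/cpumask.h>

    /* was: cpus_weight_nr(*cpumask_of_node(node)) */
    static int node_cpu_count(int node)
    {
            return cpumask_weight(cpumask_of_node(node));
    }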
+1 -2
virt/kvm/kvm_main.c
··· 738 738 bool called = true; 739 739 struct kvm_vcpu *vcpu; 740 740 741 - if (alloc_cpumask_var(&cpus, GFP_ATOMIC)) 742 - cpumask_clear(cpus); 741 + zalloc_cpumask_var(&cpus, GFP_ATOMIC); 743 742 744 743 spin_lock(&kvm->requests_lock); 745 744 me = smp_processor_id();
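A hedged, generic sketch with invented names, not the kvm code itself: in atomic context zalloc_cpumask_var() can fail when CONFIG_CPUMASK_OFFSTACK=y, so callers keep a fallback path that works without the temporary mask; with off-stack masks disabled the "allocation" is a stack array and always succeeds.

    #include <linux/cpumask.h>
    #include <linux/slab.h>
    #include <linux/smp.h>

    static void demo_ping(void *info)
    {
            /* IPI-context work */
    }

    static void demo_notify_cpus(const struct cpumask *wanted)
    {
            cpumask_var_t tmp;

            if (!zalloc_cpumask_var(&tmp, GFP_ATOMIC)) {
                    /* no temporary mask: fall back to every online CPU */
                    smp_call_function_many(cpu_online_mask, demo_ping,
                                           NULL, true);
                    return;
            }

            cpumask_and(tmp, wanted, cpu_online_mask);
            if (!cpumask_empty(tmp))
                    smp_call_function_many(tmp, demo_ping, NULL, true);

            free_cpumask_var(tmp);
    }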