Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'cpumask-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux

Pull final removal of deprecated cpus_* cpumask functions from Rusty Russell:
"This is the final removal (after several years!) of the obsolete
cpus_* functions, prompted by their mis-use in staging.

With these functions removed, all cpu functions should only iterate to
nr_cpu_ids, so we finally only allocate that many bits when cpumasks
are allocated offstack"

* tag 'cpumask-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (25 commits)
cpumask: remove __first_cpu / __next_cpu
cpumask: resurrect CPU_MASK_CPU0
linux/cpumask.h: add typechecking to cpumask_test_cpu
cpumask: only allocate nr_cpumask_bits.
Fix weird uses of num_online_cpus().
cpumask: remove deprecated functions.
mips: fix obsolete cpumask_of_cpu usage.
x86: fix more deprecated cpu function usage.
ia64: remove deprecated cpus_ usage.
powerpc: fix deprecated CPU_MASK_CPU0 usage.
CPU_MASK_ALL/CPU_MASK_NONE: remove from deprecated region.
staging/lustre/o2iblnd: Don't use cpus_weight
staging/lustre/libcfs: replace deprecated cpus_ calls with cpumask_
staging/lustre/ptlrpc: Do not use deprecated cpus_* functions
blackfin: fix up obsolete cpu function usage.
parisc: fix up obsolete cpu function usage.
tile: fix up obsolete cpu function usage.
arm64: fix up obsolete cpu function usage.
mips: fix up obsolete cpu function usage.
x86: fix up obsolete cpu function usage.
...

+170 -358
+1 -1
Documentation/cpu-hotplug.txt
··· 108 108 for_each_possible_cpu - Iterate over cpu_possible_mask 109 109 for_each_online_cpu - Iterate over cpu_online_mask 110 110 for_each_present_cpu - Iterate over cpu_present_mask 111 - for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask. 111 + for_each_cpu(x,mask) - Iterate over some random collection of cpu mask. 112 112 113 113 #include <linux/cpu.h> 114 114 get_online_cpus() and put_online_cpus():
+1 -1
arch/arm64/kernel/smp.c
··· 636 636 cpumask_t mask; 637 637 638 638 cpumask_copy(&mask, cpu_online_mask); 639 - cpu_clear(smp_processor_id(), mask); 639 + cpumask_clear_cpu(smp_processor_id(), &mask); 640 640 641 641 smp_cross_call(&mask, IPI_CPU_STOP); 642 642 }
+1 -1
arch/blackfin/mach-bf561/smp.c
··· 124 124 unsigned int cpu; 125 125 int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8; 126 126 127 - for_each_cpu_mask(cpu, callmap) { 127 + for_each_cpu(cpu, &callmap) { 128 128 BUG_ON(cpu >= 2); 129 129 SSYNC(); 130 130 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
+3 -3
arch/ia64/include/asm/acpi.h
··· 117 117 #ifdef CONFIG_ACPI_NUMA 118 118 extern cpumask_t early_cpu_possible_map; 119 119 #define for_each_possible_early_cpu(cpu) \ 120 - for_each_cpu_mask((cpu), early_cpu_possible_map) 120 + for_each_cpu((cpu), &early_cpu_possible_map) 121 121 122 122 static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus) 123 123 { ··· 125 125 int cpu; 126 126 int next_nid = 0; 127 127 128 - low_cpu = cpus_weight(early_cpu_possible_map); 128 + low_cpu = cpumask_weight(&early_cpu_possible_map); 129 129 130 130 high_cpu = max(low_cpu, min_cpus); 131 131 high_cpu = min(high_cpu + reserve_cpus, NR_CPUS); 132 132 133 133 for (cpu = low_cpu; cpu < high_cpu; cpu++) { 134 - cpu_set(cpu, early_cpu_possible_map); 134 + cpumask_set_cpu(cpu, &early_cpu_possible_map); 135 135 if (node_cpuid[cpu].nid == NUMA_NO_NODE) { 136 136 node_cpuid[cpu].nid = next_nid; 137 137 next_nid++;
+1 -1
arch/ia64/kernel/acpi.c
··· 483 483 (pa->apic_id << 8) | (pa->local_sapic_eid); 484 484 /* nid should be overridden as logical node id later */ 485 485 node_cpuid[srat_num_cpus].nid = pxm; 486 - cpu_set(srat_num_cpus, early_cpu_possible_map); 486 + cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map); 487 487 srat_num_cpus++; 488 488 } 489 489
+1 -1
arch/ia64/kernel/iosapic.c
··· 690 690 do { 691 691 if (++cpu >= nr_cpu_ids) 692 692 cpu = 0; 693 - } while (!cpu_online(cpu) || !cpu_isset(cpu, domain)); 693 + } while (!cpu_online(cpu) || !cpumask_test_cpu(cpu, &domain)); 694 694 695 695 return cpu_physical_id(cpu); 696 696 #else /* CONFIG_SMP */
+17 -19
arch/ia64/kernel/irq_ia64.c
··· 109 109 int pos, vector; 110 110 111 111 cpumask_and(&mask, &domain, cpu_online_mask); 112 - if (cpus_empty(mask)) 112 + if (cpumask_empty(&mask)) 113 113 return -EINVAL; 114 114 115 115 for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) { 116 116 vector = IA64_FIRST_DEVICE_VECTOR + pos; 117 - cpus_and(mask, domain, vector_table[vector]); 118 - if (!cpus_empty(mask)) 117 + cpumask_and(&mask, &domain, &vector_table[vector]); 118 + if (!cpumask_empty(&mask)) 119 119 continue; 120 120 return vector; 121 121 } ··· 132 132 BUG_ON((unsigned)vector >= IA64_NUM_VECTORS); 133 133 134 134 cpumask_and(&mask, &domain, cpu_online_mask); 135 - if (cpus_empty(mask)) 135 + if (cpumask_empty(&mask)) 136 136 return -EINVAL; 137 - if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain)) 137 + if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain)) 138 138 return 0; 139 139 if (cfg->vector != IRQ_VECTOR_UNASSIGNED) 140 140 return -EBUSY; 141 - for_each_cpu_mask(cpu, mask) 141 + for_each_cpu(cpu, &mask) 142 142 per_cpu(vector_irq, cpu)[vector] = irq; 143 143 cfg->vector = vector; 144 144 cfg->domain = domain; 145 145 irq_status[irq] = IRQ_USED; 146 - cpus_or(vector_table[vector], vector_table[vector], domain); 146 + cpumask_or(&vector_table[vector], &vector_table[vector], &domain); 147 147 return 0; 148 148 } 149 149 ··· 161 161 static void __clear_irq_vector(int irq) 162 162 { 163 163 int vector, cpu; 164 - cpumask_t mask; 165 164 cpumask_t domain; 166 165 struct irq_cfg *cfg = &irq_cfg[irq]; 167 166 ··· 168 169 BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED); 169 170 vector = cfg->vector; 170 171 domain = cfg->domain; 171 - cpumask_and(&mask, &cfg->domain, cpu_online_mask); 172 - for_each_cpu_mask(cpu, mask) 172 + for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask) 173 173 per_cpu(vector_irq, cpu)[vector] = -1; 174 174 cfg->vector = IRQ_VECTOR_UNASSIGNED; 175 175 cfg->domain = CPU_MASK_NONE; 176 176 irq_status[irq] = IRQ_UNUSED; 177 - 
cpus_andnot(vector_table[vector], vector_table[vector], domain); 177 + cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain); 178 178 } 179 179 180 180 static void clear_irq_vector(int irq) ··· 242 244 per_cpu(vector_irq, cpu)[vector] = -1; 243 245 /* Mark the inuse vectors */ 244 246 for (irq = 0; irq < NR_IRQS; ++irq) { 245 - if (!cpu_isset(cpu, irq_cfg[irq].domain)) 247 + if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain)) 246 248 continue; 247 249 vector = irq_to_vector(irq); 248 250 per_cpu(vector_irq, cpu)[vector] = irq; ··· 259 261 static cpumask_t vector_allocation_domain(int cpu) 260 262 { 261 263 if (vector_domain_type == VECTOR_DOMAIN_PERCPU) 262 - return cpumask_of_cpu(cpu); 264 + return *cpumask_of(cpu); 263 265 return CPU_MASK_ALL; 264 266 } 265 267 ··· 273 275 return -EBUSY; 274 276 if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu)) 275 277 return -EINVAL; 276 - if (cpu_isset(cpu, cfg->domain)) 278 + if (cpumask_test_cpu(cpu, &cfg->domain)) 277 279 return 0; 278 280 domain = vector_allocation_domain(cpu); 279 281 vector = find_unassigned_vector(domain); ··· 307 309 if (likely(!cfg->move_in_progress)) 308 310 return; 309 311 310 - if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain))) 312 + if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain))) 311 313 return; 312 314 313 315 cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask); 314 - cfg->move_cleanup_count = cpus_weight(cleanup_mask); 315 - for_each_cpu_mask(i, cleanup_mask) 316 + cfg->move_cleanup_count = cpumask_weight(&cleanup_mask); 317 + for_each_cpu(i, &cleanup_mask) 316 318 platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0); 317 319 cfg->move_in_progress = 0; 318 320 } ··· 338 340 if (!cfg->move_cleanup_count) 339 341 goto unlock; 340 342 341 - if (!cpu_isset(me, cfg->old_domain)) 343 + if (!cpumask_test_cpu(me, &cfg->old_domain)) 342 344 goto unlock; 343 345 344 346 spin_lock_irqsave(&vector_lock, flags); 345 347 
__this_cpu_write(vector_irq[vector], -1); 346 - cpu_clear(me, vector_table[vector]); 348 + cpumask_clear_cpu(me, &vector_table[vector]); 347 349 spin_unlock_irqrestore(&vector_lock, flags); 348 350 cfg->move_cleanup_count--; 349 351 unlock:
+5 -5
arch/ia64/kernel/mca.c
··· 1293 1293 monarch_cpu = cpu; 1294 1294 sos->monarch = 1; 1295 1295 } else { 1296 - cpu_set(cpu, mca_cpu); 1296 + cpumask_set_cpu(cpu, &mca_cpu); 1297 1297 sos->monarch = 0; 1298 1298 } 1299 1299 mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d " ··· 1316 1316 */ 1317 1317 ia64_mca_wakeup_all(); 1318 1318 } else { 1319 - while (cpu_isset(cpu, mca_cpu)) 1319 + while (cpumask_test_cpu(cpu, &mca_cpu)) 1320 1320 cpu_relax(); /* spin until monarch wakes us */ 1321 1321 } 1322 1322 ··· 1355 1355 * and put this cpu in the rendez loop. 1356 1356 */ 1357 1357 for_each_online_cpu(i) { 1358 - if (cpu_isset(i, mca_cpu)) { 1358 + if (cpumask_test_cpu(i, &mca_cpu)) { 1359 1359 monarch_cpu = i; 1360 - cpu_clear(i, mca_cpu); /* wake next cpu */ 1360 + cpumask_clear_cpu(i, &mca_cpu); /* wake next cpu */ 1361 1361 while (monarch_cpu != -1) 1362 1362 cpu_relax(); /* spin until last cpu leaves */ 1363 1363 set_curr_task(cpu, previous_current); ··· 1822 1822 ti->cpu = cpu; 1823 1823 p->stack = ti; 1824 1824 p->state = TASK_UNINTERRUPTIBLE; 1825 - cpu_set(cpu, p->cpus_allowed); 1825 + cpumask_set_cpu(cpu, &p->cpus_allowed); 1826 1826 INIT_LIST_HEAD(&p->tasks); 1827 1827 p->parent = p->real_parent = p->group_leader = p; 1828 1828 INIT_LIST_HEAD(&p->children);
+4 -6
arch/ia64/kernel/msi_ia64.c
··· 47 47 struct msi_msg msg; 48 48 unsigned long dest_phys_id; 49 49 int irq, vector; 50 - cpumask_t mask; 51 50 52 51 irq = create_irq(); 53 52 if (irq < 0) 54 53 return irq; 55 54 56 55 irq_set_msi_desc(irq, desc); 57 - cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask); 58 - dest_phys_id = cpu_physical_id(first_cpu(mask)); 56 + dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)), 57 + cpu_online_mask)); 59 58 vector = irq_to_vector(irq); 60 59 61 60 msg.address_hi = 0; ··· 170 171 { 171 172 struct irq_cfg *cfg = irq_cfg + irq; 172 173 unsigned dest; 173 - cpumask_t mask; 174 174 175 - cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask); 176 - dest = cpu_physical_id(first_cpu(mask)); 175 + dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)), 176 + cpu_online_mask)); 177 177 178 178 msg->address_hi = 0; 179 179 msg->address_lo =
+5 -5
arch/ia64/kernel/numa.c
··· 39 39 } 40 40 /* sanity check first */ 41 41 oldnid = cpu_to_node_map[cpu]; 42 - if (cpu_isset(cpu, node_to_cpu_mask[oldnid])) { 42 + if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) { 43 43 return; /* nothing to do */ 44 44 } 45 45 /* we don't have cpu-driven node hot add yet... ··· 47 47 if (!node_online(nid)) 48 48 nid = first_online_node; 49 49 cpu_to_node_map[cpu] = nid; 50 - cpu_set(cpu, node_to_cpu_mask[nid]); 50 + cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]); 51 51 return; 52 52 } 53 53 54 54 void unmap_cpu_from_node(int cpu, int nid) 55 55 { 56 - WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid])); 56 + WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid])); 57 57 WARN_ON(cpu_to_node_map[cpu] != nid); 58 58 cpu_to_node_map[cpu] = 0; 59 - cpu_clear(cpu, node_to_cpu_mask[nid]); 59 + cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]); 60 60 } 61 61 62 62 ··· 71 71 int cpu, i, node; 72 72 73 73 for(node=0; node < MAX_NUMNODES; node++) 74 - cpus_clear(node_to_cpu_mask[node]); 74 + cpumask_clear(&node_to_cpu_mask[node]); 75 75 76 76 for_each_possible_early_cpu(cpu) { 77 77 node = -1;
+12 -12
arch/ia64/kernel/salinfo.c
··· 256 256 data_saved->buffer = buffer; 257 257 } 258 258 } 259 - cpu_set(smp_processor_id(), data->cpu_event); 259 + cpumask_set_cpu(smp_processor_id(), &data->cpu_event); 260 260 if (irqsafe) { 261 261 salinfo_work_to_do(data); 262 262 spin_unlock_irqrestore(&data_saved_lock, flags); ··· 274 274 unsigned long flags; 275 275 if (!data->open) 276 276 return; 277 - if (!cpus_empty(data->cpu_event)) { 277 + if (!cpumask_empty(&data->cpu_event)) { 278 278 spin_lock_irqsave(&data_saved_lock, flags); 279 279 salinfo_work_to_do(data); 280 280 spin_unlock_irqrestore(&data_saved_lock, flags); ··· 308 308 int i, n, cpu = -1; 309 309 310 310 retry: 311 - if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) { 311 + if (cpumask_empty(&data->cpu_event) && down_trylock(&data->mutex)) { 312 312 if (file->f_flags & O_NONBLOCK) 313 313 return -EAGAIN; 314 314 if (down_interruptible(&data->mutex)) ··· 317 317 318 318 n = data->cpu_check; 319 319 for (i = 0; i < nr_cpu_ids; i++) { 320 - if (cpu_isset(n, data->cpu_event)) { 320 + if (cpumask_test_cpu(n, &data->cpu_event)) { 321 321 if (!cpu_online(n)) { 322 - cpu_clear(n, data->cpu_event); 322 + cpumask_clear_cpu(n, &data->cpu_event); 323 323 continue; 324 324 } 325 325 cpu = n; ··· 451 451 call_on_cpu(cpu, salinfo_log_read_cpu, data); 452 452 if (!data->log_size) { 453 453 data->state = STATE_NO_DATA; 454 - cpu_clear(cpu, data->cpu_event); 454 + cpumask_clear_cpu(cpu, &data->cpu_event); 455 455 } else { 456 456 data->state = STATE_LOG_RECORD; 457 457 } ··· 491 491 unsigned long flags; 492 492 spin_lock_irqsave(&data_saved_lock, flags); 493 493 data->state = STATE_NO_DATA; 494 - if (!cpu_isset(cpu, data->cpu_event)) { 494 + if (!cpumask_test_cpu(cpu, &data->cpu_event)) { 495 495 spin_unlock_irqrestore(&data_saved_lock, flags); 496 496 return 0; 497 497 } 498 - cpu_clear(cpu, data->cpu_event); 498 + cpumask_clear_cpu(cpu, &data->cpu_event); 499 499 if (data->saved_num) { 500 500 shift1_data_saved(data, data->saved_num - 1); 
501 501 data->saved_num = 0; ··· 509 509 salinfo_log_new_read(cpu, data); 510 510 if (data->state == STATE_LOG_RECORD) { 511 511 spin_lock_irqsave(&data_saved_lock, flags); 512 - cpu_set(cpu, data->cpu_event); 512 + cpumask_set_cpu(cpu, &data->cpu_event); 513 513 salinfo_work_to_do(data); 514 514 spin_unlock_irqrestore(&data_saved_lock, flags); 515 515 } ··· 581 581 for (i = 0, data = salinfo_data; 582 582 i < ARRAY_SIZE(salinfo_data); 583 583 ++i, ++data) { 584 - cpu_set(cpu, data->cpu_event); 584 + cpumask_set_cpu(cpu, &data->cpu_event); 585 585 salinfo_work_to_do(data); 586 586 } 587 587 spin_unlock_irqrestore(&data_saved_lock, flags); ··· 601 601 shift1_data_saved(data, j); 602 602 } 603 603 } 604 - cpu_clear(cpu, data->cpu_event); 604 + cpumask_clear_cpu(cpu, &data->cpu_event); 605 605 } 606 606 spin_unlock_irqrestore(&data_saved_lock, flags); 607 607 break; ··· 659 659 660 660 /* we missed any events before now */ 661 661 for_each_online_cpu(j) 662 - cpu_set(j, data->cpu_event); 662 + cpumask_set_cpu(j, &data->cpu_event); 663 663 664 664 *sdir++ = dir; 665 665 }
+6 -5
arch/ia64/kernel/setup.c
··· 562 562 # ifdef CONFIG_ACPI_HOTPLUG_CPU 563 563 prefill_possible_map(); 564 564 # endif 565 - per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? 566 - 32 : cpus_weight(early_cpu_possible_map)), 565 + per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ? 566 + 32 : cpumask_weight(&early_cpu_possible_map)), 567 567 additional_cpus > 0 ? additional_cpus : 0); 568 568 # endif 569 569 #endif /* CONFIG_APCI_BOOT */ ··· 702 702 c->itc_freq / 1000000, c->itc_freq % 1000000, 703 703 lpj*HZ/500000, (lpj*HZ/5000) % 100); 704 704 #ifdef CONFIG_SMP 705 - seq_printf(m, "siblings : %u\n", cpus_weight(cpu_core_map[cpunum])); 705 + seq_printf(m, "siblings : %u\n", 706 + cpumask_weight(&cpu_core_map[cpunum])); 706 707 if (c->socket_id != -1) 707 708 seq_printf(m, "physical id: %u\n", c->socket_id); 708 709 if (c->threads_per_core > 1 || c->cores_per_socket > 1) ··· 934 933 * (must be done after per_cpu area is setup) 935 934 */ 936 935 if (smp_processor_id() == 0) { 937 - cpu_set(0, per_cpu(cpu_sibling_map, 0)); 938 - cpu_set(0, cpu_core_map[0]); 936 + cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0)); 937 + cpumask_set_cpu(0, &cpu_core_map[0]); 939 938 } else { 940 939 /* 941 940 * Set ar.k3 so that assembly code in MCA handler can compute
+3 -3
arch/ia64/kernel/smp.c
··· 262 262 preempt_disable(); 263 263 mycpu = smp_processor_id(); 264 264 265 - for_each_cpu_mask(cpu, cpumask) 265 + for_each_cpu(cpu, &cpumask) 266 266 counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff; 267 267 268 268 mb(); 269 - for_each_cpu_mask(cpu, cpumask) { 269 + for_each_cpu(cpu, &cpumask) { 270 270 if (cpu == mycpu) 271 271 flush_mycpu = 1; 272 272 else ··· 276 276 if (flush_mycpu) 277 277 smp_local_flush_tlb(); 278 278 279 - for_each_cpu_mask(cpu, cpumask) 279 + for_each_cpu(cpu, &cpumask) 280 280 while(counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff)) 281 281 udelay(FLUSH_DELAY); 282 282
+22 -20
arch/ia64/kernel/smpboot.c
··· 434 434 /* 435 435 * Allow the master to continue. 436 436 */ 437 - cpu_set(cpuid, cpu_callin_map); 437 + cpumask_set_cpu(cpuid, &cpu_callin_map); 438 438 Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid); 439 439 } 440 440 ··· 475 475 */ 476 476 Dprintk("Waiting on callin_map ..."); 477 477 for (timeout = 0; timeout < 100000; timeout++) { 478 - if (cpu_isset(cpu, cpu_callin_map)) 478 + if (cpumask_test_cpu(cpu, &cpu_callin_map)) 479 479 break; /* It has booted */ 480 480 udelay(100); 481 481 } 482 482 Dprintk("\n"); 483 483 484 - if (!cpu_isset(cpu, cpu_callin_map)) { 484 + if (!cpumask_test_cpu(cpu, &cpu_callin_map)) { 485 485 printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid); 486 486 ia64_cpu_to_sapicid[cpu] = -1; 487 487 set_cpu_online(cpu, false); /* was set in smp_callin() */ ··· 541 541 542 542 smp_setup_percpu_timer(); 543 543 544 - cpu_set(0, cpu_callin_map); 544 + cpumask_set_cpu(0, &cpu_callin_map); 545 545 546 546 local_cpu_data->loops_per_jiffy = loops_per_jiffy; 547 547 ia64_cpu_to_sapicid[0] = boot_cpu_id; ··· 565 565 void smp_prepare_boot_cpu(void) 566 566 { 567 567 set_cpu_online(smp_processor_id(), true); 568 - cpu_set(smp_processor_id(), cpu_callin_map); 568 + cpumask_set_cpu(smp_processor_id(), &cpu_callin_map); 569 569 set_numa_node(cpu_to_node_map[smp_processor_id()]); 570 570 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 571 571 paravirt_post_smp_prepare_boot_cpu(); ··· 577 577 { 578 578 int i; 579 579 580 - for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) 581 - cpu_clear(cpu, per_cpu(cpu_sibling_map, i)); 582 - for_each_cpu_mask(i, cpu_core_map[cpu]) 583 - cpu_clear(cpu, cpu_core_map[i]); 580 + for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) 581 + cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); 582 + for_each_cpu(i, &cpu_core_map[cpu]) 583 + cpumask_clear_cpu(cpu, &cpu_core_map[i]); 584 584 585 585 per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE; 586 586 } ··· 592 592 593 593 if 
(cpu_data(cpu)->threads_per_core == 1 && 594 594 cpu_data(cpu)->cores_per_socket == 1) { 595 - cpu_clear(cpu, cpu_core_map[cpu]); 596 - cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu)); 595 + cpumask_clear_cpu(cpu, &cpu_core_map[cpu]); 596 + cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); 597 597 return; 598 598 } 599 599 600 - last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0); 600 + last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0); 601 601 602 602 /* remove it from all sibling map's */ 603 603 clear_cpu_sibling_map(cpu); ··· 673 673 remove_siblinginfo(cpu); 674 674 fixup_irqs(); 675 675 local_flush_tlb_all(); 676 - cpu_clear(cpu, cpu_callin_map); 676 + cpumask_clear_cpu(cpu, &cpu_callin_map); 677 677 return 0; 678 678 } 679 679 ··· 718 718 719 719 for_each_online_cpu(i) { 720 720 if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) { 721 - cpu_set(i, cpu_core_map[cpu]); 722 - cpu_set(cpu, cpu_core_map[i]); 721 + cpumask_set_cpu(i, &cpu_core_map[cpu]); 722 + cpumask_set_cpu(cpu, &cpu_core_map[i]); 723 723 if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) { 724 - cpu_set(i, per_cpu(cpu_sibling_map, cpu)); 725 - cpu_set(cpu, per_cpu(cpu_sibling_map, i)); 724 + cpumask_set_cpu(i, 725 + &per_cpu(cpu_sibling_map, cpu)); 726 + cpumask_set_cpu(cpu, 727 + &per_cpu(cpu_sibling_map, i)); 726 728 } 727 729 } 728 730 } ··· 744 742 * Already booted cpu? not valid anymore since we dont 745 743 * do idle loop tightspin anymore. 746 744 */ 747 - if (cpu_isset(cpu, cpu_callin_map)) 745 + if (cpumask_test_cpu(cpu, &cpu_callin_map)) 748 746 return -EINVAL; 749 747 750 748 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; ··· 755 753 756 754 if (cpu_data(cpu)->threads_per_core == 1 && 757 755 cpu_data(cpu)->cores_per_socket == 1) { 758 - cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); 759 - cpu_set(cpu, cpu_core_map[cpu]); 756 + cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); 757 + cpumask_set_cpu(cpu, &cpu_core_map[cpu]); 760 758 return 0; 761 759 } 762 760
+3 -3
arch/ia64/kernel/topology.c
··· 148 148 149 149 if (cpu_data(cpu)->threads_per_core <= 1 && 150 150 cpu_data(cpu)->cores_per_socket <= 1) { 151 - cpu_set(cpu, this_leaf->shared_cpu_map); 151 + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); 152 152 return; 153 153 } 154 154 ··· 164 164 if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id 165 165 && cpu_data(j)->core_id == csi.log1_cid 166 166 && cpu_data(j)->thread_id == csi.log1_tid) 167 - cpu_set(j, this_leaf->shared_cpu_map); 167 + cpumask_set_cpu(j, &this_leaf->shared_cpu_map); 168 168 169 169 i++; 170 170 } while (i < num_shared && ··· 177 177 static void cache_shared_cpu_map_setup(unsigned int cpu, 178 178 struct cache_info * this_leaf) 179 179 { 180 - cpu_set(cpu, this_leaf->shared_cpu_map); 180 + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); 181 181 return; 182 182 } 183 183 #endif
+1 -1
arch/m32r/kernel/smpboot.c
··· 376 376 if (!cpumask_equal(&cpu_callin_map, cpu_online_mask)) 377 377 BUG(); 378 378 379 - for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++) 379 + for_each_online_cpu(cpu_id) 380 380 show_cpu_info(cpu_id); 381 381 382 382 /*
+2 -2
arch/mips/bcm63xx/irq.c
··· 58 58 59 59 #ifdef CONFIG_SMP 60 60 if (m) 61 - enable &= cpu_isset(cpu, *m); 61 + enable &= cpumask_test_cpu(cpu, m); 62 62 else if (irqd_affinity_was_set(d)) 63 - enable &= cpu_isset(cpu, *d->affinity); 63 + enable &= cpumask_test_cpu(cpu, d->affinity); 64 64 #endif 65 65 return enable; 66 66 }
+2 -2
arch/mips/cavium-octeon/smp.c
··· 72 72 { 73 73 unsigned int i; 74 74 75 - for_each_cpu_mask(i, *mask) 75 + for_each_cpu(i, mask) 76 76 octeon_send_ipi_single(i, action); 77 77 } 78 78 ··· 239 239 return -ENOTSUPP; 240 240 241 241 set_cpu_online(cpu, false); 242 - cpu_clear(cpu, cpu_callin_map); 242 + cpumask_clear_cpu(cpu, &cpu_callin_map); 243 243 octeon_fixup_irqs(); 244 244 245 245 flush_cache_all();
+1 -1
arch/mips/include/asm/smp.h
··· 88 88 { 89 89 extern struct plat_smp_ops *mp_ops; /* private */ 90 90 91 - mp_ops->send_ipi_mask(&cpumask_of_cpu(cpu), SMP_CALL_FUNCTION); 91 + mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION); 92 92 } 93 93 94 94 static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+4 -4
arch/mips/kernel/crash.c
··· 25 25 return; 26 26 27 27 local_irq_disable(); 28 - if (!cpu_isset(cpu, cpus_in_crash)) 28 + if (!cpumask_test_cpu(cpu, &cpus_in_crash)) 29 29 crash_save_cpu(regs, cpu); 30 - cpu_set(cpu, cpus_in_crash); 30 + cpumask_set_cpu(cpu, &cpus_in_crash); 31 31 32 32 while (!atomic_read(&kexec_ready_to_reboot)) 33 33 cpu_relax(); ··· 50 50 */ 51 51 pr_emerg("Sending IPI to other cpus...\n"); 52 52 msecs = 10000; 53 - while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) { 53 + while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) { 54 54 cpu_relax(); 55 55 mdelay(1); 56 56 } ··· 66 66 crashing_cpu = smp_processor_id(); 67 67 crash_save_cpu(regs, crashing_cpu); 68 68 crash_kexec_prepare_cpus(); 69 - cpu_set(crashing_cpu, cpus_in_crash); 69 + cpumask_set_cpu(crashing_cpu, &cpus_in_crash); 70 70 }
+2 -2
arch/mips/kernel/mips-mt-fpaff.c
··· 114 114 /* Compute new global allowed CPU set if necessary */ 115 115 ti = task_thread_info(p); 116 116 if (test_ti_thread_flag(ti, TIF_FPUBOUND) && 117 - cpus_intersects(*new_mask, mt_fpu_cpumask)) { 118 - cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask); 117 + cpumask_intersects(new_mask, &mt_fpu_cpumask)) { 118 + cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask); 119 119 retval = set_cpus_allowed_ptr(p, effective_mask); 120 120 } else { 121 121 cpumask_copy(effective_mask, new_mask);
+1 -1
arch/mips/kernel/process.c
··· 49 49 void arch_cpu_idle_dead(void) 50 50 { 51 51 /* What the heck is this check doing ? */ 52 - if (!cpu_isset(smp_processor_id(), cpu_callin_map)) 52 + if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map)) 53 53 play_dead(); 54 54 } 55 55 #endif
+1 -1
arch/mips/kernel/smp-bmips.c
··· 362 362 pr_info("SMP: CPU%d is offline\n", cpu); 363 363 364 364 set_cpu_online(cpu, false); 365 - cpu_clear(cpu, cpu_callin_map); 365 + cpumask_clear_cpu(cpu, &cpu_callin_map); 366 366 clear_c0_status(IE_IRQ5); 367 367 368 368 local_flush_tlb_all();
+2 -2
arch/mips/kernel/smp-cmp.c
··· 66 66 #ifdef CONFIG_MIPS_MT_FPAFF 67 67 /* If we have an FPU, enroll ourselves in the FPU-full mask */ 68 68 if (cpu_has_fpu) 69 - cpu_set(smp_processor_id(), mt_fpu_cpumask); 69 + cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask); 70 70 #endif /* CONFIG_MIPS_MT_FPAFF */ 71 71 72 72 local_irq_enable(); ··· 110 110 #ifdef CONFIG_MIPS_MT_FPAFF 111 111 /* If we have an FPU, enroll ourselves in the FPU-full mask */ 112 112 if (cpu_has_fpu) 113 - cpu_set(0, mt_fpu_cpumask); 113 + cpumask_set_cpu(0, &mt_fpu_cpumask); 114 114 #endif /* CONFIG_MIPS_MT_FPAFF */ 115 115 116 116 for (i = 1; i < NR_CPUS; i++) {
+2 -2
arch/mips/kernel/smp-cps.c
··· 290 290 #ifdef CONFIG_MIPS_MT_FPAFF 291 291 /* If we have an FPU, enroll ourselves in the FPU-full mask */ 292 292 if (cpu_has_fpu) 293 - cpu_set(smp_processor_id(), mt_fpu_cpumask); 293 + cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask); 294 294 #endif /* CONFIG_MIPS_MT_FPAFF */ 295 295 296 296 local_irq_enable(); ··· 313 313 atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask); 314 314 smp_mb__after_atomic(); 315 315 set_cpu_online(cpu, false); 316 - cpu_clear(cpu, cpu_callin_map); 316 + cpumask_clear_cpu(cpu, &cpu_callin_map); 317 317 318 318 return 0; 319 319 }
+2 -2
arch/mips/kernel/smp-mt.c
··· 178 178 #ifdef CONFIG_MIPS_MT_FPAFF 179 179 /* If we have an FPU, enroll ourselves in the FPU-full mask */ 180 180 if (cpu_has_fpu) 181 - cpu_set(smp_processor_id(), mt_fpu_cpumask); 181 + cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask); 182 182 #endif /* CONFIG_MIPS_MT_FPAFF */ 183 183 184 184 local_irq_enable(); ··· 239 239 #ifdef CONFIG_MIPS_MT_FPAFF 240 240 /* If we have an FPU, enroll ourselves in the FPU-full mask */ 241 241 if (cpu_has_fpu) 242 - cpu_set(0, mt_fpu_cpumask); 242 + cpumask_set_cpu(0, &mt_fpu_cpumask); 243 243 #endif /* CONFIG_MIPS_MT_FPAFF */ 244 244 if (!cpu_has_mipsmt) 245 245 return;
+13 -13
arch/mips/kernel/smp.c
··· 75 75 { 76 76 int i; 77 77 78 - cpu_set(cpu, cpu_sibling_setup_map); 78 + cpumask_set_cpu(cpu, &cpu_sibling_setup_map); 79 79 80 80 if (smp_num_siblings > 1) { 81 - for_each_cpu_mask(i, cpu_sibling_setup_map) { 81 + for_each_cpu(i, &cpu_sibling_setup_map) { 82 82 if (cpu_data[cpu].package == cpu_data[i].package && 83 83 cpu_data[cpu].core == cpu_data[i].core) { 84 - cpu_set(i, cpu_sibling_map[cpu]); 85 - cpu_set(cpu, cpu_sibling_map[i]); 84 + cpumask_set_cpu(i, &cpu_sibling_map[cpu]); 85 + cpumask_set_cpu(cpu, &cpu_sibling_map[i]); 86 86 } 87 87 } 88 88 } else 89 - cpu_set(cpu, cpu_sibling_map[cpu]); 89 + cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]); 90 90 } 91 91 92 92 static inline void set_cpu_core_map(int cpu) 93 93 { 94 94 int i; 95 95 96 - cpu_set(cpu, cpu_core_setup_map); 96 + cpumask_set_cpu(cpu, &cpu_core_setup_map); 97 97 98 - for_each_cpu_mask(i, cpu_core_setup_map) { 98 + for_each_cpu(i, &cpu_core_setup_map) { 99 99 if (cpu_data[cpu].package == cpu_data[i].package) { 100 - cpu_set(i, cpu_core_map[cpu]); 101 - cpu_set(cpu, cpu_core_map[i]); 100 + cpumask_set_cpu(i, &cpu_core_map[cpu]); 101 + cpumask_set_cpu(cpu, &cpu_core_map[i]); 102 102 } 103 103 } 104 104 } ··· 138 138 cpu = smp_processor_id(); 139 139 cpu_data[cpu].udelay_val = loops_per_jiffy; 140 140 141 - cpu_set(cpu, cpu_coherent_mask); 141 + cpumask_set_cpu(cpu, &cpu_coherent_mask); 142 142 notify_cpu_starting(cpu); 143 143 144 144 set_cpu_online(cpu, true); ··· 146 146 set_cpu_sibling_map(cpu); 147 147 set_cpu_core_map(cpu); 148 148 149 - cpu_set(cpu, cpu_callin_map); 149 + cpumask_set_cpu(cpu, &cpu_callin_map); 150 150 151 151 synchronise_count_slave(cpu); 152 152 ··· 208 208 { 209 209 set_cpu_possible(0, true); 210 210 set_cpu_online(0, true); 211 - cpu_set(0, cpu_callin_map); 211 + cpumask_set_cpu(0, &cpu_callin_map); 212 212 } 213 213 214 214 int __cpu_up(unsigned int cpu, struct task_struct *tidle) ··· 218 218 /* 219 219 * Trust is futile. We should really have timeouts ... 
220 220 */ 221 - while (!cpu_isset(cpu, cpu_callin_map)) 221 + while (!cpumask_test_cpu(cpu, &cpu_callin_map)) 222 222 udelay(100); 223 223 224 224 synchronise_count_master(cpu);
+3 -3
arch/mips/kernel/traps.c
··· 1153 1153 * restricted the allowed set to exclude any CPUs with FPUs, 1154 1154 * we'll skip the procedure. 1155 1155 */ 1156 - if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { 1156 + if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) { 1157 1157 cpumask_t tmask; 1158 1158 1159 1159 current->thread.user_cpus_allowed 1160 1160 = current->cpus_allowed; 1161 - cpus_and(tmask, current->cpus_allowed, 1162 - mt_fpu_cpumask); 1161 + cpumask_and(&tmask, &current->cpus_allowed, 1162 + &mt_fpu_cpumask); 1163 1163 set_cpus_allowed_ptr(current, &tmask); 1164 1164 set_thread_flag(TIF_FPUBOUND); 1165 1165 }
+2 -2
arch/mips/loongson/loongson-3/numa.c
··· 233 233 if (node_online(node)) { 234 234 szmem(node); 235 235 node_mem_init(node); 236 - cpus_clear(__node_data[(node)]->cpumask); 236 + cpumask_clear(&__node_data[(node)]->cpumask); 237 237 } 238 238 } 239 239 for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { ··· 244 244 if (loongson_sysconf.reserved_cpus_mask & (1<<cpu)) 245 245 continue; 246 246 247 - cpu_set(active_cpu, __node_data[(node)]->cpumask); 247 + cpumask_set_cpu(active_cpu, &__node_data[(node)]->cpumask); 248 248 pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node); 249 249 250 250 active_cpu++;
+1 -1
arch/mips/loongson/loongson-3/smp.c
··· 408 408 return -EBUSY; 409 409 410 410 set_cpu_online(cpu, false); 411 - cpu_clear(cpu, cpu_callin_map); 411 + cpumask_clear_cpu(cpu, &cpu_callin_map); 412 412 local_irq_save(flags); 413 413 fixup_irqs(); 414 414 local_irq_restore(flags);
+1 -1
arch/mips/paravirt/paravirt-smp.c
··· 75 75 { 76 76 unsigned int cpu; 77 77 78 - for_each_cpu_mask(cpu, *mask) 78 + for_each_cpu(cpu, mask) 79 79 paravirt_send_ipi_single(cpu, action); 80 80 } 81 81
+1 -1
arch/mips/sgi-ip27/ip27-init.c
··· 60 60 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); 61 61 int i; 62 62 63 - cpu_set(smp_processor_id(), hub->h_cpus); 63 + cpumask_set_cpu(smp_processor_id(), &hub->h_cpus); 64 64 65 65 if (test_and_set_bit(cnode, hub_init_mask)) 66 66 return;
+5 -5
arch/mips/sgi-ip27/ip27-klnuma.c
··· 29 29 void __init setup_replication_mask(void) 30 30 { 31 31 /* Set only the master cnode's bit. The master cnode is always 0. */ 32 - cpus_clear(ktext_repmask); 33 - cpu_set(0, ktext_repmask); 32 + cpumask_clear(&ktext_repmask); 33 + cpumask_set_cpu(0, &ktext_repmask); 34 34 35 35 #ifdef CONFIG_REPLICATE_KTEXT 36 36 #ifndef CONFIG_MAPPED_KERNEL ··· 43 43 if (cnode == 0) 44 44 continue; 45 45 /* Advertise that we have a copy of the kernel */ 46 - cpu_set(cnode, ktext_repmask); 46 + cpumask_set_cpu(cnode, &ktext_repmask); 47 47 } 48 48 } 49 49 #endif ··· 99 99 client_nasid = COMPACT_TO_NASID_NODEID(cnode); 100 100 101 101 /* Check if this node should get a copy of the kernel */ 102 - if (cpu_isset(cnode, ktext_repmask)) { 102 + if (cpumask_test_cpu(cnode, &ktext_repmask)) { 103 103 server_nasid = client_nasid; 104 104 copy_kernel(server_nasid); 105 105 } ··· 124 124 loadbase += 16777216; 125 125 #endif 126 126 offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase; 127 - if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask))) 127 + if ((cnode == 0) || (cpumask_test_cpu(cnode, &ktext_repmask))) 128 128 return TO_NODE(nasid, offset) >> PAGE_SHIFT; 129 129 else 130 130 return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT;
+1 -1
arch/mips/sgi-ip27/ip27-memory.c
··· 404 404 NODE_DATA(node)->node_start_pfn = start_pfn; 405 405 NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; 406 406 407 - cpus_clear(hub_data(node)->h_cpus); 407 + cpumask_clear(&hub_data(node)->h_cpus); 408 408 409 409 slot_freepfn += PFN_UP(sizeof(struct pglist_data) + 410 410 sizeof(struct hub_data));
+2 -2
arch/parisc/kernel/irq.c
··· 525 525 desc = irq_to_desc(irq); 526 526 cpumask_copy(&dest, desc->irq_data.affinity); 527 527 if (irqd_is_per_cpu(&desc->irq_data) && 528 - !cpu_isset(smp_processor_id(), dest)) { 529 - int cpu = first_cpu(dest); 528 + !cpumask_test_cpu(smp_processor_id(), &dest)) { 529 + int cpu = cpumask_first(&dest); 530 530 531 531 printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n", 532 532 irq, smp_processor_id(), cpu);
+1 -1
arch/powerpc/include/asm/cputhreads.h
··· 25 25 #define threads_per_core 1 26 26 #define threads_per_subcore 1 27 27 #define threads_shift 0 28 - #define threads_core_mask (CPU_MASK_CPU0) 28 + #define threads_core_mask (*get_cpu_mask(0)) 29 29 #endif 30 30 31 31 /* cpu_thread_mask_to_cores - Return a cpumask of one per cores
+1 -1
arch/sh/include/asm/mmu_context.h
··· 99 99 { 100 100 int i; 101 101 102 - for (i = 0; i < num_online_cpus(); i++) 102 + for_each_online_cpu(i) 103 103 cpu_context(i, mm) = NO_CONTEXT; 104 104 105 105 return 0;
+3 -3
arch/sh/kernel/smp.c
··· 363 363 smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); 364 364 } else { 365 365 int i; 366 - for (i = 0; i < num_online_cpus(); i++) 366 + for_each_online_cpu(i) 367 367 if (smp_processor_id() != i) 368 368 cpu_context(i, mm) = 0; 369 369 } ··· 400 400 smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1); 401 401 } else { 402 402 int i; 403 - for (i = 0; i < num_online_cpus(); i++) 403 + for_each_online_cpu(i) 404 404 if (smp_processor_id() != i) 405 405 cpu_context(i, mm) = 0; 406 406 } ··· 443 443 smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1); 444 444 } else { 445 445 int i; 446 - for (i = 0; i < num_online_cpus(); i++) 446 + for_each_online_cpu(i) 447 447 if (smp_processor_id() != i) 448 448 cpu_context(i, vma->vm_mm) = 0; 449 449 }
+2 -2
arch/sparc/kernel/time_32.c
··· 194 194 static void percpu_ce_setup(enum clock_event_mode mode, 195 195 struct clock_event_device *evt) 196 196 { 197 - int cpu = __first_cpu(evt->cpumask); 197 + int cpu = cpumask_first(evt->cpumask); 198 198 199 199 switch (mode) { 200 200 case CLOCK_EVT_MODE_PERIODIC: ··· 214 214 static int percpu_ce_set_next_event(unsigned long delta, 215 215 struct clock_event_device *evt) 216 216 { 217 - int cpu = __first_cpu(evt->cpumask); 217 + int cpu = cpumask_first(evt->cpumask); 218 218 unsigned int next = (unsigned int)delta; 219 219 220 220 sparc_config.load_profile_irq(cpu, next);
+1 -1
arch/tile/kernel/setup.c
··· 774 774 * though, there'll be no lowmem, so we just alloc_bootmem 775 775 * the memmap. There will be no percpu memory either. 776 776 */ 777 - if (i != 0 && cpu_isset(i, isolnodes)) { 777 + if (i != 0 && cpumask_test_cpu(i, &isolnodes)) { 778 778 node_memmap_pfn[i] = 779 779 alloc_bootmem_pfn(0, memmap_size, 0); 780 780 BUG_ON(node_percpu[i] != 0);
+2 -2
arch/x86/kernel/apic/x2apic_cluster.c
··· 171 171 for_each_online_cpu(cpu) { 172 172 if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) 173 173 continue; 174 - __cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu)); 175 - __cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu)); 174 + cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); 175 + cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); 176 176 } 177 177 free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu)); 178 178 free_cpumask_var(per_cpu(ipi_mask, this_cpu));
+2 -1
drivers/clocksource/dw_apb_timer.c
··· 117 117 unsigned long period; 118 118 struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); 119 119 120 - pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask), 120 + pr_debug("%s CPU %d mode=%d\n", __func__, 121 + cpumask_first(evt->cpumask), 121 122 mode); 122 123 123 124 switch (mode) {
+3 -3
drivers/cpuidle/coupled.c
··· 292 292 */ 293 293 smp_rmb(); 294 294 295 - for_each_cpu_mask(i, coupled->coupled_cpus) 295 + for_each_cpu(i, &coupled->coupled_cpus) 296 296 if (cpu_online(i) && coupled->requested_state[i] < state) 297 297 state = coupled->requested_state[i]; 298 298 ··· 338 338 { 339 339 int cpu; 340 340 341 - for_each_cpu_mask(cpu, coupled->coupled_cpus) 341 + for_each_cpu(cpu, &coupled->coupled_cpus) 342 342 if (cpu != this_cpu && cpu_online(cpu)) 343 343 cpuidle_coupled_poke(cpu); 344 344 } ··· 638 638 if (cpumask_empty(&dev->coupled_cpus)) 639 639 return 0; 640 640 641 - for_each_cpu_mask(cpu, dev->coupled_cpus) { 641 + for_each_cpu(cpu, &dev->coupled_cpus) { 642 642 other_dev = per_cpu(cpuidle_devices, cpu); 643 643 if (other_dev && other_dev->coupled) { 644 644 coupled = other_dev->coupled;
+2 -2
drivers/crypto/n2_core.c
··· 1754 1754 dev->dev.of_node->full_name); 1755 1755 return -EINVAL; 1756 1756 } 1757 - cpu_set(*id, p->sharing); 1757 + cpumask_set_cpu(*id, &p->sharing); 1758 1758 table[*id] = p; 1759 1759 } 1760 1760 return 0; ··· 1776 1776 return -ENOMEM; 1777 1777 } 1778 1778 1779 - cpus_clear(p->sharing); 1779 + cpumask_clear(&p->sharing); 1780 1780 spin_lock_init(&p->lock); 1781 1781 p->q_type = q_type; 1782 1782 INIT_LIST_HEAD(&p->jobs);
+1 -1
drivers/irqchip/irq-gic-v3.c
··· 567 567 */ 568 568 smp_wmb(); 569 569 570 - for_each_cpu_mask(cpu, *mask) { 570 + for_each_cpu(cpu, mask) { 571 571 u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL; 572 572 u16 tlist; 573 573
+3 -3
drivers/irqchip/irq-mips-gic.c
··· 389 389 int i; 390 390 391 391 cpumask_and(&tmp, cpumask, cpu_online_mask); 392 - if (cpus_empty(tmp)) 392 + if (cpumask_empty(&tmp)) 393 393 return -EINVAL; 394 394 395 395 /* Assumption : cpumask refers to a single CPU */ 396 396 spin_lock_irqsave(&gic_lock, flags); 397 397 398 398 /* Re-route this IRQ */ 399 - gic_map_to_vpe(irq, first_cpu(tmp)); 399 + gic_map_to_vpe(irq, cpumask_first(&tmp)); 400 400 401 401 /* Update the pcpu_masks */ 402 402 for (i = 0; i < NR_CPUS; i++) 403 403 clear_bit(irq, pcpu_masks[i].pcpu_mask); 404 - set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); 404 + set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); 405 405 406 406 cpumask_copy(d->affinity, cpumask); 407 407 spin_unlock_irqrestore(&gic_lock, flags);
+2 -2
drivers/net/ethernet/tile/tilegx.c
··· 1123 1123 addr + i * sizeof(struct tile_net_comps); 1124 1124 1125 1125 /* If this is a network cpu, create an iqueue. */ 1126 - if (cpu_isset(cpu, network_cpus_map)) { 1126 + if (cpumask_test_cpu(cpu, &network_cpus_map)) { 1127 1127 order = get_order(NOTIF_RING_SIZE); 1128 1128 page = homecache_alloc_pages(GFP_KERNEL, order, cpu); 1129 1129 if (page == NULL) { ··· 1299 1299 int first_ring, ring; 1300 1300 int instance = mpipe_instance(dev); 1301 1301 struct mpipe_data *md = &mpipe_data[instance]; 1302 - int network_cpus_count = cpus_weight(network_cpus_map); 1302 + int network_cpus_count = cpumask_weight(&network_cpus_map); 1303 1303 1304 1304 if (!hash_default) { 1305 1305 netdev_err(dev, "Networking requires hash_default!\n");
+2 -4
drivers/scsi/hpsa.c
··· 6632 6632 6633 6633 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) 6634 6634 { 6635 - int i, cpu; 6635 + int cpu; 6636 6636 6637 - cpu = cpumask_first(cpu_online_mask); 6638 - for (i = 0; i < num_online_cpus(); i++) { 6637 + for_each_online_cpu(cpu) { 6639 6638 u32 *lockup_detected; 6640 6639 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); 6641 6640 *lockup_detected = value; 6642 - cpu = cpumask_next(cpu, cpu_online_mask); 6643 6641 } 6644 6642 wmb(); /* be sure the per-cpu variables are out to memory */ 6645 6643 }
+11 -165
include/linux/cpumask.h
··· 11 11 #include <linux/bitmap.h> 12 12 #include <linux/bug.h> 13 13 14 + /* Don't assign or return these: may not be this big! */ 14 15 typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; 15 16 16 17 /** ··· 290 289 * @cpumask: the cpumask pointer 291 290 * 292 291 * Returns 1 if @cpu is set in @cpumask, else returns 0 293 - * 294 - * No static inline type checking - see Subtlety (1) above. 295 292 */ 296 - #define cpumask_test_cpu(cpu, cpumask) \ 297 - test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) 293 + static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) 294 + { 295 + return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); 296 + } 298 297 299 298 /** 300 299 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask ··· 610 609 */ 611 610 static inline size_t cpumask_size(void) 612 611 { 613 - /* FIXME: Once all cpumask assignments are eliminated, this 614 - * can be nr_cpumask_bits */ 615 - return BITS_TO_LONGS(NR_CPUS) * sizeof(long); 612 + return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long); 616 613 } 617 614 618 615 /* ··· 767 768 #if NR_CPUS <= BITS_PER_LONG 768 769 #define CPU_BITS_ALL \ 769 770 { \ 770 - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 771 + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ 771 772 } 772 773 773 774 #else /* NR_CPUS > BITS_PER_LONG */ ··· 775 776 #define CPU_BITS_ALL \ 776 777 { \ 777 778 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ 778 - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 779 + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ 779 780 } 780 781 #endif /* NR_CPUS > BITS_PER_LONG */ 781 782 ··· 796 797 nr_cpu_ids); 797 798 } 798 799 799 - /* 800 - * 801 - * From here down, all obsolete. Use cpumask_ variants! 
802 - * 803 - */ 804 - #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 805 - #define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) 806 - 807 - #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) 808 - 809 800 #if NR_CPUS <= BITS_PER_LONG 810 - 811 801 #define CPU_MASK_ALL \ 812 802 (cpumask_t) { { \ 813 - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 803 + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ 814 804 } } 815 - 816 805 #else 817 - 818 806 #define CPU_MASK_ALL \ 819 807 (cpumask_t) { { \ 820 808 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ 821 - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ 809 + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ 822 810 } } 823 - 824 - #endif 811 + #endif /* NR_CPUS > BITS_PER_LONG */ 825 812 826 813 #define CPU_MASK_NONE \ 827 814 (cpumask_t) { { \ ··· 818 833 (cpumask_t) { { \ 819 834 [0] = 1UL \ 820 835 } } 821 - 822 - #if NR_CPUS == 1 823 - #define first_cpu(src) ({ (void)(src); 0; }) 824 - #define next_cpu(n, src) ({ (void)(src); 1; }) 825 - #define any_online_cpu(mask) 0 826 - #define for_each_cpu_mask(cpu, mask) \ 827 - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 828 - #else /* NR_CPUS > 1 */ 829 - int __first_cpu(const cpumask_t *srcp); 830 - int __next_cpu(int n, const cpumask_t *srcp); 831 - 832 - #define first_cpu(src) __first_cpu(&(src)) 833 - #define next_cpu(n, src) __next_cpu((n), &(src)) 834 - #define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask) 835 - #define for_each_cpu_mask(cpu, mask) \ 836 - for ((cpu) = -1; \ 837 - (cpu) = next_cpu((cpu), (mask)), \ 838 - (cpu) < NR_CPUS; ) 839 - #endif /* SMP */ 840 - 841 - #if NR_CPUS <= 64 842 - 843 - #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) 844 - 845 - #else /* NR_CPUS > 64 */ 846 - 847 - int __next_cpu_nr(int n, const cpumask_t *srcp); 848 - #define for_each_cpu_mask_nr(cpu, mask) \ 849 - for ((cpu) = -1; \ 850 - (cpu) = __next_cpu_nr((cpu), &(mask)), \ 851 - (cpu) < nr_cpu_ids; ) 
852 - 853 - #endif /* NR_CPUS > 64 */ 854 - 855 - #define cpus_addr(src) ((src).bits) 856 - 857 - #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) 858 - static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) 859 - { 860 - set_bit(cpu, dstp->bits); 861 - } 862 - 863 - #define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst)) 864 - static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp) 865 - { 866 - clear_bit(cpu, dstp->bits); 867 - } 868 - 869 - #define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS) 870 - static inline void __cpus_setall(cpumask_t *dstp, unsigned int nbits) 871 - { 872 - bitmap_fill(dstp->bits, nbits); 873 - } 874 - 875 - #define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS) 876 - static inline void __cpus_clear(cpumask_t *dstp, unsigned int nbits) 877 - { 878 - bitmap_zero(dstp->bits, nbits); 879 - } 880 - 881 - /* No static inline type checking - see Subtlety (1) above. */ 882 - #define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits) 883 - 884 - #define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask)) 885 - static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) 886 - { 887 - return test_and_set_bit(cpu, addr->bits); 888 - } 889 - 890 - #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) 891 - static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, 892 - const cpumask_t *src2p, unsigned int nbits) 893 - { 894 - return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); 895 - } 896 - 897 - #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) 898 - static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, 899 - const cpumask_t *src2p, unsigned int nbits) 900 - { 901 - bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); 902 - } 903 - 904 - #define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS) 905 - static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, 906 - const 
cpumask_t *src2p, unsigned int nbits) 907 - { 908 - bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); 909 - } 910 - 911 - #define cpus_andnot(dst, src1, src2) \ 912 - __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) 913 - static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, 914 - const cpumask_t *src2p, unsigned int nbits) 915 - { 916 - return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); 917 - } 918 - 919 - #define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) 920 - static inline int __cpus_equal(const cpumask_t *src1p, 921 - const cpumask_t *src2p, unsigned int nbits) 922 - { 923 - return bitmap_equal(src1p->bits, src2p->bits, nbits); 924 - } 925 - 926 - #define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS) 927 - static inline int __cpus_intersects(const cpumask_t *src1p, 928 - const cpumask_t *src2p, unsigned int nbits) 929 - { 930 - return bitmap_intersects(src1p->bits, src2p->bits, nbits); 931 - } 932 - 933 - #define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS) 934 - static inline int __cpus_subset(const cpumask_t *src1p, 935 - const cpumask_t *src2p, unsigned int nbits) 936 - { 937 - return bitmap_subset(src1p->bits, src2p->bits, nbits); 938 - } 939 - 940 - #define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) 941 - static inline int __cpus_empty(const cpumask_t *srcp, unsigned int nbits) 942 - { 943 - return bitmap_empty(srcp->bits, nbits); 944 - } 945 - 946 - #define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS) 947 - static inline int __cpus_weight(const cpumask_t *srcp, unsigned int nbits) 948 - { 949 - return bitmap_weight(srcp->bits, nbits); 950 - } 951 - 952 - #define cpus_shift_left(dst, src, n) \ 953 - __cpus_shift_left(&(dst), &(src), (n), NR_CPUS) 954 - static inline void __cpus_shift_left(cpumask_t *dstp, 955 - const cpumask_t *srcp, int n, int nbits) 956 - { 957 - bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); 958 - } 959 - 
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ 960 836 961 837 #endif /* __LINUX_CPUMASK_H */
-4
lib/Kconfig
··· 396 396 them on the stack. This is a bit more expensive, but avoids 397 397 stack overflow. 398 398 399 - config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS 400 - bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS 401 - depends on BROKEN 402 - 403 399 config CPU_RMAP 404 400 bool 405 401 depends on SMP
-28
lib/cpumask.c
··· 5 5 #include <linux/export.h> 6 6 #include <linux/bootmem.h> 7 7 8 - int __first_cpu(const cpumask_t *srcp) 9 - { 10 - return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS)); 11 - } 12 - EXPORT_SYMBOL(__first_cpu); 13 - 14 - int __next_cpu(int n, const cpumask_t *srcp) 15 - { 16 - return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); 17 - } 18 - EXPORT_SYMBOL(__next_cpu); 19 - 20 - #if NR_CPUS > 64 21 - int __next_cpu_nr(int n, const cpumask_t *srcp) 22 - { 23 - return min_t(int, nr_cpu_ids, 24 - find_next_bit(srcp->bits, nr_cpu_ids, n+1)); 25 - } 26 - EXPORT_SYMBOL(__next_cpu_nr); 27 - #endif 28 - 29 8 /** 30 9 * cpumask_next_and - get the next cpu in *src1p & *src2p 31 10 * @n: the cpu prior to the place to search (ie. return will be > @n) ··· 69 90 dump_stack(); 70 91 } 71 92 #endif 72 - /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */ 73 - if (*mask) { 74 - unsigned char *ptr = (unsigned char *)cpumask_bits(*mask); 75 - unsigned int tail; 76 - tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long); 77 - memset(ptr + cpumask_size() - tail, 0, tail); 78 - } 79 93 80 94 return *mask != NULL; 81 95 }