Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drivers: fix up obsolete cpu function usage.

Thanks to spatch, plus manual removal of "&*". Then a sweep replacing
for_each_cpu_mask with for_each_cpu.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: netdev@vger.kernel.org

+13 -12
+2 -1
drivers/clocksource/dw_apb_timer.c
··· 117 117 unsigned long period; 118 118 struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); 119 119 120 - pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask), 120 + pr_debug("%s CPU %d mode=%d\n", __func__, 121 + cpumask_first(evt->cpumask), 121 122 mode); 122 123 123 124 switch (mode) {
+3 -3
drivers/cpuidle/coupled.c
··· 292 292 */ 293 293 smp_rmb(); 294 294 295 - for_each_cpu_mask(i, coupled->coupled_cpus) 295 + for_each_cpu(i, &coupled->coupled_cpus) 296 296 if (cpu_online(i) && coupled->requested_state[i] < state) 297 297 state = coupled->requested_state[i]; 298 298 ··· 338 338 { 339 339 int cpu; 340 340 341 - for_each_cpu_mask(cpu, coupled->coupled_cpus) 341 + for_each_cpu(cpu, &coupled->coupled_cpus) 342 342 if (cpu != this_cpu && cpu_online(cpu)) 343 343 cpuidle_coupled_poke(cpu); 344 344 } ··· 638 638 if (cpumask_empty(&dev->coupled_cpus)) 639 639 return 0; 640 640 641 - for_each_cpu_mask(cpu, dev->coupled_cpus) { 641 + for_each_cpu(cpu, &dev->coupled_cpus) { 642 642 other_dev = per_cpu(cpuidle_devices, cpu); 643 643 if (other_dev && other_dev->coupled) { 644 644 coupled = other_dev->coupled;
+2 -2
drivers/crypto/n2_core.c
··· 1754 1754 dev->dev.of_node->full_name); 1755 1755 return -EINVAL; 1756 1756 } 1757 - cpu_set(*id, p->sharing); 1757 + cpumask_set_cpu(*id, &p->sharing); 1758 1758 table[*id] = p; 1759 1759 } 1760 1760 return 0; ··· 1776 1776 return -ENOMEM; 1777 1777 } 1778 1778 1779 - cpus_clear(p->sharing); 1779 + cpumask_clear(&p->sharing); 1780 1780 spin_lock_init(&p->lock); 1781 1781 p->q_type = q_type; 1782 1782 INIT_LIST_HEAD(&p->jobs);
+1 -1
drivers/irqchip/irq-gic-v3.c
··· 512 512 */ 513 513 smp_wmb(); 514 514 515 - for_each_cpu_mask(cpu, *mask) { 515 + for_each_cpu(cpu, mask) { 516 516 u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL; 517 517 u16 tlist; 518 518
+3 -3
drivers/irqchip/irq-mips-gic.c
··· 345 345 int i; 346 346 347 347 cpumask_and(&tmp, cpumask, cpu_online_mask); 348 - if (cpus_empty(tmp)) 348 + if (cpumask_empty(&tmp)) 349 349 return -EINVAL; 350 350 351 351 /* Assumption : cpumask refers to a single CPU */ 352 352 spin_lock_irqsave(&gic_lock, flags); 353 353 354 354 /* Re-route this IRQ */ 355 - gic_map_to_vpe(irq, first_cpu(tmp)); 355 + gic_map_to_vpe(irq, cpumask_first(&tmp)); 356 356 357 357 /* Update the pcpu_masks */ 358 358 for (i = 0; i < NR_CPUS; i++) 359 359 clear_bit(irq, pcpu_masks[i].pcpu_mask); 360 - set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); 360 + set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); 361 361 362 362 cpumask_copy(d->affinity, cpumask); 363 363 spin_unlock_irqrestore(&gic_lock, flags);
+2 -2
drivers/net/ethernet/tile/tilegx.c
··· 1122 1122 addr + i * sizeof(struct tile_net_comps); 1123 1123 1124 1124 /* If this is a network cpu, create an iqueue. */ 1125 - if (cpu_isset(cpu, network_cpus_map)) { 1125 + if (cpumask_test_cpu(cpu, &network_cpus_map)) { 1126 1126 order = get_order(NOTIF_RING_SIZE); 1127 1127 page = homecache_alloc_pages(GFP_KERNEL, order, cpu); 1128 1128 if (page == NULL) { ··· 1298 1298 int first_ring, ring; 1299 1299 int instance = mpipe_instance(dev); 1300 1300 struct mpipe_data *md = &mpipe_data[instance]; 1301 - int network_cpus_count = cpus_weight(network_cpus_map); 1301 + int network_cpus_count = cpumask_weight(&network_cpus_map); 1302 1302 1303 1303 if (!hash_default) { 1304 1304 netdev_err(dev, "Networking requires hash_default!\n");