Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[MIPS] Kill num_online_cpus() loops.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

+27 -18
+2 -2
arch/mips/kernel/gdb-stub.c
··· 769 769 /* 770 770 * acquire the CPU spinlocks 771 771 */ 772 - for (i = num_online_cpus()-1; i >= 0; i--) 772 + for_each_online_cpu(i) 773 773 if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0) 774 774 panic("kgdb: couldn't get cpulock %d\n", i); 775 775 ··· 1044 1044 1045 1045 exit_kgdb_exception: 1046 1046 /* release locks so other CPUs can go */ 1047 - for (i = num_online_cpus()-1; i >= 0; i--) 1047 + for_each_online_cpu(i) 1048 1048 __raw_spin_unlock(&kgdb_cpulock[i]); 1049 1049 spin_unlock(&kgdb_lock); 1050 1050
+21 -12
arch/mips/kernel/smp.c
··· 375 375 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { 376 376 smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm); 377 377 } else { 378 - int i; 379 - for (i = 0; i < num_online_cpus(); i++) 380 - if (smp_processor_id() != i) 381 - cpu_context(i, mm) = 0; 378 + cpumask_t mask = cpu_online_map; 379 + unsigned int cpu; 380 + 381 + cpu_clear(smp_processor_id(), mask); 382 + for_each_online_cpu(cpu) 383 + if (cpu_context(cpu, mm)) 384 + cpu_context(cpu, mm) = 0; 382 385 } 383 386 local_flush_tlb_mm(mm); 384 387 ··· 414 411 fd.addr2 = end; 415 412 smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd); 416 413 } else { 417 - int i; 418 - for (i = 0; i < num_online_cpus(); i++) 419 - if (smp_processor_id() != i) 420 - cpu_context(i, mm) = 0; 414 + cpumask_t mask = cpu_online_map; 415 + unsigned int cpu; 416 + 417 + cpu_clear(smp_processor_id(), mask); 418 + for_each_online_cpu(cpu) 419 + if (cpu_context(cpu, mm)) 420 + cpu_context(cpu, mm) = 0; 421 421 } 422 422 local_flush_tlb_range(vma, start, end); 423 423 preempt_enable(); ··· 459 453 fd.addr1 = page; 460 454 smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd); 461 455 } else { 462 - int i; 463 - for (i = 0; i < num_online_cpus(); i++) 464 - if (smp_processor_id() != i) 465 - cpu_context(i, vma->vm_mm) = 0; 456 + cpumask_t mask = cpu_online_map; 457 + unsigned int cpu; 458 + 459 + cpu_clear(smp_processor_id(), mask); 460 + for_each_online_cpu(cpu) 461 + if (cpu_context(cpu, vma->vm_mm)) 462 + cpu_context(cpu, vma->vm_mm) = 0; 466 463 } 467 464 local_flush_tlb_page(vma, page); 468 465 preempt_enable();
+2 -2
arch/mips/kernel/smtc.c
··· 1264 1264 if (cpu_has_vtag_icache) 1265 1265 flush_icache_all(); 1266 1266 /* Traverse all online CPUs (hack requires contiguous range) */ 1267 - for (i = 0; i < num_online_cpus(); i++) { 1267 + for_each_online_cpu(i) { 1268 1268 /* 1269 1269 * We don't need to worry about our own CPU, nor those of 1270 1270 * CPUs who don't share our TLB. ··· 1293 1293 /* 1294 1294 * SMTC shares the TLB within VPEs and possibly across all VPEs. 1295 1295 */ 1296 - for (i = 0; i < num_online_cpus(); i++) { 1296 + for_each_online_cpu(i) { 1297 1297 if ((smtc_status & SMTC_TLB_SHARED) || 1298 1298 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) 1299 1299 cpu_context(i, mm) = asid_cache(i) = asid;
+2 -2
include/asm-mips/mmu_context.h
··· 120 120 { 121 121 int i; 122 122 123 - for (i = 0; i < num_online_cpus(); i++) 123 + for_each_online_cpu(i) 124 124 cpu_context(i, mm) = 0; 125 125 126 126 return 0; ··· 284 284 int i; 285 285 286 286 /* SMTC shares the TLB (and ASIDs) across VPEs */ 287 - for (i = 0; i < num_online_cpus(); i++) { 287 + for_each_online_cpu(i) { 288 288 if((smtc_status & SMTC_TLB_SHARED) 289 289 || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) 290 290 cpu_context(i, mm) = 0;