Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] more for_each_cpu() conversions

When we stop allocating percpu memory for not-possible CPUs we must not touch
the percpu data for not-possible CPUs at all. The correct way of doing this
is to test cpu_possible() or to use for_each_cpu().

This patch is a kernel-wide sweep of all instances of NR_CPUS. I found very
few instances of this bug, if any. But the patch converts lots of open-coded
tests to use the preferred helper macros.

Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Christian Zankel <chris@zankel.net>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Jens Axboe <axboe@suse.de>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by

Andrew Morton and committed by
Linus Torvalds
394e3902 63872f87

+137 -222
+4 -6
arch/cris/kernel/irq.c
··· 52 52 53 53 if (i == 0) { 54 54 seq_printf(p, " "); 55 - for (j=0; j<NR_CPUS; j++) 56 - if (cpu_online(j)) 57 - seq_printf(p, "CPU%d ",j); 55 + for_each_online_cpu(j) 56 + seq_printf(p, "CPU%d ",j); 58 57 seq_putc(p, '\n'); 59 58 } 60 59 ··· 66 67 #ifndef CONFIG_SMP 67 68 seq_printf(p, "%10u ", kstat_irqs(i)); 68 69 #else 69 - for (j = 0; j < NR_CPUS; j++) 70 - if (cpu_online(j)) 71 - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 70 + for_each_online_cpu(j) 71 + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 72 72 #endif 73 73 seq_printf(p, " %14s", irq_desc[i].handler->typename); 74 74 seq_printf(p, " %s", action->name);
+4 -6
arch/frv/kernel/irq.c
··· 75 75 switch (i) { 76 76 case 0: 77 77 seq_printf(p, " "); 78 - for (j = 0; j < NR_CPUS; j++) 79 - if (cpu_online(j)) 80 - seq_printf(p, "CPU%d ",j); 78 + for_each_online_cpu(j) 79 + seq_printf(p, "CPU%d ",j); 81 80 82 81 seq_putc(p, '\n'); 83 82 break; ··· 99 100 #ifndef CONFIG_SMP 100 101 seq_printf(p, "%10u ", kstat_irqs(i)); 101 102 #else 102 - for (j = 0; j < NR_CPUS; j++) 103 - if (cpu_online(j)) 104 - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]); 103 + for_each_online_cpu(j) 104 + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]); 105 105 #endif 106 106 107 107 level = group->sources[ix]->level - frv_irq_levels;
+1 -3
arch/i386/kernel/cpu/cpufreq/powernow-k8.c
··· 1145 1145 { 1146 1146 unsigned int i, supported_cpus = 0; 1147 1147 1148 - for (i=0; i<NR_CPUS; i++) { 1149 - if (!cpu_online(i)) 1150 - continue; 1148 + for_each_cpu(i) { 1151 1149 if (check_supported_cpu(i)) 1152 1150 supported_cpus++; 1153 1151 }
+9 -13
arch/i386/kernel/io_apic.c
··· 351 351 { 352 352 int i, j; 353 353 Dprintk("Rotating IRQs among CPUs.\n"); 354 - for (i = 0; i < NR_CPUS; i++) { 355 - for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) { 354 + for_each_online_cpu(i) { 355 + for (j = 0; j < NR_IRQS; j++) { 356 356 if (!irq_desc[j].action) 357 357 continue; 358 358 /* Is it a significant load ? */ ··· 381 381 unsigned long imbalance = 0; 382 382 cpumask_t allowed_mask, target_cpu_mask, tmp; 383 383 384 - for (i = 0; i < NR_CPUS; i++) { 384 + for_each_cpu(i) { 385 385 int package_index; 386 386 CPU_IRQ(i) = 0; 387 387 if (!cpu_online(i)) ··· 422 422 } 423 423 } 424 424 /* Find the least loaded processor package */ 425 - for (i = 0; i < NR_CPUS; i++) { 426 - if (!cpu_online(i)) 427 - continue; 425 + for_each_online_cpu(i) { 428 426 if (i != CPU_TO_PACKAGEINDEX(i)) 429 427 continue; 430 428 if (min_cpu_irq > CPU_IRQ(i)) { ··· 439 441 */ 440 442 tmp_cpu_irq = 0; 441 443 tmp_loaded = -1; 442 - for (i = 0; i < NR_CPUS; i++) { 443 - if (!cpu_online(i)) 444 - continue; 444 + for_each_online_cpu(i) { 445 445 if (i != CPU_TO_PACKAGEINDEX(i)) 446 446 continue; 447 447 if (max_cpu_irq <= CPU_IRQ(i)) ··· 615 619 if (smp_num_siblings > 1 && !cpus_empty(tmp)) 616 620 physical_balance = 1; 617 621 618 - for (i = 0; i < NR_CPUS; i++) { 619 - if (!cpu_online(i)) 620 - continue; 622 + for_each_online_cpu(i) { 621 623 irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); 622 624 irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); 623 625 if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { ··· 632 638 else 633 639 printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq"); 634 640 failed: 635 - for (i = 0; i < NR_CPUS; i++) { 641 + for_each_cpu(i) { 636 642 kfree(irq_cpu_data[i].irq_delta); 643 + irq_cpu_data[i].irq_delta = NULL; 637 644 kfree(irq_cpu_data[i].last_irq); 645 + irq_cpu_data[i].last_irq = NULL; 638 646 } 639 647 return 0; 640 648 }
+2 -2
arch/i386/kernel/nmi.c
··· 143 143 local_irq_enable(); 144 144 mdelay((10*1000)/nmi_hz); // wait 10 ticks 145 145 146 - for (cpu = 0; cpu < NR_CPUS; cpu++) { 146 + for_each_cpu(cpu) { 147 147 #ifdef CONFIG_SMP 148 148 /* Check cpu_callin_map here because that is set 149 149 after the timer is started. */ ··· 510 510 * Just reset the alert counters, (other CPUs might be 511 511 * spinning on locks we hold): 512 512 */ 513 - for (i = 0; i < NR_CPUS; i++) 513 + for_each_cpu(i) 514 514 alert_counter[i] = 0; 515 515 516 516 /*
+2 -5
arch/i386/oprofile/nmi_int.c
··· 122 122 static void free_msrs(void) 123 123 { 124 124 int i; 125 - for (i = 0; i < NR_CPUS; ++i) { 125 + for_each_cpu(i) { 126 126 kfree(cpu_msrs[i].counters); 127 127 cpu_msrs[i].counters = NULL; 128 128 kfree(cpu_msrs[i].controls); ··· 138 138 size_t counters_size = sizeof(struct op_msr) * model->num_counters; 139 139 140 140 int i; 141 - for (i = 0; i < NR_CPUS; ++i) { 142 - if (!cpu_online(i)) 143 - continue; 144 - 141 + for_each_online_cpu(i) { 145 142 cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL); 146 143 if (!cpu_msrs[i].counters) { 147 144 success = 0;
+4 -6
arch/m32r/kernel/irq.c
··· 37 37 38 38 if (i == 0) { 39 39 seq_printf(p, " "); 40 - for (j=0; j<NR_CPUS; j++) 41 - if (cpu_online(j)) 42 - seq_printf(p, "CPU%d ",j); 40 + for_each_online_cpu(j) 41 + seq_printf(p, "CPU%d ",j); 43 42 seq_putc(p, '\n'); 44 43 } 45 44 ··· 51 52 #ifndef CONFIG_SMP 52 53 seq_printf(p, "%10u ", kstat_irqs(i)); 53 54 #else 54 - for (j = 0; j < NR_CPUS; j++) 55 - if (cpu_online(j)) 56 - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 55 + for_each_online_cpu(j) 56 + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 57 57 #endif 58 58 seq_printf(p, " %14s", irq_desc[i].handler->typename); 59 59 seq_printf(p, " %s", action->name);
+4 -6
arch/mips/kernel/irq.c
··· 68 68 69 69 if (i == 0) { 70 70 seq_printf(p, " "); 71 - for (j=0; j<NR_CPUS; j++) 72 - if (cpu_online(j)) 73 - seq_printf(p, "CPU%d ",j); 71 + for_each_online_cpu(j) 72 + seq_printf(p, "CPU%d ",j); 74 73 seq_putc(p, '\n'); 75 74 } 76 75 ··· 82 83 #ifndef CONFIG_SMP 83 84 seq_printf(p, "%10u ", kstat_irqs(i)); 84 85 #else 85 - for (j = 0; j < NR_CPUS; j++) 86 - if (cpu_online(j)) 87 - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 86 + for_each_online_cpu(j) 87 + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 88 88 #endif 89 89 seq_printf(p, " %14s", irq_desc[i].handler->typename); 90 90 seq_printf(p, " %s", action->name);
+2 -2
arch/mips/kernel/smp.c
··· 167 167 mb(); 168 168 169 169 /* Send a message to all other CPUs and wait for them to respond */ 170 - for (i = 0; i < NR_CPUS; i++) 171 - if (cpu_online(i) && i != cpu) 170 + for_each_online_cpu(i) 171 + if (i != cpu) 172 172 core_send_ipi(i, SMP_CALL_FUNCTION); 173 173 174 174 /* Wait for response */
+1 -4
arch/mips/sgi-ip27/ip27-irq.c
··· 88 88 { 89 89 int cpu, i; 90 90 91 - for (cpu = 0; cpu <= NR_CPUS; cpu++) { 91 + for_each_online_cpu(cpu) { 92 92 struct slice_data *si = cpu_data[cpu].data; 93 - 94 - if (!cpu_online(cpu)) 95 - continue; 96 93 97 94 for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) 98 95 if (si->level_to_irq[i] == irq) {
+10 -15
arch/parisc/kernel/smp.c
··· 298 298 { 299 299 int i; 300 300 301 - for (i = 0; i < NR_CPUS; i++) { 302 - if (cpu_online(i) && i != smp_processor_id()) 301 + for_each_online_cpu(i) { 302 + if (i != smp_processor_id()) 303 303 send_IPI_single(i, op); 304 304 } 305 305 } ··· 643 643 if ( argc == 1 ){ 644 644 645 645 #ifdef DUMP_MORE_STATE 646 - for(i=0; i<NR_CPUS; i++) { 646 + for_each_online_cpu(i) { 647 647 int cpus_per_line = 4; 648 - if(cpu_online(i)) { 649 - if (j++ % cpus_per_line) 650 - printk(" %3d",i); 651 - else 652 - printk("\n %3d",i); 653 - } 648 + 649 + if (j++ % cpus_per_line) 650 + printk(" %3d",i); 651 + else 652 + printk("\n %3d",i); 654 653 } 655 654 printk("\n"); 656 655 #else ··· 658 659 } else if((argc==2) && !(strcmp(argv[1],"-l"))) { 659 660 printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n"); 660 661 #ifdef DUMP_MORE_STATE 661 - for(i=0;i<NR_CPUS;i++) { 662 - if (!cpu_online(i)) 663 - continue; 662 + for_each_online_cpu(i) { 664 663 if (cpu_data[i].cpuid != NO_PROC_ID) { 665 664 switch(cpu_data[i].state) { 666 665 case STATE_RENDEZVOUS: ··· 692 695 } else if ((argc==2) && !(strcmp(argv[1],"-s"))) { 693 696 #ifdef DUMP_MORE_STATE 694 697 printk("\nCPUSTATE CPUID\n"); 695 - for (i=0;i<NR_CPUS;i++) { 696 - if (!cpu_online(i)) 697 - continue; 698 + for_each_online_cpu(i) { 698 699 if (cpu_data[i].cpuid != NO_PROC_ID) { 699 700 switch(cpu_data[i].state) { 700 701 case STATE_RENDEZVOUS:
+2 -3
arch/powerpc/kernel/irq.c
··· 135 135 #ifdef CONFIG_TAU_INT 136 136 if (tau_initialized){ 137 137 seq_puts(p, "TAU: "); 138 - for (j = 0; j < NR_CPUS; j++) 139 - if (cpu_online(j)) 140 - seq_printf(p, "%10u ", tau_interrupts(j)); 138 + for_each_online_cpu(j) 139 + seq_printf(p, "%10u ", tau_interrupts(j)); 141 140 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); 142 141 } 143 142 #endif
+2 -3
arch/powerpc/kernel/setup-common.c
··· 162 162 #if defined(CONFIG_SMP) && defined(CONFIG_PPC32) 163 163 unsigned long bogosum = 0; 164 164 int i; 165 - for (i = 0; i < NR_CPUS; ++i) 166 - if (cpu_online(i)) 167 - bogosum += loops_per_jiffy; 165 + for_each_online_cpu(i) 166 + bogosum += loops_per_jiffy; 168 167 seq_printf(m, "total bogomips\t: %lu.%02lu\n", 169 168 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); 170 169 #endif /* CONFIG_SMP && CONFIG_PPC32 */
+2 -3
arch/powerpc/kernel/setup_32.c
··· 272 272 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); 273 273 274 274 /* register CPU devices */ 275 - for (i = 0; i < NR_CPUS; i++) 276 - if (cpu_possible(i)) 277 - register_cpu(&cpu_devices[i], i, NULL); 275 + for_each_cpu(i) 276 + register_cpu(&cpu_devices[i], i, NULL); 278 277 279 278 /* call platform init */ 280 279 if (ppc_md.init != NULL) {
+1 -3
arch/powerpc/platforms/powermac/smp.c
··· 191 191 if (num_online_cpus() < 2) 192 192 return; 193 193 194 - for (i = 0; i < NR_CPUS; i++) { 195 - if (!cpu_online(i)) 196 - continue; 194 + for_each_online_cpu(i) { 197 195 if (target == MSG_ALL 198 196 || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) 199 197 || target == i) {
+4 -6
arch/ppc/kernel/setup.c
··· 168 168 /* Show summary information */ 169 169 #ifdef CONFIG_SMP 170 170 unsigned long bogosum = 0; 171 - for (i = 0; i < NR_CPUS; ++i) 172 - if (cpu_online(i)) 173 - bogosum += cpu_data[i].loops_per_jiffy; 171 + for_each_online_cpu(i) 172 + bogosum += cpu_data[i].loops_per_jiffy; 174 173 seq_printf(m, "total bogomips\t: %lu.%02lu\n", 175 174 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); 176 175 #endif /* CONFIG_SMP */ ··· 711 712 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); 712 713 713 714 /* register CPU devices */ 714 - for (i = 0; i < NR_CPUS; i++) 715 - if (cpu_possible(i)) 716 - register_cpu(&cpu_devices[i], i, NULL); 715 + for_each_cpu(i) 716 + register_cpu(&cpu_devices[i], i, NULL); 717 717 718 718 /* call platform init */ 719 719 if (ppc_md.init != NULL) {
+1 -3
arch/s390/kernel/smp.c
··· 799 799 */ 800 800 print_cpu_info(&S390_lowcore.cpu_data); 801 801 802 - for(i = 0; i < NR_CPUS; i++) { 803 - if (!cpu_possible(i)) 804 - continue; 802 + for_each_cpu(i) { 805 803 lowcore_ptr[i] = (struct _lowcore *) 806 804 __get_free_pages(GFP_KERNEL|GFP_DMA, 807 805 sizeof(void*) == 8 ? 1 : 0);
+2 -3
arch/sh/kernel/irq.c
··· 35 35 36 36 if (i == 0) { 37 37 seq_puts(p, " "); 38 - for (j=0; j<NR_CPUS; j++) 39 - if (cpu_online(j)) 40 - seq_printf(p, "CPU%d ",j); 38 + for_each_online_cpu(j) 39 + seq_printf(p, "CPU%d ",j); 41 40 seq_putc(p, '\n'); 42 41 } 43 42
+2 -3
arch/sh/kernel/setup.c
··· 404 404 { 405 405 int cpu_id; 406 406 407 - for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++) 408 - if (cpu_possible(cpu_id)) 409 - register_cpu(&cpu[cpu_id], cpu_id, NULL); 407 + for_each_cpu(cpu_id) 408 + register_cpu(&cpu[cpu_id], cpu_id, NULL); 410 409 411 410 return 0; 412 411 }
+2 -3
arch/sh64/kernel/irq.c
··· 53 53 54 54 if (i == 0) { 55 55 seq_puts(p, " "); 56 - for (j=0; j<NR_CPUS; j++) 57 - if (cpu_online(j)) 58 - seq_printf(p, "CPU%d ",j); 56 + for_each_online_cpu(j) 57 + seq_printf(p, "CPU%d ",j); 59 58 seq_putc(p, '\n'); 60 59 } 61 60
+2 -3
arch/sparc/kernel/irq.c
··· 184 184 #ifndef CONFIG_SMP 185 185 seq_printf(p, "%10u ", kstat_irqs(i)); 186 186 #else 187 - for (j = 0; j < NR_CPUS; j++) { 188 - if (cpu_online(j)) 189 - seq_printf(p, "%10u ", 187 + for_each_online_cpu(j) { 188 + seq_printf(p, "%10u ", 190 189 kstat_cpu(cpu_logical_map(j)).irqs[i]); 191 190 } 192 191 #endif
+10 -14
arch/sparc/kernel/smp.c
··· 243 243 return -EINVAL; 244 244 245 245 spin_lock_irqsave(&prof_setup_lock, flags); 246 - for(i = 0; i < NR_CPUS; i++) { 247 - if (cpu_possible(i)) 248 - load_profile_irq(i, lvl14_resolution / multiplier); 246 + for_each_cpu(i) { 247 + load_profile_irq(i, lvl14_resolution / multiplier); 249 248 prof_multiplier(i) = multiplier; 250 249 } 251 250 spin_unlock_irqrestore(&prof_setup_lock, flags); ··· 272 273 { 273 274 int i; 274 275 275 - for (i = 0; i < NR_CPUS; i++) { 276 - if (cpu_online(i)) 277 - seq_printf(m, 278 - "Cpu%dBogo\t: %lu.%02lu\n", 279 - i, 280 - cpu_data(i).udelay_val/(500000/HZ), 281 - (cpu_data(i).udelay_val/(5000/HZ))%100); 276 + for_each_online_cpu(i) { 277 + seq_printf(m, 278 + "Cpu%dBogo\t: %lu.%02lu\n", 279 + i, 280 + cpu_data(i).udelay_val/(500000/HZ), 281 + (cpu_data(i).udelay_val/(5000/HZ))%100); 282 282 } 283 283 } 284 284 ··· 286 288 int i; 287 289 288 290 seq_printf(m, "State:\n"); 289 - for (i = 0; i < NR_CPUS; i++) { 290 - if (cpu_online(i)) 291 - seq_printf(m, "CPU%d\t\t: online\n", i); 292 - } 291 + for_each_online_cpu(i) 292 + seq_printf(m, "CPU%d\t\t: online\n", i); 293 293 }
+3 -5
arch/sparc/kernel/sun4d_irq.c
··· 103 103 #ifndef CONFIG_SMP 104 104 seq_printf(p, "%10u ", kstat_irqs(i)); 105 105 #else 106 - for (x = 0; x < NR_CPUS; x++) { 107 - if (cpu_online(x)) 108 - seq_printf(p, "%10u ", 109 - kstat_cpu(cpu_logical_map(x)).irqs[i]); 110 - } 106 + for_each_online_cpu(x) 107 + seq_printf(p, "%10u ", 108 + kstat_cpu(cpu_logical_map(x)).irqs[i]); 111 109 #endif 112 110 seq_printf(p, "%c %s", 113 111 (action->flags & SA_INTERRUPT) ? '+' : ' ',
+3 -5
arch/sparc/kernel/sun4d_smp.c
··· 249 249 } else { 250 250 unsigned long bogosum = 0; 251 251 252 - for(i = 0; i < NR_CPUS; i++) { 253 - if (cpu_isset(i, cpu_present_map)) { 254 - bogosum += cpu_data(i).udelay_val; 255 - smp_highest_cpu = i; 256 - } 252 + for_each_present_cpu(i) { 253 + bogosum += cpu_data(i).udelay_val; 254 + smp_highest_cpu = i; 257 255 } 258 256 SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100)); 259 257 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
+2 -4
arch/sparc/kernel/sun4m_smp.c
··· 218 218 cpu_present_map = cpumask_of_cpu(smp_processor_id()); 219 219 } else { 220 220 unsigned long bogosum = 0; 221 - for(i = 0; i < NR_CPUS; i++) { 222 - if (cpu_isset(i, cpu_present_map)) 223 - bogosum += cpu_data(i).udelay_val; 224 - } 221 + for_each_present_cpu(i) 222 + bogosum += cpu_data(i).udelay_val; 225 223 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", 226 224 cpucount + 1, 227 225 bogosum/(500000/HZ),
+1 -3
arch/sparc64/kernel/irq.c
··· 117 117 #ifndef CONFIG_SMP 118 118 seq_printf(p, "%10u ", kstat_irqs(i)); 119 119 #else 120 - for (j = 0; j < NR_CPUS; j++) { 121 - if (!cpu_online(j)) 122 - continue; 120 + for_each_online_cpu(j) { 123 121 seq_printf(p, "%10u ", 124 122 kstat_cpu(j).irqs[i]); 125 123 }
+12 -18
arch/sparc64/kernel/smp.c
··· 57 57 int i; 58 58 59 59 seq_printf(m, "State:\n"); 60 - for (i = 0; i < NR_CPUS; i++) { 61 - if (cpu_online(i)) 62 - seq_printf(m, 63 - "CPU%d:\t\tonline\n", i); 64 - } 60 + for_each_online_cpu(i) 61 + seq_printf(m, "CPU%d:\t\tonline\n", i); 65 62 } 66 63 67 64 void smp_bogo(struct seq_file *m) 68 65 { 69 66 int i; 70 67 71 - for (i = 0; i < NR_CPUS; i++) 72 - if (cpu_online(i)) 73 - seq_printf(m, 74 - "Cpu%dBogo\t: %lu.%02lu\n" 75 - "Cpu%dClkTck\t: %016lx\n", 76 - i, cpu_data(i).udelay_val / (500000/HZ), 77 - (cpu_data(i).udelay_val / (5000/HZ)) % 100, 78 - i, cpu_data(i).clock_tick); 68 + for_each_online_cpu(i) 69 + seq_printf(m, 70 + "Cpu%dBogo\t: %lu.%02lu\n" 71 + "Cpu%dClkTck\t: %016lx\n", 72 + i, cpu_data(i).udelay_val / (500000/HZ), 73 + (cpu_data(i).udelay_val / (5000/HZ)) % 100, 74 + i, cpu_data(i).clock_tick); 79 75 } 80 76 81 77 void __init smp_store_cpu_info(int id) ··· 1278 1282 return -EINVAL; 1279 1283 1280 1284 spin_lock_irqsave(&prof_setup_lock, flags); 1281 - for (i = 0; i < NR_CPUS; i++) 1285 + for_each_cpu(i) 1282 1286 prof_multiplier(i) = multiplier; 1283 1287 current_tick_offset = (timer_tick_offset / multiplier); 1284 1288 spin_unlock_irqrestore(&prof_setup_lock, flags); ··· 1380 1384 unsigned long bogosum = 0; 1381 1385 int i; 1382 1386 1383 - for (i = 0; i < NR_CPUS; i++) { 1384 - if (cpu_online(i)) 1385 - bogosum += cpu_data(i).udelay_val; 1386 - } 1387 + for_each_online_cpu(i) 1388 + bogosum += cpu_data(i).udelay_val; 1387 1389 printk("Total of %ld processors activated " 1388 1390 "(%lu.%02lu BogoMIPS).\n", 1389 1391 (long) num_online_cpus(),
+8 -13
arch/x86_64/kernel/irq.c
··· 38 38 39 39 if (i == 0) { 40 40 seq_printf(p, " "); 41 - for (j=0; j<NR_CPUS; j++) 42 - if (cpu_online(j)) 43 - seq_printf(p, "CPU%d ",j); 41 + for_each_online_cpu(j) 42 + seq_printf(p, "CPU%d ",j); 44 43 seq_putc(p, '\n'); 45 44 } 46 45 ··· 52 53 #ifndef CONFIG_SMP 53 54 seq_printf(p, "%10u ", kstat_irqs(i)); 54 55 #else 55 - for (j=0; j<NR_CPUS; j++) 56 - if (cpu_online(j)) 57 - seq_printf(p, "%10u ", 58 - kstat_cpu(j).irqs[i]); 56 + for_each_online_cpu(j) 57 + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 59 58 #endif 60 59 seq_printf(p, " %14s", irq_desc[i].handler->typename); 61 60 ··· 65 68 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 66 69 } else if (i == NR_IRQS) { 67 70 seq_printf(p, "NMI: "); 68 - for (j = 0; j < NR_CPUS; j++) 69 - if (cpu_online(j)) 70 - seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); 71 + for_each_online_cpu(j) 72 + seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); 71 73 seq_putc(p, '\n'); 72 74 #ifdef CONFIG_X86_LOCAL_APIC 73 75 seq_printf(p, "LOC: "); 74 - for (j = 0; j < NR_CPUS; j++) 75 - if (cpu_online(j)) 76 - seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); 76 + for_each_online_cpu(j) 77 + seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); 77 78 seq_putc(p, '\n'); 78 79 #endif 79 80 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+1 -3
arch/x86_64/kernel/nmi.c
··· 162 162 local_irq_enable(); 163 163 mdelay((10*1000)/nmi_hz); // wait 10 ticks 164 164 165 - for (cpu = 0; cpu < NR_CPUS; cpu++) { 166 - if (!cpu_online(cpu)) 167 - continue; 165 + for_each_online_cpu(cpu) { 168 166 if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) { 169 167 endflag = 1; 170 168 printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+6 -9
arch/xtensa/kernel/irq.c
··· 83 83 84 84 if (i == 0) { 85 85 seq_printf(p, " "); 86 - for (j=0; j<NR_CPUS; j++) 87 - if (cpu_online(j)) 88 - seq_printf(p, "CPU%d ",j); 86 + for_each_online_cpu(j) 87 + seq_printf(p, "CPU%d ",j); 89 88 seq_putc(p, '\n'); 90 89 } 91 90 ··· 97 98 #ifndef CONFIG_SMP 98 99 seq_printf(p, "%10u ", kstat_irqs(i)); 99 100 #else 100 - for (j = 0; j < NR_CPUS; j++) 101 - if (cpu_online(j)) 102 - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 101 + for_each_online_cpu(j) 102 + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 103 103 #endif 104 104 seq_printf(p, " %14s", irq_desc[i].handler->typename); 105 105 seq_printf(p, " %s", action->name); ··· 111 113 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 112 114 } else if (i == NR_IRQS) { 113 115 seq_printf(p, "NMI: "); 114 - for (j = 0; j < NR_CPUS; j++) 115 - if (cpu_online(j)) 116 - seq_printf(p, "%10u ", nmi_count(j)); 116 + for_each_online_cpu(j) 117 + seq_printf(p, "%10u ", nmi_count(j)); 117 118 seq_putc(p, '\n'); 118 119 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 119 120 }
+1 -3
drivers/net/loopback.c
··· 172 172 173 173 memset(stats, 0, sizeof(struct net_device_stats)); 174 174 175 - for (i=0; i < NR_CPUS; i++) { 175 + for_each_cpu(i) { 176 176 struct net_device_stats *lb_stats; 177 177 178 - if (!cpu_possible(i)) 179 - continue; 180 178 lb_stats = &per_cpu(loopback_stats, i); 181 179 stats->rx_bytes += lb_stats->rx_bytes; 182 180 stats->tx_bytes += lb_stats->tx_bytes;
+1 -2
drivers/oprofile/cpu_buffer.c
··· 38 38 { 39 39 int i; 40 40 41 - for_each_online_cpu(i) { 41 + for_each_online_cpu(i) 42 42 vfree(cpu_buffer[i].buffer); 43 - } 44 43 } 45 44 46 45 int alloc_cpu_buffers(void)
+2 -5
fs/xfs/linux-2.6/xfs_stats.c
··· 62 62 while (j < xstats[i].endpoint) { 63 63 val = 0; 64 64 /* sum over all cpus */ 65 - for (c = 0; c < NR_CPUS; c++) { 66 - if (!cpu_possible(c)) continue; 65 + for_each_cpu(c) 67 66 val += *(((__u32*)&per_cpu(xfsstats, c) + j)); 68 - } 69 67 len += sprintf(buffer + len, " %u", val); 70 68 j++; 71 69 } 72 70 buffer[len++] = '\n'; 73 71 } 74 72 /* extra precision counters */ 75 - for (i = 0; i < NR_CPUS; i++) { 76 - if (!cpu_possible(i)) continue; 73 + for_each_cpu(i) { 77 74 xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes; 78 75 xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes; 79 76 xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
+1 -2
fs/xfs/linux-2.6/xfs_sysctl.c
··· 38 38 39 39 if (!ret && write && *valp) { 40 40 printk("XFS Clearing xfsstats\n"); 41 - for (c = 0; c < NR_CPUS; c++) { 42 - if (!cpu_possible(c)) continue; 41 + for_each_cpu(c) { 43 42 preempt_disable(); 44 43 /* save vn_active, it's a universal truth! */ 45 44 vn_active = per_cpu(xfsstats, c).vn_active;
+2 -3
include/asm-alpha/mmu_context.h
··· 231 231 { 232 232 int i; 233 233 234 - for (i = 0; i < NR_CPUS; i++) 235 - if (cpu_online(i)) 236 - mm->context[i] = 0; 234 + for_each_online_cpu(i) 235 + mm->context[i] = 0; 237 236 if (tsk != current) 238 237 task_thread_info(tsk)->pcb.ptbr 239 238 = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+2 -2
include/asm-alpha/topology.h
··· 27 27 cpumask_t node_cpu_mask = CPU_MASK_NONE; 28 28 int cpu; 29 29 30 - for(cpu = 0; cpu < NR_CPUS; cpu++) { 31 - if (cpu_online(cpu) && (cpu_to_node(cpu) == node)) 30 + for_each_online_cpu(cpu) { 31 + if (cpu_to_node(cpu) == node) 32 32 cpu_set(cpu, node_cpu_mask); 33 33 } 34 34
+3 -4
include/asm-generic/percpu.h
··· 19 19 #define percpu_modcopy(pcpudst, src, size) \ 20 20 do { \ 21 21 unsigned int __i; \ 22 - for (__i = 0; __i < NR_CPUS; __i++) \ 23 - if (cpu_possible(__i)) \ 24 - memcpy((pcpudst)+__per_cpu_offset[__i], \ 25 - (src), (size)); \ 22 + for_each_cpu(__i) \ 23 + memcpy((pcpudst)+__per_cpu_offset[__i], \ 24 + (src), (size)); \ 26 25 } while (0) 27 26 #else /* ! SMP */ 28 27
+3 -4
include/asm-powerpc/percpu.h
··· 27 27 #define percpu_modcopy(pcpudst, src, size) \ 28 28 do { \ 29 29 unsigned int __i; \ 30 - for (__i = 0; __i < NR_CPUS; __i++) \ 31 - if (cpu_possible(__i)) \ 32 - memcpy((pcpudst)+__per_cpu_offset(__i), \ 33 - (src), (size)); \ 30 + for_each_cpu(__i) \ 31 + memcpy((pcpudst)+__per_cpu_offset(__i), \ 32 + (src), (size)); \ 34 33 } while (0) 35 34 36 35 extern void setup_per_cpu_areas(void);
+3 -4
include/asm-s390/percpu.h
··· 46 46 #define percpu_modcopy(pcpudst, src, size) \ 47 47 do { \ 48 48 unsigned int __i; \ 49 - for (__i = 0; __i < NR_CPUS; __i++) \ 50 - if (cpu_possible(__i)) \ 51 - memcpy((pcpudst)+__per_cpu_offset[__i], \ 52 - (src), (size)); \ 49 + for_each_cpu(__i) \ 50 + memcpy((pcpudst)+__per_cpu_offset[__i], \ 51 + (src), (size)); \ 53 52 } while (0) 54 53 55 54 #else /* ! SMP */
+3 -4
include/asm-sparc64/percpu.h
··· 26 26 #define percpu_modcopy(pcpudst, src, size) \ 27 27 do { \ 28 28 unsigned int __i; \ 29 - for (__i = 0; __i < NR_CPUS; __i++) \ 30 - if (cpu_possible(__i)) \ 31 - memcpy((pcpudst)+__per_cpu_offset(__i), \ 32 - (src), (size)); \ 29 + for_each_cpu(__i) \ 30 + memcpy((pcpudst)+__per_cpu_offset(__i), \ 31 + (src), (size)); \ 33 32 } while (0) 34 33 #else /* ! SMP */ 35 34
+3 -4
include/asm-x86_64/percpu.h
··· 26 26 #define percpu_modcopy(pcpudst, src, size) \ 27 27 do { \ 28 28 unsigned int __i; \ 29 - for (__i = 0; __i < NR_CPUS; __i++) \ 30 - if (cpu_possible(__i)) \ 31 - memcpy((pcpudst)+__per_cpu_offset(__i), \ 32 - (src), (size)); \ 29 + for_each_cpu(__i) \ 30 + memcpy((pcpudst)+__per_cpu_offset(__i), \ 31 + (src), (size)); \ 33 32 } while (0) 34 33 35 34 extern void setup_per_cpu_areas(void);
+4 -10
include/linux/genhd.h
··· 149 149 ({ \ 150 150 typeof(gendiskp->dkstats->field) res = 0; \ 151 151 int i; \ 152 - for (i=0; i < NR_CPUS; i++) { \ 153 - if (!cpu_possible(i)) \ 154 - continue; \ 152 + for_each_cpu(i) \ 155 153 res += per_cpu_ptr(gendiskp->dkstats, i)->field; \ 156 - } \ 157 154 res; \ 158 155 }) 159 156 160 157 static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) { 161 158 int i; 162 - for (i=0; i < NR_CPUS; i++) { 163 - if (cpu_possible(i)) { 164 - memset(per_cpu_ptr(gendiskp->dkstats, i), value, 165 - sizeof (struct disk_stats)); 166 - } 167 - } 159 + for_each_cpu(i) 160 + memset(per_cpu_ptr(gendiskp->dkstats, i), value, 161 + sizeof (struct disk_stats)); 168 162 } 169 163 170 164 #else