Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] kernel/sched.c: whitespace cleanups

[akpm@osdl.org: additional cleanups]
Signed-off-by: Miguel Ojeda Sandonis <maxextreme@gmail.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Miguel Ojeda Sandonis and committed by Linus Torvalds
33859f7f 62ab616d

+55 -40 (55 lines added, 40 lines removed)
kernel/sched.c
··· 466 466 seq_printf(seq, "domain%d %s", dcnt++, mask_str); 467 467 for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES; 468 468 itype++) { 469 - seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu", 469 + seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu " 470 + "%lu", 470 471 sd->lb_cnt[itype], 471 472 sd->lb_balanced[itype], 472 473 sd->lb_failed[itype], ··· 477 476 sd->lb_nobusyq[itype], 478 477 sd->lb_nobusyg[itype]); 479 478 } 480 - seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n", 479 + seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu" 480 + " %lu %lu %lu\n", 481 481 sd->alb_cnt, sd->alb_failed, sd->alb_pushed, 482 482 sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed, 483 483 sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed, 484 - sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance); 484 + sd->ttwu_wake_remote, sd->ttwu_move_affine, 485 + sd->ttwu_move_balance); 485 486 } 486 487 preempt_enable(); 487 488 #endif ··· 1457 1454 1458 1455 if (this_sd->flags & SD_WAKE_AFFINE) { 1459 1456 unsigned long tl = this_load; 1460 - unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu); 1457 + unsigned long tl_per_task; 1458 + 1459 + tl_per_task = cpu_avg_load_per_task(this_cpu); 1461 1460 1462 1461 /* 1463 1462 * If sync wakeup then subtract the (maximum possible) ··· 2492 2487 pwr_now /= SCHED_LOAD_SCALE; 2493 2488 2494 2489 /* Amount of load we'd subtract */ 2495 - tmp = busiest_load_per_task*SCHED_LOAD_SCALE/busiest->cpu_power; 2490 + tmp = busiest_load_per_task * SCHED_LOAD_SCALE / 2491 + busiest->cpu_power; 2496 2492 if (max_load > tmp) 2497 2493 pwr_move += busiest->cpu_power * 2498 2494 min(busiest_load_per_task, max_load - tmp); 2499 2495 2500 2496 /* Amount of load we'd add */ 2501 - if (max_load*busiest->cpu_power < 2502 - busiest_load_per_task*SCHED_LOAD_SCALE) 2503 - tmp = max_load*busiest->cpu_power/this->cpu_power; 2497 + if (max_load * busiest->cpu_power < 2498 + busiest_load_per_task * SCHED_LOAD_SCALE) 2499 + tmp = 
max_load * busiest->cpu_power / this->cpu_power; 2504 2500 else 2505 - tmp = busiest_load_per_task*SCHED_LOAD_SCALE/this->cpu_power; 2506 - pwr_move += this->cpu_power*min(this_load_per_task, this_load + tmp); 2501 + tmp = busiest_load_per_task * SCHED_LOAD_SCALE / 2502 + this->cpu_power; 2503 + pwr_move += this->cpu_power * 2504 + min(this_load_per_task, this_load + tmp); 2507 2505 pwr_move /= SCHED_LOAD_SCALE; 2508 2506 2509 2507 /* Move if we gain throughput */ ··· 3374 3366 /* 3375 3367 * Spinlock count overflowing soon? 3376 3368 */ 3377 - DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10); 3369 + DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 3370 + PREEMPT_MASK - 10); 3378 3371 } 3379 3372 EXPORT_SYMBOL(add_preempt_count); 3380 3373 ··· 5448 5439 if (!(sd->flags & SD_LOAD_BALANCE)) { 5449 5440 printk("does not load-balance\n"); 5450 5441 if (sd->parent) 5451 - printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent"); 5442 + printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" 5443 + " has parent"); 5452 5444 break; 5453 5445 } 5454 5446 5455 5447 printk("span %s\n", str); 5456 5448 5457 5449 if (!cpu_isset(cpu, sd->span)) 5458 - printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); 5450 + printk(KERN_ERR "ERROR: domain->span does not contain " 5451 + "CPU%d\n", cpu); 5459 5452 if (!cpu_isset(cpu, group->cpumask)) 5460 - printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); 5453 + printk(KERN_ERR "ERROR: domain->groups does not contain" 5454 + " CPU%d\n", cpu); 5461 5455 5462 5456 printk(KERN_DEBUG); 5463 5457 for (i = 0; i < level + 2; i++) ··· 5475 5463 5476 5464 if (!group->cpu_power) { 5477 5465 printk("\n"); 5478 - printk(KERN_ERR "ERROR: domain->cpu_power not set\n"); 5466 + printk(KERN_ERR "ERROR: domain->cpu_power not " 5467 + "set\n"); 5479 5468 } 5480 5469 5481 5470 if (!cpus_weight(group->cpumask)) { ··· 5499 5486 printk("\n"); 5500 5487 5501 5488 if (!cpus_equal(sd->span, groupmask)) 
5502 - printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 5489 + printk(KERN_ERR "ERROR: groups don't span " 5490 + "domain->span\n"); 5503 5491 5504 5492 level++; 5505 5493 sd = sd->parent; 5494 + if (!sd) 5495 + continue; 5506 5496 5507 - if (sd) { 5508 - if (!cpus_subset(groupmask, sd->span)) 5509 - printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n"); 5510 - } 5497 + if (!cpus_subset(groupmask, sd->span)) 5498 + printk(KERN_ERR "ERROR: parent span is not a superset " 5499 + "of domain->span\n"); 5511 5500 5512 5501 } while (sd); 5513 5502 } ··· 5827 5812 */ 5828 5813 static void touch_cache(void *__cache, unsigned long __size) 5829 5814 { 5830 - unsigned long size = __size/sizeof(long), chunk1 = size/3, 5831 - chunk2 = 2*size/3; 5815 + unsigned long size = __size / sizeof(long); 5816 + unsigned long chunk1 = size / 3; 5817 + unsigned long chunk2 = 2 * size / 3; 5832 5818 unsigned long *cache = __cache; 5833 5819 int i; 5834 5820 ··· 5938 5922 */ 5939 5923 measure_one(cache, size, cpu1, cpu2); 5940 5924 for (i = 0; i < ITERATIONS; i++) 5941 - cost1 += measure_one(cache, size - i*1024, cpu1, cpu2); 5925 + cost1 += measure_one(cache, size - i * 1024, cpu1, cpu2); 5942 5926 5943 5927 measure_one(cache, size, cpu2, cpu1); 5944 5928 for (i = 0; i < ITERATIONS; i++) 5945 - cost1 += measure_one(cache, size - i*1024, cpu2, cpu1); 5929 + cost1 += measure_one(cache, size - i * 1024, cpu2, cpu1); 5946 5930 5947 5931 /* 5948 5932 * (We measure the non-migrating [cached] cost on both ··· 5952 5936 5953 5937 measure_one(cache, size, cpu1, cpu1); 5954 5938 for (i = 0; i < ITERATIONS; i++) 5955 - cost2 += measure_one(cache, size - i*1024, cpu1, cpu1); 5939 + cost2 += measure_one(cache, size - i * 1024, cpu1, cpu1); 5956 5940 5957 5941 measure_one(cache, size, cpu2, cpu2); 5958 5942 for (i = 0; i < ITERATIONS; i++) 5959 - cost2 += measure_one(cache, size - i*1024, cpu2, cpu2); 5943 + cost2 += measure_one(cache, size - i * 1024, cpu2, cpu2); 5960 
5944 5961 5945 /* 5962 5946 * Get the per-iteration migration cost: 5963 5947 */ 5964 - do_div(cost1, 2*ITERATIONS); 5965 - do_div(cost2, 2*ITERATIONS); 5948 + do_div(cost1, 2 * ITERATIONS); 5949 + do_div(cost2, 2 * ITERATIONS); 5966 5950 5967 5951 return cost1 - cost2; 5968 5952 } ··· 6000 5984 */ 6001 5985 cache = vmalloc(max_size); 6002 5986 if (!cache) { 6003 - printk("could not vmalloc %d bytes for cache!\n", 2*max_size); 5987 + printk("could not vmalloc %d bytes for cache!\n", 2 * max_size); 6004 5988 return 1000000; /* return 1 msec on very small boxen */ 6005 5989 } 6006 5990 ··· 6025 6009 avg_fluct = (avg_fluct + fluct)/2; 6026 6010 6027 6011 if (migration_debug) 6028 - printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): (%8Ld %8Ld)\n", 6012 + printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): " 6013 + "(%8Ld %8Ld)\n", 6029 6014 cpu1, cpu2, size, 6030 6015 (long)cost / 1000000, 6031 6016 ((long)cost / 100000) % 10, ··· 6121 6104 -1 6122 6105 #endif 6123 6106 ); 6124 - if (system_state == SYSTEM_BOOTING) { 6125 - if (num_online_cpus() > 1) { 6126 - printk("migration_cost="); 6127 - for (distance = 0; distance <= max_distance; distance++) { 6128 - if (distance) 6129 - printk(","); 6130 - printk("%ld", (long)migration_cost[distance] / 1000); 6131 - } 6132 - printk("\n"); 6107 + if (system_state == SYSTEM_BOOTING && num_online_cpus() > 1) { 6108 + printk("migration_cost="); 6109 + for (distance = 0; distance <= max_distance; distance++) { 6110 + if (distance) 6111 + printk(","); 6112 + printk("%ld", (long)migration_cost[distance] / 1000); 6133 6113 } 6114 + printk("\n"); 6134 6115 } 6135 6116 j1 = jiffies; 6136 6117 if (migration_debug) 6137 - printk("migration: %ld seconds\n", (j1-j0)/HZ); 6118 + printk("migration: %ld seconds\n", (j1-j0) / HZ); 6138 6119 6139 6120 /* 6140 6121 * Move back to the original CPU. NUMA-Q gets confused