Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Implement accurate task and CPU time accounting

This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.

To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on

* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.

On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.

This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.

This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.

Signed-off-by: Paul Mackerras <paulus@samba.org>

+577 -17
+15
arch/powerpc/Kconfig
··· 250 250 def_bool y 251 251 depends on PPC_STD_MMU && PPC32 252 252 253 + config VIRT_CPU_ACCOUNTING 254 + bool "Deterministic task and CPU time accounting" 255 + depends on PPC64 256 + default y 257 + help 258 + Select this option to enable more accurate task and CPU time 259 + accounting. This is done by reading a CPU counter on each 260 + kernel entry and exit and on transitions within the kernel 261 + between system, softirq and hardirq state, so there is a 262 + small performance impact. This also enables accounting of 263 + stolen time on logically-partitioned systems running on 264 + IBM POWER5-based machines. 265 + 266 + If in doubt, say Y here. 267 + 253 268 config SMP 254 269 depends on PPC_STD_MMU 255 270 bool "Symmetric multi-processing support"
+3
arch/powerpc/kernel/asm-offsets.c
··· 137 137 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); 138 138 DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); 139 139 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); 140 + DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); 141 + DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); 142 + DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); 140 143 141 144 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); 142 145 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
+5 -2
arch/powerpc/kernel/entry_64.S
··· 61 61 std r12,_MSR(r1) 62 62 std r0,GPR0(r1) 63 63 std r10,GPR1(r1) 64 + ACCOUNT_CPU_USER_ENTRY(r10, r11) 64 65 std r2,GPR2(r1) 65 66 std r3,GPR3(r1) 66 67 std r4,GPR4(r1) ··· 169 168 stdcx. r0,0,r1 /* to clear the reservation */ 170 169 andi. r6,r8,MSR_PR 171 170 ld r4,_LINK(r1) 172 - beq- 1f /* only restore r13 if */ 173 - ld r13,GPR13(r1) /* returning to usermode */ 171 + beq- 1f 172 + ACCOUNT_CPU_USER_EXIT(r11, r12) 173 + ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ 174 174 1: ld r2,GPR2(r1) 175 175 li r12,MSR_RI 176 176 andc r11,r10,r12 ··· 538 536 * userspace 539 537 */ 540 538 beq 1f 539 + ACCOUNT_CPU_USER_EXIT(r3, r4) 541 540 REST_GPR(13, r1) 542 541 1: 543 542 ld r3,_CTR(r1)
+9
arch/powerpc/kernel/head_64.S
··· 277 277 std r10,0(r1); /* make stack chain pointer */ \ 278 278 std r0,GPR0(r1); /* save r0 in stackframe */ \ 279 279 std r10,GPR1(r1); /* save r1 in stackframe */ \ 280 + ACCOUNT_CPU_USER_ENTRY(r9, r10); \ 280 281 std r2,GPR2(r1); /* save r2 in stackframe */ \ 281 282 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ 282 283 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ ··· 845 844 ld r11,_NIP(r1) 846 845 andi. r3,r12,MSR_RI /* check if RI is set */ 847 846 beq- unrecov_fer 847 + 848 + #ifdef CONFIG_VIRT_CPU_ACCOUNTING 849 + andi. r3,r12,MSR_PR 850 + beq 2f 851 + ACCOUNT_CPU_USER_EXIT(r3, r4) 852 + 2: 853 + #endif 854 + 848 855 ld r3,_CCR(r1) 849 856 ld r4,_LINK(r1) 850 857 ld r5,_CTR(r1)
+22 -8
arch/powerpc/kernel/irq.c
··· 369 369 return NO_IRQ; 370 370 371 371 } 372 + #endif /* CONFIG_PPC64 */ 372 373 373 374 #ifdef CONFIG_IRQSTACKS 374 375 struct thread_info *softirq_ctx[NR_CPUS]; ··· 393 392 } 394 393 } 395 394 395 + static inline void do_softirq_onstack(void) 396 + { 397 + struct thread_info *curtp, *irqtp; 398 + 399 + curtp = current_thread_info(); 400 + irqtp = softirq_ctx[smp_processor_id()]; 401 + irqtp->task = curtp->task; 402 + call_do_softirq(irqtp); 403 + irqtp->task = NULL; 404 + } 405 + 406 + #else 407 + #define do_softirq_onstack() __do_softirq() 408 + #endif /* CONFIG_IRQSTACKS */ 409 + 396 410 void do_softirq(void) 397 411 { 398 412 unsigned long flags; 399 - struct thread_info *curtp, *irqtp; 400 413 401 414 if (in_interrupt()) 402 415 return; ··· 418 403 local_irq_save(flags); 419 404 420 405 if (local_softirq_pending()) { 421 - curtp = current_thread_info(); 422 - irqtp = softirq_ctx[smp_processor_id()]; 423 - irqtp->task = curtp->task; 424 - call_do_softirq(irqtp); 425 - irqtp->task = NULL; 406 + account_system_vtime(current); 407 + local_bh_disable(); 408 + do_softirq_onstack(); 409 + account_system_vtime(current); 410 + __local_bh_enable(); 426 411 } 427 412 428 413 local_irq_restore(flags); 429 414 } 430 415 EXPORT_SYMBOL(do_softirq); 431 416 432 - #endif /* CONFIG_IRQSTACKS */ 433 - 417 + #ifdef CONFIG_PPC64 434 418 static int __init setup_noirqdistrib(char *str) 435 419 { 436 420 distribute_irqs = 0;
+6 -1
arch/powerpc/kernel/process.c
··· 45 45 #include <asm/mmu.h> 46 46 #include <asm/prom.h> 47 47 #include <asm/machdep.h> 48 + #include <asm/time.h> 48 49 #ifdef CONFIG_PPC64 49 50 #include <asm/firmware.h> 50 - #include <asm/time.h> 51 51 #endif 52 52 53 53 extern unsigned long _get_SP(void); ··· 328 328 #endif 329 329 330 330 local_irq_save(flags); 331 + 332 + account_system_vtime(current); 333 + account_process_vtime(current); 334 + calculate_steal_time(); 335 + 331 336 last = _switch(old_thread, new_thread); 332 337 333 338 local_irq_restore(flags);
+3 -1
arch/powerpc/kernel/smp.c
··· 541 541 smp_ops->take_timebase(); 542 542 543 543 if (system_state > SYSTEM_BOOTING) 544 - per_cpu(last_jiffy, cpu) = get_tb(); 544 + snapshot_timebase(); 545 545 546 546 spin_lock(&call_lock); 547 547 cpu_set(cpu, cpu_online_map); ··· 572 572 smp_ops->setup_cpu(boot_cpuid); 573 573 574 574 set_cpus_allowed(current, old_mask); 575 + 576 + snapshot_timebases(); 575 577 576 578 dump_numa_cpu_topology(); 577 579 }
+234 -2
arch/powerpc/kernel/time.c
··· 51 51 #include <linux/percpu.h> 52 52 #include <linux/rtc.h> 53 53 #include <linux/jiffies.h> 54 + #include <linux/posix-timers.h> 54 55 55 56 #include <asm/io.h> 56 57 #include <asm/processor.h> ··· 135 134 * has passed. 136 135 */ 137 136 DEFINE_PER_CPU(unsigned long, last_jiffy); 137 + 138 + #ifdef CONFIG_VIRT_CPU_ACCOUNTING 139 + /* 140 + * Factors for converting from cputime_t (timebase ticks) to 141 + * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds). 142 + * These are all stored as 0.64 fixed-point binary fractions. 143 + */ 144 + u64 __cputime_jiffies_factor; 145 + u64 __cputime_msec_factor; 146 + u64 __cputime_sec_factor; 147 + u64 __cputime_clockt_factor; 148 + 149 + static void calc_cputime_factors(void) 150 + { 151 + struct div_result res; 152 + 153 + div128_by_32(HZ, 0, tb_ticks_per_sec, &res); 154 + __cputime_jiffies_factor = res.result_low; 155 + div128_by_32(1000, 0, tb_ticks_per_sec, &res); 156 + __cputime_msec_factor = res.result_low; 157 + div128_by_32(1, 0, tb_ticks_per_sec, &res); 158 + __cputime_sec_factor = res.result_low; 159 + div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res); 160 + __cputime_clockt_factor = res.result_low; 161 + } 162 + 163 + /* 164 + * Read the PURR on systems that have it, otherwise the timebase. 165 + */ 166 + static u64 read_purr(void) 167 + { 168 + if (cpu_has_feature(CPU_FTR_PURR)) 169 + return mfspr(SPRN_PURR); 170 + return mftb(); 171 + } 172 + 173 + /* 174 + * Account time for a transition between system, hard irq 175 + * or soft irq state. 
176 + */ 177 + void account_system_vtime(struct task_struct *tsk) 178 + { 179 + u64 now, delta; 180 + unsigned long flags; 181 + 182 + local_irq_save(flags); 183 + now = read_purr(); 184 + delta = now - get_paca()->startpurr; 185 + get_paca()->startpurr = now; 186 + if (!in_interrupt()) { 187 + delta += get_paca()->system_time; 188 + get_paca()->system_time = 0; 189 + } 190 + account_system_time(tsk, 0, delta); 191 + local_irq_restore(flags); 192 + } 193 + 194 + /* 195 + * Transfer the user and system times accumulated in the paca 196 + * by the exception entry and exit code to the generic process 197 + * user and system time records. 198 + * Must be called with interrupts disabled. 199 + */ 200 + void account_process_vtime(struct task_struct *tsk) 201 + { 202 + cputime_t utime; 203 + 204 + utime = get_paca()->user_time; 205 + get_paca()->user_time = 0; 206 + account_user_time(tsk, utime); 207 + } 208 + 209 + static void account_process_time(struct pt_regs *regs) 210 + { 211 + int cpu = smp_processor_id(); 212 + 213 + account_process_vtime(current); 214 + run_local_timers(); 215 + if (rcu_pending(cpu)) 216 + rcu_check_callbacks(cpu, user_mode(regs)); 217 + scheduler_tick(); 218 + run_posix_cpu_timers(current); 219 + } 220 + 221 + #ifdef CONFIG_PPC_SPLPAR 222 + /* 223 + * Stuff for accounting stolen time. 
224 + */ 225 + struct cpu_purr_data { 226 + int initialized; /* thread is running */ 227 + u64 tb0; /* timebase at origin time */ 228 + u64 purr0; /* PURR at origin time */ 229 + u64 tb; /* last TB value read */ 230 + u64 purr; /* last PURR value read */ 231 + u64 stolen; /* stolen time so far */ 232 + spinlock_t lock; 233 + }; 234 + 235 + static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data); 236 + 237 + static void snapshot_tb_and_purr(void *data) 238 + { 239 + struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data); 240 + 241 + p->tb0 = mftb(); 242 + p->purr0 = mfspr(SPRN_PURR); 243 + p->tb = p->tb0; 244 + p->purr = 0; 245 + wmb(); 246 + p->initialized = 1; 247 + } 248 + 249 + /* 250 + * Called during boot when all cpus have come up. 251 + */ 252 + void snapshot_timebases(void) 253 + { 254 + int cpu; 255 + 256 + if (!cpu_has_feature(CPU_FTR_PURR)) 257 + return; 258 + for_each_cpu(cpu) 259 + spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock); 260 + on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1); 261 + } 262 + 263 + void calculate_steal_time(void) 264 + { 265 + u64 tb, purr, t0; 266 + s64 stolen; 267 + struct cpu_purr_data *p0, *pme, *phim; 268 + int cpu; 269 + 270 + if (!cpu_has_feature(CPU_FTR_PURR)) 271 + return; 272 + cpu = smp_processor_id(); 273 + pme = &per_cpu(cpu_purr_data, cpu); 274 + if (!pme->initialized) 275 + return; /* this can happen in early boot */ 276 + p0 = &per_cpu(cpu_purr_data, cpu & ~1); 277 + phim = &per_cpu(cpu_purr_data, cpu ^ 1); 278 + spin_lock(&p0->lock); 279 + tb = mftb(); 280 + purr = mfspr(SPRN_PURR) - pme->purr0; 281 + if (!phim->initialized || !cpu_online(cpu ^ 1)) { 282 + stolen = (tb - pme->tb) - (purr - pme->purr); 283 + } else { 284 + t0 = pme->tb0; 285 + if (phim->tb0 < t0) 286 + t0 = phim->tb0; 287 + stolen = phim->tb - t0 - phim->purr - purr - p0->stolen; 288 + } 289 + if (stolen > 0) { 290 + account_steal_time(current, stolen); 291 + p0->stolen += stolen; 292 + } 293 + pme->tb = tb; 294 + pme->purr = purr; 295 + 
spin_unlock(&p0->lock); 296 + } 297 + 298 + /* 299 + * Must be called before the cpu is added to the online map when 300 + * a cpu is being brought up at runtime. 301 + */ 302 + static void snapshot_purr(void) 303 + { 304 + int cpu; 305 + u64 purr; 306 + struct cpu_purr_data *p0, *pme, *phim; 307 + unsigned long flags; 308 + 309 + if (!cpu_has_feature(CPU_FTR_PURR)) 310 + return; 311 + cpu = smp_processor_id(); 312 + pme = &per_cpu(cpu_purr_data, cpu); 313 + p0 = &per_cpu(cpu_purr_data, cpu & ~1); 314 + phim = &per_cpu(cpu_purr_data, cpu ^ 1); 315 + spin_lock_irqsave(&p0->lock, flags); 316 + pme->tb = pme->tb0 = mftb(); 317 + purr = mfspr(SPRN_PURR); 318 + if (!phim->initialized) { 319 + pme->purr = 0; 320 + pme->purr0 = purr; 321 + } else { 322 + /* set p->purr and p->purr0 for no change in p0->stolen */ 323 + pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen; 324 + pme->purr0 = purr - pme->purr; 325 + } 326 + pme->initialized = 1; 327 + spin_unlock_irqrestore(&p0->lock, flags); 328 + } 329 + 330 + #endif /* CONFIG_PPC_SPLPAR */ 331 + 332 + #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ 333 + #define calc_cputime_factors() 334 + #define account_process_time(regs) update_process_times(user_mode(regs)) 335 + #define calculate_steal_time() do { } while (0) 336 + #endif 337 + 338 + #if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)) 339 + #define snapshot_purr() do { } while (0) 340 + #endif 341 + 342 + /* 343 + * Called when a cpu comes up after the system has finished booting, 344 + * i.e. as a result of a hotplug cpu action. 
345 + */ 346 + void snapshot_timebase(void) 347 + { 348 + __get_cpu_var(last_jiffy) = get_tb(); 349 + snapshot_purr(); 350 + } 138 351 139 352 void __delay(unsigned long loops) 140 353 { ··· 597 382 new_tb_ticks_per_jiffy, sign, tick_diff ); 598 383 tb_ticks_per_jiffy = new_tb_ticks_per_jiffy; 599 384 tb_ticks_per_sec = new_tb_ticks_per_sec; 385 + calc_cputime_factors(); 600 386 div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres ); 601 387 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; 602 388 tb_to_xs = divres.result_low; ··· 646 430 irq_enter(); 647 431 648 432 profile_tick(CPU_PROFILING, regs); 433 + calculate_steal_time(); 649 434 650 435 #ifdef CONFIG_PPC_ISERIES 651 436 get_lppaca()->int_dword.fields.decr_int = 0; ··· 668 451 * is the case. 669 452 */ 670 453 if (!cpu_is_offline(cpu)) 671 - update_process_times(user_mode(regs)); 454 + account_process_time(regs); 672 455 673 456 /* 674 457 * No need to check whether cpu is offline here; boot_cpuid ··· 725 508 void __init smp_space_timers(unsigned int max_cpus) 726 509 { 727 510 int i; 511 + unsigned long half = tb_ticks_per_jiffy / 2; 728 512 unsigned long offset = tb_ticks_per_jiffy / max_cpus; 729 513 unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid); 730 514 731 515 /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */ 732 516 previous_tb -= tb_ticks_per_jiffy; 517 + /* 518 + * The stolen time calculation for POWER5 shared-processor LPAR 519 + * systems works better if the two threads' timebase interrupts 520 + * are staggered by half a jiffy with respect to each other. 
521 + */ 733 522 for_each_cpu(i) { 734 - if (i != boot_cpuid) { 523 + if (i == boot_cpuid) 524 + continue; 525 + if (i == (boot_cpuid ^ 1)) 526 + per_cpu(last_jiffy, i) = 527 + per_cpu(last_jiffy, boot_cpuid) - half; 528 + else if (i & 1) 529 + per_cpu(last_jiffy, i) = 530 + per_cpu(last_jiffy, i ^ 1) + half; 531 + else { 735 532 previous_tb += offset; 736 533 per_cpu(last_jiffy, i) = previous_tb; 737 534 } ··· 937 706 tb_ticks_per_sec = ppc_tb_freq; 938 707 tb_ticks_per_usec = ppc_tb_freq / 1000000; 939 708 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); 709 + calc_cputime_factors(); 940 710 941 711 /* 942 712 * Calculate the length of each tick in ns. It will not be
+3 -1
include/asm-powerpc/cputable.h
··· 117 117 #define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000) 118 118 #define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0000100000000000) 119 119 #define CPU_FTR_PAUSE_ZERO ASM_CONST(0x0000200000000000) 120 + #define CPU_FTR_PURR ASM_CONST(0x0000400000000000) 120 121 #else 121 122 /* ensure on 32b processors the flags are available for compiling but 122 123 * don't do anything */ ··· 133 132 #define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0) 134 133 #define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0) 135 134 #define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0) 135 + #define CPU_FTR_PURR ASM_CONST(0x0) 136 136 #endif 137 137 138 138 #ifndef __ASSEMBLY__ ··· 318 316 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | 319 317 CPU_FTR_MMCRA | CPU_FTR_SMT | 320 318 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | 321 - CPU_FTR_MMCRA_SIHV, 319 + CPU_FTR_MMCRA_SIHV | CPU_FTR_PURR, 322 320 CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 323 321 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | 324 322 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT |
+202
include/asm-powerpc/cputime.h
··· 1 + /* 2 + * Definitions for measuring cputime on powerpc machines. 3 + * 4 + * Copyright (C) 2006 Paul Mackerras, IBM Corp. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the License, or (at your option) any later version. 10 + * 11 + * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in 12 + * the same units as the timebase. Otherwise we measure cpu time 13 + * in jiffies using the generic definitions. 14 + */ 15 + 16 + #ifndef __POWERPC_CPUTIME_H 17 + #define __POWERPC_CPUTIME_H 18 + 19 + #ifndef CONFIG_VIRT_CPU_ACCOUNTING 1 20 #include <asm-generic/cputime.h> 21 + #else 22 + 23 + #include <linux/types.h> 24 + #include <linux/time.h> 25 + #include <asm/div64.h> 26 + #include <asm/time.h> 27 + #include <asm/param.h> 28 + 29 + typedef u64 cputime_t; 30 + typedef u64 cputime64_t; 31 + 32 + #define cputime_zero ((cputime_t)0) 33 + #define cputime_max ((~((cputime_t)0) >> 1) - 1) 34 + #define cputime_add(__a, __b) ((__a) + (__b)) 35 + #define cputime_sub(__a, __b) ((__a) - (__b)) 36 + #define cputime_div(__a, __n) ((__a) / (__n)) 37 + #define cputime_halve(__a) ((__a) >> 1) 38 + #define cputime_eq(__a, __b) ((__a) == (__b)) 39 + #define cputime_gt(__a, __b) ((__a) > (__b)) 40 + #define cputime_ge(__a, __b) ((__a) >= (__b)) 41 + #define cputime_lt(__a, __b) ((__a) < (__b)) 42 + #define cputime_le(__a, __b) ((__a) <= (__b)) 43 + 44 + #define cputime64_zero ((cputime64_t)0) 45 + #define cputime64_add(__a, __b) ((__a) + (__b)) 46 + #define cputime_to_cputime64(__ct) (__ct) 47 + 48 + #ifdef __KERNEL__ 49 + 50 + /* 51 + * Convert cputime <-> jiffies 52 + */ 53 + extern u64 __cputime_jiffies_factor; 54 + 55 + static inline unsigned long cputime_to_jiffies(const cputime_t ct) 56 + { 57 + return mulhdu(ct, __cputime_jiffies_factor); 58 + } 59 + 60 + static inline cputime_t 
jiffies_to_cputime(const unsigned long jif) 61 + { 62 + cputime_t ct; 63 + unsigned long sec; 64 + 65 + /* have to be a little careful about overflow */ 66 + ct = jif % HZ; 67 + sec = jif / HZ; 68 + if (ct) { 69 + ct *= tb_ticks_per_sec; 70 + do_div(ct, HZ); 71 + } 72 + if (sec) 73 + ct += (cputime_t) sec * tb_ticks_per_sec; 74 + return ct; 75 + } 76 + 77 + static inline u64 cputime64_to_jiffies64(const cputime_t ct) 78 + { 79 + return mulhdu(ct, __cputime_jiffies_factor); 80 + } 81 + 82 + /* 83 + * Convert cputime <-> milliseconds 84 + */ 85 + extern u64 __cputime_msec_factor; 86 + 87 + static inline unsigned long cputime_to_msecs(const cputime_t ct) 88 + { 89 + return mulhdu(ct, __cputime_msec_factor); 90 + } 91 + 92 + static inline cputime_t msecs_to_cputime(const unsigned long ms) 93 + { 94 + cputime_t ct; 95 + unsigned long sec; 96 + 97 + /* have to be a little careful about overflow */ 98 + ct = ms % 1000; 99 + sec = ms / 1000; 100 + if (ct) { 101 + ct *= tb_ticks_per_sec; 102 + do_div(ct, 1000); 103 + } 104 + if (sec) 105 + ct += (cputime_t) sec * tb_ticks_per_sec; 106 + return ct; 107 + } 108 + 109 + /* 110 + * Convert cputime <-> seconds 111 + */ 112 + extern u64 __cputime_sec_factor; 113 + 114 + static inline unsigned long cputime_to_secs(const cputime_t ct) 115 + { 116 + return mulhdu(ct, __cputime_sec_factor); 117 + } 118 + 119 + static inline cputime_t secs_to_cputime(const unsigned long sec) 120 + { 121 + return (cputime_t) sec * tb_ticks_per_sec; 122 + } 123 + 124 + /* 125 + * Convert cputime <-> timespec 126 + */ 127 + static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p) 128 + { 129 + u64 x = ct; 130 + unsigned int frac; 131 + 132 + frac = do_div(x, tb_ticks_per_sec); 133 + p->tv_sec = x; 134 + x = (u64) frac * 1000000000; 135 + do_div(x, tb_ticks_per_sec); 136 + p->tv_nsec = x; 137 + } 138 + 139 + static inline cputime_t timespec_to_cputime(const struct timespec *p) 140 + { 141 + cputime_t ct; 142 + 143 + ct = (u64) 
p->tv_nsec * tb_ticks_per_sec; 144 + do_div(ct, 1000000000); 145 + return ct + (u64) p->tv_sec * tb_ticks_per_sec; 146 + } 147 + 148 + /* 149 + * Convert cputime <-> timeval 150 + */ 151 + static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p) 152 + { 153 + u64 x = ct; 154 + unsigned int frac; 155 + 156 + frac = do_div(x, tb_ticks_per_sec); 157 + p->tv_sec = x; 158 + x = (u64) frac * 1000000; 159 + do_div(x, tb_ticks_per_sec); 160 + p->tv_usec = x; 161 + } 162 + 163 + static inline cputime_t timeval_to_cputime(const struct timeval *p) 164 + { 165 + cputime_t ct; 166 + 167 + ct = (u64) p->tv_usec * tb_ticks_per_sec; 168 + do_div(ct, 1000000); 169 + return ct + (u64) p->tv_sec * tb_ticks_per_sec; 170 + } 171 + 172 + /* 173 + * Convert cputime <-> clock_t (units of 1/USER_HZ seconds) 174 + */ 175 + extern u64 __cputime_clockt_factor; 176 + 177 + static inline unsigned long cputime_to_clock_t(const cputime_t ct) 178 + { 179 + return mulhdu(ct, __cputime_clockt_factor); 180 + } 181 + 182 + static inline cputime_t clock_t_to_cputime(const unsigned long clk) 183 + { 184 + cputime_t ct; 185 + unsigned long sec; 186 + 187 + /* have to be a little careful about overflow */ 188 + ct = clk % USER_HZ; 189 + sec = clk / USER_HZ; 190 + if (ct) { 191 + ct *= tb_ticks_per_sec; 192 + do_div(ct, USER_HZ); 193 + } 194 + if (sec) 195 + ct += (cputime_t) sec * tb_ticks_per_sec; 196 + return ct; 197 + } 198 + 199 + #define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct)) 200 + 201 + #endif /* __KERNEL__ */ 202 + #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 203 + #endif /* __POWERPC_CPUTIME_H */
+4 -2
include/asm-powerpc/irq.h
··· 479 479 struct irqaction; 480 480 struct pt_regs; 481 481 482 + #define __ARCH_HAS_DO_SOFTIRQ 483 + 484 + extern void __do_softirq(void); 485 + 482 486 #ifdef CONFIG_IRQSTACKS 483 487 /* 484 488 * Per-cpu stacks for handling hard and soft interrupts. ··· 494 490 extern void call_do_softirq(struct thread_info *tp); 495 491 extern int call___do_IRQ(int irq, struct pt_regs *regs, 496 492 struct thread_info *tp); 497 - 498 - #define __ARCH_HAS_DO_SOFTIRQ 499 493 500 494 #else 501 495 #define irq_ctx_init()
+5
include/asm-powerpc/paca.h
··· 96 96 u64 saved_r1; /* r1 save for RTAS calls */ 97 97 u64 saved_msr; /* MSR saved here by enter_rtas */ 98 98 u8 proc_enabled; /* irq soft-enable flag */ 99 + 100 + /* Stuff for accurate time accounting */ 101 + u64 user_time; /* accumulated usermode TB ticks */ 102 + u64 system_time; /* accumulated system TB ticks */ 103 + u64 startpurr; /* PURR/TB value snapshot */ 99 104 }; 100 105 101 106 extern struct paca_struct paca[];
+42
include/asm-powerpc/ppc_asm.h
··· 15 15 #define SZL (BITS_PER_LONG/8) 16 16 17 17 /* 18 + * Stuff for accurate CPU time accounting. 19 + * These macros handle transitions between user and system state 20 + * in exception entry and exit and accumulate time to the 21 + * user_time and system_time fields in the paca. 22 + */ 23 + 24 + #ifndef CONFIG_VIRT_CPU_ACCOUNTING 25 + #define ACCOUNT_CPU_USER_ENTRY(ra, rb) 26 + #define ACCOUNT_CPU_USER_EXIT(ra, rb) 27 + #else 28 + #define ACCOUNT_CPU_USER_ENTRY(ra, rb) \ 29 + beq 2f; /* if from kernel mode */ \ 30 + BEGIN_FTR_SECTION; \ 31 + mfspr ra,SPRN_PURR; /* get processor util. reg */ \ 32 + END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ 33 + BEGIN_FTR_SECTION; \ 34 + mftb ra; /* or get TB if no PURR */ \ 35 + END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ 36 + ld rb,PACA_STARTPURR(r13); \ 37 + std ra,PACA_STARTPURR(r13); \ 38 + subf rb,rb,ra; /* subtract start value */ \ 39 + ld ra,PACA_USER_TIME(r13); \ 40 + add ra,ra,rb; /* add on to user time */ \ 41 + std ra,PACA_USER_TIME(r13); \ 42 + 2: 43 + 44 + #define ACCOUNT_CPU_USER_EXIT(ra, rb) \ 45 + BEGIN_FTR_SECTION; \ 46 + mfspr ra,SPRN_PURR; /* get processor util. reg */ \ 47 + END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ 48 + BEGIN_FTR_SECTION; \ 49 + mftb ra; /* or get TB if no PURR */ \ 50 + END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ 51 + ld rb,PACA_STARTPURR(r13); \ 52 + std ra,PACA_STARTPURR(r13); \ 53 + subf rb,rb,ra; /* subtract start value */ \ 54 + ld ra,PACA_SYSTEM_TIME(r13); \ 55 + add ra,ra,rb; /* add on to system time */ \ 56 + std ra,PACA_SYSTEM_TIME(r13); 57 + #endif 58 + 59 + /* 18 60 * Macros for storing registers into and loading registers from 19 61 * exception frames. 20 62 */
+4
include/asm-powerpc/system.h
··· 424 424 create_branch(addr, func_addr, BRANCH_SET_LINK); 425 425 } 426 426 427 + #ifdef CONFIG_VIRT_CPU_ACCOUNTING 428 + extern void account_system_vtime(struct task_struct *); 429 + #endif 430 + 427 431 #endif /* __KERNEL__ */ 428 432 #endif /* _ASM_POWERPC_SYSTEM_H */
+15
include/asm-powerpc/time.h
··· 41 41 42 42 extern void generic_calibrate_decr(void); 43 43 extern void wakeup_decrementer(void); 44 + extern void snapshot_timebase(void); 44 45 45 46 /* Some sane defaults: 125 MHz timebase, 1GHz processor */ 46 47 extern unsigned long ppc_proc_freq; ··· 221 220 }; 222 221 223 222 DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array); 223 + 224 + #ifdef CONFIG_VIRT_CPU_ACCOUNTING 225 + extern void account_process_vtime(struct task_struct *tsk); 226 + #else 227 + #define account_process_vtime(tsk) do { } while (0) 228 + #endif 229 + 230 + #if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR) 231 + extern void calculate_steal_time(void); 232 + extern void snapshot_timebases(void); 233 + #else 234 + #define calculate_steal_time() do { } while (0) 235 + #define snapshot_timebases() do { } while (0) 236 + #endif 224 237 225 238 #endif /* __KERNEL__ */ 226 239 #endif /* __PPC64_TIME_H */
+5
include/asm-ppc/time.h
··· 153 153 ({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;}) 154 154 155 155 unsigned mulhwu_scale_factor(unsigned, unsigned); 156 + 157 + #define account_process_vtime(tsk) do { } while (0) 158 + #define calculate_steal_time() do { } while (0) 159 + #define snapshot_timebases() do { } while (0) 160 + 156 161 #endif /* __ASM_TIME_H__ */ 157 162 #endif /* __KERNEL__ */