Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc32: provide VIRT_CPU_ACCOUNTING

This patch provides VIRT_CPU_ACCOUNTING to the PPC32 architecture.
PPC32 doesn't have the PACA structure, so we use the thread_info
structure to store the accounting data.

In order to reuse the PPC64 functions on PPC32, all u64 data has
been replaced by 'unsigned long' so that it is u32 on PPC32 and
u64 on PPC64.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>

authored by

Christophe Leroy and committed by
Scott Wood
c223c903 1afbf617

+158 -67
+1
arch/powerpc/Kconfig
··· 165 165 select ARCH_HAS_UBSAN_SANITIZE_ALL 166 166 select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT 167 167 select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS 168 + select HAVE_VIRT_CPU_ACCOUNTING 168 169 169 170 config GENERIC_CSUM 170 171 def_bool CPU_LITTLE_ENDIAN
+24
arch/powerpc/include/asm/accounting.h
··· 1 + /* 2 + * Common time accounting prototypes and such for all ppc machines. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation; either version 7 + * 2 of the License, or (at your option) any later version. 8 + */ 9 + 10 + #ifndef __POWERPC_ACCOUNTING_H 11 + #define __POWERPC_ACCOUNTING_H 12 + 13 + /* Stuff for accurate time accounting */ 14 + struct cpu_accounting_data { 15 + unsigned long user_time; /* accumulated usermode TB ticks */ 16 + unsigned long system_time; /* accumulated system TB ticks */ 17 + unsigned long user_time_scaled; /* accumulated usermode SPURR ticks */ 18 + unsigned long starttime; /* TB value snapshot */ 19 + unsigned long starttime_user; /* TB value on exit to usermode */ 20 + unsigned long startspurr; /* SPURR value snapshot */ 21 + unsigned long utime_sspurr; /* ->user_time when ->startspurr set */ 22 + }; 23 + 24 + #endif
+11 -3
arch/powerpc/include/asm/cputime.h
··· 90 90 static inline cputime64_t jiffies64_to_cputime64(const u64 jif) 91 91 { 92 92 u64 ct; 93 - u64 sec; 93 + u64 sec = jif; 94 94 95 95 /* have to be a little careful about overflow */ 96 - ct = jif % HZ; 97 - sec = jif / HZ; 96 + ct = do_div(sec, HZ); 98 97 if (ct) { 99 98 ct *= tb_ticks_per_sec; 100 99 do_div(ct, HZ); ··· 229 230 230 231 #define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct)) 231 232 233 + /* 234 + * PPC64 uses PACA which is task independent for storing accounting data while 235 + * PPC32 uses struct thread_info, therefore at task switch the accounting data 236 + * has to be populated in the new task 237 + */ 238 + #ifdef CONFIG_PPC64 232 239 static inline void arch_vtime_task_switch(struct task_struct *tsk) { } 240 + #else 241 + void arch_vtime_task_switch(struct task_struct *tsk); 242 + #endif 233 243 234 244 #endif /* __KERNEL__ */ 235 245 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+1 -1
arch/powerpc/include/asm/exception-64s.h
··· 287 287 std r0,GPR0(r1); /* save r0 in stackframe */ \ 288 288 std r10,GPR1(r1); /* save r1 in stackframe */ \ 289 289 beq 4f; /* if from kernel mode */ \ 290 - ACCOUNT_CPU_USER_ENTRY(r9, r10); \ 290 + ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \ 291 291 SAVE_PPR(area, r9, r10); \ 292 292 4: EXCEPTION_PROLOG_COMMON_2(area) \ 293 293 EXCEPTION_PROLOG_COMMON_3(n) \
+2 -7
arch/powerpc/include/asm/paca.h
··· 25 25 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER 26 26 #include <asm/kvm_book3s_asm.h> 27 27 #endif 28 + #include <asm/accounting.h> 28 29 29 30 register struct paca_struct *local_paca asm("r13"); 30 31 ··· 185 184 #endif 186 185 187 186 /* Stuff for accurate time accounting */ 188 - u64 user_time; /* accumulated usermode TB ticks */ 189 - u64 system_time; /* accumulated system TB ticks */ 190 - u64 user_time_scaled; /* accumulated usermode SPURR ticks */ 191 - u64 starttime; /* TB value snapshot */ 192 - u64 starttime_user; /* TB value on exit to usermode */ 193 - u64 startspurr; /* SPURR value snapshot */ 194 - u64 utime_sspurr; /* ->user_time when ->startspurr set */ 187 + struct cpu_accounting_data accounting; 195 188 u64 stolen_time; /* TB ticks taken by hypervisor */ 196 189 u64 dtl_ridx; /* read index in dispatch log */ 197 190 struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
+12 -12
arch/powerpc/include/asm/ppc_asm.h
··· 24 24 */ 25 25 26 26 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 27 - #define ACCOUNT_CPU_USER_ENTRY(ra, rb) 28 - #define ACCOUNT_CPU_USER_EXIT(ra, rb) 27 + #define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) 28 + #define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb) 29 29 #define ACCOUNT_STOLEN_TIME 30 30 #else 31 - #define ACCOUNT_CPU_USER_ENTRY(ra, rb) \ 31 + #define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) \ 32 32 MFTB(ra); /* get timebase */ \ 33 - ld rb,PACA_STARTTIME_USER(r13); \ 34 - std ra,PACA_STARTTIME(r13); \ 33 + PPC_LL rb, ACCOUNT_STARTTIME_USER(ptr); \ 34 + PPC_STL ra, ACCOUNT_STARTTIME(ptr); \ 35 35 subf rb,rb,ra; /* subtract start value */ \ 36 - ld ra,PACA_USER_TIME(r13); \ 36 + PPC_LL ra, ACCOUNT_USER_TIME(ptr); \ 37 37 add ra,ra,rb; /* add on to user time */ \ 38 - std ra,PACA_USER_TIME(r13); \ 38 + PPC_STL ra, ACCOUNT_USER_TIME(ptr); \ 39 39 40 - #define ACCOUNT_CPU_USER_EXIT(ra, rb) \ 40 + #define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb) \ 41 41 MFTB(ra); /* get timebase */ \ 42 - ld rb,PACA_STARTTIME(r13); \ 43 - std ra,PACA_STARTTIME_USER(r13); \ 42 + PPC_LL rb, ACCOUNT_STARTTIME(ptr); \ 43 + PPC_STL ra, ACCOUNT_STARTTIME_USER(ptr); \ 44 44 subf rb,rb,ra; /* subtract start value */ \ 45 - ld ra,PACA_SYSTEM_TIME(r13); \ 45 + PPC_LL ra, ACCOUNT_SYSTEM_TIME(ptr); \ 46 46 add ra,ra,rb; /* add on to system time */ \ 47 - std ra,PACA_SYSTEM_TIME(r13) 47 + PPC_STL ra, ACCOUNT_SYSTEM_TIME(ptr) 48 48 49 49 #ifdef CONFIG_PPC_SPLPAR 50 50 #define ACCOUNT_STOLEN_TIME \
+1
arch/powerpc/include/asm/reg.h
··· 1294 1294 asm volatile("mfspr %0, %1" : "=r" (rval) : \ 1295 1295 "i" (SPRN_TBRU)); rval;}) 1296 1296 #endif 1297 + #define mftb() mftbl() 1297 1298 #endif /* !__powerpc64__ */ 1298 1299 1299 1300 #define mttbl(v) asm volatile("mttbl %0":: "r"(v))
+4
arch/powerpc/include/asm/thread_info.h
··· 33 33 #include <asm/processor.h> 34 34 #include <asm/page.h> 35 35 #include <linux/stringify.h> 36 + #include <asm/accounting.h> 36 37 37 38 /* 38 39 * low level task data. ··· 46 45 unsigned long local_flags; /* private flags for thread */ 47 46 #ifdef CONFIG_LIVEPATCH 48 47 unsigned long *livepatch_sp; 48 + #endif 49 + #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC32) 50 + struct cpu_accounting_data accounting; 49 51 #endif 50 52 /* low level flags - has atomic operations done on it */ 51 53 unsigned long flags ____cacheline_aligned_in_smp;
+19 -4
arch/powerpc/kernel/asm-offsets.c
··· 240 240 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); 241 241 DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); 242 242 DEFINE(PACA_DSCR_DEFAULT, offsetof(struct paca_struct, dscr_default)); 243 - DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime)); 244 - DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user)); 245 - DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); 246 - DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); 243 + DEFINE(ACCOUNT_STARTTIME, 244 + offsetof(struct paca_struct, accounting.starttime)); 245 + DEFINE(ACCOUNT_STARTTIME_USER, 246 + offsetof(struct paca_struct, accounting.starttime_user)); 247 + DEFINE(ACCOUNT_USER_TIME, 248 + offsetof(struct paca_struct, accounting.user_time)); 249 + DEFINE(ACCOUNT_SYSTEM_TIME, 250 + offsetof(struct paca_struct, accounting.system_time)); 247 251 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); 248 252 DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost)); 249 253 DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso)); 254 + #else /* CONFIG_PPC64 */ 255 + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 256 + DEFINE(ACCOUNT_STARTTIME, 257 + offsetof(struct thread_info, accounting.starttime)); 258 + DEFINE(ACCOUNT_STARTTIME_USER, 259 + offsetof(struct thread_info, accounting.starttime_user)); 260 + DEFINE(ACCOUNT_USER_TIME, 261 + offsetof(struct thread_info, accounting.user_time)); 262 + DEFINE(ACCOUNT_SYSTEM_TIME, 263 + offsetof(struct thread_info, accounting.system_time)); 264 + #endif 250 265 #endif /* CONFIG_PPC64 */ 251 266 252 267 /* RTAS */
+17
arch/powerpc/kernel/entry_32.S
··· 175 175 addi r12,r12,-1 176 176 stw r12,4(r11) 177 177 #endif 178 + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 179 + CURRENT_THREAD_INFO(r9, r1) 180 + tophys(r9, r9) 181 + ACCOUNT_CPU_USER_ENTRY(r9, r11, r12) 182 + #endif 183 + 178 184 b 3f 179 185 180 186 2: /* if from kernel, check interrupted DOZE/NAP mode and ··· 404 398 lwarx r7,0,r1 405 399 END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) 406 400 stwcx. r0,0,r1 /* to clear the reservation */ 401 + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 402 + andi. r4,r8,MSR_PR 403 + beq 3f 404 + CURRENT_THREAD_INFO(r4, r1) 405 + ACCOUNT_CPU_USER_EXIT(r4, r5, r7) 406 + 3: 407 + #endif 407 408 lwz r4,_LINK(r1) 408 409 lwz r5,_CCR(r1) 409 410 mtlr r4 ··· 781 768 lwz r0,THREAD+THREAD_DBCR0(r2) 782 769 andis. r10,r0,DBCR0_IDM@h 783 770 bnel- load_dbcr0 771 + #endif 772 + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 773 + CURRENT_THREAD_INFO(r9, r1) 774 + ACCOUNT_CPU_USER_EXIT(r9, r10, r11) 784 775 #endif 785 776 786 777 b restore
+3 -3
arch/powerpc/kernel/entry_64.S
··· 72 72 std r0,GPR0(r1) 73 73 std r10,GPR1(r1) 74 74 beq 2f /* if from kernel mode */ 75 - ACCOUNT_CPU_USER_ENTRY(r10, r11) 75 + ACCOUNT_CPU_USER_ENTRY(r13, r10, r11) 76 76 2: std r2,GPR2(r1) 77 77 std r3,GPR3(r1) 78 78 mfcr r2 ··· 246 246 ld r4,_LINK(r1) 247 247 248 248 beq- 1f 249 - ACCOUNT_CPU_USER_EXIT(r11, r12) 249 + ACCOUNT_CPU_USER_EXIT(r13, r11, r12) 250 250 251 251 BEGIN_FTR_SECTION 252 252 HMT_MEDIUM_LOW ··· 859 859 BEGIN_FTR_SECTION 860 860 mtspr SPRN_PPR,r2 /* Restore PPR */ 861 861 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 862 - ACCOUNT_CPU_USER_EXIT(r2, r4) 862 + ACCOUNT_CPU_USER_EXIT(r13, r2, r4) 863 863 REST_GPR(13, r1) 864 864 1: 865 865 mtspr SPRN_SRR1,r3
+2 -2
arch/powerpc/kernel/exceptions-64e.S
··· 386 386 std r10,_NIP(r1); /* save SRR0 to stackframe */ \ 387 387 std r11,_MSR(r1); /* save SRR1 to stackframe */ \ 388 388 beq 2f; /* if from kernel mode */ \ 389 - ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */ \ 389 + ACCOUNT_CPU_USER_ENTRY(r13,r10,r11);/* accounting (uses cr0+eq) */ \ 390 390 2: ld r3,excf+EX_R10(r13); /* get back r10 */ \ 391 391 ld r4,excf+EX_R11(r13); /* get back r11 */ \ 392 392 mfspr r5,scratch; /* get back r13 */ \ ··· 1059 1059 andi. r6,r10,MSR_PR 1060 1060 REST_2GPRS(6, r1) 1061 1061 beq 1f 1062 - ACCOUNT_CPU_USER_EXIT(r10, r11) 1062 + ACCOUNT_CPU_USER_EXIT(r13, r10, r11) 1063 1063 ld r0,GPR13(r1) 1064 1064 1065 1065 1: stdcx. r0,0,r1 /* to clear the reservation */
+54 -27
arch/powerpc/kernel/time.c
··· 167 167 168 168 cputime_t cputime_one_jiffy; 169 169 170 + #ifdef CONFIG_PPC_SPLPAR 170 171 void (*dtl_consumer)(struct dtl_entry *, u64); 172 + #endif 173 + 174 + #ifdef CONFIG_PPC64 175 + #define get_accounting(tsk) (&get_paca()->accounting) 176 + #else 177 + #define get_accounting(tsk) (&task_thread_info(tsk)->accounting) 178 + #endif 171 179 172 180 static void calc_cputime_factors(void) 173 181 { ··· 195 187 * Read the SPURR on systems that have it, otherwise the PURR, 196 188 * or if that doesn't exist return the timebase value passed in. 197 189 */ 198 - static u64 read_spurr(u64 tb) 190 + static unsigned long read_spurr(unsigned long tb) 199 191 { 200 192 if (cpu_has_feature(CPU_FTR_SPURR)) 201 193 return mfspr(SPRN_SPURR); ··· 258 250 void accumulate_stolen_time(void) 259 251 { 260 252 u64 sst, ust; 261 - 262 253 u8 save_soft_enabled = local_paca->soft_enabled; 254 + struct cpu_accounting_data *acct = &local_paca->accounting; 263 255 264 256 /* We are called early in the exception entry, before 265 257 * soft/hard_enabled are sync'ed to the expected state ··· 269 261 */ 270 262 local_paca->soft_enabled = 0; 271 263 272 - sst = scan_dispatch_log(local_paca->starttime_user); 273 - ust = scan_dispatch_log(local_paca->starttime); 274 - local_paca->system_time -= sst; 275 - local_paca->user_time -= ust; 264 + sst = scan_dispatch_log(acct->starttime_user); 265 + ust = scan_dispatch_log(acct->starttime); 266 + acct->system_time -= sst; 267 + acct->user_time -= ust; 276 268 local_paca->stolen_time += ust + sst; 277 269 278 270 local_paca->soft_enabled = save_soft_enabled; ··· 284 276 285 277 if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) { 286 278 stolen = scan_dispatch_log(stop_tb); 287 - get_paca()->system_time -= stolen; 279 + get_paca()->accounting.system_time -= stolen; 288 280 } 289 281 290 282 stolen += get_paca()->stolen_time; ··· 304 296 * Account time for a transition between system, hard irq 305 297 * or soft irq state. 
306 298 */ 307 - static u64 vtime_delta(struct task_struct *tsk, 308 - u64 *sys_scaled, u64 *stolen) 299 + static unsigned long vtime_delta(struct task_struct *tsk, 300 + unsigned long *sys_scaled, 301 + unsigned long *stolen) 309 302 { 310 - u64 now, nowscaled, deltascaled; 311 - u64 udelta, delta, user_scaled; 303 + unsigned long now, nowscaled, deltascaled; 304 + unsigned long udelta, delta, user_scaled; 305 + struct cpu_accounting_data *acct = get_accounting(tsk); 312 306 313 307 WARN_ON_ONCE(!irqs_disabled()); 314 308 315 309 now = mftb(); 316 310 nowscaled = read_spurr(now); 317 - get_paca()->system_time += now - get_paca()->starttime; 318 - get_paca()->starttime = now; 319 - deltascaled = nowscaled - get_paca()->startspurr; 320 - get_paca()->startspurr = nowscaled; 311 + acct->system_time += now - acct->starttime; 312 + acct->starttime = now; 313 + deltascaled = nowscaled - acct->startspurr; 314 + acct->startspurr = nowscaled; 321 315 322 316 *stolen = calculate_stolen_time(now); 323 317 324 - delta = get_paca()->system_time; 325 - get_paca()->system_time = 0; 326 - udelta = get_paca()->user_time - get_paca()->utime_sspurr; 327 - get_paca()->utime_sspurr = get_paca()->user_time; 318 + delta = acct->system_time; 319 + acct->system_time = 0; 320 + udelta = acct->user_time - acct->utime_sspurr; 321 + acct->utime_sspurr = acct->user_time; 328 322 329 323 /* 330 324 * Because we don't read the SPURR on every kernel entry/exit, ··· 348 338 *sys_scaled = deltascaled; 349 339 } 350 340 } 351 - get_paca()->user_time_scaled += user_scaled; 341 + acct->user_time_scaled += user_scaled; 352 342 353 343 return delta; 354 344 } 355 345 356 346 void vtime_account_system(struct task_struct *tsk) 357 347 { 358 - u64 delta, sys_scaled, stolen; 348 + unsigned long delta, sys_scaled, stolen; 359 349 360 350 delta = vtime_delta(tsk, &sys_scaled, &stolen); 361 351 account_system_time(tsk, 0, delta, sys_scaled); ··· 366 356 367 357 void vtime_account_idle(struct task_struct *tsk) 
368 358 { 369 - u64 delta, sys_scaled, stolen; 359 + unsigned long delta, sys_scaled, stolen; 370 360 371 361 delta = vtime_delta(tsk, &sys_scaled, &stolen); 372 362 account_idle_time(delta + stolen); ··· 384 374 void vtime_account_user(struct task_struct *tsk) 385 375 { 386 376 cputime_t utime, utimescaled; 377 + struct cpu_accounting_data *acct = get_accounting(tsk); 387 378 388 - utime = get_paca()->user_time; 389 - utimescaled = get_paca()->user_time_scaled; 390 - get_paca()->user_time = 0; 391 - get_paca()->user_time_scaled = 0; 392 - get_paca()->utime_sspurr = 0; 379 + utime = acct->user_time; 380 + utimescaled = acct->user_time_scaled; 381 + acct->user_time = 0; 382 + acct->user_time_scaled = 0; 383 + acct->utime_sspurr = 0; 393 384 account_user_time(tsk, utime, utimescaled); 394 385 } 386 + 387 + #ifdef CONFIG_PPC32 388 + /* 389 + * Called from the context switch with interrupts disabled, to charge all 390 + * accumulated times to the current process, and to prepare accounting on 391 + * the next process. 392 + */ 393 + void arch_vtime_task_switch(struct task_struct *prev) 394 + { 395 + struct cpu_accounting_data *acct = get_accounting(current); 396 + 397 + acct->starttime = get_accounting(prev)->starttime; 398 + acct->system_time = 0; 399 + acct->user_time = 0; 400 + } 401 + #endif /* CONFIG_PPC32 */ 395 402 396 403 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ 397 404 #define calc_cputime_factors()
-1
arch/powerpc/platforms/Kconfig.cputype
··· 1 1 config PPC64 2 2 bool "64-bit kernel" 3 3 default n 4 - select HAVE_VIRT_CPU_ACCOUNTING 5 4 select ZLIB_DEFLATE 6 5 help 7 6 This option selects whether a 32-bit or a 64-bit kernel
+7 -7
arch/powerpc/xmon/xmon.c
··· 2213 2213 DUMP(p, subcore_sibling_mask, "x"); 2214 2214 #endif 2215 2215 2216 - DUMP(p, user_time, "llx"); 2217 - DUMP(p, system_time, "llx"); 2218 - DUMP(p, user_time_scaled, "llx"); 2219 - DUMP(p, starttime, "llx"); 2220 - DUMP(p, starttime_user, "llx"); 2221 - DUMP(p, startspurr, "llx"); 2222 - DUMP(p, utime_sspurr, "llx"); 2216 + DUMP(p, accounting.user_time, "llx"); 2217 + DUMP(p, accounting.system_time, "llx"); 2218 + DUMP(p, accounting.user_time_scaled, "llx"); 2219 + DUMP(p, accounting.starttime, "llx"); 2220 + DUMP(p, accounting.starttime_user, "llx"); 2221 + DUMP(p, accounting.startspurr, "llx"); 2222 + DUMP(p, accounting.utime_sspurr, "llx"); 2223 2223 DUMP(p, stolen_time, "llx"); 2224 2224 #undef DUMP 2225 2225