Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cputime: Use a proper subsystem naming for vtime related APIs

Use a naming based on vtime as a prefix for virtual based
cputime accounting APIs:

- account_system_vtime() -> vtime_account()
- account_switch_vtime() -> vtime_task_switch()

It makes it easier to allow for further variants such
as vtime_account_system(), vtime_account_idle(), ... if we
want to find out from generic code which context we are accounting to.

This also makes it clearer which subsystem these APIs
refer to.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>

+27 -27
+3 -3
arch/ia64/kernel/time.c
··· 88 88 * accumulated times to the current process, and to prepare accounting on 89 89 * the next process. 90 90 */ 91 - void account_switch_vtime(struct task_struct *prev) 91 + void vtime_task_switch(struct task_struct *prev) 92 92 { 93 93 struct thread_info *pi = task_thread_info(prev); 94 94 struct thread_info *ni = task_thread_info(current); ··· 116 116 * Account time for a transition between system, hard irq or soft irq state. 117 117 * Note that this function is called with interrupts enabled. 118 118 */ 119 - void account_system_vtime(struct task_struct *tsk) 119 + void vtime_account(struct task_struct *tsk) 120 120 { 121 121 struct thread_info *ti = task_thread_info(tsk); 122 122 unsigned long flags; ··· 138 138 139 139 local_irq_restore(flags); 140 140 } 141 - EXPORT_SYMBOL_GPL(account_system_vtime); 141 + EXPORT_SYMBOL_GPL(vtime_account); 142 142 143 143 /* 144 144 * Called from the timer interrupt handler to charge accumulated user time
+5 -5
arch/powerpc/kernel/time.c
··· 291 291 * Account time for a transition between system, hard irq 292 292 * or soft irq state. 293 293 */ 294 - void account_system_vtime(struct task_struct *tsk) 294 + void vtime_account(struct task_struct *tsk) 295 295 { 296 296 u64 now, nowscaled, delta, deltascaled; 297 297 unsigned long flags; ··· 343 343 } 344 344 local_irq_restore(flags); 345 345 } 346 - EXPORT_SYMBOL_GPL(account_system_vtime); 346 + EXPORT_SYMBOL_GPL(vtime_account); 347 347 348 348 /* 349 349 * Transfer the user and system times accumulated in the paca 350 350 * by the exception entry and exit code to the generic process 351 351 * user and system time records. 352 352 * Must be called with interrupts disabled. 353 - * Assumes that account_system_vtime() has been called recently 353 + * Assumes that vtime_account() has been called recently 354 354 * (i.e. since the last entry from usermode) so that 355 355 * get_paca()->user_time_scaled is up to date. 356 356 */ ··· 366 366 account_user_time(tsk, utime, utimescaled); 367 367 } 368 368 369 - void account_switch_vtime(struct task_struct *prev) 369 + void vtime_task_switch(struct task_struct *prev) 370 370 { 371 - account_system_vtime(prev); 371 + vtime_account(prev); 372 372 account_process_tick(prev, 0); 373 373 } 374 374
+3 -3
arch/s390/kernel/vtime.c
··· 99 99 return virt_timer_forward(user + system); 100 100 } 101 101 102 - void account_switch_vtime(struct task_struct *prev) 102 + void vtime_task_switch(struct task_struct *prev) 103 103 { 104 104 struct thread_info *ti; 105 105 ··· 122 122 * Update process times based on virtual cpu times stored by entry.S 123 123 * to the lowcore fields user_timer, system_timer & steal_clock. 124 124 */ 125 - void account_system_vtime(struct task_struct *tsk) 125 + void vtime_account(struct task_struct *tsk) 126 126 { 127 127 struct thread_info *ti = task_thread_info(tsk); 128 128 u64 timer, system; ··· 138 138 139 139 virt_timer_forward(system); 140 140 } 141 - EXPORT_SYMBOL_GPL(account_system_vtime); 141 + EXPORT_SYMBOL_GPL(vtime_account); 142 142 143 143 void __kprobes vtime_stop_cpu(void) 144 144 {
+4 -4
include/linux/hardirq.h
··· 132 132 struct task_struct; 133 133 134 134 #if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING) 135 - static inline void account_system_vtime(struct task_struct *tsk) 135 + static inline void vtime_account(struct task_struct *tsk) 136 136 { 137 137 } 138 138 #else 139 - extern void account_system_vtime(struct task_struct *tsk); 139 + extern void vtime_account(struct task_struct *tsk); 140 140 #endif 141 141 142 142 #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) ··· 162 162 */ 163 163 #define __irq_enter() \ 164 164 do { \ 165 - account_system_vtime(current); \ 165 + vtime_account(current); \ 166 166 add_preempt_count(HARDIRQ_OFFSET); \ 167 167 trace_hardirq_enter(); \ 168 168 } while (0) ··· 178 178 #define __irq_exit() \ 179 179 do { \ 180 180 trace_hardirq_exit(); \ 181 - account_system_vtime(current); \ 181 + vtime_account(current); \ 182 182 sub_preempt_count(HARDIRQ_OFFSET); \ 183 183 } while (0) 184 184
+2 -2
include/linux/kernel_stat.h
··· 131 131 extern void account_idle_ticks(unsigned long ticks); 132 132 133 133 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 134 - extern void account_switch_vtime(struct task_struct *prev); 134 + extern void vtime_task_switch(struct task_struct *prev); 135 135 #else 136 - static inline void account_switch_vtime(struct task_struct *prev) { } 136 + static inline void vtime_task_switch(struct task_struct *prev) { } 137 137 #endif 138 138 139 139 #endif /* _LINUX_KERNEL_STAT_H */
+2 -2
include/linux/kvm_host.h
··· 685 685 static inline void kvm_guest_enter(void) 686 686 { 687 687 BUG_ON(preemptible()); 688 - account_system_vtime(current); 688 + vtime_account(current); 689 689 current->flags |= PF_VCPU; 690 690 /* KVM does not hold any references to rcu protected data when it 691 691 * switches CPU into a guest mode. In fact switching to a guest mode ··· 699 699 700 700 static inline void kvm_guest_exit(void) 701 701 { 702 - account_system_vtime(current); 702 + vtime_account(current); 703 703 current->flags &= ~PF_VCPU; 704 704 } 705 705
+1 -1
kernel/sched/core.c
··· 1764 1764 * Manfred Spraul <manfred@colorfullife.com> 1765 1765 */ 1766 1766 prev_state = prev->state; 1767 - account_switch_vtime(prev); 1767 + vtime_task_switch(prev); 1768 1768 finish_arch_switch(prev); 1769 1769 perf_event_task_sched_in(prev, current); 1770 1770 finish_lock_switch(rq, prev);
+4 -4
kernel/sched/cputime.c
··· 10 10 11 11 /* 12 12 * There are no locks covering percpu hardirq/softirq time. 13 - * They are only modified in account_system_vtime, on corresponding CPU 13 + * They are only modified in vtime_account, on corresponding CPU 14 14 * with interrupts disabled. So, writes are safe. 15 15 * They are read and saved off onto struct rq in update_rq_clock(). 16 16 * This may result in other CPU reading this CPU's irq time and can 17 - * race with irq/account_system_vtime on this CPU. We would either get old 17 + * race with irq/vtime_account on this CPU. We would either get old 18 18 * or new value with a side effect of accounting a slice of irq time to wrong 19 19 * task when irq is in progress while we read rq->clock. That is a worthy 20 20 * compromise in place of having locks on each irq in account_system_time. ··· 43 43 * Called before incrementing preempt_count on {soft,}irq_enter 44 44 * and before decrementing preempt_count on {soft,}irq_exit. 45 45 */ 46 - void account_system_vtime(struct task_struct *curr) 46 + void vtime_account(struct task_struct *curr) 47 47 { 48 48 unsigned long flags; 49 49 s64 delta; ··· 73 73 irq_time_write_end(); 74 74 local_irq_restore(flags); 75 75 } 76 - EXPORT_SYMBOL_GPL(account_system_vtime); 76 + EXPORT_SYMBOL_GPL(vtime_account); 77 77 78 78 static int irqtime_account_hi_update(void) 79 79 {
+3 -3
kernel/softirq.c
··· 220 220 current->flags &= ~PF_MEMALLOC; 221 221 222 222 pending = local_softirq_pending(); 223 - account_system_vtime(current); 223 + vtime_account(current); 224 224 225 225 __local_bh_disable((unsigned long)__builtin_return_address(0), 226 226 SOFTIRQ_OFFSET); ··· 271 271 272 272 lockdep_softirq_exit(); 273 273 274 - account_system_vtime(current); 274 + vtime_account(current); 275 275 __local_bh_enable(SOFTIRQ_OFFSET); 276 276 tsk_restore_flags(current, old_flags, PF_MEMALLOC); 277 277 } ··· 340 340 */ 341 341 void irq_exit(void) 342 342 { 343 - account_system_vtime(current); 343 + vtime_account(current); 344 344 trace_hardirq_exit(); 345 345 sub_preempt_count(IRQ_EXIT_OFFSET); 346 346 if (!in_interrupt() && local_softirq_pending())