Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] improve precision of process accounting.

The unit of the cputime accounting values that are stored per process is
currently a microsecond. The CPU timer has a maximum granularity of
2**-12 microseconds. There is no benefit in storing the per process values
in the lesser precision and there is the disadvantage that the backend
has to do the rounding to microseconds. The better solution is to use
the maximum granularity of the CPU timer as cputime unit.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

+84 -95
+21 -21
arch/s390/include/asm/cputime.h
··· 11 11 12 12 #include <asm/div64.h> 13 13 14 - /* We want to use micro-second resolution. */ 14 + /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ 15 15 16 16 typedef unsigned long long cputime_t; 17 17 typedef unsigned long long cputime64_t; ··· 53 53 #define cputime_ge(__a, __b) ((__a) >= (__b)) 54 54 #define cputime_lt(__a, __b) ((__a) < (__b)) 55 55 #define cputime_le(__a, __b) ((__a) <= (__b)) 56 - #define cputime_to_jiffies(__ct) (__div((__ct), 1000000 / HZ)) 56 + #define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ)) 57 57 #define cputime_to_scaled(__ct) (__ct) 58 - #define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (1000000 / HZ)) 58 + #define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ)) 59 59 60 60 #define cputime64_zero (0ULL) 61 61 #define cputime64_add(__a, __b) ((__a) + (__b)) ··· 64 64 static inline u64 65 65 cputime64_to_jiffies64(cputime64_t cputime) 66 66 { 67 - do_div(cputime, 1000000 / HZ); 67 + do_div(cputime, 4096000000ULL / HZ); 68 68 return cputime; 69 69 } 70 70 ··· 74 74 static inline unsigned int 75 75 cputime_to_msecs(const cputime_t cputime) 76 76 { 77 - return __div(cputime, 1000); 77 + return __div(cputime, 4096000); 78 78 } 79 79 80 80 static inline cputime_t 81 81 msecs_to_cputime(const unsigned int m) 82 82 { 83 - return (cputime_t) m * 1000; 83 + return (cputime_t) m * 4096000; 84 84 } 85 85 86 86 /* ··· 89 89 static inline unsigned int 90 90 cputime_to_secs(const cputime_t cputime) 91 91 { 92 - return __div(cputime, 1000000); 92 + return __div(cputime, 2048000000) >> 1; 93 93 } 94 94 95 95 static inline cputime_t 96 96 secs_to_cputime(const unsigned int s) 97 97 { 98 - return (cputime_t) s * 1000000; 98 + return (cputime_t) s * 4096000000ULL; 99 99 } 100 100 101 101 /* ··· 104 104 static inline cputime_t 105 105 timespec_to_cputime(const struct timespec *value) 106 106 { 107 - return value->tv_nsec / 1000 + (u64) value->tv_sec * 1000000; 107 + return 
value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL; 108 108 } 109 109 110 110 static inline void ··· 114 114 register_pair rp; 115 115 116 116 rp.pair = cputime >> 1; 117 - asm ("dr %0,%1" : "+d" (rp) : "d" (1000000 >> 1)); 118 - value->tv_nsec = rp.subreg.even * 1000; 117 + asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); 118 + value->tv_nsec = rp.subreg.even * 1000 / 4096; 119 119 value->tv_sec = rp.subreg.odd; 120 120 #else 121 - value->tv_nsec = (cputime % 1000000) * 1000; 122 - value->tv_sec = cputime / 1000000; 121 + value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096; 122 + value->tv_sec = cputime / 4096000000ULL; 123 123 #endif 124 124 } 125 125 ··· 131 131 static inline cputime_t 132 132 timeval_to_cputime(const struct timeval *value) 133 133 { 134 - return value->tv_usec + (u64) value->tv_sec * 1000000; 134 + return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL; 135 135 } 136 136 137 137 static inline void ··· 141 141 register_pair rp; 142 142 143 143 rp.pair = cputime >> 1; 144 - asm ("dr %0,%1" : "+d" (rp) : "d" (1000000 >> 1)); 145 - value->tv_usec = rp.subreg.even; 144 + asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); 145 + value->tv_usec = rp.subreg.even / 4096; 146 146 value->tv_sec = rp.subreg.odd; 147 147 #else 148 - value->tv_usec = cputime % 1000000; 149 - value->tv_sec = cputime / 1000000; 148 + value->tv_usec = cputime % 4096000000ULL; 149 + value->tv_sec = cputime / 4096000000ULL; 150 150 #endif 151 151 } 152 152 ··· 156 156 static inline clock_t 157 157 cputime_to_clock_t(cputime_t cputime) 158 158 { 159 - return __div(cputime, 1000000 / USER_HZ); 159 + return __div(cputime, 4096000000ULL / USER_HZ); 160 160 } 161 161 162 162 static inline cputime_t 163 163 clock_t_to_cputime(unsigned long x) 164 164 { 165 - return (cputime_t) x * (1000000 / USER_HZ); 165 + return (cputime_t) x * (4096000000ULL / USER_HZ); 166 166 } 167 167 168 168 /* ··· 171 171 static inline clock_t 172 172 
cputime64_to_clock_t(cputime64_t cputime) 173 173 { 174 - return __div(cputime, 1000000 / USER_HZ); 174 + return __div(cputime, 4096000000ULL / USER_HZ); 175 175 } 176 176 177 177 #endif /* _S390_CPUTIME_H */
+20 -20
arch/s390/include/asm/lowcore.h
··· 67 67 #define __LC_SYNC_ENTER_TIMER 0x248 68 68 #define __LC_ASYNC_ENTER_TIMER 0x250 69 69 #define __LC_EXIT_TIMER 0x258 70 - #define __LC_LAST_UPDATE_TIMER 0x260 71 - #define __LC_USER_TIMER 0x268 72 - #define __LC_SYSTEM_TIMER 0x270 73 - #define __LC_LAST_UPDATE_CLOCK 0x278 74 - #define __LC_STEAL_CLOCK 0x280 70 + #define __LC_USER_TIMER 0x260 71 + #define __LC_SYSTEM_TIMER 0x268 72 + #define __LC_STEAL_TIMER 0x270 73 + #define __LC_LAST_UPDATE_TIMER 0x278 74 + #define __LC_LAST_UPDATE_CLOCK 0x280 75 75 #define __LC_RETURN_MCCK_PSW 0x288 76 76 #define __LC_KERNEL_STACK 0xC40 77 77 #define __LC_THREAD_INFO 0xC44 ··· 89 89 #define __LC_SYNC_ENTER_TIMER 0x250 90 90 #define __LC_ASYNC_ENTER_TIMER 0x258 91 91 #define __LC_EXIT_TIMER 0x260 92 - #define __LC_LAST_UPDATE_TIMER 0x268 93 - #define __LC_USER_TIMER 0x270 94 - #define __LC_SYSTEM_TIMER 0x278 95 - #define __LC_LAST_UPDATE_CLOCK 0x280 96 - #define __LC_STEAL_CLOCK 0x288 92 + #define __LC_USER_TIMER 0x268 93 + #define __LC_SYSTEM_TIMER 0x270 94 + #define __LC_STEAL_TIMER 0x278 95 + #define __LC_LAST_UPDATE_TIMER 0x280 96 + #define __LC_LAST_UPDATE_CLOCK 0x288 97 97 #define __LC_RETURN_MCCK_PSW 0x290 98 98 #define __LC_KERNEL_STACK 0xD40 99 99 #define __LC_THREAD_INFO 0xD48 ··· 252 252 __u64 sync_enter_timer; /* 0x248 */ 253 253 __u64 async_enter_timer; /* 0x250 */ 254 254 __u64 exit_timer; /* 0x258 */ 255 - __u64 last_update_timer; /* 0x260 */ 256 - __u64 user_timer; /* 0x268 */ 257 - __u64 system_timer; /* 0x270 */ 258 - __u64 last_update_clock; /* 0x278 */ 259 - __u64 steal_clock; /* 0x280 */ 255 + __u64 user_timer; /* 0x260 */ 256 + __u64 system_timer; /* 0x268 */ 257 + __u64 steal_timer; /* 0x270 */ 258 + __u64 last_update_timer; /* 0x278 */ 259 + __u64 last_update_clock; /* 0x280 */ 260 260 psw_t return_mcck_psw; /* 0x288 */ 261 261 __u8 pad8[0xc00-0x290]; /* 0x290 */ 262 262 ··· 343 343 __u64 sync_enter_timer; /* 0x250 */ 344 344 __u64 async_enter_timer; /* 0x258 */ 345 345 __u64 exit_timer; /* 0x260 
*/ 346 - __u64 last_update_timer; /* 0x268 */ 347 - __u64 user_timer; /* 0x270 */ 348 - __u64 system_timer; /* 0x278 */ 349 - __u64 last_update_clock; /* 0x280 */ 350 - __u64 steal_clock; /* 0x288 */ 346 + __u64 user_timer; /* 0x268 */ 347 + __u64 system_timer; /* 0x270 */ 348 + __u64 steal_timer; /* 0x278 */ 349 + __u64 last_update_timer; /* 0x280 */ 350 + __u64 last_update_clock; /* 0x288 */ 351 351 psw_t return_mcck_psw; /* 0x290 */ 352 352 __u8 pad8[0xc00-0x2a0]; /* 0x2a0 */ 353 353 /* System info area */
+2 -2
arch/s390/include/asm/system.h
··· 99 99 prev = __switch_to(prev,next); \ 100 100 } while (0) 101 101 102 - extern void account_vtime(struct task_struct *); 102 + extern void account_vtime(struct task_struct *, struct task_struct *); 103 103 extern void account_tick_vtime(struct task_struct *); 104 104 extern void account_system_vtime(struct task_struct *); 105 105 ··· 121 121 122 122 #define finish_arch_switch(prev) do { \ 123 123 set_fs(current->thread.mm_segment); \ 124 - account_vtime(prev); \ 124 + account_vtime(prev, current); \ 125 125 } while (0) 126 126 127 127 #define nop() asm volatile("nop")
+2
arch/s390/include/asm/thread_info.h
··· 47 47 unsigned int cpu; /* current CPU */ 48 48 int preempt_count; /* 0 => preemptable, <0 => BUG */ 49 49 struct restart_block restart_block; 50 + __u64 user_timer; 51 + __u64 system_timer; 50 52 }; 51 53 52 54 /*
+39 -52
arch/s390/kernel/vtime.c
··· 31 31 * Update process times based on virtual cpu times stored by entry.S 32 32 * to the lowcore fields user_timer, system_timer & steal_clock. 33 33 */ 34 - void account_process_tick(struct task_struct *tsk, int user_tick) 34 + static void do_account_vtime(struct task_struct *tsk, int hardirq_offset) 35 35 { 36 - cputime_t cputime; 37 - __u64 timer, clock; 38 - int rcu_user_flag; 36 + struct thread_info *ti = task_thread_info(tsk); 37 + __u64 timer, clock, user, system, steal; 39 38 40 39 timer = S390_lowcore.last_update_timer; 41 40 clock = S390_lowcore.last_update_clock; ··· 43 44 : "=m" (S390_lowcore.last_update_timer), 44 45 "=m" (S390_lowcore.last_update_clock) ); 45 46 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 46 - S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock; 47 + S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; 47 48 48 - cputime = S390_lowcore.user_timer >> 12; 49 - rcu_user_flag = cputime != 0; 50 - S390_lowcore.user_timer -= cputime << 12; 51 - S390_lowcore.steal_clock -= cputime << 12; 52 - account_user_time(tsk, cputime, cputime); 49 + user = S390_lowcore.user_timer - ti->user_timer; 50 + S390_lowcore.steal_timer -= user; 51 + ti->user_timer = S390_lowcore.user_timer; 52 + account_user_time(tsk, user, user); 53 53 54 - cputime = S390_lowcore.system_timer >> 12; 55 - S390_lowcore.system_timer -= cputime << 12; 56 - S390_lowcore.steal_clock -= cputime << 12; 54 + system = S390_lowcore.system_timer - ti->system_timer; 55 + S390_lowcore.steal_timer -= system; 56 + ti->system_timer = S390_lowcore.system_timer; 57 57 if (idle_task(smp_processor_id()) != current) 58 - account_system_time(tsk, HARDIRQ_OFFSET, cputime, cputime); 58 + account_system_time(tsk, hardirq_offset, system, system); 59 59 else 60 - account_idle_time(cputime); 60 + account_idle_time(system); 61 61 62 - cputime = S390_lowcore.steal_clock; 63 - if ((__s64) cputime > 0) { 64 - cputime >>= 12; 65 - 
S390_lowcore.steal_clock -= cputime << 12; 62 + steal = S390_lowcore.steal_timer; 63 + if ((s64) steal > 0) { 64 + S390_lowcore.steal_timer = 0; 66 65 if (idle_task(smp_processor_id()) != current) 67 - account_steal_time(cputime); 66 + account_steal_time(steal); 68 67 else 69 - account_idle_time(cputime); 68 + account_idle_time(steal); 70 69 } 71 70 } 72 71 73 - /* 74 - * Update process times based on virtual cpu times stored by entry.S 75 - * to the lowcore fields user_timer, system_timer & steal_clock. 76 - */ 77 - void account_vtime(struct task_struct *tsk) 72 + void account_vtime(struct task_struct *prev, struct task_struct *next) 78 73 { 79 - cputime_t cputime; 80 - __u64 timer; 74 + struct thread_info *ti; 81 75 82 - timer = S390_lowcore.last_update_timer; 83 - asm volatile (" STPT %0" /* Store current cpu timer value */ 84 - : "=m" (S390_lowcore.last_update_timer) ); 85 - S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 76 + do_account_vtime(prev, 0); 77 + ti = task_thread_info(prev); 78 + ti->user_timer = S390_lowcore.user_timer; 79 + ti->system_timer = S390_lowcore.system_timer; 80 + ti = task_thread_info(next); 81 + S390_lowcore.user_timer = ti->user_timer; 82 + S390_lowcore.system_timer = ti->system_timer; 83 + } 86 84 87 - cputime = S390_lowcore.user_timer >> 12; 88 - S390_lowcore.user_timer -= cputime << 12; 89 - S390_lowcore.steal_clock -= cputime << 12; 90 - account_user_time(tsk, cputime, cputime); 91 - 92 - cputime = S390_lowcore.system_timer >> 12; 93 - S390_lowcore.system_timer -= cputime << 12; 94 - S390_lowcore.steal_clock -= cputime << 12; 95 - if (idle_task(smp_processor_id()) != current) 96 - account_system_time(tsk, 0, cputime, cputime); 97 - else 98 - account_idle_time(cputime); 85 + void account_process_tick(struct task_struct *tsk, int user_tick) 86 + { 87 + do_account_vtime(tsk, HARDIRQ_OFFSET); 99 88 } 100 89 101 90 /* ··· 92 105 */ 93 106 void account_system_vtime(struct task_struct *tsk) 94 107 { 95 - cputime_t 
cputime; 96 - __u64 timer; 108 + struct thread_info *ti = task_thread_info(tsk); 109 + __u64 timer, system; 97 110 98 111 timer = S390_lowcore.last_update_timer; 99 112 asm volatile (" STPT %0" /* Store current cpu timer value */ 100 113 : "=m" (S390_lowcore.last_update_timer) ); 101 114 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 102 115 103 - cputime = S390_lowcore.system_timer >> 12; 104 - S390_lowcore.system_timer -= cputime << 12; 105 - S390_lowcore.steal_clock -= cputime << 12; 116 + system = S390_lowcore.system_timer - ti->system_timer; 117 + S390_lowcore.steal_timer -= system; 118 + ti->system_timer = S390_lowcore.system_timer; 106 119 if (in_irq() || idle_task(smp_processor_id()) != current) 107 - account_system_time(tsk, 0, cputime, cputime); 120 + account_system_time(tsk, 0, system, system); 108 121 else 109 - account_idle_time(cputime); 122 + account_idle_time(system); 110 123 } 111 124 EXPORT_SYMBOL_GPL(account_system_vtime); 112 125 ··· 477 490 /* kick the virtual timer */ 478 491 S390_lowcore.exit_timer = VTIMER_MAX_SLICE; 479 492 S390_lowcore.last_update_timer = VTIMER_MAX_SLICE; 480 - asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); 481 493 asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock)); 494 + asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); 482 495 483 496 /* enable cpu timer interrupts */ 484 497 __ctl_set_bit(0,10);