Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cputime: Use accessors to read task cputime stats

This is in preparation for the full dynticks feature. While
remotely reading the cputime of a task running in a full
dynticks CPU, we'll need to do some extra-computation. This
way we can account the time it spent tickless in userspace
since its last cputime snapshot.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Namhyung Kim <namhyung.kim@lge.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>

+144 -49
+4 -2
arch/alpha/kernel/osf_sys.c
··· 1139 1139 SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) 1140 1140 { 1141 1141 struct rusage32 r; 1142 + cputime_t utime, stime; 1142 1143 1143 1144 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) 1144 1145 return -EINVAL; ··· 1147 1146 memset(&r, 0, sizeof(r)); 1148 1147 switch (who) { 1149 1148 case RUSAGE_SELF: 1150 - jiffies_to_timeval32(current->utime, &r.ru_utime); 1151 - jiffies_to_timeval32(current->stime, &r.ru_stime); 1149 + task_cputime(current, &utime, &stime); 1150 + jiffies_to_timeval32(utime, &r.ru_utime); 1151 + jiffies_to_timeval32(stime, &r.ru_stime); 1152 1152 r.ru_minflt = current->min_flt; 1153 1153 r.ru_majflt = current->maj_flt; 1154 1154 break;
+6 -5
arch/x86/kernel/apm_32.c
··· 899 899 static int use_apm_idle; /* = 0 */ 900 900 static unsigned int last_jiffies; /* = 0 */ 901 901 static unsigned int last_stime; /* = 0 */ 902 + cputime_t stime; 902 903 903 904 int apm_idle_done = 0; 904 905 unsigned int jiffies_since_last_check = jiffies - last_jiffies; ··· 907 906 908 907 WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012"); 909 908 recalc: 909 + task_cputime(current, NULL, &stime); 910 910 if (jiffies_since_last_check > IDLE_CALC_LIMIT) { 911 911 use_apm_idle = 0; 912 - last_jiffies = jiffies; 913 - last_stime = current->stime; 914 912 } else if (jiffies_since_last_check > idle_period) { 915 913 unsigned int idle_percentage; 916 914 917 - idle_percentage = current->stime - last_stime; 915 + idle_percentage = stime - last_stime; 918 916 idle_percentage *= 100; 919 917 idle_percentage /= jiffies_since_last_check; 920 918 use_apm_idle = (idle_percentage > idle_threshold); 921 919 if (apm_info.forbid_idle) 922 920 use_apm_idle = 0; 923 - last_jiffies = jiffies; 924 - last_stime = current->stime; 925 921 } 922 + 923 + last_jiffies = jiffies; 924 + last_stime = stime; 926 925 927 926 bucket = IDLE_LEAKY_MAX; 928 927
+6 -1
drivers/isdn/mISDN/stack.c
··· 18 18 #include <linux/slab.h> 19 19 #include <linux/mISDNif.h> 20 20 #include <linux/kthread.h> 21 + #include <linux/sched.h> 21 22 #include "core.h" 22 23 23 24 static u_int *debug; ··· 203 202 mISDNStackd(void *data) 204 203 { 205 204 struct mISDNstack *st = data; 205 + #ifdef MISDN_MSG_STATS 206 + cputime_t utime, stime; 207 + #endif 206 208 int err = 0; 207 209 208 210 sigfillset(&current->blocked); ··· 307 303 "msg %d sleep %d stopped\n", 308 304 dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt, 309 305 st->stopped_cnt); 306 + task_cputime(st->thread, &utime, &stime); 310 307 printk(KERN_DEBUG 311 308 "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n", 312 - dev_name(&st->dev->dev), st->thread->utime, st->thread->stime); 309 + dev_name(&st->dev->dev), utime, stime); 313 310 printk(KERN_DEBUG 314 311 "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n", 315 312 dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
+6 -2
fs/binfmt_elf.c
··· 33 33 #include <linux/elf.h> 34 34 #include <linux/utsname.h> 35 35 #include <linux/coredump.h> 36 + #include <linux/sched.h> 36 37 #include <asm/uaccess.h> 37 38 #include <asm/param.h> 38 39 #include <asm/page.h> ··· 1321 1320 cputime_to_timeval(cputime.utime, &prstatus->pr_utime); 1322 1321 cputime_to_timeval(cputime.stime, &prstatus->pr_stime); 1323 1322 } else { 1324 - cputime_to_timeval(p->utime, &prstatus->pr_utime); 1325 - cputime_to_timeval(p->stime, &prstatus->pr_stime); 1323 + cputime_t utime, stime; 1324 + 1325 + task_cputime(p, &utime, &stime); 1326 + cputime_to_timeval(utime, &prstatus->pr_utime); 1327 + cputime_to_timeval(stime, &prstatus->pr_stime); 1326 1328 } 1327 1329 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime); 1328 1330 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
+5 -2
fs/binfmt_elf_fdpic.c
··· 1375 1375 cputime_to_timeval(cputime.utime, &prstatus->pr_utime); 1376 1376 cputime_to_timeval(cputime.stime, &prstatus->pr_stime); 1377 1377 } else { 1378 - cputime_to_timeval(p->utime, &prstatus->pr_utime); 1379 - cputime_to_timeval(p->stime, &prstatus->pr_stime); 1378 + cputime_t utime, stime; 1379 + 1380 + task_cputime(p, &utime, &stime); 1381 + cputime_to_timeval(utime, &prstatus->pr_utime); 1382 + cputime_to_timeval(stime, &prstatus->pr_stime); 1380 1383 } 1381 1384 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime); 1382 1385 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
+2 -2
fs/proc/array.c
··· 449 449 do { 450 450 min_flt += t->min_flt; 451 451 maj_flt += t->maj_flt; 452 - gtime += t->gtime; 452 + gtime += task_gtime(t); 453 453 t = next_thread(t); 454 454 } while (t != task); 455 455 ··· 472 472 min_flt = task->min_flt; 473 473 maj_flt = task->maj_flt; 474 474 task_cputime_adjusted(task, &utime, &stime); 475 - gtime = task->gtime; 475 + gtime = task_gtime(task); 476 476 } 477 477 478 478 /* scale priority and nice values from timeslices to -20..20 */
+23
include/linux/sched.h
··· 1792 1792 __put_task_struct(t); 1793 1793 } 1794 1794 1795 + static inline cputime_t task_gtime(struct task_struct *t) 1796 + { 1797 + return t->gtime; 1798 + } 1799 + 1800 + static inline void task_cputime(struct task_struct *t, 1801 + cputime_t *utime, cputime_t *stime) 1802 + { 1803 + if (utime) 1804 + *utime = t->utime; 1805 + if (stime) 1806 + *stime = t->stime; 1807 + } 1808 + 1809 + static inline void task_cputime_scaled(struct task_struct *t, 1810 + cputime_t *utimescaled, 1811 + cputime_t *stimescaled) 1812 + { 1813 + if (utimescaled) 1814 + *utimescaled = t->utimescaled; 1815 + if (stimescaled) 1816 + *stimescaled = t->stimescaled; 1817 + } 1795 1818 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); 1796 1819 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); 1797 1820
+3
include/linux/tsacct_kern.h
··· 23 23 #ifdef CONFIG_TASK_XACCT 24 24 extern void xacct_add_tsk(struct taskstats *stats, struct task_struct *p); 25 25 extern void acct_update_integrals(struct task_struct *tsk); 26 + extern void acct_account_cputime(struct task_struct *tsk); 26 27 extern void acct_clear_integrals(struct task_struct *tsk); 27 28 #else 28 29 static inline void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) 29 30 {} 30 31 static inline void acct_update_integrals(struct task_struct *tsk) 32 + {} 33 + static inline void acct_account_cputime(struct task_struct *tsk) 31 34 {} 32 35 static inline void acct_clear_integrals(struct task_struct *tsk) 33 36 {}
+4 -2
kernel/acct.c
··· 566 566 void acct_collect(long exitcode, int group_dead) 567 567 { 568 568 struct pacct_struct *pacct = &current->signal->pacct; 569 + cputime_t utime, stime; 569 570 unsigned long vsize = 0; 570 571 571 572 if (group_dead && current->mm) { ··· 594 593 pacct->ac_flag |= ACORE; 595 594 if (current->flags & PF_SIGNALED) 596 595 pacct->ac_flag |= AXSIG; 597 - pacct->ac_utime += current->utime; 598 - pacct->ac_stime += current->stime; 596 + task_cputime(current, &utime, &stime); 597 + pacct->ac_utime += utime; 598 + pacct->ac_stime += stime; 599 599 pacct->ac_minflt += current->min_flt; 600 600 pacct->ac_majflt += current->maj_flt; 601 601 spin_unlock_irq(&current->sighand->siglock);
+3 -1
kernel/cpu.c
··· 224 224 static inline void check_for_tasks(int cpu) 225 225 { 226 226 struct task_struct *p; 227 + cputime_t utime, stime; 227 228 228 229 write_lock_irq(&tasklist_lock); 229 230 for_each_process(p) { 231 + task_cputime(p, &utime, &stime); 230 232 if (task_cpu(p) == cpu && p->state == TASK_RUNNING && 231 - (p->utime || p->stime)) 233 + (utime || stime)) 232 234 printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d " 233 235 "(state = %ld, flags = %x)\n", 234 236 p->comm, task_pid_nr(p), cpu,
+5 -2
kernel/delayacct.c
··· 106 106 unsigned long long t2, t3; 107 107 unsigned long flags; 108 108 struct timespec ts; 109 + cputime_t utime, stime, stimescaled, utimescaled; 109 110 110 111 /* Though tsk->delays accessed later, early exit avoids 111 112 * unnecessary returning of other data ··· 115 114 goto done; 116 115 117 116 tmp = (s64)d->cpu_run_real_total; 118 - cputime_to_timespec(tsk->utime + tsk->stime, &ts); 117 + task_cputime(tsk, &utime, &stime); 118 + cputime_to_timespec(utime + stime, &ts); 119 119 tmp += timespec_to_ns(&ts); 120 120 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp; 121 121 122 122 tmp = (s64)d->cpu_scaled_run_real_total; 123 - cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts); 123 + task_cputime_scaled(tsk, &utimescaled, &stimescaled); 124 + cputime_to_timespec(utimescaled + stimescaled, &ts); 124 125 tmp += timespec_to_ns(&ts); 125 126 d->cpu_scaled_run_real_total = 126 127 (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
+6 -4
kernel/exit.c
··· 85 85 bool group_dead = thread_group_leader(tsk); 86 86 struct sighand_struct *sighand; 87 87 struct tty_struct *uninitialized_var(tty); 88 + cputime_t utime, stime; 88 89 89 90 sighand = rcu_dereference_check(tsk->sighand, 90 91 lockdep_tasklist_lock_is_held()); ··· 124 123 * We won't ever get here for the group leader, since it 125 124 * will have been the last reference on the signal_struct. 126 125 */ 127 - sig->utime += tsk->utime; 128 - sig->stime += tsk->stime; 129 - sig->gtime += tsk->gtime; 126 + task_cputime(tsk, &utime, &stime); 127 + sig->utime += utime; 128 + sig->stime += stime; 129 + sig->gtime += task_gtime(tsk); 130 130 sig->min_flt += tsk->min_flt; 131 131 sig->maj_flt += tsk->maj_flt; 132 132 sig->nvcsw += tsk->nvcsw; ··· 1094 1092 sig = p->signal; 1095 1093 psig->cutime += tgutime + sig->cutime; 1096 1094 psig->cstime += tgstime + sig->cstime; 1097 - psig->cgtime += p->gtime + sig->gtime + sig->cgtime; 1095 + psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime; 1098 1096 psig->cmin_flt += 1099 1097 p->min_flt + sig->min_flt + sig->cmin_flt; 1100 1098 psig->cmaj_flt +=
+22 -6
kernel/posix-cpu-timers.c
··· 155 155 156 156 static inline cputime_t prof_ticks(struct task_struct *p) 157 157 { 158 - return p->utime + p->stime; 158 + cputime_t utime, stime; 159 + 160 + task_cputime(p, &utime, &stime); 161 + 162 + return utime + stime; 159 163 } 160 164 static inline cputime_t virt_ticks(struct task_struct *p) 161 165 { 162 - return p->utime; 166 + cputime_t utime; 167 + 168 + task_cputime(p, &utime, NULL); 169 + 170 + return utime; 163 171 } 164 172 165 173 static int ··· 479 471 */ 480 472 void posix_cpu_timers_exit(struct task_struct *tsk) 481 473 { 474 + cputime_t utime, stime; 475 + 482 476 add_device_randomness((const void*) &tsk->se.sum_exec_runtime, 483 477 sizeof(unsigned long long)); 478 + task_cputime(tsk, &utime, &stime); 484 479 cleanup_timers(tsk->cpu_timers, 485 - tsk->utime, tsk->stime, tsk->se.sum_exec_runtime); 480 + utime, stime, tsk->se.sum_exec_runtime); 486 481 487 482 } 488 483 void posix_cpu_timers_exit_group(struct task_struct *tsk) 489 484 { 490 485 struct signal_struct *const sig = tsk->signal; 486 + cputime_t utime, stime; 491 487 488 + task_cputime(tsk, &utime, &stime); 492 489 cleanup_timers(tsk->signal->cpu_timers, 493 - tsk->utime + sig->utime, tsk->stime + sig->stime, 490 + utime + sig->utime, stime + sig->stime, 494 491 tsk->se.sum_exec_runtime + sig->sum_sched_runtime); 495 492 } 496 493 ··· 1239 1226 static inline int fastpath_timer_check(struct task_struct *tsk) 1240 1227 { 1241 1228 struct signal_struct *sig; 1229 + cputime_t utime, stime; 1230 + 1231 + task_cputime(tsk, &utime, &stime); 1242 1232 1243 1233 if (!task_cputime_zero(&tsk->cputime_expires)) { 1244 1234 struct task_cputime task_sample = { 1245 - .utime = tsk->utime, 1246 - .stime = tsk->stime, 1235 + .utime = utime, 1236 + .stime = stime, 1247 1237 .sum_exec_runtime = tsk->se.sum_exec_runtime 1248 1238 }; 1249 1239
+7 -6
kernel/sched/cputime.c
··· 164 164 task_group_account_field(p, index, (__force u64) cputime); 165 165 166 166 /* Account for user time used */ 167 - acct_update_integrals(p); 167 + acct_account_cputime(p); 168 168 169 169 170 170 /* ··· 214 214 task_group_account_field(p, index, (__force u64) cputime); 215 215 216 216 /* Account for system time used */ 217 - acct_update_integrals(p); 217 + acct_account_cputime(p); 218 218 219 219 220 220 /* ··· 296 296 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) 297 297 { 298 298 struct signal_struct *sig = tsk->signal; 299 + cputime_t utime, stime; 299 300 struct task_struct *t; 300 301 301 302 times->utime = sig->utime; ··· 310 309 311 310 t = tsk; 312 311 do { 313 - times->utime += t->utime; 314 - times->stime += t->stime; 312 + task_cputime(t, &utime, &stime); 313 + times->utime += utime; 314 + times->stime += stime; 315 315 times->sum_exec_runtime += task_sched_runtime(t); 316 316 } while_each_thread(tsk, t); 317 317 out: ··· 590 588 void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) 591 589 { 592 590 struct task_cputime cputime = { 593 - .utime = p->utime, 594 - .stime = p->stime, 595 591 .sum_exec_runtime = p->se.sum_exec_runtime, 596 592 }; 597 593 594 + task_cputime(p, &cputime.utime, &cputime.stime); 598 595 cputime_adjust(&cputime, &p->prev_cputime, ut, st); 599 596 600 597
+8 -4
kernel/signal.c
··· 1638 1638 unsigned long flags; 1639 1639 struct sighand_struct *psig; 1640 1640 bool autoreap = false; 1641 + cputime_t utime, stime; 1641 1642 1642 1643 BUG_ON(sig == -1); 1643 1644 ··· 1676 1675 task_uid(tsk)); 1677 1676 rcu_read_unlock(); 1678 1677 1679 - info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime); 1680 - info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime); 1678 + task_cputime(tsk, &utime, &stime); 1679 + info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime); 1680 + info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime); 1681 1681 1682 1682 info.si_status = tsk->exit_code & 0x7f; 1683 1683 if (tsk->exit_code & 0x80) ··· 1742 1740 unsigned long flags; 1743 1741 struct task_struct *parent; 1744 1742 struct sighand_struct *sighand; 1743 + cputime_t utime, stime; 1745 1744 1746 1745 if (for_ptracer) { 1747 1746 parent = tsk->parent; ··· 1761 1758 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); 1762 1759 rcu_read_unlock(); 1763 1760 1764 - info.si_utime = cputime_to_clock_t(tsk->utime); 1765 - info.si_stime = cputime_to_clock_t(tsk->stime); 1761 + task_cputime(tsk, &utime, &stime); 1762 + info.si_utime = cputime_to_clock_t(utime); 1763 + info.si_stime = cputime_to_clock_t(stime); 1766 1764 1767 1765 info.si_code = why; 1768 1766 switch (why) {
+34 -10
kernel/tsacct.c
··· 32 32 { 33 33 const struct cred *tcred; 34 34 struct timespec uptime, ts; 35 + cputime_t utime, stime, utimescaled, stimescaled; 35 36 u64 ac_etime; 36 37 37 38 BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); ··· 66 65 stats->ac_ppid = pid_alive(tsk) ? 67 66 task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0; 68 67 rcu_read_unlock(); 69 - stats->ac_utime = cputime_to_usecs(tsk->utime); 70 - stats->ac_stime = cputime_to_usecs(tsk->stime); 71 - stats->ac_utimescaled = cputime_to_usecs(tsk->utimescaled); 72 - stats->ac_stimescaled = cputime_to_usecs(tsk->stimescaled); 68 + 69 + task_cputime(tsk, &utime, &stime); 70 + stats->ac_utime = cputime_to_usecs(utime); 71 + stats->ac_stime = cputime_to_usecs(stime); 72 + 73 + task_cputime_scaled(tsk, &utimescaled, &stimescaled); 74 + stats->ac_utimescaled = cputime_to_usecs(utimescaled); 75 + stats->ac_stimescaled = cputime_to_usecs(stimescaled); 76 + 73 77 stats->ac_minflt = tsk->min_flt; 74 78 stats->ac_majflt = tsk->maj_flt; 75 79 ··· 121 115 #undef KB 122 116 #undef MB 123 117 124 - /** 125 - * acct_update_integrals - update mm integral fields in task_struct 126 - * @tsk: task_struct for accounting 127 - */ 128 - void acct_update_integrals(struct task_struct *tsk) 118 + static void __acct_update_integrals(struct task_struct *tsk, 119 + cputime_t utime, cputime_t stime) 129 120 { 130 121 if (likely(tsk->mm)) { 131 122 cputime_t time, dtime; ··· 131 128 u64 delta; 132 129 133 130 local_irq_save(flags); 134 - time = tsk->stime + tsk->utime; 131 + time = stime + utime; 135 132 dtime = time - tsk->acct_timexpd; 136 133 jiffies_to_timeval(cputime_to_jiffies(dtime), &value); 137 134 delta = value.tv_sec; ··· 145 142 out: 146 143 local_irq_restore(flags); 147 144 } 145 + } 146 + 147 + /** 148 + * acct_update_integrals - update mm integral fields in task_struct 149 + * @tsk: task_struct for accounting 150 + */ 151 + void acct_update_integrals(struct task_struct *tsk) 152 + { 153 + cputime_t utime, stime; 154 + 155 + task_cputime(tsk, &utime, &stime); 156 + __acct_update_integrals(tsk, utime, stime); 157 + } 158 + 159 + /** 160 + * acct_account_cputime - update mm integral after cputime update 161 + * @tsk: task_struct for accounting 162 + */ 163 + void acct_account_cputime(struct task_struct *tsk) 164 + { 165 + __acct_update_integrals(tsk, tsk->utime, tsk->stime); 148 166 } 149 167 150 168 /**