linux/kernel_stat.h at v3.9-rc2
#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/irq.h>
#include <asm/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

enum cpu_usage_stat {
	CPUTIME_USER,
	CPUTIME_NICE,
	CPUTIME_SYSTEM,
	CPUTIME_SOFTIRQ,
	CPUTIME_IRQ,
	CPUTIME_IDLE,
	CPUTIME_IOWAIT,
	CPUTIME_STEAL,
	CPUTIME_GUEST,
	CPUTIME_GUEST_NICE,
	NR_STATS,
};

struct kernel_cpustat {
	u64 cpustat[NR_STATS];
};

struct kernel_stat {
#ifndef CONFIG_GENERIC_HARDIRQS
	unsigned int irqs[NR_IRQS];
#endif
	unsigned long irqs_sum;
	unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu (&__get_cpu_var(kstat))
#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)

extern unsigned long long nr_context_switches(void);

#ifndef CONFIG_GENERIC_HARDIRQS

struct irq_desc;

static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
					    struct irq_desc *desc)
{
	__this_cpu_inc(kstat.irqs[irq]);
	__this_cpu_inc(kstat.irqs_sum);
}

static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).irqs[irq];
}
#else
#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);

#define kstat_incr_irqs_this_cpu(irqno, DESC)		\
do {							\
	__this_cpu_inc(*(DESC)->kstat_irqs);		\
	__this_cpu_inc(kstat.irqs_sum);			\
} while (0)

#endif

static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
#ifndef CONFIG_GENERIC_HARDIRQS
static inline unsigned int kstat_irqs(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);

	return sum;
}
#else
extern unsigned int kstat_irqs(unsigned int irq);
#endif

/*
 * Number of interrupts per cpu, since bootup
 */
static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
	return kstat_cpu(cpu).irqs_sum;
}

/*
 * Lock/unlock the current runqueue - to extract task statistics:
 */
extern unsigned long long task_delta_exec(struct task_struct *);

extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void account_process_tick(struct task_struct *tsk, int user)
{
	vtime_account_user(tsk);
}
#else
extern void account_process_tick(struct task_struct *, int user);
#endif

extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);

#endif /* _LINUX_KERNEL_STAT_H */
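
For context, a minimal sketch (not part of the header itself) of how the per-CPU accessors above are typically consumed by readers in the style of /proc/stat and /proc/softirqs. The helper names are hypothetical; only kcpustat_cpu(), kstat_softirqs_cpu(), CPUTIME_IDLE, TIMER_SOFTIRQ and for_each_possible_cpu() come from the kernel headers.

#include <linux/kernel_stat.h>
#include <linux/cpumask.h>

/* Hypothetical helper: idle time summed over all CPUs, in the
 * same units as the cpustat[] counters above. */
static u64 example_total_idle_cputime(void)
{
	u64 idle = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		idle += kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];

	return idle;
}

/* Hypothetical helper: TIMER softirqs handled since boot, summed
 * across CPUs (mirrors the loop used by kstat_irqs() above). */
static unsigned int example_timer_softirqs(void)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu);

	return sum;
}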