#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/irq.h>
#include <asm/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */
19 | ||
/*
 * Indexes into the kernel_cpustat.cpustat[] array below, one slot per
 * cputime accounting category.  NR_STATS must remain the last entry:
 * it is used as the array size.  Do not reorder — the slot positions
 * are relied upon by code that reads kernel_cpustat.
 */
enum cpu_usage_stat {
	CPUTIME_USER,
	CPUTIME_NICE,
	CPUTIME_SYSTEM,
	CPUTIME_SOFTIRQ,
	CPUTIME_IRQ,
	CPUTIME_IDLE,
	CPUTIME_IOWAIT,
	CPUTIME_STEAL,
	CPUTIME_GUEST,
	CPUTIME_GUEST_NICE,
	NR_STATS,
};
33 | ||
/*
 * Per-cpu cputime accounting counters, indexed by enum cpu_usage_stat.
 * One instance per CPU lives in the kernel_cpustat per-cpu variable.
 */
struct kernel_cpustat {
	u64 cpustat[NR_STATS];
};
37 | ||
/*
 * Per-cpu interrupt statistics: the running total of hardirqs handled
 * on this CPU (irqs_sum) plus one counter per softirq type.
 */
struct kernel_stat {
	unsigned long irqs_sum;
	unsigned int softirqs[NR_SOFTIRQS];
};
42 | ||
/* Per-cpu instances; defined in kernel/sched (one of each per CPU). */
DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu (&__get_cpu_var(kstat))
#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))

/* Remote-CPU accessors; usable from any context. */
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
51 | ||
/* System-wide context-switch count since boot (sum over all CPUs). */
extern unsigned long long nr_context_switches(void);

#include <linux/irq.h>
/* Per-IRQ interrupt count on one CPU, since boot. */
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
56 | ||
/*
 * Account one hardirq on the current CPU: bump the per-IRQ counter in
 * the irq descriptor and this CPU's running hardirq total.
 * NOTE(review): the non-preempt-safe __this_cpu_inc() variants are
 * used — presumably the caller runs in hardirq context with
 * preemption already off; confirm at the call sites.
 */
#define kstat_incr_irqs_this_cpu(irqno, DESC)		\
do {							\
	__this_cpu_inc(*(DESC)->kstat_irqs);		\
	__this_cpu_inc(kstat.irqs_sum);			\
} while (0)
62 | ||
/*
 * Account one softirq of the given type on the current CPU.
 * @irq: softirq index (< NR_SOFTIRQS — not bounds-checked here).
 * Uses __this_cpu_inc(), so the caller must not be preemptible.
 */
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}
67 | ||
/*
 * Number of softirqs of the given type handled on @cpu since boot.
 * @irq: softirq index (< NR_SOFTIRQS — not bounds-checked here).
 * Lockless read of another CPU's counter; the value may be stale.
 */
static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}
72 | ||
/*
 * Number of interrupts per specific IRQ source, since bootup
 * (summed over all CPUs).
 */
extern unsigned int kstat_irqs(unsigned int irq);
77 | ||
/*
 * Number of interrupts per cpu, since bootup.
 * Lockless read of another CPU's counter; the value may be stale.
 */
static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
	return kstat_cpu(cpu).irqs_sum;
}
85 | ||
/*
 * Lock/unlock the current runqueue - to extract task statistics:
 * (this comment applies to task_delta_exec() only).
 */
extern unsigned long long task_delta_exec(struct task_struct *);

/*
 * Cputime accounting hooks, implemented in kernel/sched/cputime.c.
 * Each charges the given cputime to the matching CPUTIME_* bucket;
 * the second cputime_t argument is the scaled-time counterpart where
 * present.  account_system_time()'s int argument selects the context
 * (hardirq/softirq/system) — see the definition for the exact values.
 */
extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);
95 | ||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * With native vtime accounting, time is charged at context boundaries,
 * so the periodic tick only needs to flush the pending user time.
 * NOTE(review): the @user flag is intentionally ignored here —
 * vtime_account_user() is called unconditionally; confirm against the
 * vtime implementation.
 */
static inline void account_process_tick(struct task_struct *tsk, int user)
{
	vtime_account_user(tsk);
}
#else
/* Tick-based accounting: charge one tick to @tsk as user or system time. */
extern void account_process_tick(struct task_struct *, int user);
#endif
104 | ||
/* Batch-account many elapsed ticks at once (e.g. after a tickless period). */
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);

#endif /* _LINUX_KERNEL_STAT_H */