#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/irq.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

/*
 * Indexes into the per-cpu cputime buckets (struct kernel_cpustat.cpustat[]).
 *
 * NOTE(review): the ordering presumably matches the column order exported via
 * /proc/stat and friends — confirm with the consumers before reordering; new
 * entries should only ever be added before NR_STATS.
 */
enum cpu_usage_stat {
	CPUTIME_USER,
	CPUTIME_NICE,
	CPUTIME_SYSTEM,
	CPUTIME_SOFTIRQ,
	CPUTIME_IRQ,
	CPUTIME_IDLE,
	CPUTIME_IOWAIT,
	CPUTIME_STEAL,
	CPUTIME_GUEST,
	CPUTIME_GUEST_NICE,
	NR_STATS,	/* number of buckets — must stay last */
};
32 | ||
/*
 * Per-cpu accumulated CPU time, one bucket per enum cpu_usage_stat entry.
 * Instances live in the per-cpu variable kernel_cpustat (declared below);
 * values are filled in by the account_*_time() functions.
 */
struct kernel_cpustat {
	u64 cpustat[NR_STATS];
};
36 | ||
/*
 * Per-cpu interrupt statistics, kept in the per-cpu variable kstat
 * (declared below).
 */
struct kernel_stat {
	unsigned long irqs_sum;			/* total interrupts on this cpu since bootup */
	unsigned int softirqs[NR_SOFTIRQS];	/* per-softirq counts since bootup */
};
41 | ||
/* Per-cpu instances of the statistics structures defined above. */
DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu this_cpu_ptr(&kstat)
#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
/* Remote-cpu accessors: plain per_cpu() reads, no synchronization implied. */
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)

/* Total context switches across all cpus since bootup. */
extern unsigned long long nr_context_switches(void);

/* Count of @irq on @cpu; incremented via kstat_incr_irq_this_cpu(). */
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
extern void kstat_incr_irq_this_cpu(unsigned int irq);
55 | ||
/*
 * Bump this cpu's count for softirq @irq.  Uses the non-atomic
 * __this_cpu_inc() variant, so the caller must already be pinned to the
 * cpu (preemption/interrupts disabled as appropriate).
 */
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}
60 | ||
/*
 * Snapshot of @cpu's count for softirq @irq since bootup.  A plain read
 * of the remote per-cpu counter — no locking, value may be stale.
 */
static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}
65 | ||
/*
 * Number of interrupts per specific IRQ source, since bootup
 */
extern unsigned int kstat_irqs(unsigned int irq);
/* NOTE(review): _usr presumably the variant safe to call from process
 * context (takes the needed locks) — confirm against its definition. */
extern unsigned int kstat_irqs_usr(unsigned int irq);
71 | ||
/*
 * Number of interrupts per cpu, since bootup
 *
 * NOTE(review): irqs_sum is unsigned long but this returns unsigned int,
 * so the value truncates on 64-bit once the count exceeds UINT_MAX —
 * confirm callers are fine with wraparound before widening the type.
 */
static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
	return kstat_cpu(cpu).irqs_sum;
}
79 | ||
/*
 * Cputime accounting entry points: charge @cputime (u64) to the given
 * task/bucket.  These fill the kernel_cpustat buckets defined above.
 */
extern void account_user_time(struct task_struct *, u64);
extern void account_guest_time(struct task_struct *, u64);
/* The int argument selects hardirq/softirq/system context. */
extern void account_system_time(struct task_struct *, int, u64);
/* Charge directly to an explicit cpu_usage_stat bucket. */
extern void account_system_index_time(struct task_struct *, u64,
				      enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
87 | ||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * With native vtime accounting the tick does no sampling of its own:
 * it merely flushes the vtime state already accumulated for @tsk.
 * The @user flag is unused in this configuration.
 */
static inline void account_process_tick(struct task_struct *tsk, int user)
{
	vtime_flush(tsk);
}
#else
extern void account_process_tick(struct task_struct *, int user);
#endif
96 | ||
/* Charge @ticks whole ticks of idle time (batch variant of account_idle_time). */
extern void account_idle_ticks(unsigned long ticks);
98 | ||
#endif /* _LINUX_KERNEL_STAT_H */