xref: /linux/include/linux/kernel_stat.h (revision 6c8c1406)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/irq.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

enum cpu_usage_stat {
	CPUTIME_USER,
	CPUTIME_NICE,
	CPUTIME_SYSTEM,
	CPUTIME_SOFTIRQ,
	CPUTIME_IRQ,
	CPUTIME_IDLE,
	CPUTIME_IOWAIT,
	CPUTIME_STEAL,
	CPUTIME_GUEST,
	CPUTIME_GUEST_NICE,
#ifdef CONFIG_SCHED_CORE
	CPUTIME_FORCEIDLE,
#endif
	NR_STATS,
};

struct kernel_cpustat {
	u64 cpustat[NR_STATS];
};

struct kernel_stat {
	unsigned long irqs_sum;
	unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu this_cpu_ptr(&kstat)
#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
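
/*
 * Minimal usage sketch, not part of the original header: reading one field
 * of the current CPU's cpustat via kcpustat_this_cpu. The helper name is
 * hypothetical. As the comment above notes, preemption must be disabled
 * while the per-CPU pointer is dereferenced, hence the
 * preempt_disable()/preempt_enable() pair.
 */
static inline u64 example_this_cpu_idle_time(void)
{
	u64 idle;

	preempt_disable();
	idle = kcpustat_this_cpu->cpustat[CPUTIME_IDLE];
	preempt_enable();

	return idle;
}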

/* Total number of context switches across all CPUs since boot. */
extern unsigned long long nr_context_switches(void);

/* Per-IRQ interrupt counts: read one CPU's count, or bump this CPU's. */
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
extern void kstat_incr_irq_this_cpu(unsigned int irq);

static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}
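
/*
 * Minimal usage sketch (hypothetical helper, modelled on the per-CPU
 * summation done for /proc/softirqs): total count of one softirq type
 * across all possible CPUs, built on kstat_softirqs_cpu() above.
 */
static inline unsigned int example_softirqs_sum(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_softirqs_cpu(irq, cpu);

	return sum;
}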

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
extern unsigned int kstat_irqs_usr(unsigned int irq);

/*
 * Number of interrupts per cpu, since bootup
 */
static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
	return kstat_cpu(cpu).irqs_sum;
}
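
/*
 * Minimal usage sketch (hypothetical helper): system-wide interrupt count
 * since boot, obtained by summing the per-CPU irqs_sum accessor above.
 * This mirrors the kind of totalling /proc/stat performs for its "intr"
 * line.
 */
static inline u64 example_irqs_sum_all_cpus(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_cpu_irqs_sum(cpu);

	return sum;
}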

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern u64 kcpustat_field(struct kernel_cpustat *kcpustat,
			  enum cpu_usage_stat usage, int cpu);
extern void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu);
#else
static inline u64 kcpustat_field(struct kernel_cpustat *kcpustat,
				 enum cpu_usage_stat usage, int cpu)
{
	return kcpustat->cpustat[usage];
}

static inline void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
	*dst = kcpustat_cpu(cpu);
}

#endif
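
/*
 * Minimal usage sketch (hypothetical helper): snapshot one CPU's cpustat
 * through kcpustat_cpu_fetch() and read a single field. Under
 * CONFIG_VIRT_CPU_ACCOUNTING_GEN the out-of-line implementation also folds
 * in time that is still accruing; otherwise the inline fallback above is a
 * plain copy of the per-CPU structure.
 */
static inline u64 example_cpu_user_time(int cpu)
{
	struct kernel_cpustat snap;

	kcpustat_cpu_fetch(&snap, cpu);
	return snap.cpustat[CPUTIME_USER];
}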

/* cputime accounting, mostly implemented in kernel/sched/cputime.c */
extern void account_user_time(struct task_struct *, u64);
extern void account_guest_time(struct task_struct *, u64);
extern void account_system_time(struct task_struct *, int, u64);
extern void account_system_index_time(struct task_struct *, u64,
				      enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void account_process_tick(struct task_struct *tsk, int user)
{
	vtime_flush(tsk);
}
#else
extern void account_process_tick(struct task_struct *, int user);
#endif

extern void account_idle_ticks(unsigned long ticks);

#ifdef CONFIG_SCHED_CORE
/* Account forced idle time due to core scheduling. */
extern void __account_forceidle_time(struct task_struct *tsk, u64 delta);
#endif

#endif /* _LINUX_KERNEL_STAT_H */