// SPDX-License-Identifier: GPL-2.0-or-later
/* delayacct.c - per-task delay accounting
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 */

#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/slab.h>
#include <linux/taskstats.h>
#include <linux/time.h>
#include <linux/sysctl.h>
#include <linux/delayacct.h>
#include <linux/module.h>

int delayacct_on __read_mostly = 1;	/* Delay accounting turned on/off */
EXPORT_SYMBOL_GPL(delayacct_on);
struct kmem_cache *delayacct_cache;

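/* Handle the "nodelayacct" boot parameter: turn delay accounting off */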
static int __init delayacct_setup_disable(char *str)
{
	delayacct_on = 0;
	return 1;
}
__setup("nodelayacct", delayacct_setup_disable);

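/* Create the slab cache for per-task delay info and set up init_task */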
void delayacct_init(void)
{
	delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC|SLAB_ACCOUNT);
	delayacct_tsk_init(&init_task);
}

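/* Allocate and initialize a freshly created task's delay accounting state */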
void __delayacct_tsk_init(struct task_struct *tsk)
{
	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
	if (tsk->delays)
		raw_spin_lock_init(&tsk->delays->lock);
}

/*
 * Finish delay accounting for a statistic using its timestamp (@start),
 * accumulator (@total) and @count
 */
static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total,
			  u32 *count)
{
	s64 ns = ktime_get_ns() - *start;
	unsigned long flags;

	if (ns > 0) {
		raw_spin_lock_irqsave(lock, flags);
		*total += ns;
		(*count)++;
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

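/* Stamp the time at which the current task starts waiting on block I/O */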
void __delayacct_blkio_start(void)
{
	current->delays->blkio_start = ktime_get_ns();
}

/*
 * We cannot rely on the `current` macro, as we haven't yet switched back to
 * the process being woken.
 */
void __delayacct_blkio_end(struct task_struct *p)
{
	struct task_delay_info *delays = p->delays;
	u64 *total;
	u32 *count;

	if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
		total = &delays->swapin_delay;
		count = &delays->swapin_count;
	} else {
		total = &delays->blkio_delay;
		count = &delays->blkio_count;
	}

	delayacct_end(&delays->lock, &delays->blkio_start, total, count);
}

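/*
 * Fold this task's accumulated delays into a taskstats structure.
 * CPU stats are added without locking (see the snapshot comment below);
 * the per-task delay counters are copied under delays->lock. Each
 * running total is reset to zero on signed overflow, so a zero total
 * with a non-zero count marks an overflowed statistic.
 */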
int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
	u64 utime, stime, stimescaled, utimescaled;
	unsigned long long t2, t3;
	unsigned long flags, t1;
	s64 tmp;

	task_cputime(tsk, &utime, &stime);
	tmp = (s64)d->cpu_run_real_total;
	tmp += utime + stime;
	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
	tmp = (s64)d->cpu_scaled_run_real_total;
	tmp += utimescaled + stimescaled;
	d->cpu_scaled_run_real_total =
		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;

	/*
	 * No locking available for sched_info (and too expensive to add one)
	 * Mitigate by taking snapshot of values
	 */
	t1 = tsk->sched_info.pcount;
	t2 = tsk->sched_info.run_delay;
	t3 = tsk->se.sum_exec_runtime;

	d->cpu_count += t1;

	tmp = (s64)d->cpu_delay_total + t2;
	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

	tmp = (s64)d->cpu_run_virtual_total + t3;
	d->cpu_run_virtual_total =
		(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;

	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
	d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
	tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay;
	d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp;
	d->blkio_count += tsk->delays->blkio_count;
	d->swapin_count += tsk->delays->swapin_count;
	d->freepages_count += tsk->delays->freepages_count;
	d->thrashing_count += tsk->delays->thrashing_count;
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);

	return 0;
}

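/* Return the task's combined block I/O and swapin delay in clock ticks */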
__u64 __delayacct_blkio_ticks(struct task_struct *tsk)
{
	__u64 ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	ret = nsec_to_clock_t(tsk->delays->blkio_delay +
				tsk->delays->swapin_delay);
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
	return ret;
}

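/* Account the time the current task spends waiting for page reclaim */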
void __delayacct_freepages_start(void)
{
	current->delays->freepages_start = ktime_get_ns();
}

void __delayacct_freepages_end(void)
{
	delayacct_end(
		&current->delays->lock,
		&current->delays->freepages_start,
		&current->delays->freepages_delay,
		&current->delays->freepages_count);
}

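/* Account the time the current task spends stalled on page cache thrashing */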
void __delayacct_thrashing_start(void)
{
	current->delays->thrashing_start = ktime_get_ns();
}

void __delayacct_thrashing_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->thrashing_start,
		      &current->delays->thrashing_delay,
		      &current->delays->thrashing_count);
}