xref: /linux/kernel/sched/cputime.c (revision 89d6910c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple CPU accounting cgroup controller
 */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #include <asm/cputime.h>
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get the
 * old or the new value, with the side effect of accounting a slice of
 * irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise in place of having locks on
 * each irq in account_system_time.
 */
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);

static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
				  enum cpu_usage_stat idx)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	u64_stats_update_begin(&irqtime->sync);
	cpustat[idx] += delta;
	irqtime->total += delta;
	irqtime->tick_delta += delta;
	u64_stats_update_end(&irqtime->sync);
}
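
/*
 * Illustrative sketch (not part of this file): a cross-CPU reader pairs
 * the u64_stats_update_begin/end section above with a fetch/retry loop,
 * along the lines of irq_time_read() in kernel/sched/sched.h. The
 * function name below is for illustration only.
 */
#if 0	/* example only */
static u64 irqtime_read_example(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		/* Snapshot; retry if a writer updated the stats meanwhile. */
		seq = u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
#endif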

/*
 * Called after incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	unsigned int pc;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;
	pc = irq_count() - offset;

	/*
	 * We do not account softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with a
	 * special task that does not consume any time, but still wants
	 * to run.
	 */
	if (pc & HARDIRQ_MASK)
		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
	else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
}
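
/*
 * Note on @offset (an assumption drawn from the callers in
 * <linux/vtime.h>): on entry paths the count for the new context has
 * already been added, so passing e.g. HARDIRQ_OFFSET strips it and
 * "pc" describes the context that was interrupted; on exit paths an
 * offset of 0 leaves the current context visible, so the handler's
 * own runtime is what gets charged to CPUTIME_IRQ/CPUTIME_SOFTIRQ.
 */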

static u64 irqtime_tick_accounted(u64 maxtime)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	u64 delta;

	delta = min(irqtime->tick_delta, maxtime);
	irqtime->tick_delta -= delta;

	return delta;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

static u64 irqtime_tick_accounted(u64 dummy)
{
	return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * go ahead and touch it first. If the root cgroup is the
	 * only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cgroup_account_cputime_field(p, index, tmp);
}

/*
 * Account user CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in user space since the last update
 */
void account_user_time(struct task_struct *p, u64 cputime)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in a virtual machine since the last update
 */
void account_guest_time(struct task_struct *p, u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		task_group_account_field(p, CPUTIME_NICE, cputime);
		cpustat[CPUTIME_GUEST_NICE] += cputime;
	} else {
		task_group_account_field(p, CPUTIME_USER, cputime);
		cpustat[CPUTIME_GUEST] += cputime;
	}
}
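
/*
 * Note (explanatory, derived from the code above): guest time also
 * bumps p->utime and the USER/NICE cpustat bucket, in addition to
 * p->gtime and GUEST/GUEST_NICE, so consumers such as /proc/stat
 * should treat the guest columns as a subset of user time rather
 * than adding them on top.
 */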

/*
 * Account system CPU time to a process and desired cpustat field
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in kernel space since the last update
 * @index: the cpustat field that has to be updated
 */
void account_system_index_time(struct task_struct *p,
			       u64 cputime, enum cpu_usage_stat index)
{
	/* Add system time to process. */
	p->stime += cputime;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the CPU time spent in kernel space since the last update
 */
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	account_system_index_time(p, cputime, index);
}
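
/*
 * Illustration (based on the tick path later in this file):
 * account_process_tick() calls account_system_time(p, HARDIRQ_OFFSET,
 * cputime), discounting the timer interrupt's own hardirq level, so a
 * tick landing in plain kernel code is charged to CPUTIME_SYSTEM,
 * while one that interrupts another hardirq is charged to CPUTIME_IRQ.
 */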

/*
 * Account for involuntary wait time.
 * @cputime: the CPU time spent in involuntary wait
 */
void account_steal_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += cputime;
}

/*
 * Account for idle time.
 * @cputime: the CPU time spent in idle wait
 */
void account_idle_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += cputime;
	else
		cpustat[CPUTIME_IDLE] += cputime;
}
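
/*
 * Note (explanatory): the idle/iowait split keys off this CPU's
 * rq->nr_iowait at accounting time, i.e. idle time is reported as
 * iowait whenever at least one task that blocked on this rq sleeps in
 * io_schedule(). It is a per-CPU approximation, not a precise measure
 * of I/O wait.
 */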


#ifdef CONFIG_SCHED_CORE
/*
 * Account for forceidle time due to core scheduling.
 *
 * REQUIRES: schedstat is enabled.
 */
void __account_forceidle_time(struct task_struct *p, u64 delta)
{
	__schedstat_add(p->stats.core_forceidle_sum, delta);

	task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
}
#endif

/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think has
 * elapsed.
 */
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;
		steal = min(steal, maxtime);
		account_steal_time(steal);
		this_rq()->prev_steal_time += steal;

		return steal;
	}
#endif
	return 0;
}

/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline u64 account_other_time(u64 max)
{
	u64 accounted;

	lockdep_assert_irqs_disabled();

	accounted = steal_account_process_time(max);

	if (accounted < max)
		accounted += irqtime_tick_accounted(max - accounted);

	return accounted;
}

#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
	return t->se.sum_exec_runtime;
}
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
{
	u64 ns;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(t, &rf);
	ns = t->se.sum_exec_runtime;
	task_rq_unlock(rq, t, &rf);

	return ns;
}
#endif
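
/*
 * Note (explanatory): on 64-bit, sum_exec_runtime can be read in a
 * single atomic load. On 32-bit a u64 load can tear against a
 * concurrent update, so the task's rq lock is taken to get a stable
 * value.
 */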

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	u64 utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	/*
	 * Update current task runtime to account pending time since last
	 * scheduler action or thread_group_cputime() call. This thread group
	 * might have other running tasks on different CPUs, but updating
	 * their runtime can affect syscall performance, so we skip
	 * accounting those pending times and rely only on values updated on
	 * tick or other scheduler action.
	 */
	if (same_thread_group(current, tsk))
		(void) task_sched_runtime(current);

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += read_sum_exec_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}
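
/*
 * Minimal sketch (illustrative only) of the lockless-then-locked retry
 * pattern used above, against a hypothetical seqlock_t-protected pair.
 * The first pass reads locklessly; if a writer raced, need_seqretry()
 * forces a second pass that takes the lock outright.
 */
#if 0	/* example only */
static void read_pair_example(seqlock_t *lock, u64 *a, u64 *b,
			      const u64 *shared_a, const u64 *shared_b)
{
	int seq, nextseq = 0;

	do {
		seq = nextseq;
		read_seqbegin_or_lock(lock, &seq);
		*a = *shared_a;
		*b = *shared_b;
		/* If the lockless pass raced, lock on the next round. */
		nextseq = 1;
	} while (need_seqretry(lock, seq));
	done_seqretry(lock, seq);
}
#endif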

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the CPU time gets accounted to
 * @user_tick: is the tick from userspace
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time, as there
 * is no timer going off while we are in hardirq context, and hence we
 * may never get an opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * or softirq time, as those no longer count in task exec_runtime.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 int ticks)
{
	u64 other, cputime = TICK_NSEC * ticks;

	/*
	 * When returning from idle, many ticks can get accounted at
	 * once, including some ticks of steal, irq, and softirq time.
	 * Subtract those ticks from the amount of time accounted to
	 * idle, or potentially user or system time. Due to rounding,
	 * other time can exceed ticks occasionally.
	 */
	other = account_other_time(ULONG_MAX);
	if (other >= cputime)
		return;

	cputime -= other;

	if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in
		 * cpu_softirq_time, so we have to handle it separately
		 * here. Also, p->stime needs to be updated for ksoftirqd.
		 */
		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime);
	} else if (p == this_rq()->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime);
	} else {
		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
	}
}
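
/*
 * Worked example (assuming HZ=1000, so TICK_NSEC is roughly 1,000,000):
 * returning from idle with ticks=5 gives cputime = 5,000,000 ns. If
 * account_other_time() reports 1,200,000 ns of steal/irq/softirq time
 * that already elapsed, only the remaining 3,800,000 ns are charged to
 * the demultiplexed bucket (idle here, since p == this_rq()->idle).
 */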

static void irqtime_account_idle_ticks(int ticks)
{
	irqtime_account_process_tick(current, 0, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						int nr_ticks) { }
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
{
	unsigned int pc = irq_count() - offset;

	if (pc & HARDIRQ_OFFSET) {
		vtime_account_hardirq(tsk);
	} else if (pc & SOFTIRQ_OFFSET) {
		vtime_account_softirq(tsk);
	} else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
		   is_idle_task(tsk)) {
		vtime_account_idle(tsk);
	} else {
		vtime_account_kernel(tsk);
	}
}

void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	*ut = curr->utime;
	*st = curr->stime;
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */

/*
 * Account a single tick of CPU time.
 * @p: the process that the CPU time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	u64 cputime, steal;

	if (vtime_accounting_enabled_this_cpu())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, 1);
		return;
	}

	cputime = TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;

	if (user_tick)
		account_user_time(p, cputime);
	else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime);
	else
		account_idle_time(cputime);
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	u64 cputime, steal;

	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	cputime = ticks * TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;
	account_idle_time(cputime);
}

/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on the random scheduling
 * timeslices of a task being interrupted or not by the timer. Depending
 * on these circumstances, the number of these interrupts may over- or
 * under-estimate the real user and system cputime, matching it with
 * only variable precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	u64 rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = curr->sum_exec_runtime;

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	/*
	 * If either stime or utime are 0, assume all runtime is userspace.
	 * Once a task gets some ticks, the monotonicity code at 'update:'
	 * will ensure things converge to the observed ratio.
	 */
	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);

update:
	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}
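
/*
 * Worked example of the scaling above (numbers illustrative): suppose
 * the tick samples read stime = 2 ms and utime = 3 ms, while the
 * scheduler's precise rtime is 10 ms. Then
 *
 *   stime = 2 * 10 / (2 + 3) = 4 ms,  utime = 10 - 4 = 6 ms,
 *
 * i.e. the 2:3 tick ratio is kept but rescaled to the real runtime.
 * If prev->stime were already 5 ms, the monotonicity clamp would lift
 * stime back to 5 ms and report utime = 5 ms instead.
 */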

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	if (task_cputime(p, &cputime.utime, &cputime.stime))
		cputime.sum_exec_runtime = task_sched_runtime(p);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static u64 vtime_delta(struct vtime *vtime)
{
	unsigned long long clock;

	clock = sched_clock();
	if (clock < vtime->starttime)
		return 0;

	return clock - vtime->starttime;
}

static u64 get_vtime_delta(struct vtime *vtime)
{
	u64 delta = vtime_delta(vtime);
	u64 other;

	/*
	 * Unlike tick based timing, vtime based timing never has lost
	 * ticks, and needs no steal time accounting to make up for
	 * lost ticks. Vtime accounts a rounded version of actual
	 * elapsed time. Limit account_other_time to prevent rounding
	 * errors from causing elapsed vtime to go negative.
	 */
	other = account_other_time(delta);
	WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
	vtime->starttime += delta;

	return delta - other;
}

static void vtime_account_system(struct task_struct *tsk,
				 struct vtime *vtime)
{
	vtime->stime += get_vtime_delta(vtime);
	if (vtime->stime >= TICK_NSEC) {
		account_system_time(tsk, irq_count(), vtime->stime);
		vtime->stime = 0;
	}
}

static void vtime_account_guest(struct task_struct *tsk,
				struct vtime *vtime)
{
	vtime->gtime += get_vtime_delta(vtime);
	if (vtime->gtime >= TICK_NSEC) {
		account_guest_time(tsk, vtime->gtime);
		vtime->gtime = 0;
	}
}
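
/*
 * Note (explanatory): the two helpers above batch nohz cputime in
 * vtime->stime/gtime and only flush it into the global accounting once
 * a full tick's worth (TICK_NSEC) has accumulated, which keeps the
 * cost of fine-grained context tracking bounded.
 */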

static void __vtime_account_kernel(struct task_struct *tsk,
				   struct vtime *vtime)
{
	/* We might have scheduled out from guest path */
	if (vtime->state == VTIME_GUEST)
		vtime_account_guest(tsk, vtime);
	else
		vtime_account_system(tsk, vtime);
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	if (!vtime_delta(vtime))
		return;

	write_seqcount_begin(&vtime->seqcount);
	__vtime_account_kernel(tsk, vtime);
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	vtime->state = VTIME_USER;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime->utime += get_vtime_delta(vtime);
	if (vtime->utime >= TICK_NSEC) {
		account_user_time(tsk, vtime->utime);
		vtime->utime = 0;
	}
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;
	/*
	 * The flags must be updated under the lock, together with
	 * the vtime_starttime flush and update.
	 * That enforces the right ordering and update sequence
	 * synchronization against the reader (task_gtime())
	 * that can thus safely catch up with a tickless delta.
	 */
	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	tsk->flags |= PF_VCPU;
	vtime->state = VTIME_GUEST;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_guest(tsk, vtime);
	tsk->flags &= ~PF_VCPU;
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	account_idle_time(get_vtime_delta(&tsk->vtime));
}

void vtime_task_switch_generic(struct task_struct *prev)
{
	struct vtime *vtime = &prev->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (vtime->state == VTIME_IDLE)
		vtime_account_idle(prev);
	else
		__vtime_account_kernel(prev, vtime);
	vtime->state = VTIME_INACTIVE;
	vtime->cpu = -1;
	write_seqcount_end(&vtime->seqcount);

	vtime = &current->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (is_idle_task(current))
		vtime->state = VTIME_IDLE;
	else if (current->flags & PF_VCPU)
		vtime->state = VTIME_GUEST;
	else
		vtime->state = VTIME_SYS;
	vtime->starttime = sched_clock();
	vtime->cpu = smp_processor_id();
	write_seqcount_end(&vtime->seqcount);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	struct vtime *vtime = &t->vtime;
	unsigned long flags;

	local_irq_save(flags);
	write_seqcount_begin(&vtime->seqcount);
	vtime->state = VTIME_IDLE;
	vtime->starttime = sched_clock();
	vtime->cpu = cpu;
	write_seqcount_end(&vtime->seqcount);
	local_irq_restore(flags);
}

u64 task_gtime(struct task_struct *t)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	u64 gtime;

	if (!vtime_accounting_enabled())
		return t->gtime;

	do {
		seq = read_seqcount_begin(&vtime->seqcount);

		gtime = t->gtime;
		if (vtime->state == VTIME_GUEST)
			gtime += vtime->gtime + vtime_delta(vtime);

	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	u64 delta;
	int ret;

	if (!vtime_accounting_enabled()) {
		*utime = t->utime;
		*stime = t->stime;
		return false;
	}

	do {
		ret = false;
		seq = read_seqcount_begin(&vtime->seqcount);

		*utime = t->utime;
		*stime = t->stime;

		/* Task is sleeping or idle, nothing to add */
		if (vtime->state < VTIME_SYS)
			continue;

		ret = true;
		delta = vtime_delta(vtime);

		/*
		 * Task runs either in user (including guest) or kernel space,
		 * add pending nohz time to the right place.
		 */
		if (vtime->state == VTIME_SYS)
			*stime += vtime->stime + delta;
		else
			*utime += vtime->utime + delta;
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return ret;
}

static int vtime_state_fetch(struct vtime *vtime, int cpu)
{
	int state = READ_ONCE(vtime->state);

	/*
	 * We raced against a context switch, fetch the
	 * kcpustat task again.
	 */
	if (vtime->cpu != cpu && vtime->cpu != -1)
		return -EAGAIN;

	/*
	 * Two possible things here:
	 * 1) We are seeing the scheduling out task (prev) or any past one.
	 * 2) We are seeing the scheduling in task (next) but it hasn't
	 *    passed through vtime_task_switch() yet, so the pending
	 *    cputime of the prev task may not be flushed yet.
	 *
	 * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
	 */
	if (state == VTIME_INACTIVE)
		return -EAGAIN;

	return state;
}

static u64 kcpustat_user_vtime(struct vtime *vtime)
{
	if (vtime->state == VTIME_USER)
		return vtime->utime + vtime_delta(vtime);
	else if (vtime->state == VTIME_GUEST)
		return vtime->gtime + vtime_delta(vtime);
	return 0;
}

static int kcpustat_field_vtime(u64 *cpustat,
				struct task_struct *tsk,
				enum cpu_usage_stat usage,
				int cpu, u64 *val)
{
	struct vtime *vtime = &tsk->vtime;
	unsigned int seq;

	do {
		int state;

		seq = read_seqcount_begin(&vtime->seqcount);

		state = vtime_state_fetch(vtime, cpu);
		if (state < 0)
			return state;

		*val = cpustat[usage];

		/*
		 * Nice vs. un-niced cputime accounting may be inaccurate if
		 * the nice value has changed since the last vtime update.
		 * But a proper fix would involve interrupting the target on
		 * nice updates, which is a no-go on nohz_full (although the
		 * scheduler may still interrupt the target if rescheduling
		 * is needed...)
		 */
		switch (usage) {
		case CPUTIME_SYSTEM:
			if (state == VTIME_SYS)
				*val += vtime->stime + vtime_delta(vtime);
			break;
		case CPUTIME_USER:
			if (task_nice(tsk) <= 0)
				*val += kcpustat_user_vtime(vtime);
			break;
		case CPUTIME_NICE:
			if (task_nice(tsk) > 0)
				*val += kcpustat_user_vtime(vtime);
			break;
		case CPUTIME_GUEST:
			if (state == VTIME_GUEST && task_nice(tsk) <= 0)
				*val += vtime->gtime + vtime_delta(vtime);
			break;
		case CPUTIME_GUEST_NICE:
			if (state == VTIME_GUEST && task_nice(tsk) > 0)
				*val += vtime->gtime + vtime_delta(vtime);
			break;
		default:
			break;
		}
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return 0;
}

u64 kcpustat_field(struct kernel_cpustat *kcpustat,
		   enum cpu_usage_stat usage, int cpu)
{
	u64 *cpustat = kcpustat->cpustat;
	u64 val = cpustat[usage];
	struct rq *rq;
	int err;

	if (!vtime_accounting_enabled_cpu(cpu))
		return val;

	rq = cpu_rq(cpu);

	for (;;) {
		struct task_struct *curr;

		rcu_read_lock();
		curr = rcu_dereference(rq->curr);
		if (WARN_ON_ONCE(!curr)) {
			rcu_read_unlock();
			return cpustat[usage];
		}

		err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
		rcu_read_unlock();

		if (!err)
			return val;

		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(kcpustat_field);
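
/*
 * Usage sketch (illustrative; the function name below is hypothetical):
 * a /proc/stat-style reader could fetch one cpustat field with any
 * pending, not-yet-flushed vtime of the CPU's current task folded in.
 */
#if 0	/* example only */
static u64 get_system_time_example(int cpu)
{
	return kcpustat_field(&kcpustat_cpu(cpu), CPUTIME_SYSTEM, cpu);
}
#endif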

static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
				    const struct kernel_cpustat *src,
				    struct task_struct *tsk, int cpu)
{
	struct vtime *vtime = &tsk->vtime;
	unsigned int seq;

	do {
		u64 *cpustat;
		u64 delta;
		int state;

		seq = read_seqcount_begin(&vtime->seqcount);

		state = vtime_state_fetch(vtime, cpu);
		if (state < 0)
			return state;

		*dst = *src;
		cpustat = dst->cpustat;

		/* Task is sleeping, dead or idle, nothing to add */
		if (state < VTIME_SYS)
			continue;

		delta = vtime_delta(vtime);

		/*
		 * Task runs either in user (including guest) or kernel space,
		 * add pending nohz time to the right place.
		 */
		if (state == VTIME_SYS) {
			cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
		} else if (state == VTIME_USER) {
			if (task_nice(tsk) > 0)
				cpustat[CPUTIME_NICE] += vtime->utime + delta;
			else
				cpustat[CPUTIME_USER] += vtime->utime + delta;
		} else {
			WARN_ON_ONCE(state != VTIME_GUEST);
			if (task_nice(tsk) > 0) {
				cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
				cpustat[CPUTIME_NICE] += vtime->gtime + delta;
			} else {
				cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
				cpustat[CPUTIME_USER] += vtime->gtime + delta;
			}
		}
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return 0;
}

void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
	const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
	struct rq *rq;
	int err;

	if (!vtime_accounting_enabled_cpu(cpu)) {
		*dst = *src;
		return;
	}

	rq = cpu_rq(cpu);

	for (;;) {
		struct task_struct *curr;

		rcu_read_lock();
		curr = rcu_dereference(rq->curr);
		if (WARN_ON_ONCE(!curr)) {
			rcu_read_unlock();
			*dst = *src;
			return;
		}

		err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
		rcu_read_unlock();

		if (!err)
			return;

		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */