// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/sched/cputime.h>

#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>

#include <trace/events/cgroup.h>

static DEFINE_SPINLOCK(cgroup_rstat_lock);
static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);

static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
{
        return per_cpu_ptr(cgrp->rstat_cpu, cpu);
}

/*
 * Helper functions for the rstat per-CPU locks (cgroup_rstat_cpu_lock).
 *
 * This makes it easier to diagnose locking issues and contention in
 * production environments. The parameter @fast_path determines which
 * tracepoints are emitted, allowing us to diagnose "flush" related
 * operations without handling high-frequency fast-path "update" events.
 */
static __always_inline
unsigned long _cgroup_rstat_cpu_lock(raw_spinlock_t *cpu_lock, int cpu,
                                     struct cgroup *cgrp, const bool fast_path)
{
        unsigned long flags;
        bool contended;

        /*
         * The _irqsave() is needed because cgroup_rstat_lock is
         * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
         * this lock with the _irq() suffix only disables interrupts on
         * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
         * interrupts on both configurations. The _irqsave() ensures
         * that interrupts are always disabled and later restored.
         */
        contended = !raw_spin_trylock_irqsave(cpu_lock, flags);
        if (contended) {
                if (fast_path)
                        trace_cgroup_rstat_cpu_lock_contended_fastpath(cgrp, cpu, contended);
                else
                        trace_cgroup_rstat_cpu_lock_contended(cgrp, cpu, contended);

                raw_spin_lock_irqsave(cpu_lock, flags);
        }

        if (fast_path)
                trace_cgroup_rstat_cpu_locked_fastpath(cgrp, cpu, contended);
        else
                trace_cgroup_rstat_cpu_locked(cgrp, cpu, contended);

        return flags;
}

static __always_inline
void _cgroup_rstat_cpu_unlock(raw_spinlock_t *cpu_lock, int cpu,
                              struct cgroup *cgrp, unsigned long flags,
                              const bool fast_path)
{
        if (fast_path)
                trace_cgroup_rstat_cpu_unlock_fastpath(cgrp, cpu, false);
        else
                trace_cgroup_rstat_cpu_unlock(cgrp, cpu, false);

        raw_spin_unlock_irqrestore(cpu_lock, flags);
}
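
/*
 * Observability sketch (illustrative only, not built into the kernel):
 * contention on these per-CPU locks can be watched from userspace through
 * the tracepoints above, e.g. with bpftrace, assuming the events are
 * exported under the "cgroup" trace event group:
 *
 *      bpftrace -e 'tracepoint:cgroup:cgroup_rstat_cpu_lock_contended { @[cpu] = count(); }'
 */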

/**
 * cgroup_rstat_updated - keep track of updated rstat_cpu
 * @cgrp: target cgroup
 * @cpu: cpu on which rstat_cpu was updated
 *
 * @cgrp's rstat_cpu on @cpu was updated. Put it on the parent's matching
 * rstat_cpu->updated_children list. See the comment on top of
 * cgroup_rstat_cpu definition for details.
 */
__bpf_kfunc void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
        raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
        unsigned long flags;

        /*
         * Speculative already-on-list test. This may race leading to
         * temporary inaccuracies, which is fine.
         *
         * Because @parent's updated_children is terminated with @parent
         * instead of NULL, we can tell whether @cgrp is on the list by
         * testing the next pointer for NULL.
         */
        if (data_race(cgroup_rstat_cpu(cgrp, cpu)->updated_next))
                return;

        flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, cgrp, true);

        /* put @cgrp and all ancestors on the corresponding updated lists */
        while (true) {
                struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
                struct cgroup *parent = cgroup_parent(cgrp);
                struct cgroup_rstat_cpu *prstatc;

                /*
                 * Both additions and removals are bottom-up. If a cgroup
                 * is already in the tree, all ancestors are.
                 */
                if (rstatc->updated_next)
                        break;

                /* Root has no parent to link it to, but mark it busy */
                if (!parent) {
                        rstatc->updated_next = cgrp;
                        break;
                }

                prstatc = cgroup_rstat_cpu(parent, cpu);
                rstatc->updated_next = prstatc->updated_children;
                prstatc->updated_children = cgrp;

                cgrp = parent;
        }

        _cgroup_rstat_cpu_unlock(cpu_lock, cpu, cgrp, flags, true);
}
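
/*
 * Usage sketch (hypothetical controller code, for illustration only): a
 * controller bumps its own per-cpu counter and then marks the cgroup as
 * having pending per-cpu deltas so a later flush will visit it:
 *
 *      static void my_ss_charge(struct cgroup *cgrp, u64 bytes)
 *      {
 *              this_cpu_add(my_ss_percpu_counter, bytes);
 *              cgroup_rstat_updated(cgrp, smp_processor_id());
 *      }
 *
 * my_ss_charge and my_ss_percpu_counter are made-up names; the per-cpu
 * delta is later folded into global counters from the subsystem's
 * css_rstat_flush() callback.
 */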

/**
 * cgroup_rstat_push_children - push children cgroups into the given list
 * @head: current head of the list (= subtree root)
 * @child: first child of the root
 * @cpu: target cpu
 * Return: A new singly linked list of cgroups to be flushed
 *
 * Iteratively traverse down the cgroup_rstat_cpu updated tree level by
 * level and push all the parents first before their next level children
 * into a singly linked list built from the tail backward like "pushing"
 * cgroups into a stack. The root is pushed by the caller.
 */
static struct cgroup *cgroup_rstat_push_children(struct cgroup *head,
                                                 struct cgroup *child, int cpu)
{
        struct cgroup *chead = child;   /* Head of child cgroup level */
        struct cgroup *ghead = NULL;    /* Head of grandchild cgroup level */
        struct cgroup *parent, *grandchild;
        struct cgroup_rstat_cpu *crstatc;

        child->rstat_flush_next = NULL;

next_level:
        while (chead) {
                child = chead;
                chead = child->rstat_flush_next;
                parent = cgroup_parent(child);

                /* updated_next is parent cgroup terminated */
                while (child != parent) {
                        child->rstat_flush_next = head;
                        head = child;
                        crstatc = cgroup_rstat_cpu(child, cpu);
                        grandchild = crstatc->updated_children;
                        if (grandchild != child) {
                                /* Push the grandchild to the next level */
                                crstatc->updated_children = child;
                                grandchild->rstat_flush_next = ghead;
                                ghead = grandchild;
                        }
                        child = crstatc->updated_next;
                        crstatc->updated_next = NULL;
                }
        }

        if (ghead) {
                chead = ghead;
                ghead = NULL;
                goto next_level;
        }
        return head;
}
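
/*
 * Illustration (hypothetical hierarchy, for clarity only): given an updated
 * tree
 *
 *      root -> A -> {B, C}
 *
 * the caller pushes root, this helper then pushes A, and finally B and C,
 * building the singly linked list from the tail. The resulting walk order is
 * therefore B/C first, then A, then root, so every cgroup is visited before
 * its parent when the list is flushed.
 */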

/**
 * cgroup_rstat_updated_list - return a list of updated cgroups to be flushed
 * @root: root of the cgroup subtree to traverse
 * @cpu: target cpu
 * Return: A singly linked list of cgroups to be flushed
 *
 * Walks the updated rstat_cpu tree on @cpu from @root. During traversal,
 * each returned cgroup is unlinked from the updated tree.
 *
 * The only ordering guarantee is that, for a parent and a child pair
 * covered by a given traversal, the child is before its parent in
 * the list.
 *
 * Note that updated_children is self terminated and points to a list of
 * child cgroups if not empty. Whereas updated_next is like a sibling link
 * within the children list and terminated by the parent cgroup. An exception
 * here is the cgroup root whose updated_next can be self terminated.
 */
static struct cgroup *cgroup_rstat_updated_list(struct cgroup *root, int cpu)
{
        raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
        struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(root, cpu);
        struct cgroup *head = NULL, *parent, *child;
        unsigned long flags;

        flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, root, false);

        /* Return NULL if this subtree is not on-list */
        if (!rstatc->updated_next)
                goto unlock_ret;

        /*
         * Unlink @root from its parent. As the updated_children list is
         * singly linked, we have to walk it to find the removal point.
         */
        parent = cgroup_parent(root);
        if (parent) {
                struct cgroup_rstat_cpu *prstatc;
                struct cgroup **nextp;

                prstatc = cgroup_rstat_cpu(parent, cpu);
                nextp = &prstatc->updated_children;
                while (*nextp != root) {
                        struct cgroup_rstat_cpu *nrstatc;

                        nrstatc = cgroup_rstat_cpu(*nextp, cpu);
                        WARN_ON_ONCE(*nextp == parent);
                        nextp = &nrstatc->updated_next;
                }
                *nextp = rstatc->updated_next;
        }

        rstatc->updated_next = NULL;

        /* Push @root to the list first before pushing the children */
        head = root;
        root->rstat_flush_next = NULL;
        child = rstatc->updated_children;
        rstatc->updated_children = root;
        if (child != root)
                head = cgroup_rstat_push_children(head, child, cpu);
unlock_ret:
        _cgroup_rstat_cpu_unlock(cpu_lock, cpu, root, flags, false);
        return head;
}

/*
 * A hook for bpf stat collectors to attach to and flush their stats.
 * Together with providing bpf kfuncs for cgroup_rstat_updated() and
 * cgroup_rstat_flush(), this enables a complete workflow where bpf progs that
 * collect cgroup stats can integrate with rstat for efficient flushing.
 *
 * A static noinline declaration here could cause the compiler to optimize away
 * the function. A global noinline declaration will keep the definition, but may
 * optimize away the callsite. Therefore, __weak is needed to ensure that the
 * call is still emitted, by telling the compiler that we don't know what the
 * function might eventually be.
 */

__bpf_hook_start();

__weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
                                     struct cgroup *parent, int cpu)
{
}

__bpf_hook_end();
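
/*
 * Usage sketch (illustrative only): a stat-collecting BPF program can hook
 * this function with an fentry program and fold its per-cpu data upward as
 * rstat walks the updated tree, along the lines of:
 *
 *      SEC("fentry/bpf_rstat_flush")
 *      int BPF_PROG(my_flusher, struct cgroup *cgrp, struct cgroup *parent,
 *                   int cpu)
 *      {
 *              // aggregate this prog's per-cpu counters for @cgrp and
 *              // propagate them into @parent's totals
 *              return 0;
 *      }
 *
 * my_flusher is a hypothetical program name; the update/flush kfuncs it
 * pairs with are registered at the bottom of this file.
 */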

/*
 * Helper functions for locking cgroup_rstat_lock.
 *
 * This makes it easier to diagnose locking issues and contention in
 * production environments. The parameter @cpu_in_loop indicates that the
 * lock was released and re-taken while collecting data from the CPUs. The
 * value -1 is used when obtaining the main lock; otherwise it is the CPU
 * number processed last.
 */
static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop)
        __acquires(&cgroup_rstat_lock)
{
        bool contended;

        contended = !spin_trylock_irq(&cgroup_rstat_lock);
        if (contended) {
                trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended);
                spin_lock_irq(&cgroup_rstat_lock);
        }
        trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended);
}

static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop)
        __releases(&cgroup_rstat_lock)
{
        trace_cgroup_rstat_unlock(cgrp, cpu_in_loop, false);
        spin_unlock_irq(&cgroup_rstat_lock);
}

/* see cgroup_rstat_flush() */
static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
        __releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
{
        int cpu;

        lockdep_assert_held(&cgroup_rstat_lock);

        for_each_possible_cpu(cpu) {
                struct cgroup *pos = cgroup_rstat_updated_list(cgrp, cpu);

                for (; pos; pos = pos->rstat_flush_next) {
                        struct cgroup_subsys_state *css;

                        cgroup_base_stat_flush(pos, cpu);
                        bpf_rstat_flush(pos, cgroup_parent(pos), cpu);

                        rcu_read_lock();
                        list_for_each_entry_rcu(css, &pos->rstat_css_list,
                                                rstat_css_node)
                                css->ss->css_rstat_flush(css, cpu);
                        rcu_read_unlock();
                }

                /* play nice and yield if necessary */
                if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
                        __cgroup_rstat_unlock(cgrp, cpu);
                        if (!cond_resched())
                                cpu_relax();
                        __cgroup_rstat_lock(cgrp, cpu);
                }
        }
}

/**
 * cgroup_rstat_flush - flush stats in @cgrp's subtree
 * @cgrp: target cgroup
 *
 * Collect all per-cpu stats in @cgrp's subtree into the global counters
 * and propagate them upwards. After this function returns, all cgroups in
 * the subtree have up-to-date ->stat.
 *
 * This also gets all cgroups in the subtree including @cgrp off the
 * ->updated_children lists.
 *
 * This function may block.
 */
__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
{
        might_sleep();

        __cgroup_rstat_lock(cgrp, -1);
        cgroup_rstat_flush_locked(cgrp);
        __cgroup_rstat_unlock(cgrp, -1);
}
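
/*
 * Usage sketch (hypothetical reader path, for illustration only): a stat
 * reader flushes pending per-cpu deltas before reporting the aggregated
 * value:
 *
 *      static u64 my_ss_read_bytes(struct cgroup *cgrp)
 *      {
 *              cgroup_rstat_flush(cgrp);
 *              return my_ss_aggregated_bytes(cgrp);
 *      }
 *
 * my_ss_read_bytes and my_ss_aggregated_bytes are made-up names. Readers
 * that need to keep further flushes out while copying several counters can
 * use cgroup_rstat_flush_hold()/cgroup_rstat_flush_release() instead, as
 * cgroup_base_stat_cputime_show() below does.
 */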

/**
 * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
 * @cgrp: target cgroup
 *
 * Flush stats in @cgrp's subtree and prevent further flushes. Must be
 * paired with cgroup_rstat_flush_release().
 *
 * This function may block.
 */
void cgroup_rstat_flush_hold(struct cgroup *cgrp)
        __acquires(&cgroup_rstat_lock)
{
        might_sleep();
        __cgroup_rstat_lock(cgrp, -1);
        cgroup_rstat_flush_locked(cgrp);
}

/**
 * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
 * @cgrp: cgroup used by tracepoint
 */
void cgroup_rstat_flush_release(struct cgroup *cgrp)
        __releases(&cgroup_rstat_lock)
{
        __cgroup_rstat_unlock(cgrp, -1);
}

int cgroup_rstat_init(struct cgroup *cgrp)
{
        int cpu;

        /* the root cgrp has rstat_cpu preallocated */
        if (!cgrp->rstat_cpu) {
                cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
                if (!cgrp->rstat_cpu)
                        return -ENOMEM;
        }

        /* ->updated_children list is self terminated */
        for_each_possible_cpu(cpu) {
                struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);

                rstatc->updated_children = cgrp;
                u64_stats_init(&rstatc->bsync);
        }

        return 0;
}

void cgroup_rstat_exit(struct cgroup *cgrp)
{
        int cpu;

        cgroup_rstat_flush(cgrp);

        /* sanity check */
        for_each_possible_cpu(cpu) {
                struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);

                if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
                    WARN_ON_ONCE(rstatc->updated_next))
                        return;
        }

        free_percpu(cgrp->rstat_cpu);
        cgrp->rstat_cpu = NULL;
}

void __init cgroup_rstat_boot(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
}

/*
 * Functions for cgroup basic resource statistics implemented on top of
 * rstat.
 */
static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
                                 struct cgroup_base_stat *src_bstat)
{
        dst_bstat->cputime.utime += src_bstat->cputime.utime;
        dst_bstat->cputime.stime += src_bstat->cputime.stime;
        dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
#ifdef CONFIG_SCHED_CORE
        dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
#endif
}

static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
                                 struct cgroup_base_stat *src_bstat)
{
        dst_bstat->cputime.utime -= src_bstat->cputime.utime;
        dst_bstat->cputime.stime -= src_bstat->cputime.stime;
        dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
#ifdef CONFIG_SCHED_CORE
        dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
#endif
}

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
{
        struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
        struct cgroup *parent = cgroup_parent(cgrp);
        struct cgroup_rstat_cpu *prstatc;
        struct cgroup_base_stat delta;
        unsigned seq;

        /* Root-level stats are sourced from system-wide CPU stats */
        if (!parent)
                return;

        /* fetch the current per-cpu values */
        do {
                seq = __u64_stats_fetch_begin(&rstatc->bsync);
                delta = rstatc->bstat;
        } while (__u64_stats_fetch_retry(&rstatc->bsync, seq));

        /* propagate per-cpu delta to cgroup and per-cpu global statistics */
        cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
        cgroup_base_stat_add(&cgrp->bstat, &delta);
        cgroup_base_stat_add(&rstatc->last_bstat, &delta);
        cgroup_base_stat_add(&rstatc->subtree_bstat, &delta);

        /* propagate cgroup and per-cpu global delta to parent (unless that's root) */
        if (cgroup_parent(parent)) {
                delta = cgrp->bstat;
                cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
                cgroup_base_stat_add(&parent->bstat, &delta);
                cgroup_base_stat_add(&cgrp->last_bstat, &delta);

                delta = rstatc->subtree_bstat;
                prstatc = cgroup_rstat_cpu(parent, cpu);
                cgroup_base_stat_sub(&delta, &rstatc->last_subtree_bstat);
                cgroup_base_stat_add(&prstatc->subtree_bstat, &delta);
                cgroup_base_stat_add(&rstatc->last_subtree_bstat, &delta);
        }
}
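
/*
 * Worked example (illustrative numbers): if this CPU's bstat.cputime.utime
 * is 700ns and last_bstat.cputime.utime is 500ns, the 200ns delta is added
 * to cgrp->bstat, and last_bstat is advanced to 700ns so the same 200ns is
 * never counted twice. The cgroup-level delta against cgrp->last_bstat is
 * then forwarded to the parent in the same fashion, which is what makes a
 * flush propagate bottom-up along the list returned by
 * cgroup_rstat_updated_list().
 */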

static struct cgroup_rstat_cpu *
cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
{
        struct cgroup_rstat_cpu *rstatc;

        rstatc = get_cpu_ptr(cgrp->rstat_cpu);
        *flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
        return rstatc;
}

static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
                                                 struct cgroup_rstat_cpu *rstatc,
                                                 unsigned long flags)
{
        u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
        cgroup_rstat_updated(cgrp, smp_processor_id());
        put_cpu_ptr(rstatc);
}

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
{
        struct cgroup_rstat_cpu *rstatc;
        unsigned long flags;

        rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
        rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
        cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
}

void __cgroup_account_cputime_field(struct cgroup *cgrp,
                                    enum cpu_usage_stat index, u64 delta_exec)
{
        struct cgroup_rstat_cpu *rstatc;
        unsigned long flags;

        rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);

        switch (index) {
        case CPUTIME_USER:
        case CPUTIME_NICE:
                rstatc->bstat.cputime.utime += delta_exec;
                break;
        case CPUTIME_SYSTEM:
        case CPUTIME_IRQ:
        case CPUTIME_SOFTIRQ:
                rstatc->bstat.cputime.stime += delta_exec;
                break;
#ifdef CONFIG_SCHED_CORE
        case CPUTIME_FORCEIDLE:
                rstatc->bstat.forceidle_sum += delta_exec;
                break;
#endif
        default:
                break;
        }

        cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
}

/*
 * Compute the cputime for the root cgroup by getting the per cpu data
 * at a global level, then categorizing the fields in a manner consistent
 * with how it is done by __cgroup_account_cputime_field for each bit of
 * cpu time attributed to a cgroup.
 */
static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
{
        struct task_cputime *cputime = &bstat->cputime;
        int i;

        memset(bstat, 0, sizeof(*bstat));
        for_each_possible_cpu(i) {
                struct kernel_cpustat kcpustat;
                u64 *cpustat = kcpustat.cpustat;
                u64 user = 0;
                u64 sys = 0;

                kcpustat_cpu_fetch(&kcpustat, i);

                user += cpustat[CPUTIME_USER];
                user += cpustat[CPUTIME_NICE];
                cputime->utime += user;

                sys += cpustat[CPUTIME_SYSTEM];
                sys += cpustat[CPUTIME_IRQ];
                sys += cpustat[CPUTIME_SOFTIRQ];
                cputime->stime += sys;

                cputime->sum_exec_runtime += user;
                cputime->sum_exec_runtime += sys;
                cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];

#ifdef CONFIG_SCHED_CORE
                bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
#endif
        }
}

static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat *bstat)
{
#ifdef CONFIG_SCHED_CORE
        u64 forceidle_time = bstat->forceidle_sum;

        do_div(forceidle_time, NSEC_PER_USEC);
        seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
#endif
}

void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
        struct cgroup *cgrp = seq_css(seq)->cgroup;
        u64 usage, utime, stime;

        if (cgroup_parent(cgrp)) {
                cgroup_rstat_flush_hold(cgrp);
                usage = cgrp->bstat.cputime.sum_exec_runtime;
                cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
                               &utime, &stime);
                cgroup_rstat_flush_release(cgrp);
        } else {
                /* cgrp->bstat of root is not actually used, reuse it */
                root_cgroup_cputime(&cgrp->bstat);
                usage = cgrp->bstat.cputime.sum_exec_runtime;
                utime = cgrp->bstat.cputime.utime;
                stime = cgrp->bstat.cputime.stime;
        }

        do_div(usage, NSEC_PER_USEC);
        do_div(utime, NSEC_PER_USEC);
        do_div(stime, NSEC_PER_USEC);

        seq_printf(seq, "usage_usec %llu\n"
                   "user_usec %llu\n"
                   "system_usec %llu\n",
                   usage, utime, stime);

        cgroup_force_idle_show(seq, &cgrp->bstat);
}

/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
BTF_KFUNCS_START(bpf_rstat_kfunc_ids)
BTF_ID_FLAGS(func, cgroup_rstat_updated)
BTF_ID_FLAGS(func, cgroup_rstat_flush, KF_SLEEPABLE)
BTF_KFUNCS_END(bpf_rstat_kfunc_ids)

static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = {
        .owner = THIS_MODULE,
        .set = &bpf_rstat_kfunc_ids,
};

static int __init bpf_rstat_kfunc_init(void)
{
        return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
                                         &bpf_rstat_kfunc_set);
}
late_initcall(bpf_rstat_kfunc_init);

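/*
 * Usage sketch (illustrative only): from a tracing BPF program, the kfuncs
 * registered above are typically declared as __ksym externs, e.g.:
 *
 *      extern void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) __ksym;
 *      extern void cgroup_rstat_flush(struct cgroup *cgrp) __ksym;
 *
 * An update-side program then calls cgroup_rstat_updated(cgrp, cpu) after
 * recording a per-cpu sample, and a sleepable read-side program calls
 * cgroup_rstat_flush(cgrp) before reporting totals, mirroring the in-kernel
 * update/flush flow above. Exact declaration details depend on the libbpf
 * version in use.
 */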