// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
} while (0)
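
/*
 * For example, the sysrq dump path below calls these helpers with a NULL
 * seq_file, so the same code prints to the console instead:
 *
 *	SEQ_printf(m, "cpu#%d\n", cpu);		// seq_printf() when m != NULL
 *	SEQ_printf(NULL, "cpu#%d\n", cpu);	// pr_cont() to the console
 */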

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
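
/*
 * SPLIT_NS() supplies both arguments of a "%Ld.%06ld" conversion:
 * nsec_high() yields whole milliseconds and nsec_low() the nanosecond
 * remainder, so e.g. 1234567890 ns prints as "1234.567890".
 */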

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
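
/*
 * features.h is an x-macro list of SCHED_FEAT(name, enabled) entries; an
 * entry such as SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) expands here to the
 * string "GENTLE_FAIR_SLEEPERS", and below (under CONFIG_JUMP_LABEL) to a
 * static key initialized through jump_label_key__true.
 */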

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
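
/*
 * sched_init_debug() below creates this file as <debugfs>/sched/features;
 * assuming the usual debugfs mount point, a feature is toggled with e.g.:
 *
 *	echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched/features
 */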

#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	unsigned int scaling;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';	/* kstrtouint() needs a NUL-terminated string */

	if (kstrtouint(buf, 10, &scaling))
		return -EINVAL;

	/* Reject values that would index past sched_tunable_scaling_names[] */
	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

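/*
 * The show side brackets the currently active mode, so with full preemption
 * selected the file reads: "none voluntary (full)".
 */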
static int sched_dynamic_show(struct seq_file *m, void *v)
{
	static const char * preempt_modes[] = {
		"none", "voluntary", "full"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */

__read_mostly bool sched_debug_verbose;

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct dentry *debugfs_sched;

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	return 0;
}
late_initcall(sched_init_debug);
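
/*
 * With debugfs at its usual /sys/kernel/debug mount point, the layout
 * created above is roughly:
 *
 *	sched/
 *		debug  features  verbose  preempt (CONFIG_PREEMPT_DYNAMIC)
 *		latency_ns  min_granularity_ns  wakeup_granularity_ns
 *		latency_warn_ms  latency_warn_once
 *		tunable_scaling  migration_cost_ns  nr_migrate  domains/ (CONFIG_SMP)
 *		numa_balancing/ (CONFIG_NUMA_BALANCING)
 */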

#ifdef CONFIG_SMP

static cpumask_var_t sd_sysctl_cpus;
static struct dentry *sd_dentry;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64, 0644, max_newidle_lb_cost);
	SDM(u32, 0644, busy_factor);
	SDM(u32, 0644, imbalance_pct);
	SDM(u32, 0644, cache_nice_tries);
	SDM(str, 0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
}
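
/*
 * SDM() merely token-pastes the debugfs helper name, so e.g.
 * SDM(u32, 0644, busy_factor) expands to
 * debugfs_create_u32("busy_factor", 0644, parent, &sd->busy_factor).
 */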

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry)
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_remove(debugfs_lookup(buf, sd_dentry));
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
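
	/*
	 * For example, PN(se->vruntime) expands to:
	 *
	 *	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", "se->vruntime",
	 *		   SPLIT_NS((long long)se->vruntime));
	 */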

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only one SEQ_printf_task_group_path() caller at a time can use the
 * full-length group_path[] buffer for the cgroup path. Other simultaneous
 * callers fall back to a shorter stack buffer. A "..." suffix is appended
 * to the stack buffer so that possible truncation shows up whenever the
 * output fills the entire buffer.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
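
/*
 * Note the "..." is written at a fixed offset near the end of buf[], so it
 * only becomes visible when the path string actually reaches it, i.e. when
 * the fallback buffer was (nearly) filled and truncation is plausible.
 */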
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		   p->comm, task_pid_nr(p),
		   SPLIT_NS(p->se.vruntime),
		   (long long)(p->nvcsw + p->nivcsw),
		   p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		   SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		   SPLIT_NS(p->se.sum_exec_runtime),
		   SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
		   SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
		   SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
		   SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
		   SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
		   SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
		   SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
		   cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
		   cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
		   cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
		   cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
		   cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
		   cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
		   cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
		   cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
		   cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
		   atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
		   cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
		   cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)
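
/*
 * The sizeof() test picks the format at compile time: e.g. rq->nr_running
 * (an unsigned int) goes through the %ld branch, while 64-bit fields such
 * as rq->nr_switches take the %Ld branch.
 */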

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		   init_utsname()->release,
		   (int)strcspn(init_utsname()->version, " "),
		   init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		   "sysctl_sched_tunable_scaling",
		   sysctl_sched_tunable_scaling,
		   sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
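/*
 * Worked example, assuming CPU 0 is offline and CPU 1 is the first online
 * CPU: *offset == 0 yields the header token (void *)1; *offset == 1 maps
 * to cpumask_first() == 1 and yields (void *)3, which sched_debug_show()
 * turns back into CPU 1 via (v - 2); iteration ends once cpumask_next()
 * runs past nr_cpu_ids.
 */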
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
		   task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

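		/*
		 * avg_atom: mean runtime between context switches;
		 * avg_per_cpu: mean runtime between migrations. Both
		 * report -1 while their divisor is still zero.
		 */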
		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	P(se.avg.util_est.enqueued);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
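
/*
 * This appears to be invoked from the /proc/<pid>/sched write handler in
 * fs/proc/base.c, so writing anything to that file resets the task's
 * accumulated schedstats.
 */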

void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}