1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kernel/sched/core.c
4 *
5 * Core kernel CPU scheduler code
6 *
7 * Copyright (C) 1991-2002 Linus Torvalds
8 * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
9 */
10 #include <linux/highmem.h>
11 #include <linux/hrtimer_api.h>
12 #include <linux/ktime_api.h>
13 #include <linux/sched/signal.h>
14 #include <linux/syscalls_api.h>
15 #include <linux/debug_locks.h>
16 #include <linux/prefetch.h>
17 #include <linux/capability.h>
18 #include <linux/pgtable_api.h>
19 #include <linux/wait_bit.h>
20 #include <linux/jiffies.h>
21 #include <linux/spinlock_api.h>
22 #include <linux/cpumask_api.h>
23 #include <linux/lockdep_api.h>
24 #include <linux/hardirq.h>
25 #include <linux/softirq.h>
26 #include <linux/refcount_api.h>
27 #include <linux/topology.h>
28 #include <linux/sched/clock.h>
29 #include <linux/sched/cond_resched.h>
30 #include <linux/sched/cputime.h>
31 #include <linux/sched/debug.h>
32 #include <linux/sched/hotplug.h>
33 #include <linux/sched/init.h>
34 #include <linux/sched/isolation.h>
35 #include <linux/sched/loadavg.h>
36 #include <linux/sched/mm.h>
37 #include <linux/sched/nohz.h>
38 #include <linux/sched/rseq_api.h>
39 #include <linux/sched/rt.h>
40
41 #include <linux/blkdev.h>
42 #include <linux/context_tracking.h>
43 #include <linux/cpuset.h>
44 #include <linux/delayacct.h>
45 #include <linux/init_task.h>
46 #include <linux/interrupt.h>
47 #include <linux/ioprio.h>
48 #include <linux/kallsyms.h>
49 #include <linux/kcov.h>
50 #include <linux/kprobes.h>
51 #include <linux/llist_api.h>
52 #include <linux/mmu_context.h>
53 #include <linux/mmzone.h>
54 #include <linux/mutex_api.h>
55 #include <linux/nmi.h>
56 #include <linux/nospec.h>
57 #include <linux/perf_event_api.h>
58 #include <linux/profile.h>
59 #include <linux/psi.h>
60 #include <linux/rcuwait_api.h>
61 #include <linux/rseq.h>
62 #include <linux/sched/wake_q.h>
63 #include <linux/scs.h>
64 #include <linux/slab.h>
65 #include <linux/syscalls.h>
66 #include <linux/vtime.h>
67 #include <linux/wait_api.h>
68 #include <linux/workqueue_api.h>
69
70 #ifdef CONFIG_PREEMPT_DYNAMIC
71 # ifdef CONFIG_GENERIC_ENTRY
72 # include <linux/entry-common.h>
73 # endif
74 #endif
75
76 #include <uapi/linux/sched/types.h>
77
78 #include <asm/irq_regs.h>
79 #include <asm/switch_to.h>
80 #include <asm/tlb.h>
81
82 #define CREATE_TRACE_POINTS
83 #include <linux/sched/rseq_api.h>
84 #include <trace/events/sched.h>
85 #include <trace/events/ipi.h>
86 #undef CREATE_TRACE_POINTS
87
88 #include "sched.h"
89 #include "stats.h"
90
91 #include "autogroup.h"
92 #include "pelt.h"
93 #include "smp.h"
94 #include "stats.h"
95
96 #include "../workqueue_internal.h"
97 #include "../../io_uring/io-wq.h"
98 #include "../smpboot.h"
99
100 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
101 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
102
103 /*
104 * Export tracepoints that act as a bare tracehook (ie: have no trace event
105 * associated with them) to allow external modules to probe them.
106 */
107 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
108 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
109 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
110 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
111 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
112 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
113 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
114 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
115 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
116 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
117 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
118 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
119
120 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
121
122 #ifdef CONFIG_SCHED_DEBUG
123 /*
124 * Debugging: various feature bits
125 *
126 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
127 * sysctl_sched_features, defined in sched.h, to allow constant propagation
128 * at compile time and compiler optimization based on the features' defaults.
129 */
130 #define SCHED_FEAT(name, enabled) \
131 (1UL << __SCHED_FEAT_##name) * enabled |
132 const_debug unsigned int sysctl_sched_features =
133 #include "features.h"
134 0;
135 #undef SCHED_FEAT
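/*
 * To illustrate how the SCHED_FEAT() x-macro above expands: features.h
 * consists of entries such as SCHED_FEAT(WARN_DOUBLE_CLOCK, false) and
 * SCHED_FEAT(NONTASK_CAPACITY, true) (default values here are illustrative),
 * so the #include between the '=' and the terminating '0;' produces a single
 * constant expression along the lines of:
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_WARN_DOUBLE_CLOCK) * false |
 *		(1UL << __SCHED_FEAT_NONTASK_CAPACITY) * true |
 *		0;
 *
 * i.e. a bitmask with one bit per feature, set when that feature defaults to
 * enabled; the trailing 0 terminates the '|' chain.
 */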
136
137 /*
138 * Print a warning if need_resched is set for the given duration (if
139 * LATENCY_WARN is enabled).
140 *
141 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
142 * per boot.
143 */
144 __read_mostly int sysctl_resched_latency_warn_ms = 100;
145 __read_mostly int sysctl_resched_latency_warn_once = 1;
146 #endif /* CONFIG_SCHED_DEBUG */
147
148 /*
149 * Number of tasks to iterate in a single balance run.
150 * Limited because this is done with IRQs disabled.
151 */
152 const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
153
154 __read_mostly int scheduler_running;
155
156 #ifdef CONFIG_SCHED_CORE
157
158 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
159
160 /* kernel prio, less is more */
161 static inline int __task_prio(const struct task_struct *p)
162 {
163 if (p->sched_class == &stop_sched_class) /* trumps deadline */
164 return -2;
165
166 if (p->dl_server)
167 return -1; /* deadline */
168
169 if (rt_or_dl_prio(p->prio))
170 return p->prio; /* [-1, 99] */
171
172 if (p->sched_class == &idle_sched_class)
173 return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
174
175 if (task_on_scx(p))
176 return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */
177
178 return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
179 }
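/*
 * The resulting kernel-prio ordering, lower is more important:
 * stop (-2) < dl_server / deadline (-1) < RT (0..99) < fair (119) <
 * ext (120) < idle (140). prio_less() below compares on this scale.
 */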
180
181 /*
182 * l(a,b)
183 * le(a,b) := !l(b,a)
184 * g(a,b) := l(b,a)
185 * ge(a,b) := !l(a,b)
186 */
187
188 /* real prio, less is less */
189 static inline bool prio_less(const struct task_struct *a,
190 const struct task_struct *b, bool in_fi)
191 {
192
193 int pa = __task_prio(a), pb = __task_prio(b);
194
195 if (-pa < -pb)
196 return true;
197
198 if (-pb < -pa)
199 return false;
200
201 if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
202 const struct sched_dl_entity *a_dl, *b_dl;
203
204 a_dl = &a->dl;
205 /*
206 * Since 'a' and 'b' can be CFS tasks served by a DL server,
207 * __task_prio() can return -1 (for DL) even for those. In that
208 * case, get to the dl_server's DL entity.
209 */
210 if (a->dl_server)
211 a_dl = a->dl_server;
212
213 b_dl = &b->dl;
214 if (b->dl_server)
215 b_dl = b->dl_server;
216
217 return !dl_time_before(a_dl->deadline, b_dl->deadline);
218 }
219
220 if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
221 return cfs_prio_less(a, b, in_fi);
222
223 #ifdef CONFIG_SCHED_CLASS_EXT
224 if (pa == MAX_RT_PRIO + MAX_NICE + 1) /* ext */
225 return scx_prio_less(a, b, in_fi);
226 #endif
227
228 return false;
229 }
230
231 static inline bool __sched_core_less(const struct task_struct *a,
232 const struct task_struct *b)
233 {
234 if (a->core_cookie < b->core_cookie)
235 return true;
236
237 if (a->core_cookie > b->core_cookie)
238 return false;
239
240 /* flip prio, so high prio is leftmost */
241 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
242 return true;
243
244 return false;
245 }
246
247 #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
248
249 static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
250 {
251 return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
252 }
253
254 static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
255 {
256 const struct task_struct *p = __node_2_sc(node);
257 unsigned long cookie = (unsigned long)key;
258
259 if (cookie < p->core_cookie)
260 return -1;
261
262 if (cookie > p->core_cookie)
263 return 1;
264
265 return 0;
266 }
267
268 void sched_core_enqueue(struct rq *rq, struct task_struct *p)
269 {
270 if (p->se.sched_delayed)
271 return;
272
273 rq->core->core_task_seq++;
274
275 if (!p->core_cookie)
276 return;
277
278 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
279 }
280
281 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
282 {
283 if (p->se.sched_delayed)
284 return;
285
286 rq->core->core_task_seq++;
287
288 if (sched_core_enqueued(p)) {
289 rb_erase(&p->core_node, &rq->core_tree);
290 RB_CLEAR_NODE(&p->core_node);
291 }
292
293 /*
294 * Migrating the last task off the cpu, with the cpu in forced idle
295 * state. Reschedule to create an accounting edge for forced idle,
296 * and re-examine whether the core is still in forced idle state.
297 */
298 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
299 rq->core->core_forceidle_count && rq->curr == rq->idle)
300 resched_curr(rq);
301 }
302
303 static int sched_task_is_throttled(struct task_struct *p, int cpu)
304 {
305 if (p->sched_class->task_is_throttled)
306 return p->sched_class->task_is_throttled(p, cpu);
307
308 return 0;
309 }
310
311 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
312 {
313 struct rb_node *node = &p->core_node;
314 int cpu = task_cpu(p);
315
316 do {
317 node = rb_next(node);
318 if (!node)
319 return NULL;
320
321 p = __node_2_sc(node);
322 if (p->core_cookie != cookie)
323 return NULL;
324
325 } while (sched_task_is_throttled(p, cpu));
326
327 return p;
328 }
329
330 /*
331 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
332 * If no suitable task is found, NULL will be returned.
333 */
334 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
335 {
336 struct task_struct *p;
337 struct rb_node *node;
338
339 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
340 if (!node)
341 return NULL;
342
343 p = __node_2_sc(node);
344 if (!sched_task_is_throttled(p, rq->cpu))
345 return p;
346
347 return sched_core_next(p, cookie);
348 }
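/*
 * The lookup above relies on the core_tree ordering established by
 * __sched_core_less(): nodes are keyed by cookie first and by priority
 * second, with higher priority to the left. rb_find_first() therefore lands
 * on the highest-priority task for @cookie, and sched_core_next() walks
 * right through the remaining same-cookie tasks in non-increasing priority
 * order until an unthrottled one is found.
 */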
349
350 /*
351 * Magic required such that:
352 *
353 * raw_spin_rq_lock(rq);
354 * ...
355 * raw_spin_rq_unlock(rq);
356 *
357 * ends up locking and unlocking the _same_ lock, and all CPUs
358 * always agree on what rq has what lock.
359 *
360 * XXX entirely possible to selectively enable cores, don't bother for now.
361 */
362
363 static DEFINE_MUTEX(sched_core_mutex);
364 static atomic_t sched_core_count;
365 static struct cpumask sched_core_mask;
366
367 static void sched_core_lock(int cpu, unsigned long *flags)
368 {
369 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
370 int t, i = 0;
371
372 local_irq_save(*flags);
373 for_each_cpu(t, smt_mask)
374 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
375 }
376
377 static void sched_core_unlock(int cpu, unsigned long *flags)
378 {
379 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
380 int t;
381
382 for_each_cpu(t, smt_mask)
383 raw_spin_unlock(&cpu_rq(t)->__lock);
384 local_irq_restore(*flags);
385 }
386
387 static void __sched_core_flip(bool enabled)
388 {
389 unsigned long flags;
390 int cpu, t;
391
392 cpus_read_lock();
393
394 /*
395 * Toggle the online cores, one by one.
396 */
397 cpumask_copy(&sched_core_mask, cpu_online_mask);
398 for_each_cpu(cpu, &sched_core_mask) {
399 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
400
401 sched_core_lock(cpu, &flags);
402
403 for_each_cpu(t, smt_mask)
404 cpu_rq(t)->core_enabled = enabled;
405
406 cpu_rq(cpu)->core->core_forceidle_start = 0;
407
408 sched_core_unlock(cpu, &flags);
409
410 cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
411 }
412
413 /*
414 * Toggle the offline CPUs.
415 */
416 for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
417 cpu_rq(cpu)->core_enabled = enabled;
418
419 cpus_read_unlock();
420 }
421
422 static void sched_core_assert_empty(void)
423 {
424 int cpu;
425
426 for_each_possible_cpu(cpu)
427 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
428 }
429
430 static void __sched_core_enable(void)
431 {
432 static_branch_enable(&__sched_core_enabled);
433 /*
434 * Ensure all previous instances of raw_spin_rq_*lock() have finished
435 * and future ones will observe !sched_core_disabled().
436 */
437 synchronize_rcu();
438 __sched_core_flip(true);
439 sched_core_assert_empty();
440 }
441
442 static void __sched_core_disable(void)
443 {
444 sched_core_assert_empty();
445 __sched_core_flip(false);
446 static_branch_disable(&__sched_core_enabled);
447 }
448
449 void sched_core_get(void)
450 {
451 if (atomic_inc_not_zero(&sched_core_count))
452 return;
453
454 mutex_lock(&sched_core_mutex);
455 if (!atomic_read(&sched_core_count))
456 __sched_core_enable();
457
458 smp_mb__before_atomic();
459 atomic_inc(&sched_core_count);
460 mutex_unlock(&sched_core_mutex);
461 }
462
463 static void __sched_core_put(struct work_struct *work)
464 {
465 if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
466 __sched_core_disable();
467 mutex_unlock(&sched_core_mutex);
468 }
469 }
470
471 void sched_core_put(void)
472 {
473 static DECLARE_WORK(_work, __sched_core_put);
474
475 /*
476 * "There can be only one"
477 *
478 * Either this is the last one, or we don't actually need to do any
479 * 'work'. If it is the last *again*, we rely on
480 * WORK_STRUCT_PENDING_BIT.
481 */
482 if (!atomic_add_unless(&sched_core_count, -1, 1))
483 schedule_work(&_work);
484 }
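/*
 * Usage sketch for the refcount above (simplified, error handling omitted):
 * each user of core scheduling, e.g. a task or cgroup that gets a core
 * cookie assigned, brackets its lifetime with sched_core_get() and
 * sched_core_put():
 *
 *	sched_core_get();		// 0 -> 1: enables core scheduling
 *	... install p->core_cookie ...
 *	...
 *	... clear p->core_cookie ...
 *	sched_core_put();		// 1 -> 0: disables it again
 *
 * The enable/disable work only happens on the 0 <-> 1 transitions, under
 * sched_core_mutex; the put side defers the disable to a workqueue so it can
 * be called from contexts that must not block on that mutex.
 */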
485
486 #else /* !CONFIG_SCHED_CORE */
487
488 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
489 static inline void
490 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
491
492 #endif /* CONFIG_SCHED_CORE */
493
494 /*
495 * Serialization rules:
496 *
497 * Lock order:
498 *
499 * p->pi_lock
500 * rq->lock
501 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
502 *
503 * rq1->lock
504 * rq2->lock where: rq1 < rq2
505 *
506 * Regular state:
507 *
508 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
509 * local CPU's rq->lock, it optionally removes the task from the runqueue and
510 * always looks at the local rq data structures to find the most eligible task
511 * to run next.
512 *
513 * Task enqueue is also under rq->lock, possibly taken from another CPU.
514 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
515 * the local CPU to avoid bouncing the runqueue state around [ see
516 * ttwu_queue_wakelist() ]
517 *
518 * Task wakeup, specifically wakeups that involve migration, are horribly
519 * complicated to avoid having to take two rq->locks.
520 *
521 * Special state:
522 *
523 * System-calls and anything external will use task_rq_lock() which acquires
524 * both p->pi_lock and rq->lock. As a consequence the state they change is
525 * stable while holding either lock:
526 *
527 * - sched_setaffinity()/
528 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
529 * - set_user_nice(): p->se.load, p->*prio
530 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
531 * p->se.load, p->rt_priority,
532 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
533 * - sched_setnuma(): p->numa_preferred_nid
534 * - sched_move_task(): p->sched_task_group
535 * - uclamp_update_active() p->uclamp*
536 *
537 * p->state <- TASK_*:
538 *
539 * is changed locklessly using set_current_state(), __set_current_state() or
540 * set_special_state(), see their respective comments, or by
541 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
542 * concurrent self.
543 *
544 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
545 *
546 * is set by activate_task() and cleared by deactivate_task(), under
547 * rq->lock. Non-zero indicates the task is runnable, the special
548 * ON_RQ_MIGRATING state is used for migration without holding both
549 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
550 *
551 * Additionally it is possible to be ->on_rq but still be considered not
552 * runnable when p->se.sched_delayed is true. These tasks are on the runqueue
553 * but will be dequeued as soon as they get picked again. See the
554 * task_is_runnable() helper.
555 *
556 * p->on_cpu <- { 0, 1 }:
557 *
558 * is set by prepare_task() and cleared by finish_task() such that it will be
559 * set before p is scheduled-in and cleared after p is scheduled-out, both
560 * under rq->lock. Non-zero indicates the task is running on its CPU.
561 *
562 * [ The astute reader will observe that it is possible for two tasks on one
563 * CPU to have ->on_cpu = 1 at the same time. ]
564 *
565 * task_cpu(p): is changed by set_task_cpu(), the rules are:
566 *
567 * - Don't call set_task_cpu() on a blocked task:
568 *
569 * We don't care what CPU we're not running on, this simplifies hotplug,
570 * the CPU assignment of blocked tasks isn't required to be valid.
571 *
572 * - for try_to_wake_up(), called under p->pi_lock:
573 *
574 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
575 *
576 * - for migration called under rq->lock:
577 * [ see task_on_rq_migrating() in task_rq_lock() ]
578 *
579 * o move_queued_task()
580 * o detach_task()
581 *
582 * - for migration called under double_rq_lock():
583 *
584 * o __migrate_swap_task()
585 * o push_rt_task() / pull_rt_task()
586 * o push_dl_task() / pull_dl_task()
587 * o dl_task_offline_migration()
588 *
589 */
590
591 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
592 {
593 raw_spinlock_t *lock;
594
595 /* Matches synchronize_rcu() in __sched_core_enable() */
596 preempt_disable();
597 if (sched_core_disabled()) {
598 raw_spin_lock_nested(&rq->__lock, subclass);
599 /* preempt_count *MUST* be > 1 */
600 preempt_enable_no_resched();
601 return;
602 }
603
604 for (;;) {
605 lock = __rq_lockp(rq);
606 raw_spin_lock_nested(lock, subclass);
607 if (likely(lock == __rq_lockp(rq))) {
608 /* preempt_count *MUST* be > 1 */
609 preempt_enable_no_resched();
610 return;
611 }
612 raw_spin_unlock(lock);
613 }
614 }
615
616 bool raw_spin_rq_trylock(struct rq *rq)
617 {
618 raw_spinlock_t *lock;
619 bool ret;
620
621 /* Matches synchronize_rcu() in __sched_core_enable() */
622 preempt_disable();
623 if (sched_core_disabled()) {
624 ret = raw_spin_trylock(&rq->__lock);
625 preempt_enable();
626 return ret;
627 }
628
629 for (;;) {
630 lock = __rq_lockp(rq);
631 ret = raw_spin_trylock(lock);
632 if (!ret || (likely(lock == __rq_lockp(rq)))) {
633 preempt_enable();
634 return ret;
635 }
636 raw_spin_unlock(lock);
637 }
638 }
639
640 void raw_spin_rq_unlock(struct rq *rq)
641 {
642 raw_spin_unlock(rq_lockp(rq));
643 }
644
645 #ifdef CONFIG_SMP
646 /*
647 * double_rq_lock - safely lock two runqueues
648 */
649 void double_rq_lock(struct rq *rq1, struct rq *rq2)
650 {
651 lockdep_assert_irqs_disabled();
652
653 if (rq_order_less(rq2, rq1))
654 swap(rq1, rq2);
655
656 raw_spin_rq_lock(rq1);
657 if (__rq_lockp(rq1) != __rq_lockp(rq2))
658 raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
659
660 double_rq_clock_clear_update(rq1, rq2);
661 }
662 #endif
663
664 /*
665 * __task_rq_lock - lock the rq @p resides on.
666 */
667 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
668 __acquires(rq->lock)
669 {
670 struct rq *rq;
671
672 lockdep_assert_held(&p->pi_lock);
673
674 for (;;) {
675 rq = task_rq(p);
676 raw_spin_rq_lock(rq);
677 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
678 rq_pin_lock(rq, rf);
679 return rq;
680 }
681 raw_spin_rq_unlock(rq);
682
683 while (unlikely(task_on_rq_migrating(p)))
684 cpu_relax();
685 }
686 }
687
688 /*
689 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
690 */
691 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
692 __acquires(p->pi_lock)
693 __acquires(rq->lock)
694 {
695 struct rq *rq;
696
697 for (;;) {
698 raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
699 rq = task_rq(p);
700 raw_spin_rq_lock(rq);
701 /*
702 * move_queued_task() task_rq_lock()
703 *
704 * ACQUIRE (rq->lock)
705 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
706 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
707 * [S] ->cpu = new_cpu [L] task_rq()
708 * [L] ->on_rq
709 * RELEASE (rq->lock)
710 *
711 * If we observe the old CPU in task_rq_lock(), the acquire of
712 * the old rq->lock will fully serialize against the stores.
713 *
714 * If we observe the new CPU in task_rq_lock(), the address
715 * dependency headed by '[L] rq = task_rq()' and the acquire
716 * will pair with the WMB to ensure we then also see migrating.
717 */
718 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
719 rq_pin_lock(rq, rf);
720 return rq;
721 }
722 raw_spin_rq_unlock(rq);
723 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
724
725 while (unlikely(task_on_rq_migrating(p)))
726 cpu_relax();
727 }
728 }
729
730 /*
731 * RQ-clock updating methods:
732 */
733
734 static void update_rq_clock_task(struct rq *rq, s64 delta)
735 {
736 /*
737 * In theory, the compiler should just see 0 here, and optimize out the call
738 * to sched_rt_avg_update. But I don't trust it...
739 */
740 s64 __maybe_unused steal = 0, irq_delta = 0;
741
742 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
743 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
744
745 /*
746 * Since irq_time is only updated on {soft,}irq_exit, we might run into
747 * this case when a previous update_rq_clock() happened inside a
748 * {soft,}IRQ region.
749 *
750 * When this happens, we stop ->clock_task and only update the
751 * prev_irq_time stamp to account for the part that fit, so that a next
752 * update will consume the rest. This ensures ->clock_task is
753 * monotonic.
754 *
755 * It does however cause some slight misattribution of {soft,}IRQ
756 * time, a more accurate solution would be to update the irq_time using
757 * the current rq->clock timestamp, except that would require using
758 * atomic ops.
759 */
760 if (irq_delta > delta)
761 irq_delta = delta;
762
763 rq->prev_irq_time += irq_delta;
764 delta -= irq_delta;
765 delayacct_irq(rq->curr, irq_delta);
766 #endif
767 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
768 if (static_key_false((&paravirt_steal_rq_enabled))) {
769 steal = paravirt_steal_clock(cpu_of(rq));
770 steal -= rq->prev_steal_time_rq;
771
772 if (unlikely(steal > delta))
773 steal = delta;
774
775 rq->prev_steal_time_rq += steal;
776 delta -= steal;
777 }
778 #endif
779
780 rq->clock_task += delta;
781
782 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
783 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
784 update_irq_load_avg(rq, irq_delta + steal);
785 #endif
786 update_rq_clock_pelt(rq, delta);
787 }
788
789 void update_rq_clock(struct rq *rq)
790 {
791 s64 delta;
792
793 lockdep_assert_rq_held(rq);
794
795 if (rq->clock_update_flags & RQCF_ACT_SKIP)
796 return;
797
798 #ifdef CONFIG_SCHED_DEBUG
799 if (sched_feat(WARN_DOUBLE_CLOCK))
800 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
801 rq->clock_update_flags |= RQCF_UPDATED;
802 #endif
803
804 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
805 if (delta < 0)
806 return;
807 rq->clock += delta;
808 update_rq_clock_task(rq, delta);
809 }
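/*
 * A short summary of the clock hierarchy maintained above: rq->clock is raw
 * sched_clock_cpu() time, advanced on every update_rq_clock(); rq->clock_task
 * is rq->clock minus time consumed by IRQ handling (IRQ_TIME_ACCOUNTING) and
 * stolen by the hypervisor (PARAVIRT_TIME_ACCOUNTING), and is what task
 * runtime accounting sees; update_rq_clock_pelt() derives the clock used for
 * PELT averages from it. E.g. if 10ms of wall time pass of which 2ms were
 * spent in hardirq context, rq->clock advances by 10ms but rq->clock_task by
 * only 8ms.
 */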
810
811 #ifdef CONFIG_SCHED_HRTICK
812 /*
813 * Use HR-timers to deliver accurate preemption points.
814 */
815
816 static void hrtick_clear(struct rq *rq)
817 {
818 if (hrtimer_active(&rq->hrtick_timer))
819 hrtimer_cancel(&rq->hrtick_timer);
820 }
821
822 /*
823 * High-resolution timer tick.
824 * Runs from hardirq context with interrupts disabled.
825 */
826 static enum hrtimer_restart hrtick(struct hrtimer *timer)
827 {
828 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
829 struct rq_flags rf;
830
831 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
832
833 rq_lock(rq, &rf);
834 update_rq_clock(rq);
835 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
836 rq_unlock(rq, &rf);
837
838 return HRTIMER_NORESTART;
839 }
840
841 #ifdef CONFIG_SMP
842
843 static void __hrtick_restart(struct rq *rq)
844 {
845 struct hrtimer *timer = &rq->hrtick_timer;
846 ktime_t time = rq->hrtick_time;
847
848 hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
849 }
850
851 /*
852 * called from hardirq (IPI) context
853 */
854 static void __hrtick_start(void *arg)
855 {
856 struct rq *rq = arg;
857 struct rq_flags rf;
858
859 rq_lock(rq, &rf);
860 __hrtick_restart(rq);
861 rq_unlock(rq, &rf);
862 }
863
864 /*
865 * Called to set the hrtick timer state.
866 *
867 * called with rq->lock held and IRQs disabled
868 */
869 void hrtick_start(struct rq *rq, u64 delay)
870 {
871 struct hrtimer *timer = &rq->hrtick_timer;
872 s64 delta;
873
874 /*
875 * Don't schedule slices shorter than 10000ns, that just
876 * doesn't make sense and can cause timer DoS.
877 */
878 delta = max_t(s64, delay, 10000LL);
879 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
880
881 if (rq == this_rq())
882 __hrtick_restart(rq);
883 else
884 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
885 }
886
887 #else
888 /*
889 * Called to set the hrtick timer state.
890 *
891 * called with rq->lock held and IRQs disabled
892 */
893 void hrtick_start(struct rq *rq, u64 delay)
894 {
895 /*
896 * Don't schedule slices shorter than 10000ns, that just
897 * doesn't make sense. Rely on vruntime for fairness.
898 */
899 delay = max_t(u64, delay, 10000LL);
900 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
901 HRTIMER_MODE_REL_PINNED_HARD);
902 }
903
904 #endif /* CONFIG_SMP */
905
906 static void hrtick_rq_init(struct rq *rq)
907 {
908 #ifdef CONFIG_SMP
909 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
910 #endif
911 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
912 rq->hrtick_timer.function = hrtick;
913 }
914 #else /* CONFIG_SCHED_HRTICK */
915 static inline void hrtick_clear(struct rq *rq)
916 {
917 }
918
919 static inline void hrtick_rq_init(struct rq *rq)
920 {
921 }
922 #endif /* CONFIG_SCHED_HRTICK */
923
924 /*
925 * try_cmpxchg based fetch_or() macro so it works for different integer types:
926 */
927 #define fetch_or(ptr, mask) \
928 ({ \
929 typeof(ptr) _ptr = (ptr); \
930 typeof(mask) _mask = (mask); \
931 typeof(*_ptr) _val = *_ptr; \
932 \
933 do { \
934 } while (!try_cmpxchg(_ptr, &_val, _val | _mask)); \
935 _val; \
936 })
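/*
 * Example expansion, as used by set_nr_and_not_polling() below:
 *
 *	fetch_or(&ti->flags, _TIF_NEED_RESCHED)
 *
 * atomically ORs _TIF_NEED_RESCHED into ti->flags and evaluates to the value
 * the flags had *before* the OR, so the caller can check whether
 * _TIF_POLLING_NRFLAG was set at that moment. The try_cmpxchg() loop simply
 * retries until the read-modify-write succeeds without the word changing
 * underneath us.
 */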
937
938 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
939 /*
940 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
941 * this avoids any races wrt polling state changes and thereby avoids
942 * spurious IPIs.
943 */
944 static inline bool set_nr_and_not_polling(struct task_struct *p)
945 {
946 struct thread_info *ti = task_thread_info(p);
947 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
948 }
949
950 /*
951 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
952 *
953 * If this returns true, then the idle task promises to call
954 * sched_ttwu_pending() and reschedule soon.
955 */
956 static bool set_nr_if_polling(struct task_struct *p)
957 {
958 struct thread_info *ti = task_thread_info(p);
959 typeof(ti->flags) val = READ_ONCE(ti->flags);
960
961 do {
962 if (!(val & _TIF_POLLING_NRFLAG))
963 return false;
964 if (val & _TIF_NEED_RESCHED)
965 return true;
966 } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
967
968 return true;
969 }
970
971 #else
972 static inline bool set_nr_and_not_polling(struct task_struct *p)
973 {
974 set_tsk_need_resched(p);
975 return true;
976 }
977
978 #ifdef CONFIG_SMP
979 static inline bool set_nr_if_polling(struct task_struct *p)
980 {
981 return false;
982 }
983 #endif
984 #endif
985
986 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
987 {
988 struct wake_q_node *node = &task->wake_q;
989
990 /*
991 * Atomically grab the task, if ->wake_q is !nil already it means
992 * it's already queued (either by us or someone else) and will get the
993 * wakeup due to that.
994 *
995 * In order to ensure that a pending wakeup will observe our pending
996 * state, even in the failed case, an explicit smp_mb() must be used.
997 */
998 smp_mb__before_atomic();
999 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
1000 return false;
1001
1002 /*
1003 * The head is context local, there can be no concurrency.
1004 */
1005 *head->lastp = node;
1006 head->lastp = &node->next;
1007 return true;
1008 }
1009
1010 /**
1011 * wake_q_add() - queue a wakeup for 'later' waking.
1012 * @head: the wake_q_head to add @task to
1013 * @task: the task to queue for 'later' wakeup
1014 *
1015 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1016 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1017 * instantly.
1018 *
1019 * This function must be used as-if it were wake_up_process(); IOW the task
1020 * must be ready to be woken at this location.
1021 */
1022 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
1023 {
1024 if (__wake_q_add(head, task))
1025 get_task_struct(task);
1026 }
1027
1028 /**
1029 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1030 * @head: the wake_q_head to add @task to
1031 * @task: the task to queue for 'later' wakeup
1032 *
1033 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1034 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1035 * instantly.
1036 *
1037 * This function must be used as-if it were wake_up_process(); IOW the task
1038 * must be ready to be woken at this location.
1039 *
1040 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1041 * that already hold reference to @task can call the 'safe' version and trust
1042 * wake_q to do the right thing depending whether or not the @task is already
1043 * queued for wakeup.
1044 */
1045 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
1046 {
1047 if (!__wake_q_add(head, task))
1048 put_task_struct(task);
1049 }
1050
1051 void wake_up_q(struct wake_q_head *head)
1052 {
1053 struct wake_q_node *node = head->first;
1054
1055 while (node != WAKE_Q_TAIL) {
1056 struct task_struct *task;
1057
1058 task = container_of(node, struct task_struct, wake_q);
1059 /* Task can safely be re-inserted now: */
1060 node = node->next;
1061 task->wake_q.next = NULL;
1062
1063 /*
1064 * wake_up_process() executes a full barrier, which pairs with
1065 * the queueing in wake_q_add() so as not to miss wakeups.
1066 */
1067 wake_up_process(task);
1068 put_task_struct(task);
1069 }
1070 }
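/*
 * The usual wake_q pattern is to collect wakeups while holding a lock and to
 * issue them only once the lock has been dropped, so the woken task cannot
 * immediately block on (or preempt us while we still hold) that lock. A
 * minimal sketch, with 'some_lock' standing in for the caller's lock:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);	// grabs a task reference
 *	raw_spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);		// performs the wakeups, drops the references
 */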
1071
1072 /*
1073 * resched_curr - mark rq's current task 'to be rescheduled now'.
1074 *
1075 * On UP this means the setting of the need_resched flag, on SMP it
1076 * might also involve a cross-CPU call to trigger the scheduler on
1077 * the target CPU.
1078 */
1079 void resched_curr(struct rq *rq)
1080 {
1081 struct task_struct *curr = rq->curr;
1082 int cpu;
1083
1084 lockdep_assert_rq_held(rq);
1085
1086 if (test_tsk_need_resched(curr))
1087 return;
1088
1089 cpu = cpu_of(rq);
1090
1091 if (cpu == smp_processor_id()) {
1092 set_tsk_need_resched(curr);
1093 set_preempt_need_resched();
1094 return;
1095 }
1096
1097 if (set_nr_and_not_polling(curr))
1098 smp_send_reschedule(cpu);
1099 else
1100 trace_sched_wake_idle_without_ipi(cpu);
1101 }
1102
1103 void resched_cpu(int cpu)
1104 {
1105 struct rq *rq = cpu_rq(cpu);
1106 unsigned long flags;
1107
1108 raw_spin_rq_lock_irqsave(rq, flags);
1109 if (cpu_online(cpu) || cpu == smp_processor_id())
1110 resched_curr(rq);
1111 raw_spin_rq_unlock_irqrestore(rq, flags);
1112 }
1113
1114 #ifdef CONFIG_SMP
1115 #ifdef CONFIG_NO_HZ_COMMON
1116 /*
1117 * In the semi idle case, use the nearest busy CPU for migrating timers
1118 * from an idle CPU. This is good for power-savings.
1119 *
1120 * We don't do similar optimization for completely idle system, as
1121 * selecting an idle CPU will add more delays to the timers than intended
1122 * (as that CPU's timer base may not be up to date wrt jiffies etc).
1123 */
1124 int get_nohz_timer_target(void)
1125 {
1126 int i, cpu = smp_processor_id(), default_cpu = -1;
1127 struct sched_domain *sd;
1128 const struct cpumask *hk_mask;
1129
1130 if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
1131 if (!idle_cpu(cpu))
1132 return cpu;
1133 default_cpu = cpu;
1134 }
1135
1136 hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
1137
1138 guard(rcu)();
1139
1140 for_each_domain(cpu, sd) {
1141 for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
1142 if (cpu == i)
1143 continue;
1144
1145 if (!idle_cpu(i))
1146 return i;
1147 }
1148 }
1149
1150 if (default_cpu == -1)
1151 default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
1152
1153 return default_cpu;
1154 }
1155
1156 /*
1157 * When add_timer_on() enqueues a timer into the timer wheel of an
1158 * idle CPU then this timer might expire before the next timer event
1159 * which is scheduled to wake up that CPU. In case of a completely
1160 * idle system the next event might even be infinite time into the
1161 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1162 * leaves the inner idle loop so the newly added timer is taken into
1163 * account when the CPU goes back to idle and evaluates the timer
1164 * wheel for the next timer event.
1165 */
1166 static void wake_up_idle_cpu(int cpu)
1167 {
1168 struct rq *rq = cpu_rq(cpu);
1169
1170 if (cpu == smp_processor_id())
1171 return;
1172
1173 /*
1174 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
1175 * part of the idle loop. This forces an exit from the idle loop
1176 * and a round trip to schedule(). Now this could be optimized
1177 * because a simple new idle loop iteration is enough to
1178 * re-evaluate the next tick. Provided some re-ordering of tick
1179 * nohz functions that would need to follow TIF_POLLING_NRFLAG
1180 * clearing:
1181 *
1182 * - On most architectures, a simple fetch_or on ti::flags with a
1183 * "0" value would be enough to know if an IPI needs to be sent.
1184 *
1185 * - x86 needs to perform a last need_resched() check between
1186 * monitor and mwait which doesn't take timers into account.
1187 * There a dedicated TIF_TIMER flag would be required to
1188 * fetch_or here and be checked along with TIF_NEED_RESCHED
1189 * before mwait().
1190 *
1191 * However, remote timer enqueue is not such a frequent event
1192 * and testing of the above solutions didn't appear to report
1193 * much benefit.
1194 */
1195 if (set_nr_and_not_polling(rq->idle))
1196 smp_send_reschedule(cpu);
1197 else
1198 trace_sched_wake_idle_without_ipi(cpu);
1199 }
1200
1201 static bool wake_up_full_nohz_cpu(int cpu)
1202 {
1203 /*
1204 * We just need the target to call irq_exit() and re-evaluate
1205 * the next tick. The nohz full kick at least implies that.
1206 * If needed we can still optimize that later with an
1207 * empty IRQ.
1208 */
1209 if (cpu_is_offline(cpu))
1210 return true; /* Don't try to wake offline CPUs. */
1211 if (tick_nohz_full_cpu(cpu)) {
1212 if (cpu != smp_processor_id() ||
1213 tick_nohz_tick_stopped())
1214 tick_nohz_full_kick_cpu(cpu);
1215 return true;
1216 }
1217
1218 return false;
1219 }
1220
1221 /*
1222 * Wake up the specified CPU. If the CPU is going offline, it is the
1223 * caller's responsibility to deal with the lost wakeup, for example,
1224 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
1225 */
1226 void wake_up_nohz_cpu(int cpu)
1227 {
1228 if (!wake_up_full_nohz_cpu(cpu))
1229 wake_up_idle_cpu(cpu);
1230 }
1231
1232 static void nohz_csd_func(void *info)
1233 {
1234 struct rq *rq = info;
1235 int cpu = cpu_of(rq);
1236 unsigned int flags;
1237
1238 /*
1239 * Release the rq::nohz_csd.
1240 */
1241 flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
1242 WARN_ON(!(flags & NOHZ_KICK_MASK));
1243
1244 rq->idle_balance = idle_cpu(cpu);
1245 if (rq->idle_balance && !need_resched()) {
1246 rq->nohz_idle_balance = flags;
1247 raise_softirq_irqoff(SCHED_SOFTIRQ);
1248 }
1249 }
1250
1251 #endif /* CONFIG_NO_HZ_COMMON */
1252
1253 #ifdef CONFIG_NO_HZ_FULL
1254 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
1255 {
1256 if (rq->nr_running != 1)
1257 return false;
1258
1259 if (p->sched_class != &fair_sched_class)
1260 return false;
1261
1262 if (!task_on_rq_queued(p))
1263 return false;
1264
1265 return true;
1266 }
1267
1268 bool sched_can_stop_tick(struct rq *rq)
1269 {
1270 int fifo_nr_running;
1271
1272 /* Deadline tasks, even if single, need the tick */
1273 if (rq->dl.dl_nr_running)
1274 return false;
1275
1276 /*
1277 * If there are more than one RR tasks, we need the tick to affect the
1278 * actual RR behaviour.
1279 */
1280 if (rq->rt.rr_nr_running) {
1281 if (rq->rt.rr_nr_running == 1)
1282 return true;
1283 else
1284 return false;
1285 }
1286
1287 /*
1288 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
1289 * forced preemption between FIFO tasks.
1290 */
1291 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1292 if (fifo_nr_running)
1293 return true;
1294
1295 /*
1296 * If there are no DL, RR or FIFO tasks, there must only be CFS or SCX tasks
1297 * left. For CFS, if there's more than one we need the tick for
1298 * involuntary preemption. For SCX, ask.
1299 */
1300 if (scx_enabled() && !scx_can_stop_tick(rq))
1301 return false;
1302
1303 if (rq->cfs.nr_running > 1)
1304 return false;
1305
1306 /*
1307 * If there is one task and it has CFS runtime bandwidth constraints
1308 * and it's on the cpu now we don't want to stop the tick.
1309 * This check prevents clearing the bit if a newly enqueued task here is
1310 * dequeued by migrating while the constrained task continues to run.
1311 * E.g. going from 2->1 without going through pick_next_task().
1312 */
1313 if (__need_bw_check(rq, rq->curr)) {
1314 if (cfs_task_bw_constrained(rq->curr))
1315 return false;
1316 }
1317
1318 return true;
1319 }
1320 #endif /* CONFIG_NO_HZ_FULL */
1321 #endif /* CONFIG_SMP */
1322
1323 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
1324 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
1325 /*
1326 * Iterate task_group tree rooted at *from, calling @down when first entering a
1327 * node and @up when leaving it for the final time.
1328 *
1329 * Caller must hold rcu_lock or sufficient equivalent.
1330 */
1331 int walk_tg_tree_from(struct task_group *from,
1332 tg_visitor down, tg_visitor up, void *data)
1333 {
1334 struct task_group *parent, *child;
1335 int ret;
1336
1337 parent = from;
1338
1339 down:
1340 ret = (*down)(parent, data);
1341 if (ret)
1342 goto out;
1343 list_for_each_entry_rcu(child, &parent->children, siblings) {
1344 parent = child;
1345 goto down;
1346
1347 up:
1348 continue;
1349 }
1350 ret = (*up)(parent, data);
1351 if (ret || parent == from)
1352 goto out;
1353
1354 child = parent;
1355 parent = parent->parent;
1356 if (parent)
1357 goto up;
1358 out:
1359 return ret;
1360 }
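/*
 * The goto-based loop above is an iterative depth-first walk: @down is
 * invoked pre-order when a group is first entered and @up post-order when it
 * is left for the last time. For a hierarchy root -> {A -> {A1, A2}, B} the
 * visitors run as:
 *
 *	down(root), down(A), down(A1), up(A1), down(A2), up(A2),
 *	up(A), down(B), up(B), up(root)
 *
 * and a non-zero return from either visitor aborts the walk and is returned
 * to the caller.
 */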
1361
1362 int tg_nop(struct task_group *tg, void *data)
1363 {
1364 return 0;
1365 }
1366 #endif
1367
1368 void set_load_weight(struct task_struct *p, bool update_load)
1369 {
1370 int prio = p->static_prio - MAX_RT_PRIO;
1371 struct load_weight lw;
1372
1373 if (task_has_idle_policy(p)) {
1374 lw.weight = scale_load(WEIGHT_IDLEPRIO);
1375 lw.inv_weight = WMULT_IDLEPRIO;
1376 } else {
1377 lw.weight = scale_load(sched_prio_to_weight[prio]);
1378 lw.inv_weight = sched_prio_to_wmult[prio];
1379 }
1380
1381 /*
1382 * SCHED_OTHER tasks have to update their load when changing their
1383 * weight
1384 */
1385 if (update_load && p->sched_class->reweight_task)
1386 p->sched_class->reweight_task(task_rq(p), p, &lw);
1387 else
1388 p->se.load = lw;
1389 }
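/*
 * The weights used above come from sched_prio_to_weight[], which maps the 40
 * nice levels onto multiplicative weights centred on NICE_0_LOAD (1024 at
 * nice 0): each nice step scales the weight by roughly 1.25x, from 88761 at
 * nice -20 down to 15 at nice 19, with sched_prio_to_wmult[] holding
 * pre-computed inverses to avoid divisions in the fast path. SCHED_IDLE
 * tasks get the fixed WEIGHT_IDLEPRIO / WMULT_IDLEPRIO pair instead.
 */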
1390
1391 #ifdef CONFIG_UCLAMP_TASK
1392 /*
1393 * Serializes updates of utilization clamp values
1394 *
1395 * The (slow-path) user-space triggers utilization clamp value updates which
1396 * can require updates on (fast-path) scheduler's data structures used to
1397 * support enqueue/dequeue operations.
1398 * While the per-CPU rq lock protects fast-path update operations, user-space
1399 * requests are serialized using a mutex to reduce the risk of conflicting
1400 * updates or API abuses.
1401 */
1402 static DEFINE_MUTEX(uclamp_mutex);
1403
1404 /* Max allowed minimum utilization */
1405 static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
1406
1407 /* Max allowed maximum utilization */
1408 static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
1409
1410 /*
1411 * By default RT tasks run at the maximum performance point/capacity of the
1412 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1413 * SCHED_CAPACITY_SCALE.
1414 *
1415 * This knob allows admins to change the default behavior when uclamp is being
1416 * used. In battery powered devices, particularly, running at the maximum
1417 * capacity and frequency will increase energy consumption and shorten the
1418 * battery life.
1419 *
1420 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
1421 *
1422 * This knob will not override the system default sched_util_clamp_min defined
1423 * above.
1424 */
1425 unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
1426
1427 /* All clamps are required to be less or equal than these values */
1428 static struct uclamp_se uclamp_default[UCLAMP_CNT];
1429
1430 /*
1431 * This static key is used to reduce the uclamp overhead in the fast path. It
1432 * primarily disables the call to uclamp_rq_{inc, dec}() in
1433 * enqueue/dequeue_task().
1434 *
1435 * This allows users to continue to enable uclamp in their kernel config with
1436 * minimum uclamp overhead in the fast path.
1437 *
1438 * As soon as userspace modifies any of the uclamp knobs, the static key is
1439 * enabled, since we then have actual users that make use of uclamp
1440 * functionality.
1441 *
1442 * The knobs that would enable this static key are:
1443 *
1444 * * A task modifying its uclamp value with sched_setattr().
1445 * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1446 * * An admin modifying the cgroup cpu.uclamp.{min, max}
1447 */
1448 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
1449
1450 static inline unsigned int
1451 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1452 unsigned int clamp_value)
1453 {
1454 /*
1455 * Avoid blocked utilization pushing up the frequency when we go
1456 * idle (which drops the max-clamp) by retaining the last known
1457 * max-clamp.
1458 */
1459 if (clamp_id == UCLAMP_MAX) {
1460 rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1461 return clamp_value;
1462 }
1463
1464 return uclamp_none(UCLAMP_MIN);
1465 }
1466
1467 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1468 unsigned int clamp_value)
1469 {
1470 /* Reset max-clamp retention only on idle exit */
1471 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1472 return;
1473
1474 uclamp_rq_set(rq, clamp_id, clamp_value);
1475 }
1476
1477 static inline
1478 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1479 unsigned int clamp_value)
1480 {
1481 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1482 int bucket_id = UCLAMP_BUCKETS - 1;
1483
1484 /*
1485 * Since both min and max clamps are max aggregated, find the
1486 * top most bucket with tasks in.
1487 */
1488 for ( ; bucket_id >= 0; bucket_id--) {
1489 if (!bucket[bucket_id].tasks)
1490 continue;
1491 return bucket[bucket_id].value;
1492 }
1493
1494 /* No tasks -- default clamp values */
1495 return uclamp_idle_value(rq, clamp_id, clamp_value);
1496 }
1497
1498 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1499 {
1500 unsigned int default_util_min;
1501 struct uclamp_se *uc_se;
1502
1503 lockdep_assert_held(&p->pi_lock);
1504
1505 uc_se = &p->uclamp_req[UCLAMP_MIN];
1506
1507 /* Only sync if user didn't override the default */
1508 if (uc_se->user_defined)
1509 return;
1510
1511 default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1512 uclamp_se_set(uc_se, default_util_min, false);
1513 }
1514
1515 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1516 {
1517 if (!rt_task(p))
1518 return;
1519
1520 /* Protect updates to p->uclamp_* */
1521 guard(task_rq_lock)(p);
1522 __uclamp_update_util_min_rt_default(p);
1523 }
1524
1525 static inline struct uclamp_se
1526 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1527 {
1528 /* Copy by value as we could modify it */
1529 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1530 #ifdef CONFIG_UCLAMP_TASK_GROUP
1531 unsigned int tg_min, tg_max, value;
1532
1533 /*
1534 * Tasks in autogroups or root task group will be
1535 * restricted by system defaults.
1536 */
1537 if (task_group_is_autogroup(task_group(p)))
1538 return uc_req;
1539 if (task_group(p) == &root_task_group)
1540 return uc_req;
1541
1542 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1543 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1544 value = uc_req.value;
1545 value = clamp(value, tg_min, tg_max);
1546 uclamp_se_set(&uc_req, value, false);
1547 #endif
1548
1549 return uc_req;
1550 }
1551
1552 /*
1553 * The effective clamp bucket index of a task depends on, by increasing
1554 * priority:
1555 * - the task specific clamp value, when explicitly requested from userspace
1556 * - the task group effective clamp value, for tasks neither in the root
1557 * group nor in an autogroup
1558 * - the system default clamp value, defined by the sysadmin
1559 */
1560 static inline struct uclamp_se
1561 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1562 {
1563 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1564 struct uclamp_se uc_max = uclamp_default[clamp_id];
1565
1566 /* System default restrictions always apply */
1567 if (unlikely(uc_req.value > uc_max.value))
1568 return uc_max;
1569
1570 return uc_req;
1571 }
1572
1573 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1574 {
1575 struct uclamp_se uc_eff;
1576
1577 /* Task currently refcounted: use back-annotated (effective) value */
1578 if (p->uclamp[clamp_id].active)
1579 return (unsigned long)p->uclamp[clamp_id].value;
1580
1581 uc_eff = uclamp_eff_get(p, clamp_id);
1582
1583 return (unsigned long)uc_eff.value;
1584 }
1585
1586 /*
1587 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1588 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1589 * updates the rq's clamp value if required.
1590 *
1591 * Tasks can have a task-specific value requested from user-space; track
1592 * within each bucket the maximum value for the tasks refcounted in it.
1593 * This "local max aggregation" allows tracking the exact "requested" value
1594 * for each bucket when all its RUNNABLE tasks require the same clamp.
1595 */
1596 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1597 enum uclamp_id clamp_id)
1598 {
1599 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1600 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1601 struct uclamp_bucket *bucket;
1602
1603 lockdep_assert_rq_held(rq);
1604
1605 /* Update task effective clamp */
1606 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1607
1608 bucket = &uc_rq->bucket[uc_se->bucket_id];
1609 bucket->tasks++;
1610 uc_se->active = true;
1611
1612 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1613
1614 /*
1615 * Local max aggregation: rq buckets always track the max
1616 * "requested" clamp value of its RUNNABLE tasks.
1617 */
1618 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1619 bucket->value = uc_se->value;
1620
1621 if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1622 uclamp_rq_set(rq, clamp_id, uc_se->value);
1623 }
1624
1625 /*
1626 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1627 * is released. If this is the last task reference counting the rq's max
1628 * active clamp value, then the rq's clamp value is updated.
1629 *
1630 * Both refcounted tasks and rq's cached clamp values are expected to be
1631 * always valid. If it's detected they are not, as defensive programming,
1632 * enforce the expected state and warn.
1633 */
1634 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1635 enum uclamp_id clamp_id)
1636 {
1637 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1638 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1639 struct uclamp_bucket *bucket;
1640 unsigned int bkt_clamp;
1641 unsigned int rq_clamp;
1642
1643 lockdep_assert_rq_held(rq);
1644
1645 /*
1646 * If sched_uclamp_used was enabled after task @p was enqueued,
1647 * we could end up with unbalanced call to uclamp_rq_dec_id().
1648 *
1649 * In this case the uc_se->active flag should be false since no uclamp
1650 * accounting was performed at enqueue time and we can just return
1651 * here.
1652 *
1653 * Need to be careful of the following enqueue/dequeue ordering
1654 * problem too
1655 *
1656 * enqueue(taskA)
1657 * // sched_uclamp_used gets enabled
1658 * enqueue(taskB)
1659 * dequeue(taskA)
1660 * // Must not decrement bucket->tasks here
1661 * dequeue(taskB)
1662 *
1663 * where we could end up with stale data in uc_se and
1664 * bucket[uc_se->bucket_id].
1665 *
1666 * The following check here eliminates the possibility of such race.
1667 */
1668 if (unlikely(!uc_se->active))
1669 return;
1670
1671 bucket = &uc_rq->bucket[uc_se->bucket_id];
1672
1673 SCHED_WARN_ON(!bucket->tasks);
1674 if (likely(bucket->tasks))
1675 bucket->tasks--;
1676
1677 uc_se->active = false;
1678
1679 /*
1680 * Keep "local max aggregation" simple and accept to (possibly)
1681 * overboost some RUNNABLE tasks in the same bucket.
1682 * The rq clamp bucket value is reset to its base value whenever
1683 * there are no more RUNNABLE tasks refcounting it.
1684 */
1685 if (likely(bucket->tasks))
1686 return;
1687
1688 rq_clamp = uclamp_rq_get(rq, clamp_id);
1689 /*
1690 * Defensive programming: this should never happen. If it happens,
1691 * e.g. due to future modification, warn and fix up the expected value.
1692 */
1693 SCHED_WARN_ON(bucket->value > rq_clamp);
1694 if (bucket->value >= rq_clamp) {
1695 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1696 uclamp_rq_set(rq, clamp_id, bkt_clamp);
1697 }
1698 }
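/*
 * A worked example of the bucket machinery above: with two RUNNABLE tasks on
 * a CPU requesting UCLAMP_MIN of 300 and 700, the requests land in different
 * buckets (they do for the default CONFIG_UCLAMP_BUCKETS_COUNT) and the
 * rq-wide clamp is the max of the bucket values, 700. When the 700-task is
 * dequeued and its bucket empties, uclamp_rq_max_value() scans the buckets
 * top-down and the rq clamp drops back to 300. Once no RUNNABLE task is
 * left, UCLAMP_MAX is held at its last value until idle exit (see
 * uclamp_idle_value()/uclamp_idle_reset()) while UCLAMP_MIN falls back to
 * uclamp_none(UCLAMP_MIN).
 */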
1699
1700 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1701 {
1702 enum uclamp_id clamp_id;
1703
1704 /*
1705 * Avoid any overhead until uclamp is actually used by the userspace.
1706 *
1707 * The condition is constructed such that a NOP is generated when
1708 * sched_uclamp_used is disabled.
1709 */
1710 if (!static_branch_unlikely(&sched_uclamp_used))
1711 return;
1712
1713 if (unlikely(!p->sched_class->uclamp_enabled))
1714 return;
1715
1716 if (p->se.sched_delayed)
1717 return;
1718
1719 for_each_clamp_id(clamp_id)
1720 uclamp_rq_inc_id(rq, p, clamp_id);
1721
1722 /* Reset clamp idle holding when there is one RUNNABLE task */
1723 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1724 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1725 }
1726
1727 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1728 {
1729 enum uclamp_id clamp_id;
1730
1731 /*
1732 * Avoid any overhead until uclamp is actually used by the userspace.
1733 *
1734 * The condition is constructed such that a NOP is generated when
1735 * sched_uclamp_used is disabled.
1736 */
1737 if (!static_branch_unlikely(&sched_uclamp_used))
1738 return;
1739
1740 if (unlikely(!p->sched_class->uclamp_enabled))
1741 return;
1742
1743 if (p->se.sched_delayed)
1744 return;
1745
1746 for_each_clamp_id(clamp_id)
1747 uclamp_rq_dec_id(rq, p, clamp_id);
1748 }
1749
1750 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1751 enum uclamp_id clamp_id)
1752 {
1753 if (!p->uclamp[clamp_id].active)
1754 return;
1755
1756 uclamp_rq_dec_id(rq, p, clamp_id);
1757 uclamp_rq_inc_id(rq, p, clamp_id);
1758
1759 /*
1760 * Make sure to clear the idle flag if we've transiently reached 0
1761 * active tasks on rq.
1762 */
1763 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1764 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1765 }
1766
1767 static inline void
1768 uclamp_update_active(struct task_struct *p)
1769 {
1770 enum uclamp_id clamp_id;
1771 struct rq_flags rf;
1772 struct rq *rq;
1773
1774 /*
1775 * Lock the task and the rq where the task is (or was) queued.
1776 *
1777 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1778 * price to pay to safely serialize util_{min,max} updates with
1779 * enqueues, dequeues and migration operations.
1780 * This is the same locking schema used by __set_cpus_allowed_ptr().
1781 */
1782 rq = task_rq_lock(p, &rf);
1783
1784 /*
1785 * Setting the clamp bucket is serialized by task_rq_lock().
1786 * If the task is not yet RUNNABLE and its task_struct is not
1787 * affecting a valid clamp bucket, the next time it's enqueued,
1788 * it will already see the updated clamp bucket value.
1789 */
1790 for_each_clamp_id(clamp_id)
1791 uclamp_rq_reinc_id(rq, p, clamp_id);
1792
1793 task_rq_unlock(rq, p, &rf);
1794 }
1795
1796 #ifdef CONFIG_UCLAMP_TASK_GROUP
1797 static inline void
1798 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1799 {
1800 struct css_task_iter it;
1801 struct task_struct *p;
1802
1803 css_task_iter_start(css, 0, &it);
1804 while ((p = css_task_iter_next(&it)))
1805 uclamp_update_active(p);
1806 css_task_iter_end(&it);
1807 }
1808
1809 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1810 #endif
1811
1812 #ifdef CONFIG_SYSCTL
1813 #ifdef CONFIG_UCLAMP_TASK_GROUP
1814 static void uclamp_update_root_tg(void)
1815 {
1816 struct task_group *tg = &root_task_group;
1817
1818 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1819 sysctl_sched_uclamp_util_min, false);
1820 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1821 sysctl_sched_uclamp_util_max, false);
1822
1823 guard(rcu)();
1824 cpu_util_update_eff(&root_task_group.css);
1825 }
1826 #else
1827 static void uclamp_update_root_tg(void) { }
1828 #endif
1829
1830 static void uclamp_sync_util_min_rt_default(void)
1831 {
1832 struct task_struct *g, *p;
1833
1834 /*
1835 * copy_process() sysctl_uclamp
1836 * uclamp_min_rt = X;
1837 * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1838 * // link thread smp_mb__after_spinlock()
1839 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1840 * sched_post_fork() for_each_process_thread()
1841 * __uclamp_sync_rt() __uclamp_sync_rt()
1842 *
1843 * Ensures that either sched_post_fork() will observe the new
1844 * uclamp_min_rt or for_each_process_thread() will observe the new
1845 * task.
1846 */
1847 read_lock(&tasklist_lock);
1848 smp_mb__after_spinlock();
1849 read_unlock(&tasklist_lock);
1850
1851 guard(rcu)();
1852 for_each_process_thread(g, p)
1853 uclamp_update_util_min_rt_default(p);
1854 }
1855
1856 static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
1857 void *buffer, size_t *lenp, loff_t *ppos)
1858 {
1859 bool update_root_tg = false;
1860 int old_min, old_max, old_min_rt;
1861 int result;
1862
1863 guard(mutex)(&uclamp_mutex);
1864
1865 old_min = sysctl_sched_uclamp_util_min;
1866 old_max = sysctl_sched_uclamp_util_max;
1867 old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1868
1869 result = proc_dointvec(table, write, buffer, lenp, ppos);
1870 if (result)
1871 goto undo;
1872 if (!write)
1873 return 0;
1874
1875 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1876 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
1877 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1878
1879 result = -EINVAL;
1880 goto undo;
1881 }
1882
1883 if (old_min != sysctl_sched_uclamp_util_min) {
1884 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1885 sysctl_sched_uclamp_util_min, false);
1886 update_root_tg = true;
1887 }
1888 if (old_max != sysctl_sched_uclamp_util_max) {
1889 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1890 sysctl_sched_uclamp_util_max, false);
1891 update_root_tg = true;
1892 }
1893
1894 if (update_root_tg) {
1895 static_branch_enable(&sched_uclamp_used);
1896 uclamp_update_root_tg();
1897 }
1898
1899 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1900 static_branch_enable(&sched_uclamp_used);
1901 uclamp_sync_util_min_rt_default();
1902 }
1903
1904 /*
1905 * We update all RUNNABLE tasks only when task groups are in use.
1906 * Otherwise, keep it simple and do just a lazy update at each next
1907 * task enqueue time.
1908 */
1909 return 0;
1910
1911 undo:
1912 sysctl_sched_uclamp_util_min = old_min;
1913 sysctl_sched_uclamp_util_max = old_max;
1914 sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1915 return result;
1916 }
1917 #endif
1918
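/*
 * Fork-time uclamp setup: a new task never starts with an active clamp
 * bucket; if the parent asked for sched_reset_on_fork, also reset the
 * requested clamp values to their defaults.
 */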
1919 static void uclamp_fork(struct task_struct *p)
1920 {
1921 enum uclamp_id clamp_id;
1922
1923 /*
1924 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1925 * as the task is still at its early fork stages.
1926 */
1927 for_each_clamp_id(clamp_id)
1928 p->uclamp[clamp_id].active = false;
1929
1930 if (likely(!p->sched_reset_on_fork))
1931 return;
1932
1933 for_each_clamp_id(clamp_id) {
1934 uclamp_se_set(&p->uclamp_req[clamp_id],
1935 uclamp_none(clamp_id), false);
1936 }
1937 }
1938
1939 static void uclamp_post_fork(struct task_struct *p)
1940 {
1941 uclamp_update_util_min_rt_default(p);
1942 }
1943
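/* Start each per-rq clamp bucket at its "no clamping" value and mark the rq idle. */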
1944 static void __init init_uclamp_rq(struct rq *rq)
1945 {
1946 enum uclamp_id clamp_id;
1947 struct uclamp_rq *uc_rq = rq->uclamp;
1948
1949 for_each_clamp_id(clamp_id) {
1950 uc_rq[clamp_id] = (struct uclamp_rq) {
1951 .value = uclamp_none(clamp_id)
1952 };
1953 }
1954
1955 rq->uclamp_flags = UCLAMP_FLAG_IDLE;
1956 }
1957
1958 static void __init init_uclamp(void)
1959 {
1960 struct uclamp_se uc_max = {};
1961 enum uclamp_id clamp_id;
1962 int cpu;
1963
1964 for_each_possible_cpu(cpu)
1965 init_uclamp_rq(cpu_rq(cpu));
1966
1967 for_each_clamp_id(clamp_id) {
1968 uclamp_se_set(&init_task.uclamp_req[clamp_id],
1969 uclamp_none(clamp_id), false);
1970 }
1971
1972 /* System defaults allow max clamp values for both indexes */
1973 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
1974 for_each_clamp_id(clamp_id) {
1975 uclamp_default[clamp_id] = uc_max;
1976 #ifdef CONFIG_UCLAMP_TASK_GROUP
1977 root_task_group.uclamp_req[clamp_id] = uc_max;
1978 root_task_group.uclamp[clamp_id] = uc_max;
1979 #endif
1980 }
1981 }
1982
1983 #else /* !CONFIG_UCLAMP_TASK */
1984 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
1985 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
1986 static inline void uclamp_fork(struct task_struct *p) { }
1987 static inline void uclamp_post_fork(struct task_struct *p) { }
1988 static inline void init_uclamp(void) { }
1989 #endif /* CONFIG_UCLAMP_TASK */
1990
1991 bool sched_task_on_rq(struct task_struct *p)
1992 {
1993 return task_on_rq_queued(p);
1994 }
1995
1996 unsigned long get_wchan(struct task_struct *p)
1997 {
1998 unsigned long ip = 0;
1999 unsigned int state;
2000
2001 if (!p || p == current)
2002 return 0;
2003
2004 /* Only get wchan if task is blocked and we can keep it that way. */
2005 raw_spin_lock_irq(&p->pi_lock);
2006 state = READ_ONCE(p->__state);
2007 smp_rmb(); /* see try_to_wake_up() */
2008 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2009 ip = __get_wchan(p);
2010 raw_spin_unlock_irq(&p->pi_lock);
2011
2012 return ip;
2013 }
2014
2015 void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2016 {
2017 if (!(flags & ENQUEUE_NOCLOCK))
2018 update_rq_clock(rq);
2019
2020 p->sched_class->enqueue_task(rq, p, flags);
2021 /*
2022 * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear
2023 * ->sched_delayed.
2024 */
2025 uclamp_rq_inc(rq, p);
2026
2027 if (!(flags & ENQUEUE_RESTORE)) {
2028 sched_info_enqueue(rq, p);
2029 psi_enqueue(p, flags & ENQUEUE_MIGRATED);
2030 }
2031
2032 if (sched_core_enabled(rq))
2033 sched_core_enqueue(rq, p);
2034 }
2035
2036 /*
2037 * Must only return false when DEQUEUE_SLEEP.
2038 */
2039 inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2040 {
2041 if (sched_core_enabled(rq))
2042 sched_core_dequeue(rq, p, flags);
2043
2044 if (!(flags & DEQUEUE_NOCLOCK))
2045 update_rq_clock(rq);
2046
2047 if (!(flags & DEQUEUE_SAVE)) {
2048 sched_info_dequeue(rq, p);
2049 psi_dequeue(p, !(flags & DEQUEUE_SLEEP));
2050 }
2051
2052 /*
2053 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
2054 * and mark the task ->sched_delayed.
2055 */
2056 uclamp_rq_dec(rq, p);
2057 return p->sched_class->dequeue_task(rq, p, flags);
2058 }
2059
2060 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2061 {
2062 if (task_on_rq_migrating(p))
2063 flags |= ENQUEUE_MIGRATED;
2064 if (flags & ENQUEUE_MIGRATED)
2065 sched_mm_cid_migrate_to(rq, p);
2066
2067 enqueue_task(rq, p, flags);
2068
2069 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2070 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2071 }
2072
2073 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2074 {
2075 SCHED_WARN_ON(flags & DEQUEUE_SLEEP);
2076
2077 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
2078 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2079
2080 /*
2081 * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
2082 * dequeue_task() and cleared *after* enqueue_task().
2083 */
2084
2085 dequeue_task(rq, p, flags);
2086 }
2087
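/*
 * Dequeue a task that is going to sleep; only fully block it when the
 * dequeue completed, i.e. was not turned into a delayed dequeue by the
 * scheduling class.
 */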
2088 static void block_task(struct rq *rq, struct task_struct *p, int flags)
2089 {
2090 if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
2091 __block_task(rq, p);
2092 }
2093
2094 /**
2095 * task_curr - is this task currently executing on a CPU?
2096 * @p: the task in question.
2097 *
2098 * Return: 1 if the task is currently executing. 0 otherwise.
2099 */
2100 inline int task_curr(const struct task_struct *p)
2101 {
2102 return cpu_curr(task_cpu(p)) == p;
2103 }
2104
2105 /*
2106 * ->switching_to() is called with the pi_lock and rq_lock held and must not
2107 * mess with locking.
2108 */
2109 void check_class_changing(struct rq *rq, struct task_struct *p,
2110 const struct sched_class *prev_class)
2111 {
2112 if (prev_class != p->sched_class && p->sched_class->switching_to)
2113 p->sched_class->switching_to(rq, p);
2114 }
2115
2116 /*
2117 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2118 * use the balance_callback list if you want balancing.
2119 *
2120 * this means any call to check_class_changed() must be followed by a call to
2121 * balance_callback().
2122 */
2123 void check_class_changed(struct rq *rq, struct task_struct *p,
2124 const struct sched_class *prev_class,
2125 int oldprio)
2126 {
2127 if (prev_class != p->sched_class) {
2128 if (prev_class->switched_from)
2129 prev_class->switched_from(rq, p);
2130
2131 p->sched_class->switched_to(rq, p);
2132 } else if (oldprio != p->prio || dl_task(p))
2133 p->sched_class->prio_changed(rq, p, oldprio);
2134 }
2135
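/*
 * Check whether a woken/enqueued task @p should preempt the current task:
 * same-class decisions are delegated to the class callback, a task from a
 * higher class always forces a resched.
 */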
2136 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2137 {
2138 if (p->sched_class == rq->curr->sched_class)
2139 rq->curr->sched_class->wakeup_preempt(rq, p, flags);
2140 else if (sched_class_above(p->sched_class, rq->curr->sched_class))
2141 resched_curr(rq);
2142
2143 /*
2144 * A queue event has occurred, and we're going to schedule. In
2145 * this case, we can save a useless back to back clock update.
2146 */
2147 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
2148 rq_clock_skip_update(rq);
2149 }
2150
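/*
 * Return 1 if p->__state matches @state, -1 if only p->saved_state matches,
 * 0 otherwise.
 */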
2151 static __always_inline
2152 int __task_state_match(struct task_struct *p, unsigned int state)
2153 {
2154 if (READ_ONCE(p->__state) & state)
2155 return 1;
2156
2157 if (READ_ONCE(p->saved_state) & state)
2158 return -1;
2159
2160 return 0;
2161 }
2162
2163 static __always_inline
2164 int task_state_match(struct task_struct *p, unsigned int state)
2165 {
2166 /*
2167 * Serialize against current_save_and_set_rtlock_wait_state(),
2168 * current_restore_rtlock_saved_state(), and __refrigerator().
2169 */
2170 guard(raw_spinlock_irq)(&p->pi_lock);
2171 return __task_state_match(p, state);
2172 }
2173
2174 /*
2175 * wait_task_inactive - wait for a thread to unschedule.
2176 *
2177 * Wait for the thread to block in any of the states set in @match_state.
2178 * If it changes, i.e. @p might have woken up, then return zero. When we
2179 * succeed in waiting for @p to be off its CPU, we return a positive number
2180 * (its total switch count). If a second call a short while later returns the
2181 * same number, the caller can be sure that @p has remained unscheduled the
2182 * whole time.
2183 *
2184 * The caller must ensure that the task *will* unschedule sometime soon,
2185 * else this function might spin for a *long* time. This function can't
2186 * be called with interrupts off, or it may introduce deadlock with
2187 * smp_call_function() if an IPI is sent by the same process we are
2188 * waiting to become inactive.
2189 */
2190 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2191 {
2192 int running, queued, match;
2193 struct rq_flags rf;
2194 unsigned long ncsw;
2195 struct rq *rq;
2196
2197 for (;;) {
2198 /*
2199 * We do the initial early heuristics without holding
2200 * any task-queue locks at all. We'll only try to get
2201 * the runqueue lock when things look like they will
2202 * work out!
2203 */
2204 rq = task_rq(p);
2205
2206 /*
2207 * If the task is actively running on another CPU
2208 * still, just relax and busy-wait without holding
2209 * any locks.
2210 *
2211 * NOTE! Since we don't hold any locks, it's not
2212 * even sure that "rq" stays as the right runqueue!
2213 * But we don't care, since "task_on_cpu()" will
2214 * return false if the runqueue has changed and p
2215 * is actually now running somewhere else!
2216 */
2217 while (task_on_cpu(rq, p)) {
2218 if (!task_state_match(p, match_state))
2219 return 0;
2220 cpu_relax();
2221 }
2222
2223 /*
2224 * Ok, time to look more closely! We need the rq
2225 * lock now, to be *sure*. If we're wrong, we'll
2226 * just go back and repeat.
2227 */
2228 rq = task_rq_lock(p, &rf);
2229 trace_sched_wait_task(p);
2230 running = task_on_cpu(rq, p);
2231 queued = task_on_rq_queued(p);
2232 ncsw = 0;
2233 if ((match = __task_state_match(p, match_state))) {
2234 /*
2235 * When matching on p->saved_state, consider this task
2236 * still queued so it will wait.
2237 */
2238 if (match < 0)
2239 queued = 1;
2240 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2241 }
2242 task_rq_unlock(rq, p, &rf);
2243
2244 /*
2245 * If it changed from the expected state, bail out now.
2246 */
2247 if (unlikely(!ncsw))
2248 break;
2249
2250 /*
2251 * Was it really running after all now that we
2252 * checked with the proper locks actually held?
2253 *
2254 * Oops. Go back and try again..
2255 */
2256 if (unlikely(running)) {
2257 cpu_relax();
2258 continue;
2259 }
2260
2261 /*
2262 * It's not enough that it's not actively running,
2263 * it must be off the runqueue _entirely_, and not
2264 * preempted!
2265 *
2266 * So if it was still runnable (but just not actively
2267 * running right now), it's preempted, and we should
2268 * yield - it could be a while.
2269 */
2270 if (unlikely(queued)) {
2271 ktime_t to = NSEC_PER_SEC / HZ;
2272
2273 set_current_state(TASK_UNINTERRUPTIBLE);
2274 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2275 continue;
2276 }
2277
2278 /*
2279 * Ahh, all good. It wasn't running, and it wasn't
2280 * runnable, which means that it will never become
2281 * running in the future either. We're all done!
2282 */
2283 break;
2284 }
2285
2286 return ncsw;
2287 }
2288
2289 #ifdef CONFIG_SMP
2290
2291 static void
2292 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2293
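/*
 * Called when switching away from a migration-disabled task: narrow its
 * effective affinity (p->cpus_ptr) to the current CPU so it cannot be
 * migrated while scheduled out.
 */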
2294 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2295 {
2296 struct affinity_context ac = {
2297 .new_mask = cpumask_of(rq->cpu),
2298 .flags = SCA_MIGRATE_DISABLE,
2299 };
2300
2301 if (likely(!p->migration_disabled))
2302 return;
2303
2304 if (p->cpus_ptr != &p->cpus_mask)
2305 return;
2306
2307 /*
2308 * Violates locking rules! See comment in __do_set_cpus_allowed().
2309 */
2310 __do_set_cpus_allowed(p, &ac);
2311 }
2312
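/*
 * Pin the current task to its CPU. Nested calls only bump the counter; the
 * outermost call also marks this rq as having a pinned task.
 */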
2313 void migrate_disable(void)
2314 {
2315 struct task_struct *p = current;
2316
2317 if (p->migration_disabled) {
2318 #ifdef CONFIG_DEBUG_PREEMPT
2319 /*
2320 * Warn about overflow half-way through the range.
2321 */
2322 WARN_ON_ONCE((s16)p->migration_disabled < 0);
2323 #endif
2324 p->migration_disabled++;
2325 return;
2326 }
2327
2328 guard(preempt)();
2329 this_rq()->nr_pinned++;
2330 p->migration_disabled = 1;
2331 }
2332 EXPORT_SYMBOL_GPL(migrate_disable);
2333
2334 void migrate_enable(void)
2335 {
2336 struct task_struct *p = current;
2337 struct affinity_context ac = {
2338 .new_mask = &p->cpus_mask,
2339 .flags = SCA_MIGRATE_ENABLE,
2340 };
2341
2342 #ifdef CONFIG_DEBUG_PREEMPT
2343 /*
2344 * Check both overflow from migrate_disable() and superfluous
2345 * migrate_enable().
2346 */
2347 if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
2348 return;
2349 #endif
2350
2351 if (p->migration_disabled > 1) {
2352 p->migration_disabled--;
2353 return;
2354 }
2355
2356 /*
2357 * Ensure stop_task runs either before or after this, and that
2358 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2359 */
2360 guard(preempt)();
2361 if (p->cpus_ptr != &p->cpus_mask)
2362 __set_cpus_allowed_ptr(p, &ac);
2363 /*
2364 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2365 * regular cpus_mask, otherwise things that race (eg.
2366 * select_fallback_rq) get confused.
2367 */
2368 barrier();
2369 p->migration_disabled = 0;
2370 this_rq()->nr_pinned--;
2371 }
2372 EXPORT_SYMBOL_GPL(migrate_enable);
2373
2374 static inline bool rq_has_pinned_tasks(struct rq *rq)
2375 {
2376 return rq->nr_pinned;
2377 }
2378
2379 /*
2380 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2381 * __set_cpus_allowed_ptr() and select_fallback_rq().
2382 */
2383 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2384 {
2385 /* When not in the task's cpumask, no point in looking further. */
2386 if (!task_allowed_on_cpu(p, cpu))
2387 return false;
2388
2389 /* migrate_disabled() must be allowed to finish. */
2390 if (is_migration_disabled(p))
2391 return cpu_online(cpu);
2392
2393 /* Non-kernel threads are not allowed during either online or offline. */
2394 if (!(p->flags & PF_KTHREAD))
2395 return cpu_active(cpu);
2396
2397 /* KTHREAD_IS_PER_CPU is always allowed. */
2398 if (kthread_is_per_cpu(p))
2399 return cpu_online(cpu);
2400
2401 /* Regular kernel threads don't get to stay during offline. */
2402 if (cpu_dying(cpu))
2403 return false;
2404
2405 /* But are allowed during online. */
2406 return cpu_online(cpu);
2407 }
2408
2409 /*
2410 * This is how migration works:
2411 *
2412 * 1) we invoke migration_cpu_stop() on the target CPU using
2413 * stop_one_cpu().
2414 * 2) stopper starts to run (implicitly forcing the migrated thread
2415 * off the CPU)
2416 * 3) it checks whether the migrated task is still in the wrong runqueue.
2417 * 4) if it's in the wrong runqueue then the migration thread removes
2418 * it and puts it into the right queue.
2419 * 5) stopper completes and stop_one_cpu() returns and the migration
2420 * is done.
2421 */
2422
2423 /*
2424 * move_queued_task - move a queued task to new rq.
2425 *
2426 * Returns (locked) new rq. Old rq's lock is released.
2427 */
2428 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2429 struct task_struct *p, int new_cpu)
2430 {
2431 lockdep_assert_rq_held(rq);
2432
2433 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2434 set_task_cpu(p, new_cpu);
2435 rq_unlock(rq, rf);
2436
2437 rq = cpu_rq(new_cpu);
2438
2439 rq_lock(rq, rf);
2440 WARN_ON_ONCE(task_cpu(p) != new_cpu);
2441 activate_task(rq, p, 0);
2442 wakeup_preempt(rq, p, 0);
2443
2444 return rq;
2445 }
2446
2447 struct migration_arg {
2448 struct task_struct *task;
2449 int dest_cpu;
2450 struct set_affinity_pending *pending;
2451 };
2452
2453 /*
2454 * @refs: number of wait_for_completion()
2455 * @stop_pending: is @stop_work in use
2456 */
2457 struct set_affinity_pending {
2458 refcount_t refs;
2459 unsigned int stop_pending;
2460 struct completion done;
2461 struct cpu_stop_work stop_work;
2462 struct migration_arg arg;
2463 };
2464
2465 /*
2466 * Move (not current) task off this CPU, onto the destination CPU. We're doing
2467 * this because either it can't run here any more (set_cpus_allowed()
2468 * away from this CPU, or CPU going down), or because we're
2469 * attempting to rebalance this task on exec (sched_exec).
2470 *
2471 * So we race with normal scheduler movements, but that's OK, as long
2472 * as the task is no longer on this CPU.
2473 */
2474 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2475 struct task_struct *p, int dest_cpu)
2476 {
2477 /* Affinity changed (again). */
2478 if (!is_cpu_allowed(p, dest_cpu))
2479 return rq;
2480
2481 rq = move_queued_task(rq, rf, p, dest_cpu);
2482
2483 return rq;
2484 }
2485
2486 /*
2487 * migration_cpu_stop - this will be executed by a high-prio stopper thread
2488 * and performs thread migration by bumping thread off CPU then
2489 * 'pushing' onto another runqueue.
2490 */
2491 static int migration_cpu_stop(void *data)
2492 {
2493 struct migration_arg *arg = data;
2494 struct set_affinity_pending *pending = arg->pending;
2495 struct task_struct *p = arg->task;
2496 struct rq *rq = this_rq();
2497 bool complete = false;
2498 struct rq_flags rf;
2499
2500 /*
2501 * The original target CPU might have gone down and we might
2502 * be on another CPU but it doesn't matter.
2503 */
2504 local_irq_save(rf.flags);
2505 /*
2506 * We need to explicitly wake pending tasks before running
2507 * __migrate_task() such that we will not miss enforcing cpus_ptr
2508 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2509 */
2510 flush_smp_call_function_queue();
2511
2512 raw_spin_lock(&p->pi_lock);
2513 rq_lock(rq, &rf);
2514
2515 /*
2516 * If we were passed a pending, then ->stop_pending was set, thus
2517 * p->migration_pending must have remained stable.
2518 */
2519 WARN_ON_ONCE(pending && pending != p->migration_pending);
2520
2521 /*
2522 * If task_rq(p) != rq, it cannot be migrated here, because we're
2523 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2524 * we're holding p->pi_lock.
2525 */
2526 if (task_rq(p) == rq) {
2527 if (is_migration_disabled(p))
2528 goto out;
2529
2530 if (pending) {
2531 p->migration_pending = NULL;
2532 complete = true;
2533
2534 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2535 goto out;
2536 }
2537
2538 if (task_on_rq_queued(p)) {
2539 update_rq_clock(rq);
2540 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2541 } else {
2542 p->wake_cpu = arg->dest_cpu;
2543 }
2544
2545 /*
2546 * XXX __migrate_task() can fail, at which point we might end
2547 * up running on a dodgy CPU, AFAICT this can only happen
2548 * during CPU hotplug, at which point we'll get pushed out
2549 * anyway, so it's probably not a big deal.
2550 */
2551
2552 } else if (pending) {
2553 /*
2554 * This happens when we get migrated between migrate_enable()'s
2555 * preempt_enable() and scheduling the stopper task. At that
2556 * point we're a regular task again and not current anymore.
2557 *
2558 * A !PREEMPT kernel has a giant hole here, which makes it far
2559 * more likely.
2560 */
2561
2562 /*
2563 * The task moved before the stopper got to run. We're holding
2564 * ->pi_lock, so the allowed mask is stable - if it got
2565 * somewhere allowed, we're done.
2566 */
2567 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2568 p->migration_pending = NULL;
2569 complete = true;
2570 goto out;
2571 }
2572
2573 /*
2574 * When migrate_enable() hits a rq mis-match we can't reliably
2575 * determine is_migration_disabled() and so have to chase after
2576 * it.
2577 */
2578 WARN_ON_ONCE(!pending->stop_pending);
2579 preempt_disable();
2580 task_rq_unlock(rq, p, &rf);
2581 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2582 &pending->arg, &pending->stop_work);
2583 preempt_enable();
2584 return 0;
2585 }
2586 out:
2587 if (pending)
2588 pending->stop_pending = false;
2589 task_rq_unlock(rq, p, &rf);
2590
2591 if (complete)
2592 complete_all(&pending->done);
2593
2594 return 0;
2595 }
2596
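/*
 * Stopper callback that pushes @arg (a task) off this runqueue onto one
 * selected by the class's find_lock_rq(). If the task is currently
 * migration-disabled, defer the push via MDF_PUSH instead.
 */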
2597 int push_cpu_stop(void *arg)
2598 {
2599 struct rq *lowest_rq = NULL, *rq = this_rq();
2600 struct task_struct *p = arg;
2601
2602 raw_spin_lock_irq(&p->pi_lock);
2603 raw_spin_rq_lock(rq);
2604
2605 if (task_rq(p) != rq)
2606 goto out_unlock;
2607
2608 if (is_migration_disabled(p)) {
2609 p->migration_flags |= MDF_PUSH;
2610 goto out_unlock;
2611 }
2612
2613 p->migration_flags &= ~MDF_PUSH;
2614
2615 if (p->sched_class->find_lock_rq)
2616 lowest_rq = p->sched_class->find_lock_rq(p, rq);
2617
2618 if (!lowest_rq)
2619 goto out_unlock;
2620
2621 // XXX validate p is still the highest prio task
2622 if (task_rq(p) == rq) {
2623 deactivate_task(rq, p, 0);
2624 set_task_cpu(p, lowest_rq->cpu);
2625 activate_task(lowest_rq, p, 0);
2626 resched_curr(lowest_rq);
2627 }
2628
2629 double_unlock_balance(rq, lowest_rq);
2630
2631 out_unlock:
2632 rq->push_busy = false;
2633 raw_spin_rq_unlock(rq);
2634 raw_spin_unlock_irq(&p->pi_lock);
2635
2636 put_task_struct(p);
2637 return 0;
2638 }
2639
2640 /*
2641 * sched_class::set_cpus_allowed must do the below, but is not required to
2642 * actually call this function.
2643 */
2644 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2645 {
2646 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2647 p->cpus_ptr = ctx->new_mask;
2648 return;
2649 }
2650
2651 cpumask_copy(&p->cpus_mask, ctx->new_mask);
2652 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2653
2654 /*
2655 * Swap in a new user_cpus_ptr if SCA_USER flag set
2656 */
2657 if (ctx->flags & SCA_USER)
2658 swap(p->user_cpus_ptr, ctx->user_mask);
2659 }
2660
2661 static void
2662 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2663 {
2664 struct rq *rq = task_rq(p);
2665 bool queued, running;
2666
2667 /*
2668 * This here violates the locking rules for affinity, since we're only
2669 * supposed to change these variables while holding both rq->lock and
2670 * p->pi_lock.
2671 *
2672 * HOWEVER, it magically works, because ttwu() is the only code that
2673 * accesses these variables under p->pi_lock and only does so after
2674 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2675 * before finish_task().
2676 *
2677 * XXX do further audits, this smells like something putrid.
2678 */
2679 if (ctx->flags & SCA_MIGRATE_DISABLE)
2680 SCHED_WARN_ON(!p->on_cpu);
2681 else
2682 lockdep_assert_held(&p->pi_lock);
2683
2684 queued = task_on_rq_queued(p);
2685 running = task_current(rq, p);
2686
2687 if (queued) {
2688 /*
2689 * Because __kthread_bind() calls this on blocked tasks without
2690 * holding rq->lock.
2691 */
2692 lockdep_assert_rq_held(rq);
2693 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2694 }
2695 if (running)
2696 put_prev_task(rq, p);
2697
2698 p->sched_class->set_cpus_allowed(p, ctx);
2699
2700 if (queued)
2701 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2702 if (running)
2703 set_next_task(rq, p);
2704 }
2705
2706 /*
2707 * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2708 * affinity (if any) should be destroyed too.
2709 */
2710 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2711 {
2712 struct affinity_context ac = {
2713 .new_mask = new_mask,
2714 .user_mask = NULL,
2715 .flags = SCA_USER, /* clear the user requested mask */
2716 };
2717 union cpumask_rcuhead {
2718 cpumask_t cpumask;
2719 struct rcu_head rcu;
2720 };
2721
2722 __do_set_cpus_allowed(p, &ac);
2723
2724 /*
2725 * Because this is called with p->pi_lock held, it is not possible
2726 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2727 * kfree_rcu().
2728 */
2729 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2730 }
2731
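/*
 * Copy @src's user-requested affinity mask (user_cpus_ptr), if any, into
 * @dst during fork/clone.
 */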
2732 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2733 int node)
2734 {
2735 cpumask_t *user_mask;
2736 unsigned long flags;
2737
2738 /*
2739 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2740 * may differ by now due to racing.
2741 */
2742 dst->user_cpus_ptr = NULL;
2743
2744 /*
2745 * This check is racy and losing the race is a valid situation.
2746 * It is not worth the extra overhead of taking the pi_lock on
2747 * every fork/clone.
2748 */
2749 if (data_race(!src->user_cpus_ptr))
2750 return 0;
2751
2752 user_mask = alloc_user_cpus_ptr(node);
2753 if (!user_mask)
2754 return -ENOMEM;
2755
2756 /*
2757 * Use pi_lock to protect content of user_cpus_ptr
2758 *
2759 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2760 * do_set_cpus_allowed().
2761 */
2762 raw_spin_lock_irqsave(&src->pi_lock, flags);
2763 if (src->user_cpus_ptr) {
2764 swap(dst->user_cpus_ptr, user_mask);
2765 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2766 }
2767 raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2768
2769 if (unlikely(user_mask))
2770 kfree(user_mask);
2771
2772 return 0;
2773 }
2774
2775 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2776 {
2777 struct cpumask *user_mask = NULL;
2778
2779 swap(p->user_cpus_ptr, user_mask);
2780
2781 return user_mask;
2782 }
2783
2784 void release_user_cpus_ptr(struct task_struct *p)
2785 {
2786 kfree(clear_user_cpus_ptr(p));
2787 }
2788
2789 /*
2790 * This function is wildly self concurrent; here be dragons.
2791 *
2792 *
2793 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2794 * designated task is enqueued on an allowed CPU. If that task is currently
2795 * running, we have to kick it out using the CPU stopper.
2796 *
2797 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2798 * Consider:
2799 *
2800 * Initial conditions: P0->cpus_mask = [0, 1]
2801 *
2802 * P0@CPU0 P1
2803 *
2804 * migrate_disable();
2805 * <preempted>
2806 * set_cpus_allowed_ptr(P0, [1]);
2807 *
2808 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2809 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2810 * This means we need the following scheme:
2811 *
2812 * P0@CPU0 P1
2813 *
2814 * migrate_disable();
2815 * <preempted>
2816 * set_cpus_allowed_ptr(P0, [1]);
2817 * <blocks>
2818 * <resumes>
2819 * migrate_enable();
2820 * __set_cpus_allowed_ptr();
2821 * <wakes local stopper>
2822 * `--> <woken on migration completion>
2823 *
2824 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2825 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2826 * task p are serialized by p->pi_lock, which we can leverage: the one that
2827 * should come into effect at the end of the Migrate-Disable region is the last
2828 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2829 * but we still need to properly signal those waiting tasks at the appropriate
2830 * moment.
2831 *
2832 * This is implemented using struct set_affinity_pending. The first
2833 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2834 * setup an instance of that struct and install it on the targeted task_struct.
2835 * Any and all further callers will reuse that instance. Those then wait for
2836 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2837 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2838 *
2839 *
2840 * (1) In the cases covered above. There is one more where the completion is
2841 * signaled within affine_move_task() itself: when a subsequent affinity request
2842 * occurs after the stopper bailed out due to the targeted task still being
2843 * Migrate-Disable. Consider:
2844 *
2845 * Initial conditions: P0->cpus_mask = [0, 1]
2846 *
2847 * CPU0 P1 P2
2848 * <P0>
2849 * migrate_disable();
2850 * <preempted>
2851 * set_cpus_allowed_ptr(P0, [1]);
2852 * <blocks>
2853 * <migration/0>
2854 * migration_cpu_stop()
2855 * is_migration_disabled()
2856 * <bails>
2857 * set_cpus_allowed_ptr(P0, [0, 1]);
2858 * <signal completion>
2859 * <awakes>
2860 *
2861 * Note that the above is safe vs a concurrent migrate_enable(), as any
2862 * pending affinity completion is preceded by an uninstallation of
2863 * p->migration_pending done with p->pi_lock held.
2864 */
2865 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2866 int dest_cpu, unsigned int flags)
2867 __releases(rq->lock)
2868 __releases(p->pi_lock)
2869 {
2870 struct set_affinity_pending my_pending = { }, *pending = NULL;
2871 bool stop_pending, complete = false;
2872
2873 /* Can the task run on the task's current CPU? If so, we're done */
2874 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2875 struct task_struct *push_task = NULL;
2876
2877 if ((flags & SCA_MIGRATE_ENABLE) &&
2878 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2879 rq->push_busy = true;
2880 push_task = get_task_struct(p);
2881 }
2882
2883 /*
2884 * If there are pending waiters, but no pending stop_work,
2885 * then complete now.
2886 */
2887 pending = p->migration_pending;
2888 if (pending && !pending->stop_pending) {
2889 p->migration_pending = NULL;
2890 complete = true;
2891 }
2892
2893 preempt_disable();
2894 task_rq_unlock(rq, p, rf);
2895 if (push_task) {
2896 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2897 p, &rq->push_work);
2898 }
2899 preempt_enable();
2900
2901 if (complete)
2902 complete_all(&pending->done);
2903
2904 return 0;
2905 }
2906
2907 if (!(flags & SCA_MIGRATE_ENABLE)) {
2908 /* serialized by p->pi_lock */
2909 if (!p->migration_pending) {
2910 /* Install the request */
2911 refcount_set(&my_pending.refs, 1);
2912 init_completion(&my_pending.done);
2913 my_pending.arg = (struct migration_arg) {
2914 .task = p,
2915 .dest_cpu = dest_cpu,
2916 .pending = &my_pending,
2917 };
2918
2919 p->migration_pending = &my_pending;
2920 } else {
2921 pending = p->migration_pending;
2922 refcount_inc(&pending->refs);
2923 /*
2924 * Affinity has changed, but we've already installed a
2925 * pending. migration_cpu_stop() *must* see this, else
2926 * we risk a completion of the pending despite having a
2927 * task on a disallowed CPU.
2928 *
2929 * Serialized by p->pi_lock, so this is safe.
2930 */
2931 pending->arg.dest_cpu = dest_cpu;
2932 }
2933 }
2934 pending = p->migration_pending;
2935 /*
2936 * - !MIGRATE_ENABLE:
2937 * we'll have installed a pending if there wasn't one already.
2938 *
2939 * - MIGRATE_ENABLE:
2940 * we're here because the current CPU isn't matching anymore,
2941 * the only way that can happen is because of a concurrent
2942 * set_cpus_allowed_ptr() call, which should then still be
2943 * pending completion.
2944 *
2945 * Either way, we really should have a @pending here.
2946 */
2947 if (WARN_ON_ONCE(!pending)) {
2948 task_rq_unlock(rq, p, rf);
2949 return -EINVAL;
2950 }
2951
2952 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
2953 /*
2954 * MIGRATE_ENABLE gets here because 'p == current', but for
2955 * anything else we cannot do is_migration_disabled(), punt
2956 * and have the stopper function handle it all race-free.
2957 */
2958 stop_pending = pending->stop_pending;
2959 if (!stop_pending)
2960 pending->stop_pending = true;
2961
2962 if (flags & SCA_MIGRATE_ENABLE)
2963 p->migration_flags &= ~MDF_PUSH;
2964
2965 preempt_disable();
2966 task_rq_unlock(rq, p, rf);
2967 if (!stop_pending) {
2968 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
2969 &pending->arg, &pending->stop_work);
2970 }
2971 preempt_enable();
2972
2973 if (flags & SCA_MIGRATE_ENABLE)
2974 return 0;
2975 } else {
2976
2977 if (!is_migration_disabled(p)) {
2978 if (task_on_rq_queued(p))
2979 rq = move_queued_task(rq, rf, p, dest_cpu);
2980
2981 if (!pending->stop_pending) {
2982 p->migration_pending = NULL;
2983 complete = true;
2984 }
2985 }
2986 task_rq_unlock(rq, p, rf);
2987
2988 if (complete)
2989 complete_all(&pending->done);
2990 }
2991
2992 wait_for_completion(&pending->done);
2993
2994 if (refcount_dec_and_test(&pending->refs))
2995 wake_up_var(&pending->refs); /* No UaF, just an address */
2996
2997 /*
2998 * Block the original owner of &pending until all subsequent callers
2999 * have seen the completion and decremented the refcount
3000 */
3001 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
3002
3003 /* ARGH */
3004 WARN_ON_ONCE(my_pending.stop_pending);
3005
3006 return 0;
3007 }
3008
3009 /*
3010 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3011 */
3012 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3013 struct affinity_context *ctx,
3014 struct rq *rq,
3015 struct rq_flags *rf)
3016 __releases(rq->lock)
3017 __releases(p->pi_lock)
3018 {
3019 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3020 const struct cpumask *cpu_valid_mask = cpu_active_mask;
3021 bool kthread = p->flags & PF_KTHREAD;
3022 unsigned int dest_cpu;
3023 int ret = 0;
3024
3025 update_rq_clock(rq);
3026
3027 if (kthread || is_migration_disabled(p)) {
3028 /*
3029 * Kernel threads are allowed on online && !active CPUs,
3030 * however, during cpu-hot-unplug, even these might get pushed
3031 * away if not KTHREAD_IS_PER_CPU.
3032 *
3033 * Specifically, migration_disabled() tasks must not fail the
3034 * cpumask_any_and_distribute() pick below, esp. so on
3035 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3036 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3037 */
3038 cpu_valid_mask = cpu_online_mask;
3039 }
3040
3041 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3042 ret = -EINVAL;
3043 goto out;
3044 }
3045
3046 /*
3047 * Must re-check here, to close a race against __kthread_bind(),
3048 * sched_setaffinity() is not guaranteed to observe the flag.
3049 */
3050 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3051 ret = -EINVAL;
3052 goto out;
3053 }
3054
3055 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3056 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3057 if (ctx->flags & SCA_USER)
3058 swap(p->user_cpus_ptr, ctx->user_mask);
3059 goto out;
3060 }
3061
3062 if (WARN_ON_ONCE(p == current &&
3063 is_migration_disabled(p) &&
3064 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3065 ret = -EBUSY;
3066 goto out;
3067 }
3068 }
3069
3070 /*
3071 * Picking a ~random cpu helps in cases where we are changing affinity
3072 * for groups of tasks (ie. cpuset), so that load balancing is not
3073 * immediately required to distribute the tasks within their new mask.
3074 */
3075 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3076 if (dest_cpu >= nr_cpu_ids) {
3077 ret = -EINVAL;
3078 goto out;
3079 }
3080
3081 __do_set_cpus_allowed(p, ctx);
3082
3083 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3084
3085 out:
3086 task_rq_unlock(rq, p, rf);
3087
3088 return ret;
3089 }
3090
3091 /*
3092 * Change a given task's CPU affinity. Migrate the thread to a
3093 * proper CPU and schedule it away if the CPU it's executing on
3094 * is removed from the allowed bitmask.
3095 *
3096 * NOTE: the caller must have a valid reference to the task, the
3097 * task must not exit() & deallocate itself prematurely. The
3098 * call is not atomic; no spinlocks may be held.
3099 */
3100 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
3101 {
3102 struct rq_flags rf;
3103 struct rq *rq;
3104
3105 rq = task_rq_lock(p, &rf);
3106 /*
3107 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3108 * flags are set.
3109 */
3110 if (p->user_cpus_ptr &&
3111 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3112 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3113 ctx->new_mask = rq->scratch_mask;
3114
3115 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3116 }
3117
3118 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3119 {
3120 struct affinity_context ac = {
3121 .new_mask = new_mask,
3122 .flags = 0,
3123 };
3124
3125 return __set_cpus_allowed_ptr(p, &ac);
3126 }
3127 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
3128
3129 /*
3130 * Change a given task's CPU affinity to the intersection of its current
3131 * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3132 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3133 * affinity or use cpu_online_mask instead.
3134 *
3135 * If the resulting mask is empty, leave the affinity unchanged and return
3136 * -EINVAL.
3137 */
3138 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3139 struct cpumask *new_mask,
3140 const struct cpumask *subset_mask)
3141 {
3142 struct affinity_context ac = {
3143 .new_mask = new_mask,
3144 .flags = 0,
3145 };
3146 struct rq_flags rf;
3147 struct rq *rq;
3148 int err;
3149
3150 rq = task_rq_lock(p, &rf);
3151
3152 /*
3153 * Forcefully restricting the affinity of a deadline task is
3154 * likely to cause problems, so fail and noisily override the
3155 * mask entirely.
3156 */
3157 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3158 err = -EPERM;
3159 goto err_unlock;
3160 }
3161
3162 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3163 err = -EINVAL;
3164 goto err_unlock;
3165 }
3166
3167 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3168
3169 err_unlock:
3170 task_rq_unlock(rq, p, &rf);
3171 return err;
3172 }
3173
3174 /*
3175 * Restrict the CPU affinity of task @p so that it is a subset of
3176 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3177 * old affinity mask. If the resulting mask is empty, we warn and walk
3178 * up the cpuset hierarchy until we find a suitable mask.
3179 */
3180 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3181 {
3182 cpumask_var_t new_mask;
3183 const struct cpumask *override_mask = task_cpu_possible_mask(p);
3184
3185 alloc_cpumask_var(&new_mask, GFP_KERNEL);
3186
3187 /*
3188 * __migrate_task() can fail silently in the face of concurrent
3189 * offlining of the chosen destination CPU, so take the hotplug
3190 * lock to ensure that the migration succeeds.
3191 */
3192 cpus_read_lock();
3193 if (!cpumask_available(new_mask))
3194 goto out_set_mask;
3195
3196 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3197 goto out_free_mask;
3198
3199 /*
3200 * We failed to find a valid subset of the affinity mask for the
3201 * task, so override it based on its cpuset hierarchy.
3202 */
3203 cpuset_cpus_allowed(p, new_mask);
3204 override_mask = new_mask;
3205
3206 out_set_mask:
3207 if (printk_ratelimit()) {
3208 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3209 task_pid_nr(p), p->comm,
3210 cpumask_pr_args(override_mask));
3211 }
3212
3213 WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3214 out_free_mask:
3215 cpus_read_unlock();
3216 free_cpumask_var(new_mask);
3217 }
3218
3219 /*
3220 * Restore the affinity of a task @p which was previously restricted by a
3221 * call to force_compatible_cpus_allowed_ptr().
3222 *
3223 * It is the caller's responsibility to serialise this with any calls to
3224 * force_compatible_cpus_allowed_ptr(@p).
3225 */
3226 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3227 {
3228 struct affinity_context ac = {
3229 .new_mask = task_user_cpus(p),
3230 .flags = 0,
3231 };
3232 int ret;
3233
3234 /*
3235 * Try to restore the old affinity mask with __sched_setaffinity().
3236 * Cpuset masking will be done there too.
3237 */
3238 ret = __sched_setaffinity(p, &ac);
3239 WARN_ON_ONCE(ret);
3240 }
3241
3242 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3243 {
3244 #ifdef CONFIG_SCHED_DEBUG
3245 unsigned int state = READ_ONCE(p->__state);
3246
3247 /*
3248 * We should never call set_task_cpu() on a blocked task,
3249 * ttwu() will sort out the placement.
3250 */
3251 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3252
3253 /*
3254 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3255 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3256 * time relying on p->on_rq.
3257 */
3258 WARN_ON_ONCE(state == TASK_RUNNING &&
3259 p->sched_class == &fair_sched_class &&
3260 (p->on_rq && !task_on_rq_migrating(p)));
3261
3262 #ifdef CONFIG_LOCKDEP
3263 /*
3264 * The caller should hold either p->pi_lock or rq->lock, when changing
3265 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3266 *
3267 * sched_move_task() holds both and thus holding either pins the cgroup,
3268 * see task_group().
3269 *
3270 * Furthermore, all task_rq users should acquire both locks, see
3271 * task_rq_lock().
3272 */
3273 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3274 lockdep_is_held(__rq_lockp(task_rq(p)))));
3275 #endif
3276 /*
3277 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3278 */
3279 WARN_ON_ONCE(!cpu_online(new_cpu));
3280
3281 WARN_ON_ONCE(is_migration_disabled(p));
3282 #endif
3283
3284 trace_sched_migrate_task(p, new_cpu);
3285
3286 if (task_cpu(p) != new_cpu) {
3287 if (p->sched_class->migrate_task_rq)
3288 p->sched_class->migrate_task_rq(p, new_cpu);
3289 p->se.nr_migrations++;
3290 rseq_migrate(p);
3291 sched_mm_cid_migrate_from(p);
3292 perf_event_task_migrate(p);
3293 }
3294
3295 __set_task_cpu(p, new_cpu);
3296 }
3297
3298 #ifdef CONFIG_NUMA_BALANCING
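/*
 * Move @p to @cpu as one half of a task swap. Both runqueue locks are held
 * by the caller; if @p is not queued, only record the target CPU for its
 * next wakeup.
 */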
3299 static void __migrate_swap_task(struct task_struct *p, int cpu)
3300 {
3301 if (task_on_rq_queued(p)) {
3302 struct rq *src_rq, *dst_rq;
3303 struct rq_flags srf, drf;
3304
3305 src_rq = task_rq(p);
3306 dst_rq = cpu_rq(cpu);
3307
3308 rq_pin_lock(src_rq, &srf);
3309 rq_pin_lock(dst_rq, &drf);
3310
3311 deactivate_task(src_rq, p, 0);
3312 set_task_cpu(p, cpu);
3313 activate_task(dst_rq, p, 0);
3314 wakeup_preempt(dst_rq, p, 0);
3315
3316 rq_unpin_lock(dst_rq, &drf);
3317 rq_unpin_lock(src_rq, &srf);
3318
3319 } else {
3320 /*
3321 * Task isn't running anymore; make it appear like we migrated
3322 * it before it went to sleep. This means on wakeup we make the
3323 * previous CPU our target instead of where it really is.
3324 */
3325 p->wake_cpu = cpu;
3326 }
3327 }
3328
3329 struct migration_swap_arg {
3330 struct task_struct *src_task, *dst_task;
3331 int src_cpu, dst_cpu;
3332 };
3333
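/*
 * Stopper callback for migrate_swap(): re-validate placement and affinity of
 * both tasks under their pi_locks and both rq locks, then swap their CPUs.
 */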
3334 static int migrate_swap_stop(void *data)
3335 {
3336 struct migration_swap_arg *arg = data;
3337 struct rq *src_rq, *dst_rq;
3338
3339 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3340 return -EAGAIN;
3341
3342 src_rq = cpu_rq(arg->src_cpu);
3343 dst_rq = cpu_rq(arg->dst_cpu);
3344
3345 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3346 guard(double_rq_lock)(src_rq, dst_rq);
3347
3348 if (task_cpu(arg->dst_task) != arg->dst_cpu)
3349 return -EAGAIN;
3350
3351 if (task_cpu(arg->src_task) != arg->src_cpu)
3352 return -EAGAIN;
3353
3354 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3355 return -EAGAIN;
3356
3357 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3358 return -EAGAIN;
3359
3360 __migrate_swap_task(arg->src_task, arg->dst_cpu);
3361 __migrate_swap_task(arg->dst_task, arg->src_cpu);
3362
3363 return 0;
3364 }
3365
3366 /*
3367 * Cross migrate two tasks
3368 */
3369 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3370 int target_cpu, int curr_cpu)
3371 {
3372 struct migration_swap_arg arg;
3373 int ret = -EINVAL;
3374
3375 arg = (struct migration_swap_arg){
3376 .src_task = cur,
3377 .src_cpu = curr_cpu,
3378 .dst_task = p,
3379 .dst_cpu = target_cpu,
3380 };
3381
3382 if (arg.src_cpu == arg.dst_cpu)
3383 goto out;
3384
3385 /*
3386 * These three tests are all lockless; this is OK since all of them
3387 * will be re-checked with proper locks held further down the line.
3388 */
3389 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3390 goto out;
3391
3392 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3393 goto out;
3394
3395 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3396 goto out;
3397
3398 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3399 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3400
3401 out:
3402 return ret;
3403 }
3404 #endif /* CONFIG_NUMA_BALANCING */
3405
3406 /***
3407 * kick_process - kick a running thread to enter/exit the kernel
3408 * @p: the to-be-kicked thread
3409 *
3410 * Cause a process which is running on another CPU to enter
3411 * kernel-mode, without any delay. (to get signals handled.)
3412 *
3413 * NOTE: this function doesn't have to take the runqueue lock,
3414 * because all it wants to ensure is that the remote task enters
3415 * the kernel. If the IPI races and the task has been migrated
3416 * to another CPU then no harm is done and the purpose has been
3417 * achieved as well.
3418 */
3419 void kick_process(struct task_struct *p)
3420 {
3421 guard(preempt)();
3422 int cpu = task_cpu(p);
3423
3424 if ((cpu != smp_processor_id()) && task_curr(p))
3425 smp_send_reschedule(cpu);
3426 }
3427 EXPORT_SYMBOL_GPL(kick_process);
3428
3429 /*
3430 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3431 *
3432 * A few notes on cpu_active vs cpu_online:
3433 *
3434 * - cpu_active must be a subset of cpu_online
3435 *
3436 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3437 * see __set_cpus_allowed_ptr(). At this point the newly online
3438 * CPU isn't yet part of the sched domains, and balancing will not
3439 * see it.
3440 *
3441 * - on CPU-down we clear cpu_active() to mask the sched domains and
3442 * avoid the load balancer to place new tasks on the to be removed
3443 * CPU. Existing tasks will remain running there and will be taken
3444 * off.
3445 *
3446 * This means that fallback selection must not select !active CPUs.
3447 * And can assume that any active CPU must be online. Conversely
3448 * select_task_rq() below may allow selection of !active CPUs in order
3449 * to satisfy the above rules.
3450 */
3451 static int select_fallback_rq(int cpu, struct task_struct *p)
3452 {
3453 int nid = cpu_to_node(cpu);
3454 const struct cpumask *nodemask = NULL;
3455 enum { cpuset, possible, fail } state = cpuset;
3456 int dest_cpu;
3457
3458 /*
3459 * If the node that the CPU is on has been offlined, cpu_to_node()
3460 * will return -1. There is no CPU on the node, and we should
3461 * select a CPU on another node.
3462 */
3463 if (nid != -1) {
3464 nodemask = cpumask_of_node(nid);
3465
3466 /* Look for allowed, online CPU in same node. */
3467 for_each_cpu(dest_cpu, nodemask) {
3468 if (is_cpu_allowed(p, dest_cpu))
3469 return dest_cpu;
3470 }
3471 }
3472
3473 for (;;) {
3474 /* Any allowed, online CPU? */
3475 for_each_cpu(dest_cpu, p->cpus_ptr) {
3476 if (!is_cpu_allowed(p, dest_cpu))
3477 continue;
3478
3479 goto out;
3480 }
3481
3482 /* No more Mr. Nice Guy. */
3483 switch (state) {
3484 case cpuset:
3485 if (cpuset_cpus_allowed_fallback(p)) {
3486 state = possible;
3487 break;
3488 }
3489 fallthrough;
3490 case possible:
3491 /*
3492 * XXX When called from select_task_rq() we only
3493 * hold p->pi_lock and again violate locking order.
3494 *
3495 * More yuck to audit.
3496 */
3497 do_set_cpus_allowed(p, task_cpu_possible_mask(p));
3498 state = fail;
3499 break;
3500 case fail:
3501 BUG();
3502 break;
3503 }
3504 }
3505
3506 out:
3507 if (state != cpuset) {
3508 /*
3509 * Don't tell them about moving exiting tasks or
3510 * kernel threads (both mm NULL), since they never
3511 * leave kernel.
3512 */
3513 if (p->mm && printk_ratelimit()) {
3514 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3515 task_pid_nr(p), p->comm, cpu);
3516 }
3517 }
3518
3519 return dest_cpu;
3520 }
3521
3522 /*
3523 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3524 */
3525 static inline
3526 int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
3527 {
3528 lockdep_assert_held(&p->pi_lock);
3529
3530 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) {
3531 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
3532 *wake_flags |= WF_RQ_SELECTED;
3533 } else {
3534 cpu = cpumask_any(p->cpus_ptr);
3535 }
3536
3537 /*
3538 * In order not to call set_task_cpu() on a blocking task we need
3539 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3540 * CPU.
3541 *
3542 * Since this is common to all placement strategies, this lives here.
3543 *
3544 * [ this allows ->select_task() to simply return task_cpu(p) and
3545 * not worry about this generic constraint ]
3546 */
3547 if (unlikely(!is_cpu_allowed(p, cpu)))
3548 cpu = select_fallback_rq(task_cpu(p), p);
3549
3550 return cpu;
3551 }
3552
3553 void sched_set_stop_task(int cpu, struct task_struct *stop)
3554 {
3555 static struct lock_class_key stop_pi_lock;
3556 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3557 struct task_struct *old_stop = cpu_rq(cpu)->stop;
3558
3559 if (stop) {
3560 /*
3561 * Make it appear like a SCHED_FIFO task, it's something
3562 * userspace knows about and won't get confused about.
3563 *
3564 * Also, it will make PI more or less work without too
3565 * much confusion -- but then, stop work should not
3566 * rely on PI working anyway.
3567 */
3568 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3569
3570 stop->sched_class = &stop_sched_class;
3571
3572 /*
3573 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3574 * adjust the effective priority of a task. As a result,
3575 * rt_mutex_setprio() can trigger (RT) balancing operations,
3576 * which can then trigger wakeups of the stop thread to push
3577 * around the current task.
3578 *
3579 * The stop task itself will never be part of the PI-chain, it
3580 * never blocks, therefore that ->pi_lock recursion is safe.
3581 * Tell lockdep about this by placing the stop->pi_lock in its
3582 * own class.
3583 */
3584 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3585 }
3586
3587 cpu_rq(cpu)->stop = stop;
3588
3589 if (old_stop) {
3590 /*
3591 * Reset it back to a normal scheduling class so that
3592 * it can die in pieces.
3593 */
3594 old_stop->sched_class = &rt_sched_class;
3595 }
3596 }
3597
3598 #else /* CONFIG_SMP */
3599
3600 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3601
3602 static inline bool rq_has_pinned_tasks(struct rq *rq)
3603 {
3604 return false;
3605 }
3606
3607 #endif /* !CONFIG_SMP */
3608
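/*
 * Account wakeup schedstats for @p being woken to run on @cpu: local vs.
 * remote wakeups, and migrated/sync wakeups.
 */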
3609 static void
3610 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3611 {
3612 struct rq *rq;
3613
3614 if (!schedstat_enabled())
3615 return;
3616
3617 rq = this_rq();
3618
3619 #ifdef CONFIG_SMP
3620 if (cpu == rq->cpu) {
3621 __schedstat_inc(rq->ttwu_local);
3622 __schedstat_inc(p->stats.nr_wakeups_local);
3623 } else {
3624 struct sched_domain *sd;
3625
3626 __schedstat_inc(p->stats.nr_wakeups_remote);
3627
3628 guard(rcu)();
3629 for_each_domain(rq->cpu, sd) {
3630 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3631 __schedstat_inc(sd->ttwu_wake_remote);
3632 break;
3633 }
3634 }
3635 }
3636
3637 if (wake_flags & WF_MIGRATED)
3638 __schedstat_inc(p->stats.nr_wakeups_migrate);
3639 #endif /* CONFIG_SMP */
3640
3641 __schedstat_inc(rq->ttwu_count);
3642 __schedstat_inc(p->stats.nr_wakeups);
3643
3644 if (wake_flags & WF_SYNC)
3645 __schedstat_inc(p->stats.nr_wakeups_sync);
3646 }
3647
3648 /*
3649 * Mark the task runnable.
3650 */
3651 static inline void ttwu_do_wakeup(struct task_struct *p)
3652 {
3653 WRITE_ONCE(p->__state, TASK_RUNNING);
3654 trace_sched_wakeup(p);
3655 }
3656
3657 static void
3658 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3659 struct rq_flags *rf)
3660 {
3661 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3662
3663 lockdep_assert_rq_held(rq);
3664
3665 if (p->sched_contributes_to_load)
3666 rq->nr_uninterruptible--;
3667
3668 #ifdef CONFIG_SMP
3669 if (wake_flags & WF_RQ_SELECTED)
3670 en_flags |= ENQUEUE_RQ_SELECTED;
3671 if (wake_flags & WF_MIGRATED)
3672 en_flags |= ENQUEUE_MIGRATED;
3673 else
3674 #endif
3675 if (p->in_iowait) {
3676 delayacct_blkio_end(p);
3677 atomic_dec(&task_rq(p)->nr_iowait);
3678 }
3679
3680 activate_task(rq, p, en_flags);
3681 wakeup_preempt(rq, p, wake_flags);
3682
3683 ttwu_do_wakeup(p);
3684
3685 #ifdef CONFIG_SMP
3686 if (p->sched_class->task_woken) {
3687 /*
3688 * Our task @p is fully woken up and running; so it's safe to
3689 * drop the rq->lock, hereafter rq is only used for statistics.
3690 */
3691 rq_unpin_lock(rq, rf);
3692 p->sched_class->task_woken(rq, p);
3693 rq_repin_lock(rq, rf);
3694 }
3695
3696 if (rq->idle_stamp) {
3697 u64 delta = rq_clock(rq) - rq->idle_stamp;
3698 u64 max = 2*rq->max_idle_balance_cost;
3699
3700 update_avg(&rq->avg_idle, delta);
3701
3702 if (rq->avg_idle > max)
3703 rq->avg_idle = max;
3704
3705 rq->idle_stamp = 0;
3706 }
3707 #endif
3708 }
3709
3710 /*
3711 * Consider @p being inside a wait loop:
3712 *
3713 * for (;;) {
3714 * set_current_state(TASK_UNINTERRUPTIBLE);
3715 *
3716 * if (CONDITION)
3717 * break;
3718 *
3719 * schedule();
3720 * }
3721 * __set_current_state(TASK_RUNNING);
3722 *
3723 * between set_current_state() and schedule(). In this case @p is still
3724 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3725 * an atomic manner.
3726 *
3727 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3728 * then schedule() must still happen and p->state can be changed to
3729 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3730 * need to do a full wakeup with enqueue.
3731 *
3732 * Returns: %true when the wakeup is done,
3733 * %false otherwise.
3734 */
3735 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3736 {
3737 struct rq_flags rf;
3738 struct rq *rq;
3739 int ret = 0;
3740
3741 rq = __task_rq_lock(p, &rf);
3742 if (task_on_rq_queued(p)) {
3743 update_rq_clock(rq);
3744 if (p->se.sched_delayed)
3745 enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
3746 if (!task_on_cpu(rq, p)) {
3747 /*
3748 * When on_rq && !on_cpu the task is preempted, see if
3749 * it should preempt the task that is current now.
3750 */
3751 wakeup_preempt(rq, p, wake_flags);
3752 }
3753 ttwu_do_wakeup(p);
3754 ret = 1;
3755 }
3756 __task_rq_unlock(rq, &rf);
3757
3758 return ret;
3759 }
3760
3761 #ifdef CONFIG_SMP
3762 void sched_ttwu_pending(void *arg)
3763 {
3764 struct llist_node *llist = arg;
3765 struct rq *rq = this_rq();
3766 struct task_struct *p, *t;
3767 struct rq_flags rf;
3768
3769 if (!llist)
3770 return;
3771
3772 rq_lock_irqsave(rq, &rf);
3773 update_rq_clock(rq);
3774
3775 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3776 if (WARN_ON_ONCE(p->on_cpu))
3777 smp_cond_load_acquire(&p->on_cpu, !VAL);
3778
3779 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3780 set_task_cpu(p, cpu_of(rq));
3781
3782 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3783 }
3784
3785 /*
3786 * Must be after enqueueing at least one task such that
3787 * idle_cpu() does not observe a false-negative -- if it does,
3788 * it is possible for select_idle_siblings() to stack a number
3789 * of tasks on this CPU during that window.
3790 *
3791 * It is OK to clear ttwu_pending when another task is pending.
3792 * We will receive IPI after local IRQ enabled and then enqueue it.
3793 * Since now nr_running > 0, idle_cpu() will always get correct result.
3794 */
3795 WRITE_ONCE(rq->ttwu_pending, 0);
3796 rq_unlock_irqrestore(rq, &rf);
3797 }
3798
3799 /*
3800 * Prepare the scene for sending an IPI for a remote smp_call
3801 *
3802 * Returns true if the caller can proceed with sending the IPI.
3803 * Returns false otherwise.
3804 */
3805 bool call_function_single_prep_ipi(int cpu)
3806 {
3807 if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3808 trace_sched_wake_idle_without_ipi(cpu);
3809 return false;
3810 }
3811
3812 return true;
3813 }
3814
3815 /*
3816 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3817 * necessary. The wakee CPU on receipt of the IPI will queue the task
3818 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
3819 * of the wakeup instead of the waker.
3820 */
3821 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3822 {
3823 struct rq *rq = cpu_rq(cpu);
3824
3825 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3826
3827 WRITE_ONCE(rq->ttwu_pending, 1);
3828 __smp_call_single_queue(cpu, &p->wake_entry.llist);
3829 }
3830
3831 void wake_up_if_idle(int cpu)
3832 {
3833 struct rq *rq = cpu_rq(cpu);
3834
3835 guard(rcu)();
3836 if (is_idle_task(rcu_dereference(rq->curr))) {
3837 guard(rq_lock_irqsave)(rq);
3838 if (is_idle_task(rq->curr))
3839 resched_curr(rq);
3840 }
3841 }
3842
3843 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3844 {
3845 if (!sched_asym_cpucap_active())
3846 return true;
3847
3848 if (this_cpu == that_cpu)
3849 return true;
3850
3851 return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3852 }
3853
3854 bool cpus_share_cache(int this_cpu, int that_cpu)
3855 {
3856 if (this_cpu == that_cpu)
3857 return true;
3858
3859 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3860 }
3861
3862 /*
3863 * Whether CPUs share cache resources, which means LLC on non-cluster
3864 * machines and LLC tag or L2 on machines with clusters.
3865 */
3866 bool cpus_share_resources(int this_cpu, int that_cpu)
3867 {
3868 if (this_cpu == that_cpu)
3869 return true;
3870
3871 return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3872 }
3873
3874 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3875 {
3876 /*
3877 * The BPF scheduler may depend on select_task_rq() being invoked during
3878 * wakeups. In addition, @p may end up executing on a different CPU
3879 * regardless of what happens in the wakeup path making the ttwu_queue
3880 * optimization less meaningful. Skip if on SCX.
3881 */
3882 if (task_on_scx(p))
3883 return false;
3884
3885 /*
3886 * Do not complicate things with the async wake_list while the CPU is
3887 * in hotplug state.
3888 */
3889 if (!cpu_active(cpu))
3890 return false;
3891
3892 /* Ensure the task will still be allowed to run on the CPU. */
3893 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3894 return false;
3895
3896 /*
3897 * If the CPU does not share cache, then queue the task on the
3898 * remote rqs wakelist to avoid accessing remote data.
3899 */
3900 if (!cpus_share_cache(smp_processor_id(), cpu))
3901 return true;
3902
3903 if (cpu == smp_processor_id())
3904 return false;
3905
3906 /*
3907 * If the wakee cpu is idle, or the task is descheduling and the
3908 * only running task on the CPU, then use the wakelist to offload
3909 * the task activation to the idle (or soon-to-be-idle) CPU as
3910 * the current CPU is likely busy. nr_running is checked to
3911 * avoid unnecessary task stacking.
3912 *
3913 * Note that we can only get here with (wakee) p->on_rq=0,
3914 * p->on_cpu can be whatever, we've done the dequeue, so
3915 * the wakee has been accounted out of ->nr_running.
3916 */
3917 if (!cpu_rq(cpu)->nr_running)
3918 return true;
3919
3920 return false;
3921 }
3922
3923 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3924 {
3925 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3926 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3927 __ttwu_queue_wakelist(p, cpu, wake_flags);
3928 return true;
3929 }
3930
3931 return false;
3932 }
3933
3934 #else /* !CONFIG_SMP */
3935
3936 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3937 {
3938 return false;
3939 }
3940
3941 #endif /* CONFIG_SMP */
3942
3943 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3944 {
3945 struct rq *rq = cpu_rq(cpu);
3946 struct rq_flags rf;
3947
3948 if (ttwu_queue_wakelist(p, cpu, wake_flags))
3949 return;
3950
3951 rq_lock(rq, &rf);
3952 update_rq_clock(rq);
3953 ttwu_do_activate(rq, p, wake_flags, &rf);
3954 rq_unlock(rq, &rf);
3955 }
3956
3957 /*
3958 * Invoked from try_to_wake_up() to check whether the task can be woken up.
3959 *
3960 * The caller holds p::pi_lock if p != current or has preemption
3961 * disabled when p == current.
3962 *
3963 * The rules of saved_state:
3964 *
3965 * The related locking code always holds p::pi_lock when updating
3966 * p::saved_state, which means the code is fully serialized in both cases.
3967 *
3968 * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
3969 * No other bits set. This allows to distinguish all wakeup scenarios.
3970 *
3971 * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
3972 * allows us to prevent early wakeup of tasks before they can be run on
3973 * asymmetric ISA architectures (eg ARMv9).
3974 */
3975 static __always_inline
3976 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
3977 {
3978 int match;
3979
3980 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
3981 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
3982 state != TASK_RTLOCK_WAIT);
3983 }
3984
3985 *success = !!(match = __task_state_match(p, state));
3986
3987 /*
3988 * Saved state preserves the task state across blocking on
3989 * an RT lock or TASK_FREEZABLE tasks. If the state matches,
3990 * set p::saved_state to TASK_RUNNING, but do not wake the task
3991 * because it waits for a lock wakeup or __thaw_task(). Also
3992 * indicate success because from the regular waker's point of
3993 * view this has succeeded.
3994 *
3995 * After acquiring the lock the task will restore p::__state
3996 * from p::saved_state which ensures that the regular
3997 * wakeup is not lost. The restore will also set
3998 * p::saved_state to TASK_RUNNING so any further tests will
3999 * not result in false positives vs. @success
4000 */
4001 if (match < 0)
4002 p->saved_state = TASK_RUNNING;
4003
4004 return match > 0;
4005 }
4006
4007 /*
4008 * Notes on Program-Order guarantees on SMP systems.
4009 *
4010 * MIGRATION
4011 *
4012 * The basic program-order guarantee on SMP systems is that when a task [t]
4013 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4014 * execution on its new CPU [c1].
4015 *
4016 * For migration (of runnable tasks) this is provided by the following means:
4017 *
4018 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4019 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4020 * rq(c1)->lock (if not at the same time, then in that order).
4021 * C) LOCK of the rq(c1)->lock scheduling in task
4022 *
4023 * Release/acquire chaining guarantees that B happens after A and C after B.
4024 * Note: the CPU doing B need not be c0 or c1
4025 *
4026 * Example:
4027 *
4028 * CPU0 CPU1 CPU2
4029 *
4030 * LOCK rq(0)->lock
4031 * sched-out X
4032 * sched-in Y
4033 * UNLOCK rq(0)->lock
4034 *
4035 * LOCK rq(0)->lock // orders against CPU0
4036 * dequeue X
4037 * UNLOCK rq(0)->lock
4038 *
4039 * LOCK rq(1)->lock
4040 * enqueue X
4041 * UNLOCK rq(1)->lock
4042 *
4043 * LOCK rq(1)->lock // orders against CPU2
4044 * sched-out Z
4045 * sched-in X
4046 * UNLOCK rq(1)->lock
4047 *
4048 *
4049 * BLOCKING -- aka. SLEEP + WAKEUP
4050 *
4051 * For blocking we (obviously) need to provide the same guarantee as for
4052 * migration. However the means are completely different as there is no lock
4053 * chain to provide order. Instead we do:
4054 *
4055 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4056 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4057 *
4058 * Example:
4059 *
4060 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4061 *
4062 * LOCK rq(0)->lock LOCK X->pi_lock
4063 * dequeue X
4064 * sched-out X
4065 * smp_store_release(X->on_cpu, 0);
4066 *
4067 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4068 * X->state = WAKING
4069 * set_task_cpu(X,2)
4070 *
4071 * LOCK rq(2)->lock
4072 * enqueue X
4073 * X->state = RUNNING
4074 * UNLOCK rq(2)->lock
4075 *
4076 * LOCK rq(2)->lock // orders against CPU1
4077 * sched-out Z
4078 * sched-in X
4079 * UNLOCK rq(2)->lock
4080 *
4081 * UNLOCK X->pi_lock
4082 * UNLOCK rq(0)->lock
4083 *
4084 *
4085 * However, for wakeups there is a second guarantee we must provide, namely we
4086 * must ensure that CONDITION=1 done by the caller can not be reordered with
4087 * accesses to the task state; see try_to_wake_up() and set_current_state().
4088 */
4089
4090 /**
4091 * try_to_wake_up - wake up a thread
4092 * @p: the thread to be awakened
4093 * @state: the mask of task states that can be woken
4094 * @wake_flags: wake modifier flags (WF_*)
4095 *
4096 * Conceptually does:
4097 *
4098 * If (@state & @p->state) @p->state = TASK_RUNNING.
4099 *
4100 * If the task was not queued/runnable, also place it back on a runqueue.
4101 *
4102 * This function is atomic against schedule() which would dequeue the task.
4103 *
4104 * It issues a full memory barrier before accessing @p->state, see the comment
4105 * with set_current_state().
4106 *
4107 * Uses p->pi_lock to serialize against concurrent wake-ups.
4108 *
4109 * Relies on p->pi_lock stabilizing:
4110 * - p->sched_class
4111 * - p->cpus_ptr
4112 * - p->sched_task_group
4113 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4114 *
4115 * Tries really hard to only take one task_rq(p)->lock for performance.
4116 * Takes rq->lock in:
4117 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4118 * - ttwu_queue() -- new rq, for enqueue of the task;
4119 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4120 *
4121 * As a consequence we race really badly with just about everything. See the
4122 * many memory barriers and their comments for details.
4123 *
4124 * Return: %true if @p->state changes (an actual wakeup was done),
4125 * %false otherwise.
4126 */
4127 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4128 {
4129 guard(preempt)();
4130 int cpu, success = 0;
4131
4132 wake_flags |= WF_TTWU;
4133
4134 if (p == current) {
4135 /*
4136 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4137 * == smp_processor_id()'. Together this means we can special
4138 * case the whole 'p->on_rq && ttwu_runnable()' case below
4139 * without taking any locks.
4140 *
4141 * Specifically, given current runs ttwu() we must be before
4142 * schedule()'s block_task(), as such this must not observe
4143 * sched_delayed.
4144 *
4145 * In particular:
4146 * - we rely on Program-Order guarantees for all the ordering,
4147 * - we're serialized against set_special_state() by virtue of
4148 * it disabling IRQs (this allows not taking ->pi_lock).
4149 */
4150 SCHED_WARN_ON(p->se.sched_delayed);
4151 if (!ttwu_state_match(p, state, &success))
4152 goto out;
4153
4154 trace_sched_waking(p);
4155 ttwu_do_wakeup(p);
4156 goto out;
4157 }
4158
4159 /*
4160 * If we are going to wake up a thread waiting for CONDITION we
4161 * need to ensure that CONDITION=1 done by the caller can not be
4162 * reordered with p->state check below. This pairs with smp_store_mb()
4163 * in set_current_state() that the waiting thread does.
4164 */
4165 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4166 smp_mb__after_spinlock();
4167 if (!ttwu_state_match(p, state, &success))
4168 break;
4169
4170 trace_sched_waking(p);
4171
4172 /*
4173 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4174 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4175 * in smp_cond_load_acquire() below.
4176 *
4177 * sched_ttwu_pending() try_to_wake_up()
4178 * STORE p->on_rq = 1 LOAD p->state
4179 * UNLOCK rq->lock
4180 *
4181 * __schedule() (switch to task 'p')
4182 * LOCK rq->lock smp_rmb();
4183 * smp_mb__after_spinlock();
4184 * UNLOCK rq->lock
4185 *
4186 * [task p]
4187 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4188 *
4189 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4190 * __schedule(). See the comment for smp_mb__after_spinlock().
4191 *
4192 * A similar smp_rmb() lives in __task_needs_rq_lock().
4193 */
4194 smp_rmb();
4195 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4196 break;
4197
4198 #ifdef CONFIG_SMP
4199 /*
4200 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4201 * possible to, falsely, observe p->on_cpu == 0.
4202 *
4203 * One must be running (->on_cpu == 1) in order to remove oneself
4204 * from the runqueue.
4205 *
4206 * __schedule() (switch to task 'p') try_to_wake_up()
4207 * STORE p->on_cpu = 1 LOAD p->on_rq
4208 * UNLOCK rq->lock
4209 *
4210 * __schedule() (put 'p' to sleep)
4211 * LOCK rq->lock smp_rmb();
4212 * smp_mb__after_spinlock();
4213 * STORE p->on_rq = 0 LOAD p->on_cpu
4214 *
4215 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4216 * __schedule(). See the comment for smp_mb__after_spinlock().
4217 *
4218 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4219 * schedule()'s deactivate_task() has 'happened' and p will no longer
4220 * care about its own p->state. See the comment in __schedule().
4221 */
4222 smp_acquire__after_ctrl_dep();
4223
4224 /*
4225 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4226 * == 0), which means we need to do an enqueue, change p->state to
4227 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4228 * enqueue, such as ttwu_queue_wakelist().
4229 */
4230 WRITE_ONCE(p->__state, TASK_WAKING);
4231
4232 /*
4233 * If the owning (remote) CPU is still in the middle of schedule() with
4234 * this task as prev, consider queueing p on the remote CPU's wake_list
4235 * which potentially sends an IPI instead of spinning on p->on_cpu to
4236 * let the waker make forward progress. This is safe because IRQs are
4237 * disabled and the IPI will deliver after on_cpu is cleared.
4238 *
4239 * Ensure we load task_cpu(p) after p->on_cpu:
4240 *
4241 * set_task_cpu(p, cpu);
4242 * STORE p->cpu = @cpu
4243 * __schedule() (switch to task 'p')
4244 * LOCK rq->lock
4245 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4246 * STORE p->on_cpu = 1 LOAD p->cpu
4247 *
4248 * to ensure we observe the correct CPU on which the task is currently
4249 * scheduling.
4250 */
4251 if (smp_load_acquire(&p->on_cpu) &&
4252 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4253 break;
4254
4255 /*
4256 * If the owning (remote) CPU is still in the middle of schedule() with
4257 * this task as prev, wait until it's done referencing the task.
4258 *
4259 * Pairs with the smp_store_release() in finish_task().
4260 *
4261 * This ensures that tasks getting woken will be fully ordered against
4262 * their previous state and preserve Program Order.
4263 */
4264 smp_cond_load_acquire(&p->on_cpu, !VAL);
4265
4266 cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
4267 if (task_cpu(p) != cpu) {
4268 if (p->in_iowait) {
4269 delayacct_blkio_end(p);
4270 atomic_dec(&task_rq(p)->nr_iowait);
4271 }
4272
4273 wake_flags |= WF_MIGRATED;
4274 psi_ttwu_dequeue(p);
4275 set_task_cpu(p, cpu);
4276 }
4277 #else
4278 cpu = task_cpu(p);
4279 #endif /* CONFIG_SMP */
4280
4281 ttwu_queue(p, cpu, wake_flags);
4282 }
4283 out:
4284 if (success)
4285 ttwu_stat(p, task_cpu(p), wake_flags);
4286
4287 return success;
4288 }
4289
4290 static bool __task_needs_rq_lock(struct task_struct *p)
4291 {
4292 unsigned int state = READ_ONCE(p->__state);
4293
4294 /*
4295 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4296 * the task is blocked. Make sure to check @state since ttwu() can drop
4297 * locks at the end, see ttwu_queue_wakelist().
4298 */
4299 if (state == TASK_RUNNING || state == TASK_WAKING)
4300 return true;
4301
4302 /*
4303 * Ensure we load p->on_rq after p->__state, otherwise it would be
4304 * possible to, falsely, observe p->on_rq == 0.
4305 *
4306 * See try_to_wake_up() for a longer comment.
4307 */
4308 smp_rmb();
4309 if (p->on_rq)
4310 return true;
4311
4312 #ifdef CONFIG_SMP
4313 /*
4314 * Ensure the task has finished __schedule() and will not be referenced
4315 * anymore. Again, see try_to_wake_up() for a longer comment.
4316 */
4317 smp_rmb();
4318 smp_cond_load_acquire(&p->on_cpu, !VAL);
4319 #endif
4320
4321 return false;
4322 }
4323
4324 /**
4325 * task_call_func - Invoke a function on task in fixed state
4326 * @p: Process for which the function is to be invoked, can be @current.
4327 * @func: Function to invoke.
4328 * @arg: Argument to function.
4329 *
4330 * Fix the task in its current state by avoiding wakeups and/or rq operations
4331 * and call @func(@arg) on it. This function can use task_is_runnable() and
4332 * task_curr() to work out what the state is, if required. Given that @func
4333 * can be invoked with a runqueue lock held, it had better be quite
4334 * lightweight.
4335 *
4336 * Returns:
4337 * Whatever @func returns
4338 */
4339 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4340 {
4341 struct rq *rq = NULL;
4342 struct rq_flags rf;
4343 int ret;
4344
4345 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4346
4347 if (__task_needs_rq_lock(p))
4348 rq = __task_rq_lock(p, &rf);
4349
4350 /*
4351 * At this point the task is pinned; either:
4352 * - blocked and we're holding off wakeups (pi->lock)
4353 * - woken, and we're holding off enqueue (rq->lock)
4354 * - queued, and we're holding off schedule (rq->lock)
4355 * - running, and we're holding off de-schedule (rq->lock)
4356 *
4357 * The called function (@func) can use: task_curr(), p->on_rq and
4358 * p->__state to differentiate between these states.
4359 */
4360 ret = func(p, arg);
4361
4362 if (rq)
4363 rq_unlock(rq, &rf);
4364
4365 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4366 return ret;
4367 }
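/*
 * Illustrative sketch only -- not part of the scheduler proper. A minimal
 * task_call_f callback showing the calling convention; the name
 * my_task_probe() is hypothetical.
 */
static int my_task_probe(struct task_struct *p, void *arg)
{
	/* @p is pinned here, so task_curr() and p->on_rq are stable to read. */
	return task_curr(p);
}

/* Hypothetical use: int running = task_call_func(p, my_task_probe, NULL); */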
4368
4369 /**
4370 * cpu_curr_snapshot - Return a snapshot of the currently running task
4371 * @cpu: The CPU on which to snapshot the task.
4372 *
4373 * Returns the task_struct pointer of the task "currently" running on
4374 * the specified CPU.
4375 *
4376 * If the specified CPU was offline, the return value is whatever it
4377 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4378 * task, but there is no guarantee. Callers wishing a useful return
4379 * value must take some action to ensure that the specified CPU remains
4380 * online throughout.
4381 *
4382 * This function executes full memory barriers before and after fetching
4383 * the pointer, which permits the caller to confine this function's fetch
4384 * with respect to the caller's accesses to other shared variables.
4385 */
4386 struct task_struct *cpu_curr_snapshot(int cpu)
4387 {
4388 struct rq *rq = cpu_rq(cpu);
4389 struct task_struct *t;
4390 struct rq_flags rf;
4391
4392 rq_lock_irqsave(rq, &rf);
4393 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4394 t = rcu_dereference(cpu_curr(cpu));
4395 rq_unlock_irqrestore(rq, &rf);
4396 smp_mb(); /* Pairing determined by caller's synchronization design. */
4397
4398 return t;
4399 }
4400
4401 /**
4402 * wake_up_process - Wake up a specific process
4403 * @p: The process to be woken up.
4404 *
4405 * Attempt to wake up the nominated process and move it to the set of runnable
4406 * processes.
4407 *
4408 * Return: 1 if the process was woken up, 0 if it was already running.
4409 *
4410 * This function executes a full memory barrier before accessing the task state.
4411 */
4412 int wake_up_process(struct task_struct *p)
4413 {
4414 return try_to_wake_up(p, TASK_NORMAL, 0);
4415 }
4416 EXPORT_SYMBOL(wake_up_process);
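/*
 * Illustrative waker-side sketch (the 'my_cond' flag and 'my_waiter' task
 * pointer are hypothetical): publish the condition before the wakeup so it
 * pairs with the sleeper's set_current_state(), as described in
 * try_to_wake_up() above:
 *
 *	WRITE_ONCE(my_cond, 1);
 *	wake_up_process(my_waiter);
 */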
4417
4418 int wake_up_state(struct task_struct *p, unsigned int state)
4419 {
4420 return try_to_wake_up(p, state, 0);
4421 }
4422
4423 /*
4424 * Perform scheduler related setup for a newly forked process p.
4425 * p is forked by current.
4426 *
4427 * __sched_fork() is basic setup used by init_idle() too:
4428 */
4429 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4430 {
4431 p->on_rq = 0;
4432
4433 p->se.on_rq = 0;
4434 p->se.exec_start = 0;
4435 p->se.sum_exec_runtime = 0;
4436 p->se.prev_sum_exec_runtime = 0;
4437 p->se.nr_migrations = 0;
4438 p->se.vruntime = 0;
4439 p->se.vlag = 0;
4440 INIT_LIST_HEAD(&p->se.group_node);
4441
4442 /* A delayed task cannot be in clone(). */
4443 SCHED_WARN_ON(p->se.sched_delayed);
4444
4445 #ifdef CONFIG_FAIR_GROUP_SCHED
4446 p->se.cfs_rq = NULL;
4447 #endif
4448
4449 #ifdef CONFIG_SCHEDSTATS
4450 /* Even if schedstat is disabled, there should not be garbage */
4451 memset(&p->stats, 0, sizeof(p->stats));
4452 #endif
4453
4454 init_dl_entity(&p->dl);
4455
4456 INIT_LIST_HEAD(&p->rt.run_list);
4457 p->rt.timeout = 0;
4458 p->rt.time_slice = sched_rr_timeslice;
4459 p->rt.on_rq = 0;
4460 p->rt.on_list = 0;
4461
4462 #ifdef CONFIG_SCHED_CLASS_EXT
4463 init_scx_entity(&p->scx);
4464 #endif
4465
4466 #ifdef CONFIG_PREEMPT_NOTIFIERS
4467 INIT_HLIST_HEAD(&p->preempt_notifiers);
4468 #endif
4469
4470 #ifdef CONFIG_COMPACTION
4471 p->capture_control = NULL;
4472 #endif
4473 init_numa_balancing(clone_flags, p);
4474 #ifdef CONFIG_SMP
4475 p->wake_entry.u_flags = CSD_TYPE_TTWU;
4476 p->migration_pending = NULL;
4477 #endif
4478 init_sched_mm_cid(p);
4479 }
4480
4481 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4482
4483 #ifdef CONFIG_NUMA_BALANCING
4484
4485 int sysctl_numa_balancing_mode;
4486
4487 static void __set_numabalancing_state(bool enabled)
4488 {
4489 if (enabled)
4490 static_branch_enable(&sched_numa_balancing);
4491 else
4492 static_branch_disable(&sched_numa_balancing);
4493 }
4494
4495 void set_numabalancing_state(bool enabled)
4496 {
4497 if (enabled)
4498 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4499 else
4500 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4501 __set_numabalancing_state(enabled);
4502 }
4503
4504 #ifdef CONFIG_PROC_SYSCTL
4505 static void reset_memory_tiering(void)
4506 {
4507 struct pglist_data *pgdat;
4508
4509 for_each_online_pgdat(pgdat) {
4510 pgdat->nbp_threshold = 0;
4511 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4512 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4513 }
4514 }
4515
4516 static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4517 void *buffer, size_t *lenp, loff_t *ppos)
4518 {
4519 struct ctl_table t;
4520 int err;
4521 int state = sysctl_numa_balancing_mode;
4522
4523 if (write && !capable(CAP_SYS_ADMIN))
4524 return -EPERM;
4525
4526 t = *table;
4527 t.data = &state;
4528 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4529 if (err < 0)
4530 return err;
4531 if (write) {
4532 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4533 (state & NUMA_BALANCING_MEMORY_TIERING))
4534 reset_memory_tiering();
4535 sysctl_numa_balancing_mode = state;
4536 __set_numabalancing_state(state);
4537 }
4538 return err;
4539 }
4540 #endif
4541 #endif
4542
4543 #ifdef CONFIG_SCHEDSTATS
4544
4545 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4546
4547 static void set_schedstats(bool enabled)
4548 {
4549 if (enabled)
4550 static_branch_enable(&sched_schedstats);
4551 else
4552 static_branch_disable(&sched_schedstats);
4553 }
4554
4555 void force_schedstat_enabled(void)
4556 {
4557 if (!schedstat_enabled()) {
4558 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4559 static_branch_enable(&sched_schedstats);
4560 }
4561 }
4562
4563 static int __init setup_schedstats(char *str)
4564 {
4565 int ret = 0;
4566 if (!str)
4567 goto out;
4568
4569 if (!strcmp(str, "enable")) {
4570 set_schedstats(true);
4571 ret = 1;
4572 } else if (!strcmp(str, "disable")) {
4573 set_schedstats(false);
4574 ret = 1;
4575 }
4576 out:
4577 if (!ret)
4578 pr_warn("Unable to parse schedstats=\n");
4579
4580 return ret;
4581 }
4582 __setup("schedstats=", setup_schedstats);
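/*
 * For reference: with the early param above, booting with
 * "schedstats=enable" (or "schedstats=disable") selects the initial state;
 * any other value trips the pr_warn() in setup_schedstats().
 */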
4583
4584 #ifdef CONFIG_PROC_SYSCTL
4585 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4586 size_t *lenp, loff_t *ppos)
4587 {
4588 struct ctl_table t;
4589 int err;
4590 int state = static_branch_likely(&sched_schedstats);
4591
4592 if (write && !capable(CAP_SYS_ADMIN))
4593 return -EPERM;
4594
4595 t = *table;
4596 t.data = &state;
4597 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4598 if (err < 0)
4599 return err;
4600 if (write)
4601 set_schedstats(state);
4602 return err;
4603 }
4604 #endif /* CONFIG_PROC_SYSCTL */
4605 #endif /* CONFIG_SCHEDSTATS */
4606
4607 #ifdef CONFIG_SYSCTL
4608 static struct ctl_table sched_core_sysctls[] = {
4609 #ifdef CONFIG_SCHEDSTATS
4610 {
4611 .procname = "sched_schedstats",
4612 .data = NULL,
4613 .maxlen = sizeof(unsigned int),
4614 .mode = 0644,
4615 .proc_handler = sysctl_schedstats,
4616 .extra1 = SYSCTL_ZERO,
4617 .extra2 = SYSCTL_ONE,
4618 },
4619 #endif /* CONFIG_SCHEDSTATS */
4620 #ifdef CONFIG_UCLAMP_TASK
4621 {
4622 .procname = "sched_util_clamp_min",
4623 .data = &sysctl_sched_uclamp_util_min,
4624 .maxlen = sizeof(unsigned int),
4625 .mode = 0644,
4626 .proc_handler = sysctl_sched_uclamp_handler,
4627 },
4628 {
4629 .procname = "sched_util_clamp_max",
4630 .data = &sysctl_sched_uclamp_util_max,
4631 .maxlen = sizeof(unsigned int),
4632 .mode = 0644,
4633 .proc_handler = sysctl_sched_uclamp_handler,
4634 },
4635 {
4636 .procname = "sched_util_clamp_min_rt_default",
4637 .data = &sysctl_sched_uclamp_util_min_rt_default,
4638 .maxlen = sizeof(unsigned int),
4639 .mode = 0644,
4640 .proc_handler = sysctl_sched_uclamp_handler,
4641 },
4642 #endif /* CONFIG_UCLAMP_TASK */
4643 #ifdef CONFIG_NUMA_BALANCING
4644 {
4645 .procname = "numa_balancing",
4646 .data = NULL, /* filled in by handler */
4647 .maxlen = sizeof(unsigned int),
4648 .mode = 0644,
4649 .proc_handler = sysctl_numa_balancing,
4650 .extra1 = SYSCTL_ZERO,
4651 .extra2 = SYSCTL_FOUR,
4652 },
4653 #endif /* CONFIG_NUMA_BALANCING */
4654 };
4655 static int __init sched_core_sysctl_init(void)
4656 {
4657 register_sysctl_init("kernel", sched_core_sysctls);
4658 return 0;
4659 }
4660 late_initcall(sched_core_sysctl_init);
4661 #endif /* CONFIG_SYSCTL */
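/*
 * The table above is registered under "kernel", so (depending on the config
 * options guarding each entry) the knobs show up as
 * /proc/sys/kernel/sched_schedstats, /proc/sys/kernel/sched_util_clamp_*
 * and /proc/sys/kernel/numa_balancing, the latter accepting 0-4 per the
 * SYSCTL_ZERO/SYSCTL_FOUR bounds above.
 */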
4662
4663 /*
4664 * fork()/clone()-time setup:
4665 */
4666 int sched_fork(unsigned long clone_flags, struct task_struct *p)
4667 {
4668 __sched_fork(clone_flags, p);
4669 /*
4670 * We mark the process as NEW here. This guarantees that
4671 * nobody will actually run it, and a signal or other external
4672 * event cannot wake it up and insert it on the runqueue either.
4673 */
4674 p->__state = TASK_NEW;
4675
4676 /*
4677 * Make sure we do not leak PI boosting priority to the child.
4678 */
4679 p->prio = current->normal_prio;
4680
4681 uclamp_fork(p);
4682
4683 /*
4684 * Revert to default priority/policy on fork if requested.
4685 */
4686 if (unlikely(p->sched_reset_on_fork)) {
4687 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4688 p->policy = SCHED_NORMAL;
4689 p->static_prio = NICE_TO_PRIO(0);
4690 p->rt_priority = 0;
4691 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4692 p->static_prio = NICE_TO_PRIO(0);
4693
4694 p->prio = p->normal_prio = p->static_prio;
4695 set_load_weight(p, false);
4696 p->se.custom_slice = 0;
4697 p->se.slice = sysctl_sched_base_slice;
4698
4699 /*
4700 * We don't need the reset flag anymore after the fork. It has
4701 * fulfilled its duty:
4702 */
4703 p->sched_reset_on_fork = 0;
4704 }
4705
4706 if (dl_prio(p->prio))
4707 return -EAGAIN;
4708
4709 scx_pre_fork(p);
4710
4711 if (rt_prio(p->prio)) {
4712 p->sched_class = &rt_sched_class;
4713 #ifdef CONFIG_SCHED_CLASS_EXT
4714 } else if (task_should_scx(p->policy)) {
4715 p->sched_class = &ext_sched_class;
4716 #endif
4717 } else {
4718 p->sched_class = &fair_sched_class;
4719 }
4720
4721 init_entity_runnable_average(&p->se);
4722
4723
4724 #ifdef CONFIG_SCHED_INFO
4725 if (likely(sched_info_on()))
4726 memset(&p->sched_info, 0, sizeof(p->sched_info));
4727 #endif
4728 #if defined(CONFIG_SMP)
4729 p->on_cpu = 0;
4730 #endif
4731 init_task_preempt_count(p);
4732 #ifdef CONFIG_SMP
4733 plist_node_init(&p->pushable_tasks, MAX_PRIO);
4734 RB_CLEAR_NODE(&p->pushable_dl_tasks);
4735 #endif
4736 return 0;
4737 }
4738
4739 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4740 {
4741 unsigned long flags;
4742
4743 /*
4744 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4745 * required yet, but lockdep gets upset if rules are violated.
4746 */
4747 raw_spin_lock_irqsave(&p->pi_lock, flags);
4748 #ifdef CONFIG_CGROUP_SCHED
4749 if (1) {
4750 struct task_group *tg;
4751 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4752 struct task_group, css);
4753 tg = autogroup_task_group(p, tg);
4754 p->sched_task_group = tg;
4755 }
4756 #endif
4757 rseq_migrate(p);
4758 /*
4759 * We're setting the CPU for the first time, we don't migrate,
4760 * so use __set_task_cpu().
4761 */
4762 __set_task_cpu(p, smp_processor_id());
4763 if (p->sched_class->task_fork)
4764 p->sched_class->task_fork(p);
4765 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4766
4767 return scx_fork(p);
4768 }
4769
4770 void sched_cancel_fork(struct task_struct *p)
4771 {
4772 scx_cancel_fork(p);
4773 }
4774
4775 void sched_post_fork(struct task_struct *p)
4776 {
4777 uclamp_post_fork(p);
4778 scx_post_fork(p);
4779 }
4780
4781 unsigned long to_ratio(u64 period, u64 runtime)
4782 {
4783 if (runtime == RUNTIME_INF)
4784 return BW_UNIT;
4785
4786 /*
4787 * Doing this here saves a lot of checks in all
4788 * the calling paths, and returning zero seems
4789 * safe for them anyway.
4790 */
4791 if (period == 0)
4792 return 0;
4793
4794 return div64_u64(runtime << BW_SHIFT, period);
4795 }
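/*
 * Worked example (values chosen purely for illustration), assuming the
 * BW_SHIFT/BW_UNIT definitions from sched.h (20 and 1 << 20): a runtime of
 * 5 ms out of a 10 ms period, both in nanoseconds, gives
 *
 *	to_ratio(10000000, 5000000) == (5000000 << 20) / 10000000
 *	                            == BW_UNIT / 2
 *
 * i.e. 50% expressed in fixed point.
 */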
4796
4797 /*
4798 * wake_up_new_task - wake up a newly created task for the first time.
4799 *
4800 * This function will do some initial scheduler statistics housekeeping
4801 * that must be done for every newly created context, then puts the task
4802 * on the runqueue and wakes it.
4803 */
4804 void wake_up_new_task(struct task_struct *p)
4805 {
4806 struct rq_flags rf;
4807 struct rq *rq;
4808 int wake_flags = WF_FORK;
4809
4810 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4811 WRITE_ONCE(p->__state, TASK_RUNNING);
4812 #ifdef CONFIG_SMP
4813 /*
4814 * Fork balancing, do it here and not earlier because:
4815 * - cpus_ptr can change in the fork path
4816 * - any previously selected CPU might disappear through hotplug
4817 *
4818 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4819 * as we're not fully set-up yet.
4820 */
4821 p->recent_used_cpu = task_cpu(p);
4822 rseq_migrate(p);
4823 __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
4824 #endif
4825 rq = __task_rq_lock(p, &rf);
4826 update_rq_clock(rq);
4827 post_init_entity_util_avg(p);
4828
4829 activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4830 trace_sched_wakeup_new(p);
4831 wakeup_preempt(rq, p, wake_flags);
4832 #ifdef CONFIG_SMP
4833 if (p->sched_class->task_woken) {
4834 /*
4835 * Nothing relies on rq->lock after this, so it's fine to
4836 * drop it.
4837 */
4838 rq_unpin_lock(rq, &rf);
4839 p->sched_class->task_woken(rq, p);
4840 rq_repin_lock(rq, &rf);
4841 }
4842 #endif
4843 task_rq_unlock(rq, p, &rf);
4844 }
4845
4846 #ifdef CONFIG_PREEMPT_NOTIFIERS
4847
4848 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4849
4850 void preempt_notifier_inc(void)
4851 {
4852 static_branch_inc(&preempt_notifier_key);
4853 }
4854 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4855
4856 void preempt_notifier_dec(void)
4857 {
4858 static_branch_dec(&preempt_notifier_key);
4859 }
4860 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4861
4862 /**
4863 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4864 * @notifier: notifier struct to register
4865 */
4866 void preempt_notifier_register(struct preempt_notifier *notifier)
4867 {
4868 if (!static_branch_unlikely(&preempt_notifier_key))
4869 WARN(1, "registering preempt_notifier while notifiers disabled\n");
4870
4871 hlist_add_head(&notifier->link, &current->preempt_notifiers);
4872 }
4873 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4874
4875 /**
4876 * preempt_notifier_unregister - no longer interested in preemption notifications
4877 * @notifier: notifier struct to unregister
4878 *
4879 * This is *not* safe to call from within a preemption notifier.
4880 */
4881 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4882 {
4883 hlist_del(&notifier->link);
4884 }
4885 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
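/*
 * Illustrative sketch of the notifier API (hypothetical my_* names),
 * assuming preempt_notifier_init() from <linux/preempt.h>. Registration
 * affects the calling task only, since the notifier is linked into
 * current->preempt_notifiers above.
 */
static void my_sched_in(struct preempt_notifier *pn, int cpu) { }
static void my_sched_out(struct preempt_notifier *pn, struct task_struct *next) { }

static struct preempt_ops my_preempt_ops = {
	.sched_in	= my_sched_in,
	.sched_out	= my_sched_out,
};

/*
 * Hypothetical use, from the task that wants the callbacks:
 *
 *	struct preempt_notifier pn;
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&pn, &my_preempt_ops);
 *	preempt_notifier_register(&pn);
 */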
4886
4887 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4888 {
4889 struct preempt_notifier *notifier;
4890
4891 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4892 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4893 }
4894
4895 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4896 {
4897 if (static_branch_unlikely(&preempt_notifier_key))
4898 __fire_sched_in_preempt_notifiers(curr);
4899 }
4900
4901 static void
4902 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4903 struct task_struct *next)
4904 {
4905 struct preempt_notifier *notifier;
4906
4907 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4908 notifier->ops->sched_out(notifier, next);
4909 }
4910
4911 static __always_inline void
4912 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4913 struct task_struct *next)
4914 {
4915 if (static_branch_unlikely(&preempt_notifier_key))
4916 __fire_sched_out_preempt_notifiers(curr, next);
4917 }
4918
4919 #else /* !CONFIG_PREEMPT_NOTIFIERS */
4920
4921 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4922 {
4923 }
4924
4925 static inline void
4926 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4927 struct task_struct *next)
4928 {
4929 }
4930
4931 #endif /* CONFIG_PREEMPT_NOTIFIERS */
4932
4933 static inline void prepare_task(struct task_struct *next)
4934 {
4935 #ifdef CONFIG_SMP
4936 /*
4937 * Claim the task as running, we do this before switching to it
4938 * such that any running task will have this set.
4939 *
4940 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4941 * its ordering comment.
4942 */
4943 WRITE_ONCE(next->on_cpu, 1);
4944 #endif
4945 }
4946
4947 static inline void finish_task(struct task_struct *prev)
4948 {
4949 #ifdef CONFIG_SMP
4950 /*
4951 * This must be the very last reference to @prev from this CPU. After
4952 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4953 * must ensure this doesn't happen until the switch is completely
4954 * finished.
4955 *
4956 * In particular, the load of prev->state in finish_task_switch() must
4957 * happen before this.
4958 *
4959 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4960 */
4961 smp_store_release(&prev->on_cpu, 0);
4962 #endif
4963 }
4964
4965 #ifdef CONFIG_SMP
4966
4967 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
4968 {
4969 void (*func)(struct rq *rq);
4970 struct balance_callback *next;
4971
4972 lockdep_assert_rq_held(rq);
4973
4974 while (head) {
4975 func = (void (*)(struct rq *))head->func;
4976 next = head->next;
4977 head->next = NULL;
4978 head = next;
4979
4980 func(rq);
4981 }
4982 }
4983
4984 static void balance_push(struct rq *rq);
4985
4986 /*
4987 * balance_push_callback is a right abuse of the callback interface and plays
4988 * by significantly different rules.
4989 *
4990 * Where the normal balance_callback's purpose is to be run in the same context
4991 * that queued it (only later, when it's safe to drop rq->lock again),
4992 * balance_push_callback is specifically targeted at __schedule().
4993 *
4994 * This abuse is tolerated because it places all the unlikely/odd cases behind
4995 * a single test, namely: rq->balance_callback == NULL.
4996 */
4997 struct balance_callback balance_push_callback = {
4998 .next = NULL,
4999 .func = balance_push,
5000 };
5001
5002 static inline struct balance_callback *
5003 __splice_balance_callbacks(struct rq *rq, bool split)
5004 {
5005 struct balance_callback *head = rq->balance_callback;
5006
5007 if (likely(!head))
5008 return NULL;
5009
5010 lockdep_assert_rq_held(rq);
5011 /*
5012 * Must not take balance_push_callback off the list when
5013 * splice_balance_callbacks() and balance_callbacks() are not
5014 * in the same rq->lock section.
5015 *
5016 * In that case it would be possible for __schedule() to interleave
5017 * and observe the list empty.
5018 */
5019 if (split && head == &balance_push_callback)
5020 head = NULL;
5021 else
5022 rq->balance_callback = NULL;
5023
5024 return head;
5025 }
5026
5027 struct balance_callback *splice_balance_callbacks(struct rq *rq)
5028 {
5029 return __splice_balance_callbacks(rq, true);
5030 }
5031
5032 static void __balance_callbacks(struct rq *rq)
5033 {
5034 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5035 }
5036
5037 void balance_callbacks(struct rq *rq, struct balance_callback *head)
5038 {
5039 unsigned long flags;
5040
5041 if (unlikely(head)) {
5042 raw_spin_rq_lock_irqsave(rq, flags);
5043 do_balance_callbacks(rq, head);
5044 raw_spin_rq_unlock_irqrestore(rq, flags);
5045 }
5046 }
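/*
 * Illustrative sketch (hypothetical my_* names), assuming the
 * queue_balance_callback() helper declared in sched.h: a per-CPU callback
 * object queued under rq->lock and later run by __balance_callbacks() or
 * balance_callbacks() as described above.
 *
 *	static void my_push_work(struct rq *rq) { ... }
 *	static DEFINE_PER_CPU(struct balance_callback, my_cb);
 *
 *	with rq->lock held:
 *	queue_balance_callback(rq, &per_cpu(my_cb, rq->cpu), my_push_work);
 */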
5047
5048 #else
5049
5050 static inline void __balance_callbacks(struct rq *rq)
5051 {
5052 }
5053
5054 #endif
5055
5056 static inline void
5057 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5058 {
5059 /*
5060 * Since the runqueue lock will be released by the next
5061 * task (which is an invalid locking op but in the case
5062 * of the scheduler it's an obvious special-case), we
5063 * do an early lockdep release here:
5064 */
5065 rq_unpin_lock(rq, rf);
5066 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5067 #ifdef CONFIG_DEBUG_SPINLOCK
5068 /* this is a valid case when another task releases the spinlock */
5069 rq_lockp(rq)->owner = next;
5070 #endif
5071 }
5072
5073 static inline void finish_lock_switch(struct rq *rq)
5074 {
5075 /*
5076 * If we are tracking spinlock dependencies then we have to
5077 * fix up the runqueue lock - which gets 'carried over' from
5078 * prev into current:
5079 */
5080 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5081 __balance_callbacks(rq);
5082 raw_spin_rq_unlock_irq(rq);
5083 }
5084
5085 /*
5086 * NOP if the arch has not defined these:
5087 */
5088
5089 #ifndef prepare_arch_switch
5090 # define prepare_arch_switch(next) do { } while (0)
5091 #endif
5092
5093 #ifndef finish_arch_post_lock_switch
5094 # define finish_arch_post_lock_switch() do { } while (0)
5095 #endif
5096
5097 static inline void kmap_local_sched_out(void)
5098 {
5099 #ifdef CONFIG_KMAP_LOCAL
5100 if (unlikely(current->kmap_ctrl.idx))
5101 __kmap_local_sched_out();
5102 #endif
5103 }
5104
5105 static inline void kmap_local_sched_in(void)
5106 {
5107 #ifdef CONFIG_KMAP_LOCAL
5108 if (unlikely(current->kmap_ctrl.idx))
5109 __kmap_local_sched_in();
5110 #endif
5111 }
5112
5113 /**
5114 * prepare_task_switch - prepare to switch tasks
5115 * @rq: the runqueue preparing to switch
5116 * @prev: the current task that is being switched out
5117 * @next: the task we are going to switch to.
5118 *
5119 * This is called with the rq lock held and interrupts off. It must
5120 * be paired with a subsequent finish_task_switch after the context
5121 * switch.
5122 *
5123 * prepare_task_switch sets up locking and calls architecture specific
5124 * hooks.
5125 */
5126 static inline void
5127 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5128 struct task_struct *next)
5129 {
5130 kcov_prepare_switch(prev);
5131 sched_info_switch(rq, prev, next);
5132 perf_event_task_sched_out(prev, next);
5133 rseq_preempt(prev);
5134 fire_sched_out_preempt_notifiers(prev, next);
5135 kmap_local_sched_out();
5136 prepare_task(next);
5137 prepare_arch_switch(next);
5138 }
5139
5140 /**
5141 * finish_task_switch - clean up after a task-switch
5142 * @prev: the thread we just switched away from.
5143 *
5144 * finish_task_switch must be called after the context switch, paired
5145 * with a prepare_task_switch call before the context switch.
5146 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5147 * and do any other architecture-specific cleanup actions.
5148 *
5149 * Note that we may have delayed dropping an mm in context_switch(). If
5150 * so, we finish that here outside of the runqueue lock. (Doing it
5151 * with the lock held can cause deadlocks; see schedule() for
5152 * details.)
5153 *
5154 * The context switch has flipped the stack from under us and restored the
5155 * local variables which were saved when this task called schedule() in the
5156 * past. 'prev == current' is still correct but we need to recalculate this_rq
5157 * because prev may have moved to another CPU.
5158 */
5159 static struct rq *finish_task_switch(struct task_struct *prev)
5160 __releases(rq->lock)
5161 {
5162 struct rq *rq = this_rq();
5163 struct mm_struct *mm = rq->prev_mm;
5164 unsigned int prev_state;
5165
5166 /*
5167 * The previous task will have left us with a preempt_count of 2
5168 * because it left us after:
5169 *
5170 * schedule()
5171 * preempt_disable(); // 1
5172 * __schedule()
5173 * raw_spin_lock_irq(&rq->lock) // 2
5174 *
5175 * Also, see FORK_PREEMPT_COUNT.
5176 */
5177 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5178 "corrupted preempt_count: %s/%d/0x%x\n",
5179 current->comm, current->pid, preempt_count()))
5180 preempt_count_set(FORK_PREEMPT_COUNT);
5181
5182 rq->prev_mm = NULL;
5183
5184 /*
5185 * A task struct has one reference for the use as "current".
5186 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5187 * schedule one last time. The schedule call will never return, and
5188 * the scheduled task must drop that reference.
5189 *
5190 * We must observe prev->state before clearing prev->on_cpu (in
5191 * finish_task), otherwise a concurrent wakeup can get prev
5192 * running on another CPU and we could race with its RUNNING -> DEAD
5193 * transition, resulting in a double drop.
5194 */
5195 prev_state = READ_ONCE(prev->__state);
5196 vtime_task_switch(prev);
5197 perf_event_task_sched_in(prev, current);
5198 finish_task(prev);
5199 tick_nohz_task_switch();
5200 finish_lock_switch(rq);
5201 finish_arch_post_lock_switch();
5202 kcov_finish_switch(current);
5203 /*
5204 * kmap_local_sched_out() is invoked with rq::lock held and
5205 * interrupts disabled. There is no requirement for that, but the
5206 * sched out code does not have an interrupt enabled section.
5207 * Restoring the maps on sched in does not require interrupts being
5208 * disabled either.
5209 */
5210 kmap_local_sched_in();
5211
5212 fire_sched_in_preempt_notifiers(current);
5213 /*
5214 * When switching through a kernel thread, the loop in
5215 * membarrier_{private,global}_expedited() may have observed that
5216 * kernel thread and not issued an IPI. It is therefore possible to
5217 * schedule between user->kernel->user threads without passing though
5218 * switch_mm(). Membarrier requires a barrier after storing to
5219 * rq->curr, before returning to userspace, so provide them here:
5220 *
5221 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5222 * provided by mmdrop_lazy_tlb(),
5223 * - a sync_core for SYNC_CORE.
5224 */
5225 if (mm) {
5226 membarrier_mm_sync_core_before_usermode(mm);
5227 mmdrop_lazy_tlb_sched(mm);
5228 }
5229
5230 if (unlikely(prev_state == TASK_DEAD)) {
5231 if (prev->sched_class->task_dead)
5232 prev->sched_class->task_dead(prev);
5233
5234 /* Task is done with its stack. */
5235 put_task_stack(prev);
5236
5237 put_task_struct_rcu_user(prev);
5238 }
5239
5240 return rq;
5241 }
5242
5243 /**
5244 * schedule_tail - first thing a freshly forked thread must call.
5245 * @prev: the thread we just switched away from.
5246 */
5247 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5248 __releases(rq->lock)
5249 {
5250 /*
5251 * New tasks start with FORK_PREEMPT_COUNT, see there and
5252 * finish_task_switch() for details.
5253 *
5254 * finish_task_switch() will drop rq->lock() and lower preempt_count
5255 * and the preempt_enable() will end up enabling preemption (on
5256 * PREEMPT_COUNT kernels).
5257 */
5258
5259 finish_task_switch(prev);
5260 preempt_enable();
5261
5262 if (current->set_child_tid)
5263 put_user(task_pid_vnr(current), current->set_child_tid);
5264
5265 calculate_sigpending();
5266 }
5267
5268 /*
5269 * context_switch - switch to the new MM and the new thread's register state.
5270 */
5271 static __always_inline struct rq *
5272 context_switch(struct rq *rq, struct task_struct *prev,
5273 struct task_struct *next, struct rq_flags *rf)
5274 {
5275 prepare_task_switch(rq, prev, next);
5276
5277 /*
5278 * For paravirt, this is coupled with an exit in switch_to to
5279 * combine the page table reload and the switch backend into
5280 * one hypercall.
5281 */
5282 arch_start_context_switch(prev);
5283
5284 /*
5285 * kernel -> kernel lazy + transfer active
5286 * user -> kernel lazy + mmgrab_lazy_tlb() active
5287 *
5288 * kernel -> user switch + mmdrop_lazy_tlb() active
5289 * user -> user switch
5290 *
5291 * switch_mm_cid() needs to be updated if the barriers provided
5292 * by context_switch() are modified.
5293 */
5294 if (!next->mm) { // to kernel
5295 enter_lazy_tlb(prev->active_mm, next);
5296
5297 next->active_mm = prev->active_mm;
5298 if (prev->mm) // from user
5299 mmgrab_lazy_tlb(prev->active_mm);
5300 else
5301 prev->active_mm = NULL;
5302 } else { // to user
5303 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5304 /*
5305 * sys_membarrier() requires an smp_mb() between setting
5306 * rq->curr / membarrier_switch_mm() and returning to userspace.
5307 *
5308 * The below provides this either through switch_mm(), or in
5309 * case 'prev->active_mm == next->mm' through
5310 * finish_task_switch()'s mmdrop().
5311 */
5312 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5313 lru_gen_use_mm(next->mm);
5314
5315 if (!prev->mm) { // from kernel
5316 /* will mmdrop_lazy_tlb() in finish_task_switch(). */
5317 rq->prev_mm = prev->active_mm;
5318 prev->active_mm = NULL;
5319 }
5320 }
5321
5322 /* switch_mm_cid() requires the memory barriers above. */
5323 switch_mm_cid(rq, prev, next);
5324
5325 prepare_lock_switch(rq, next, rf);
5326
5327 /* Here we just switch the register state and the stack. */
5328 switch_to(prev, next, prev);
5329 barrier();
5330
5331 return finish_task_switch(prev);
5332 }
5333
5334 /*
5335 * nr_running and nr_context_switches:
5336 *
5337 * externally visible scheduler statistics: current number of runnable
5338 * threads, total number of context switches performed since bootup.
5339 */
5340 unsigned int nr_running(void)
5341 {
5342 unsigned int i, sum = 0;
5343
5344 for_each_online_cpu(i)
5345 sum += cpu_rq(i)->nr_running;
5346
5347 return sum;
5348 }
5349
5350 /*
5351 * Check if only the current task is running on the CPU.
5352 *
5353 * Caution: this function does not check that the caller has disabled
5354 * preemption, thus the result might have a time-of-check-to-time-of-use
5355 * race. The caller is responsible to use it correctly, for example:
5356 *
5357 * - from a non-preemptible section (of course)
5358 *
5359 * - from a thread that is bound to a single CPU
5360 *
5361 * - in a loop with very short iterations (e.g. a polling loop)
5362 */
5363 bool single_task_running(void)
5364 {
5365 return raw_rq()->nr_running == 1;
5366 }
5367 EXPORT_SYMBOL(single_task_running);
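/*
 * Illustrative sketch of the polling-loop case mentioned above (the
 * 'my_done' flag is hypothetical):
 *
 *	while (!READ_ONCE(my_done) && single_task_running())
 *		cpu_relax();
 */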
5368
5369 unsigned long long nr_context_switches_cpu(int cpu)
5370 {
5371 return cpu_rq(cpu)->nr_switches;
5372 }
5373
5374 unsigned long long nr_context_switches(void)
5375 {
5376 int i;
5377 unsigned long long sum = 0;
5378
5379 for_each_possible_cpu(i)
5380 sum += cpu_rq(i)->nr_switches;
5381
5382 return sum;
5383 }
5384
5385 /*
5386 * Consumers of these two interfaces, like for example the cpuidle menu
5387 * governor, are using nonsensical data: they prefer shallow idle states for
5388 * a CPU that has IO-wait, even though that CPU might not end up running the
5389 * task when it does become runnable.
5390 */
5391
5392 unsigned int nr_iowait_cpu(int cpu)
5393 {
5394 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5395 }
5396
5397 /*
5398 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5399 *
5400 * The idea behind IO-wait accounting is to account the idle time that we could
5401 * have spent running if it were not for IO. That is, if we were to improve the
5402 * storage performance, we'd have a proportional reduction in IO-wait time.
5403 *
5404 * This all works nicely on UP, where, when a task blocks on IO, we account
5405 * idle time as IO-wait, because if the storage were faster, it could've been
5406 * running and we'd not be idle.
5407 *
5408 * This has been extended to SMP, by doing the same for each CPU. This however
5409 * is broken.
5410 *
5411 * Imagine for instance the case where two tasks block on one CPU: only that one
5412 * CPU will have IO-wait accounted, while the other has regular idle. Even
5413 * though, if the storage were faster, both could've run at the same time,
5414 * utilising both CPUs.
5415 *
5416 * This means that, when looking globally, the current IO-wait accounting on
5417 * SMP is a lower bound, due to under-accounting.
5418 *
5419 * Worse, since the numbers are provided per CPU, they are sometimes
5420 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5421 * associated with any one particular CPU, it can wake up on a different CPU
5422 * than the one it blocked on. This means the per CPU IO-wait number is meaningless.
5423 *
5424 * Task CPU affinities can make all that even more 'interesting'.
5425 */
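/*
 * Worked example of the under-accounting described above (illustrative
 * numbers): two tasks block on IO behind CPU0 for 10ms while CPU1 sits
 * idle. CPU0 accounts 10ms of IO-wait, CPU1 accounts 10ms of plain idle,
 * so the global IO-wait figure is 10ms -- even though with faster storage
 * both tasks could have run in parallel and up to 20ms of runtime was
 * lost. Hence "lower bound".
 */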
5426
5427 unsigned int nr_iowait(void)
5428 {
5429 unsigned int i, sum = 0;
5430
5431 for_each_possible_cpu(i)
5432 sum += nr_iowait_cpu(i);
5433
5434 return sum;
5435 }
5436
5437 #ifdef CONFIG_SMP
5438
5439 /*
5440 * sched_exec - execve() is a valuable balancing opportunity, because at
5441 * this point the task has the smallest effective memory and cache footprint.
5442 */
5443 void sched_exec(void)
5444 {
5445 struct task_struct *p = current;
5446 struct migration_arg arg;
5447 int dest_cpu;
5448
5449 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5450 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5451 if (dest_cpu == smp_processor_id())
5452 return;
5453
5454 if (unlikely(!cpu_active(dest_cpu)))
5455 return;
5456
5457 arg = (struct migration_arg){ p, dest_cpu };
5458 }
5459 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5460 }
5461
5462 #endif
5463
5464 DEFINE_PER_CPU(struct kernel_stat, kstat);
5465 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5466
5467 EXPORT_PER_CPU_SYMBOL(kstat);
5468 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5469
5470 /*
5471 * The function fair_sched_class.update_curr accesses the struct curr
5472 * and its field curr->exec_start; when called from task_sched_runtime(),
5473 * we observe a high rate of cache misses in practice.
5474 * Prefetching this data results in improved performance.
5475 */
5476 static inline void prefetch_curr_exec_start(struct task_struct *p)
5477 {
5478 #ifdef CONFIG_FAIR_GROUP_SCHED
5479 struct sched_entity *curr = p->se.cfs_rq->curr;
5480 #else
5481 struct sched_entity *curr = task_rq(p)->cfs.curr;
5482 #endif
5483 prefetch(curr);
5484 prefetch(&curr->exec_start);
5485 }
5486
5487 /*
5488 * Return accounted runtime for the task.
5489 * In case the task is currently running, return the runtime plus current's
5490 * pending runtime that has not been accounted yet.
5491 */
5492 unsigned long long task_sched_runtime(struct task_struct *p)
5493 {
5494 struct rq_flags rf;
5495 struct rq *rq;
5496 u64 ns;
5497
5498 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5499 /*
5500 * 64-bit doesn't need locks to atomically read a 64-bit value.
5501 * So we have an optimization chance when the task's delta_exec is 0.
5502 * Reading ->on_cpu is racy, but this is OK.
5503 *
5504 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5505 * If we race with it entering CPU, unaccounted time is 0. This is
5506 * indistinguishable from the read occurring a few cycles earlier.
5507 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5508 * been accounted, so we're correct here as well.
5509 */
5510 if (!p->on_cpu || !task_on_rq_queued(p))
5511 return p->se.sum_exec_runtime;
5512 #endif
5513
5514 rq = task_rq_lock(p, &rf);
5515 /*
5516 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5517 * project cycles that may never be accounted to this
5518 * thread, breaking clock_gettime().
5519 */
5520 if (task_current(rq, p) && task_on_rq_queued(p)) {
5521 prefetch_curr_exec_start(p);
5522 update_rq_clock(rq);
5523 p->sched_class->update_curr(rq);
5524 }
5525 ns = p->se.sum_exec_runtime;
5526 task_rq_unlock(rq, p, &rf);
5527
5528 return ns;
5529 }
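/*
 * One consumer of the value returned here is the POSIX CPU clock code
 * mentioned above (clock_gettime() and friends); a minimal userspace
 * sketch of what ultimately observes this, assuming a per-thread CPUTIME
 * clock id:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * which is why the update above must never project cycles that may not
 * end up being accounted to this thread.
 */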
5530
5531 #ifdef CONFIG_SCHED_DEBUG
5532 static u64 cpu_resched_latency(struct rq *rq)
5533 {
5534 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5535 u64 resched_latency, now = rq_clock(rq);
5536 static bool warned_once;
5537
5538 if (sysctl_resched_latency_warn_once && warned_once)
5539 return 0;
5540
5541 if (!need_resched() || !latency_warn_ms)
5542 return 0;
5543
5544 if (system_state == SYSTEM_BOOTING)
5545 return 0;
5546
5547 if (!rq->last_seen_need_resched_ns) {
5548 rq->last_seen_need_resched_ns = now;
5549 rq->ticks_without_resched = 0;
5550 return 0;
5551 }
5552
5553 rq->ticks_without_resched++;
5554 resched_latency = now - rq->last_seen_need_resched_ns;
5555 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5556 return 0;
5557
5558 warned_once = true;
5559
5560 return resched_latency;
5561 }
5562
5563 static int __init setup_resched_latency_warn_ms(char *str)
5564 {
5565 long val;
5566
5567 if ((kstrtol(str, 0, &val))) {
5568 pr_warn("Unable to set resched_latency_warn_ms\n");
5569 return 1;
5570 }
5571
5572 sysctl_resched_latency_warn_ms = val;
5573 return 1;
5574 }
5575 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
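/*
 * Example: booting with "resched_latency_warn_ms=50" on the kernel
 * command line arms the warning at a 50ms threshold (the value is
 * illustrative), while a value of 0 disables the check entirely -- see
 * the !latency_warn_ms test in cpu_resched_latency(). The warning is
 * additionally gated by the LATENCY_WARN scheduler feature checked in
 * sched_tick().
 */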
5576 #else
5577 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5578 #endif /* CONFIG_SCHED_DEBUG */
5579
5580 /*
5581 * This function gets called by the timer code, with HZ frequency.
5582 * We call it with interrupts disabled.
5583 */
5584 void sched_tick(void)
5585 {
5586 int cpu = smp_processor_id();
5587 struct rq *rq = cpu_rq(cpu);
5588 struct task_struct *curr;
5589 struct rq_flags rf;
5590 unsigned long hw_pressure;
5591 u64 resched_latency;
5592
5593 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5594 arch_scale_freq_tick();
5595
5596 sched_clock_tick();
5597
5598 rq_lock(rq, &rf);
5599
5600 curr = rq->curr;
5601 psi_account_irqtime(rq, curr, NULL);
5602
5603 update_rq_clock(rq);
5604 hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5605 update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5606 curr->sched_class->task_tick(rq, curr, 0);
5607 if (sched_feat(LATENCY_WARN))
5608 resched_latency = cpu_resched_latency(rq);
5609 calc_global_load_tick(rq);
5610 sched_core_tick(rq);
5611 task_tick_mm_cid(rq, curr);
5612 scx_tick(rq);
5613
5614 rq_unlock(rq, &rf);
5615
5616 if (sched_feat(LATENCY_WARN) && resched_latency)
5617 resched_latency_warn(cpu, resched_latency);
5618
5619 perf_event_task_tick();
5620
5621 if (curr->flags & PF_WQ_WORKER)
5622 wq_worker_tick(curr);
5623
5624 #ifdef CONFIG_SMP
5625 if (!scx_switched_all()) {
5626 rq->idle_balance = idle_cpu(cpu);
5627 sched_balance_trigger(rq);
5628 }
5629 #endif
5630 }
5631
5632 #ifdef CONFIG_NO_HZ_FULL
5633
5634 struct tick_work {
5635 int cpu;
5636 atomic_t state;
5637 struct delayed_work work;
5638 };
5639 /* Values for ->state, see diagram below. */
5640 #define TICK_SCHED_REMOTE_OFFLINE 0
5641 #define TICK_SCHED_REMOTE_OFFLINING 1
5642 #define TICK_SCHED_REMOTE_RUNNING 2
5643
5644 /*
5645 * State diagram for ->state:
5646 *
5647 *
5648 * TICK_SCHED_REMOTE_OFFLINE
5649 * | ^
5650 * | |
5651 * | | sched_tick_remote()
5652 * | |
5653 * | |
5654 * +--TICK_SCHED_REMOTE_OFFLINING
5655 * | ^
5656 * | |
5657 * sched_tick_start() | | sched_tick_stop()
5658 * | |
5659 * V |
5660 * TICK_SCHED_REMOTE_RUNNING
5661 *
5662 *
5663 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5664 * and sched_tick_start() are happy to leave the state in RUNNING.
5665 */
5666
5667 static struct tick_work __percpu *tick_work_cpu;
5668
5669 static void sched_tick_remote(struct work_struct *work)
5670 {
5671 struct delayed_work *dwork = to_delayed_work(work);
5672 struct tick_work *twork = container_of(dwork, struct tick_work, work);
5673 int cpu = twork->cpu;
5674 struct rq *rq = cpu_rq(cpu);
5675 int os;
5676
5677 /*
5678 * Handle the tick only if it appears the remote CPU is running in full
5679 * dynticks mode. The check is racy by nature, but missing a tick or
5680 * having one too many is no big deal because the scheduler tick updates
5681 * statistics and checks timeslices in a time-independent way, regardless
5682 * of when exactly it is running.
5683 */
5684 if (tick_nohz_tick_stopped_cpu(cpu)) {
5685 guard(rq_lock_irq)(rq);
5686 struct task_struct *curr = rq->curr;
5687
5688 if (cpu_online(cpu)) {
5689 update_rq_clock(rq);
5690
5691 if (!is_idle_task(curr)) {
5692 /*
5693 * Make sure the next tick runs within a
5694 * reasonable amount of time.
5695 */
5696 u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5697 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5698 }
5699 curr->sched_class->task_tick(rq, curr, 0);
5700
5701 calc_load_nohz_remote(rq);
5702 }
5703 }
5704
5705 /*
5706 * Run the remote tick once per second (1Hz). This arbitrary
5707 * frequency is low enough to avoid overload but high enough
5708 * to keep scheduler internal stats reasonably up to date. But
5709 * first update the state to reflect hotplug activity if required.
5710 */
5711 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5712 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5713 if (os == TICK_SCHED_REMOTE_RUNNING)
5714 queue_delayed_work(system_unbound_wq, dwork, HZ);
5715 }
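/*
 * Worked example of the state handling above: if sched_tick_stop() has
 * already moved the state to TICK_SCHED_REMOTE_OFFLINING (1), the
 * atomic_fetch_add_unless(..., -1, RUNNING) drops it to OFFLINE (0) and
 * the work is not requeued; if the state is still RUNNING (2) the add is
 * skipped and the tick rearms itself for another HZ.
 */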
5716
5717 static void sched_tick_start(int cpu)
5718 {
5719 int os;
5720 struct tick_work *twork;
5721
5722 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5723 return;
5724
5725 WARN_ON_ONCE(!tick_work_cpu);
5726
5727 twork = per_cpu_ptr(tick_work_cpu, cpu);
5728 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5729 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5730 if (os == TICK_SCHED_REMOTE_OFFLINE) {
5731 twork->cpu = cpu;
5732 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5733 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5734 }
5735 }
5736
5737 #ifdef CONFIG_HOTPLUG_CPU
5738 static void sched_tick_stop(int cpu)
5739 {
5740 struct tick_work *twork;
5741 int os;
5742
5743 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5744 return;
5745
5746 WARN_ON_ONCE(!tick_work_cpu);
5747
5748 twork = per_cpu_ptr(tick_work_cpu, cpu);
5749 /* There cannot be competing actions, but don't rely on stop-machine. */
5750 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5751 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5752 /* Don't cancel, as this would mess up the state machine. */
5753 }
5754 #endif /* CONFIG_HOTPLUG_CPU */
5755
5756 int __init sched_tick_offload_init(void)
5757 {
5758 tick_work_cpu = alloc_percpu(struct tick_work);
5759 BUG_ON(!tick_work_cpu);
5760 return 0;
5761 }
5762
5763 #else /* !CONFIG_NO_HZ_FULL */
5764 static inline void sched_tick_start(int cpu) { }
5765 static inline void sched_tick_stop(int cpu) { }
5766 #endif
5767
5768 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5769 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5770 /*
5771 * If the value passed in is equal to the current preempt count
5772 * then we just disabled preemption. Start timing the latency.
5773 */
5774 static inline void preempt_latency_start(int val)
5775 {
5776 if (preempt_count() == val) {
5777 unsigned long ip = get_lock_parent_ip();
5778 #ifdef CONFIG_DEBUG_PREEMPT
5779 current->preempt_disable_ip = ip;
5780 #endif
5781 trace_preempt_off(CALLER_ADDR0, ip);
5782 }
5783 }
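/*
 * Example: the outermost preempt_disable() takes the preempt count from
 * 0 to 1, so the preempt_count() == val test above succeeds for val == 1
 * and latency timing starts; nested disables (1 -> 2, 2 -> 3, ...) fail
 * the test and are ignored until the matching preempt_latency_stop().
 */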
5784
5785 void preempt_count_add(int val)
5786 {
5787 #ifdef CONFIG_DEBUG_PREEMPT
5788 /*
5789 * Underflow?
5790 */
5791 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5792 return;
5793 #endif
5794 __preempt_count_add(val);
5795 #ifdef CONFIG_DEBUG_PREEMPT
5796 /*
5797 * Spinlock count overflowing soon?
5798 */
5799 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5800 PREEMPT_MASK - 10);
5801 #endif
5802 preempt_latency_start(val);
5803 }
5804 EXPORT_SYMBOL(preempt_count_add);
5805 NOKPROBE_SYMBOL(preempt_count_add);
5806
5807 /*
5808 * If the value passed in is equal to the current preempt count
5809 * then we just enabled preemption. Stop timing the latency.
5810 */
5811 static inline void preempt_latency_stop(int val)
5812 {
5813 if (preempt_count() == val)
5814 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5815 }
5816
5817 void preempt_count_sub(int val)
5818 {
5819 #ifdef CONFIG_DEBUG_PREEMPT
5820 /*
5821 * Underflow?
5822 */
5823 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5824 return;
5825 /*
5826 * Is the spinlock portion underflowing?
5827 */
5828 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5829 !(preempt_count() & PREEMPT_MASK)))
5830 return;
5831 #endif
5832
5833 preempt_latency_stop(val);
5834 __preempt_count_sub(val);
5835 }
5836 EXPORT_SYMBOL(preempt_count_sub);
5837 NOKPROBE_SYMBOL(preempt_count_sub);
5838
5839 #else
5840 static inline void preempt_latency_start(int val) { }
5841 static inline void preempt_latency_stop(int val) { }
5842 #endif
5843
5844 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5845 {
5846 #ifdef CONFIG_DEBUG_PREEMPT
5847 return p->preempt_disable_ip;
5848 #else
5849 return 0;
5850 #endif
5851 }
5852
5853 /*
5854 * Print scheduling while atomic bug:
5855 */
5856 static noinline void __schedule_bug(struct task_struct *prev)
5857 {
5858 /* Save this before calling printk(), since that will clobber it */
5859 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5860
5861 if (oops_in_progress)
5862 return;
5863
5864 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5865 prev->comm, prev->pid, preempt_count());
5866
5867 debug_show_held_locks(prev);
5868 print_modules();
5869 if (irqs_disabled())
5870 print_irqtrace_events(prev);
5871 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5872 pr_err("Preemption disabled at:");
5873 print_ip_sym(KERN_ERR, preempt_disable_ip);
5874 }
5875 check_panic_on_warn("scheduling while atomic");
5876
5877 dump_stack();
5878 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5879 }
5880
5881 /*
5882 * Various schedule()-time debugging checks and statistics:
5883 */
5884 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5885 {
5886 #ifdef CONFIG_SCHED_STACK_END_CHECK
5887 if (task_stack_end_corrupted(prev))
5888 panic("corrupted stack end detected inside scheduler\n");
5889
5890 if (task_scs_end_corrupted(prev))
5891 panic("corrupted shadow stack detected inside scheduler\n");
5892 #endif
5893
5894 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5895 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5896 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5897 prev->comm, prev->pid, prev->non_block_count);
5898 dump_stack();
5899 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5900 }
5901 #endif
5902
5903 if (unlikely(in_atomic_preempt_off())) {
5904 __schedule_bug(prev);
5905 preempt_count_set(PREEMPT_DISABLED);
5906 }
5907 rcu_sleep_check();
5908 SCHED_WARN_ON(ct_state() == CT_STATE_USER);
5909
5910 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5911
5912 schedstat_inc(this_rq()->sched_count);
5913 }
5914
5915 static void prev_balance(struct rq *rq, struct task_struct *prev,
5916 struct rq_flags *rf)
5917 {
5918 const struct sched_class *start_class = prev->sched_class;
5919 const struct sched_class *class;
5920
5921 #ifdef CONFIG_SCHED_CLASS_EXT
5922 /*
5923 * SCX requires a balance() call before every pick_task() including when
5924 * waking up from SCHED_IDLE. If @start_class is below SCX, start from
5925 * SCX instead. Also, set a flag to detect missing balance() call.
5926 */
5927 if (scx_enabled()) {
5928 rq->scx.flags |= SCX_RQ_BAL_PENDING;
5929 if (sched_class_above(&ext_sched_class, start_class))
5930 start_class = &ext_sched_class;
5931 }
5932 #endif
5933
5934 /*
5935 * We must do the balancing pass before put_prev_task(), such
5936 * that when we release the rq->lock the task is in the same
5937 * state as before we took rq->lock.
5938 *
5939 * We can terminate the balance pass as soon as we know there is
5940 * a runnable task of @class priority or higher.
5941 */
5942 for_active_class_range(class, start_class, &idle_sched_class) {
5943 if (class->balance && class->balance(rq, prev, rf))
5944 break;
5945 }
5946 }
5947
5948 /*
5949 * Pick up the highest-prio task:
5950 */
5951 static inline struct task_struct *
5952 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5953 {
5954 const struct sched_class *class;
5955 struct task_struct *p;
5956
5957 rq->dl_server = NULL;
5958
5959 if (scx_enabled())
5960 goto restart;
5961
5962 /*
5963 * Optimization: we know that if all tasks are in the fair class we can
5964 * call that function directly, but only if the @prev task wasn't of a
5965 * higher scheduling class, because otherwise that class would lose the
5966 * opportunity to pull in more work from other CPUs.
5967 */
5968 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
5969 rq->nr_running == rq->cfs.h_nr_running)) {
5970
5971 p = pick_next_task_fair(rq, prev, rf);
5972 if (unlikely(p == RETRY_TASK))
5973 goto restart;
5974
5975 /* Assume the next prioritized class is idle_sched_class */
5976 if (!p) {
5977 p = pick_task_idle(rq);
5978 put_prev_set_next_task(rq, prev, p);
5979 }
5980
5981 return p;
5982 }
5983
5984 restart:
5985 prev_balance(rq, prev, rf);
5986
5987 for_each_active_class(class) {
5988 if (class->pick_next_task) {
5989 p = class->pick_next_task(rq, prev);
5990 if (p)
5991 return p;
5992 } else {
5993 p = class->pick_task(rq);
5994 if (p) {
5995 put_prev_set_next_task(rq, prev, p);
5996 return p;
5997 }
5998 }
5999 }
6000
6001 BUG(); /* The idle class should always have a runnable task. */
6002 }
6003
6004 #ifdef CONFIG_SCHED_CORE
6005 static inline bool is_task_rq_idle(struct task_struct *t)
6006 {
6007 return (task_rq(t)->idle == t);
6008 }
6009
6010 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
6011 {
6012 return is_task_rq_idle(a) || (a->core_cookie == cookie);
6013 }
6014
6015 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6016 {
6017 if (is_task_rq_idle(a) || is_task_rq_idle(b))
6018 return true;
6019
6020 return a->core_cookie == b->core_cookie;
6021 }
6022
6023 static inline struct task_struct *pick_task(struct rq *rq)
6024 {
6025 const struct sched_class *class;
6026 struct task_struct *p;
6027
6028 rq->dl_server = NULL;
6029
6030 for_each_active_class(class) {
6031 p = class->pick_task(rq);
6032 if (p)
6033 return p;
6034 }
6035
6036 BUG(); /* The idle class should always have a runnable task. */
6037 }
6038
6039 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6040
6041 static void queue_core_balance(struct rq *rq);
6042
6043 static struct task_struct *
6044 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6045 {
6046 struct task_struct *next, *p, *max = NULL;
6047 const struct cpumask *smt_mask;
6048 bool fi_before = false;
6049 bool core_clock_updated = (rq == rq->core);
6050 unsigned long cookie;
6051 int i, cpu, occ = 0;
6052 struct rq *rq_i;
6053 bool need_sync;
6054
6055 if (!sched_core_enabled(rq))
6056 return __pick_next_task(rq, prev, rf);
6057
6058 cpu = cpu_of(rq);
6059
6060 /* Stopper task is switching into idle, no need for core-wide selection. */
6061 if (cpu_is_offline(cpu)) {
6062 /*
6063 * Reset core_pick so that we don't enter the fastpath when
6064 * coming online. core_pick would already be migrated to
6065 * another cpu during offline.
6066 */
6067 rq->core_pick = NULL;
6068 rq->core_dl_server = NULL;
6069 return __pick_next_task(rq, prev, rf);
6070 }
6071
6072 /*
6073 * If there were no {en,de}queues since we picked (IOW, the task
6074 * pointers are all still valid), and we haven't scheduled the last
6075 * pick yet, do so now.
6076 *
6077 * rq->core_pick can be NULL if no selection was made for a CPU because
6078 * it was either offline or went offline during a sibling's core-wide
6079 * selection. In this case, do a core-wide selection.
6080 */
6081 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6082 rq->core->core_pick_seq != rq->core_sched_seq &&
6083 rq->core_pick) {
6084 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6085
6086 next = rq->core_pick;
6087 rq->dl_server = rq->core_dl_server;
6088 rq->core_pick = NULL;
6089 rq->core_dl_server = NULL;
6090 goto out_set_next;
6091 }
6092
6093 prev_balance(rq, prev, rf);
6094
6095 smt_mask = cpu_smt_mask(cpu);
6096 need_sync = !!rq->core->core_cookie;
6097
6098 /* reset state */
6099 rq->core->core_cookie = 0UL;
6100 if (rq->core->core_forceidle_count) {
6101 if (!core_clock_updated) {
6102 update_rq_clock(rq->core);
6103 core_clock_updated = true;
6104 }
6105 sched_core_account_forceidle(rq);
6106 /* reset after accounting force idle */
6107 rq->core->core_forceidle_start = 0;
6108 rq->core->core_forceidle_count = 0;
6109 rq->core->core_forceidle_occupation = 0;
6110 need_sync = true;
6111 fi_before = true;
6112 }
6113
6114 /*
6115 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6116 *
6117 * @task_seq guards the task state ({en,de}queues)
6118 * @pick_seq is the @task_seq we did a selection on
6119 * @sched_seq is the @pick_seq we scheduled
6120 *
6121 * However, preemptions can cause multiple picks on the same task set.
6122 * 'Fix' this by also increasing @task_seq for every pick.
6123 */
6124 rq->core->core_task_seq++;
6125
6126 /*
6127 * Optimize for common case where this CPU has no cookies
6128 * and there are no cookied tasks running on siblings.
6129 */
6130 if (!need_sync) {
6131 next = pick_task(rq);
6132 if (!next->core_cookie) {
6133 rq->core_pick = NULL;
6134 rq->core_dl_server = NULL;
6135 /*
6136 * For robustness, update the min_vruntime_fi for
6137 * unconstrained picks as well.
6138 */
6139 WARN_ON_ONCE(fi_before);
6140 task_vruntime_update(rq, next, false);
6141 goto out_set_next;
6142 }
6143 }
6144
6145 /*
6146 * For each thread: do the regular task pick and find the max prio task
6147 * amongst them.
6148 *
6149 * Tie-break prio towards the current CPU
6150 */
6151 for_each_cpu_wrap(i, smt_mask, cpu) {
6152 rq_i = cpu_rq(i);
6153
6154 /*
6155 * Current cpu always has its clock updated on entrance to
6156 * pick_next_task(). If the current cpu is not the core,
6157 * the core may also have been updated above.
6158 */
6159 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6160 update_rq_clock(rq_i);
6161
6162 rq_i->core_pick = p = pick_task(rq_i);
6163 rq_i->core_dl_server = rq_i->dl_server;
6164
6165 if (!max || prio_less(max, p, fi_before))
6166 max = p;
6167 }
6168
6169 cookie = rq->core->core_cookie = max->core_cookie;
6170
6171 /*
6172 * For each thread: try and find a runnable task that matches @max or
6173 * force idle.
6174 */
6175 for_each_cpu(i, smt_mask) {
6176 rq_i = cpu_rq(i);
6177 p = rq_i->core_pick;
6178
6179 if (!cookie_equals(p, cookie)) {
6180 p = NULL;
6181 if (cookie)
6182 p = sched_core_find(rq_i, cookie);
6183 if (!p)
6184 p = idle_sched_class.pick_task(rq_i);
6185 }
6186
6187 rq_i->core_pick = p;
6188 rq_i->core_dl_server = NULL;
6189
6190 if (p == rq_i->idle) {
6191 if (rq_i->nr_running) {
6192 rq->core->core_forceidle_count++;
6193 if (!fi_before)
6194 rq->core->core_forceidle_seq++;
6195 }
6196 } else {
6197 occ++;
6198 }
6199 }
6200
6201 if (schedstat_enabled() && rq->core->core_forceidle_count) {
6202 rq->core->core_forceidle_start = rq_clock(rq->core);
6203 rq->core->core_forceidle_occupation = occ;
6204 }
6205
6206 rq->core->core_pick_seq = rq->core->core_task_seq;
6207 next = rq->core_pick;
6208 rq->core_sched_seq = rq->core->core_pick_seq;
6209
6210 /* Something should have been selected for current CPU */
6211 WARN_ON_ONCE(!next);
6212
6213 /*
6214 * Reschedule siblings
6215 *
6216 * NOTE: L1TF -- at this point we're no longer running the old task and
6217 * sending an IPI (below) ensures the sibling will no longer be running
6218 * their task. This ensures there is no inter-sibling overlap between
6219 * non-matching user state.
6220 */
6221 for_each_cpu(i, smt_mask) {
6222 rq_i = cpu_rq(i);
6223
6224 /*
6225 * An online sibling might have gone offline before a task
6226 * could be picked for it, or it might be offline but later
6227 * happen to come online, but it's too late and nothing was
6228 * picked for it. That's Ok - it will pick tasks for itself,
6229 * so ignore it.
6230 */
6231 if (!rq_i->core_pick)
6232 continue;
6233
6234 /*
6235 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6236 * fi_before fi update?
6237 * 0 0 1
6238 * 0 1 1
6239 * 1 0 1
6240 * 1 1 0
6241 */
6242 if (!(fi_before && rq->core->core_forceidle_count))
6243 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6244
6245 rq_i->core_pick->core_occupation = occ;
6246
6247 if (i == cpu) {
6248 rq_i->core_pick = NULL;
6249 rq_i->core_dl_server = NULL;
6250 continue;
6251 }
6252
6253 /* Did we break L1TF mitigation requirements? */
6254 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6255
6256 if (rq_i->curr == rq_i->core_pick) {
6257 rq_i->core_pick = NULL;
6258 rq_i->core_dl_server = NULL;
6259 continue;
6260 }
6261
6262 resched_curr(rq_i);
6263 }
6264
6265 out_set_next:
6266 put_prev_set_next_task(rq, prev, next);
6267 if (rq->core->core_forceidle_count && next == rq->idle)
6268 queue_core_balance(rq);
6269
6270 return next;
6271 }
6272
6273 static bool try_steal_cookie(int this, int that)
6274 {
6275 struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6276 struct task_struct *p;
6277 unsigned long cookie;
6278 bool success = false;
6279
6280 guard(irq)();
6281 guard(double_rq_lock)(dst, src);
6282
6283 cookie = dst->core->core_cookie;
6284 if (!cookie)
6285 return false;
6286
6287 if (dst->curr != dst->idle)
6288 return false;
6289
6290 p = sched_core_find(src, cookie);
6291 if (!p)
6292 return false;
6293
6294 do {
6295 if (p == src->core_pick || p == src->curr)
6296 goto next;
6297
6298 if (!is_cpu_allowed(p, this))
6299 goto next;
6300
6301 if (p->core_occupation > dst->idle->core_occupation)
6302 goto next;
6303 /*
6304 * sched_core_find() and sched_core_next() will ensure
6305 * that task @p is not throttled now, we also need to
6306 * check whether the runqueue of the destination CPU is
6307 * being throttled.
6308 */
6309 if (sched_task_is_throttled(p, this))
6310 goto next;
6311
6312 deactivate_task(src, p, 0);
6313 set_task_cpu(p, this);
6314 activate_task(dst, p, 0);
6315
6316 resched_curr(dst);
6317
6318 success = true;
6319 break;
6320
6321 next:
6322 p = sched_core_next(p, cookie);
6323 } while (p);
6324
6325 return success;
6326 }
6327
6328 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6329 {
6330 int i;
6331
6332 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6333 if (i == cpu)
6334 continue;
6335
6336 if (need_resched())
6337 break;
6338
6339 if (try_steal_cookie(cpu, i))
6340 return true;
6341 }
6342
6343 return false;
6344 }
6345
6346 static void sched_core_balance(struct rq *rq)
6347 {
6348 struct sched_domain *sd;
6349 int cpu = cpu_of(rq);
6350
6351 guard(preempt)();
6352 guard(rcu)();
6353
6354 raw_spin_rq_unlock_irq(rq);
6355 for_each_domain(cpu, sd) {
6356 if (need_resched())
6357 break;
6358
6359 if (steal_cookie_task(cpu, sd))
6360 break;
6361 }
6362 raw_spin_rq_lock_irq(rq);
6363 }
6364
6365 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6366
6367 static void queue_core_balance(struct rq *rq)
6368 {
6369 if (!sched_core_enabled(rq))
6370 return;
6371
6372 if (!rq->core->core_cookie)
6373 return;
6374
6375 if (!rq->nr_running) /* not forced idle */
6376 return;
6377
6378 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6379 }
6380
6381 DEFINE_LOCK_GUARD_1(core_lock, int,
6382 sched_core_lock(*_T->lock, &_T->flags),
6383 sched_core_unlock(*_T->lock, &_T->flags),
6384 unsigned long flags)
6385
6386 static void sched_core_cpu_starting(unsigned int cpu)
6387 {
6388 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6389 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6390 int t;
6391
6392 guard(core_lock)(&cpu);
6393
6394 WARN_ON_ONCE(rq->core != rq);
6395
6396 /* if we're the first, we'll be our own leader */
6397 if (cpumask_weight(smt_mask) == 1)
6398 return;
6399
6400 /* find the leader */
6401 for_each_cpu(t, smt_mask) {
6402 if (t == cpu)
6403 continue;
6404 rq = cpu_rq(t);
6405 if (rq->core == rq) {
6406 core_rq = rq;
6407 break;
6408 }
6409 }
6410
6411 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6412 return;
6413
6414 /* install and validate core_rq */
6415 for_each_cpu(t, smt_mask) {
6416 rq = cpu_rq(t);
6417
6418 if (t == cpu)
6419 rq->core = core_rq;
6420
6421 WARN_ON_ONCE(rq->core != core_rq);
6422 }
6423 }
6424
6425 static void sched_core_cpu_deactivate(unsigned int cpu)
6426 {
6427 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6428 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6429 int t;
6430
6431 guard(core_lock)(&cpu);
6432
6433 /* if we're the last man standing, nothing to do */
6434 if (cpumask_weight(smt_mask) == 1) {
6435 WARN_ON_ONCE(rq->core != rq);
6436 return;
6437 }
6438
6439 /* if we're not the leader, nothing to do */
6440 if (rq->core != rq)
6441 return;
6442
6443 /* find a new leader */
6444 for_each_cpu(t, smt_mask) {
6445 if (t == cpu)
6446 continue;
6447 core_rq = cpu_rq(t);
6448 break;
6449 }
6450
6451 if (WARN_ON_ONCE(!core_rq)) /* impossible */
6452 return;
6453
6454 /* copy the shared state to the new leader */
6455 core_rq->core_task_seq = rq->core_task_seq;
6456 core_rq->core_pick_seq = rq->core_pick_seq;
6457 core_rq->core_cookie = rq->core_cookie;
6458 core_rq->core_forceidle_count = rq->core_forceidle_count;
6459 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6460 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6461
6462 /*
6463 * Accounting edge for forced idle is handled in pick_next_task().
6464 * Don't need another one here, since the hotplug thread shouldn't
6465 * have a cookie.
6466 */
6467 core_rq->core_forceidle_start = 0;
6468
6469 /* install new leader */
6470 for_each_cpu(t, smt_mask) {
6471 rq = cpu_rq(t);
6472 rq->core = core_rq;
6473 }
6474 }
6475
6476 static inline void sched_core_cpu_dying(unsigned int cpu)
6477 {
6478 struct rq *rq = cpu_rq(cpu);
6479
6480 if (rq->core != rq)
6481 rq->core = rq;
6482 }
6483
6484 #else /* !CONFIG_SCHED_CORE */
6485
6486 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6487 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6488 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6489
6490 static struct task_struct *
6491 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6492 {
6493 return __pick_next_task(rq, prev, rf);
6494 }
6495
6496 #endif /* CONFIG_SCHED_CORE */
6497
6498 /*
6499 * Constants for the sched_mode argument of __schedule().
6500 *
6501 * The mode argument allows RT enabled kernels to differentiate a
6502 * preemption from blocking on an 'sleeping' spin/rwlock.
6503 */
6504 #define SM_IDLE (-1)
6505 #define SM_NONE 0
6506 #define SM_PREEMPT 1
6507 #define SM_RTLOCK_WAIT 2
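/*
 * How the entry points below map onto these modes (summary of the call
 * sites in this file): schedule(), rt_mutex_schedule() and do_task_dead()
 * pass SM_NONE, the preempt_schedule*() family and preempt_schedule_irq()
 * pass SM_PREEMPT, schedule_rtlock() passes SM_RTLOCK_WAIT and
 * schedule_idle() passes SM_IDLE.
 */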
6508
6509 /*
6510 * __schedule() is the main scheduler function.
6511 *
6512 * The main means of driving the scheduler and thus entering this function are:
6513 *
6514 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6515 *
6516 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6517 * paths. For example, see arch/x86/entry_64.S.
6518 *
6519 * To drive preemption between tasks, the scheduler sets the flag in timer
6520 * interrupt handler sched_tick().
6521 *
6522 * 3. Wakeups don't really cause entry into schedule(). They add a
6523 * task to the run-queue and that's it.
6524 *
6525 * Now, if the new task added to the run-queue preempts the current
6526 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6527 * called on the nearest possible occasion:
6528 *
6529 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6530 *
6531 * - in syscall or exception context, at the next outermost
6532 * preempt_enable(). (this might be as soon as the wake_up()'s
6533 * spin_unlock()!)
6534 *
6535 * - in IRQ context, return from interrupt-handler to
6536 * preemptible context
6537 *
6538 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6539 * then at the next:
6540 *
6541 * - cond_resched() call
6542 * - explicit schedule() call
6543 * - return from syscall or exception to user-space
6544 * - return from interrupt-handler to user-space
6545 *
6546 * WARNING: must be called with preemption disabled!
6547 */
6548 static void __sched notrace __schedule(int sched_mode)
6549 {
6550 struct task_struct *prev, *next;
6551 /*
6552 * On PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted
6553 * as a preemption by schedule_debug() and RCU.
6554 */
6555 bool preempt = sched_mode > SM_NONE;
6556 bool block = false;
6557 unsigned long *switch_count;
6558 unsigned long prev_state;
6559 struct rq_flags rf;
6560 struct rq *rq;
6561 int cpu;
6562
6563 cpu = smp_processor_id();
6564 rq = cpu_rq(cpu);
6565 prev = rq->curr;
6566
6567 schedule_debug(prev, preempt);
6568
6569 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6570 hrtick_clear(rq);
6571
6572 local_irq_disable();
6573 rcu_note_context_switch(preempt);
6574
6575 /*
6576 * Make sure that signal_pending_state()->signal_pending() below
6577 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6578 * done by the caller to avoid the race with signal_wake_up():
6579 *
6580 * __set_current_state(@state) signal_wake_up()
6581 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
6582 * wake_up_state(p, state)
6583 * LOCK rq->lock LOCK p->pi_state
6584 * smp_mb__after_spinlock() smp_mb__after_spinlock()
6585 * if (signal_pending_state()) if (p->state & @state)
6586 *
6587 * Also, the membarrier system call requires a full memory barrier
6588 * after coming from user-space, before storing to rq->curr; this
6589 * barrier matches a full barrier in the proximity of the membarrier
6590 * system call exit.
6591 */
6592 rq_lock(rq, &rf);
6593 smp_mb__after_spinlock();
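	/*
	 * Illustrative caller-side sketch of the sleep pattern the race
	 * description above refers to (minimal, with a hypothetical
	 * condition):
	 *
	 *	for (;;) {
	 *		set_current_state(TASK_INTERRUPTIBLE);
	 *		if (condition)
	 *			break;
	 *		schedule();
	 *	}
	 *	__set_current_state(TASK_RUNNING);
	 *
	 * The signal_pending_state() test below is what turns a concurrent
	 * signal_wake_up() into "stay runnable" instead of a missed wakeup.
	 */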
6594
6595 /* Promote REQ to ACT */
6596 rq->clock_update_flags <<= 1;
6597 update_rq_clock(rq);
6598 rq->clock_update_flags = RQCF_UPDATED;
6599
6600 switch_count = &prev->nivcsw;
6601
6602 /* Task state changes only considers SM_PREEMPT as preemption */
6603 preempt = sched_mode == SM_PREEMPT;
6604
6605 /*
6606 * We must load prev->state once (task_struct::state is volatile), such
6607 * that we form a control dependency vs deactivate_task() below.
6608 */
6609 prev_state = READ_ONCE(prev->__state);
6610 if (sched_mode == SM_IDLE) {
6611 /* SCX must consult the BPF scheduler to tell if rq is empty */
6612 if (!rq->nr_running && !scx_enabled()) {
6613 next = prev;
6614 goto picked;
6615 }
6616 } else if (!preempt && prev_state) {
6617 if (signal_pending_state(prev_state, prev)) {
6618 WRITE_ONCE(prev->__state, TASK_RUNNING);
6619 } else {
6620 int flags = DEQUEUE_NOCLOCK;
6621
6622 prev->sched_contributes_to_load =
6623 (prev_state & TASK_UNINTERRUPTIBLE) &&
6624 !(prev_state & TASK_NOLOAD) &&
6625 !(prev_state & TASK_FROZEN);
6626
6627 if (unlikely(is_special_task_state(prev_state)))
6628 flags |= DEQUEUE_SPECIAL;
6629
6630 /*
6631 * __schedule() ttwu()
6632 * prev_state = prev->state; if (p->on_rq && ...)
6633 * if (prev_state) goto out;
6634 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6635 * p->state = TASK_WAKING
6636 *
6637 * Where __schedule() and ttwu() have matching control dependencies.
6638 *
6639 * After this, schedule() must not care about p->state any more.
6640 */
6641 block_task(rq, prev, flags);
6642 block = true;
6643 }
6644 switch_count = &prev->nvcsw;
6645 }
6646
6647 next = pick_next_task(rq, prev, &rf);
6648 picked:
6649 clear_tsk_need_resched(prev);
6650 clear_preempt_need_resched();
6651 #ifdef CONFIG_SCHED_DEBUG
6652 rq->last_seen_need_resched_ns = 0;
6653 #endif
6654
6655 if (likely(prev != next)) {
6656 rq->nr_switches++;
6657 /*
6658 * RCU users of rcu_dereference(rq->curr) may not see
6659 * changes to task_struct made by pick_next_task().
6660 */
6661 RCU_INIT_POINTER(rq->curr, next);
6662 /*
6663 * The membarrier system call requires each architecture
6664 * to have a full memory barrier after updating
6665 * rq->curr, before returning to user-space.
6666 *
6667 * Here are the schemes providing that barrier on the
6668 * various architectures:
6669 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6670 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
6671 * on PowerPC and on RISC-V.
6672 * - finish_lock_switch() for weakly-ordered
6673 * architectures where spin_unlock is a full barrier,
6674 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6675 * is a RELEASE barrier),
6676 *
6677 * The barrier matches a full barrier in the proximity of
6678 * the membarrier system call entry.
6679 *
6680 * On RISC-V, this barrier pairing is also needed for the
6681 * SYNC_CORE command when switching between processes, cf.
6682 * the inline comments in membarrier_arch_switch_mm().
6683 */
6684 ++*switch_count;
6685
6686 migrate_disable_switch(rq, prev);
6687 psi_account_irqtime(rq, prev, next);
6688 psi_sched_switch(prev, next, block);
6689
6690 trace_sched_switch(preempt, prev, next, prev_state);
6691
6692 /* Also unlocks the rq: */
6693 rq = context_switch(rq, prev, next, &rf);
6694 } else {
6695 rq_unpin_lock(rq, &rf);
6696 __balance_callbacks(rq);
6697 raw_spin_rq_unlock_irq(rq);
6698 }
6699 }
6700
6701 void __noreturn do_task_dead(void)
6702 {
6703 /* Causes final put_task_struct in finish_task_switch(): */
6704 set_special_state(TASK_DEAD);
6705
6706 /* Tell freezer to ignore us: */
6707 current->flags |= PF_NOFREEZE;
6708
6709 __schedule(SM_NONE);
6710 BUG();
6711
6712 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6713 for (;;)
6714 cpu_relax();
6715 }
6716
6717 static inline void sched_submit_work(struct task_struct *tsk)
6718 {
6719 static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6720 unsigned int task_flags;
6721
6722 /*
6723 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6724 * will use a blocking primitive -- which would lead to recursion.
6725 */
6726 lock_map_acquire_try(&sched_map);
6727
6728 task_flags = tsk->flags;
6729 /*
6730 * If a worker goes to sleep, notify and ask workqueue whether it
6731 * wants to wake up a task to maintain concurrency.
6732 */
6733 if (task_flags & PF_WQ_WORKER)
6734 wq_worker_sleeping(tsk);
6735 else if (task_flags & PF_IO_WORKER)
6736 io_wq_worker_sleeping(tsk);
6737
6738 /*
6739 * spinlock and rwlock must not flush block requests. This will
6740 * deadlock if the callback attempts to acquire a lock which is
6741 * already acquired.
6742 */
6743 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6744
6745 /*
6746 * If we are going to sleep and we have plugged IO queued,
6747 * make sure to submit it to avoid deadlocks.
6748 */
6749 blk_flush_plug(tsk->plug, true);
6750
6751 lock_map_release(&sched_map);
6752 }
6753
6754 static void sched_update_worker(struct task_struct *tsk)
6755 {
6756 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6757 if (tsk->flags & PF_BLOCK_TS)
6758 blk_plug_invalidate_ts(tsk);
6759 if (tsk->flags & PF_WQ_WORKER)
6760 wq_worker_running(tsk);
6761 else if (tsk->flags & PF_IO_WORKER)
6762 io_wq_worker_running(tsk);
6763 }
6764 }
6765
6766 static __always_inline void __schedule_loop(int sched_mode)
6767 {
6768 do {
6769 preempt_disable();
6770 __schedule(sched_mode);
6771 sched_preempt_enable_no_resched();
6772 } while (need_resched());
6773 }
6774
6775 asmlinkage __visible void __sched schedule(void)
6776 {
6777 struct task_struct *tsk = current;
6778
6779 #ifdef CONFIG_RT_MUTEXES
6780 lockdep_assert(!tsk->sched_rt_mutex);
6781 #endif
6782
6783 if (!task_is_running(tsk))
6784 sched_submit_work(tsk);
6785 __schedule_loop(SM_NONE);
6786 sched_update_worker(tsk);
6787 }
6788 EXPORT_SYMBOL(schedule);
6789
6790 /*
6791 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6792 * state (have scheduled out non-voluntarily) by making sure that all
6793 * tasks have either left the run queue or have gone into user space.
6794 * As idle tasks do not do either, they must not ever be preempted
6795 * (schedule out non-voluntarily).
6796 *
6797 * schedule_idle() is similar to schedule_preempt_disabled() except that it
6798 * never enables preemption because it does not call sched_submit_work().
6799 */
6800 void __sched schedule_idle(void)
6801 {
6802 /*
6803 * As this skips calling sched_submit_work(), which the idle task does
6804 * regardless because that function is a NOP when the task is in a
6805 * TASK_RUNNING state, make sure this isn't used someplace where the
6806 * current task can be in any other state. Note, idle is always in the
6807 * TASK_RUNNING state.
6808 */
6809 WARN_ON_ONCE(current->__state);
6810 do {
6811 __schedule(SM_IDLE);
6812 } while (need_resched());
6813 }
6814
6815 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6816 asmlinkage __visible void __sched schedule_user(void)
6817 {
6818 /*
6819 * If we come here after a random call to set_need_resched(),
6820 * or we have been woken up remotely but the IPI has not yet arrived,
6821 * we haven't yet exited the RCU idle mode. Do it here manually until
6822 * we find a better solution.
6823 *
6824 * NB: There are buggy callers of this function. Ideally we
6825 * should warn if prev_state != CT_STATE_USER, but that will trigger
6826 * too frequently to make sense yet.
6827 */
6828 enum ctx_state prev_state = exception_enter();
6829 schedule();
6830 exception_exit(prev_state);
6831 }
6832 #endif
6833
6834 /**
6835 * schedule_preempt_disabled - called with preemption disabled
6836 *
6837 * Returns with preemption disabled. Note: preempt_count must be 1
6838 */
6839 void __sched schedule_preempt_disabled(void)
6840 {
6841 sched_preempt_enable_no_resched();
6842 schedule();
6843 preempt_disable();
6844 }
6845
6846 #ifdef CONFIG_PREEMPT_RT
6847 void __sched notrace schedule_rtlock(void)
6848 {
6849 __schedule_loop(SM_RTLOCK_WAIT);
6850 }
6851 NOKPROBE_SYMBOL(schedule_rtlock);
6852 #endif
6853
6854 static void __sched notrace preempt_schedule_common(void)
6855 {
6856 do {
6857 /*
6858 * Because the function tracer can trace preempt_count_sub()
6859 * and it also uses preempt_enable/disable_notrace(), if
6860 * NEED_RESCHED is set, the preempt_enable_notrace() called
6861 * by the function tracer will call this function again and
6862 * cause infinite recursion.
6863 *
6864 * Preemption must be disabled here before the function
6865 * tracer can trace. Break up preempt_disable() into two
6866 * calls. One to disable preemption without fear of being
6867 * traced. The other to still record the preemption latency,
6868 * which can also be traced by the function tracer.
6869 */
6870 preempt_disable_notrace();
6871 preempt_latency_start(1);
6872 __schedule(SM_PREEMPT);
6873 preempt_latency_stop(1);
6874 preempt_enable_no_resched_notrace();
6875
6876 /*
6877 * Check again in case we missed a preemption opportunity
6878 * between schedule and now.
6879 */
6880 } while (need_resched());
6881 }
6882
6883 #ifdef CONFIG_PREEMPTION
6884 /*
6885 * This is the entry point to schedule() from in-kernel preemption
6886 * off of preempt_enable.
6887 */
6888 asmlinkage __visible void __sched notrace preempt_schedule(void)
6889 {
6890 /*
6891 * If there is a non-zero preempt_count or interrupts are disabled,
6892 * we do not want to preempt the current task. Just return..
6893 */
6894 if (likely(!preemptible()))
6895 return;
6896 preempt_schedule_common();
6897 }
6898 NOKPROBE_SYMBOL(preempt_schedule);
6899 EXPORT_SYMBOL(preempt_schedule);
6900
6901 #ifdef CONFIG_PREEMPT_DYNAMIC
6902 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6903 #ifndef preempt_schedule_dynamic_enabled
6904 #define preempt_schedule_dynamic_enabled preempt_schedule
6905 #define preempt_schedule_dynamic_disabled NULL
6906 #endif
6907 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
6908 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6909 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6910 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
6911 void __sched notrace dynamic_preempt_schedule(void)
6912 {
6913 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6914 return;
6915 preempt_schedule();
6916 }
6917 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6918 EXPORT_SYMBOL(dynamic_preempt_schedule);
6919 #endif
6920 #endif
6921
6922 /**
6923 * preempt_schedule_notrace - preempt_schedule called by tracing
6924 *
6925 * The tracing infrastructure uses preempt_enable_notrace to prevent
6926 * recursion and tracing preempt enabling caused by the tracing
6927 * infrastructure itself. But as tracing can happen in areas coming
6928 * from userspace or just about to enter userspace, a preempt enable
6929 * can occur before user_exit() is called. This will cause the scheduler
6930 * to be called when the system is still in usermode.
6931 *
6932 * To prevent this, the preempt_enable_notrace will use this function
6933 * instead of preempt_schedule() to exit user context if needed before
6934 * calling the scheduler.
6935 */
6936 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
6937 {
6938 enum ctx_state prev_ctx;
6939
6940 if (likely(!preemptible()))
6941 return;
6942
6943 do {
6944 /*
6945 * Because the function tracer can trace preempt_count_sub()
6946 * and it also uses preempt_enable/disable_notrace(), if
6947 * NEED_RESCHED is set, the preempt_enable_notrace() called
6948 * by the function tracer will call this function again and
6949 * cause infinite recursion.
6950 *
6951 * Preemption must be disabled here before the function
6952 * tracer can trace. Break up preempt_disable() into two
6953 * calls. One to disable preemption without fear of being
6954 * traced. The other to still record the preemption latency,
6955 * which can also be traced by the function tracer.
6956 */
6957 preempt_disable_notrace();
6958 preempt_latency_start(1);
6959 /*
6960 * Needs preempt disabled in case user_exit() is traced
6961 * and the tracer calls preempt_enable_notrace() causing
6962 * an infinite recursion.
6963 */
6964 prev_ctx = exception_enter();
6965 __schedule(SM_PREEMPT);
6966 exception_exit(prev_ctx);
6967
6968 preempt_latency_stop(1);
6969 preempt_enable_no_resched_notrace();
6970 } while (need_resched());
6971 }
6972 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
6973
6974 #ifdef CONFIG_PREEMPT_DYNAMIC
6975 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6976 #ifndef preempt_schedule_notrace_dynamic_enabled
6977 #define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
6978 #define preempt_schedule_notrace_dynamic_disabled NULL
6979 #endif
6980 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
6981 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
6982 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6983 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
6984 void __sched notrace dynamic_preempt_schedule_notrace(void)
6985 {
6986 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
6987 return;
6988 preempt_schedule_notrace();
6989 }
6990 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
6991 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
6992 #endif
6993 #endif
6994
6995 #endif /* CONFIG_PREEMPTION */
6996
6997 /*
6998 * This is the entry point to schedule() from kernel preemption
6999 * off of IRQ context.
7000 * Note that this is called and returns with IRQs disabled. This will
7001 * protect us against recursive calling from IRQ contexts.
7002 */
7003 asmlinkage __visible void __sched preempt_schedule_irq(void)
7004 {
7005 enum ctx_state prev_state;
7006
7007 /* Catch callers which need to be fixed */
7008 BUG_ON(preempt_count() || !irqs_disabled());
7009
7010 prev_state = exception_enter();
7011
7012 do {
7013 preempt_disable();
7014 local_irq_enable();
7015 __schedule(SM_PREEMPT);
7016 local_irq_disable();
7017 sched_preempt_enable_no_resched();
7018 } while (need_resched());
7019
7020 exception_exit(prev_state);
7021 }
7022
7023 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7024 void *key)
7025 {
7026 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7027 return try_to_wake_up(curr->private, mode, wake_flags);
7028 }
7029 EXPORT_SYMBOL(default_wake_function);
7030
7031 const struct sched_class *__setscheduler_class(int policy, int prio)
7032 {
7033 if (dl_prio(prio))
7034 return &dl_sched_class;
7035
7036 if (rt_prio(prio))
7037 return &rt_sched_class;
7038
7039 #ifdef CONFIG_SCHED_CLASS_EXT
7040 if (task_should_scx(policy))
7041 return &ext_sched_class;
7042 #endif
7043
7044 return &fair_sched_class;
7045 }
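/*
 * Example mappings of the above: a task boosted to a deadline priority
 * gets &dl_sched_class, SCHED_FIFO/SCHED_RR priorities fall in the RT
 * range and map to &rt_sched_class, and everything else (SCHED_NORMAL,
 * SCHED_BATCH, SCHED_IDLE) ends up in &fair_sched_class unless the
 * sched_ext policy check above diverts it to &ext_sched_class.
 */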
7046
7047 #ifdef CONFIG_RT_MUTEXES
7048
7049 /*
7050 * Would be more useful with typeof()/auto_type but they don't mix with
7051 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7052 * name such that if someone were to implement this function we get to compare
7053 * notes.
7054 */
7055 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
7056
7057 void rt_mutex_pre_schedule(void)
7058 {
7059 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7060 sched_submit_work(current);
7061 }
7062
7063 void rt_mutex_schedule(void)
7064 {
7065 lockdep_assert(current->sched_rt_mutex);
7066 __schedule_loop(SM_NONE);
7067 }
7068
7069 void rt_mutex_post_schedule(void)
7070 {
7071 sched_update_worker(current);
7072 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7073 }
7074
7075 /*
7076 * rt_mutex_setprio - set the current priority of a task
7077 * @p: task to boost
7078 * @pi_task: donor task
7079 *
7080 * This function changes the 'effective' priority of a task. It does
7081 * not touch ->normal_prio like __setscheduler().
7082 *
7083 * Used by the rt_mutex code to implement priority inheritance
7084 * logic. Call site only calls if the priority of the task changed.
7085 */
7086 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7087 {
7088 int prio, oldprio, queued, running, queue_flag =
7089 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7090 const struct sched_class *prev_class, *next_class;
7091 struct rq_flags rf;
7092 struct rq *rq;
7093
7094 /* XXX used to be waiter->prio, not waiter->task->prio */
7095 prio = __rt_effective_prio(pi_task, p->normal_prio);
7096
7097 /*
7098 * If nothing changed; bail early.
7099 */
7100 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7101 return;
7102
7103 rq = __task_rq_lock(p, &rf);
7104 update_rq_clock(rq);
7105 /*
7106 * Set under pi_lock && rq->lock, such that the value can be used under
7107 * either lock.
7108 *
7109 * Note that there is loads of trickery needed to make this pointer cache work
7110 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7111 * ensure a task is de-boosted (pi_task is set to NULL) before the
7112 * task is allowed to run again (and can exit). This ensures the pointer
7113 * points to a blocked task -- which guarantees the task is present.
7114 */
7115 p->pi_top_task = pi_task;
7116
7117 /*
7118 * For FIFO/RR we only need to set prio, if that matches we're done.
7119 */
7120 if (prio == p->prio && !dl_prio(prio))
7121 goto out_unlock;
7122
7123 /*
7124 * Idle task boosting is a no-no in general. There is one
7125 * exception, when PREEMPT_RT and NOHZ is active:
7126 *
7127 * The idle task calls get_next_timer_interrupt() and holds
7128 * the timer wheel base->lock on the CPU and another CPU wants
7129 * to access the timer (probably to cancel it). We can safely
7130 * ignore the boosting request, as the idle CPU runs this code
7131 * with interrupts disabled and will complete the lock
7132 * protected section without being interrupted. So there is no
7133 * real need to boost.
7134 */
7135 if (unlikely(p == rq->idle)) {
7136 WARN_ON(p != rq->curr);
7137 WARN_ON(p->pi_blocked_on);
7138 goto out_unlock;
7139 }
7140
7141 trace_sched_pi_setprio(p, pi_task);
7142 oldprio = p->prio;
7143
7144 if (oldprio == prio)
7145 queue_flag &= ~DEQUEUE_MOVE;
7146
7147 prev_class = p->sched_class;
7148 next_class = __setscheduler_class(p->policy, prio);
7149
7150 if (prev_class != next_class && p->se.sched_delayed)
7151 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
7152
7153 queued = task_on_rq_queued(p);
7154 running = task_current(rq, p);
7155 if (queued)
7156 dequeue_task(rq, p, queue_flag);
7157 if (running)
7158 put_prev_task(rq, p);
7159
7160 /*
7161 * Boosting conditions are:
7162 * 1. -rt task is running and holds mutex A
7163 * --> -dl task blocks on mutex A
7164 *
7165 * 2. -dl task is running and holds mutex A
7166 * --> -dl task blocks on mutex A and could preempt the
7167 * running task
7168 */
7169 if (dl_prio(prio)) {
7170 if (!dl_prio(p->normal_prio) ||
7171 (pi_task && dl_prio(pi_task->prio) &&
7172 dl_entity_preempt(&pi_task->dl, &p->dl))) {
7173 p->dl.pi_se = pi_task->dl.pi_se;
7174 queue_flag |= ENQUEUE_REPLENISH;
7175 } else {
7176 p->dl.pi_se = &p->dl;
7177 }
7178 } else if (rt_prio(prio)) {
7179 if (dl_prio(oldprio))
7180 p->dl.pi_se = &p->dl;
7181 if (oldprio < prio)
7182 queue_flag |= ENQUEUE_HEAD;
7183 } else {
7184 if (dl_prio(oldprio))
7185 p->dl.pi_se = &p->dl;
7186 if (rt_prio(oldprio))
7187 p->rt.timeout = 0;
7188 }
7189
7190 p->sched_class = next_class;
7191 p->prio = prio;
7192
7193 check_class_changing(rq, p, prev_class);
7194
7195 if (queued)
7196 enqueue_task(rq, p, queue_flag);
7197 if (running)
7198 set_next_task(rq, p);
7199
7200 check_class_changed(rq, p, prev_class, oldprio);
7201 out_unlock:
7202 /* Prevent rq from going away on us: */
7203 preempt_disable();
7204
7205 rq_unpin_lock(rq, &rf);
7206 __balance_callbacks(rq);
7207 raw_spin_rq_unlock(rq);
7208
7209 preempt_enable();
7210 }
7211 #endif
7212
7213 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7214 int __sched __cond_resched(void)
7215 {
7216 if (should_resched(0)) {
7217 preempt_schedule_common();
7218 return 1;
7219 }
7220 /*
7221 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
7222 * whether the current CPU is in an RCU read-side critical section,
7223 * so the tick can report quiescent states even for CPUs looping
7224 * in kernel context. In contrast, in non-preemptible kernels,
7225 * RCU readers leave no in-memory hints, which means that CPU-bound
7226 * processes executing in kernel context might never report an
7227 * RCU quiescent state. Therefore, the following code causes
7228 * cond_resched() to report a quiescent state, but only when RCU
7229 * is in urgent need of one.
7230 */
7231 #ifndef CONFIG_PREEMPT_RCU
7232 rcu_all_qs();
7233 #endif
7234 return 0;
7235 }
7236 EXPORT_SYMBOL(__cond_resched);
7237 #endif
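
/*
 * For illustration, a minimal sketch of the typical caller pattern for
 * cond_resched() in a long-running kernel loop; the item type, list head
 * and process_item() helper below are hypothetical.
 */
#if 0	/* example only */
struct example_item {
	struct list_head node;
};

static void example_process_all(struct list_head *head)
{
	struct example_item *it;

	list_for_each_entry(it, head, node) {
		process_item(it);	/* hypothetical per-item work */
		cond_resched();		/* reschedule here if needed */
	}
}
#endif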
7238
7239 #ifdef CONFIG_PREEMPT_DYNAMIC
7240 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7241 #define cond_resched_dynamic_enabled __cond_resched
7242 #define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
7243 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7244 EXPORT_STATIC_CALL_TRAMP(cond_resched);
7245
7246 #define might_resched_dynamic_enabled __cond_resched
7247 #define might_resched_dynamic_disabled ((void *)&__static_call_return0)
7248 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7249 EXPORT_STATIC_CALL_TRAMP(might_resched);
7250 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7251 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7252 int __sched dynamic_cond_resched(void)
7253 {
7254 klp_sched_try_switch();
7255 if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7256 return 0;
7257 return __cond_resched();
7258 }
7259 EXPORT_SYMBOL(dynamic_cond_resched);
7260
7261 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7262 int __sched dynamic_might_resched(void)
7263 {
7264 if (!static_branch_unlikely(&sk_dynamic_might_resched))
7265 return 0;
7266 return __cond_resched();
7267 }
7268 EXPORT_SYMBOL(dynamic_might_resched);
7269 #endif
7270 #endif
7271
7272 /*
7273 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7274 * call schedule, and on return reacquire the lock.
7275 *
7276 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7277 * operations here to prevent schedule() from being called twice (once via
7278 * spin_unlock(), once by hand).
7279 */
7280 int __cond_resched_lock(spinlock_t *lock)
7281 {
7282 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7283 int ret = 0;
7284
7285 lockdep_assert_held(lock);
7286
7287 if (spin_needbreak(lock) || resched) {
7288 spin_unlock(lock);
7289 if (!_cond_resched())
7290 cpu_relax();
7291 ret = 1;
7292 spin_lock(lock);
7293 }
7294 return ret;
7295 }
7296 EXPORT_SYMBOL(__cond_resched_lock);
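
/*
 * For illustration, a minimal sketch of how a caller typically uses
 * cond_resched_lock() while draining a list under a spinlock; the lock,
 * list and free_one() helper are hypothetical. The loop must tolerate the
 * lock being dropped and re-acquired on any iteration.
 */
#if 0	/* example only */
static void example_drain(spinlock_t *lock, struct list_head *head)
{
	spin_lock(lock);
	while (!list_empty(head)) {
		struct example_item *it =
			list_first_entry(head, struct example_item, node);

		list_del(&it->node);
		free_one(it);			/* hypothetical */
		cond_resched_lock(lock);	/* may drop and re-take *lock */
	}
	spin_unlock(lock);
}
#endif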
7297
7298 int __cond_resched_rwlock_read(rwlock_t *lock)
7299 {
7300 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7301 int ret = 0;
7302
7303 lockdep_assert_held_read(lock);
7304
7305 if (rwlock_needbreak(lock) || resched) {
7306 read_unlock(lock);
7307 if (!_cond_resched())
7308 cpu_relax();
7309 ret = 1;
7310 read_lock(lock);
7311 }
7312 return ret;
7313 }
7314 EXPORT_SYMBOL(__cond_resched_rwlock_read);
7315
7316 int __cond_resched_rwlock_write(rwlock_t *lock)
7317 {
7318 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7319 int ret = 0;
7320
7321 lockdep_assert_held_write(lock);
7322
7323 if (rwlock_needbreak(lock) || resched) {
7324 write_unlock(lock);
7325 if (!_cond_resched())
7326 cpu_relax();
7327 ret = 1;
7328 write_lock(lock);
7329 }
7330 return ret;
7331 }
7332 EXPORT_SYMBOL(__cond_resched_rwlock_write);
7333
7334 #ifdef CONFIG_PREEMPT_DYNAMIC
7335
7336 #ifdef CONFIG_GENERIC_ENTRY
7337 #include <linux/entry-common.h>
7338 #endif
7339
7340 /*
7341 * SC:cond_resched
7342 * SC:might_resched
7343 * SC:preempt_schedule
7344 * SC:preempt_schedule_notrace
7345 * SC:irqentry_exit_cond_resched
7346 *
7347 *
7348 * NONE:
7349 * cond_resched <- __cond_resched
7350 * might_resched <- RET0
7351 * preempt_schedule <- NOP
7352 * preempt_schedule_notrace <- NOP
7353 * irqentry_exit_cond_resched <- NOP
7354 *
7355 * VOLUNTARY:
7356 * cond_resched <- __cond_resched
7357 * might_resched <- __cond_resched
7358 * preempt_schedule <- NOP
7359 * preempt_schedule_notrace <- NOP
7360 * irqentry_exit_cond_resched <- NOP
7361 *
7362 * FULL:
7363 * cond_resched <- RET0
7364 * might_resched <- RET0
7365 * preempt_schedule <- preempt_schedule
7366 * preempt_schedule_notrace <- preempt_schedule_notrace
7367 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7368 */
7369
7370 enum {
7371 preempt_dynamic_undefined = -1,
7372 preempt_dynamic_none,
7373 preempt_dynamic_voluntary,
7374 preempt_dynamic_full,
7375 };
7376
7377 int preempt_dynamic_mode = preempt_dynamic_undefined;
7378
7379 int sched_dynamic_mode(const char *str)
7380 {
7381 if (!strcmp(str, "none"))
7382 return preempt_dynamic_none;
7383
7384 if (!strcmp(str, "voluntary"))
7385 return preempt_dynamic_voluntary;
7386
7387 if (!strcmp(str, "full"))
7388 return preempt_dynamic_full;
7389
7390 return -EINVAL;
7391 }
7392
7393 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7394 #define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
7395 #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
7396 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7397 #define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key)
7398 #define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key)
7399 #else
7400 #error "Unsupported PREEMPT_DYNAMIC mechanism"
7401 #endif
7402
7403 static DEFINE_MUTEX(sched_dynamic_mutex);
7404 static bool klp_override;
7405
7406 static void __sched_dynamic_update(int mode)
7407 {
7408 /*
7409 * Prevent {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7410 * the ZERO state, which is invalid.
7411 */
7412 if (!klp_override)
7413 preempt_dynamic_enable(cond_resched);
7414 preempt_dynamic_enable(might_resched);
7415 preempt_dynamic_enable(preempt_schedule);
7416 preempt_dynamic_enable(preempt_schedule_notrace);
7417 preempt_dynamic_enable(irqentry_exit_cond_resched);
7418
7419 switch (mode) {
7420 case preempt_dynamic_none:
7421 if (!klp_override)
7422 preempt_dynamic_enable(cond_resched);
7423 preempt_dynamic_disable(might_resched);
7424 preempt_dynamic_disable(preempt_schedule);
7425 preempt_dynamic_disable(preempt_schedule_notrace);
7426 preempt_dynamic_disable(irqentry_exit_cond_resched);
7427 if (mode != preempt_dynamic_mode)
7428 pr_info("Dynamic Preempt: none\n");
7429 break;
7430
7431 case preempt_dynamic_voluntary:
7432 if (!klp_override)
7433 preempt_dynamic_enable(cond_resched);
7434 preempt_dynamic_enable(might_resched);
7435 preempt_dynamic_disable(preempt_schedule);
7436 preempt_dynamic_disable(preempt_schedule_notrace);
7437 preempt_dynamic_disable(irqentry_exit_cond_resched);
7438 if (mode != preempt_dynamic_mode)
7439 pr_info("Dynamic Preempt: voluntary\n");
7440 break;
7441
7442 case preempt_dynamic_full:
7443 if (!klp_override)
7444 preempt_dynamic_disable(cond_resched);
7445 preempt_dynamic_disable(might_resched);
7446 preempt_dynamic_enable(preempt_schedule);
7447 preempt_dynamic_enable(preempt_schedule_notrace);
7448 preempt_dynamic_enable(irqentry_exit_cond_resched);
7449 if (mode != preempt_dynamic_mode)
7450 pr_info("Dynamic Preempt: full\n");
7451 break;
7452 }
7453
7454 preempt_dynamic_mode = mode;
7455 }
7456
7457 void sched_dynamic_update(int mode)
7458 {
7459 mutex_lock(&sched_dynamic_mutex);
7460 __sched_dynamic_update(mode);
7461 mutex_unlock(&sched_dynamic_mutex);
7462 }
7463
7464 #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7465
7466 static int klp_cond_resched(void)
7467 {
7468 __klp_sched_try_switch();
7469 return __cond_resched();
7470 }
7471
7472 void sched_dynamic_klp_enable(void)
7473 {
7474 mutex_lock(&sched_dynamic_mutex);
7475
7476 klp_override = true;
7477 static_call_update(cond_resched, klp_cond_resched);
7478
7479 mutex_unlock(&sched_dynamic_mutex);
7480 }
7481
7482 void sched_dynamic_klp_disable(void)
7483 {
7484 mutex_lock(&sched_dynamic_mutex);
7485
7486 klp_override = false;
7487 __sched_dynamic_update(preempt_dynamic_mode);
7488
7489 mutex_unlock(&sched_dynamic_mutex);
7490 }
7491
7492 #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
7493
7494 static int __init setup_preempt_mode(char *str)
7495 {
7496 int mode = sched_dynamic_mode(str);
7497 if (mode < 0) {
7498 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7499 return 0;
7500 }
7501
7502 sched_dynamic_update(mode);
7503 return 1;
7504 }
7505 __setup("preempt=", setup_preempt_mode);
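
/*
 * For illustration: with CONFIG_PREEMPT_DYNAMIC the preemption model is
 * selected on the kernel command line, e.g.
 *
 *	preempt=none
 *	preempt=voluntary
 *	preempt=full
 *
 * setup_preempt_mode() parses the string via sched_dynamic_mode() and the
 * chosen mode is then applied through sched_dynamic_update().
 */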
7506
7507 static void __init preempt_dynamic_init(void)
7508 {
7509 if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7510 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7511 sched_dynamic_update(preempt_dynamic_none);
7512 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7513 sched_dynamic_update(preempt_dynamic_voluntary);
7514 } else {
7515 /* Default static call setting, nothing to do */
7516 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
7517 preempt_dynamic_mode = preempt_dynamic_full;
7518 pr_info("Dynamic Preempt: full\n");
7519 }
7520 }
7521 }
7522
7523 #define PREEMPT_MODEL_ACCESSOR(mode) \
7524 bool preempt_model_##mode(void) \
7525 { \
7526 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
7527 return preempt_dynamic_mode == preempt_dynamic_##mode; \
7528 } \
7529 EXPORT_SYMBOL_GPL(preempt_model_##mode)
7530
7531 PREEMPT_MODEL_ACCESSOR(none);
7532 PREEMPT_MODEL_ACCESSOR(voluntary);
7533 PREEMPT_MODEL_ACCESSOR(full);
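
/*
 * For illustration, PREEMPT_MODEL_ACCESSOR(none) above expands to roughly:
 *
 *	bool preempt_model_none(void)
 *	{
 *		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
 *		return preempt_dynamic_mode == preempt_dynamic_none;
 *	}
 *	EXPORT_SYMBOL_GPL(preempt_model_none);
 */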
7534
7535 #else /* !CONFIG_PREEMPT_DYNAMIC: */
7536
7537 static inline void preempt_dynamic_init(void) { }
7538
7539 #endif /* CONFIG_PREEMPT_DYNAMIC */
7540
7541 int io_schedule_prepare(void)
7542 {
7543 int old_iowait = current->in_iowait;
7544
7545 current->in_iowait = 1;
7546 blk_flush_plug(current->plug, true);
7547 return old_iowait;
7548 }
7549
7550 void io_schedule_finish(int token)
7551 {
7552 current->in_iowait = token;
7553 }
7554
7555 /*
7556 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7557 * that process accounting knows that this is a task in IO wait state.
7558 */
7559 long __sched io_schedule_timeout(long timeout)
7560 {
7561 int token;
7562 long ret;
7563
7564 token = io_schedule_prepare();
7565 ret = schedule_timeout(timeout);
7566 io_schedule_finish(token);
7567
7568 return ret;
7569 }
7570 EXPORT_SYMBOL(io_schedule_timeout);
7571
7572 void __sched io_schedule(void)
7573 {
7574 int token;
7575
7576 token = io_schedule_prepare();
7577 schedule();
7578 io_schedule_finish(token);
7579 }
7580 EXPORT_SYMBOL(io_schedule);
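
/*
 * For illustration, a minimal sketch of a caller that needs IO-wait
 * accounting around its own wait primitive, mirroring io_schedule() above;
 * the wait queue and @done flag are hypothetical.
 */
#if 0	/* example only */
static void example_io_wait(wait_queue_head_t *wq, bool *done)
{
	int token = io_schedule_prepare();	/* mark IO wait, flush plug */

	wait_event(*wq, READ_ONCE(*done));	/* sleeps via schedule() */
	io_schedule_finish(token);		/* restore previous in_iowait */
}
#endif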
7581
7582 void sched_show_task(struct task_struct *p)
7583 {
7584 unsigned long free;
7585 int ppid;
7586
7587 if (!try_get_task_stack(p))
7588 return;
7589
7590 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7591
7592 if (task_is_running(p))
7593 pr_cont(" running task ");
7594 free = stack_not_used(p);
7595 ppid = 0;
7596 rcu_read_lock();
7597 if (pid_alive(p))
7598 ppid = task_pid_nr(rcu_dereference(p->real_parent));
7599 rcu_read_unlock();
7600 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
7601 free, task_pid_nr(p), task_tgid_nr(p),
7602 ppid, read_task_thread_flags(p));
7603
7604 print_worker_info(KERN_INFO, p);
7605 print_stop_info(KERN_INFO, p);
7606 print_scx_info(KERN_INFO, p);
7607 show_stack(p, NULL, KERN_INFO);
7608 put_task_stack(p);
7609 }
7610 EXPORT_SYMBOL_GPL(sched_show_task);
7611
7612 static inline bool
7613 state_filter_match(unsigned long state_filter, struct task_struct *p)
7614 {
7615 unsigned int state = READ_ONCE(p->__state);
7616
7617 /* no filter, everything matches */
7618 if (!state_filter)
7619 return true;
7620
7621 /* filter, but doesn't match */
7622 if (!(state & state_filter))
7623 return false;
7624
7625 /*
7626 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7627 * TASK_KILLABLE).
7628 */
7629 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
7630 return false;
7631
7632 return true;
7633 }
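
/*
 * A worked example of the filter above: with state_filter ==
 * TASK_UNINTERRUPTIBLE (the sysrq-w case), a task sleeping in TASK_IDLE
 * (== TASK_UNINTERRUPTIBLE | TASK_NOLOAD) passes the "state & state_filter"
 * test but is then rejected by the TASK_NOLOAD check, while plain
 * TASK_UNINTERRUPTIBLE and TASK_KILLABLE sleepers are still reported.
 */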
7634
7635
7636 void show_state_filter(unsigned int state_filter)
7637 {
7638 struct task_struct *g, *p;
7639
7640 rcu_read_lock();
7641 for_each_process_thread(g, p) {
7642 /*
7643 * Reset the NMI-timeout; listing all tasks on a slow
7644 * console might take a lot of time.
7645 * Also, reset softlockup watchdogs on all CPUs, because
7646 * another CPU might be blocked waiting for us to process
7647 * an IPI.
7648 */
7649 touch_nmi_watchdog();
7650 touch_all_softlockup_watchdogs();
7651 if (state_filter_match(state_filter, p))
7652 sched_show_task(p);
7653 }
7654
7655 #ifdef CONFIG_SCHED_DEBUG
7656 if (!state_filter)
7657 sysrq_sched_debug_show();
7658 #endif
7659 rcu_read_unlock();
7660 /*
7661 * Only show locks if all tasks are dumped:
7662 */
7663 if (!state_filter)
7664 debug_show_all_locks();
7665 }
7666
7667 /**
7668 * init_idle - set up an idle thread for a given CPU
7669 * @idle: task in question
7670 * @cpu: CPU the idle task belongs to
7671 *
7672 * NOTE: this function does not set the idle thread's NEED_RESCHED
7673 * flag, to make booting more robust.
7674 */
7675 void __init init_idle(struct task_struct *idle, int cpu)
7676 {
7677 #ifdef CONFIG_SMP
7678 struct affinity_context ac = (struct affinity_context) {
7679 .new_mask = cpumask_of(cpu),
7680 .flags = 0,
7681 };
7682 #endif
7683 struct rq *rq = cpu_rq(cpu);
7684 unsigned long flags;
7685
7686 __sched_fork(0, idle);
7687
7688 raw_spin_lock_irqsave(&idle->pi_lock, flags);
7689 raw_spin_rq_lock(rq);
7690
7691 idle->__state = TASK_RUNNING;
7692 idle->se.exec_start = sched_clock();
7693 /*
7694 * PF_KTHREAD should already be set at this point; regardless, make it
7695 * look like a proper per-CPU kthread.
7696 */
7697 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
7698 kthread_set_per_cpu(idle, cpu);
7699
7700 #ifdef CONFIG_SMP
7701 /*
7702 * It's possible that init_idle() gets called multiple times on a task;
7703 * in that case do_set_cpus_allowed() will not do the right thing.
7704 *
7705 * And since this is boot we can forgo the serialization.
7706 */
7707 set_cpus_allowed_common(idle, &ac);
7708 #endif
7709 /*
7710 * We're having a chicken-and-egg problem: even though we are
7711 * holding rq->lock, the CPU isn't yet set to this CPU so the
7712 * lockdep check in task_group() will fail.
7713 *
7714 * Similar case to sched_fork(). Alternatively we could
7715 * use task_rq_lock() here and obtain the other rq->lock.
7716 *
7717 * Silence PROVE_RCU
7718 */
7719 rcu_read_lock();
7720 __set_task_cpu(idle, cpu);
7721 rcu_read_unlock();
7722
7723 rq->idle = idle;
7724 rcu_assign_pointer(rq->curr, idle);
7725 idle->on_rq = TASK_ON_RQ_QUEUED;
7726 #ifdef CONFIG_SMP
7727 idle->on_cpu = 1;
7728 #endif
7729 raw_spin_rq_unlock(rq);
7730 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
7731
7732 /* Set the preempt count _outside_ the spinlocks! */
7733 init_idle_preempt_count(idle, cpu);
7734
7735 /*
7736 * The idle tasks have their own, simple scheduling class:
7737 */
7738 idle->sched_class = &idle_sched_class;
7739 ftrace_graph_init_idle_task(idle, cpu);
7740 vtime_init_idle(idle, cpu);
7741 #ifdef CONFIG_SMP
7742 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7743 #endif
7744 }
7745
7746 #ifdef CONFIG_SMP
7747
7748 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7749 const struct cpumask *trial)
7750 {
7751 int ret = 1;
7752
7753 if (cpumask_empty(cur))
7754 return ret;
7755
7756 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
7757
7758 return ret;
7759 }
7760
7761 int task_can_attach(struct task_struct *p)
7762 {
7763 int ret = 0;
7764
7765 /*
7766 * Kthreads which disallow setaffinity shouldn't be moved
7767 * to a new cpuset; we don't want to change their CPU
7768 * affinity and isolating such threads by their set of
7769 * allowed nodes is unnecessary. Thus, cpusets are not
7770 * applicable for such threads. This prevents checking for
7771 * success of set_cpus_allowed_ptr() on all attached tasks
7772 * before cpus_mask may be changed.
7773 */
7774 if (p->flags & PF_NO_SETAFFINITY)
7775 ret = -EINVAL;
7776
7777 return ret;
7778 }
7779
7780 bool sched_smp_initialized __read_mostly;
7781
7782 #ifdef CONFIG_NUMA_BALANCING
7783 /* Migrate current task p to target_cpu */
7784 int migrate_task_to(struct task_struct *p, int target_cpu)
7785 {
7786 struct migration_arg arg = { p, target_cpu };
7787 int curr_cpu = task_cpu(p);
7788
7789 if (curr_cpu == target_cpu)
7790 return 0;
7791
7792 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
7793 return -EINVAL;
7794
7795 /* TODO: This is not properly updating schedstats */
7796
7797 trace_sched_move_numa(p, curr_cpu, target_cpu);
7798 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
7799 }
7800
7801 /*
7802 * Requeue a task on a given node and accurately track the number of NUMA
7803 * tasks on the runqueues
7804 */
7805 void sched_setnuma(struct task_struct *p, int nid)
7806 {
7807 bool queued, running;
7808 struct rq_flags rf;
7809 struct rq *rq;
7810
7811 rq = task_rq_lock(p, &rf);
7812 queued = task_on_rq_queued(p);
7813 running = task_current(rq, p);
7814
7815 if (queued)
7816 dequeue_task(rq, p, DEQUEUE_SAVE);
7817 if (running)
7818 put_prev_task(rq, p);
7819
7820 p->numa_preferred_nid = nid;
7821
7822 if (queued)
7823 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7824 if (running)
7825 set_next_task(rq, p);
7826 task_rq_unlock(rq, p, &rf);
7827 }
7828 #endif /* CONFIG_NUMA_BALANCING */
7829
7830 #ifdef CONFIG_HOTPLUG_CPU
7831 /*
7832 * Ensure that the idle task is using init_mm right before its CPU goes
7833 * offline.
7834 */
7835 void idle_task_exit(void)
7836 {
7837 struct mm_struct *mm = current->active_mm;
7838
7839 BUG_ON(cpu_online(smp_processor_id()));
7840 BUG_ON(current != this_rq()->idle);
7841
7842 if (mm != &init_mm) {
7843 switch_mm(mm, &init_mm, current);
7844 finish_arch_post_lock_switch();
7845 }
7846
7847 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
7848 }
7849
7850 static int __balance_push_cpu_stop(void *arg)
7851 {
7852 struct task_struct *p = arg;
7853 struct rq *rq = this_rq();
7854 struct rq_flags rf;
7855 int cpu;
7856
7857 raw_spin_lock_irq(&p->pi_lock);
7858 rq_lock(rq, &rf);
7859
7860 update_rq_clock(rq);
7861
7862 if (task_rq(p) == rq && task_on_rq_queued(p)) {
7863 cpu = select_fallback_rq(rq->cpu, p);
7864 rq = __migrate_task(rq, &rf, p, cpu);
7865 }
7866
7867 rq_unlock(rq, &rf);
7868 raw_spin_unlock_irq(&p->pi_lock);
7869
7870 put_task_struct(p);
7871
7872 return 0;
7873 }
7874
7875 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
7876
7877 /*
7878 * Ensure we only run per-cpu kthreads once the CPU goes !active.
7879 *
7880 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
7881 * effective while the CPU is being taken down.
7882 */
7883 static void balance_push(struct rq *rq)
7884 {
7885 struct task_struct *push_task = rq->curr;
7886
7887 lockdep_assert_rq_held(rq);
7888
7889 /*
7890 * Ensure the thing is persistent until balance_push_set(.on = false);
7891 */
7892 rq->balance_callback = &balance_push_callback;
7893
7894 /*
7895 * Only active while going offline and when invoked on the outgoing
7896 * CPU.
7897 */
7898 if (!cpu_dying(rq->cpu) || rq != this_rq())
7899 return;
7900
7901 /*
7902 * Both the cpu-hotplug and stop task are in this case and are
7903 * required to complete the hotplug process.
7904 */
7905 if (kthread_is_per_cpu(push_task) ||
7906 is_migration_disabled(push_task)) {
7907
7908 /*
7909 * If this is the idle task on the outgoing CPU try to wake
7910 * up the hotplug control thread which might wait for the
7911 * last task to vanish. The rcuwait_active() check is
7912 * accurate here because the waiter is pinned on this CPU
7913 * and obviously can't be running in parallel.
7914 *
7915 * On RT kernels this also has to check whether there are
7916 * pinned and scheduled out tasks on the runqueue. They
7917 * need to leave the migrate disabled section first.
7918 */
7919 if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
7920 rcuwait_active(&rq->hotplug_wait)) {
7921 raw_spin_rq_unlock(rq);
7922 rcuwait_wake_up(&rq->hotplug_wait);
7923 raw_spin_rq_lock(rq);
7924 }
7925 return;
7926 }
7927
7928 get_task_struct(push_task);
7929 /*
7930 * Temporarily drop rq->lock such that we can wake-up the stop task.
7931 * Both preemption and IRQs are still disabled.
7932 */
7933 preempt_disable();
7934 raw_spin_rq_unlock(rq);
7935 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
7936 this_cpu_ptr(&push_work));
7937 preempt_enable();
7938 /*
7939 * At this point need_resched() is true and we'll take the loop in
7940 * schedule(). The next pick is obviously going to be the stop task
7941 * which is kthread_is_per_cpu() and will push this task away.
7942 */
7943 raw_spin_rq_lock(rq);
7944 }
7945
7946 static void balance_push_set(int cpu, bool on)
7947 {
7948 struct rq *rq = cpu_rq(cpu);
7949 struct rq_flags rf;
7950
7951 rq_lock_irqsave(rq, &rf);
7952 if (on) {
7953 WARN_ON_ONCE(rq->balance_callback);
7954 rq->balance_callback = &balance_push_callback;
7955 } else if (rq->balance_callback == &balance_push_callback) {
7956 rq->balance_callback = NULL;
7957 }
7958 rq_unlock_irqrestore(rq, &rf);
7959 }
7960
7961 /*
7962 * Invoked from a CPU's hotplug control thread after the CPU has been marked
7963 * inactive. All tasks which are not per CPU kernel threads are either
7964 * pushed off this CPU now via balance_push() or placed on a different CPU
7965 * during wakeup. Wait until the CPU is quiescent.
7966 */
7967 static void balance_hotplug_wait(void)
7968 {
7969 struct rq *rq = this_rq();
7970
7971 rcuwait_wait_event(&rq->hotplug_wait,
7972 rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
7973 TASK_UNINTERRUPTIBLE);
7974 }
7975
7976 #else
7977
7978 static inline void balance_push(struct rq *rq)
7979 {
7980 }
7981
7982 static inline void balance_push_set(int cpu, bool on)
7983 {
7984 }
7985
7986 static inline void balance_hotplug_wait(void)
7987 {
7988 }
7989
7990 #endif /* CONFIG_HOTPLUG_CPU */
7991
7992 void set_rq_online(struct rq *rq)
7993 {
7994 if (!rq->online) {
7995 const struct sched_class *class;
7996
7997 cpumask_set_cpu(rq->cpu, rq->rd->online);
7998 rq->online = 1;
7999
8000 for_each_class(class) {
8001 if (class->rq_online)
8002 class->rq_online(rq);
8003 }
8004 }
8005 }
8006
8007 void set_rq_offline(struct rq *rq)
8008 {
8009 if (rq->online) {
8010 const struct sched_class *class;
8011
8012 update_rq_clock(rq);
8013 for_each_class(class) {
8014 if (class->rq_offline)
8015 class->rq_offline(rq);
8016 }
8017
8018 cpumask_clear_cpu(rq->cpu, rq->rd->online);
8019 rq->online = 0;
8020 }
8021 }
8022
8023 static inline void sched_set_rq_online(struct rq *rq, int cpu)
8024 {
8025 struct rq_flags rf;
8026
8027 rq_lock_irqsave(rq, &rf);
8028 if (rq->rd) {
8029 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8030 set_rq_online(rq);
8031 }
8032 rq_unlock_irqrestore(rq, &rf);
8033 }
8034
8035 static inline void sched_set_rq_offline(struct rq *rq, int cpu)
8036 {
8037 struct rq_flags rf;
8038
8039 rq_lock_irqsave(rq, &rf);
8040 if (rq->rd) {
8041 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8042 set_rq_offline(rq);
8043 }
8044 rq_unlock_irqrestore(rq, &rf);
8045 }
8046
8047 /*
8048 * used to mark begin/end of suspend/resume:
8049 */
8050 static int num_cpus_frozen;
8051
8052 /*
8053 * Update cpusets according to cpu_active mask. If cpusets are
8054 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
8055 * around partition_sched_domains().
8056 *
8057 * If we come here as part of a suspend/resume, don't touch cpusets because we
8058 * want to restore them back to their original state upon resume anyway.
8059 */
8060 static void cpuset_cpu_active(void)
8061 {
8062 if (cpuhp_tasks_frozen) {
8063 /*
8064 * num_cpus_frozen tracks how many CPUs are involved in the suspend/resume
8065 * sequence. As long as this is not the last online
8066 * operation in the resume sequence, just build a single sched
8067 * domain, ignoring cpusets.
8068 */
8069 partition_sched_domains(1, NULL, NULL);
8070 if (--num_cpus_frozen)
8071 return;
8072 /*
8073 * This is the last CPU online operation. So fall through and
8074 * restore the original sched domains by considering the
8075 * cpuset configurations.
8076 */
8077 cpuset_force_rebuild();
8078 }
8079 cpuset_update_active_cpus();
8080 }
8081
8082 static int cpuset_cpu_inactive(unsigned int cpu)
8083 {
8084 if (!cpuhp_tasks_frozen) {
8085 int ret = dl_bw_check_overflow(cpu);
8086
8087 if (ret)
8088 return ret;
8089 cpuset_update_active_cpus();
8090 } else {
8091 num_cpus_frozen++;
8092 partition_sched_domains(1, NULL, NULL);
8093 }
8094 return 0;
8095 }
8096
8097 static inline void sched_smt_present_inc(int cpu)
8098 {
8099 #ifdef CONFIG_SCHED_SMT
8100 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8101 static_branch_inc_cpuslocked(&sched_smt_present);
8102 #endif
8103 }
8104
8105 static inline void sched_smt_present_dec(int cpu)
8106 {
8107 #ifdef CONFIG_SCHED_SMT
8108 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8109 static_branch_dec_cpuslocked(&sched_smt_present);
8110 #endif
8111 }
8112
8113 int sched_cpu_activate(unsigned int cpu)
8114 {
8115 struct rq *rq = cpu_rq(cpu);
8116
8117 /*
8118 * Clear the balance_push callback and prepare to schedule
8119 * regular tasks.
8120 */
8121 balance_push_set(cpu, false);
8122
8123 /*
8124 * When going up, increment the number of cores with SMT present.
8125 */
8126 sched_smt_present_inc(cpu);
8127 set_cpu_active(cpu, true);
8128
8129 if (sched_smp_initialized) {
8130 sched_update_numa(cpu, true);
8131 sched_domains_numa_masks_set(cpu);
8132 cpuset_cpu_active();
8133 }
8134
8135 scx_rq_activate(rq);
8136
8137 /*
8138 * Put the rq online, if not already. This happens:
8139 *
8140 * 1) In the early boot process, because we build the real domains
8141 * after all CPUs have been brought up.
8142 *
8143 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
8144 * domains.
8145 */
8146 sched_set_rq_online(rq, cpu);
8147
8148 return 0;
8149 }
8150
8151 int sched_cpu_deactivate(unsigned int cpu)
8152 {
8153 struct rq *rq = cpu_rq(cpu);
8154 int ret;
8155
8156 /*
8157 * Remove the CPU from nohz.idle_cpus_mask to prevent it from participating
8158 * in load balancing while it is not active.
8159 */
8160 nohz_balance_exit_idle(rq);
8161
8162 set_cpu_active(cpu, false);
8163
8164 /*
8165 * From this point forward, this CPU will refuse to run any task that
8166 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8167 * push those tasks away until this gets cleared, see
8168 * sched_cpu_dying().
8169 */
8170 balance_push_set(cpu, true);
8171
8172 /*
8173 * We've cleared cpu_active_mask / set balance_push, wait for all
8174 * preempt-disabled and RCU users of this state to go away such that
8175 * all new such users will observe it.
8176 *
8177 * Specifically, we rely on ttwu to no longer target this CPU, see
8178 * ttwu_queue_cond() and is_cpu_allowed().
8179 *
8180 * Synchronize before parking the smpboot threads to take care of the RCU boost case.
8181 */
8182 synchronize_rcu();
8183
8184 sched_set_rq_offline(rq, cpu);
8185
8186 scx_rq_deactivate(rq);
8187
8188 /*
8189 * When going down, decrement the number of cores with SMT present.
8190 */
8191 sched_smt_present_dec(cpu);
8192
8193 #ifdef CONFIG_SCHED_SMT
8194 sched_core_cpu_deactivate(cpu);
8195 #endif
8196
8197 if (!sched_smp_initialized)
8198 return 0;
8199
8200 sched_update_numa(cpu, false);
8201 ret = cpuset_cpu_inactive(cpu);
8202 if (ret) {
8203 sched_smt_present_inc(cpu);
8204 sched_set_rq_online(rq, cpu);
8205 balance_push_set(cpu, false);
8206 set_cpu_active(cpu, true);
8207 sched_update_numa(cpu, true);
8208 return ret;
8209 }
8210 sched_domains_numa_masks_clear(cpu);
8211 return 0;
8212 }
8213
8214 static void sched_rq_cpu_starting(unsigned int cpu)
8215 {
8216 struct rq *rq = cpu_rq(cpu);
8217
8218 rq->calc_load_update = calc_load_update;
8219 update_max_interval();
8220 }
8221
8222 int sched_cpu_starting(unsigned int cpu)
8223 {
8224 sched_core_cpu_starting(cpu);
8225 sched_rq_cpu_starting(cpu);
8226 sched_tick_start(cpu);
8227 return 0;
8228 }
8229
8230 #ifdef CONFIG_HOTPLUG_CPU
8231
8232 /*
8233 * Invoked immediately before the stopper thread is invoked to bring the
8234 * CPU down completely. At this point all per CPU kthreads except the
8235 * hotplug thread (current) and the stopper thread (inactive) have been
8236 * either parked or have been unbound from the outgoing CPU. Ensure that
8237 * any of those which might be on the way out are gone.
8238 *
8239 * If after this point a bound task is being woken on this CPU then the
8240 * responsible hotplug callback has failed to do its job.
8241 * sched_cpu_dying() will catch it with the appropriate fireworks.
8242 */
8243 int sched_cpu_wait_empty(unsigned int cpu)
8244 {
8245 balance_hotplug_wait();
8246 return 0;
8247 }
8248
8249 /*
8250 * Since this CPU is going 'away' for a while, fold any nr_active delta we
8251 * might have. Called from the CPU stopper task after ensuring that the
8252 * stopper is the last running task on the CPU, so nr_active count is
8253 * stable. We need to take the tear-down thread which is calling this into
8254 * account, so we hand in adjust = 1 to the load calculation.
8255 *
8256 * Also see the comment "Global load-average calculations".
8257 */
8258 static void calc_load_migrate(struct rq *rq)
8259 {
8260 long delta = calc_load_fold_active(rq, 1);
8261
8262 if (delta)
8263 atomic_long_add(delta, &calc_load_tasks);
8264 }
8265
8266 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8267 {
8268 struct task_struct *g, *p;
8269 int cpu = cpu_of(rq);
8270
8271 lockdep_assert_rq_held(rq);
8272
8273 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8274 for_each_process_thread(g, p) {
8275 if (task_cpu(p) != cpu)
8276 continue;
8277
8278 if (!task_on_rq_queued(p))
8279 continue;
8280
8281 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8282 }
8283 }
8284
8285 int sched_cpu_dying(unsigned int cpu)
8286 {
8287 struct rq *rq = cpu_rq(cpu);
8288 struct rq_flags rf;
8289
8290 /* Handle pending wakeups and then migrate everything off */
8291 sched_tick_stop(cpu);
8292
8293 rq_lock_irqsave(rq, &rf);
8294 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8295 WARN(true, "Dying CPU not properly vacated!");
8296 dump_rq_tasks(rq, KERN_WARNING);
8297 }
8298 rq_unlock_irqrestore(rq, &rf);
8299
8300 calc_load_migrate(rq);
8301 update_max_interval();
8302 hrtick_clear(rq);
8303 sched_core_cpu_dying(cpu);
8304 return 0;
8305 }
8306 #endif
8307
8308 void __init sched_init_smp(void)
8309 {
8310 sched_init_numa(NUMA_NO_NODE);
8311
8312 /*
8313 * There's no userspace yet to cause hotplug operations; hence all the
8314 * CPU masks are stable and all blatant races in the below code cannot
8315 * happen.
8316 */
8317 mutex_lock(&sched_domains_mutex);
8318 sched_init_domains(cpu_active_mask);
8319 mutex_unlock(&sched_domains_mutex);
8320
8321 /* Move init over to a non-isolated CPU */
8322 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
8323 BUG();
8324 current->flags &= ~PF_NO_SETAFFINITY;
8325 sched_init_granularity();
8326
8327 init_sched_rt_class();
8328 init_sched_dl_class();
8329
8330 sched_smp_initialized = true;
8331 }
8332
8333 static int __init migration_init(void)
8334 {
8335 sched_cpu_starting(smp_processor_id());
8336 return 0;
8337 }
8338 early_initcall(migration_init);
8339
8340 #else
8341 void __init sched_init_smp(void)
8342 {
8343 sched_init_granularity();
8344 }
8345 #endif /* CONFIG_SMP */
8346
8347 int in_sched_functions(unsigned long addr)
8348 {
8349 return in_lock_functions(addr) ||
8350 (addr >= (unsigned long)__sched_text_start
8351 && addr < (unsigned long)__sched_text_end);
8352 }
8353
8354 #ifdef CONFIG_CGROUP_SCHED
8355 /*
8356 * Default task group.
8357 * Every task in the system belongs to this group at bootup.
8358 */
8359 struct task_group root_task_group;
8360 LIST_HEAD(task_groups);
8361
8362 /* Cacheline aligned slab cache for task_group */
8363 static struct kmem_cache *task_group_cache __ro_after_init;
8364 #endif
8365
8366 void __init sched_init(void)
8367 {
8368 unsigned long ptr = 0;
8369 int i;
8370
8371 /* Make sure the linker didn't screw up */
8372 #ifdef CONFIG_SMP
8373 BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
8374 #endif
8375 BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
8376 BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
8377 BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
8378 #ifdef CONFIG_SCHED_CLASS_EXT
8379 BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
8380 BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
8381 #endif
8382
8383 wait_bit_init();
8384
8385 #ifdef CONFIG_FAIR_GROUP_SCHED
8386 ptr += 2 * nr_cpu_ids * sizeof(void **);
8387 #endif
8388 #ifdef CONFIG_RT_GROUP_SCHED
8389 ptr += 2 * nr_cpu_ids * sizeof(void **);
8390 #endif
8391 if (ptr) {
8392 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8393
8394 #ifdef CONFIG_FAIR_GROUP_SCHED
8395 root_task_group.se = (struct sched_entity **)ptr;
8396 ptr += nr_cpu_ids * sizeof(void **);
8397
8398 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8399 ptr += nr_cpu_ids * sizeof(void **);
8400
8401 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8402 init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
8403 #endif /* CONFIG_FAIR_GROUP_SCHED */
8404 #ifdef CONFIG_EXT_GROUP_SCHED
8405 root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
8406 #endif /* CONFIG_EXT_GROUP_SCHED */
8407 #ifdef CONFIG_RT_GROUP_SCHED
8408 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8409 ptr += nr_cpu_ids * sizeof(void **);
8410
8411 root_task_group.rt_rq = (struct rt_rq **)ptr;
8412 ptr += nr_cpu_ids * sizeof(void **);
8413
8414 #endif /* CONFIG_RT_GROUP_SCHED */
8415 }
8416
8417 #ifdef CONFIG_SMP
8418 init_defrootdomain();
8419 #endif
8420
8421 #ifdef CONFIG_RT_GROUP_SCHED
8422 init_rt_bandwidth(&root_task_group.rt_bandwidth,
8423 global_rt_period(), global_rt_runtime());
8424 #endif /* CONFIG_RT_GROUP_SCHED */
8425
8426 #ifdef CONFIG_CGROUP_SCHED
8427 task_group_cache = KMEM_CACHE(task_group, 0);
8428
8429 list_add(&root_task_group.list, &task_groups);
8430 INIT_LIST_HEAD(&root_task_group.children);
8431 INIT_LIST_HEAD(&root_task_group.siblings);
8432 autogroup_init(&init_task);
8433 #endif /* CONFIG_CGROUP_SCHED */
8434
8435 for_each_possible_cpu(i) {
8436 struct rq *rq;
8437
8438 rq = cpu_rq(i);
8439 raw_spin_lock_init(&rq->__lock);
8440 rq->nr_running = 0;
8441 rq->calc_load_active = 0;
8442 rq->calc_load_update = jiffies + LOAD_FREQ;
8443 init_cfs_rq(&rq->cfs);
8444 init_rt_rq(&rq->rt);
8445 init_dl_rq(&rq->dl);
8446 #ifdef CONFIG_FAIR_GROUP_SCHED
8447 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8448 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8449 /*
8450 * How much CPU bandwidth does root_task_group get?
8451 *
8452 * In case of task-groups formed through the cgroup filesystem, it
8453 * gets 100% of the CPU resources in the system. This overall
8454 * system CPU resource is divided among the tasks of
8455 * root_task_group and its child task-groups in a fair manner,
8456 * based on each entity's (task or task-group's) weight
8457 * (se->load.weight).
8458 *
8459 * In other words, if root_task_group has 10 tasks (each of weight
8460 * 1024) and two child groups A0 and A1 (of weight 1024 each),
8461 * then A0's share of the CPU resource is:
8462 *
8463 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8464 *
8465 * We achieve this by letting root_task_group's tasks sit
8466 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
8467 */
8468 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8469 #endif /* CONFIG_FAIR_GROUP_SCHED */
8470
8471 #ifdef CONFIG_RT_GROUP_SCHED
8472 /*
8473 * This is required for init cpu because rt.c:__enable_runtime()
8474 * starts working after scheduler_running, which is not the case
8475 * yet.
8476 */
8477 rq->rt.rt_runtime = global_rt_runtime();
8478 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8479 #endif
8480 #ifdef CONFIG_SMP
8481 rq->sd = NULL;
8482 rq->rd = NULL;
8483 rq->cpu_capacity = SCHED_CAPACITY_SCALE;
8484 rq->balance_callback = &balance_push_callback;
8485 rq->active_balance = 0;
8486 rq->next_balance = jiffies;
8487 rq->push_cpu = 0;
8488 rq->cpu = i;
8489 rq->online = 0;
8490 rq->idle_stamp = 0;
8491 rq->avg_idle = 2*sysctl_sched_migration_cost;
8492 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8493
8494 INIT_LIST_HEAD(&rq->cfs_tasks);
8495
8496 rq_attach_root(rq, &def_root_domain);
8497 #ifdef CONFIG_NO_HZ_COMMON
8498 rq->last_blocked_load_update_tick = jiffies;
8499 atomic_set(&rq->nohz_flags, 0);
8500
8501 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
8502 #endif
8503 #ifdef CONFIG_HOTPLUG_CPU
8504 rcuwait_init(&rq->hotplug_wait);
8505 #endif
8506 #endif /* CONFIG_SMP */
8507 hrtick_rq_init(rq);
8508 atomic_set(&rq->nr_iowait, 0);
8509 fair_server_init(rq);
8510
8511 #ifdef CONFIG_SCHED_CORE
8512 rq->core = rq;
8513 rq->core_pick = NULL;
8514 rq->core_dl_server = NULL;
8515 rq->core_enabled = 0;
8516 rq->core_tree = RB_ROOT;
8517 rq->core_forceidle_count = 0;
8518 rq->core_forceidle_occupation = 0;
8519 rq->core_forceidle_start = 0;
8520
8521 rq->core_cookie = 0UL;
8522 #endif
8523 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
8524 }
8525
8526 set_load_weight(&init_task, false);
8527 init_task.se.slice = sysctl_sched_base_slice;
8528
8529 /*
8530 * The boot idle thread does lazy MMU switching as well:
8531 */
8532 mmgrab_lazy_tlb(&init_mm);
8533 enter_lazy_tlb(&init_mm, current);
8534
8535 /*
8536 * The idle task doesn't need the kthread struct to function, but it
8537 * is dressed up as a per-CPU kthread and thus needs to play the part
8538 * if we want to avoid special-casing it in code that deals with per-CPU
8539 * kthreads.
8540 */
8541 WARN_ON(!set_kthread_struct(current));
8542
8543 /*
8544 * Make us the idle thread. Technically, schedule() should not be
8545 * called from this thread, however somewhere below it might be,
8546 * but because we are the idle thread, we just pick up running again
8547 * when this runqueue becomes "idle".
8548 */
8549 init_idle(current, smp_processor_id());
8550
8551 calc_load_update = jiffies + LOAD_FREQ;
8552
8553 #ifdef CONFIG_SMP
8554 idle_thread_set_boot_cpu();
8555 balance_push_set(smp_processor_id(), false);
8556 #endif
8557 init_sched_fair_class();
8558 init_sched_ext_class();
8559
8560 psi_init();
8561
8562 init_uclamp();
8563
8564 preempt_dynamic_init();
8565
8566 scheduler_running = 1;
8567 }
8568
8569 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8570
8571 void __might_sleep(const char *file, int line)
8572 {
8573 unsigned int state = get_current_state();
8574 /*
8575 * Blocking primitives will set (and therefore destroy) current->state,
8576 * since we will exit with TASK_RUNNING. Make sure we enter with it,
8577 * otherwise we will destroy state.
8578 */
8579 WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
8580 "do not call blocking ops when !TASK_RUNNING; "
8581 "state=%x set at [<%p>] %pS\n", state,
8582 (void *)current->task_state_change,
8583 (void *)current->task_state_change);
8584
8585 __might_resched(file, line, 0);
8586 }
8587 EXPORT_SYMBOL(__might_sleep);
8588
8589 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
8590 {
8591 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
8592 return;
8593
8594 if (preempt_count() == preempt_offset)
8595 return;
8596
8597 pr_err("Preemption disabled at:");
8598 print_ip_sym(KERN_ERR, ip);
8599 }
8600
8601 static inline bool resched_offsets_ok(unsigned int offsets)
8602 {
8603 unsigned int nested = preempt_count();
8604
8605 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
8606
8607 return nested == offsets;
8608 }
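
/*
 * For illustration: @offsets packs the preempt count the caller expects in
 * the low MIGHT_RESCHED_PREEMPT_MASK bits and the expected RCU read-side
 * nesting depth shifted up by MIGHT_RESCHED_RCU_SHIFT. For example,
 * offsets == (1 << MIGHT_RESCHED_RCU_SHIFT) only matches a context with
 * preempt_count() == 0 and rcu_preempt_depth() == 1.
 */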
8609
8610 void __might_resched(const char *file, int line, unsigned int offsets)
8611 {
8612 /* Ratelimiting timestamp: */
8613 static unsigned long prev_jiffy;
8614
8615 unsigned long preempt_disable_ip;
8616
8617 /* WARN_ON_ONCE() by default, no rate limit required: */
8618 rcu_sleep_check();
8619
8620 if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
8621 !is_idle_task(current) && !current->non_block_count) ||
8622 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8623 oops_in_progress)
8624 return;
8625
8626 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8627 return;
8628 prev_jiffy = jiffies;
8629
8630 /* Save this before calling printk(), since that will clobber it: */
8631 preempt_disable_ip = get_preempt_disable_ip(current);
8632
8633 pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
8634 file, line);
8635 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8636 in_atomic(), irqs_disabled(), current->non_block_count,
8637 current->pid, current->comm);
8638 pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
8639 offsets & MIGHT_RESCHED_PREEMPT_MASK);
8640
8641 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
8642 pr_err("RCU nest depth: %d, expected: %u\n",
8643 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
8644 }
8645
8646 if (task_stack_end_corrupted(current))
8647 pr_emerg("Thread overran stack, or stack corrupted\n");
8648
8649 debug_show_held_locks(current);
8650 if (irqs_disabled())
8651 print_irqtrace_events(current);
8652
8653 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
8654 preempt_disable_ip);
8655
8656 dump_stack();
8657 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8658 }
8659 EXPORT_SYMBOL(__might_resched);
8660
8661 void __cant_sleep(const char *file, int line, int preempt_offset)
8662 {
8663 static unsigned long prev_jiffy;
8664
8665 if (irqs_disabled())
8666 return;
8667
8668 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8669 return;
8670
8671 if (preempt_count() > preempt_offset)
8672 return;
8673
8674 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8675 return;
8676 prev_jiffy = jiffies;
8677
8678 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8679 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8680 in_atomic(), irqs_disabled(),
8681 current->pid, current->comm);
8682
8683 debug_show_held_locks(current);
8684 dump_stack();
8685 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8686 }
8687 EXPORT_SYMBOL_GPL(__cant_sleep);
8688
8689 #ifdef CONFIG_SMP
8690 void __cant_migrate(const char *file, int line)
8691 {
8692 static unsigned long prev_jiffy;
8693
8694 if (irqs_disabled())
8695 return;
8696
8697 if (is_migration_disabled(current))
8698 return;
8699
8700 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8701 return;
8702
8703 if (preempt_count() > 0)
8704 return;
8705
8706 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8707 return;
8708 prev_jiffy = jiffies;
8709
8710 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8711 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8712 in_atomic(), irqs_disabled(), is_migration_disabled(current),
8713 current->pid, current->comm);
8714
8715 debug_show_held_locks(current);
8716 dump_stack();
8717 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8718 }
8719 EXPORT_SYMBOL_GPL(__cant_migrate);
8720 #endif
8721 #endif
8722
8723 #ifdef CONFIG_MAGIC_SYSRQ
8724 void normalize_rt_tasks(void)
8725 {
8726 struct task_struct *g, *p;
8727 struct sched_attr attr = {
8728 .sched_policy = SCHED_NORMAL,
8729 };
8730
8731 read_lock(&tasklist_lock);
8732 for_each_process_thread(g, p) {
8733 /*
8734 * Only normalize user tasks:
8735 */
8736 if (p->flags & PF_KTHREAD)
8737 continue;
8738
8739 p->se.exec_start = 0;
8740 schedstat_set(p->stats.wait_start, 0);
8741 schedstat_set(p->stats.sleep_start, 0);
8742 schedstat_set(p->stats.block_start, 0);
8743
8744 if (!rt_or_dl_task(p)) {
8745 /*
8746 * Renice negative nice level userspace
8747 * tasks back to 0:
8748 */
8749 if (task_nice(p) < 0)
8750 set_user_nice(p, 0);
8751 continue;
8752 }
8753
8754 __sched_setscheduler(p, &attr, false, false);
8755 }
8756 read_unlock(&tasklist_lock);
8757 }
8758
8759 #endif /* CONFIG_MAGIC_SYSRQ */
8760
8761 #if defined(CONFIG_KGDB_KDB)
8762 /*
8763 * These functions are only useful for KDB.
8764 *
8765 * They can only be called when the whole system has been
8766 * stopped - every CPU needs to be quiescent, and no scheduling
8767 * activity can take place. Using them for anything else would
8768 * be a serious bug, and as a result, they aren't even visible
8769 * under any other configuration.
8770 */
8771
8772 /**
8773 * curr_task - return the current task for a given CPU.
8774 * @cpu: the processor in question.
8775 *
8776 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8777 *
8778 * Return: The current task for @cpu.
8779 */
8780 struct task_struct *curr_task(int cpu)
8781 {
8782 return cpu_curr(cpu);
8783 }
8784
8785 #endif /* defined(CONFIG_KGDB_KDB) */
8786
8787 #ifdef CONFIG_CGROUP_SCHED
8788 /* task_group_lock serializes the addition/removal of task groups */
8789 static DEFINE_SPINLOCK(task_group_lock);
8790
8791 static inline void alloc_uclamp_sched_group(struct task_group *tg,
8792 struct task_group *parent)
8793 {
8794 #ifdef CONFIG_UCLAMP_TASK_GROUP
8795 enum uclamp_id clamp_id;
8796
8797 for_each_clamp_id(clamp_id) {
8798 uclamp_se_set(&tg->uclamp_req[clamp_id],
8799 uclamp_none(clamp_id), false);
8800 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
8801 }
8802 #endif
8803 }
8804
8805 static void sched_free_group(struct task_group *tg)
8806 {
8807 free_fair_sched_group(tg);
8808 free_rt_sched_group(tg);
8809 autogroup_free(tg);
8810 kmem_cache_free(task_group_cache, tg);
8811 }
8812
8813 static void sched_free_group_rcu(struct rcu_head *rcu)
8814 {
8815 sched_free_group(container_of(rcu, struct task_group, rcu));
8816 }
8817
8818 static void sched_unregister_group(struct task_group *tg)
8819 {
8820 unregister_fair_sched_group(tg);
8821 unregister_rt_sched_group(tg);
8822 /*
8823 * We have to wait for yet another RCU grace period to expire, as
8824 * print_cfs_stats() might run concurrently.
8825 */
8826 call_rcu(&tg->rcu, sched_free_group_rcu);
8827 }
8828
8829 /* allocate runqueue etc for a new task group */
8830 struct task_group *sched_create_group(struct task_group *parent)
8831 {
8832 struct task_group *tg;
8833
8834 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
8835 if (!tg)
8836 return ERR_PTR(-ENOMEM);
8837
8838 if (!alloc_fair_sched_group(tg, parent))
8839 goto err;
8840
8841 if (!alloc_rt_sched_group(tg, parent))
8842 goto err;
8843
8844 scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
8845 alloc_uclamp_sched_group(tg, parent);
8846
8847 return tg;
8848
8849 err:
8850 sched_free_group(tg);
8851 return ERR_PTR(-ENOMEM);
8852 }
8853
8854 void sched_online_group(struct task_group *tg, struct task_group *parent)
8855 {
8856 unsigned long flags;
8857
8858 spin_lock_irqsave(&task_group_lock, flags);
8859 list_add_rcu(&tg->list, &task_groups);
8860
8861 /* Root should already exist: */
8862 WARN_ON(!parent);
8863
8864 tg->parent = parent;
8865 INIT_LIST_HEAD(&tg->children);
8866 list_add_rcu(&tg->siblings, &parent->children);
8867 spin_unlock_irqrestore(&task_group_lock, flags);
8868
8869 online_fair_sched_group(tg);
8870 }
8871
8872 /* RCU callback to free various structures associated with a task group */
8873 static void sched_unregister_group_rcu(struct rcu_head *rhp)
8874 {
8875 /* Now it should be safe to free those cfs_rqs: */
8876 sched_unregister_group(container_of(rhp, struct task_group, rcu));
8877 }
8878
8879 void sched_destroy_group(struct task_group *tg)
8880 {
8881 /* Wait for possible concurrent references to cfs_rqs to complete: */
8882 call_rcu(&tg->rcu, sched_unregister_group_rcu);
8883 }
8884
8885 void sched_release_group(struct task_group *tg)
8886 {
8887 unsigned long flags;
8888
8889 /*
8890 * Unlink first, to keep walk_tg_tree_from() from finding us (via
8891 * sched_cfs_period_timer()).
8892 *
8893 * For this to be effective, we have to wait for all pending users of
8894 * this task group to leave their RCU critical section to ensure no new
8895 * user will see our dying task group any more. Specifically ensure
8896 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
8897 *
8898 * We therefore defer calling unregister_fair_sched_group() to
8899 * sched_unregister_group() which is guaranteed to be called only after the
8900 * current RCU grace period has expired.
8901 */
8902 spin_lock_irqsave(&task_group_lock, flags);
8903 list_del_rcu(&tg->list);
8904 list_del_rcu(&tg->siblings);
8905 spin_unlock_irqrestore(&task_group_lock, flags);
8906 }
8907
8908 static struct task_group *sched_get_task_group(struct task_struct *tsk)
8909 {
8910 struct task_group *tg;
8911
8912 /*
8913 * All callers are synchronized by task_rq_lock(); we do not use RCU,
8914 * which would be pointless here. Thus, we pass "true" to task_css_check()
8915 * to prevent lockdep warnings.
8916 */
8917 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8918 struct task_group, css);
8919 tg = autogroup_task_group(tsk, tg);
8920
8921 return tg;
8922 }
8923
8924 static void sched_change_group(struct task_struct *tsk, struct task_group *group)
8925 {
8926 tsk->sched_task_group = group;
8927
8928 #ifdef CONFIG_FAIR_GROUP_SCHED
8929 if (tsk->sched_class->task_change_group)
8930 tsk->sched_class->task_change_group(tsk);
8931 else
8932 #endif
8933 set_task_rq(tsk, task_cpu(tsk));
8934 }
8935
8936 /*
8937 * Change task's runqueue when it moves between groups.
8938 *
8939 * The caller of this function should have put the task in its new group by
8940 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
8941 * its new group.
8942 */
8943 void sched_move_task(struct task_struct *tsk)
8944 {
8945 int queued, running, queue_flags =
8946 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
8947 struct task_group *group;
8948 struct rq *rq;
8949
8950 CLASS(task_rq_lock, rq_guard)(tsk);
8951 rq = rq_guard.rq;
8952
8953 /*
8954 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
8955 * group changes.
8956 */
8957 group = sched_get_task_group(tsk);
8958 if (group == tsk->sched_task_group)
8959 return;
8960
8961 update_rq_clock(rq);
8962
8963 running = task_current(rq, tsk);
8964 queued = task_on_rq_queued(tsk);
8965
8966 if (queued)
8967 dequeue_task(rq, tsk, queue_flags);
8968 if (running)
8969 put_prev_task(rq, tsk);
8970
8971 sched_change_group(tsk, group);
8972 scx_move_task(tsk);
8973
8974 if (queued)
8975 enqueue_task(rq, tsk, queue_flags);
8976 if (running) {
8977 set_next_task(rq, tsk);
8978 /*
8979 * After changing group, the running task may have joined a
8980 * throttled one but it's still the running task. Trigger a
8981 * resched to make sure that task can still run.
8982 */
8983 resched_curr(rq);
8984 }
8985 }
8986
8987 static struct cgroup_subsys_state *
8988 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8989 {
8990 struct task_group *parent = css_tg(parent_css);
8991 struct task_group *tg;
8992
8993 if (!parent) {
8994 /* This is early initialization for the top cgroup */
8995 return &root_task_group.css;
8996 }
8997
8998 tg = sched_create_group(parent);
8999 if (IS_ERR(tg))
9000 return ERR_PTR(-ENOMEM);
9001
9002 return &tg->css;
9003 }
9004
9005 /* Expose task group only after completing cgroup initialization */
9006 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
9007 {
9008 struct task_group *tg = css_tg(css);
9009 struct task_group *parent = css_tg(css->parent);
9010 int ret;
9011
9012 ret = scx_tg_online(tg);
9013 if (ret)
9014 return ret;
9015
9016 if (parent)
9017 sched_online_group(tg, parent);
9018
9019 #ifdef CONFIG_UCLAMP_TASK_GROUP
9020 /* Propagate the effective uclamp value for the new group */
9021 guard(mutex)(&uclamp_mutex);
9022 guard(rcu)();
9023 cpu_util_update_eff(css);
9024 #endif
9025
9026 return 0;
9027 }
9028
9029 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9030 {
9031 struct task_group *tg = css_tg(css);
9032
9033 scx_tg_offline(tg);
9034 }
9035
9036 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
9037 {
9038 struct task_group *tg = css_tg(css);
9039
9040 sched_release_group(tg);
9041 }
9042
9043 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
9044 {
9045 struct task_group *tg = css_tg(css);
9046
9047 /*
9048 * Relies on the RCU grace period between css_released() and this.
9049 */
9050 sched_unregister_group(tg);
9051 }
9052
9053 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
9054 {
9055 #ifdef CONFIG_RT_GROUP_SCHED
9056 struct task_struct *task;
9057 struct cgroup_subsys_state *css;
9058
9059 cgroup_taskset_for_each(task, css, tset) {
9060 if (!sched_rt_can_attach(css_tg(css), task))
9061 return -EINVAL;
9062 }
9063 #endif
9064 return scx_cgroup_can_attach(tset);
9065 }
9066
9067 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
9068 {
9069 struct task_struct *task;
9070 struct cgroup_subsys_state *css;
9071
9072 cgroup_taskset_for_each(task, css, tset)
9073 sched_move_task(task);
9074
9075 scx_cgroup_finish_attach();
9076 }
9077
9078 static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9079 {
9080 scx_cgroup_cancel_attach(tset);
9081 }
9082
9083 #ifdef CONFIG_UCLAMP_TASK_GROUP
9084 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9085 {
9086 struct cgroup_subsys_state *top_css = css;
9087 struct uclamp_se *uc_parent = NULL;
9088 struct uclamp_se *uc_se = NULL;
9089 unsigned int eff[UCLAMP_CNT];
9090 enum uclamp_id clamp_id;
9091 unsigned int clamps;
9092
9093 lockdep_assert_held(&uclamp_mutex);
9094 SCHED_WARN_ON(!rcu_read_lock_held());
9095
9096 css_for_each_descendant_pre(css, top_css) {
9097 uc_parent = css_tg(css)->parent
9098 ? css_tg(css)->parent->uclamp : NULL;
9099
9100 for_each_clamp_id(clamp_id) {
9101 /* Assume effective clamps match requested clamps */
9102 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
9103 /* Cap effective clamps with parent's effective clamps */
9104 if (uc_parent &&
9105 eff[clamp_id] > uc_parent[clamp_id].value) {
9106 eff[clamp_id] = uc_parent[clamp_id].value;
9107 }
9108 }
9109 /* Ensure protection is always capped by limit */
9110 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
9111
9112 /* Propagate most restrictive effective clamps */
9113 clamps = 0x0;
9114 uc_se = css_tg(css)->uclamp;
9115 for_each_clamp_id(clamp_id) {
9116 if (eff[clamp_id] == uc_se[clamp_id].value)
9117 continue;
9118 uc_se[clamp_id].value = eff[clamp_id];
9119 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
9120 clamps |= (0x1 << clamp_id);
9121 }
9122 if (!clamps) {
9123 css = css_rightmost_descendant(css);
9124 continue;
9125 }
9126
9127 /* Immediately update descendants' RUNNABLE tasks */
9128 uclamp_update_active_tasks(css);
9129 }
9130 }
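/*
 * Illustrative example (not from the original comments): if a group writes
 * cpu.uclamp.min = "80" while its effective cpu.uclamp.max is 50%, the
 * eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]) step above caps the
 * effective minimum at 50%; subtrees whose effective values did not change
 * are pruned via css_rightmost_descendant().
 */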
9131
9132 /*
9133 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
9134 * C expression. Since there is no way to convert a macro argument (N) into a
9135 * character constant, use two levels of macros.
9136 */
9137 #define _POW10(exp) ((unsigned int)1e##exp)
9138 #define POW10(exp) _POW10(exp)
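/* For example, POW10(2) expands to _POW10(2) -> ((unsigned int)1e2) == 100. */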
9139
9140 struct uclamp_request {
9141 #define UCLAMP_PERCENT_SHIFT 2
9142 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
9143 s64 percent;
9144 u64 util;
9145 int ret;
9146 };
9147
9148 static inline struct uclamp_request
9149 capacity_from_percent(char *buf)
9150 {
9151 struct uclamp_request req = {
9152 .percent = UCLAMP_PERCENT_SCALE,
9153 .util = SCHED_CAPACITY_SCALE,
9154 .ret = 0,
9155 };
9156
9157 buf = strim(buf);
9158 if (strcmp(buf, "max")) {
9159 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
9160 &req.percent);
9161 if (req.ret)
9162 return req;
9163 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
9164 req.ret = -ERANGE;
9165 return req;
9166 }
9167
9168 req.util = req.percent << SCHED_CAPACITY_SHIFT;
9169 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
9170 }
9171
9172 return req;
9173 }
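/*
 * Worked example (illustrative): writing "50" parses to req.percent == 5000
 * with UCLAMP_PERCENT_SHIFT == 2, and req.util becomes
 * DIV_ROUND_CLOSEST_ULL(5000 << SCHED_CAPACITY_SHIFT, 10000) == 512,
 * i.e. half of SCHED_CAPACITY_SCALE.
 */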
9174
9175 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
9176 size_t nbytes, loff_t off,
9177 enum uclamp_id clamp_id)
9178 {
9179 struct uclamp_request req;
9180 struct task_group *tg;
9181
9182 req = capacity_from_percent(buf);
9183 if (req.ret)
9184 return req.ret;
9185
9186 static_branch_enable(&sched_uclamp_used);
9187
9188 guard(mutex)(&uclamp_mutex);
9189 guard(rcu)();
9190
9191 tg = css_tg(of_css(of));
9192 if (tg->uclamp_req[clamp_id].value != req.util)
9193 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
9194
9195 /*
9196 * Because the conversion rounding is not recoverable, we keep track of
9197 * the exact requested value.
9198 */
9199 tg->uclamp_pct[clamp_id] = req.percent;
9200
9201 /* Update effective clamps to track the most restrictive value */
9202 cpu_util_update_eff(of_css(of));
9203
9204 return nbytes;
9205 }
9206
9207 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9208 char *buf, size_t nbytes,
9209 loff_t off)
9210 {
9211 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9212 }
9213
9214 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9215 char *buf, size_t nbytes,
9216 loff_t off)
9217 {
9218 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9219 }
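/*
 * Typical cgroup v2 usage (sketch, assuming a mounted unified hierarchy):
 *
 *   echo "20.5" > cpu.uclamp.min
 *   echo max > cpu.uclamp.max
 *
 * Values are percentages with up to UCLAMP_PERCENT_SHIFT (two) decimal
 * places; "max" maps to SCHED_CAPACITY_SCALE.
 */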
9220
9221 static inline void cpu_uclamp_print(struct seq_file *sf,
9222 enum uclamp_id clamp_id)
9223 {
9224 struct task_group *tg;
9225 u64 util_clamp;
9226 u64 percent;
9227 u32 rem;
9228
9229 scoped_guard (rcu) {
9230 tg = css_tg(seq_css(sf));
9231 util_clamp = tg->uclamp_req[clamp_id].value;
9232 }
9233
9234 if (util_clamp == SCHED_CAPACITY_SCALE) {
9235 seq_puts(sf, "max\n");
9236 return;
9237 }
9238
9239 percent = tg->uclamp_pct[clamp_id];
9240 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9241 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9242 }
9243
9244 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9245 {
9246 cpu_uclamp_print(sf, UCLAMP_MIN);
9247 return 0;
9248 }
9249
9250 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9251 {
9252 cpu_uclamp_print(sf, UCLAMP_MAX);
9253 return 0;
9254 }
9255 #endif /* CONFIG_UCLAMP_TASK_GROUP */
9256
9257 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9258 static unsigned long tg_weight(struct task_group *tg)
9259 {
9260 #ifdef CONFIG_FAIR_GROUP_SCHED
9261 return scale_load_down(tg->shares);
9262 #else
9263 return sched_weight_from_cgroup(tg->scx_weight);
9264 #endif
9265 }
9266
9267 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9268 struct cftype *cftype, u64 shareval)
9269 {
9270 int ret;
9271
9272 if (shareval > scale_load_down(ULONG_MAX))
9273 shareval = MAX_SHARES;
9274 ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
9275 if (!ret)
9276 scx_group_set_weight(css_tg(css),
9277 sched_weight_to_cgroup(shareval));
9278 return ret;
9279 }
9280
9281 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9282 struct cftype *cft)
9283 {
9284 return tg_weight(css_tg(css));
9285 }
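/*
 * Legacy (cgroup v1) usage sketch: cpu.shares typically defaults to 1024;
 * writing 2048 gives a group roughly twice the CPU time of a sibling left at
 * 1024 when both are fully contending.
 */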
9286 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9287
9288 #ifdef CONFIG_CFS_BANDWIDTH
9289 static DEFINE_MUTEX(cfs_constraints_mutex);
9290
9291 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
9292 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
9293 /* More than 203 days if BW_SHIFT equals 20. */
9294 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
9295
9296 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9297
9298 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
9299 u64 burst)
9300 {
9301 int i, ret = 0, runtime_enabled, runtime_was_enabled;
9302 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9303
9304 if (tg == &root_task_group)
9305 return -EINVAL;
9306
9307 /*
9308 * Ensure we have at least some amount of bandwidth every period. This is
9309 * to prevent reaching a state of large arrears when throttled via
9310 * entity_tick() resulting in prolonged exit starvation.
9311 */
9312 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
9313 return -EINVAL;
9314
9315 /*
9316 * Likewise, bound things on the other side by preventing insane quota
9317 * periods. This also allows us to normalize in computing quota
9318 * feasibility.
9319 */
9320 if (period > max_cfs_quota_period)
9321 return -EINVAL;
9322
9323 /*
9324 * Bound quota to defend quota against overflow during bandwidth shift.
9325 */
9326 if (quota != RUNTIME_INF && quota > max_cfs_runtime)
9327 return -EINVAL;
9328
9329 if (quota != RUNTIME_INF && (burst > quota ||
9330 burst + quota > max_cfs_runtime))
9331 return -EINVAL;
9332
9333 /*
9334 * Prevent race between setting of cfs_rq->runtime_enabled and
9335 * unthrottle_offline_cfs_rqs().
9336 */
9337 guard(cpus_read_lock)();
9338 guard(mutex)(&cfs_constraints_mutex);
9339
9340 ret = __cfs_schedulable(tg, period, quota);
9341 if (ret)
9342 return ret;
9343
9344 runtime_enabled = quota != RUNTIME_INF;
9345 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9346 /*
9347 * If we need to toggle cfs_bandwidth_used, off->on must occur
9348 * before making related changes, and on->off must occur afterwards
9349 */
9350 if (runtime_enabled && !runtime_was_enabled)
9351 cfs_bandwidth_usage_inc();
9352
9353 scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9354 cfs_b->period = ns_to_ktime(period);
9355 cfs_b->quota = quota;
9356 cfs_b->burst = burst;
9357
9358 __refill_cfs_bandwidth_runtime(cfs_b);
9359
9360 /*
9361 * Restart the period timer (if active) to handle new
9362 * period expiry:
9363 */
9364 if (runtime_enabled)
9365 start_cfs_bandwidth(cfs_b);
9366 }
9367
9368 for_each_online_cpu(i) {
9369 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9370 struct rq *rq = cfs_rq->rq;
9371
9372 guard(rq_lock_irq)(rq);
9373 cfs_rq->runtime_enabled = runtime_enabled;
9374 cfs_rq->runtime_remaining = 0;
9375
9376 if (cfs_rq->throttled)
9377 unthrottle_cfs_rq(cfs_rq);
9378 }
9379
9380 if (runtime_was_enabled && !runtime_enabled)
9381 cfs_bandwidth_usage_dec();
9382
9383 return 0;
9384 }
9385
9386 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
9387 {
9388 u64 quota, period, burst;
9389
9390 period = ktime_to_ns(tg->cfs_bandwidth.period);
9391 burst = tg->cfs_bandwidth.burst;
9392 if (cfs_quota_us < 0)
9393 quota = RUNTIME_INF;
9394 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
9395 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
9396 else
9397 return -EINVAL;
9398
9399 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9400 }
9401
9402 static long tg_get_cfs_quota(struct task_group *tg)
9403 {
9404 u64 quota_us;
9405
9406 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9407 return -1;
9408
9409 quota_us = tg->cfs_bandwidth.quota;
9410 do_div(quota_us, NSEC_PER_USEC);
9411
9412 return quota_us;
9413 }
9414
9415 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
9416 {
9417 u64 quota, period, burst;
9418
9419 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
9420 return -EINVAL;
9421
9422 period = (u64)cfs_period_us * NSEC_PER_USEC;
9423 quota = tg->cfs_bandwidth.quota;
9424 burst = tg->cfs_bandwidth.burst;
9425
9426 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9427 }
9428
9429 static long tg_get_cfs_period(struct task_group *tg)
9430 {
9431 u64 cfs_period_us;
9432
9433 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9434 do_div(cfs_period_us, NSEC_PER_USEC);
9435
9436 return cfs_period_us;
9437 }
9438
9439 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
9440 {
9441 u64 quota, period, burst;
9442
9443 if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
9444 return -EINVAL;
9445
9446 burst = (u64)cfs_burst_us * NSEC_PER_USEC;
9447 period = ktime_to_ns(tg->cfs_bandwidth.period);
9448 quota = tg->cfs_bandwidth.quota;
9449
9450 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9451 }
9452
9453 static long tg_get_cfs_burst(struct task_group *tg)
9454 {
9455 u64 burst_us;
9456
9457 burst_us = tg->cfs_bandwidth.burst;
9458 do_div(burst_us, NSEC_PER_USEC);
9459
9460 return burst_us;
9461 }
9462
9463 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
9464 struct cftype *cft)
9465 {
9466 return tg_get_cfs_quota(css_tg(css));
9467 }
9468
9469 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
9470 struct cftype *cftype, s64 cfs_quota_us)
9471 {
9472 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
9473 }
9474
9475 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
9476 struct cftype *cft)
9477 {
9478 return tg_get_cfs_period(css_tg(css));
9479 }
9480
9481 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
9482 struct cftype *cftype, u64 cfs_period_us)
9483 {
9484 return tg_set_cfs_period(css_tg(css), cfs_period_us);
9485 }
9486
9487 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
9488 struct cftype *cft)
9489 {
9490 return tg_get_cfs_burst(css_tg(css));
9491 }
9492
9493 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
9494 struct cftype *cftype, u64 cfs_burst_us)
9495 {
9496 return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
9497 }
9498
9499 struct cfs_schedulable_data {
9500 struct task_group *tg;
9501 u64 period, quota;
9502 };
9503
9504 /*
9505 * normalize group quota/period to be quota/max_period
9506 * note: units are usecs
9507 */
9508 static u64 normalize_cfs_quota(struct task_group *tg,
9509 struct cfs_schedulable_data *d)
9510 {
9511 u64 quota, period;
9512
9513 if (tg == d->tg) {
9514 period = d->period;
9515 quota = d->quota;
9516 } else {
9517 period = tg_get_cfs_period(tg);
9518 quota = tg_get_cfs_quota(tg);
9519 }
9520
9521 /* note: these should typically be equivalent */
9522 if (quota == RUNTIME_INF || quota == -1)
9523 return RUNTIME_INF;
9524
9525 return to_ratio(period, quota);
9526 }
9527
9528 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9529 {
9530 struct cfs_schedulable_data *d = data;
9531 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9532 s64 quota = 0, parent_quota = -1;
9533
9534 if (!tg->parent) {
9535 quota = RUNTIME_INF;
9536 } else {
9537 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9538
9539 quota = normalize_cfs_quota(tg, d);
9540 parent_quota = parent_b->hierarchical_quota;
9541
9542 /*
9543 * Ensure max(child_quota) <= parent_quota. On cgroup2,
9544 * always take the non-RUNTIME_INF min. On cgroup1, only
9545 * inherit when no limit is set. In both cases this is used
9546 * by the scheduler to determine if a given CFS task has a
9547 * bandwidth constraint at some higher level.
9548 */
9549 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9550 if (quota == RUNTIME_INF)
9551 quota = parent_quota;
9552 else if (parent_quota != RUNTIME_INF)
9553 quota = min(quota, parent_quota);
9554 } else {
9555 if (quota == RUNTIME_INF)
9556 quota = parent_quota;
9557 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9558 return -EINVAL;
9559 }
9560 }
9561 cfs_b->hierarchical_quota = quota;
9562
9563 return 0;
9564 }
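/*
 * Example (illustrative): on cgroup2 a child left at "max" inherits its
 * parent's hierarchical_quota and an explicit child quota is clamped to
 * min(child, parent); on cgroup1 a child requesting more bandwidth than its
 * parent is rejected with -EINVAL instead.
 */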
9565
9566 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9567 {
9568 struct cfs_schedulable_data data = {
9569 .tg = tg,
9570 .period = period,
9571 .quota = quota,
9572 };
9573
9574 if (quota != RUNTIME_INF) {
9575 do_div(data.period, NSEC_PER_USEC);
9576 do_div(data.quota, NSEC_PER_USEC);
9577 }
9578
9579 guard(rcu)();
9580 return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9581 }
9582
9583 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9584 {
9585 struct task_group *tg = css_tg(seq_css(sf));
9586 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9587
9588 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9589 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9590 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9591
9592 if (schedstat_enabled() && tg != &root_task_group) {
9593 struct sched_statistics *stats;
9594 u64 ws = 0;
9595 int i;
9596
9597 for_each_possible_cpu(i) {
9598 stats = __schedstats_from_se(tg->se[i]);
9599 ws += schedstat_val(stats->wait_sum);
9600 }
9601
9602 seq_printf(sf, "wait_sum %llu\n", ws);
9603 }
9604
9605 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9606 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9607
9608 return 0;
9609 }
9610
9611 static u64 throttled_time_self(struct task_group *tg)
9612 {
9613 int i;
9614 u64 total = 0;
9615
9616 for_each_possible_cpu(i) {
9617 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
9618 }
9619
9620 return total;
9621 }
9622
9623 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
9624 {
9625 struct task_group *tg = css_tg(seq_css(sf));
9626
9627 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
9628
9629 return 0;
9630 }
9631 #endif /* CONFIG_CFS_BANDWIDTH */
9632
9633 #ifdef CONFIG_RT_GROUP_SCHED
9634 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9635 struct cftype *cft, s64 val)
9636 {
9637 return sched_group_set_rt_runtime(css_tg(css), val);
9638 }
9639
9640 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9641 struct cftype *cft)
9642 {
9643 return sched_group_rt_runtime(css_tg(css));
9644 }
9645
9646 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9647 struct cftype *cftype, u64 rt_period_us)
9648 {
9649 return sched_group_set_rt_period(css_tg(css), rt_period_us);
9650 }
9651
9652 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9653 struct cftype *cft)
9654 {
9655 return sched_group_rt_period(css_tg(css));
9656 }
9657 #endif /* CONFIG_RT_GROUP_SCHED */
9658
9659 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9660 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
9661 struct cftype *cft)
9662 {
9663 return css_tg(css)->idle;
9664 }
9665
9666 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
9667 struct cftype *cft, s64 idle)
9668 {
9669 int ret;
9670
9671 ret = sched_group_set_idle(css_tg(css), idle);
9672 if (!ret)
9673 scx_group_set_idle(css_tg(css), idle);
9674 return ret;
9675 }
9676 #endif
9677
9678 static struct cftype cpu_legacy_files[] = {
9679 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9680 {
9681 .name = "shares",
9682 .read_u64 = cpu_shares_read_u64,
9683 .write_u64 = cpu_shares_write_u64,
9684 },
9685 {
9686 .name = "idle",
9687 .read_s64 = cpu_idle_read_s64,
9688 .write_s64 = cpu_idle_write_s64,
9689 },
9690 #endif
9691 #ifdef CONFIG_CFS_BANDWIDTH
9692 {
9693 .name = "cfs_quota_us",
9694 .read_s64 = cpu_cfs_quota_read_s64,
9695 .write_s64 = cpu_cfs_quota_write_s64,
9696 },
9697 {
9698 .name = "cfs_period_us",
9699 .read_u64 = cpu_cfs_period_read_u64,
9700 .write_u64 = cpu_cfs_period_write_u64,
9701 },
9702 {
9703 .name = "cfs_burst_us",
9704 .read_u64 = cpu_cfs_burst_read_u64,
9705 .write_u64 = cpu_cfs_burst_write_u64,
9706 },
9707 {
9708 .name = "stat",
9709 .seq_show = cpu_cfs_stat_show,
9710 },
9711 {
9712 .name = "stat.local",
9713 .seq_show = cpu_cfs_local_stat_show,
9714 },
9715 #endif
9716 #ifdef CONFIG_RT_GROUP_SCHED
9717 {
9718 .name = "rt_runtime_us",
9719 .read_s64 = cpu_rt_runtime_read,
9720 .write_s64 = cpu_rt_runtime_write,
9721 },
9722 {
9723 .name = "rt_period_us",
9724 .read_u64 = cpu_rt_period_read_uint,
9725 .write_u64 = cpu_rt_period_write_uint,
9726 },
9727 #endif
9728 #ifdef CONFIG_UCLAMP_TASK_GROUP
9729 {
9730 .name = "uclamp.min",
9731 .flags = CFTYPE_NOT_ON_ROOT,
9732 .seq_show = cpu_uclamp_min_show,
9733 .write = cpu_uclamp_min_write,
9734 },
9735 {
9736 .name = "uclamp.max",
9737 .flags = CFTYPE_NOT_ON_ROOT,
9738 .seq_show = cpu_uclamp_max_show,
9739 .write = cpu_uclamp_max_write,
9740 },
9741 #endif
9742 { } /* Terminate */
9743 };
9744
9745 static int cpu_extra_stat_show(struct seq_file *sf,
9746 struct cgroup_subsys_state *css)
9747 {
9748 #ifdef CONFIG_CFS_BANDWIDTH
9749 {
9750 struct task_group *tg = css_tg(css);
9751 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9752 u64 throttled_usec, burst_usec;
9753
9754 throttled_usec = cfs_b->throttled_time;
9755 do_div(throttled_usec, NSEC_PER_USEC);
9756 burst_usec = cfs_b->burst_time;
9757 do_div(burst_usec, NSEC_PER_USEC);
9758
9759 seq_printf(sf, "nr_periods %d\n"
9760 "nr_throttled %d\n"
9761 "throttled_usec %llu\n"
9762 "nr_bursts %d\n"
9763 "burst_usec %llu\n",
9764 cfs_b->nr_periods, cfs_b->nr_throttled,
9765 throttled_usec, cfs_b->nr_burst, burst_usec);
9766 }
9767 #endif
9768 return 0;
9769 }
9770
9771 static int cpu_local_stat_show(struct seq_file *sf,
9772 struct cgroup_subsys_state *css)
9773 {
9774 #ifdef CONFIG_CFS_BANDWIDTH
9775 {
9776 struct task_group *tg = css_tg(css);
9777 u64 throttled_self_usec;
9778
9779 throttled_self_usec = throttled_time_self(tg);
9780 do_div(throttled_self_usec, NSEC_PER_USEC);
9781
9782 seq_printf(sf, "throttled_usec %llu\n",
9783 throttled_self_usec);
9784 }
9785 #endif
9786 return 0;
9787 }
9788
9789 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9790
9791 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
9792 struct cftype *cft)
9793 {
9794 return sched_weight_to_cgroup(tg_weight(css_tg(css)));
9795 }
9796
9797 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
9798 struct cftype *cft, u64 cgrp_weight)
9799 {
9800 unsigned long weight;
9801 int ret;
9802
9803 if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
9804 return -ERANGE;
9805
9806 weight = sched_weight_from_cgroup(cgrp_weight);
9807
9808 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9809 if (!ret)
9810 scx_group_set_weight(css_tg(css), cgrp_weight);
9811 return ret;
9812 }
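/*
 * cgroup v2 usage sketch: cpu.weight accepts values in [CGROUP_WEIGHT_MIN,
 * CGROUP_WEIGHT_MAX] (1..10000, default 100), which
 * sched_weight_from_cgroup() maps onto the internal nice-0 load-weight
 * scale (so the default of 100 corresponds to a weight of 1024).
 */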
9813
9814 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
9815 struct cftype *cft)
9816 {
9817 unsigned long weight = tg_weight(css_tg(css));
9818 int last_delta = INT_MAX;
9819 int prio, delta;
9820
9821 /* find the closest nice value to the current weight */
9822 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
9823 delta = abs(sched_prio_to_weight[prio] - weight);
9824 if (delta >= last_delta)
9825 break;
9826 last_delta = delta;
9827 }
9828
9829 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
9830 }
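/*
 * For example (illustrative), a group weight of 820 is closest to
 * sched_prio_to_weight[21], so cpu.weight.nice reads back as nice 1.
 */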
9831
9832 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
9833 struct cftype *cft, s64 nice)
9834 {
9835 unsigned long weight;
9836 int idx, ret;
9837
9838 if (nice < MIN_NICE || nice > MAX_NICE)
9839 return -ERANGE;
9840
9841 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
9842 idx = array_index_nospec(idx, 40);
9843 weight = sched_prio_to_weight[idx];
9844
9845 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9846 if (!ret)
9847 scx_group_set_weight(css_tg(css),
9848 sched_weight_to_cgroup(weight));
9849 return ret;
9850 }
9851 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9852
9853 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
9854 long period, long quota)
9855 {
9856 if (quota < 0)
9857 seq_puts(sf, "max");
9858 else
9859 seq_printf(sf, "%ld", quota);
9860
9861 seq_printf(sf, " %ld\n", period);
9862 }
9863
9864 /* caller should put the current value in *@periodp before calling */
9865 static int __maybe_unused cpu_period_quota_parse(char *buf,
9866 u64 *periodp, u64 *quotap)
9867 {
9868 char tok[21]; /* U64_MAX */
9869
9870 if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
9871 return -EINVAL;
9872
9873 *periodp *= NSEC_PER_USEC;
9874
9875 if (sscanf(tok, "%llu", quotap))
9876 *quotap *= NSEC_PER_USEC;
9877 else if (!strcmp(tok, "max"))
9878 *quotap = RUNTIME_INF;
9879 else
9880 return -EINVAL;
9881
9882 return 0;
9883 }
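/*
 * Parse examples (illustrative): "max 100000" yields *quotap == RUNTIME_INF
 * and a 100000us period, while a bare "50000" sets a 50000us quota and keeps
 * the period the caller passed in; numeric values are converted to
 * nanoseconds before returning.
 */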
9884
9885 #ifdef CONFIG_CFS_BANDWIDTH
9886 static int cpu_max_show(struct seq_file *sf, void *v)
9887 {
9888 struct task_group *tg = css_tg(seq_css(sf));
9889
9890 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
9891 return 0;
9892 }
9893
9894 static ssize_t cpu_max_write(struct kernfs_open_file *of,
9895 char *buf, size_t nbytes, loff_t off)
9896 {
9897 struct task_group *tg = css_tg(of_css(of));
9898 u64 period = tg_get_cfs_period(tg);
9899 u64 burst = tg->cfs_bandwidth.burst;
9900 u64 quota;
9901 int ret;
9902
9903 ret = cpu_period_quota_parse(buf, &period, &quota);
9904 if (!ret)
9905 ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
9906 return ret ?: nbytes;
9907 }
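/*
 * Typical cgroup v2 usage (sketch): "echo '50000 100000' > cpu.max" caps the
 * group at 50ms of CPU time per 100ms period (~50% of one CPU), while
 * "echo max > cpu.max" removes the limit.
 */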
9908 #endif
9909
9910 static struct cftype cpu_files[] = {
9911 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9912 {
9913 .name = "weight",
9914 .flags = CFTYPE_NOT_ON_ROOT,
9915 .read_u64 = cpu_weight_read_u64,
9916 .write_u64 = cpu_weight_write_u64,
9917 },
9918 {
9919 .name = "weight.nice",
9920 .flags = CFTYPE_NOT_ON_ROOT,
9921 .read_s64 = cpu_weight_nice_read_s64,
9922 .write_s64 = cpu_weight_nice_write_s64,
9923 },
9924 {
9925 .name = "idle",
9926 .flags = CFTYPE_NOT_ON_ROOT,
9927 .read_s64 = cpu_idle_read_s64,
9928 .write_s64 = cpu_idle_write_s64,
9929 },
9930 #endif
9931 #ifdef CONFIG_CFS_BANDWIDTH
9932 {
9933 .name = "max",
9934 .flags = CFTYPE_NOT_ON_ROOT,
9935 .seq_show = cpu_max_show,
9936 .write = cpu_max_write,
9937 },
9938 {
9939 .name = "max.burst",
9940 .flags = CFTYPE_NOT_ON_ROOT,
9941 .read_u64 = cpu_cfs_burst_read_u64,
9942 .write_u64 = cpu_cfs_burst_write_u64,
9943 },
9944 #endif
9945 #ifdef CONFIG_UCLAMP_TASK_GROUP
9946 {
9947 .name = "uclamp.min",
9948 .flags = CFTYPE_NOT_ON_ROOT,
9949 .seq_show = cpu_uclamp_min_show,
9950 .write = cpu_uclamp_min_write,
9951 },
9952 {
9953 .name = "uclamp.max",
9954 .flags = CFTYPE_NOT_ON_ROOT,
9955 .seq_show = cpu_uclamp_max_show,
9956 .write = cpu_uclamp_max_write,
9957 },
9958 #endif
9959 { } /* terminate */
9960 };
9961
9962 struct cgroup_subsys cpu_cgrp_subsys = {
9963 .css_alloc = cpu_cgroup_css_alloc,
9964 .css_online = cpu_cgroup_css_online,
9965 .css_offline = cpu_cgroup_css_offline,
9966 .css_released = cpu_cgroup_css_released,
9967 .css_free = cpu_cgroup_css_free,
9968 .css_extra_stat_show = cpu_extra_stat_show,
9969 .css_local_stat_show = cpu_local_stat_show,
9970 .can_attach = cpu_cgroup_can_attach,
9971 .attach = cpu_cgroup_attach,
9972 .cancel_attach = cpu_cgroup_cancel_attach,
9973 .legacy_cftypes = cpu_legacy_files,
9974 .dfl_cftypes = cpu_files,
9975 .early_init = true,
9976 .threaded = true,
9977 };
9978
9979 #endif /* CONFIG_CGROUP_SCHED */
9980
9981 void dump_cpu_task(int cpu)
9982 {
9983 if (in_hardirq() && cpu == smp_processor_id()) {
9984 struct pt_regs *regs;
9985
9986 regs = get_irq_regs();
9987 if (regs) {
9988 show_regs(regs);
9989 return;
9990 }
9991 }
9992
9993 if (trigger_single_cpu_backtrace(cpu))
9994 return;
9995
9996 pr_info("Task dump for CPU %d:\n", cpu);
9997 sched_show_task(cpu_curr(cpu));
9998 }
9999
10000 /*
10001 * Nice levels are multiplicative, with a gentle 10% change for every
10002 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10003 * nice 1, it will get ~10% less CPU time than another CPU-bound task
10004 * that remained on nice 0.
10005 *
10006 * The "10% effect" is relative and cumulative: from _any_ nice level,
10007 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10008 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
10009 * If a task goes up by ~10% and another task goes down by ~10% then
10010 * the relative distance between them is ~25%.)
10011 */
10012 const int sched_prio_to_weight[40] = {
10013 /* -20 */ 88761, 71755, 56483, 46273, 36291,
10014 /* -15 */ 29154, 23254, 18705, 14949, 11916,
10015 /* -10 */ 9548, 7620, 6100, 4904, 3906,
10016 /* -5 */ 3121, 2501, 1991, 1586, 1277,
10017 /* 0 */ 1024, 820, 655, 526, 423,
10018 /* 5 */ 335, 272, 215, 172, 137,
10019 /* 10 */ 110, 87, 70, 56, 45,
10020 /* 15 */ 36, 29, 23, 18, 15,
10021 };
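/*
 * Worked example (illustrative): two CPU-bound tasks at nice 0 (weight 1024
 * each) split the CPU 50%/50%. If one moves to nice 1 (weight 820), the
 * split becomes 1024/1844 ~= 55.5% vs 820/1844 ~= 44.5%, i.e. roughly the
 * 10% step per nice level described above, with a ~1.25 weight ratio.
 */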
10022
10023 /*
10024 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10025 *
10026 * In cases where the weight does not change often, we can use the
10027 * pre-calculated inverse to speed up arithmetics by turning divisions
10028 * into multiplications:
10029 */
10030 const u32 sched_prio_to_wmult[40] = {
10031 /* -20 */ 48388, 59856, 76040, 92818, 118348,
10032 /* -15 */ 147320, 184698, 229616, 287308, 360437,
10033 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
10034 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
10035 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
10036 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
10037 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
10038 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
10039 };
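/*
 * For example (illustrative), nice 0 has weight 1024 and wmult
 * 4194304 == 2^32 / 1024, so dividing by the weight can instead be done as a
 * multiplication by wmult followed by a right shift of 32.
 */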
10040
10041 void call_trace_sched_update_nr_running(struct rq *rq, int count)
10042 {
10043 trace_sched_update_nr_running_tp(rq, count);
10044 }
10045
10046 #ifdef CONFIG_SCHED_MM_CID
10047
10048 /*
10049 * @cid_lock: Guarantee forward-progress of cid allocation.
10050 *
10051 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
10052 * is only used when contention is detected by the lock-free allocation so
10053 * forward progress can be guaranteed.
10054 */
10055 DEFINE_RAW_SPINLOCK(cid_lock);
10056
10057 /*
10058 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
10059 *
10060 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
10061 * detected, it is set to 1 to ensure that all newly coming allocations are
10062 * serialized by @cid_lock until the allocation which detected contention
10063 * completes and sets @use_cid_lock back to 0. This guarantees forward progress
10064 * of a cid allocation.
10065 */
10066 int use_cid_lock;
10067
10068 /*
10069 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
10070 * concurrently with respect to the execution of the source runqueue context
10071 * switch.
10072 *
10073 * There is one basic property we want to guarantee here:
10074 *
10075 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
10076 * used by a task. That would lead to concurrent allocation of the cid and
10077 * userspace corruption.
10078 *
10079 * Provide this guarantee by introducing a Dekker memory ordering to guarantee
10080 * that a pair of loads observe at least one of a pair of stores, which can be
10081 * shown as:
10082 *
10083 * X = Y = 0
10084 *
10085 * w[X]=1 w[Y]=1
10086 * MB MB
10087 * r[Y]=y r[X]=x
10088 *
10089 * Which guarantees that x==0 && y==0 is impossible. But rather than using
10090 * values 0 and 1, this algorithm cares about specific state transitions of the
10091 * runqueue current task (as updated by the scheduler context switch), and the
10092 * per-mm/cpu cid value.
10093 *
10094 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
10095 * task->mm != mm for the rest of the discussion. There are two scheduler state
10096 * transitions on context switch we care about:
10097 *
10098 * (TSA) Store to rq->curr with transition from (N) to (Y)
10099 *
10100 * (TSB) Store to rq->curr with transition from (Y) to (N)
10101 *
10102 * On the remote-clear side, there is one transition we care about:
10103 *
10104 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
10105 *
10106 * There is also a transition to UNSET state which can be performed from all
10107 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
10108 * guarantees that only a single thread will succeed:
10109 *
10110 * (TMB) cmpxchg to *pcpu_cid to mark UNSET
10111 *
10112 * Just to be clear, what we do _not_ want to happen is a transition to UNSET
10113 * when a thread is actively using the cid (property (1)).
10114 *
10115 * Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
10116 *
10117 * Scenario A) (TSA)+(TMA) (from next task perspective)
10118 *
10119 * CPU0 CPU1
10120 *
10121 * Context switch CS-1 Remote-clear
10122 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
10123 * (implied barrier after cmpxchg)
10124 * - switch_mm_cid()
10125 * - memory barrier (see switch_mm_cid()
10126 * comment explaining how this barrier
10127 * is combined with other scheduler
10128 * barriers)
10129 * - mm_cid_get (next)
10130 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
10131 *
10132 * This Dekker ensures that either task (Y) is observed by the
10133 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
10134 * observed.
10135 *
10136 * If task (Y) store is observed by rcu_dereference(), it means that there is
10137 * still an active task on the cpu. Remote-clear will therefore not transition
10138 * to UNSET, which fulfills property (1).
10139 *
10140 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
10141 * it will move its state to UNSET, which clears the percpu cid perhaps
10142 * uselessly (which is not an issue for correctness). Because task (Y) is not
10143 * observed, CPU1 can move ahead to set the state to UNSET. Because moving
10144 * state to UNSET is done with a cmpxchg expecting that the old state has the
10145 * LAZY flag set, only one thread will successfully UNSET.
10146 *
10147 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
10148 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
10149 * CPU1 will observe task (Y) and do nothing more, which is fine.
10150 *
10151 * What we are effectively preventing with this Dekker is a scenario where
10152 * neither LAZY flag nor store (Y) are observed, which would fail property (1)
10153 * because this would UNSET a cid which is actively used.
10154 */
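/*
 * A minimal store-buffering litmus sketch of the same pattern (illustrative,
 * not kernel code), with x = y = 0 initially:
 *
 *   CPU0: WRITE_ONCE(x, 1); smp_mb(); r0 = READ_ONCE(y);
 *   CPU1: WRITE_ONCE(y, 1); smp_mb(); r1 = READ_ONCE(x);
 *
 * The full barriers forbid the outcome (r0 == 0 && r1 == 0), which is
 * exactly the "neither store observed" case that property (1) must exclude.
 */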
10155
10156 void sched_mm_cid_migrate_from(struct task_struct *t)
10157 {
10158 t->migrate_from_cpu = task_cpu(t);
10159 }
10160
10161 static
10162 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
10163 struct task_struct *t,
10164 struct mm_cid *src_pcpu_cid)
10165 {
10166 struct mm_struct *mm = t->mm;
10167 struct task_struct *src_task;
10168 int src_cid, last_mm_cid;
10169
10170 if (!mm)
10171 return -1;
10172
10173 last_mm_cid = t->last_mm_cid;
10174 /*
10175 * If the migrated task has no last cid, or if the current
10176 * task on src rq uses the cid, it means the source cid does not need
10177 * to be moved to the destination cpu.
10178 */
10179 if (last_mm_cid == -1)
10180 return -1;
10181 src_cid = READ_ONCE(src_pcpu_cid->cid);
10182 if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
10183 return -1;
10184
10185 /*
10186 * If we observe an active task using the mm on this rq, it means we
10187 * are not the last task to be migrated from this cpu for this mm, so
10188 * there is no need to move src_cid to the destination cpu.
10189 */
10190 guard(rcu)();
10191 src_task = rcu_dereference(src_rq->curr);
10192 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10193 t->last_mm_cid = -1;
10194 return -1;
10195 }
10196
10197 return src_cid;
10198 }
10199
10200 static
10201 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
10202 struct task_struct *t,
10203 struct mm_cid *src_pcpu_cid,
10204 int src_cid)
10205 {
10206 struct task_struct *src_task;
10207 struct mm_struct *mm = t->mm;
10208 int lazy_cid;
10209
10210 if (src_cid == -1)
10211 return -1;
10212
10213 /*
10214 * Attempt to clear the source cpu cid to move it to the destination
10215 * cpu.
10216 */
10217 lazy_cid = mm_cid_set_lazy_put(src_cid);
10218 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
10219 return -1;
10220
10221 /*
10222 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10223 * rq->curr->mm matches the scheduler barrier in context_switch()
10224 * between store to rq->curr and load of prev and next task's
10225 * per-mm/cpu cid.
10226 *
10227 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10228 * rq->curr->mm_cid_active matches the barrier in
10229 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10230 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10231 * load of per-mm/cpu cid.
10232 */
10233
10234 /*
10235 * If we observe an active task using the mm on this rq after setting
10236 * the lazy-put flag, this task will be responsible for transitioning
10237 * from lazy-put flag set to MM_CID_UNSET.
10238 */
10239 scoped_guard (rcu) {
10240 src_task = rcu_dereference(src_rq->curr);
10241 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10242 /*
10243 * We observed an active task for this mm, there is therefore
10244 * no point in moving this cid to the destination cpu.
10245 */
10246 t->last_mm_cid = -1;
10247 return -1;
10248 }
10249 }
10250
10251 /*
10252 * The src_cid is unused, so it can be unset.
10253 */
10254 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10255 return -1;
10256 return src_cid;
10257 }
10258
10259 /*
10260 * Migration to dst cpu. Called with dst_rq lock held.
10261 * Interrupts are disabled, which keeps the window of cid ownership without the
10262 * source rq lock held small.
10263 */
10264 void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
10265 {
10266 struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
10267 struct mm_struct *mm = t->mm;
10268 int src_cid, dst_cid, src_cpu;
10269 struct rq *src_rq;
10270
10271 lockdep_assert_rq_held(dst_rq);
10272
10273 if (!mm)
10274 return;
10275 src_cpu = t->migrate_from_cpu;
10276 if (src_cpu == -1) {
10277 t->last_mm_cid = -1;
10278 return;
10279 }
10280 /*
10281 * Move the src cid if the dst cid is unset. This keeps id
10282 * allocation closest to 0 in cases where few threads migrate around
10283 * many CPUs.
10284 *
10285 * If destination cid is already set, we may have to just clear
10286 * the src cid to ensure compactness in frequent migrations
10287 * scenarios.
10288 *
10289 * It is not useful to clear the src cid when the number of threads is
10290 * greater or equal to the number of allowed CPUs, because user-space
10291 * can expect that the number of allowed cids can reach the number of
10292 * allowed CPUs.
10293 */
10294 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
10295 dst_cid = READ_ONCE(dst_pcpu_cid->cid);
10296 if (!mm_cid_is_unset(dst_cid) &&
10297 atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
10298 return;
10299 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
10300 src_rq = cpu_rq(src_cpu);
10301 src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
10302 if (src_cid == -1)
10303 return;
10304 src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
10305 src_cid);
10306 if (src_cid == -1)
10307 return;
10308 if (!mm_cid_is_unset(dst_cid)) {
10309 __mm_cid_put(mm, src_cid);
10310 return;
10311 }
10312 /* Move src_cid to dst cpu. */
10313 mm_cid_snapshot_time(dst_rq, mm);
10314 WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
10315 }
10316
10317 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
10318 int cpu)
10319 {
10320 struct rq *rq = cpu_rq(cpu);
10321 struct task_struct *t;
10322 int cid, lazy_cid;
10323
10324 cid = READ_ONCE(pcpu_cid->cid);
10325 if (!mm_cid_is_valid(cid))
10326 return;
10327
10328 /*
10329 * Clear the cpu cid if it is set to keep cid allocation compact. If
10330 * there happens to be other tasks left on the source cpu using this
10331 * mm, the next task using this mm will reallocate its cid on context
10332 * switch.
10333 */
10334 lazy_cid = mm_cid_set_lazy_put(cid);
10335 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
10336 return;
10337
10338 /*
10339 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10340 * rq->curr->mm matches the scheduler barrier in context_switch()
10341 * between store to rq->curr and load of prev and next task's
10342 * per-mm/cpu cid.
10343 *
10344 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10345 * rq->curr->mm_cid_active matches the barrier in
10346 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10347 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10348 * load of per-mm/cpu cid.
10349 */
10350
10351 /*
10352 * If we observe an active task using the mm on this rq after setting
10353 * the lazy-put flag, that task will be responsible for transitioning
10354 * from lazy-put flag set to MM_CID_UNSET.
10355 */
10356 scoped_guard (rcu) {
10357 t = rcu_dereference(rq->curr);
10358 if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
10359 return;
10360 }
10361
10362 /*
10363 * The cid is unused, so it can be unset.
10364 * Disable interrupts to keep the window of cid ownership without rq
10365 * lock small.
10366 */
10367 scoped_guard (irqsave) {
10368 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10369 __mm_cid_put(mm, cid);
10370 }
10371 }
10372
10373 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
10374 {
10375 struct rq *rq = cpu_rq(cpu);
10376 struct mm_cid *pcpu_cid;
10377 struct task_struct *curr;
10378 u64 rq_clock;
10379
10380 /*
10381 * rq->clock load is racy on 32-bit but one spurious clear once in a
10382 * while is irrelevant.
10383 */
10384 rq_clock = READ_ONCE(rq->clock);
10385 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10386
10387 /*
10388 * In order to take care of infrequently scheduled tasks, bump the time
10389 * snapshot associated with this cid if an active task using the mm is
10390 * observed on this rq.
10391 */
10392 scoped_guard (rcu) {
10393 curr = rcu_dereference(rq->curr);
10394 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
10395 WRITE_ONCE(pcpu_cid->time, rq_clock);
10396 return;
10397 }
10398 }
10399
10400 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
10401 return;
10402 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10403 }
10404
10405 static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
10406 int weight)
10407 {
10408 struct mm_cid *pcpu_cid;
10409 int cid;
10410
10411 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10412 cid = READ_ONCE(pcpu_cid->cid);
10413 if (!mm_cid_is_valid(cid) || cid < weight)
10414 return;
10415 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10416 }
10417
10418 static void task_mm_cid_work(struct callback_head *work)
10419 {
10420 unsigned long now = jiffies, old_scan, next_scan;
10421 struct task_struct *t = current;
10422 struct cpumask *cidmask;
10423 struct mm_struct *mm;
10424 int weight, cpu;
10425
10426 SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
10427
10428 work->next = work; /* Prevent double-add */
10429 if (t->flags & PF_EXITING)
10430 return;
10431 mm = t->mm;
10432 if (!mm)
10433 return;
10434 old_scan = READ_ONCE(mm->mm_cid_next_scan);
10435 next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10436 if (!old_scan) {
10437 unsigned long res;
10438
10439 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
10440 if (res != old_scan)
10441 old_scan = res;
10442 else
10443 old_scan = next_scan;
10444 }
10445 if (time_before(now, old_scan))
10446 return;
10447 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
10448 return;
10449 cidmask = mm_cidmask(mm);
10450 /* Clear cids that were not recently used. */
10451 for_each_possible_cpu(cpu)
10452 sched_mm_cid_remote_clear_old(mm, cpu);
10453 weight = cpumask_weight(cidmask);
10454 /*
10455 * Clear cids that are greater or equal to the cidmask weight to
10456 * recompact it.
10457 */
10458 for_each_possible_cpu(cpu)
10459 sched_mm_cid_remote_clear_weight(mm, cpu, weight);
10460 }
10461
10462 void init_sched_mm_cid(struct task_struct *t)
10463 {
10464 struct mm_struct *mm = t->mm;
10465 int mm_users = 0;
10466
10467 if (mm) {
10468 mm_users = atomic_read(&mm->mm_users);
10469 if (mm_users == 1)
10470 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10471 }
10472 t->cid_work.next = &t->cid_work; /* Protect against double add */
10473 init_task_work(&t->cid_work, task_mm_cid_work);
10474 }
10475
10476 void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
10477 {
10478 struct callback_head *work = &curr->cid_work;
10479 unsigned long now = jiffies;
10480
10481 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
10482 work->next != work)
10483 return;
10484 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
10485 return;
10486
10487 /* No page allocation under rq lock */
10488 task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
10489 }
10490
10491 void sched_mm_cid_exit_signals(struct task_struct *t)
10492 {
10493 struct mm_struct *mm = t->mm;
10494 struct rq *rq;
10495
10496 if (!mm)
10497 return;
10498
10499 preempt_disable();
10500 rq = this_rq();
10501 guard(rq_lock_irqsave)(rq);
10502 preempt_enable_no_resched(); /* holding spinlock */
10503 WRITE_ONCE(t->mm_cid_active, 0);
10504 /*
10505 * Store t->mm_cid_active before loading per-mm/cpu cid.
10506 * Matches barrier in sched_mm_cid_remote_clear_old().
10507 */
10508 smp_mb();
10509 mm_cid_put(mm);
10510 t->last_mm_cid = t->mm_cid = -1;
10511 }
10512
10513 void sched_mm_cid_before_execve(struct task_struct *t)
10514 {
10515 struct mm_struct *mm = t->mm;
10516 struct rq *rq;
10517
10518 if (!mm)
10519 return;
10520
10521 preempt_disable();
10522 rq = this_rq();
10523 guard(rq_lock_irqsave)(rq);
10524 preempt_enable_no_resched(); /* holding spinlock */
10525 WRITE_ONCE(t->mm_cid_active, 0);
10526 /*
10527 * Store t->mm_cid_active before loading per-mm/cpu cid.
10528 * Matches barrier in sched_mm_cid_remote_clear_old().
10529 */
10530 smp_mb();
10531 mm_cid_put(mm);
10532 t->last_mm_cid = t->mm_cid = -1;
10533 }
10534
10535 void sched_mm_cid_after_execve(struct task_struct *t)
10536 {
10537 struct mm_struct *mm = t->mm;
10538 struct rq *rq;
10539
10540 if (!mm)
10541 return;
10542
10543 preempt_disable();
10544 rq = this_rq();
10545 scoped_guard (rq_lock_irqsave, rq) {
10546 preempt_enable_no_resched(); /* holding spinlock */
10547 WRITE_ONCE(t->mm_cid_active, 1);
10548 /*
10549 * Store t->mm_cid_active before loading per-mm/cpu cid.
10550 * Matches barrier in sched_mm_cid_remote_clear_old().
10551 */
10552 smp_mb();
10553 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
10554 }
10555 rseq_set_notify_resume(t);
10556 }
10557
10558 void sched_mm_cid_fork(struct task_struct *t)
10559 {
10560 WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
10561 t->mm_cid_active = 1;
10562 }
10563 #endif
10564
10565 #ifdef CONFIG_SCHED_CLASS_EXT
10566 void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
10567 struct sched_enq_and_set_ctx *ctx)
10568 {
10569 struct rq *rq = task_rq(p);
10570
10571 lockdep_assert_rq_held(rq);
10572
10573 *ctx = (struct sched_enq_and_set_ctx){
10574 .p = p,
10575 .queue_flags = queue_flags,
10576 .queued = task_on_rq_queued(p),
10577 .running = task_current(rq, p),
10578 };
10579
10580 update_rq_clock(rq);
10581 if (ctx->queued)
10582 dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK);
10583 if (ctx->running)
10584 put_prev_task(rq, p);
10585 }
10586
10587 void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
10588 {
10589 struct rq *rq = task_rq(ctx->p);
10590
10591 lockdep_assert_rq_held(rq);
10592
10593 if (ctx->queued)
10594 enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK);
10595 if (ctx->running)
10596 set_next_task(rq, ctx->p);
10597 }
10598 #endif /* CONFIG_SCHED_CLASS_EXT */
10599