// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a 2 dimensional bitmap (the first for priority class, the second for CPUs
 *  in that class).  Therefore a typical application without affinity
 *  restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 *  searches).  For tasks with affinity restrictions, the algorithm has a
 *  worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 *  yields the worst case search is fairly contrived.
 */
#include "sched.h"

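/*
 * Editorial illustration (not part of the original file): a minimal,
 * self-contained userspace sketch of the two-level lookup described in the
 * header comment -- a first-level "bitmap" over priority classes and a
 * per-class CPU mask -- using hypothetical fixed-size arrays in place of
 * the kernel's cpupri_vec/cpumask machinery.  Guarded out of the build.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define NR_CLASSES	102	/* IDLE, NORMAL, RT1..RT99 style indices */
#define NR_CPUS		8

static bool class_nonempty[NR_CLASSES];		/* first level: class has CPUs */
static bool cpu_in_class[NR_CLASSES][NR_CPUS];	/* second level: per-class mask */

/* Find any CPU whose class is strictly below task_pri, lowest class first. */
static int find_lowest_cpu(int task_pri)
{
	for (int cls = 0; cls < task_pri; cls++) {
		if (!class_nonempty[cls])
			continue;
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_in_class[cls][cpu])
				return cpu;
	}
	return -1;	/* no CPU running below task_pri */
}

int main(void)
{
	/* CPU 3 is idle (class 0); CPU 5 runs a normal task (class 1). */
	class_nonempty[0] = cpu_in_class[0][3] = true;
	class_nonempty[1] = cpu_in_class[1][5] = true;

	/* An RT task (class 2 or above) is routed to the idle CPU. */
	printf("best CPU: %d\n", find_lowest_cpu(2));	/* prints 3 */
	return 0;
}
#endif
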
/* Convert between the 140-based task->prio and our 102-based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}
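
/*
 * Editorial illustration (not part of the original file): worked examples
 * of the mapping above, assuming the usual kernel values MAX_RT_PRIO == 100
 * and MAX_PRIO == 140, with CPUPRI_INVALID == -1, CPUPRI_IDLE == 0 and
 * CPUPRI_NORMAL == 1:
 *
 *   task->prio            cpupri
 *   ----------            ------
 *   CPUPRI_INVALID     -> CPUPRI_INVALID (-1, never indexes pri_to_cpu[])
 *   140 (MAX_PRIO)     -> CPUPRI_IDLE    (0)
 *   100..139           -> CPUPRI_NORMAL  (1)
 *    99 (lowest RT)    -> 2              (100 - 99 + 1)
 *     0 (highest RT)   -> 101            (100 - 0 + 1)
 *
 * That gives 102 usable slots (indices 0..101), matching the
 * CPUPRI_NR_PRIORITIES == MAX_RT_PRIO + 2 sizing of pri_to_cpu[].
 */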

/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 * @fitness_fn: A pointer to a function to do custom checks whether the CPU
 *              fits specific criteria, so that we only return those CPUs.
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask,
		bool (*fitness_fn)(struct task_struct *p, int cpu))
{
	int idx = 0;
	int task_pri = convert_prio(p->prio);

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		/*
		 * When looking at the vector, we need to read the counter,
		 * do a memory barrier, then read the mask.
		 *
		 * Note: This is still all racy, but we can deal with it.
		 *  Ideally, we only want to look at masks that are set.
		 *
		 *  If a mask is not set, then the only thing wrong is that we
		 *  did a little more work than necessary.
		 *
		 *  If we read a zero count but the mask is set, because of the
		 *  memory barriers, that can only happen when the highest prio
		 *  task for a run queue has left the run queue, in which case,
		 *  it will be followed by a pull. If the task we are processing
		 *  fails to find a proper place to go, that pull request will
		 *  pull this task if the run queue is running at a lower
		 *  priority.
		 */
		smp_rmb();

		/* Need to do the rmb for every iteration */
		if (skip)
			continue;

		if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			int cpu;

			cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask.  If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_empty(lowest_mask))
				continue;

			if (!fitness_fn)
				return 1;

			/* Ensure the capacity of the CPUs fits the task */
			for_each_cpu(cpu, lowest_mask) {
				if (!fitness_fn(p, cpu))
					cpumask_clear_cpu(cpu, lowest_mask);
			}

			/*
			 * If no CPU at the current priority can fit the task,
			 * continue looking
			 */
			if (cpumask_empty(lowest_mask))
				continue;
		}

		return 1;
	}

	return 0;
}
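
/*
 * Editorial illustration (not part of the original file): a hedged sketch of
 * how a push-side caller such as find_lowest_rq() in kernel/sched/rt.c uses
 * cpupri_find().  The names (local_cpu_mask, rt_task_fits_capacity) are
 * recalled from that call site and may not match this revision exactly;
 * the block is guarded out of the build.
 */
#if 0
static int find_lowest_rq_sketch(struct task_struct *task)
{
	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
	int cpu = task_cpu(task);

	/* A task pinned to a single CPU has nowhere else to go. */
	if (task->nr_cpus_allowed == 1)
		return -1;

	/*
	 * Fill lowest_mask with all CPUs running below the task's priority;
	 * the fitness callback additionally filters for CPU capacity on
	 * asymmetric systems.
	 */
	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask,
			 rt_task_fits_capacity))
		return -1;

	/* Prefer the task's current CPU if it is among the candidates. */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/* ... otherwise fall back to topology-aware selection ... */
	return cpumask_any(lowest_mask);
}
#endif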

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the CPU is currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic();

		/*
		 * When removing from the vector, we decrement the counter
		 * first, do a memory barrier, and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}
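
/*
 * Editorial illustration (not part of the original file): a hedged timeline
 * of why the add-before-remove order above matters when a CPU's priority is
 * raised, with a concurrent cpupri_find() scanning vectors low to high:
 *
 *   cpupri_set(cp, cpu, RT50)          cpupri_find() (another CPU)
 *   -------------------------          ---------------------------
 *   set cpu in the RT50 vector
 *                                      scans the lower vectors: may still
 *                                      see cpu in its old vector (harmless,
 *                                      at worst a spurious candidate)
 *   clear cpu from the old vector
 *
 * Had the old vector been cleared first, there would be a window in which
 * the CPU appears in no vector at all and cpupri_find() could miss it
 * entirely -- the case the comments above guard against.
 */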

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}
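
/*
 * Editorial illustration (not part of the original file): a hedged sketch of
 * the init/cleanup pairing, loosely modeled on how the root-domain code in
 * kernel/sched/topology.c uses this API; the function name is hypothetical
 * and the block is guarded out of the build.
 */
#if 0
static int example_rootdomain_setup(struct root_domain *rd)
{
	/* cpupri_init() allocates per-priority cpumasks and can fail. */
	if (cpupri_init(&rd->cpupri) != 0)
		return -ENOMEM;

	/* ... use cpupri_set()/cpupri_find() while the domain is live ... */

	/* Every successful cpupri_init() is paired with cpupri_cleanup(). */
	cpupri_cleanup(&rd->cpupri);
	return 0;
}
#endif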

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
259