// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/cpupri.c
 *
 * CPU priority management
 *
 * Copyright (C) 2007-2008 Novell
 *
 * Author: Gregory Haskins <ghaskins@novell.com>
 *
 * This code tracks the priority of each CPU so that global migration
 * decisions are easy to calculate. Each CPU can be in a state as follows:
 *
 * (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 * going from the lowest priority to the highest. CPUs in the INVALID state
 * are not eligible for routing. The system maintains this state with
 * a 2 dimensional bitmap (the first for priority class, the second for CPUs
 * in that class). Therefore a typical application without affinity
 * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 * searches). For tasks with affinity restrictions, the algorithm has a
 * worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 * yields the worst case search is fairly contrived.
 */
#include "sched.h"

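/*
 * Informal sketch of the data layout: cp->pri_to_cpu[] holds one
 * cpupri_vec -- an atomic count plus a cpumask -- per cpupri level
 * (CPUPRI_NR_PRIORITIES of them), and cp->cpu_to_pri[] records the
 * current level of each CPU. cpupri_find() scans the vectors from the
 * lowest level upwards and stops at the first one whose mask intersects
 * the task's affinity, which is where the O(min(102, nr_domcpus)) bound
 * above comes from.
 */
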
/* Convert between a 140 based task->prio, and our 102 based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}
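
/*
 * Worked example of the mapping above, assuming the usual
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140:
 *
 *	task->prio			cpupri
 *	----------			------
 *	140 (MAX_PRIO, idle)		  0 (CPUPRI_IDLE)
 *	100..139 (normal tasks)		  1 (CPUPRI_NORMAL)
 *	 99 (lowest RT prio)		  2
 *	  0 (highest RT prio)		101
 *
 * i.e. larger cpupri values mean higher priority, the reverse of the
 * task->prio convention.
 */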

static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
				struct cpumask *lowest_mask, int idx)
{
	struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
	int skip = 0;

	if (!atomic_read(&(vec)->count))
		skip = 1;
	/*
	 * When looking at the vector, we need to read the counter,
	 * do a memory barrier, then read the mask.
	 *
	 * Note: This is still all racy, but we can deal with it.
	 * Ideally, we only want to look at masks that are set.
	 *
	 * If a mask is not set, then the only thing wrong is that we
	 * did a little more work than necessary.
	 *
	 * If we read a zero count but the mask is set, because of the
	 * memory barriers, that can only happen when the highest prio
	 * task for a run queue has left the run queue, in which case,
	 * it will be followed by a pull. If the task we are processing
	 * fails to find a proper place to go, that pull request will
	 * pull this task if the run queue is running at a lower
	 * priority.
	 */
	smp_rmb();

	/* Need to do the rmb for every iteration */
	if (skip)
		return 0;

	if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
		return 0;

	if (lowest_mask) {
		cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);

		/*
		 * We have to ensure that we have at least one bit
		 * still set in the array, since the map could have
		 * been concurrently emptied between the first and
		 * second reads of vec->mask. If we hit this
		 * condition, simply act as though we never hit this
		 * priority level and continue on.
		 */
		if (cpumask_empty(lowest_mask))
			return 0;
	}

	return 1;
}
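
/*
 * Note: the smp_rmb() in __cpupri_find() pairs with the
 * smp_mb__before_atomic()/smp_mb__after_atomic() barriers in
 * cpupri_set() below; see the ordering sketch after cpupri_set().
 */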

int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	return cpupri_find_fitness(cp, p, lowest_mask, NULL);
}

/**
 * cpupri_find_fitness - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 * @fitness_fn: A pointer to a function to do custom checks whether the CPU
 *              fits specific criteria so that we only return those CPUs.
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation. By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times. While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
			struct cpumask *lowest_mask,
			bool (*fitness_fn)(struct task_struct *p, int cpu))
{
	int task_pri = convert_prio(p->prio);
	int idx, cpu;

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {

		if (!__cpupri_find(cp, p, lowest_mask, idx))
			continue;

		if (!lowest_mask || !fitness_fn)
			return 1;

		/* Ensure the capacity of the CPUs fits the task */
		for_each_cpu(cpu, lowest_mask) {
			if (!fitness_fn(p, cpu))
				cpumask_clear_cpu(cpu, lowest_mask);
		}

		/*
		 * If no CPU at the current priority can fit the task,
		 * continue looking.
		 */
		if (cpumask_empty(lowest_mask))
			continue;

		return 1;
	}

	/*
	 * If we failed to find a fitting lowest_mask, kick off a new search
	 * but without taking into account any fitness criteria this time.
	 *
	 * This rule favours honouring priority over fitting the task in the
	 * correct CPU (Capacity Awareness being the only user now).
	 * The idea is that if a higher priority task can run, then it should
	 * run even if this ends up being on an unfitting CPU.
	 *
	 * The cost of this trade-off is not entirely clear and will probably
	 * be good for some workloads and bad for others.
	 *
	 * The main idea here is that if some CPUs were overcommitted, we try
	 * to spread, which is what the scheduler traditionally did. Sys admins
	 * must do proper RT planning to avoid overloading the system if they
	 * really care.
	 */
	if (fitness_fn)
		return cpupri_find(cp, p, lowest_mask);

	return 0;
}
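
/*
 * Usage sketch (my_fits is a hypothetical callback; the real fitness
 * user is the capacity check in rt.c):
 *
 *	static bool my_fits(struct task_struct *p, int cpu)
 *	{
 *		return true;	// e.g. a capacity check such as
 *				// rt.c's rt_task_fits_capacity(p, cpu)
 *	}
 *
 *	if (cpupri_find_fitness(&rq->rd->cpupri, p, &mask, my_fits))
 *		cpu = cpumask_first(&mask);
 */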

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the CPU was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * CPU being missed by the priority loop in cpupri_find().
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic();

		/*
		 * When removing from the vector, we decrement the counter
		 * first, do a memory barrier, and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}
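
/*
 * Ordering sketch (assumed example: a runqueue's top priority is raised
 * from level A to level B, racing with a concurrent search):
 *
 *	cpupri_set() (writer)		__cpupri_find() (reader)
 *	---------------------		------------------------
 *	set cpu in vec[B]->mask
 *	smp_mb__before_atomic()
 *	atomic_inc(&vec[B]->count)
 *	smp_mb__after_atomic()		atomic_read(&vec[idx]->count)
 *	atomic_dec(&vec[A]->count)	smp_rmb()
 *	smp_mb__after_atomic()		read vec[idx]->mask
 *	clear cpu from vec[A]->mask
 *
 * The barriers guarantee the reader sees the CPU in at least one of the
 * two vectors; briefly seeing it in both is the harmless "little more
 * work than necessary" case described in __cpupri_find().
 */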

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
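
/*
 * Lifecycle note: each root domain embeds a struct cpupri, so
 * cpupri_init()/cpupri_cleanup() are expected to be driven from the
 * root-domain setup/teardown paths (init_rootdomain() and
 * free_rootdomain() in kernel/sched/topology.c) rather than called
 * directly from the scheduling fast paths.
 */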