/*
 * kernel/sched/cpupri.c
 *
 * CPU priority management
 *
 * Copyright (C) 2007-2008 Novell
 *
 * Author: Gregory Haskins <ghaskins@novell.com>
 *
 * This code tracks the priority of each CPU so that global migration
 * decisions are easy to calculate. Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 * going from the lowest priority to the highest. CPUs in the INVALID state
 * are not eligible for routing. The system maintains this state with
 * a 2 dimensional bitmap (the first for priority class, the second for cpus
 * in that class). Therefore a typical application without affinity
 * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 * searches). For tasks with affinity restrictions, the algorithm has a
 * worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 * yields the worst case search is fairly contrived.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
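
/*
 * Illustrative sketch of the state described above, paraphrased from
 * cpupri.h; treat the layout as a sketch rather than the authoritative
 * definition:
 *
 *	struct cpupri_vec {
 *		atomic_t	count;	- nr of CPUs currently at this prio
 *		cpumask_var_t	mask;	- which CPUs those are
 *	};
 *
 *	struct cpupri {
 *		struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
 *		int		  cpu_to_pri[NR_CPUS];
 *	};
 *
 * A query walks pri_to_cpu[] from index 0 (idle CPUs) upward and stops at
 * the first non-empty vector that intersects the task's affinity mask,
 * which is what keeps the common-case search cheap, as described above.
 */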

#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include "cpupri.h"

/* Convert between a 140 based task->prio, and our 102 based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}
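
/*
 * Worked example of the mapping (values follow from the arithmetic above
 * and the constants in cpupri.h, where MAX_PRIO is 140 and MAX_RT_PRIO
 * is 100):
 *
 *	task->prio			cpupri index
 *	----------			------------
 *	CPUPRI_INVALID (-1)		CPUPRI_INVALID (-1)
 *	MAX_PRIO (140, idle)		CPUPRI_IDLE (0)
 *	100..139 (SCHED_NORMAL)		CPUPRI_NORMAL (1)
 *	99 (lowest RT)			2
 *	0 (highest RT)			101
 *
 * Higher cpupri index means higher effective priority, which is why
 * cpupri_find() below can scan from index 0 upward to locate the
 * lowest-priority CPUs first.
 */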

/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation. By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times. While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int idx = 0;
	int task_pri = convert_prio(p->prio);

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		/*
		 * When looking at the vector, we need to read the counter,
		 * do a memory barrier, then read the mask.
		 *
		 * Note: This is still all racy, but we can deal with it.
		 * Ideally, we only want to look at masks that are set.
		 *
		 * If a mask is not set, then the only thing wrong is that we
		 * did a little more work than necessary.
		 *
		 * If we read a zero count but the mask is set, because of the
		 * memory barriers, that can only happen when the highest prio
		 * task for a run queue has left the run queue, in which case
		 * it will be followed by a pull. If the task we are processing
		 * fails to find a proper place to go, that pull request will
		 * pull this task if the run queue is running at a lower
		 * priority.
		 */
		smp_rmb();

		/* Need to do the rmb for every iteration */
		if (skip)
			continue;

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask. If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
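
/*
 * Usage sketch (hypothetical helper, for illustration only; the wrapper
 * name and error convention here are assumptions, not kernel API): the RT
 * push/pull logic consults cpupri_find() when choosing where to send a
 * task, along these lines:
 *
 *	static int example_pick_lowest_cpu(struct task_struct *p,
 *					   struct cpumask *lowest_mask)
 *	{
 *		struct root_domain *rd = cpu_rq(task_cpu(p))->rd;
 *
 *		if (!cpupri_find(&rd->cpupri, p, lowest_mask))
 *			return -1;	- no CPU running below p's priority
 *
 *		return cpumask_any(lowest_mask);
 *	}
 *
 * The real caller, find_lowest_rq() in kernel/sched/rt.c, layers
 * scheduler-domain heuristics on top of the returned mask.
 */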

/**
 * cpupri_set - update the cpu priority setting
 * @cp: The cpupri context
 * @cpu: The target cpu
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the cpu was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic_inc();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic_inc();

		/*
		 * When removing from the vector, we decrement the counter
		 * first, do a memory barrier, and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic_inc();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}
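
/*
 * A sketch of the barrier pairing between cpupri_set() and cpupri_find()
 * (a restatement of the comments above, not additional semantics):
 *
 *	cpupri_set() (writer, add path)		cpupri_find() (reader)
 *	-------------------------------		----------------------
 *	cpumask_set_cpu(cpu, vec->mask);	atomic_read(&vec->count);
 *	smp_mb__before_atomic_inc();		smp_rmb();
 *	atomic_inc(&vec->count);		cpumask_any_and(..., vec->mask);
 *
 * A reader that observes the incremented count is therefore guaranteed to
 * also observe the bit set in the mask. The removal path inverts the order
 * (decrement count, barrier, clear mask), so the worst a racing reader
 * sees is a stale mask bit, which cpupri_find()'s rechecks and the
 * rebalancer logic already tolerate.
 */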

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	memset(cp, 0, sizeof(*cp));

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;
	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}
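
/*
 * Pairing sketch (hypothetical caller, for illustration; the identifiers
 * below other than cpupri_init/cpupri_cleanup are assumptions): an owner
 * such as a root domain embeds a struct cpupri and brackets its lifetime
 * with these two calls:
 *
 *	struct cpupri cp;
 *
 *	if (cpupri_init(&cp))
 *		return -ENOMEM;		- per-priority cpumasks not allocated
 *	...use cpupri_set()/cpupri_find()...
 *	cpupri_cleanup(&cp);		- frees the per-priority cpumasks
 */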

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}