1 | /* |
2 | * Scheduler topology setup/handling methods | |
3 | */ | |
4 | #include <linux/sched.h> | |
5 | #include <linux/mutex.h> | |
6 | ||
7 | #include "sched.h" | |
8 | ||
9 | DEFINE_MUTEX(sched_domains_mutex); | |
10 | ||
11 | /* Protected by sched_domains_mutex: */ | |
12 | cpumask_var_t sched_domains_tmpmask; | |
13 | cpumask_var_t sched_domains_tmpmask2; | |
14 | ||
15 | #ifdef CONFIG_SCHED_DEBUG | |
16 | ||
17 | static __read_mostly int sched_debug_enabled; | |
18 | ||
19 | static int __init sched_debug_setup(char *str) | |
20 | { | |
21 | sched_debug_enabled = 1; | |
22 | ||
23 | return 0; | |
24 | } | |
25 | early_param("sched_debug", sched_debug_setup); | |
26 | ||
27 | static inline bool sched_debug(void) | |
28 | { | |
29 | return sched_debug_enabled; | |
30 | } | |
31 | ||
32 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |
33 | struct cpumask *groupmask) | |
34 | { | |
35 | struct sched_group *group = sd->groups; | |
36 | ||
37 | cpumask_clear(groupmask); | |
38 | ||
39 | printk(KERN_DEBUG "%*s domain-%d: ", level, "", level); | |
40 | ||
41 | if (!(sd->flags & SD_LOAD_BALANCE)) { | |
42 | printk("does not load-balance\n"); | |
43 | if (sd->parent) | |
44 | printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" | |
45 | " has parent"); | |
46 | return -1; | |
47 | } | |
48 | ||
49 | printk(KERN_CONT "span=%*pbl level=%s\n", | |
50 | cpumask_pr_args(sched_domain_span(sd)), sd->name); | |
51 | ||
52 | if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { | |
53 | printk(KERN_ERR "ERROR: domain->span does not contain " | |
54 | "CPU%d\n", cpu); | |
55 | } | |
56 | if (!cpumask_test_cpu(cpu, sched_group_span(group))) { | |
57 | printk(KERN_ERR "ERROR: domain->groups does not contain" | |
58 | " CPU%d\n", cpu); | |
59 | } | |
60 | ||
61 | printk(KERN_DEBUG "%*s groups:", level + 1, ""); | |
62 | do { | |
63 | if (!group) { | |
64 | printk("\n"); | |
65 | printk(KERN_ERR "ERROR: group is NULL\n"); | |
66 | break; | |
67 | } | |
68 | ||
69 | if (!cpumask_weight(sched_group_span(group))) { | |
70 | printk(KERN_CONT "\n"); | |
71 | printk(KERN_ERR "ERROR: empty group\n"); | |
72 | break; | |
73 | } | |
74 | ||
75 | if (!(sd->flags & SD_OVERLAP) && | |
76 | cpumask_intersects(groupmask, sched_group_span(group))) { | |
77 | printk(KERN_CONT "\n"); | |
78 | printk(KERN_ERR "ERROR: repeated CPUs\n"); | |
79 | break; | |
80 | } | |
81 | ||
82 | cpumask_or(groupmask, groupmask, sched_group_span(group)); | |
83 | ||
84 | printk(KERN_CONT " %d:{ span=%*pbl", | |
85 | group->sgc->id, | |
86 | cpumask_pr_args(sched_group_span(group))); | |
87 | ||
88 | if ((sd->flags & SD_OVERLAP) && | |
89 | !cpumask_equal(group_balance_mask(group), sched_group_span(group))) { | |
90 | printk(KERN_CONT " mask=%*pbl", | |
91 | cpumask_pr_args(group_balance_mask(group))); | |
92 | } | |
93 | ||
94 | if (group->sgc->capacity != SCHED_CAPACITY_SCALE) | |
95 | printk(KERN_CONT " cap=%lu", group->sgc->capacity); | |
96 | ||
97 | if (group == sd->groups && sd->child && | |
98 | !cpumask_equal(sched_domain_span(sd->child), | |
99 | sched_group_span(group))) { | |
100 | printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n"); | |
101 | } | |
102 | ||
103 | printk(KERN_CONT " }"); | |
104 | ||
105 | group = group->next; | |
106 | ||
107 | if (group != sd->groups) | |
108 | printk(KERN_CONT ","); | |
109 | ||
110 | } while (group != sd->groups); | |
111 | printk(KERN_CONT "\n"); | |
112 | ||
113 | if (!cpumask_equal(sched_domain_span(sd), groupmask)) | |
114 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); | |
115 | ||
116 | if (sd->parent && | |
117 | !cpumask_subset(groupmask, sched_domain_span(sd->parent))) | |
118 | printk(KERN_ERR "ERROR: parent span is not a superset " | |
119 | "of domain->span\n"); | |
120 | return 0; | |
121 | } | |
122 | ||
123 | static void sched_domain_debug(struct sched_domain *sd, int cpu) | |
124 | { | |
125 | int level = 0; | |
126 | ||
127 | if (!sched_debug_enabled) | |
128 | return; | |
129 | ||
130 | if (!sd) { | |
131 | printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); | |
132 | return; | |
133 | } | |
134 | ||
135 | printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); | |
136 | ||
137 | for (;;) { | |
138 | if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) | |
139 | break; | |
140 | level++; | |
141 | sd = sd->parent; | |
142 | if (!sd) | |
143 | break; | |
144 | } | |
145 | } | |
146 | #else /* !CONFIG_SCHED_DEBUG */ | |
147 | ||
148 | # define sched_debug_enabled 0 | |
149 | # define sched_domain_debug(sd, cpu) do { } while (0) | |
150 | static inline bool sched_debug(void) | |
151 | { | |
152 | return false; | |
153 | } | |
154 | #endif /* CONFIG_SCHED_DEBUG */ | |
155 | ||
156 | static int sd_degenerate(struct sched_domain *sd) | |
157 | { | |
158 | if (cpumask_weight(sched_domain_span(sd)) == 1) | |
159 | return 1; | |
160 | ||
161 | /* Following flags need at least 2 groups */ | |
162 | if (sd->flags & (SD_LOAD_BALANCE | | |
163 | SD_BALANCE_NEWIDLE | | |
164 | SD_BALANCE_FORK | | |
165 | SD_BALANCE_EXEC | | |
166 | SD_SHARE_CPUCAPACITY | | |
167 | SD_ASYM_CPUCAPACITY | | |
168 | SD_SHARE_PKG_RESOURCES | | |
169 | SD_SHARE_POWERDOMAIN)) { | |
170 | if (sd->groups != sd->groups->next) | |
171 | return 0; | |
172 | } | |
173 | ||
174 | /* Following flags don't use groups */ | |
175 | if (sd->flags & (SD_WAKE_AFFINE)) | |
176 | return 0; | |
177 | ||
178 | return 1; | |
179 | } | |
180 | ||
181 | static int | |
182 | sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |
183 | { | |
184 | unsigned long cflags = sd->flags, pflags = parent->flags; | |
185 | ||
186 | if (sd_degenerate(parent)) | |
187 | return 1; | |
188 | ||
189 | if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) | |
190 | return 0; | |
191 | ||
192 | /* Flags needing groups don't count if only 1 group in parent */ | |
193 | if (parent->groups == parent->groups->next) { | |
194 | pflags &= ~(SD_LOAD_BALANCE | | |
195 | SD_BALANCE_NEWIDLE | | |
196 | SD_BALANCE_FORK | | |
197 | SD_BALANCE_EXEC | | |
198 | SD_ASYM_CPUCAPACITY | | |
199 | SD_SHARE_CPUCAPACITY | | |
200 | SD_SHARE_PKG_RESOURCES | | |
201 | SD_PREFER_SIBLING | | |
202 | SD_SHARE_POWERDOMAIN); | |
203 | if (nr_node_ids == 1) | |
204 | pflags &= ~SD_SERIALIZE; | |
205 | } | |
206 | if (~cflags & pflags) | |
207 | return 0; | |
208 | ||
209 | return 1; | |
210 | } | |
211 | ||
212 | static void free_rootdomain(struct rcu_head *rcu) | |
213 | { | |
214 | struct root_domain *rd = container_of(rcu, struct root_domain, rcu); | |
215 | ||
216 | cpupri_cleanup(&rd->cpupri); | |
217 | cpudl_cleanup(&rd->cpudl); | |
218 | free_cpumask_var(rd->dlo_mask); | |
219 | free_cpumask_var(rd->rto_mask); | |
220 | free_cpumask_var(rd->online); | |
221 | free_cpumask_var(rd->span); | |
222 | kfree(rd); | |
223 | } | |
224 | ||
225 | void rq_attach_root(struct rq *rq, struct root_domain *rd) | |
226 | { | |
227 | struct root_domain *old_rd = NULL; | |
228 | unsigned long flags; | |
229 | ||
230 | raw_spin_lock_irqsave(&rq->lock, flags); | |
231 | ||
232 | if (rq->rd) { | |
233 | old_rd = rq->rd; | |
234 | ||
235 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) | |
236 | set_rq_offline(rq); | |
237 | ||
238 | cpumask_clear_cpu(rq->cpu, old_rd->span); | |
239 | ||
240 | /* | |
241 | * If we don't want to free the old_rd yet then | |
242 | * set old_rd to NULL to skip the freeing later | |
243 | * in this function: | |
244 | */ | |
245 | if (!atomic_dec_and_test(&old_rd->refcount)) | |
246 | old_rd = NULL; | |
247 | } | |
248 | ||
249 | atomic_inc(&rd->refcount); | |
250 | rq->rd = rd; | |
251 | ||
252 | cpumask_set_cpu(rq->cpu, rd->span); | |
253 | if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) | |
254 | set_rq_online(rq); | |
255 | ||
256 | raw_spin_unlock_irqrestore(&rq->lock, flags); | |
257 | ||
258 | if (old_rd) | |
259 | call_rcu_sched(&old_rd->rcu, free_rootdomain); | |
260 | } | |
261 | ||
262 | static int init_rootdomain(struct root_domain *rd) | |
263 | { | |
264 | memset(rd, 0, sizeof(*rd)); | |
265 | ||
266 | if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) | |
267 | goto out; | |
268 | if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) | |
269 | goto free_span; | |
270 | if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) | |
271 | goto free_online; | |
272 | if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) | |
273 | goto free_dlo_mask; | |
274 | ||
275 | init_dl_bw(&rd->dl_bw); | |
276 | if (cpudl_init(&rd->cpudl) != 0) | |
277 | goto free_rto_mask; | |
278 | ||
279 | if (cpupri_init(&rd->cpupri) != 0) | |
280 | goto free_cpudl; | |
281 | return 0; | |
282 | ||
283 | free_cpudl: | |
284 | cpudl_cleanup(&rd->cpudl); | |
285 | free_rto_mask: | |
286 | free_cpumask_var(rd->rto_mask); | |
287 | free_dlo_mask: | |
288 | free_cpumask_var(rd->dlo_mask); | |
289 | free_online: | |
290 | free_cpumask_var(rd->online); | |
291 | free_span: | |
292 | free_cpumask_var(rd->span); | |
293 | out: | |
294 | return -ENOMEM; | |
295 | } | |
296 | ||
297 | /* | |
298 | * By default the system creates a single root-domain with all CPUs as | |
299 | * members (mimicking the global state we have today). | |
300 | */ | |
301 | struct root_domain def_root_domain; | |
302 | ||
303 | void init_defrootdomain(void) | |
304 | { | |
305 | init_rootdomain(&def_root_domain); | |
306 | ||
307 | atomic_set(&def_root_domain.refcount, 1); | |
308 | } | |
309 | ||
310 | static struct root_domain *alloc_rootdomain(void) | |
311 | { | |
312 | struct root_domain *rd; | |
313 | ||
314 | rd = kmalloc(sizeof(*rd), GFP_KERNEL); | |
315 | if (!rd) | |
316 | return NULL; | |
317 | ||
318 | if (init_rootdomain(rd) != 0) { | |
319 | kfree(rd); | |
320 | return NULL; | |
321 | } | |
322 | ||
323 | return rd; | |
324 | } | |
325 | ||
326 | static void free_sched_groups(struct sched_group *sg, int free_sgc) | |
327 | { | |
328 | struct sched_group *tmp, *first; | |
329 | ||
330 | if (!sg) | |
331 | return; | |
332 | ||
333 | first = sg; | |
334 | do { | |
335 | tmp = sg->next; | |
336 | ||
337 | if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) | |
338 | kfree(sg->sgc); | |
339 | ||
340 | kfree(sg); | |
341 | sg = tmp; | |
342 | } while (sg != first); | |
343 | } | |
344 | ||
345 | static void destroy_sched_domain(struct sched_domain *sd) | |
346 | { | |
347 | /* | |
348 | * If it's an overlapping domain it has private groups; iterate and | |
349 | * nuke them all. | |
350 | */ | |
351 | if (sd->flags & SD_OVERLAP) { | |
352 | free_sched_groups(sd->groups, 1); | |
353 | } else if (atomic_dec_and_test(&sd->groups->ref)) { | |
354 | kfree(sd->groups->sgc); | |
355 | kfree(sd->groups); | |
356 | } | |
357 | if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) | |
358 | kfree(sd->shared); | |
359 | kfree(sd); | |
360 | } | |
361 | ||
362 | static void destroy_sched_domains_rcu(struct rcu_head *rcu) | |
363 | { | |
364 | struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); | |
365 | ||
366 | while (sd) { | |
367 | struct sched_domain *parent = sd->parent; | |
368 | destroy_sched_domain(sd); | |
369 | sd = parent; | |
370 | } | |
371 | } | |
372 | ||
373 | static void destroy_sched_domains(struct sched_domain *sd) | |
374 | { | |
375 | if (sd) | |
376 | call_rcu(&sd->rcu, destroy_sched_domains_rcu); | |
377 | } | |
378 | ||
379 | /* | |
380 | * Keep a special pointer to the highest sched_domain that has | |
381 | * SD_SHARE_PKG_RESOURCES set (the Last Level Cache Domain); this | |
382 | * allows us to avoid some pointer chasing in select_idle_sibling(). | |
383 | * | |
384 | * Also keep a unique ID per domain (we use the first CPU number in | |
385 | * the cpumask of the domain), this allows us to quickly tell if | |
386 | * two CPUs are in the same cache domain, see cpus_share_cache(). | |
387 | */ | |
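/*
 * As a reading aid, the consumer side of sd_llc_id is essentially a one-line
 * comparison; cpus_share_cache() (in core.c) amounts to the sketch below:
 */
bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}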
388 | DEFINE_PER_CPU(struct sched_domain *, sd_llc); | |
389 | DEFINE_PER_CPU(int, sd_llc_size); | |
390 | DEFINE_PER_CPU(int, sd_llc_id); | |
391 | DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); | |
392 | DEFINE_PER_CPU(struct sched_domain *, sd_numa); | |
393 | DEFINE_PER_CPU(struct sched_domain *, sd_asym); | |
394 | ||
395 | static void update_top_cache_domain(int cpu) | |
396 | { | |
397 | struct sched_domain_shared *sds = NULL; | |
398 | struct sched_domain *sd; | |
399 | int id = cpu; | |
400 | int size = 1; | |
401 | ||
402 | sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); | |
403 | if (sd) { | |
404 | id = cpumask_first(sched_domain_span(sd)); | |
405 | size = cpumask_weight(sched_domain_span(sd)); | |
406 | sds = sd->shared; | |
407 | } | |
408 | ||
409 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); | |
410 | per_cpu(sd_llc_size, cpu) = size; | |
411 | per_cpu(sd_llc_id, cpu) = id; | |
412 | rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); | |
413 | ||
414 | sd = lowest_flag_domain(cpu, SD_NUMA); | |
415 | rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); | |
416 | ||
417 | sd = highest_flag_domain(cpu, SD_ASYM_PACKING); | |
418 | rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); | |
419 | } | |
420 | ||
421 | /* | |
422 | * Attach the domain 'sd' to 'cpu' as its base domain. Callers must | |
423 | * hold the hotplug lock. | |
424 | */ | |
425 | static void | |
426 | cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) | |
427 | { | |
428 | struct rq *rq = cpu_rq(cpu); | |
429 | struct sched_domain *tmp; | |
430 | ||
431 | /* Remove the sched domains which do not contribute to scheduling. */ | |
432 | for (tmp = sd; tmp; ) { | |
433 | struct sched_domain *parent = tmp->parent; | |
434 | if (!parent) | |
435 | break; | |
436 | ||
437 | if (sd_parent_degenerate(tmp, parent)) { | |
438 | tmp->parent = parent->parent; | |
439 | if (parent->parent) | |
440 | parent->parent->child = tmp; | |
441 | /* | |
442 | * Transfer SD_PREFER_SIBLING down in case of a | |
443 | * degenerate parent; the spans match for this | |
444 | * so the property transfers. | |
445 | */ | |
446 | if (parent->flags & SD_PREFER_SIBLING) | |
447 | tmp->flags |= SD_PREFER_SIBLING; | |
448 | destroy_sched_domain(parent); | |
449 | } else | |
450 | tmp = tmp->parent; | |
451 | } | |
452 | ||
453 | if (sd && sd_degenerate(sd)) { | |
454 | tmp = sd; | |
455 | sd = sd->parent; | |
456 | destroy_sched_domain(tmp); | |
457 | if (sd) | |
458 | sd->child = NULL; | |
459 | } | |
460 | ||
461 | sched_domain_debug(sd, cpu); | |
462 | ||
463 | rq_attach_root(rq, rd); | |
464 | tmp = rq->sd; | |
465 | rcu_assign_pointer(rq->sd, sd); | |
466 | destroy_sched_domains(tmp); | |
467 | ||
468 | update_top_cache_domain(cpu); | |
469 | } | |
470 | ||
471 | /* Set up the mask of CPUs configured for isolated domains */ | |
472 | static int __init isolated_cpu_setup(char *str) | |
473 | { | |
474 | int ret; | |
475 | ||
476 | alloc_bootmem_cpumask_var(&cpu_isolated_map); | |
477 | ret = cpulist_parse(str, cpu_isolated_map); | |
478 | if (ret) { | |
479 | pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids); | |
480 | return 0; | |
481 | } | |
482 | return 1; | |
483 | } | |
484 | __setup("isolcpus=", isolated_cpu_setup); | |
485 | ||
486 | struct s_data { | |
487 | struct sched_domain ** __percpu sd; | |
488 | struct root_domain *rd; | |
489 | }; | |
490 | ||
491 | enum s_alloc { | |
492 | sa_rootdomain, | |
493 | sa_sd, | |
494 | sa_sd_storage, | |
495 | sa_none, | |
496 | }; | |
497 | ||
498 | /* | |
499 | * Return the canonical balance CPU for this group; this is the first CPU | |
500 | * of this group that's also in the balance mask. | |
501 | * | |
502 | * The balance mask contains all those CPUs that could actually end up at | |
503 | * this group. See build_balance_mask(). | |
504 | * | |
505 | * Also see should_we_balance(). | |
506 | */ | |
507 | int group_balance_cpu(struct sched_group *sg) | |
508 | { | |
509 | return cpumask_first(group_balance_mask(sg)); | |
510 | } | |
511 | ||
512 | ||
513 | /* | |
514 | * NUMA topology (first read the regular topology blurb below) | |
515 | * | |
516 | * Given a node-distance table, for example: | |
517 | * | |
518 | * node 0 1 2 3 | |
519 | * 0: 10 20 30 20 | |
520 | * 1: 20 10 20 30 | |
521 | * 2: 30 20 10 20 | |
522 | * 3: 20 30 20 10 | |
523 | * | |
524 | * which represents a 4 node ring topology like: | |
525 | * | |
526 | * 0 ----- 1 | |
527 | * | | | |
528 | * | | | |
529 | * | | | |
530 | * 3 ----- 2 | |
531 | * | |
532 | * We want to construct domains and groups to represent this. The way we go | |
533 | * about doing this is to build the domains on 'hops'. For each NUMA level we | |
534 | * construct the mask of all nodes reachable in @level hops. | |
535 | * | |
536 | * For the above NUMA topology that gives 3 levels: | |
537 | * | |
538 | * NUMA-2 0-3 0-3 0-3 0-3 | |
539 | * groups: {0-1,3},{1-3} {0-2},{0,2-3} {1-3},{0-1,3} {0,2-3},{0-2} | |
540 | * | |
541 | * NUMA-1 0-1,3 0-2 1-3 0,2-3 | |
542 | * groups: {0},{1},{3} {0},{1},{2} {1},{2},{3} {0},{2},{3} | |
543 | * | |
544 | * NUMA-0 0 1 2 3 | |
545 | * | |
546 | * | |
547 | * As can be seen; things don't nicely line up as with the regular topology. | |
548 | * When we iterate a domain in child domain chunks some nodes can be | |
549 | * represented multiple times -- hence the "overlap" naming for this part of | |
550 | * the topology. | |
551 | * | |
552 | * In order to minimize this overlap, we only build enough groups to cover the | |
553 | * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3. | |
554 | * | |
555 | * Because: | |
556 | * | |
557 | * - the first group of each domain is its child domain; this | |
558 | * gets us the first 0-1,3 | |
559 | * - the only uncovered node is 2, whose child domain is 1-3. | |
560 | * | |
561 | * However, because of the overlap, computing a unique CPU for each group is | |
562 | * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both | |
563 | * groups include the CPUs of Node-0, while those CPUs would not in fact ever | |
564 | * end up at those groups (they would end up in group: 0-1,3). | |
565 | * | |
566 | * To correct this we have to introduce the group balance mask. This mask | |
567 | * will contain those CPUs in the group that can reach this group given the | |
568 | * (child) domain tree. | |
569 | * | |
570 | * With this we can once again compute balance_cpu and sched_group_capacity | |
571 | * relations. | |
572 | * | |
573 | * XXX include words on how balance_cpu is unique and therefore can be | |
574 | * used for sched_group_capacity links. | |
575 | * | |
576 | * | |
577 | * Another 'interesting' topology is: | |
578 | * | |
579 | * node 0 1 2 3 | |
580 | * 0: 10 20 20 30 | |
581 | * 1: 20 10 20 20 | |
582 | * 2: 20 20 10 20 | |
583 | * 3: 30 20 20 10 | |
584 | * | |
585 | * Which looks a little like: | |
586 | * | |
587 | * 0 ----- 1 | |
588 | * | / | | |
589 | * | / | | |
590 | * | / | | |
591 | * 2 ----- 3 | |
592 | * | |
593 | * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3 | |
594 | * are not. | |
595 | * | |
596 | * This leads to a few particularly weird cases where the number of sched domains | |
597 | * is not the same for each CPU. Consider: | |
598 | * | |
599 | * NUMA-2 0-3 0-3 | |
600 | * groups: {0-2},{1-3} {1-3},{0-2} | |
601 | * | |
602 | * NUMA-1 0-2 0-3 0-3 1-3 | |
603 | * | |
604 | * NUMA-0 0 1 2 3 | |
605 | * | |
606 | */ | |
607 | ||
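/*
 * A worked sketch of the hop construction above: for the 4-node ring table
 * the unique non-local distances are 20 and 30, and a node's mask at a given
 * level is simply "all nodes within that distance". A hypothetical helper
 * expressing just that step (sched_init_numa() below does the real work):
 */
static void numa_level_mask(int node, int max_distance, struct cpumask *mask)
{
	int k;

	cpumask_clear(mask);
	for_each_node(k) {
		if (node_distance(node, k) <= max_distance)
			cpumask_or(mask, mask, cpumask_of_node(k));
	}
}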
608 | ||
609 | /* | |
610 | * Build the balance mask; it contains only those CPUs that can arrive at this | |
611 | * group and should be considered to continue balancing. | |
612 | * | |
613 | * We do this during the group creation pass, therefore the group information | |
614 | * isn't complete yet, however since each group represents a (child) domain we | |
615 | * can fully construct this using the sched_domain bits (which are already | |
616 | * complete). | |
617 | */ | |
618 | static void | |
619 | build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask) | |
620 | { | |
621 | const struct cpumask *sg_span = sched_group_span(sg); | |
622 | struct sd_data *sdd = sd->private; |
623 | struct sched_domain *sibling; | |
624 | int i; | |
625 | ||
626 | cpumask_clear(mask); | |
627 | ||
628 | for_each_cpu(i, sg_span) { | |
629 | sibling = *per_cpu_ptr(sdd->sd, i); | |
630 | ||
631 | /* | |
632 | * Can happen in the asymmetric case, where these siblings are | |
633 | * unused. The mask will not be empty because those CPUs that | |
634 | * do have the top domain _should_ span the domain. | |
635 | */ | |
636 | if (!sibling->child) | |
637 | continue; | |
638 | ||
639 | /* If we would not end up here, we can't continue from here */ | |
640 | if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) | |
641 | continue; | |
642 | ||
643 | cpumask_set_cpu(i, mask); | |
644 | } | |
645 | ||
646 | /* We must not have empty masks here */ | |
647 | WARN_ON_ONCE(cpumask_empty(mask)); | |
648 | } |
649 | ||
650 | /* | |
651 | * XXX: This creates per-node group entries; since the load-balancer will | |
652 | * immediately access remote memory to construct this group's load-balance | |
653 | * statistics, having the groups node-local is of dubious benefit. | |
654 | */ | |
655 | static struct sched_group * |
656 | build_group_from_child_sched_domain(struct sched_domain *sd, int cpu) | |
657 | { | |
658 | struct sched_group *sg; | |
659 | struct cpumask *sg_span; | |
660 | ||
661 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), | |
662 | GFP_KERNEL, cpu_to_node(cpu)); | |
663 | ||
664 | if (!sg) | |
665 | return NULL; | |
666 | ||
667 | sg_span = sched_group_span(sg); | |
668 | if (sd->child) | |
669 | cpumask_copy(sg_span, sched_domain_span(sd->child)); | |
670 | else | |
671 | cpumask_copy(sg_span, sched_domain_span(sd)); | |
672 | ||
673 | return sg; | |
674 | } | |
675 | ||
676 | static void init_overlap_sched_group(struct sched_domain *sd, | |
677 | struct sched_group *sg) | |
678 | { | |
679 | struct cpumask *mask = sched_domains_tmpmask2; | |
680 | struct sd_data *sdd = sd->private; | |
681 | struct cpumask *sg_span; | |
682 | int cpu; | |
683 | ||
684 | build_balance_mask(sd, sg, mask); | |
685 | cpu = cpumask_first_and(sched_group_span(sg), mask); | |
686 | ||
687 | sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); | |
688 | if (atomic_inc_return(&sg->sgc->ref) == 1) | |
689 | cpumask_copy(group_balance_mask(sg), mask); | |
690 | else | |
691 | WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask)); | |
692 | ||
693 | /* | |
694 | * Initialize sgc->capacity such that even if we mess up the | |
695 | * domains and no possible iteration will get us here, we won't | |
696 | * die on a /0 trap. | |
697 | */ | |
698 | sg_span = sched_group_span(sg); | |
699 | sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); | |
700 | sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; | |
701 | } | |
702 | ||
703 | static int | |
704 | build_overlap_sched_groups(struct sched_domain *sd, int cpu) | |
705 | { | |
706 | struct sched_group *first = NULL, *last = NULL, *sg; | |
707 | const struct cpumask *span = sched_domain_span(sd); | |
708 | struct cpumask *covered = sched_domains_tmpmask; | |
709 | struct sd_data *sdd = sd->private; | |
710 | struct sched_domain *sibling; | |
711 | int i; | |
712 | ||
713 | cpumask_clear(covered); | |
714 | ||
715 | for_each_cpu_wrap(i, span, cpu) { | |
716 | struct cpumask *sg_span; | |
717 | ||
718 | if (cpumask_test_cpu(i, covered)) | |
719 | continue; | |
720 | ||
721 | sibling = *per_cpu_ptr(sdd->sd, i); | |
722 | ||
723 | /* |
724 | * Asymmetric node setups can result in situations where the | |
725 | * domain tree is of unequal depth, make sure to skip domains | |
726 | * that already cover the entire range. | |
727 | * | |
728 | * In that case build_sched_domains() will have terminated the | |
729 | * iteration early and our sibling sd spans will be empty. | |
730 | * Domains should always include the CPU they're built on, so | |
731 | * check that. | |
732 | */ | |
733 | if (!cpumask_test_cpu(i, sched_domain_span(sibling))) |
734 | continue; | |
735 | ||
736 | sg = build_group_from_child_sched_domain(sibling, cpu); | |
737 | if (!sg) | |
738 | goto fail; | |
739 | ||
740 | sg_span = sched_group_span(sg); | |
741 | cpumask_or(covered, covered, sg_span); | |
742 | ||
743 | init_overlap_sched_group(sd, sg); | |
744 | ||
745 | if (!first) | |
746 | first = sg; | |
747 | if (last) | |
748 | last->next = sg; | |
749 | last = sg; | |
750 | last->next = first; | |
751 | } | |
752 | sd->groups = first; | |
753 | ||
754 | return 0; | |
755 | ||
756 | fail: | |
757 | free_sched_groups(first, 0); | |
758 | ||
759 | return -ENOMEM; | |
760 | } | |
761 | ||
762 | |
763 | /* | |
764 | * Package topology (also see the load-balance blurb in fair.c) | |
765 | * | |
766 | * The scheduler builds a tree structure to represent a number of important | |
767 | * topology features. By default (default_topology[]) these include: | |
768 | * | |
769 | * - Simultaneous multithreading (SMT) | |
770 | * - Multi-Core Cache (MC) | |
771 | * - Package (DIE) | |
772 | * | |
773 | * Where the last one more or less denotes everything up to a NUMA node. | |
774 | * | |
775 | * The tree consists of 3 primary data structures: | |
776 | * | |
777 | * sched_domain -> sched_group -> sched_group_capacity | |
778 | * ^ ^ ^ ^ | |
779 | * `-' `-' | |
780 | * | |
781 | * The sched_domains are per-cpu and have a two way link (parent & child) and | |
782 | * denote the ever growing mask of CPUs belonging to that level of topology. | |
783 | * | |
784 | * Each sched_domain has a circular (double) linked list of sched_group's, each | |
785 | * denoting the domains of the level below (or individual CPUs in case of the | |
786 | * first domain level). The sched_group linked by a sched_domain includes the | |
787 | * CPU of that sched_domain [*]. | |
788 | * | |
789 | * Take for instance a 2 threaded, 2 core, 2 cache cluster part: | |
790 | * | |
791 | * CPU 0 1 2 3 4 5 6 7 | |
792 | * | |
793 | * DIE [ ] | |
794 | * MC [ ] [ ] | |
795 | * SMT [ ] [ ] [ ] [ ] | |
796 | * | |
797 | * - or - | |
798 | * | |
799 | * DIE 0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7 | |
800 | * MC 0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7 | |
801 | * SMT 0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7 | |
802 | * | |
803 | * CPU 0 1 2 3 4 5 6 7 | |
804 | * | |
805 | * One way to think about it is: sched_domain moves you up and down among these | |
806 | * topology levels, while sched_group moves you sideways through it, at child | |
807 | * domain granularity. | |
808 | * | |
809 | * sched_group_capacity ensures each unique sched_group has shared storage. | |
810 | * | |
811 | * There are two related construction problems, both of which require a CPU | |
812 | * that uniquely identifies each group (for a given domain): | |
813 | * | |
814 | * - The first is the balance_cpu (see should_we_balance() and the | |
815 | * load-balance blurb in fair.c); for each group we only want 1 CPU to | |
816 | * continue balancing at a higher domain. | |
817 | * | |
818 | * - The second is the sched_group_capacity; we want all identical groups | |
819 | * to share a single sched_group_capacity. | |
820 | * | |
821 | * These topologies are exclusive by construction: it is impossible for an | |
822 | * SMT thread to belong to multiple cores, and for cores to be part of | |
823 | * multiple caches. There is a very clear and unique location | |
824 | * for each CPU in the hierarchy. | |
825 | * | |
826 | * Therefore computing a unique CPU for each group is trivial (the iteration | |
827 | * mask would be redundant, set to all 1s; all CPUs in a group will end up at | |
828 | * _that_ group); we can simply pick the first CPU in each group. | |
829 | * | |
830 | * | |
831 | * [*] in other words, the first group of each domain is its child domain. | |
832 | */ | |
833 | ||
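/*
 * A minimal sketch of the two directions of travel described above: walking
 * up a CPU's domain tree (levels) and sideways along each domain's group
 * ring. The function name is hypothetical, and the RCU/locking rules real
 * callers must follow when dereferencing rq->sd are omitted:
 */
static void walk_topology(int cpu)
{
	struct sched_domain *sd;
	struct sched_group *sg;

	for (sd = cpu_rq(cpu)->sd; sd; sd = sd->parent) {	/* up/down */
		sg = sd->groups;
		do {						/* sideways */
			printk(KERN_DEBUG "level %d: group span=%*pbl\n",
			       sd->level,
			       cpumask_pr_args(sched_group_span(sg)));
			sg = sg->next;
		} while (sg != sd->groups);
	}
}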
834 | static struct sched_group *get_group(int cpu, struct sd_data *sdd) | |
835 | { | |
836 | struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); | |
837 | struct sched_domain *child = sd->child; | |
838 | struct sched_group *sg; | |
839 | ||
840 | if (child) | |
841 | cpu = cpumask_first(sched_domain_span(child)); | |
842 | ||
843 | sg = *per_cpu_ptr(sdd->sg, cpu); |
844 | sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); | |
845 | ||
846 | /* For claim_allocations: */ | |
847 | atomic_inc(&sg->ref); | |
848 | atomic_inc(&sg->sgc->ref); | |
849 | ||
850 | if (child) { | |
851 | cpumask_copy(sched_group_span(sg), sched_domain_span(child)); | |
852 | cpumask_copy(group_balance_mask(sg), sched_group_span(sg)); | |
853 | } else { | |
854 | cpumask_set_cpu(cpu, sched_group_span(sg)); | |
855 | cpumask_set_cpu(cpu, group_balance_mask(sg)); | |
856 | } |
857 | ||
858 | sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); | |
859 | sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; | |
860 | ||
861 | return sg; | |
862 | } |
863 | ||
864 | /* | |
865 | * build_sched_groups will build a circular linked list of the groups | |
866 | * covered by the given span, and will set each group's ->cpumask correctly, | |
867 | * and ->cpu_capacity to 0. | |
868 | * | |
869 | * Assumes the sched_domain tree is fully constructed | |
870 | */ | |
871 | static int | |
872 | build_sched_groups(struct sched_domain *sd, int cpu) | |
873 | { | |
874 | struct sched_group *first = NULL, *last = NULL; | |
875 | struct sd_data *sdd = sd->private; | |
876 | const struct cpumask *span = sched_domain_span(sd); | |
877 | struct cpumask *covered; | |
878 | int i; | |
879 | ||
880 | lockdep_assert_held(&sched_domains_mutex); |
881 | covered = sched_domains_tmpmask; | |
882 | ||
883 | cpumask_clear(covered); | |
884 | ||
885 | for_each_cpu_wrap(i, span, cpu) { | |
886 | struct sched_group *sg; | |
887 | ||
888 | if (cpumask_test_cpu(i, covered)) | |
889 | continue; | |
890 | ||
891 | sg = get_group(i, sdd); | |
892 | ||
893 | cpumask_or(covered, covered, sched_group_span(sg)); | |
894 | ||
895 | if (!first) | |
896 | first = sg; | |
897 | if (last) | |
898 | last->next = sg; | |
899 | last = sg; | |
900 | } | |
901 | last->next = first; | |
902 | sd->groups = first; | |
903 | ||
904 | return 0; | |
905 | } | |
906 | ||
907 | /* | |
908 | * Initialize sched groups cpu_capacity. | |
909 | * | |
910 | * cpu_capacity indicates the capacity of a sched group, which is used while | |
911 | * distributing the load between different sched groups in a sched domain. | |
912 | * Typically, cpu_capacity for all the groups in a sched domain will be the same | |
913 | * unless there are asymmetries in the topology. If there are asymmetries, the | |
914 | * group having more cpu_capacity will pick up more load compared to the | |
915 | * group having less cpu_capacity. | |
916 | */ | |
917 | static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) | |
918 | { | |
919 | struct sched_group *sg = sd->groups; | |
920 | ||
921 | WARN_ON(!sg); | |
922 | ||
923 | do { | |
924 | int cpu, max_cpu = -1; | |
925 | ||
926 | sg->group_weight = cpumask_weight(sched_group_span(sg)); | |
927 | ||
928 | if (!(sd->flags & SD_ASYM_PACKING)) | |
929 | goto next; | |
930 | ||
931 | for_each_cpu(cpu, sched_group_span(sg)) { | |
932 | if (max_cpu < 0) |
933 | max_cpu = cpu; | |
934 | else if (sched_asym_prefer(cpu, max_cpu)) | |
935 | max_cpu = cpu; | |
936 | } | |
937 | sg->asym_prefer_cpu = max_cpu; | |
938 | ||
939 | next: | |
940 | sg = sg->next; | |
941 | } while (sg != sd->groups); | |
942 | ||
943 | if (cpu != group_balance_cpu(sg)) | |
944 | return; | |
945 | ||
946 | update_group_capacity(sd, cpu); | |
947 | } | |
948 | ||
949 | /* | |
950 | * Initializers for schedule domains | |
951 | * Non-inlined to reduce accumulated stack pressure in build_sched_domains() | |
952 | */ | |
953 | ||
954 | static int default_relax_domain_level = -1; | |
955 | int sched_domain_level_max; | |
956 | ||
957 | static int __init setup_relax_domain_level(char *str) | |
958 | { | |
959 | if (kstrtoint(str, 0, &default_relax_domain_level)) | |
960 | pr_warn("Unable to set relax_domain_level\n"); | |
961 | ||
962 | return 1; | |
963 | } | |
964 | __setup("relax_domain_level=", setup_relax_domain_level); | |
965 | ||
966 | static void set_domain_attribute(struct sched_domain *sd, | |
967 | struct sched_domain_attr *attr) | |
968 | { | |
969 | int request; | |
970 | ||
971 | if (!attr || attr->relax_domain_level < 0) { | |
972 | if (default_relax_domain_level < 0) | |
973 | return; | |
974 | else | |
975 | request = default_relax_domain_level; | |
976 | } else | |
977 | request = attr->relax_domain_level; | |
978 | if (request < sd->level) { | |
979 | /* Turn off idle balance on this domain: */ | |
980 | sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); | |
981 | } else { | |
982 | /* Turn on idle balance on this domain: */ | |
983 | sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); | |
984 | } | |
985 | } | |
986 | ||
987 | static void __sdt_free(const struct cpumask *cpu_map); | |
988 | static int __sdt_alloc(const struct cpumask *cpu_map); | |
989 | ||
990 | static void __free_domain_allocs(struct s_data *d, enum s_alloc what, | |
991 | const struct cpumask *cpu_map) | |
992 | { | |
993 | switch (what) { | |
994 | case sa_rootdomain: | |
995 | if (!atomic_read(&d->rd->refcount)) | |
996 | free_rootdomain(&d->rd->rcu); | |
997 | /* Fall through */ | |
998 | case sa_sd: | |
999 | free_percpu(d->sd); | |
1000 | /* Fall through */ | |
1001 | case sa_sd_storage: | |
1002 | __sdt_free(cpu_map); | |
1003 | /* Fall through */ | |
1004 | case sa_none: | |
1005 | break; | |
1006 | } | |
1007 | } | |
1008 | ||
1009 | static enum s_alloc | |
1010 | __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) | |
1011 | { | |
1012 | memset(d, 0, sizeof(*d)); | |
1013 | ||
1014 | if (__sdt_alloc(cpu_map)) | |
1015 | return sa_sd_storage; | |
1016 | d->sd = alloc_percpu(struct sched_domain *); | |
1017 | if (!d->sd) | |
1018 | return sa_sd_storage; | |
1019 | d->rd = alloc_rootdomain(); | |
1020 | if (!d->rd) | |
1021 | return sa_sd; | |
1022 | return sa_rootdomain; | |
1023 | } | |
1024 | ||
1025 | /* | |
1026 | * NULL the sd_data elements we've used to build the sched_domain and | |
1027 | * sched_group structure so that the subsequent __free_domain_allocs() | |
1028 | * will not free the data we're using. | |
1029 | */ | |
1030 | static void claim_allocations(int cpu, struct sched_domain *sd) | |
1031 | { | |
1032 | struct sd_data *sdd = sd->private; | |
1033 | ||
1034 | WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); | |
1035 | *per_cpu_ptr(sdd->sd, cpu) = NULL; | |
1036 | ||
1037 | if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) | |
1038 | *per_cpu_ptr(sdd->sds, cpu) = NULL; | |
1039 | ||
1040 | if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) | |
1041 | *per_cpu_ptr(sdd->sg, cpu) = NULL; | |
1042 | ||
1043 | if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) | |
1044 | *per_cpu_ptr(sdd->sgc, cpu) = NULL; | |
1045 | } | |
1046 | ||
1047 | #ifdef CONFIG_NUMA | |
1048 | static int sched_domains_numa_levels; | |
1049 | enum numa_topology_type sched_numa_topology_type; | |
1050 | static int *sched_domains_numa_distance; | |
1051 | int sched_max_numa_distance; | |
1052 | static struct cpumask ***sched_domains_numa_masks; | |
1053 | static int sched_domains_curr_level; | |
1054 | #endif | |
1055 | ||
1056 | /* | |
1057 | * SD_flags allowed in topology descriptions. | |
1058 | * | |
1059 | * These flags are purely descriptive of the topology and do not prescribe | |
1060 | * behaviour. Behaviour is artificial and mapped in the below sd_init() | |
1061 | * function: | |
1062 | * | |
1063 | * SD_SHARE_CPUCAPACITY - describes SMT topologies | |
1064 | * SD_SHARE_PKG_RESOURCES - describes shared caches | |
1065 | * SD_NUMA - describes NUMA topologies | |
1066 | * SD_SHARE_POWERDOMAIN - describes shared power domain | |
1067 | * SD_ASYM_CPUCAPACITY - describes mixed capacity topologies | |
1068 | * | |
1069 | * Odd one out, which beside describing the topology has a quirk also | |
1070 | * prescribes the desired behaviour that goes along with it: | |
1071 | * | |
1072 | * SD_ASYM_PACKING - describes SMT quirks | |
1073 | */ | |
1074 | #define TOPOLOGY_SD_FLAGS \ | |
1075 | (SD_SHARE_CPUCAPACITY | \ | |
1076 | SD_SHARE_PKG_RESOURCES | \ | |
1077 | SD_NUMA | \ | |
1078 | SD_ASYM_PACKING | \ | |
1079 | SD_ASYM_CPUCAPACITY | \ | |
1080 | SD_SHARE_POWERDOMAIN) | |
1081 | ||
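/*
 * For reference, the sd_flags callbacks wired into the topology tables are
 * tiny functions returning only bits from TOPOLOGY_SD_FLAGS; the SMT and MC
 * ones in include/linux/sched/topology.h look roughly like this sketch:
 */
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_POWERDOMAIN;
}

static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}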
1082 | static struct sched_domain * | |
1083 | sd_init(struct sched_domain_topology_level *tl, | |
1084 | const struct cpumask *cpu_map, | |
1085 | struct sched_domain *child, int cpu) | |
1086 | { | |
1087 | struct sd_data *sdd = &tl->data; | |
1088 | struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); | |
1089 | int sd_id, sd_weight, sd_flags = 0; | |
1090 | ||
1091 | #ifdef CONFIG_NUMA | |
1092 | /* | |
1093 | * Ugly hack to pass state to sd_numa_mask()... | |
1094 | */ | |
1095 | sched_domains_curr_level = tl->numa_level; | |
1096 | #endif | |
1097 | ||
1098 | sd_weight = cpumask_weight(tl->mask(cpu)); | |
1099 | ||
1100 | if (tl->sd_flags) | |
1101 | sd_flags = (*tl->sd_flags)(); | |
1102 | if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, | |
1103 | "wrong sd_flags in topology description\n")) | |
1104 | sd_flags &= ~TOPOLOGY_SD_FLAGS; | |
1105 | ||
1106 | *sd = (struct sched_domain){ | |
1107 | .min_interval = sd_weight, | |
1108 | .max_interval = 2*sd_weight, | |
1109 | .busy_factor = 32, | |
1110 | .imbalance_pct = 125, | |
1111 | ||
1112 | .cache_nice_tries = 0, | |
1113 | .busy_idx = 0, | |
1114 | .idle_idx = 0, | |
1115 | .newidle_idx = 0, | |
1116 | .wake_idx = 0, | |
1117 | .forkexec_idx = 0, | |
1118 | ||
1119 | .flags = 1*SD_LOAD_BALANCE | |
1120 | | 1*SD_BALANCE_NEWIDLE | |
1121 | | 1*SD_BALANCE_EXEC | |
1122 | | 1*SD_BALANCE_FORK | |
1123 | | 0*SD_BALANCE_WAKE | |
1124 | | 1*SD_WAKE_AFFINE | |
1125 | | 0*SD_SHARE_CPUCAPACITY | |
1126 | | 0*SD_SHARE_PKG_RESOURCES | |
1127 | | 0*SD_SERIALIZE | |
1128 | | 0*SD_PREFER_SIBLING | |
1129 | | 0*SD_NUMA | |
1130 | | sd_flags | |
1131 | , | |
1132 | ||
1133 | .last_balance = jiffies, | |
1134 | .balance_interval = sd_weight, | |
1135 | .smt_gain = 0, | |
1136 | .max_newidle_lb_cost = 0, | |
1137 | .next_decay_max_lb_cost = jiffies, | |
1138 | .child = child, | |
1139 | #ifdef CONFIG_SCHED_DEBUG | |
1140 | .name = tl->name, | |
1141 | #endif | |
1142 | }; | |
1143 | ||
1144 | cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); | |
1145 | sd_id = cpumask_first(sched_domain_span(sd)); | |
1146 | ||
1147 | /* | |
1148 | * Convert topological properties into behaviour. | |
1149 | */ | |
1150 | ||
1151 | if (sd->flags & SD_ASYM_CPUCAPACITY) { | |
1152 | struct sched_domain *t = sd; | |
1153 | ||
1154 | for_each_lower_domain(t) | |
1155 | t->flags |= SD_BALANCE_WAKE; | |
1156 | } | |
1157 | ||
1158 | if (sd->flags & SD_SHARE_CPUCAPACITY) { | |
1159 | sd->flags |= SD_PREFER_SIBLING; | |
1160 | sd->imbalance_pct = 110; | |
1161 | sd->smt_gain = 1178; /* ~15% */ | |
1162 | ||
1163 | } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { | |
1164 | sd->imbalance_pct = 117; | |
1165 | sd->cache_nice_tries = 1; | |
1166 | sd->busy_idx = 2; | |
1167 | ||
1168 | #ifdef CONFIG_NUMA | |
1169 | } else if (sd->flags & SD_NUMA) { | |
1170 | sd->cache_nice_tries = 2; | |
1171 | sd->busy_idx = 3; | |
1172 | sd->idle_idx = 2; | |
1173 | ||
1174 | sd->flags |= SD_SERIALIZE; | |
1175 | if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { | |
1176 | sd->flags &= ~(SD_BALANCE_EXEC | | |
1177 | SD_BALANCE_FORK | | |
1178 | SD_WAKE_AFFINE); | |
1179 | } | |
1180 | ||
1181 | #endif | |
1182 | } else { | |
1183 | sd->flags |= SD_PREFER_SIBLING; | |
1184 | sd->cache_nice_tries = 1; | |
1185 | sd->busy_idx = 2; | |
1186 | sd->idle_idx = 1; | |
1187 | } | |
1188 | ||
1189 | /* | |
1190 | * For all levels sharing cache; connect a sched_domain_shared | |
1191 | * instance. | |
1192 | */ | |
1193 | if (sd->flags & SD_SHARE_PKG_RESOURCES) { | |
1194 | sd->shared = *per_cpu_ptr(sdd->sds, sd_id); | |
1195 | atomic_inc(&sd->shared->ref); | |
1196 | atomic_set(&sd->shared->nr_busy_cpus, sd_weight); | |
1197 | } | |
1198 | ||
1199 | sd->private = sdd; | |
1200 | ||
1201 | return sd; | |
1202 | } | |
1203 | ||
1204 | /* | |
1205 | * Topology list, bottom-up. | |
1206 | */ | |
1207 | static struct sched_domain_topology_level default_topology[] = { | |
1208 | #ifdef CONFIG_SCHED_SMT | |
1209 | { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, | |
1210 | #endif | |
1211 | #ifdef CONFIG_SCHED_MC | |
1212 | { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, | |
1213 | #endif | |
1214 | { cpu_cpu_mask, SD_INIT_NAME(DIE) }, | |
1215 | { NULL, }, | |
1216 | }; | |
1217 | ||
1218 | static struct sched_domain_topology_level *sched_domain_topology = | |
1219 | default_topology; | |
1220 | ||
1221 | #define for_each_sd_topology(tl) \ | |
1222 | for (tl = sched_domain_topology; tl->mask; tl++) | |
1223 | ||
1224 | void set_sched_topology(struct sched_domain_topology_level *tl) | |
1225 | { | |
1226 | if (WARN_ON_ONCE(sched_smp_initialized)) | |
1227 | return; | |
1228 | ||
1229 | sched_domain_topology = tl; | |
1230 | } | |
1231 | ||
1232 | #ifdef CONFIG_NUMA | |
1233 | ||
1234 | static const struct cpumask *sd_numa_mask(int cpu) | |
1235 | { | |
1236 | return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; | |
1237 | } | |
1238 | ||
1239 | static void sched_numa_warn(const char *str) | |
1240 | { | |
1241 | static int done = false; | |
1242 | int i,j; | |
1243 | ||
1244 | if (done) | |
1245 | return; | |
1246 | ||
1247 | done = true; | |
1248 | ||
1249 | printk(KERN_WARNING "ERROR: %s\n\n", str); | |
1250 | ||
1251 | for (i = 0; i < nr_node_ids; i++) { | |
1252 | printk(KERN_WARNING " "); | |
1253 | for (j = 0; j < nr_node_ids; j++) | |
1254 | printk(KERN_CONT "%02d ", node_distance(i,j)); | |
1255 | printk(KERN_CONT "\n"); | |
1256 | } | |
1257 | printk(KERN_WARNING "\n"); | |
1258 | } | |
1259 | ||
1260 | bool find_numa_distance(int distance) | |
1261 | { | |
1262 | int i; | |
1263 | ||
1264 | if (distance == node_distance(0, 0)) | |
1265 | return true; | |
1266 | ||
1267 | for (i = 0; i < sched_domains_numa_levels; i++) { | |
1268 | if (sched_domains_numa_distance[i] == distance) | |
1269 | return true; | |
1270 | } | |
1271 | ||
1272 | return false; | |
1273 | } | |
1274 | ||
1275 | /* | |
1276 | * A system can have three types of NUMA topology: | |
1277 | * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system | |
1278 | * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes | |
1279 | * NUMA_BACKPLANE: nodes can reach other nodes through a backplane | |
1280 | * | |
1281 | * The difference between a glueless mesh topology and a backplane | |
1282 | * topology lies in whether communication between not directly | |
1283 | * connected nodes goes through intermediary nodes (where programs | |
1284 | * could run), or through backplane controllers. This affects | |
1285 | * placement of programs. | |
1286 | * | |
1287 | * The type of topology can be discerned with the following tests: | |
1288 | * - If the maximum distance between any nodes is 1 hop, the system | |
1289 | * is directly connected. | |
1290 | * - If for two nodes A and B, located N > 1 hops away from each other, | |
1291 | * there is an intermediary node C, which is < N hops away from both | |
1292 | * nodes A and B, the system is a glueless mesh. | |
1293 | */ | |
1294 | static void init_numa_topology_type(void) | |
1295 | { | |
1296 | int a, b, c, n; | |
1297 | ||
1298 | n = sched_max_numa_distance; | |
1299 | ||
1300 | if (sched_domains_numa_levels <= 1) { | |
1301 | sched_numa_topology_type = NUMA_DIRECT; | |
1302 | return; | |
1303 | } | |
1304 | ||
1305 | for_each_online_node(a) { | |
1306 | for_each_online_node(b) { | |
1307 | /* Find two nodes furthest removed from each other. */ | |
1308 | if (node_distance(a, b) < n) | |
1309 | continue; | |
1310 | ||
1311 | /* Is there an intermediary node between a and b? */ | |
1312 | for_each_online_node(c) { | |
1313 | if (node_distance(a, c) < n && | |
1314 | node_distance(b, c) < n) { | |
1315 | sched_numa_topology_type = | |
1316 | NUMA_GLUELESS_MESH; | |
1317 | return; | |
1318 | } | |
1319 | } | |
1320 | ||
1321 | sched_numa_topology_type = NUMA_BACKPLANE; | |
1322 | return; | |
1323 | } | |
1324 | } | |
1325 | } | |
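/*
 * Worked example: for the asymmetric 4-node table in the blurb further up
 * (distances 10/20/30), sched_max_numa_distance is 30; nodes 0 and 3 are 30
 * apart, and node 1 is only 20 away from both of them, so the classification
 * above selects NUMA_GLUELESS_MESH rather than NUMA_BACKPLANE.
 */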
1326 | ||
1327 | void sched_init_numa(void) | |
1328 | { | |
1329 | int next_distance, curr_distance = node_distance(0, 0); | |
1330 | struct sched_domain_topology_level *tl; | |
1331 | int level = 0; | |
1332 | int i, j, k; | |
1333 | ||
1334 | sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); | |
1335 | if (!sched_domains_numa_distance) | |
1336 | return; | |
1337 | ||
1338 | /* | |
1339 | * O(nr_nodes^2) deduplicating selection sort -- in order to find the | |
1340 | * unique distances in the node_distance() table. | |
1341 | * | |
1342 | * Assumes node_distance(0,j) includes all distances in | |
1343 | * node_distance(i,j) in order to avoid cubic time. | |
1344 | */ | |
1345 | next_distance = curr_distance; | |
1346 | for (i = 0; i < nr_node_ids; i++) { | |
1347 | for (j = 0; j < nr_node_ids; j++) { | |
1348 | for (k = 0; k < nr_node_ids; k++) { | |
1349 | int distance = node_distance(i, k); | |
1350 | ||
1351 | if (distance > curr_distance && | |
1352 | (distance < next_distance || | |
1353 | next_distance == curr_distance)) | |
1354 | next_distance = distance; | |
1355 | ||
1356 | /* | |
1357 | * While not a strong assumption it would be nice to know | |
1358 | * about cases where if node A is connected to B, B is not | |
1359 | * equally connected to A. | |
1360 | */ | |
1361 | if (sched_debug() && node_distance(k, i) != distance) | |
1362 | sched_numa_warn("Node-distance not symmetric"); | |
1363 | ||
1364 | if (sched_debug() && i && !find_numa_distance(distance)) | |
1365 | sched_numa_warn("Node-0 not representative"); | |
1366 | } | |
1367 | if (next_distance != curr_distance) { | |
1368 | sched_domains_numa_distance[level++] = next_distance; | |
1369 | sched_domains_numa_levels = level; | |
1370 | curr_distance = next_distance; | |
1371 | } else break; | |
1372 | } | |
1373 | ||
1374 | /* | |
1375 | * In case of sched_debug() we verify the above assumption. | |
1376 | */ | |
1377 | if (!sched_debug()) | |
1378 | break; | |
1379 | } | |
1380 | ||
1381 | if (!level) | |
1382 | return; | |
1383 | ||
1384 | /* | |
1385 | * 'level' contains the number of unique distances, excluding the | |
1386 | * identity distance node_distance(i,i). | |
1387 | * | |
1388 | * The sched_domains_numa_distance[] array includes the actual distance | |
1389 | * numbers. | |
1390 | */ | |
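/*
 * Worked example: for the 4-node ring table in the blurb near the top of
 * this file, the distances above the local one are 20 and 30, so this pass
 * ends with level == 2 and sched_domains_numa_distance[] == { 20, 30 }.
 */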
1391 | ||
1392 | /* | |
1393 | * Here, we should temporarily reset sched_domains_numa_levels to 0. | |
1394 | * If it fails to allocate memory for array sched_domains_numa_masks[][], | |
1395 | * the array will contain fewer than 'level' members. This could be | |
1396 | * dangerous when we use it to iterate array sched_domains_numa_masks[][] | |
1397 | * in other functions. | |
1398 | * | |
1399 | * We reset it to 'level' at the end of this function. | |
1400 | */ | |
1401 | sched_domains_numa_levels = 0; | |
1402 | ||
1403 | sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); | |
1404 | if (!sched_domains_numa_masks) | |
1405 | return; | |
1406 | ||
1407 | /* | |
1408 | * Now for each level, construct a mask per node which contains all | |
1409 | * CPUs of nodes that are that many hops away from us. | |
1410 | */ | |
1411 | for (i = 0; i < level; i++) { | |
1412 | sched_domains_numa_masks[i] = | |
1413 | kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); | |
1414 | if (!sched_domains_numa_masks[i]) | |
1415 | return; | |
1416 | ||
1417 | for (j = 0; j < nr_node_ids; j++) { | |
1418 | struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); | |
1419 | if (!mask) | |
1420 | return; | |
1421 | ||
1422 | sched_domains_numa_masks[i][j] = mask; | |
1423 | ||
1424 | for_each_node(k) { | |
1425 | if (node_distance(j, k) > sched_domains_numa_distance[i]) | |
1426 | continue; | |
1427 | ||
1428 | cpumask_or(mask, mask, cpumask_of_node(k)); | |
1429 | } | |
1430 | } | |
1431 | } | |
1432 | ||
1433 | /* Compute default topology size */ | |
1434 | for (i = 0; sched_domain_topology[i].mask; i++); | |
1435 | ||
1436 | tl = kzalloc((i + level + 1) * | |
1437 | sizeof(struct sched_domain_topology_level), GFP_KERNEL); | |
1438 | if (!tl) | |
1439 | return; | |
1440 | ||
1441 | /* | |
1442 | * Copy the default topology bits.. | |
1443 | */ | |
1444 | for (i = 0; sched_domain_topology[i].mask; i++) | |
1445 | tl[i] = sched_domain_topology[i]; | |
1446 | ||
1447 | /* | |
1448 | * .. and append 'j' levels of NUMA goodness. | |
1449 | */ | |
1450 | for (j = 0; j < level; i++, j++) { | |
1451 | tl[i] = (struct sched_domain_topology_level){ | |
1452 | .mask = sd_numa_mask, | |
1453 | .sd_flags = cpu_numa_flags, | |
1454 | .flags = SDTL_OVERLAP, | |
1455 | .numa_level = j, | |
1456 | SD_INIT_NAME(NUMA) | |
1457 | }; | |
1458 | } | |
1459 | ||
1460 | sched_domain_topology = tl; | |
1461 | ||
1462 | sched_domains_numa_levels = level; | |
1463 | sched_max_numa_distance = sched_domains_numa_distance[level - 1]; | |
1464 | ||
1465 | init_numa_topology_type(); | |
1466 | } | |
1467 | ||
1468 | void sched_domains_numa_masks_set(unsigned int cpu) | |
1469 | { | |
1470 | int node = cpu_to_node(cpu); | |
1471 | int i, j; | |
1472 | ||
1473 | for (i = 0; i < sched_domains_numa_levels; i++) { | |
1474 | for (j = 0; j < nr_node_ids; j++) { | |
1475 | if (node_distance(j, node) <= sched_domains_numa_distance[i]) | |
1476 | cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); | |
1477 | } | |
1478 | } | |
1479 | } | |
1480 | ||
1481 | void sched_domains_numa_masks_clear(unsigned int cpu) | |
1482 | { | |
1483 | int i, j; | |
1484 | ||
1485 | for (i = 0; i < sched_domains_numa_levels; i++) { | |
1486 | for (j = 0; j < nr_node_ids; j++) | |
1487 | cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); | |
1488 | } | |
1489 | } | |
1490 | ||
1491 | #endif /* CONFIG_NUMA */ | |
1492 | ||
1493 | static int __sdt_alloc(const struct cpumask *cpu_map) | |
1494 | { | |
1495 | struct sched_domain_topology_level *tl; | |
1496 | int j; | |
1497 | ||
1498 | for_each_sd_topology(tl) { | |
1499 | struct sd_data *sdd = &tl->data; | |
1500 | ||
1501 | sdd->sd = alloc_percpu(struct sched_domain *); | |
1502 | if (!sdd->sd) | |
1503 | return -ENOMEM; | |
1504 | ||
1505 | sdd->sds = alloc_percpu(struct sched_domain_shared *); | |
1506 | if (!sdd->sds) | |
1507 | return -ENOMEM; | |
1508 | ||
1509 | sdd->sg = alloc_percpu(struct sched_group *); | |
1510 | if (!sdd->sg) | |
1511 | return -ENOMEM; | |
1512 | ||
1513 | sdd->sgc = alloc_percpu(struct sched_group_capacity *); | |
1514 | if (!sdd->sgc) | |
1515 | return -ENOMEM; | |
1516 | ||
1517 | for_each_cpu(j, cpu_map) { | |
1518 | struct sched_domain *sd; | |
1519 | struct sched_domain_shared *sds; | |
1520 | struct sched_group *sg; | |
1521 | struct sched_group_capacity *sgc; | |
1522 | ||
1523 | sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), | |
1524 | GFP_KERNEL, cpu_to_node(j)); | |
1525 | if (!sd) | |
1526 | return -ENOMEM; | |
1527 | ||
1528 | *per_cpu_ptr(sdd->sd, j) = sd; | |
1529 | ||
1530 | sds = kzalloc_node(sizeof(struct sched_domain_shared), | |
1531 | GFP_KERNEL, cpu_to_node(j)); | |
1532 | if (!sds) | |
1533 | return -ENOMEM; | |
1534 | ||
1535 | *per_cpu_ptr(sdd->sds, j) = sds; | |
1536 | ||
1537 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), | |
1538 | GFP_KERNEL, cpu_to_node(j)); | |
1539 | if (!sg) | |
1540 | return -ENOMEM; | |
1541 | ||
1542 | sg->next = sg; | |
1543 | ||
1544 | *per_cpu_ptr(sdd->sg, j) = sg; | |
1545 | ||
1546 | sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), | |
1547 | GFP_KERNEL, cpu_to_node(j)); | |
1548 | if (!sgc) | |
1549 | return -ENOMEM; | |
1550 | ||
1551 | #ifdef CONFIG_SCHED_DEBUG | |
1552 | sgc->id = j; | |
1553 | #endif | |
1554 | ||
1555 | *per_cpu_ptr(sdd->sgc, j) = sgc; | |
1556 | } | |
1557 | } | |
1558 | ||
1559 | return 0; | |
1560 | } | |
1561 | ||
1562 | static void __sdt_free(const struct cpumask *cpu_map) | |
1563 | { | |
1564 | struct sched_domain_topology_level *tl; | |
1565 | int j; | |
1566 | ||
1567 | for_each_sd_topology(tl) { | |
1568 | struct sd_data *sdd = &tl->data; | |
1569 | ||
1570 | for_each_cpu(j, cpu_map) { | |
1571 | struct sched_domain *sd; | |
1572 | ||
1573 | if (sdd->sd) { | |
1574 | sd = *per_cpu_ptr(sdd->sd, j); | |
1575 | if (sd && (sd->flags & SD_OVERLAP)) | |
1576 | free_sched_groups(sd->groups, 0); | |
1577 | kfree(*per_cpu_ptr(sdd->sd, j)); | |
1578 | } | |
1579 | ||
1580 | if (sdd->sds) | |
1581 | kfree(*per_cpu_ptr(sdd->sds, j)); | |
1582 | if (sdd->sg) | |
1583 | kfree(*per_cpu_ptr(sdd->sg, j)); | |
1584 | if (sdd->sgc) | |
1585 | kfree(*per_cpu_ptr(sdd->sgc, j)); | |
1586 | } | |
1587 | free_percpu(sdd->sd); | |
1588 | sdd->sd = NULL; | |
1589 | free_percpu(sdd->sds); | |
1590 | sdd->sds = NULL; | |
1591 | free_percpu(sdd->sg); | |
1592 | sdd->sg = NULL; | |
1593 | free_percpu(sdd->sgc); | |
1594 | sdd->sgc = NULL; | |
1595 | } | |
1596 | } | |
1597 | ||
1598 | struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, | |
1599 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, | |
1600 | struct sched_domain *child, int cpu) | |
1601 | { | |
1602 | struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); | |
1603 | ||
1604 | if (child) { | |
1605 | sd->level = child->level + 1; | |
1606 | sched_domain_level_max = max(sched_domain_level_max, sd->level); | |
1607 | child->parent = sd; | |
1608 | ||
1609 | if (!cpumask_subset(sched_domain_span(child), | |
1610 | sched_domain_span(sd))) { | |
1611 | pr_err("BUG: arch topology borken\n"); | |
1612 | #ifdef CONFIG_SCHED_DEBUG | |
1613 | pr_err(" the %s domain not a subset of the %s domain\n", | |
1614 | child->name, sd->name); | |
1615 | #endif | |
1616 | /* Fixup, ensure @sd has at least @child cpus. */ | |
1617 | cpumask_or(sched_domain_span(sd), | |
1618 | sched_domain_span(sd), | |
1619 | sched_domain_span(child)); | |
1620 | } | |
1621 | ||
1622 | } | |
1623 | set_domain_attribute(sd, attr); | |
1624 | ||
1625 | return sd; | |
1626 | } | |
1627 | ||
1628 | /* | |
1629 | * Build sched domains for a given set of CPUs and attach the sched domains | |
1630 | * to the individual CPUs | |
1631 | */ | |
1632 | static int | |
1633 | build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) | |
1634 | { | |
1635 | enum s_alloc alloc_state; | |
1636 | struct sched_domain *sd; | |
1637 | struct s_data d; | |
1638 | struct rq *rq = NULL; | |
1639 | int i, ret = -ENOMEM; | |
1640 | ||
1641 | alloc_state = __visit_domain_allocation_hell(&d, cpu_map); | |
1642 | if (alloc_state != sa_rootdomain) | |
1643 | goto error; | |
1644 | ||
1645 | /* Set up domains for CPUs specified by the cpu_map: */ | |
1646 | for_each_cpu(i, cpu_map) { | |
1647 | struct sched_domain_topology_level *tl; | |
1648 | ||
1649 | sd = NULL; | |
1650 | for_each_sd_topology(tl) { | |
1651 | sd = build_sched_domain(tl, cpu_map, attr, sd, i); | |
1652 | if (tl == sched_domain_topology) | |
1653 | *per_cpu_ptr(d.sd, i) = sd; | |
1654 | if (tl->flags & SDTL_OVERLAP) | |
1655 | sd->flags |= SD_OVERLAP; | |
1656 | if (cpumask_equal(cpu_map, sched_domain_span(sd))) | |
1657 | break; | |
1658 | } | |
1659 | } | |
1660 | ||
1661 | /* Build the groups for the domains */ | |
1662 | for_each_cpu(i, cpu_map) { | |
1663 | for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { | |
1664 | sd->span_weight = cpumask_weight(sched_domain_span(sd)); | |
1665 | if (sd->flags & SD_OVERLAP) { | |
1666 | if (build_overlap_sched_groups(sd, i)) | |
1667 | goto error; | |
1668 | } else { | |
1669 | if (build_sched_groups(sd, i)) | |
1670 | goto error; | |
1671 | } | |
1672 | } | |
1673 | } | |
1674 | ||
1675 | /* Calculate CPU capacity for physical packages and nodes */ | |
1676 | for (i = nr_cpumask_bits-1; i >= 0; i--) { | |
1677 | if (!cpumask_test_cpu(i, cpu_map)) | |
1678 | continue; | |
1679 | ||
1680 | for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { | |
1681 | claim_allocations(i, sd); | |
1682 | init_sched_groups_capacity(i, sd); | |
1683 | } | |
1684 | } | |
1685 | ||
1686 | /* Attach the domains */ | |
1687 | rcu_read_lock(); | |
1688 | for_each_cpu(i, cpu_map) { | |
1689 | rq = cpu_rq(i); | |
1690 | sd = *per_cpu_ptr(d.sd, i); | |
1691 | ||
1692 | /* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */ | |
1693 | if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity)) | |
1694 | WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); | |
1695 | ||
1696 | cpu_attach_domain(sd, d.rd, i); | |
1697 | } | |
1698 | rcu_read_unlock(); | |
1699 | ||
1700 | if (rq && sched_debug_enabled) { | |
1701 | pr_info("span: %*pbl (max cpu_capacity = %lu)\n", | |
1702 | cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); | |
1703 | } | |
1704 | ||
1705 | ret = 0; | |
1706 | error: | |
1707 | __free_domain_allocs(&d, alloc_state, cpu_map); | |
1708 | return ret; | |
1709 | } | |
1710 | ||
1711 | /* Current sched domains: */ | |
1712 | static cpumask_var_t *doms_cur; | |
1713 | ||
1714 | /* Number of sched domains in 'doms_cur': */ | |
1715 | static int ndoms_cur; | |
1716 | ||
1717 | /* Attributes of custom domains in 'doms_cur': */ | 
1718 | static struct sched_domain_attr *dattr_cur; | |
1719 | ||
1720 | /* | |
1721 | * Special case: If a kmalloc() of a doms_cur partition (array of | |
1722 | * cpumask) fails, then fall back to a single sched domain, | 
1723 | * as determined by the single cpumask fallback_doms. | |
1724 | */ | |
8d5dc512 | 1725 | static cpumask_var_t fallback_doms; |
f2cb1360 IM |
1726 | |
1727 | /* | |
1728 | * arch_update_cpu_topology lets virtualized architectures update the | |
1729 | * CPU core maps. It is supposed to return 1 if the topology changed | |
1730 | * or 0 if it stayed the same. | |
1731 | */ | |
1732 | int __weak arch_update_cpu_topology(void) | |
1733 | { | |
1734 | return 0; | |
1735 | } | |
1736 | ||
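/*
 * Example (hypothetical, for illustration only): an architecture running
 * virtualized can provide a strong definition in its own topology code that
 * overrides the __weak stub above. hv_core_maps_changed() is a made-up
 * helper; the only contract is to return 1 when the CPU core maps changed
 * and 0 when they stayed the same, so the caller knows whether the sched
 * domains need to be rebuilt.
 */
int arch_update_cpu_topology(void)
{
	return hv_core_maps_changed() ? 1 : 0;
}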
1737 | cpumask_var_t *alloc_sched_domains(unsigned int ndoms) | |
1738 | { | |
1739 | int i; | |
1740 | cpumask_var_t *doms; | |
1741 | ||
1742 | doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); | |
1743 | if (!doms) | |
1744 | return NULL; | |
1745 | for (i = 0; i < ndoms; i++) { | |
1746 | if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { | |
1747 | free_sched_domains(doms, i); | |
1748 | return NULL; | |
1749 | } | |
1750 | } | |
1751 | return doms; | |
1752 | } | |
1753 | ||
1754 | void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) | |
1755 | { | |
1756 | unsigned int i; | |
1757 | for (i = 0; i < ndoms; i++) | |
1758 | free_cpumask_var(doms[i]); | |
1759 | kfree(doms); | |
1760 | } | |
1761 | ||
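/*
 * Usage sketch (hypothetical caller, for illustration only): allocate a
 * partition array with alloc_sched_domains() and release it again with
 * free_sched_domains() on an error path. Real callers normally hand the
 * array to partition_sched_domains() instead, which then owns and frees it.
 */
static int example_build_partitions(const struct cpumask *a,
				    const struct cpumask *b)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms)
		return -ENOMEM;

	cpumask_copy(doms[0], a);
	cpumask_copy(doms[1], b);

	if (cpumask_intersects(doms[0], doms[1])) {
		/* Partitions must not overlap; drop the array ourselves. */
		free_sched_domains(doms, 2);
		return -EINVAL;
	}

	/* Normally ownership would now pass to partition_sched_domains(). */
	free_sched_domains(doms, 2);
	return 0;
}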
1762 | /* | |
1763 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. | |
1764 | * For now this just excludes isolated CPUs, but could be used to | |
1765 | * exclude other special cases in the future. | |
1766 | */ | |
8d5dc512 | 1767 | int sched_init_domains(const struct cpumask *cpu_map) |
f2cb1360 IM |
1768 | { |
1769 | int err; | |
1770 | ||
8d5dc512 | 1771 | zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL); |
1676330e | 1772 | zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL); |
8d5dc512 PZ |
1773 | zalloc_cpumask_var(&fallback_doms, GFP_KERNEL); |
1774 | ||
f2cb1360 IM |
1775 | arch_update_cpu_topology(); |
1776 | ndoms_cur = 1; | |
1777 | doms_cur = alloc_sched_domains(ndoms_cur); | |
1778 | if (!doms_cur) | |
1779 | doms_cur = &fallback_doms; | |
1780 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); | |
1781 | err = build_sched_domains(doms_cur[0], NULL); | |
1782 | register_sched_domain_sysctl(); | |
1783 | ||
1784 | return err; | |
1785 | } | |
1786 | ||
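/*
 * Call-site sketch (simplified, assuming the boot path in
 * kernel/sched/core.c; shown only for context): the initial domain set is
 * built from the active CPUs while holding sched_domains_mutex, before any
 * hotplug activity can race with it.
 */
static void example_boot_init_domains(void)
{
	mutex_lock(&sched_domains_mutex);
	sched_init_domains(cpu_active_mask);
	mutex_unlock(&sched_domains_mutex);
}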
1787 | /* | |
1788 | * Detach sched domains from a group of CPUs specified in cpu_map. | 
1789 | * These CPUs will now be attached to the NULL domain. | 
1790 | */ | |
1791 | static void detach_destroy_domains(const struct cpumask *cpu_map) | |
1792 | { | |
1793 | int i; | |
1794 | ||
1795 | rcu_read_lock(); | |
1796 | for_each_cpu(i, cpu_map) | |
1797 | cpu_attach_domain(NULL, &def_root_domain, i); | |
1798 | rcu_read_unlock(); | |
1799 | } | |
1800 | ||
1801 | /* Handle NULL as "default" */ | 
1802 | static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |
1803 | struct sched_domain_attr *new, int idx_new) | |
1804 | { | |
1805 | struct sched_domain_attr tmp; | |
1806 | ||
1807 | /* Fast path: */ | |
1808 | if (!new && !cur) | |
1809 | return 1; | |
1810 | ||
1811 | tmp = SD_ATTR_INIT; | |
1812 | return !memcmp(cur ? (cur + idx_cur) : &tmp, | |
1813 | new ? (new + idx_new) : &tmp, | |
1814 | sizeof(struct sched_domain_attr)); | |
1815 | } | |
1816 | ||
1817 | /* | |
1818 | * Partition sched domains as specified by the 'ndoms_new' | |
1819 | * cpumasks in the array doms_new[] of cpumasks. This compares | |
1820 | * doms_new[] to the current sched domain partitioning, doms_cur[]. | |
1821 | * It destroys each deleted domain and builds each new domain. | |
1822 | * | |
1823 | * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. | |
1824 | * The masks don't intersect (don't overlap). We should set up one | 
1825 | * sched domain for each mask. CPUs not in any of the cpumasks will | |
1826 | * not be load balanced. If the same cpumask appears both in the | |
1827 | * current 'doms_cur' domains and in the new 'doms_new', we can leave | |
1828 | * it as it is. | |
1829 | * | |
1830 | * The passed-in 'doms_new' should be allocated using | 
1831 | * alloc_sched_domains(). This routine takes ownership of it and will | 
1832 | * call free_sched_domains() on it when done with it. If the caller failed | 
1833 | * the alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, | 
1834 | * and partition_sched_domains() will fall back to the single partition | 
1835 | * 'fallback_doms'; this also forces the domains to be rebuilt. | 
1836 | * | |
1837 | * If doms_new == NULL it will be replaced with cpu_active_mask (minus isolated CPUs). | 
1838 | * ndoms_new == 0 is a special case for destroying existing domains, | |
1839 | * and it will not create the default domain. | |
1840 | * | |
1841 | * Call with the hotplug lock held. | 
1842 | */ | |
1843 | void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], | |
1844 | struct sched_domain_attr *dattr_new) | |
1845 | { | |
1846 | int i, j, n; | |
1847 | int new_topology; | |
1848 | ||
1849 | mutex_lock(&sched_domains_mutex); | |
1850 | ||
1851 | /* Always unregister in case we don't destroy any domains: */ | |
1852 | unregister_sched_domain_sysctl(); | |
1853 | ||
1854 | /* Let the architecture update CPU core mappings: */ | |
1855 | new_topology = arch_update_cpu_topology(); | |
1856 | ||
1857 | n = doms_new ? ndoms_new : 0; | |
1858 | ||
1859 | /* Destroy deleted domains: */ | |
1860 | for (i = 0; i < ndoms_cur; i++) { | |
1861 | for (j = 0; j < n && !new_topology; j++) { | |
1862 | if (cpumask_equal(doms_cur[i], doms_new[j]) | |
1863 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | |
1864 | goto match1; | |
1865 | } | |
1866 | /* No match - a current sched domain is not in the new doms_new[] */ | 
1867 | detach_destroy_domains(doms_cur[i]); | |
1868 | match1: | |
1869 | ; | |
1870 | } | |
1871 | ||
1872 | n = ndoms_cur; | |
1873 | if (doms_new == NULL) { | |
1874 | n = 0; | |
1875 | doms_new = &fallback_doms; | |
1876 | cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); | |
1877 | WARN_ON_ONCE(dattr_new); | |
1878 | } | |
1879 | ||
1880 | /* Build new domains: */ | |
1881 | for (i = 0; i < ndoms_new; i++) { | |
1882 | for (j = 0; j < n && !new_topology; j++) { | |
1883 | if (cpumask_equal(doms_new[i], doms_cur[j]) | |
1884 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | |
1885 | goto match2; | |
1886 | } | |
1887 | /* No match - add a new doms_new */ | |
1888 | build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); | |
1889 | match2: | |
1890 | ; | |
1891 | } | |
1892 | ||
1893 | /* Remember the new sched domains: */ | |
1894 | if (doms_cur != &fallback_doms) | |
1895 | free_sched_domains(doms_cur, ndoms_cur); | |
1896 | ||
1897 | kfree(dattr_cur); | |
1898 | doms_cur = doms_new; | |
1899 | dattr_cur = dattr_new; | |
1900 | ndoms_cur = ndoms_new; | |
1901 | ||
1902 | register_sched_domain_sysctl(); | |
1903 | ||
1904 | mutex_unlock(&sched_domains_mutex); | |
1905 | } | |
1906 |
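/*
 * Usage sketch (hypothetical caller, for illustration only): rebuild the
 * domains as two non-overlapping partitions, or fall back to the single
 * default partition when the allocation fails. Ownership of 'doms' is
 * transferred to partition_sched_domains(), which also frees the previous
 * doms_cur array. The caller is assumed to hold the hotplug lock, as the
 * comment above requires.
 */
static void example_repartition(const struct cpumask *a,
				const struct cpumask *b)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms) {
		/* doms_new == NULL && ndoms_new == 1 selects 'fallback_doms'. */
		partition_sched_domains(1, NULL, NULL);
		return;
	}

	cpumask_copy(doms[0], a);
	cpumask_copy(doms[1], b);

	partition_sched_domains(2, doms, NULL);
}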