/*
 * Scheduler topology setup/handling methods
 */
#include <linux/sched.h>
#include <linux/mutex.h>

#include "sched.h"

DEFINE_MUTEX(sched_domains_mutex);

/* Protected by sched_domains_mutex: */
cpumask_var_t sched_domains_tmpmask;

#ifdef CONFIG_SCHED_DEBUG

static __read_mostly int sched_debug_enabled;

static int __init sched_debug_setup(char *str)
{
	sched_debug_enabled = 1;

	return 0;
}
early_param("sched_debug", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
		return -1;
	}

	printk(KERN_CONT "span=%*pbl level=%s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!cpumask_weight(sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_cpus(group));

		printk(KERN_CONT " %d:{ span=%*pbl",
		       group->sgc->id,
		       cpumask_pr_args(sched_group_cpus(group)));

		if ((sd->flags & SD_OVERLAP) && !cpumask_full(sched_group_mask(group))) {
			printk(KERN_CONT " mask=%*pbl",
			       cpumask_pr_args(sched_group_mask(group)));
		}

		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
			printk(KERN_CONT " cap=%lu", group->sgc->capacity);

		if (group == sd->groups && sd->child &&
		    !cpumask_equal(sched_domain_span(sd->child),
				   sched_group_cpus(group))) {
			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
		}

		printk(KERN_CONT " }");

		group = group->next;

		if (group != sd->groups)
			printk(KERN_CONT ",");

	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
	return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */

# define sched_debug_enabled 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUCAPACITY |
			 SD_ASYM_CPUCAPACITY |
			 SD_SHARE_PKG_RESOURCES |
			 SD_SHARE_POWERDOMAIN)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
			    SD_BALANCE_NEWIDLE |
			    SD_BALANCE_FORK |
			    SD_BALANCE_EXEC |
			    SD_ASYM_CPUCAPACITY |
			    SD_SHARE_CPUCAPACITY |
			    SD_SHARE_PKG_RESOURCES |
			    SD_PREFER_SIBLING |
			    SD_SHARE_POWERDOMAIN);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	if (~cflags & pflags)
		return 0;

	return 1;
}
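
/*
 * A small worked example of the "~cflags & pflags" subsumption test above
 * (purely illustrative values, not taken from any real topology): with
 * cflags = 0b0111 and pflags = 0b0011, ~cflags & pflags == 0, i.e. the
 * parent sets no flag the child lacks and can be collapsed; with
 * pflags = 0b1011 the result is non-zero and the parent must be kept.
 */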

static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}

void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
{
	memset(rd, 0, sizeof(*rd));

	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all CPUs as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void destroy_sched_domain(struct sched_domain *sd)
{
	/*
	 * If it's an overlapping domain it has private groups, iterate and
	 * nuke them all.
	 */
	if (sd->flags & SD_OVERLAP) {
		free_sched_groups(sd->groups, 1);
	} else if (atomic_dec_and_test(&sd->groups->ref)) {
		kfree(sd->groups->sgc);
		kfree(sd->groups);
	}
	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
		kfree(sd->shared);
	kfree(sd);
}

static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	while (sd) {
		struct sched_domain *parent = sd->parent;
		destroy_sched_domain(sd);
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain) for this CPU; it
 * allows us to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two CPUs are in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);

static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
}
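
/*
 * Illustrative example (topology assumed, not from any specific machine):
 * on a two-socket box where CPUs 0-3 share socket 0's LLC and CPUs 4-7
 * share socket 1's LLC, the above sets sd_llc_id to 0 for CPUs 0-3 and to
 * 4 for CPUs 4-7, so cpus_share_cache(1, 3) compares 0 == 0 and is true,
 * while cpus_share_cache(3, 4) compares 0 == 4 and is false.
 */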

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}

/* Setup the mask of CPUs configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
	int ret;

	alloc_bootmem_cpumask_var(&cpu_isolated_map);
	ret = cpulist_parse(str, cpu_isolated_map);
	if (ret) {
		pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids);
		return 0;
	}
	return 1;
}
__setup("isolcpus=", isolated_cpu_setup);

struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Build an iteration mask that can exclude certain CPUs from the upwards
 * domain traversal.
 *
 * Only CPUs that can arrive at this group should be considered to continue
 * balancing.
 */
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
	const struct cpumask *sg_span = sched_group_cpus(sg);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	for_each_cpu(i, sg_span) {
		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Can happen in the asymmetric case, where these siblings are
		 * unused. The mask will not be empty because those CPUs that
		 * do have the top domain _should_ span the domain.
		 */
		if (!sibling->child)
			continue;

		/* If we would not end up here, we can't continue from here */
		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
			continue;

		cpumask_set_cpu(i, sched_group_mask(sg));
	}

	/* We must not have empty masks here */
	WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
}

/*
 * Return the canonical balance CPU for this group, this is the first CPU
 * of this group that's also in the iteration mask.
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
}
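
/*
 * Illustrative note (topology assumed for the sake of the comment): with
 * overlapping NUMA domains, the same span, e.g. CPUs 0-3, can appear as a
 * group in the domains of several CPUs. The group mask built above marks
 * only those CPUs whose own domain tree actually reaches this group, and
 * group_balance_cpu() picks the first of them, so a single CPU ends up
 * responsible for balancing that group rather than every CPU in its span.
 */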

static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
	struct sched_group *sg;
	struct cpumask *sg_span;

	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			GFP_KERNEL, cpu_to_node(cpu));

	if (!sg)
		return NULL;

	sg_span = sched_group_cpus(sg);
	if (sd->child)
		cpumask_copy(sg_span, sched_domain_span(sd->child));
	else
		cpumask_copy(sg_span, sched_domain_span(sd));

	return sg;
}

static void init_overlap_sched_group(struct sched_domain *sd,
				     struct sched_group *sg, int cpu)
{
	struct sd_data *sdd = sd->private;
	struct cpumask *sg_span;

	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
	if (atomic_inc_return(&sg->sgc->ref) == 1)
		build_group_mask(sd, sg);

	/*
	 * Initialize sgc->capacity such that even if we mess up the
	 * domains and no possible iteration will get us here, we won't
	 * die on a /0 trap.
	 */
	sg_span = sched_group_cpus(sg);
	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Asymmetric node setups can result in situations where the
		 * domain tree is of unequal depth, make sure to skip domains
		 * that already cover the entire range.
		 *
		 * In that case build_sched_domains() will have terminated the
		 * iteration early and our sibling sd spans will be empty.
		 * Domains should always include the CPU they're built on, so
		 * check that.
		 */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		sg = build_group_from_child_sched_domain(sibling, cpu);
		if (!sg)
			goto fail;

		sg_span = sched_group_cpus(sg);
		cpumask_or(covered, covered, sg_span);

		init_overlap_sched_group(sd, sg, i);

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = first;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}

static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	if (sg) {
		*sg = *per_cpu_ptr(sdd->sg, cpu);
		(*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);

		/* For claim_allocations: */
		atomic_set(&(*sg)->sgc->ref, 1);
	}

	return cpu;
}

/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask correctly,
 * and ->cpu_capacity to 0.
 *
 * Assumes the sched_domain tree is fully constructed
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	get_group(cpu, sdd, &sd->groups);
	atomic_inc(&sd->groups->ref);

	if (cpu != cpumask_first(span))
		return 0;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct sched_group *sg;
		int group, j;

		if (cpumask_test_cpu(i, covered))
			continue;

		group = get_group(i, sdd, &sg);
		cpumask_setall(sched_group_mask(sg));

		for_each_cpu(j, span) {
			if (get_group(j, sdd, NULL) != group)
				continue;

			cpumask_set_cpu(j, covered);
			cpumask_set_cpu(j, sched_group_cpus(sg));
		}

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;

	return 0;
}
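
/*
 * Illustrative example (hypothetical two-socket, two-cores-per-socket box):
 * the DIE-level domain spanning CPUs 0-3 gets two groups, {0,1} and {2,3},
 * linked as a circular list {0,1} -> {2,3} -> {0,1}, with each group built
 * from the span of the corresponding child (MC) domain via get_group().
 */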

/*
 * Initialize sched groups cpu_capacity.
 *
 * cpu_capacity indicates the capacity of a sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_capacity for all the groups in a sched domain will be the same
 * unless there are asymmetries in the topology. If there are asymmetries, the
 * group having more cpu_capacity will pick up more load compared to the
 * group having less cpu_capacity.
 */
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	WARN_ON(!sg);

	do {
		int cpu, max_cpu = -1;

		sg->group_weight = cpumask_weight(sched_group_cpus(sg));

		if (!(sd->flags & SD_ASYM_PACKING))
			goto next;

		for_each_cpu(cpu, sched_group_cpus(sg)) {
			if (max_cpu < 0)
				max_cpu = cpu;
			else if (sched_asym_prefer(cpu, max_cpu))
				max_cpu = cpu;
		}
		sg->asym_prefer_cpu = max_cpu;

next:
		sg = sg->next;
	} while (sg != sd->groups);

	if (cpu != group_balance_cpu(sg))
		return;

	update_group_capacity(sd, cpu);
}

/*
 * Initializers for schedule domains
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */

static int default_relax_domain_level = -1;
int sched_domain_level_max;

static int __init setup_relax_domain_level(char *str)
{
	if (kstrtoint(str, 0, &default_relax_domain_level))
		pr_warn("Unable to set relax_domain_level\n");

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);

static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		else
			request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;
	if (request < sd->level) {
		/* Turn off idle balance on this domain: */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	} else {
		/* Turn on idle balance on this domain: */
		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}

static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu);
		/* Fall through */
	case sa_sd:
		free_percpu(d->sd);
		/* Fall through */
	case sa_sd_storage:
		__sdt_free(cpu_map);
		/* Fall through */
	case sa_none:
		break;
	}
}

static enum s_alloc
__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_storage;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;
	return sa_rootdomain;
}

/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
	struct sd_data *sdd = sd->private;

	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
	*per_cpu_ptr(sdd->sd, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
		*per_cpu_ptr(sdd->sds, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
		*per_cpu_ptr(sdd->sg, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
}

#ifdef CONFIG_NUMA
static int			sched_domains_numa_levels;
enum numa_topology_type		sched_numa_topology_type;
static int			*sched_domains_numa_distance;
int				sched_max_numa_distance;
static struct cpumask		***sched_domains_numa_masks;
static int			sched_domains_curr_level;
#endif

/*
 * SD_flags allowed in topology descriptions.
 *
 * These flags are purely descriptive of the topology and do not prescribe
 * behaviour. Behaviour is artificial and mapped in the below sd_init()
 * function:
 *
 *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
 *   SD_SHARE_PKG_RESOURCES - describes shared caches
 *   SD_NUMA                - describes NUMA topologies
 *   SD_SHARE_POWERDOMAIN   - describes shared power domain
 *   SD_ASYM_CPUCAPACITY    - describes mixed capacity topologies
 *
 * Odd one out, which besides describing the topology has a quirk also
 * prescribes the desired behaviour that goes along with it:
 *
 *   SD_ASYM_PACKING        - describes SMT quirks
 */
#define TOPOLOGY_SD_FLAGS		\
	(SD_SHARE_CPUCAPACITY	|	\
	 SD_SHARE_PKG_RESOURCES |	\
	 SD_NUMA		|	\
	 SD_ASYM_PACKING	|	\
	 SD_ASYM_CPUCAPACITY	|	\
	 SD_SHARE_POWERDOMAIN)

static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl,
	const struct cpumask *cpu_map,
	struct sched_domain *child, int cpu)
{
	struct sd_data *sdd = &tl->data;
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	int sd_id, sd_weight, sd_flags = 0;

#ifdef CONFIG_NUMA
	/*
	 * Ugly hack to pass state to sd_numa_mask()...
	 */
	sched_domains_curr_level = tl->numa_level;
#endif

	sd_weight = cpumask_weight(tl->mask(cpu));

	if (tl->sd_flags)
		sd_flags = (*tl->sd_flags)();
	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
			"wrong sd_flags in topology description\n"))
		sd_flags &= ~TOPOLOGY_SD_FLAGS;

	*sd = (struct sched_domain){
		.min_interval		= sd_weight,
		.max_interval		= 2*sd_weight,
		.busy_factor		= 32,
		.imbalance_pct		= 125,

		.cache_nice_tries	= 0,
		.busy_idx		= 0,
		.idle_idx		= 0,
		.newidle_idx		= 0,
		.wake_idx		= 0,
		.forkexec_idx		= 0,

		.flags			= 1*SD_LOAD_BALANCE
					| 1*SD_BALANCE_NEWIDLE
					| 1*SD_BALANCE_EXEC
					| 1*SD_BALANCE_FORK
					| 0*SD_BALANCE_WAKE
					| 1*SD_WAKE_AFFINE
					| 0*SD_SHARE_CPUCAPACITY
					| 0*SD_SHARE_PKG_RESOURCES
					| 0*SD_SERIALIZE
					| 0*SD_PREFER_SIBLING
					| 0*SD_NUMA
					| sd_flags
					,

		.last_balance		= jiffies,
		.balance_interval	= sd_weight,
		.smt_gain		= 0,
		.max_newidle_lb_cost	= 0,
		.next_decay_max_lb_cost	= jiffies,
		.child			= child,
#ifdef CONFIG_SCHED_DEBUG
		.name			= tl->name,
#endif
	};

	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
	sd_id = cpumask_first(sched_domain_span(sd));

	/*
	 * Convert topological properties into behaviour.
	 */

	if (sd->flags & SD_ASYM_CPUCAPACITY) {
		struct sched_domain *t = sd;

		for_each_lower_domain(t)
			t->flags |= SD_BALANCE_WAKE;
	}

	if (sd->flags & SD_SHARE_CPUCAPACITY) {
		sd->flags |= SD_PREFER_SIBLING;
		sd->imbalance_pct = 110;
		sd->smt_gain = 1178; /* ~15% */

	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->imbalance_pct = 117;
		sd->cache_nice_tries = 1;
		sd->busy_idx = 2;

#ifdef CONFIG_NUMA
	} else if (sd->flags & SD_NUMA) {
		sd->cache_nice_tries = 2;
		sd->busy_idx = 3;
		sd->idle_idx = 2;

		sd->flags |= SD_SERIALIZE;
		if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
			sd->flags &= ~(SD_BALANCE_EXEC |
				       SD_BALANCE_FORK |
				       SD_WAKE_AFFINE);
		}

#endif
	} else {
		sd->flags |= SD_PREFER_SIBLING;
		sd->cache_nice_tries = 1;
		sd->busy_idx = 2;
		sd->idle_idx = 1;
	}

	/*
	 * For all levels sharing cache; connect a sched_domain_shared
	 * instance.
	 */
	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
		atomic_inc(&sd->shared->ref);
		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
	}

	sd->private = sdd;

	return sd;
}

/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static struct sched_domain_topology_level *sched_domain_topology =
	default_topology;

#define for_each_sd_topology(tl)			\
	for (tl = sched_domain_topology; tl->mask; tl++)

void set_sched_topology(struct sched_domain_topology_level *tl)
{
	if (WARN_ON_ONCE(sched_smp_initialized))
		return;

	sched_domain_topology = tl;
}

#ifdef CONFIG_NUMA

static const struct cpumask *sd_numa_mask(int cpu)
{
	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}

static void sched_numa_warn(const char *str)
{
	static int done = false;
	int i, j;

	if (done)
		return;

	done = true;

	printk(KERN_WARNING "ERROR: %s\n\n", str);

	for (i = 0; i < nr_node_ids; i++) {
		printk(KERN_WARNING "  ");
		for (j = 0; j < nr_node_ids; j++)
			printk(KERN_CONT "%02d ", node_distance(i, j));
		printk(KERN_CONT "\n");
	}
	printk(KERN_WARNING "\n");
}

bool find_numa_distance(int distance)
{
	int i;

	if (distance == node_distance(0, 0))
		return true;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		if (sched_domains_numa_distance[i] == distance)
			return true;
	}

	return false;
}

/*
 * A system can have three types of NUMA topology:
 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
 *
 * The difference between a glueless mesh topology and a backplane
 * topology lies in whether communication between not directly
 * connected nodes goes through intermediary nodes (where programs
 * could run), or through backplane controllers. This affects
 * placement of programs.
 *
 * The type of topology can be discerned with the following tests:
 * - If the maximum distance between any nodes is 1 hop, the system
 *   is directly connected.
 * - If for two nodes A and B, located N > 1 hops away from each other,
 *   there is an intermediary node C, which is < N hops away from both
 *   nodes A and B, the system is a glueless mesh.
 */
static void init_numa_topology_type(void)
{
	int a, b, c, n;

	n = sched_max_numa_distance;

	if (sched_domains_numa_levels <= 1) {
		sched_numa_topology_type = NUMA_DIRECT;
		return;
	}

	for_each_online_node(a) {
		for_each_online_node(b) {
			/* Find two nodes furthest removed from each other. */
			if (node_distance(a, b) < n)
				continue;

			/* Is there an intermediary node between a and b? */
			for_each_online_node(c) {
				if (node_distance(a, c) < n &&
				    node_distance(b, c) < n) {
					sched_numa_topology_type =
							NUMA_GLUELESS_MESH;
					return;
				}
			}

			sched_numa_topology_type = NUMA_BACKPLANE;
			return;
		}
	}
}
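
/*
 * Illustrative example (distance values assumed, in the usual ACPI SLIT
 * convention where the local distance is 10): a 4-node ring whose
 * node_distance() rows are {10,20,30,20}, {20,10,20,30}, {30,20,10,20} and
 * {20,30,20,10} has a maximum distance of 30, and for any pair at distance
 * 30 there is an intermediary node at distance 20 from both, so the test
 * above classifies it as NUMA_GLUELESS_MESH.
 */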

void sched_init_numa(void)
{
	int next_distance, curr_distance = node_distance(0, 0);
	struct sched_domain_topology_level *tl;
	int level = 0;
	int i, j, k;

	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
	if (!sched_domains_numa_distance)
		return;

	/*
	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
	 * unique distances in the node_distance() table.
	 *
	 * Assumes node_distance(0,j) includes all distances in
	 * node_distance(i,j) in order to avoid cubic time.
	 */
	next_distance = curr_distance;
	for (i = 0; i < nr_node_ids; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			for (k = 0; k < nr_node_ids; k++) {
				int distance = node_distance(i, k);

				if (distance > curr_distance &&
				    (distance < next_distance ||
				     next_distance == curr_distance))
					next_distance = distance;

				/*
				 * While not a strong assumption it would be nice to know
				 * about cases where if node A is connected to B, B is not
				 * equally connected to A.
				 */
				if (sched_debug() && node_distance(k, i) != distance)
					sched_numa_warn("Node-distance not symmetric");

				if (sched_debug() && i && !find_numa_distance(distance))
					sched_numa_warn("Node-0 not representative");
			}
			if (next_distance != curr_distance) {
				sched_domains_numa_distance[level++] = next_distance;
				sched_domains_numa_levels = level;
				curr_distance = next_distance;
			} else break;
		}

		/*
		 * In case of sched_debug() we verify the above assumption.
		 */
		if (!sched_debug())
			break;
	}

	if (!level)
		return;

	/*
	 * 'level' contains the number of unique distances, excluding the
	 * identity distance node_distance(i,i).
	 *
	 * The sched_domains_numa_distance[] array includes the actual distance
	 * numbers.
	 */

	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
	 * the array will contain fewer than 'level' members. This could be
	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
	 * in other functions.
	 *
	 * We reset it to 'level' at the end of this function.
	 */
	sched_domains_numa_levels = 0;

	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
	if (!sched_domains_numa_masks)
		return;

	/*
	 * Now for each level, construct a mask per node which contains all
	 * CPUs of nodes that are that many hops away from us.
	 */
	for (i = 0; i < level; i++) {
		sched_domains_numa_masks[i] =
			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
		if (!sched_domains_numa_masks[i])
			return;

		for (j = 0; j < nr_node_ids; j++) {
			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!mask)
				return;

			sched_domains_numa_masks[i][j] = mask;

			for_each_node(k) {
				if (node_distance(j, k) > sched_domains_numa_distance[i])
					continue;

				cpumask_or(mask, mask, cpumask_of_node(k));
			}
		}
	}

	/* Compute default topology size */
	for (i = 0; sched_domain_topology[i].mask; i++);

	tl = kzalloc((i + level + 1) *
			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
	if (!tl)
		return;

	/*
	 * Copy the default topology bits..
	 */
	for (i = 0; sched_domain_topology[i].mask; i++)
		tl[i] = sched_domain_topology[i];

	/*
	 * .. and append 'j' levels of NUMA goodness.
	 */
	for (j = 0; j < level; i++, j++) {
		tl[i] = (struct sched_domain_topology_level){
			.mask = sd_numa_mask,
			.sd_flags = cpu_numa_flags,
			.flags = SDTL_OVERLAP,
			.numa_level = j,
			SD_INIT_NAME(NUMA)
		};
	}

	sched_domain_topology = tl;

	sched_domains_numa_levels = level;
	sched_max_numa_distance = sched_domains_numa_distance[level - 1];

	init_numa_topology_type();
}
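
/*
 * Illustrative walk-through (distances assumed): with the 4-node ring used
 * in the example above, the deduplicating pass finds the unique non-local
 * distances {20, 30}, so level == 2, sched_max_numa_distance == 30, and two
 * SDTL_OVERLAP "NUMA" topology levels are appended after the default
 * SMT/MC/DIE entries; sched_domains_numa_masks[0][j] then covers every node
 * within distance 20 of node j, and sched_domains_numa_masks[1][j] covers
 * all nodes.
 */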

void sched_domains_numa_masks_set(unsigned int cpu)
{
	int node = cpu_to_node(cpu);
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (node_distance(j, node) <= sched_domains_numa_distance[i])
				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

void sched_domains_numa_masks_clear(unsigned int cpu)
{
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++)
			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
	}
}

#endif /* CONFIG_NUMA */

static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sds = alloc_percpu(struct sched_domain_shared *);
		if (!sdd->sds)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
		if (!sdd->sgc)
			return -ENOMEM;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_domain_shared *sds;
			struct sched_group *sg;
			struct sched_group_capacity *sgc;

			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sds = kzalloc_node(sizeof(struct sched_domain_shared),
					GFP_KERNEL, cpu_to_node(j));
			if (!sds)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sds, j) = sds;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

			sg->next = sg;

			*per_cpu_ptr(sdd->sg, j) = sg;

			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sgc)
				return -ENOMEM;

#ifdef CONFIG_SCHED_DEBUG
			sgc->id = j;
#endif

			*per_cpu_ptr(sdd->sgc, j) = sgc;
		}
	}

	return 0;
}

static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sds)
				kfree(*per_cpu_ptr(sdd->sds, j));
			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgc)
				kfree(*per_cpu_ptr(sdd->sgc, j));
		}
		free_percpu(sdd->sd);
		sdd->sd = NULL;
		free_percpu(sdd->sds);
		sdd->sds = NULL;
		free_percpu(sdd->sg);
		sdd->sg = NULL;
		free_percpu(sdd->sgc);
		sdd->sgc = NULL;
	}
}

struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
		struct sched_domain *child, int cpu)
{
	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);

	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
		child->parent = sd;

		if (!cpumask_subset(sched_domain_span(child),
				    sched_domain_span(sd))) {
			pr_err("BUG: arch topology borken\n");
#ifdef CONFIG_SCHED_DEBUG
			pr_err("     the %s domain not a subset of the %s domain\n",
					child->name, sd->name);
#endif
			/* Fixup, ensure @sd has at least @child CPUs. */
			cpumask_or(sched_domain_span(sd),
				   sched_domain_span(sd),
				   sched_domain_span(child));
		}

	}
	set_domain_attribute(sd, attr);

	return sd;
}

/*
 * Build sched domains for a given set of CPUs and attach the sched domains
 * to the individual CPUs
 */
static int
build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
{
	enum s_alloc alloc_state;
	struct sched_domain *sd;
	struct s_data d;
	struct rq *rq = NULL;
	int i, ret = -ENOMEM;

	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;

	/* Set up domains for CPUs specified by the cpu_map: */
	for_each_cpu(i, cpu_map) {
		struct sched_domain_topology_level *tl;

		sd = NULL;
		for_each_sd_topology(tl) {
			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
			if (tl == sched_domain_topology)
				*per_cpu_ptr(d.sd, i) = sd;
			if (tl->flags & SDTL_OVERLAP)
				sd->flags |= SD_OVERLAP;
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
		}
	}

	/* Build the groups for the domains */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
		}
	}

	/* Calculate CPU capacity for physical packages and nodes */
	for (i = nr_cpumask_bits-1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))
			continue;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			claim_allocations(i, sd);
			init_sched_groups_capacity(i, sd);
		}
	}

	/* Attach the domains */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		rq = cpu_rq(i);
		sd = *per_cpu_ptr(d.sd, i);

		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);

		cpu_attach_domain(sd, d.rd, i);
	}
	rcu_read_unlock();

	if (rq && sched_debug_enabled) {
		pr_info("span: %*pbl (max cpu_capacity = %lu)\n",
			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
	}

	ret = 0;
error:
	__free_domain_allocs(&d, alloc_state, cpu_map);
	return ret;
}

/* Current sched domains: */
static cpumask_var_t *doms_cur;

/* Number of sched domains in 'doms_cur': */
static int ndoms_cur;

/* Attributes of custom domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;

/*
 * Special case: If a kmalloc() of a doms_cur partition (array of
 * cpumask) fails, then fallback to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t fallback_doms;

/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * CPU core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __weak arch_update_cpu_topology(void)
{
	return 0;
}

cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}

void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;
	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}

/*
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
 * For now this just excludes isolated CPUs, but could be used to
 * exclude other special cases in the future.
 */
int sched_init_domains(const struct cpumask *cpu_map)
{
	int err;

	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = alloc_sched_domains(ndoms_cur);
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
	err = build_sched_domains(doms_cur[0], NULL);
	register_sched_domain_sysctl();

	return err;
}

/*
 * Detach sched domains from a group of CPUs specified in cpu_map
 * These CPUs will now be attached to the NULL domain
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	int i;

	rcu_read_lock();
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();
}

/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* Fast path: */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;
	return !memcmp(cur ? (cur + idx_cur) : &tmp,
			new ? (new + idx_new) : &tmp,
			sizeof(struct sched_domain_attr));
}

/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should setup one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains. This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fallback to the single partition
 * 'fallback_doms', it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
	int i, j, n;
	int new_topology;

	mutex_lock(&sched_domains_mutex);

	/* Always unregister in case we don't destroy any domains: */
	unregister_sched_domain_sysctl();

	/* Let the architecture update CPU core mappings: */
	new_topology = arch_update_cpu_topology();

	n = doms_new ? ndoms_new : 0;

	/* Destroy deleted domains: */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j])
			    && dattrs_equal(dattr_cur, i, dattr_new, j))
				goto match1;
		}
		/* No match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur[i]);
match1:
		;
	}

	n = ndoms_cur;
	if (doms_new == NULL) {
		n = 0;
		doms_new = &fallback_doms;
		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
		WARN_ON_ONCE(dattr_new);
	}

	/* Build new domains: */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j])
			    && dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* No match - add a new doms_new */
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

	/* Remember the new sched domains: */
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);

	kfree(dattr_cur);
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	register_sched_domain_sysctl();

	mutex_unlock(&sched_domains_mutex);
}
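
/*
 * Illustrative example (cpumasks assumed): if doms_cur[] = { 0-3, 4-7 } and
 * a caller passes doms_new[] = { 0-3, 4-5 }, the first pass keeps the 0-3
 * partition (it matches), detaches and destroys the 4-7 domains, and the
 * second pass builds fresh domains only for 4-5; CPUs 6-7 end up attached
 * to the NULL domain and are no longer load balanced.
 */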