/*
 * Scheduler topology setup/handling methods
 */
#include <linux/sched.h>
#include <linux/mutex.h>

#include "sched.h"

DEFINE_MUTEX(sched_domains_mutex);

/* Protected by sched_domains_mutex: */
cpumask_var_t sched_domains_tmpmask;

#ifdef CONFIG_SCHED_DEBUG

static __read_mostly int sched_debug_enabled;

static int __init sched_debug_setup(char *str)
{
	sched_debug_enabled = 1;

	return 0;
}
early_param("sched_debug", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
		return -1;
	}

	printk(KERN_CONT "span %*pbl level %s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!cpumask_weight(sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_cpus(group));

		printk(KERN_CONT " %*pbl",
		       cpumask_pr_args(sched_group_cpus(group)));
		if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
			printk(KERN_CONT " (cpu_capacity = %lu)",
					group->sgc->capacity);
		}

		group = group->next;
	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");

	return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */

# define sched_debug_enabled 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

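/*
 * A sched_domain is "degenerate" when it cannot influence scheduling: it
 * spans a single CPU, or it sets no flags that need more than one group.
 * Degenerate domains are collapsed out of the hierarchy in
 * cpu_attach_domain() below.
 */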
static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUCAPACITY |
			 SD_ASYM_CPUCAPACITY |
			 SD_SHARE_PKG_RESOURCES |
			 SD_SHARE_POWERDOMAIN)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
				SD_BALANCE_NEWIDLE |
				SD_BALANCE_FORK |
				SD_BALANCE_EXEC |
				SD_ASYM_CPUCAPACITY |
				SD_SHARE_CPUCAPACITY |
				SD_SHARE_PKG_RESOURCES |
				SD_PREFER_SIBLING |
				SD_SHARE_POWERDOMAIN);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	if (~cflags & pflags)
		return 0;

	return 1;
}

static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}

void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
{
	memset(rd, 0, sizeof(*rd));

	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all CPUs as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void destroy_sched_domain(struct sched_domain *sd)
{
	/*
	 * If it's an overlapping domain it has private groups, iterate and
	 * nuke them all.
	 */
	if (sd->flags & SD_OVERLAP) {
		free_sched_groups(sd->groups, 1);
	} else if (atomic_dec_and_test(&sd->groups->ref)) {
		kfree(sd->groups->sgc);
		kfree(sd->groups);
	}
	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
		kfree(sd->shared);
	kfree(sd);
}

static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	while (sd) {
		struct sched_domain *parent = sd->parent;
		destroy_sched_domain(sd);
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this CPU; it
 * allows us to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two CPUs are in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);

static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}

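/*
 * "isolcpus=" takes a cpulist, e.g. isolcpus=1-3,5.  The isolated CPUs are
 * left out of the sched domains built by init_sched_domains() and
 * partition_sched_domains() below, so they are never load balanced.
 */
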
/* Setup the mask of CPUs configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
	int ret;

	alloc_bootmem_cpumask_var(&cpu_isolated_map);
	ret = cpulist_parse(str, cpu_isolated_map);
	if (ret) {
		pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids);
		return 0;
	}
	return 1;
}
__setup("isolcpus=", isolated_cpu_setup);

struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain *rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Build an iteration mask that can exclude certain CPUs from the upwards
 * domain traversal.
 *
 * Asymmetric node setups can result in situations where the domain tree is of
 * unequal depth, make sure to skip domains that already cover the entire
 * range.
 *
 * In that case build_sched_domains() will have terminated the iteration early
 * and our sibling sd spans will be empty. Domains should always include the
 * CPU they're built on, so check that.
 */
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
	const struct cpumask *span = sched_domain_span(sd);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	for_each_cpu(i, span) {
		sibling = *per_cpu_ptr(sdd->sd, i);
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		cpumask_set_cpu(i, sched_group_mask(sg));
	}
}

/*
 * Return the canonical balance CPU for this group, this is the first CPU
 * of this group that's also in the iteration mask.
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/* See the comment near build_group_mask(). */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
				GFP_KERNEL, cpu_to_node(cpu));

		if (!sg)
			goto fail;

		sg_span = sched_group_cpus(sg);
		if (sibling->child)
			cpumask_copy(sg_span, sched_domain_span(sibling->child));
		else
			cpumask_set_cpu(i, sg_span);

		cpumask_or(covered, covered, sg_span);

		sg->sgc = *per_cpu_ptr(sdd->sgc, i);
		if (atomic_inc_return(&sg->sgc->ref) == 1)
			build_group_mask(sd, sg);

		/*
		 * Initialize sgc->capacity such that even if we mess up the
		 * domains and no possible iteration will get us here, we won't
		 * die on a /0 trap.
		 */
		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
		sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;

		/*
		 * Make sure the first group of this domain contains the
		 * canonical balance CPU. Otherwise the sched_domain iteration
		 * breaks. See update_sg_lb_stats().
		 */
		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
		    group_balance_cpu(sg) == cpu)
			groups = sg;

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = groups;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}

static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	if (sg) {
		*sg = *per_cpu_ptr(sdd->sg, cpu);
		(*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);

		/* For claim_allocations: */
		atomic_set(&(*sg)->sgc->ref, 1);
	}

	return cpu;
}

/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask correctly,
 * and ->cpu_capacity to 0.
 *
 * Assumes the sched_domain tree is fully constructed
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	get_group(cpu, sdd, &sd->groups);
	atomic_inc(&sd->groups->ref);

	if (cpu != cpumask_first(span))
		return 0;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct sched_group *sg;
		int group, j;

		if (cpumask_test_cpu(i, covered))
			continue;

		group = get_group(i, sdd, &sg);
		cpumask_setall(sched_group_mask(sg));

		for_each_cpu(j, span) {
			if (get_group(j, sdd, NULL) != group)
				continue;

			cpumask_set_cpu(j, covered);
			cpumask_set_cpu(j, sched_group_cpus(sg));
		}

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;

	return 0;
}

/*
 * Initialize sched groups cpu_capacity.
 *
 * cpu_capacity indicates the capacity of the sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_capacity for all the groups in a sched domain will be the same
 * unless there are asymmetries in the topology. If there are asymmetries,
 * the group having more cpu_capacity will pick up more load compared to the
 * group having less cpu_capacity.
 */
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	WARN_ON(!sg);

	do {
		int cpu, max_cpu = -1;

		sg->group_weight = cpumask_weight(sched_group_cpus(sg));

		if (!(sd->flags & SD_ASYM_PACKING))
			goto next;

		for_each_cpu(cpu, sched_group_cpus(sg)) {
			if (max_cpu < 0)
				max_cpu = cpu;
			else if (sched_asym_prefer(cpu, max_cpu))
				max_cpu = cpu;
		}
		sg->asym_prefer_cpu = max_cpu;

next:
		sg = sg->next;
	} while (sg != sd->groups);

	if (cpu != group_balance_cpu(sg))
		return;

	update_group_capacity(sd, cpu);
}

/*
 * Initializers for schedule domains
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */

static int default_relax_domain_level = -1;
int sched_domain_level_max;

static int __init setup_relax_domain_level(char *str)
{
	if (kstrtoint(str, 0, &default_relax_domain_level))
		pr_warn("Unable to set relax_domain_level\n");

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);

static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		else
			request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;
	if (request < sd->level) {
		/* Turn off idle balance on this domain: */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	} else {
		/* Turn on idle balance on this domain: */
		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}

static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu);
		/* Fall through */
	case sa_sd:
		free_percpu(d->sd);
		/* Fall through */
	case sa_sd_storage:
		__sdt_free(cpu_map);
		/* Fall through */
	case sa_none:
		break;
	}
}

static enum s_alloc
__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_storage;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;
	return sa_rootdomain;
}

/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
	struct sd_data *sdd = sd->private;

	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
	*per_cpu_ptr(sdd->sd, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
		*per_cpu_ptr(sdd->sds, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
		*per_cpu_ptr(sdd->sg, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
}

#ifdef CONFIG_NUMA
static int sched_domains_numa_levels;
enum numa_topology_type sched_numa_topology_type;
static int *sched_domains_numa_distance;
int sched_max_numa_distance;
static struct cpumask ***sched_domains_numa_masks;
static int sched_domains_curr_level;
#endif

/*
 * SD_flags allowed in topology descriptions.
 *
 * These flags are purely descriptive of the topology and do not prescribe
 * behaviour. Behaviour is artificial and mapped in the below sd_init()
 * function:
 *
 *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
 *   SD_SHARE_PKG_RESOURCES - describes shared caches
 *   SD_NUMA                - describes NUMA topologies
 *   SD_SHARE_POWERDOMAIN   - describes shared power domain
 *   SD_ASYM_CPUCAPACITY    - describes mixed capacity topologies
 *
 * Odd one out, which besides describing the topology has a quirk and also
 * prescribes the desired behaviour that goes along with it:
 *
 *   SD_ASYM_PACKING        - describes SMT quirks
 */
#define TOPOLOGY_SD_FLAGS		\
	(SD_SHARE_CPUCAPACITY |		\
	 SD_SHARE_PKG_RESOURCES |	\
	 SD_NUMA |			\
	 SD_ASYM_PACKING |		\
	 SD_ASYM_CPUCAPACITY |		\
	 SD_SHARE_POWERDOMAIN)

static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl,
	const struct cpumask *cpu_map,
	struct sched_domain *child, int cpu)
{
	struct sd_data *sdd = &tl->data;
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	int sd_id, sd_weight, sd_flags = 0;

#ifdef CONFIG_NUMA
	/*
	 * Ugly hack to pass state to sd_numa_mask()...
	 */
	sched_domains_curr_level = tl->numa_level;
#endif

	sd_weight = cpumask_weight(tl->mask(cpu));

	if (tl->sd_flags)
		sd_flags = (*tl->sd_flags)();
	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
			"wrong sd_flags in topology description\n"))
		sd_flags &= ~TOPOLOGY_SD_FLAGS;

	*sd = (struct sched_domain){
		.min_interval		= sd_weight,
		.max_interval		= 2*sd_weight,
		.busy_factor		= 32,
		.imbalance_pct		= 125,

		.cache_nice_tries	= 0,
		.busy_idx		= 0,
		.idle_idx		= 0,
		.newidle_idx		= 0,
		.wake_idx		= 0,
		.forkexec_idx		= 0,

		.flags			= 1*SD_LOAD_BALANCE
					| 1*SD_BALANCE_NEWIDLE
					| 1*SD_BALANCE_EXEC
					| 1*SD_BALANCE_FORK
					| 0*SD_BALANCE_WAKE
					| 1*SD_WAKE_AFFINE
					| 0*SD_SHARE_CPUCAPACITY
					| 0*SD_SHARE_PKG_RESOURCES
					| 0*SD_SERIALIZE
					| 0*SD_PREFER_SIBLING
					| 0*SD_NUMA
					| sd_flags
					,

		.last_balance		= jiffies,
		.balance_interval	= sd_weight,
		.smt_gain		= 0,
		.max_newidle_lb_cost	= 0,
		.next_decay_max_lb_cost	= jiffies,
		.child			= child,
#ifdef CONFIG_SCHED_DEBUG
		.name			= tl->name,
#endif
	};

	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
	sd_id = cpumask_first(sched_domain_span(sd));

	/*
	 * Convert topological properties into behaviour.
	 */

	if (sd->flags & SD_ASYM_CPUCAPACITY) {
		struct sched_domain *t = sd;

		for_each_lower_domain(t)
			t->flags |= SD_BALANCE_WAKE;
	}

	if (sd->flags & SD_SHARE_CPUCAPACITY) {
		sd->flags |= SD_PREFER_SIBLING;
		sd->imbalance_pct = 110;
		sd->smt_gain = 1178; /* ~15% */

	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->imbalance_pct = 117;
		sd->cache_nice_tries = 1;
		sd->busy_idx = 2;

#ifdef CONFIG_NUMA
	} else if (sd->flags & SD_NUMA) {
		sd->cache_nice_tries = 2;
		sd->busy_idx = 3;
		sd->idle_idx = 2;

		sd->flags |= SD_SERIALIZE;
		if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
			sd->flags &= ~(SD_BALANCE_EXEC |
				       SD_BALANCE_FORK |
				       SD_WAKE_AFFINE);
		}

#endif
	} else {
		sd->flags |= SD_PREFER_SIBLING;
		sd->cache_nice_tries = 1;
		sd->busy_idx = 2;
		sd->idle_idx = 1;
	}

	/*
	 * For all levels sharing cache; connect a sched_domain_shared
	 * instance.
	 */
	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
		atomic_inc(&sd->shared->ref);
		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
	}

	sd->private = sdd;

	return sd;
}

/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static struct sched_domain_topology_level *sched_domain_topology =
	default_topology;

#define for_each_sd_topology(tl)			\
	for (tl = sched_domain_topology; tl->mask; tl++)

void set_sched_topology(struct sched_domain_topology_level *tl)
{
	if (WARN_ON_ONCE(sched_smp_initialized))
		return;

	sched_domain_topology = tl;
}

#ifdef CONFIG_NUMA

static const struct cpumask *sd_numa_mask(int cpu)
{
	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}

static void sched_numa_warn(const char *str)
{
	static int done = false;
	int i,j;

	if (!sched_debug())
		return;

	if (done)
		return;

	done = true;

	printk(KERN_WARNING "ERROR: %s\n\n", str);

	for (i = 0; i < nr_node_ids; i++) {
		printk(KERN_WARNING " ");
		for (j = 0; j < nr_node_ids; j++)
			printk(KERN_CONT "%02d ", node_distance(i,j));
		printk(KERN_CONT "\n");
	}
	printk(KERN_WARNING "\n");
}

bool find_numa_distance(int distance)
{
	int i;

	if (distance == node_distance(0, 0))
		return true;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		if (sched_domains_numa_distance[i] == distance)
			return true;
	}

	return false;
}

/*
 * A system can have three types of NUMA topology:
 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
 *
 * The difference between a glueless mesh topology and a backplane
 * topology lies in whether communication between not directly
 * connected nodes goes through intermediary nodes (where programs
 * could run), or through backplane controllers. This affects
 * placement of programs.
 *
 * The type of topology can be discerned with the following tests:
 * - If the maximum distance between any nodes is 1 hop, the system
 *   is directly connected.
 * - If for two nodes A and B, located N > 1 hops away from each other,
 *   there is an intermediary node C, which is < N hops away from both
 *   nodes A and B, the system is a glueless mesh.
 */
static void init_numa_topology_type(void)
{
	int a, b, c, n;

	n = sched_max_numa_distance;

	if (sched_domains_numa_levels <= 1) {
		sched_numa_topology_type = NUMA_DIRECT;
		return;
	}

	for_each_online_node(a) {
		for_each_online_node(b) {
			/* Find two nodes furthest removed from each other. */
			if (node_distance(a, b) < n)
				continue;

			/* Is there an intermediary node between a and b? */
			for_each_online_node(c) {
				if (node_distance(a, c) < n &&
				    node_distance(b, c) < n) {
					sched_numa_topology_type =
							NUMA_GLUELESS_MESH;
					return;
				}
			}

			sched_numa_topology_type = NUMA_BACKPLANE;
			return;
		}
	}
}

void sched_init_numa(void)
{
	int next_distance, curr_distance = node_distance(0, 0);
	struct sched_domain_topology_level *tl;
	int level = 0;
	int i, j, k;

	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
	if (!sched_domains_numa_distance)
		return;

	/*
	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
	 * unique distances in the node_distance() table.
	 *
	 * Assumes node_distance(0,j) includes all distances in
	 * node_distance(i,j) in order to avoid cubic time.
	 */
	next_distance = curr_distance;
	for (i = 0; i < nr_node_ids; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			for (k = 0; k < nr_node_ids; k++) {
				int distance = node_distance(i, k);

				if (distance > curr_distance &&
				    (distance < next_distance ||
				     next_distance == curr_distance))
					next_distance = distance;

				/*
				 * While not a strong assumption it would be nice to know
				 * about cases where if node A is connected to B, B is not
				 * equally connected to A.
				 */
				if (sched_debug() && node_distance(k, i) != distance)
					sched_numa_warn("Node-distance not symmetric");

				if (sched_debug() && i && !find_numa_distance(distance))
					sched_numa_warn("Node-0 not representative");
			}
			if (next_distance != curr_distance) {
				sched_domains_numa_distance[level++] = next_distance;
				sched_domains_numa_levels = level;
				curr_distance = next_distance;
			} else break;
		}

		/*
		 * In case of sched_debug() we verify the above assumption.
		 */
		if (!sched_debug())
			break;
	}

	if (!level)
		return;

	/*
	 * 'level' contains the number of unique distances, excluding the
	 * identity distance node_distance(i,i).
	 *
	 * The sched_domains_numa_distance[] array includes the actual distance
	 * numbers.
	 */

	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
	 * the array will contain less than 'level' members. This could be
	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
	 * in other functions.
	 *
	 * We reset it to 'level' at the end of this function.
	 */
	sched_domains_numa_levels = 0;

	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
	if (!sched_domains_numa_masks)
		return;

	/*
	 * Now for each level, construct a mask per node which contains all
	 * CPUs of nodes that are that many hops away from us.
	 */
	for (i = 0; i < level; i++) {
		sched_domains_numa_masks[i] =
			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
		if (!sched_domains_numa_masks[i])
			return;

		for (j = 0; j < nr_node_ids; j++) {
			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!mask)
				return;

			sched_domains_numa_masks[i][j] = mask;

			for_each_node(k) {
				if (node_distance(j, k) > sched_domains_numa_distance[i])
					continue;

				cpumask_or(mask, mask, cpumask_of_node(k));
			}
		}
	}

	/* Compute default topology size */
	for (i = 0; sched_domain_topology[i].mask; i++);

	tl = kzalloc((i + level + 1) *
			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
	if (!tl)
		return;

	/*
	 * Copy the default topology bits..
	 */
	for (i = 0; sched_domain_topology[i].mask; i++)
		tl[i] = sched_domain_topology[i];

	/*
	 * .. and append 'j' levels of NUMA goodness.
	 */
	for (j = 0; j < level; i++, j++) {
		tl[i] = (struct sched_domain_topology_level){
			.mask = sd_numa_mask,
			.sd_flags = cpu_numa_flags,
			.flags = SDTL_OVERLAP,
			.numa_level = j,
			SD_INIT_NAME(NUMA)
		};
	}

	sched_domain_topology = tl;

	sched_domains_numa_levels = level;
	sched_max_numa_distance = sched_domains_numa_distance[level - 1];

	init_numa_topology_type();
}

void sched_domains_numa_masks_set(unsigned int cpu)
{
	int node = cpu_to_node(cpu);
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (node_distance(j, node) <= sched_domains_numa_distance[i])
				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

void sched_domains_numa_masks_clear(unsigned int cpu)
{
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++)
			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
	}
}

#endif /* CONFIG_NUMA */

static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sds = alloc_percpu(struct sched_domain_shared *);
		if (!sdd->sds)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
		if (!sdd->sgc)
			return -ENOMEM;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_domain_shared *sds;
			struct sched_group *sg;
			struct sched_group_capacity *sgc;

			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sds = kzalloc_node(sizeof(struct sched_domain_shared),
					GFP_KERNEL, cpu_to_node(j));
			if (!sds)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sds, j) = sds;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

			sg->next = sg;

			*per_cpu_ptr(sdd->sg, j) = sg;

			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sgc)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sgc, j) = sgc;
		}
	}

	return 0;
}

static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sds)
				kfree(*per_cpu_ptr(sdd->sds, j));
			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgc)
				kfree(*per_cpu_ptr(sdd->sgc, j));
		}
		free_percpu(sdd->sd);
		sdd->sd = NULL;
		free_percpu(sdd->sds);
		sdd->sds = NULL;
		free_percpu(sdd->sg);
		sdd->sg = NULL;
		free_percpu(sdd->sgc);
		sdd->sgc = NULL;
	}
}

struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
		struct sched_domain *child, int cpu)
{
	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);

	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
		child->parent = sd;

		if (!cpumask_subset(sched_domain_span(child),
				    sched_domain_span(sd))) {
			pr_err("BUG: arch topology borken\n");
#ifdef CONFIG_SCHED_DEBUG
			pr_err("     the %s domain not a subset of the %s domain\n",
					child->name, sd->name);
#endif
			/* Fixup, ensure @sd has at least @child cpus. */
			cpumask_or(sched_domain_span(sd),
				   sched_domain_span(sd),
				   sched_domain_span(child));
		}

	}
	set_domain_attribute(sd, attr);

	return sd;
}

/*
 * Build sched domains for a given set of CPUs and attach the sched domains
 * to the individual CPUs
 */
static int
build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
{
	enum s_alloc alloc_state;
	struct sched_domain *sd;
	struct s_data d;
	struct rq *rq = NULL;
	int i, ret = -ENOMEM;

	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;

	/* Set up domains for CPUs specified by the cpu_map: */
	for_each_cpu(i, cpu_map) {
		struct sched_domain_topology_level *tl;

		sd = NULL;
		for_each_sd_topology(tl) {
			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
			if (tl == sched_domain_topology)
				*per_cpu_ptr(d.sd, i) = sd;
			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
				sd->flags |= SD_OVERLAP;
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
		}
	}

	/* Build the groups for the domains */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
		}
	}

	/* Calculate CPU capacity for physical packages and nodes */
	for (i = nr_cpumask_bits-1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))
			continue;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			claim_allocations(i, sd);
			init_sched_groups_capacity(i, sd);
		}
	}

	/* Attach the domains */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		rq = cpu_rq(i);
		sd = *per_cpu_ptr(d.sd, i);

		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);

		cpu_attach_domain(sd, d.rd, i);
	}
	rcu_read_unlock();

	if (rq && sched_debug_enabled) {
		pr_info("span: %*pbl (max cpu_capacity = %lu)\n",
			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
	}

	ret = 0;
error:
	__free_domain_allocs(&d, alloc_state, cpu_map);

	return ret;
}

/* Current sched domains: */
static cpumask_var_t *doms_cur;

/* Number of sched domains in 'doms_cur': */
static int ndoms_cur;

/* Attributes of custom domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;

/*
 * Special case: If a kmalloc() of a doms_cur partition (array of
 * cpumask) fails, then fallback to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
cpumask_var_t fallback_doms;

/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * CPU core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __weak arch_update_cpu_topology(void)
{
	return 0;
}

cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}

void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;

	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}

/*
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
 * For now this just excludes isolated CPUs, but could be used to
 * exclude other special cases in the future.
 */
int init_sched_domains(const struct cpumask *cpu_map)
{
	int err;

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = alloc_sched_domains(ndoms_cur);
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
	err = build_sched_domains(doms_cur[0], NULL);
	register_sched_domain_sysctl();

	return err;
}

/*
 * Detach sched domains from a group of CPUs specified in cpu_map
 * These CPUs will now be attached to the NULL domain
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	int i;

	rcu_read_lock();
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();
}

/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* Fast path: */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;

	return !memcmp(cur ? (cur + idx_cur) : &tmp,
			new ? (new + idx_new) : &tmp,
			sizeof(struct sched_domain_attr));
}

/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should setup one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains. This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fallback to the single partition
 * 'fallback_doms', it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
	int i, j, n;
	int new_topology;

	mutex_lock(&sched_domains_mutex);

	/* Always unregister in case we don't destroy any domains: */
	unregister_sched_domain_sysctl();

	/* Let the architecture update CPU core mappings: */
	new_topology = arch_update_cpu_topology();

	n = doms_new ? ndoms_new : 0;

	/* Destroy deleted domains: */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j])
			    && dattrs_equal(dattr_cur, i, dattr_new, j))
				goto match1;
		}
		/* No match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur[i]);
match1:
		;
	}

	n = ndoms_cur;
	if (doms_new == NULL) {
		n = 0;
		doms_new = &fallback_doms;
		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
		WARN_ON_ONCE(dattr_new);
	}

	/* Build new domains: */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j])
			    && dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* No match - add a new doms_new */
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

	/* Remember the new sched domains: */
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);

	kfree(dattr_cur);
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	register_sched_domain_sysctl();

	mutex_unlock(&sched_domains_mutex);
}