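/*
 * Illustrative sketch of how this file is used: each SCHED_FEAT(name,
 * default) entry below is turned into a feature bit by kernel/sched/sched.h,
 * roughly along these lines (a jump-label based variant also exists when
 * CONFIG_SCHED_DEBUG and HAVE_JUMP_LABEL are set):
 *
 *	#define SCHED_FEAT(name, enabled)	\
 *		__SCHED_FEAT_##name ,
 *
 *	enum {
 *	#include "features.h"
 *		__SCHED_FEAT_NR,
 *	};
 *
 *	#undef SCHED_FEAT
 *
 *	#define sched_feat(x) \
 *		(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
 *
 * With CONFIG_SCHED_DEBUG, features can be toggled at runtime through
 * /sys/kernel/debug/sched_features: writing "<NAME>" sets a bit,
 * writing "NO_<NAME>" clears it.
 */
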
/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
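
/*
 * Illustrative sketch: this feature is consumed by place_entity() in
 * kernel/sched/fair.c, roughly:
 *
 *	unsigned long thresh = sysctl_sched_latency;
 *
 *	// halve the sleeper credit, so piles of sleepers
 *	// cannot rip the vruntime spread apart
 *	if (sched_feat(GENTLE_FAIR_SLEEPERS))
 *		thresh >>= 1;
 *
 *	vruntime -= thresh;
 */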

/*
 * Place new tasks ahead so that they do not starve already running
 * tasks
 */
SCHED_FEAT(START_DEBIT, true)
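
/*
 * Illustrative sketch: also in place_entity() (kernel/sched/fair.c),
 * a newly forked task starts one slice behind so it cannot
 * immediately preempt already running tasks, roughly:
 *
 *	u64 vruntime = cfs_rq->min_vruntime;
 *
 *	if (initial && sched_feat(START_DEBIT))
 *		vruntime += sched_vslice(cfs_rq, se);
 */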

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely going to consume data we
 * touched; increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)

/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt) as that will likely touch the same data; increases
 * cache locality.
 */
SCHED_FEAT(LAST_BUDDY, true)
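
/*
 * Illustrative sketch: the buddies are set from the wakeup-preemption
 * path in check_preempt_wakeup() (kernel/sched/fair.c), roughly:
 *
 *	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
 *		set_next_buddy(pse);	// pse: the freshly woken task
 *	...
 *	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
 *		set_last_buddy(se);	// se: the task being preempted
 *
 * pick_next_entity() then prefers cfs_rq->next, then cfs_rq->last,
 * over the leftmost entity, as long as the buddy's vruntime has not
 * fallen too far behind (see wakeup_preempt_entity()).
 */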

/*
 * Consider buddies to be cache hot; decreases the likelihood of a
 * cache buddy being migrated away, increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, true)
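
/*
 * Illustrative sketch: task_hot() in kernel/sched/fair.c reports
 * buddies as cache hot so the load balancer resists pulling them to
 * another CPU, roughly:
 *
 *	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 *			(&p->se == cfs_rq_of(&p->se)->next ||
 *			 &p->se == cfs_rq_of(&p->se)->last))
 *		return 1;
 */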

/*
 * Allow wakeup-time preemption of the current task:
 */
SCHED_FEAT(WAKEUP_PREEMPTION, true)
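
/*
 * Illustrative sketch: when this is disabled, check_preempt_wakeup()
 * in kernel/sched/fair.c returns before any preemption check, so a
 * waking task only runs once the current task yields the CPU:
 *
 *	if (!sched_feat(WAKEUP_PREEMPTION))
 *		return;
 */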

/*
 * Use arch-dependent CPU power functions
 */
SCHED_FEAT(ARCH_POWER, true)

SCHED_FEAT(HRTICK, false)
SCHED_FEAT(DOUBLE_TICK, false)
SCHED_FEAT(LB_BIAS, true)

/*
 * Decrement CPU power based on time not spent running tasks
 */
SCHED_FEAT(NONTASK_POWER, true)

/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
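
/*
 * Illustrative sketch: ttwu_queue() in kernel/sched/core.c uses this
 * to choose between taking the remote rq->lock directly and deferring
 * the wakeup to the scheduler IPI, roughly:
 *
 *	if (sched_feat(TTWU_QUEUE) &&
 *	    !cpus_share_cache(smp_processor_id(), cpu)) {
 *		ttwu_queue_remote(p, cpu);	// enqueue + IPI, no rq->lock here
 *		return;
 *	}
 *	// else: lock the target rq and activate the task directly
 */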

SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)

/*
 * Apply the automatic NUMA scheduling policy. Enabled automatically
 * at runtime if running on a NUMA machine. Can be controlled via
 * numa_balancing=
 */
#ifdef CONFIG_NUMA_BALANCING
SCHED_FEAT(NUMA, false)
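
/*
 * Illustrative sketch: the NUMA features are tested through a
 * sched_feat_numa() wrapper that evaluates to 0 when
 * CONFIG_NUMA_BALANCING is off; e.g. the NUMA hinting-fault tick work
 * in kernel/sched/fair.c bails out early, roughly:
 *
 *	if (!sched_feat_numa(NUMA))
 *		return;
 */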

/*
 * NUMA_FAVOUR_HIGHER will favor moving tasks towards nodes where a
 * higher number of hinting faults are recorded during active load
 * balancing.
 */
SCHED_FEAT(NUMA_FAVOUR_HIGHER, true)

/*
 * NUMA_RESIST_LOWER will resist moving tasks towards nodes where a
 * lower number of hinting faults have been recorded. As this has
 * the potential to prevent a task from ever migrating to a new node
 * due to CPU overload, it is disabled by default.
 */
SCHED_FEAT(NUMA_RESIST_LOWER, false)
#endif