/*
 * kernel/sched/cpudeadline.c
 *
 * Global CPU deadline management
 *
 * Author: Juri Lelli <j.lelli@sssup.it>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "cpudeadline.h"

static inline int parent(int i)
{
        return (i - 1) >> 1;
}

static inline int left_child(int i)
{
        return (i << 1) + 1;
}

static inline int right_child(int i)
{
        return (i << 1) + 2;
}
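/*
 * The deadline heap is kept in a 0-based array, so the index helpers above
 * follow the usual binary-heap arithmetic.  For example, for i = 2:
 *
 *      parent(2)      = (2 - 1) >> 1 = 0
 *      left_child(2)  = (2 << 1) + 1 = 5
 *      right_child(2) = (2 << 1) + 2 = 6
 */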

static inline int dl_time_before(u64 a, u64 b)
{
        return (s64)(a - b) < 0;
}
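/*
 * A worked example of the wrap-safe comparison above: deadlines are u64
 * timestamps that may wrap, so "a before b" is decided by the sign of the
 * difference rather than by a plain unsigned compare.  With
 * a = ULLONG_MAX - 5 and b = 10, a - b wraps to 0xFFFFFFFFFFFFFFF0, which is
 * -16 when read as s64, so dl_time_before(a, b) is true even though a > b as
 * unsigned values.  This matches the jiffies-style time_before() convention.
 */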

static void cpudl_exchange(struct cpudl *cp, int a, int b)
{
        int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;

        swap(cp->elements[a].cpu, cp->elements[b].cpu);
        swap(cp->elements[a].dl, cp->elements[b].dl);

        swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx);
}
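/*
 * Note on the layout that cpudl_exchange() relies on: cp->elements[] is
 * indexed in two different ways.  Read by heap position, ->cpu and ->dl
 * describe the heap node stored in that slot; read by CPU number, ->idx
 * records where that CPU currently sits in the heap.  Swapping two heap slots
 * therefore also has to swap the two reverse-lookup entries, which is what the
 * last swap() above does, using the CPU numbers captured before the first
 * swap.
 */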

static void cpudl_heapify(struct cpudl *cp, int idx)
{
        int l, r, largest;

        /* adapted from lib/prio_heap.c */
        while (1) {
                l = left_child(idx);
                r = right_child(idx);
                largest = idx;

                if ((l < cp->size) && dl_time_before(cp->elements[idx].dl,
                                                     cp->elements[l].dl))
                        largest = l;
                if ((r < cp->size) && dl_time_before(cp->elements[largest].dl,
                                                     cp->elements[r].dl))
                        largest = r;
                if (largest == idx)
                        break;

                /* Push idx down the heap one level and bump one up */
                cpudl_exchange(cp, largest, idx);
                idx = largest;
        }
}
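/*
 * Illustration of the sift-down above (ignoring u64 wrap-around for the sake
 * of small numbers): with deadlines stored as the max-heap
 *
 *      idx:  0    1    2    3    4
 *      dl : 100   90   80   70   60
 *
 * overwriting slot 0 with 65 and calling cpudl_heapify(cp, 0) first swaps it
 * with slot 1 (90 is later), then with slot 3 (70 is later), after which the
 * heap property holds again.
 */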

static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
{
        WARN_ON(idx == IDX_INVALID || !cpu_present(idx));

        if (dl_time_before(new_dl, cp->elements[idx].dl)) {
                /* key decreased: push the element down the heap */
                cp->elements[idx].dl = new_dl;
                cpudl_heapify(cp, idx);
        } else {
                /* key increased (or unchanged): bubble the element up */
                cp->elements[idx].dl = new_dl;
                while (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,
                                                 cp->elements[idx].dl)) {
                        cpudl_exchange(cp, idx, parent(idx));
                        idx = parent(idx);
                }
        }
}

static inline int cpudl_maximum(struct cpudl *cp)
{
        return cp->elements[0].cpu;
}

/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - best CPU (heap maximum if suitable), or -1 if none was found
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
               struct cpumask *later_mask)
{
        int best_cpu = -1;
        const struct sched_dl_entity *dl_se = &p->dl;

        if (later_mask && cpumask_and(later_mask, later_mask, cp->free_cpus)) {
                best_cpu = cpumask_any(later_mask);
                goto out;
        } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
                   dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
                best_cpu = cpudl_maximum(cp);
                if (later_mask)
                        cpumask_set_cpu(best_cpu, later_mask);
        }

out:
        WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

        return best_cpu;
}
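/*
 * A minimal sketch of how a caller might use cpudl_find().  The names below
 * (find_later_rq(), rd->cpudl, later_mask) follow the deadline scheduling
 * class in kernel/sched/deadline.c, but the exact caller shown here is an
 * assumption of this illustration, not part of this file:
 *
 *      cpumask_copy(later_mask, task_rq(task)->rd->span);
 *      cpumask_and(later_mask, later_mask, cpu_active_mask);
 *      cpumask_and(later_mask, later_mask, &task->cpus_allowed);
 *      best_cpu = cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask);
 *      if (best_cpu != -1)
 *              ... push/migrate the task towards best_cpu ...
 *
 * i.e. later_mask is expected to arrive already narrowed to the CPUs the task
 * may run on; cpudl_find() then intersects it with the idle (free) CPUs, or
 * falls back to the heap maximum when no free CPU fits.
 */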

/*
 * cpudl_set - update the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target cpu
 * @dl: the new earliest deadline for this cpu
 * @is_valid: nonzero to insert/update the cpu, zero to remove it from the heap
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
{
        int old_idx, new_cpu;
        unsigned long flags;

        WARN_ON(!cpu_present(cpu));

        raw_spin_lock_irqsave(&cp->lock, flags);
        old_idx = cp->elements[cpu].idx;
        if (!is_valid) {
                /* remove item */
                if (old_idx == IDX_INVALID) {
                        /*
                         * Nothing to remove if old_idx was invalid.
                         * This could happen if a rq_offline_dl is
                         * called for a CPU without -dl tasks running.
                         */
                        goto out;
                }
                /* move the last heap element into the vacated slot ... */
                new_cpu = cp->elements[cp->size - 1].cpu;
                cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
                cp->elements[old_idx].cpu = new_cpu;
                cp->size--;
                cp->elements[new_cpu].idx = old_idx;
                cp->elements[cpu].idx = IDX_INVALID;
                /* ... then restore heap order by sifting it up or down */
                while (old_idx > 0 && dl_time_before(
                                cp->elements[parent(old_idx)].dl,
                                cp->elements[old_idx].dl)) {
                        cpudl_exchange(cp, old_idx, parent(old_idx));
                        old_idx = parent(old_idx);
                }
                cpumask_set_cpu(cpu, cp->free_cpus);
                cpudl_heapify(cp, old_idx);

                goto out;
        }

        if (old_idx == IDX_INVALID) {
                /* first deadline for this cpu: append and sift into place */
                cp->size++;
                cp->elements[cp->size - 1].dl = 0;
                cp->elements[cp->size - 1].cpu = cpu;
                cp->elements[cpu].idx = cp->size - 1;
                cpudl_change_key(cp, cp->size - 1, dl);
                cpumask_clear_cpu(cpu, cp->free_cpus);
        } else {
                cpudl_change_key(cp, old_idx, dl);
        }

out:
        raw_spin_unlock_irqrestore(&cp->lock, flags);
}
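/*
 * A sketch of the expected call pattern.  The rq/rd names mirror the deadline
 * class in kernel/sched/deadline.c and are assumptions of this illustration:
 *
 *      // a new earliest deadline becomes active on this runqueue
 *      cpudl_set(&rq->rd->cpudl, rq->cpu, p->dl.deadline, 1);
 *
 *      // the last -dl task leaves the runqueue (or it goes offline)
 *      cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
 *
 * Both calls must be made with cpu_rq(cpu)->lock held, as noted above.
 */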

/*
 * cpudl_init - initialize the cpudl structure
 * @cp: the cpudl max-heap context
 *
 * Returns: 0 on success, -ENOMEM if an allocation fails
 */
int cpudl_init(struct cpudl *cp)
{
        int i;

        memset(cp, 0, sizeof(*cp));
        raw_spin_lock_init(&cp->lock);
        cp->size = 0;

        cp->elements = kcalloc(nr_cpu_ids,
                               sizeof(struct cpudl_item),
                               GFP_KERNEL);
        if (!cp->elements)
                return -ENOMEM;

        if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
                kfree(cp->elements);
                return -ENOMEM;
        }

        for_each_possible_cpu(i)
                cp->elements[i].idx = IDX_INVALID;

        cpumask_setall(cp->free_cpus);

        return 0;
}

/*
 * cpudl_cleanup - clean up the cpudl structure
 * @cp: the cpudl max-heap context
 */
void cpudl_cleanup(struct cpudl *cp)
{
        free_cpumask_var(cp->free_cpus);
        kfree(cp->elements);
}
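/*
 * Typical lifecycle, sketched under the assumption that the structure is
 * embedded in a root domain (as the deadline class does); the caller names
 * and the error label below are illustrative, not defined in this file:
 *
 *      // e.g. when allocating a root domain
 *      if (cpudl_init(&rd->cpudl) != 0)
 *              goto out_free;          // hypothetical error label
 *      ...
 *      // e.g. when freeing the root domain
 *      cpudl_cleanup(&rd->cpudl);
 *
 * cpudl_init() must succeed before any cpudl_set()/cpudl_find() calls, and
 * cpudl_cleanup() frees the per-CPU element array and the free_cpus mask.
 */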