// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);

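/*
 * Usage sketch (illustrative, not part of the original file): walking
 * every set cpu by hand; the for_each_cpu() macro expands to
 * essentially this loop.
 *
 *	int cpu = -1;
 *
 *	while ((cpu = cpumask_next(cpu, cpu_online_mask)) < nr_cpu_ids)
 *		pr_info("cpu %d is online\n", cpu);
 */
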
/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
		nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);
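
/*
 * Usage sketch (illustrative): finding the first cpu that is both
 * online and in a caller-owned mask ("allowed" is hypothetical).
 *
 *	int cpu = cpumask_next_and(-1, cpu_online_mask, allowed);
 *
 *	if (cpu < nr_cpu_ids)
 *		... cpu is online and allowed ...
 */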

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
EXPORT_SYMBOL(cpumask_any_but);
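
/*
 * Usage sketch (illustrative): picking a peer to hand work to, i.e.
 * any online cpu other than the current one.
 *
 *	int peer = cpumask_any_but(cpu_online_mask, smp_processor_id());
 *
 *	if (peer < nr_cpu_ids)
 *		... queue the work on "peer" ...
 */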

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
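
/*
 * Usage sketch (illustrative): callers normally reach this through
 * for_each_cpu_wrap(), which visits every cpu in the mask exactly
 * once, starting at an arbitrary cpu ("start" is hypothetical).
 *
 *	int cpu;
 *
 *	for_each_cpu_wrap(cpu, cpu_online_mask, start) {
 *		... scans a full lap of the mask, wrapping past
 *		    the end back to cpu 0 ...
 *	}
 */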

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node to allocate from, or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
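
/*
 * Usage sketch (illustrative) of the cpumask_var_t life cycle; the
 * same code works whether CONFIG_CPUMASK_OFFSTACK=y heap-allocates
 * the mask or it lives directly on the stack ("some_mask" is
 * hypothetical).
 *
 *	cpumask_var_t tmp;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_and(tmp, cpu_online_mask, some_mask);
 *	...
 *	free_cpumask_var(tmp);
 */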

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
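
/*
 * Usage sketch (illustrative): the bootmem variants pair up during
 * early init, before the slab allocator is up ("early_setup" is a
 * hypothetical caller).
 *
 *	static cpumask_var_t early_mask;
 *
 *	void __init early_setup(void)
 *	{
 *		alloc_bootmem_cpumask_var(&early_mask);
 *		...
 *		free_bootmem_cpumask_var(early_mask);
 *	}
 */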
#endif

/**
 * cpumask_local_spread - select the i'th cpu with local numa cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu, hk_flags;
	const struct cpumask *mask;

	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
	mask = housekeeping_cpumask(hk_flags);
	/* Wrap: we always want a cpu. */
	i %= cpumask_weight(mask);

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, mask) {
			if (i-- == 0)
				return cpu;
		}
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
			if (i-- == 0)
				return cpu;
		}

		for_each_cpu(cpu, mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
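
/*
 * Usage sketch (illustrative): spreading per-queue interrupt affinity
 * hints across housekeeping cpus, preferring the device's own node
 * ("nvec", "vec" and "dev" are hypothetical).
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(vec[i],
 *			cpumask_of(cpumask_local_spread(i, dev_to_node(dev))));
 */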

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - pick an arbitrary cpu from *src1p & *src2p
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
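
/*
 * Usage sketch (illustrative): repeated calls with the same pair of
 * masks rotate through their intersection, so successive placements
 * land on different cpus ("allowed" is hypothetical).
 *
 *	int cpu = cpumask_any_and_distribute(allowed, cpu_online_mask);
 *
 *	if (cpu >= nr_cpu_ids)
 *		... no usable cpu in the intersection ...
 */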