1 #ifndef __LINUX_CPUMASK_H
2 #define __LINUX_CPUMASK_H
3
4 /*
5 * Cpumasks provide a bitmap suitable for representing the
6  * set of CPUs in a system, one bit position per CPU number.
7 *
8 * The new cpumask_ ops take a "struct cpumask *"; the old ones
9 * use cpumask_t.
10 *
11 * See detailed comments in the file linux/bitmap.h describing the
12 * data type on which these cpumasks are based.
13 *
14 * For details of cpumask_scnprintf() and cpumask_parse_user(),
15 * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
16 * For details of cpulist_scnprintf() and cpulist_parse(), see
17 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
18  * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c.
19 * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
20 * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
21 * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
22 *
23 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
24 * Note: The alternate operations with the suffix "_nr" are used
25 * to limit the range of the loop to nr_cpu_ids instead of
26 * NR_CPUS when NR_CPUS > 64 for performance reasons.
27 * If NR_CPUS is <= 64 then most assembler bitmask
28 * operators execute faster with a constant range, so
29 * the operator will continue to use NR_CPUS.
30 *
31 * Another consideration is that nr_cpu_ids is initialized
32 * to NR_CPUS and isn't lowered until the possible cpus are
33 * discovered (including any disabled cpus). So early uses
34 * will span the entire range of NR_CPUS.
35 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
36 *
37 * The obsolescent cpumask operations are:
38 *
39 * void cpu_set(cpu, mask) turn on bit 'cpu' in mask
40 * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask
41 * void cpus_setall(mask) set all bits
42 * void cpus_clear(mask) clear all bits
43 * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask
44 * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask
45 *
46 * void cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
47 * void cpus_or(dst, src1, src2) dst = src1 | src2 [union]
48 * void cpus_xor(dst, src1, src2) dst = src1 ^ src2
49 * void cpus_andnot(dst, src1, src2) dst = src1 & ~src2
50 * void cpus_complement(dst, src) dst = ~src
51 *
52 * int cpus_equal(mask1, mask2) Does mask1 == mask2?
53 * int cpus_intersects(mask1, mask2) Do mask1 and mask2 intersect?
54 * int cpus_subset(mask1, mask2) Is mask1 a subset of mask2?
55  * int cpus_empty(mask) Is mask empty (no bits set)?
56  * int cpus_full(mask) Is mask full (all bits set)?
57  * int cpus_weight(mask) Hamming weight - number of set bits
58 * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS
59 *
60 * void cpus_shift_right(dst, src, n) Shift right
61 * void cpus_shift_left(dst, src, n) Shift left
62 *
63 * int first_cpu(mask) Number lowest set bit, or NR_CPUS
64 * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS
65 * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids
66 *
67 * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
68 * (can be used as an lvalue)
69 * CPU_MASK_ALL Initializer - all bits set
70 * CPU_MASK_NONE Initializer - no bits set
71 * unsigned long *cpus_addr(mask) Array of unsigned long's in mask
72 *
73 * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t
74 * variables, and CPUMASK_PTR provides pointers to each field.
75 *
76 * The structure should be defined something like this:
77 * struct my_cpumasks {
78 * cpumask_t mask1;
79 * cpumask_t mask2;
80 * };
81 *
82 * Usage is then:
83 * CPUMASK_ALLOC(my_cpumasks);
84 * CPUMASK_PTR(mask1, my_cpumasks);
85 * CPUMASK_PTR(mask2, my_cpumasks);
86 *
87 * --- DO NOT reference cpumask_t pointers until this check ---
88 * if (my_cpumasks == NULL)
89 * "kmalloc failed"...
90 *
91 * References are now pointers to the cpumask_t variables (*mask1, ...)
92 *
93 *if NR_CPUS > BITS_PER_LONG
94 * CPUMASK_ALLOC(m) Declares and allocates struct m *m =
95 * kmalloc(sizeof(*m), GFP_KERNEL)
96 * CPUMASK_FREE(m) Macro for kfree(m)
97 *else
98 * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m
99 * CPUMASK_FREE(m) Nop
100 *endif
101 * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v)
102 * ------------------------------------------------------------------------
103 *
104 * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
105 * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask
106 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
107 * int cpulist_parse(buf, map) Parse ascii string as cpulist
108 * int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
109 * void cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
110 * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap
111 * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz
112 *
113 * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS
114 * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids
115 *
116 * int num_online_cpus() Number of online CPUs
117 * int num_possible_cpus() Number of all possible CPUs
118 * int num_present_cpus() Number of present CPUs
119 *
120 * int cpu_online(cpu) Is some cpu online?
121 * int cpu_possible(cpu) Is some cpu possible?
122 * int cpu_present(cpu) Is some cpu present (can schedule)?
123 *
124 * int any_online_cpu(mask) First online cpu in mask
125 *
126 * for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map
127 * for_each_online_cpu(cpu) for-loop cpu over cpu_online_map
128 * for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
129 *
130 * Subtlety:
131 * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
132 * to generate slightly worse code. Note for example the additional
133 * 40 lines of assembly code compiling the "for each possible cpu"
134  * loops buried in the disk_stat_read() macro calls when compiling
135 * drivers/block/genhd.c (arch i386, CONFIG_SMP=y). So use a simple
136 * one-line #define for cpu_isset(), instead of wrapping an inline
137 * inside a macro, the way we do the other calls.
138 */
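
/*
 * A minimal usage sketch of the obsolescent operations listed above,
 * combining cpus_and(), cpus_empty() and first_cpu().  The function
 * and its 'allowed' argument are made up for illustration only:
 *
 *	static int first_allowed_online_cpu(const cpumask_t *allowed)
 *	{
 *		cpumask_t candidates;
 *
 *		cpus_and(candidates, *allowed, cpu_online_map);
 *		if (cpus_empty(candidates))
 *			return -1;		// no allowed cpu is online
 *		return first_cpu(candidates);	// lowest set bit
 *	}
 */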
139
140 #include <linux/kernel.h>
141 #include <linux/threads.h>
142 #include <linux/bitmap.h>
143
144 typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
145 extern cpumask_t _unused_cpumask_arg_;
146
147 #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
148 static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
149 {
150 set_bit(cpu, dstp->bits);
151 }
152
153 #define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
154 static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
155 {
156 clear_bit(cpu, dstp->bits);
157 }
158
159 #define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
160 static inline void __cpus_setall(cpumask_t *dstp, int nbits)
161 {
162 bitmap_fill(dstp->bits, nbits);
163 }
164
165 #define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
166 static inline void __cpus_clear(cpumask_t *dstp, int nbits)
167 {
168 bitmap_zero(dstp->bits, nbits);
169 }
170
171 /* No static inline type checking - see Subtlety (1) above. */
172 #define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
173
174 #define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
175 static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
176 {
177 return test_and_set_bit(cpu, addr->bits);
178 }
179
180 #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
181 static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
182 const cpumask_t *src2p, int nbits)
183 {
184 bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
185 }
186
187 #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
188 static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
189 const cpumask_t *src2p, int nbits)
190 {
191 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
192 }
193
194 #define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
195 static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
196 const cpumask_t *src2p, int nbits)
197 {
198 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
199 }
200
201 #define cpus_andnot(dst, src1, src2) \
202 __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
203 static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
204 const cpumask_t *src2p, int nbits)
205 {
206 bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
207 }
208
209 #define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
210 static inline void __cpus_complement(cpumask_t *dstp,
211 const cpumask_t *srcp, int nbits)
212 {
213 bitmap_complement(dstp->bits, srcp->bits, nbits);
214 }
215
216 #define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
217 static inline int __cpus_equal(const cpumask_t *src1p,
218 const cpumask_t *src2p, int nbits)
219 {
220 return bitmap_equal(src1p->bits, src2p->bits, nbits);
221 }
222
223 #define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
224 static inline int __cpus_intersects(const cpumask_t *src1p,
225 const cpumask_t *src2p, int nbits)
226 {
227 return bitmap_intersects(src1p->bits, src2p->bits, nbits);
228 }
229
230 #define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
231 static inline int __cpus_subset(const cpumask_t *src1p,
232 const cpumask_t *src2p, int nbits)
233 {
234 return bitmap_subset(src1p->bits, src2p->bits, nbits);
235 }
236
237 #define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
238 static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
239 {
240 return bitmap_empty(srcp->bits, nbits);
241 }
242
243 #define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
244 static inline int __cpus_full(const cpumask_t *srcp, int nbits)
245 {
246 return bitmap_full(srcp->bits, nbits);
247 }
248
249 #define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
250 static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
251 {
252 return bitmap_weight(srcp->bits, nbits);
253 }
254
255 #define cpus_shift_right(dst, src, n) \
256 __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
257 static inline void __cpus_shift_right(cpumask_t *dstp,
258 const cpumask_t *srcp, int n, int nbits)
259 {
260 bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
261 }
262
263 #define cpus_shift_left(dst, src, n) \
264 __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
265 static inline void __cpus_shift_left(cpumask_t *dstp,
266 const cpumask_t *srcp, int n, int nbits)
267 {
268 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
269 }
270
271 /*
272 * Special-case data structure for "single bit set only" constant CPU masks.
273 *
274 * We pre-generate all the 64 (or 32) possible bit positions, with enough
275 * padding to the left and the right, and return the constant pointer
276 * appropriately offset.
277 */
278 extern const unsigned long
279 cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
280
281 static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
282 {
283 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
284 p -= cpu / BITS_PER_LONG;
285 return (const cpumask_t *)p;
286 }
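
/*
 * Worked example, assuming BITS_PER_LONG == 64: for cpu == 67,
 * cpu % 64 == 3, so p starts at cpu_bit_bitmap[4], whose first word
 * has only bit 3 set.  Stepping back cpu / 64 == 1 long makes that
 * word the second word of the returned mask, i.e. bit 64 + 3 == 67.
 * The all-zero row 0 and the zero tail of every row are the padding
 * that keeps this offsetting in bounds for any cpu < NR_CPUS.
 */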
287
288 /*
289 * In cases where we take the address of the cpumask immediately,
290 * gcc optimizes it out (it's a constant) and there's no huge stack
291 * variable created:
292 */
293 #define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
294
295
296 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
297
298 #if NR_CPUS <= BITS_PER_LONG
299
300 #define CPU_MASK_ALL \
301 (cpumask_t) { { \
302 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
303 } }
304
305 #define CPU_MASK_ALL_PTR (&CPU_MASK_ALL)
306
307 #else
308
309 #define CPU_MASK_ALL \
310 (cpumask_t) { { \
311 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
312 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
313 } }
314
315 /* cpu_mask_all is in init/main.c */
316 extern cpumask_t cpu_mask_all;
317 #define CPU_MASK_ALL_PTR (&cpu_mask_all)
318
319 #endif
320
321 #define CPU_MASK_NONE \
322 (cpumask_t) { { \
323 [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
324 } }
325
326 #define CPU_MASK_CPU0 \
327 (cpumask_t) { { \
328 [0] = 1UL \
329 } }
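
/*
 * CPU_MASK_ALL, CPU_MASK_NONE and CPU_MASK_CPU0 are compound-literal
 * initializers for cpumask_t variables, e.g. (the variable name is
 * illustrative only):
 *
 *	cpumask_t allowed = CPU_MASK_NONE;
 *
 *	cpu_set(0, allowed);	// now equal to CPU_MASK_CPU0
 */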
330
331 #define cpus_addr(src) ((src).bits)
332
333 #if NR_CPUS > BITS_PER_LONG
334 #define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
335 #define CPUMASK_FREE(m) kfree(m)
336 #else
337 #define CPUMASK_ALLOC(m) struct m _m, *m = &_m
338 #define CPUMASK_FREE(m)
339 #endif
340 #define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v)
341
342 #define cpumask_scnprintf(buf, len, src) \
343 __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
344 static inline int __cpumask_scnprintf(char *buf, int len,
345 const cpumask_t *srcp, int nbits)
346 {
347 return bitmap_scnprintf(buf, len, srcp->bits, nbits);
348 }
349
350 #define cpumask_parse_user(ubuf, ulen, dst) \
351 __cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS)
352 static inline int __cpumask_parse_user(const char __user *buf, int len,
353 cpumask_t *dstp, int nbits)
354 {
355 return bitmap_parse_user(buf, len, dstp->bits, nbits);
356 }
357
358 #define cpulist_scnprintf(buf, len, src) \
359 __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
360 static inline int __cpulist_scnprintf(char *buf, int len,
361 const cpumask_t *srcp, int nbits)
362 {
363 return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
364 }
365
366 #define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
367 static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
368 {
369 return bitmap_parselist(buf, dstp->bits, nbits);
370 }
371
372 #define cpu_remap(oldbit, old, new) \
373 __cpu_remap((oldbit), &(old), &(new), NR_CPUS)
374 static inline int __cpu_remap(int oldbit,
375 const cpumask_t *oldp, const cpumask_t *newp, int nbits)
376 {
377 return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
378 }
379
380 #define cpus_remap(dst, src, old, new) \
381 __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
382 static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
383 const cpumask_t *oldp, const cpumask_t *newp, int nbits)
384 {
385 bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
386 }
387
388 #define cpus_onto(dst, orig, relmap) \
389 __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
390 static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
391 const cpumask_t *relmapp, int nbits)
392 {
393 bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
394 }
395
396 #define cpus_fold(dst, orig, sz) \
397 __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
398 static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
399 int sz, int nbits)
400 {
401 bitmap_fold(dstp->bits, origp->bits, sz, nbits);
402 }
403
404 #if NR_CPUS == 1
405
406 #define nr_cpu_ids 1
407 #define first_cpu(src) ({ (void)(src); 0; })
408 #define next_cpu(n, src) ({ (void)(src); 1; })
409 #define any_online_cpu(mask) 0
410 #define for_each_cpu_mask(cpu, mask) \
411 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
412
413 #else /* NR_CPUS > 1 */
414
415 extern int nr_cpu_ids;
416 int __first_cpu(const cpumask_t *srcp);
417 int __next_cpu(int n, const cpumask_t *srcp);
418 int __any_online_cpu(const cpumask_t *mask);
419
420 #define first_cpu(src) __first_cpu(&(src))
421 #define next_cpu(n, src) __next_cpu((n), &(src))
422 #define any_online_cpu(mask) __any_online_cpu(&(mask))
423 #define for_each_cpu_mask(cpu, mask) \
424 for ((cpu) = -1; \
425 (cpu) = next_cpu((cpu), (mask)), \
426 (cpu) < NR_CPUS; )
427 #endif
428
429 #if NR_CPUS <= 64
430
431 #define next_cpu_nr(n, src) next_cpu(n, src)
432 #define cpus_weight_nr(cpumask) cpus_weight(cpumask)
433 #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
434
435 #else /* NR_CPUS > 64 */
436
437 int __next_cpu_nr(int n, const cpumask_t *srcp);
438 #define next_cpu_nr(n, src) __next_cpu_nr((n), &(src))
439 #define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
440 #define for_each_cpu_mask_nr(cpu, mask) \
441 for ((cpu) = -1; \
442 (cpu) = next_cpu_nr((cpu), (mask)), \
443 (cpu) < nr_cpu_ids; )
444
445 #endif /* NR_CPUS > 64 */
446
447 /*
448 * The following particular system cpumasks and operations manage
449 * possible, present, active and online cpus. Each of them is a fixed size
450 * bitmap of size NR_CPUS.
451 *
452 * #ifdef CONFIG_HOTPLUG_CPU
453 * cpu_possible_map - has bit 'cpu' set iff cpu is populatable
454 * cpu_present_map - has bit 'cpu' set iff cpu is populated
455 * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
456 * cpu_active_map - has bit 'cpu' set iff cpu available to migration
457 * #else
458 * cpu_possible_map - has bit 'cpu' set iff cpu is populated
459 * cpu_present_map - copy of cpu_possible_map
460 * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
461 * #endif
462 *
463 * In either case, NR_CPUS is fixed at compile time, as the static
464 * size of these bitmaps. The cpu_possible_map is fixed at boot
465  * time, as the set of CPU ids that might ever be plugged in at
466  * any time during the life of that system boot.
467 * The cpu_present_map is dynamic(*), representing which CPUs
468 * are currently plugged in. And cpu_online_map is the dynamic
469 * subset of cpu_present_map, indicating those CPUs available
470 * for scheduling.
471 *
472 * If HOTPLUG is enabled, then cpu_possible_map is forced to have
473 * all NR_CPUS bits set, otherwise it is just the set of CPUs that
474 * ACPI reports present at boot.
475 *
476 * If HOTPLUG is enabled, then cpu_present_map varies dynamically,
477 * depending on what ACPI reports as currently plugged in, otherwise
478 * cpu_present_map is just a copy of cpu_possible_map.
479 *
480 * (*) Well, cpu_present_map is dynamic in the hotplug case. If not
481 * hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
482 *
483 * Subtleties:
484 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
485 * assumption that their single CPU is online. The UP
486 * cpu_{online,possible,present}_maps are placebos. Changing them
487  * will have no useful effect on the following num_*_cpus()
488 * and cpu_*() macros in the UP case. This ugliness is a UP
489 * optimization - don't waste any instructions or memory references
490 * asking if you're online or how many CPUs there are if there is
491 * only one CPU.
492 * 2) Most SMP arch's #define some of these maps to be some
493 * other map specific to that arch. Therefore, the following
494 * must be #define macros, not inlines. To see why, examine
495 * the assembly code produced by the following. Note that
496 * set1() writes phys_x_map, but set2() writes x_map:
497 * int x_map, phys_x_map;
498 * #define set1(a) x_map = a
499 * inline void set2(int a) { x_map = a; }
500 * #define x_map phys_x_map
501 * main(){ set1(3); set2(5); }
502 */
503
504 extern cpumask_t cpu_possible_map;
505 extern cpumask_t cpu_online_map;
506 extern cpumask_t cpu_present_map;
507 extern cpumask_t cpu_active_map;
508
509 #if NR_CPUS > 1
510 #define num_online_cpus() cpus_weight_nr(cpu_online_map)
511 #define num_possible_cpus() cpus_weight_nr(cpu_possible_map)
512 #define num_present_cpus() cpus_weight_nr(cpu_present_map)
513 #define cpu_online(cpu) cpu_isset((cpu), cpu_online_map)
514 #define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map)
515 #define cpu_present(cpu) cpu_isset((cpu), cpu_present_map)
516 #define cpu_active(cpu) cpu_isset((cpu), cpu_active_map)
517 #else
518 #define num_online_cpus() 1
519 #define num_possible_cpus() 1
520 #define num_present_cpus() 1
521 #define cpu_online(cpu) ((cpu) == 0)
522 #define cpu_possible(cpu) ((cpu) == 0)
523 #define cpu_present(cpu) ((cpu) == 0)
524 #define cpu_active(cpu) ((cpu) == 0)
525 #endif
526
527 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
528
529 #define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map)
530 #define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map)
531 #define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map)
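
/*
 * A small sketch of the accessors above (the printk is illustrative
 * only):
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		printk(KERN_DEBUG "cpu %d of %d online\n",
 *		       cpu, num_online_cpus());
 */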
532
533 /* These are the new versions of the cpumask operators: passed by pointer.
534 * The older versions will be implemented in terms of these, then deleted. */
535 #define cpumask_bits(maskp) ((maskp)->bits)
536
537 #if NR_CPUS <= BITS_PER_LONG
538 #define CPU_BITS_ALL \
539 { \
540 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
541 }
542
543 /* This produces more efficient code. */
544 #define nr_cpumask_bits NR_CPUS
545
546 #else /* NR_CPUS > BITS_PER_LONG */
547
548 #define CPU_BITS_ALL \
549 { \
550 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
551 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
552 }
553
554 #define nr_cpumask_bits nr_cpu_ids
555 #endif /* NR_CPUS > BITS_PER_LONG */
556
557 /* verify cpu argument to cpumask_* operators */
558 static inline unsigned int cpumask_check(unsigned int cpu)
559 {
560 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
561 WARN_ON_ONCE(cpu >= nr_cpumask_bits);
562 #endif /* CONFIG_DEBUG_PER_CPU_MAPS */
563 return cpu;
564 }
565
566 #if NR_CPUS == 1
567 /* Uniprocessor. */
568 #define cpumask_first(src) ({ (void)(src); 0; })
569 #define cpumask_next(n, src) ({ (void)(src); 1; })
570 #define cpumask_next_zero(n, src) ({ (void)(src); 1; })
571 #define cpumask_next_and(n, srcp, andp) ({ (void)(srcp), (void)(andp); 1; })
572 #define cpumask_any_but(mask, cpu) ({ (void)(mask); (void)(cpu); 0; })
573
574 #define for_each_cpu(cpu, mask) \
575 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
576 #define for_each_cpu_and(cpu, mask, and) \
577 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
578 #else
579 /**
580 * cpumask_first - get the first cpu in a cpumask
581 * @srcp: the cpumask pointer
582 *
583 * Returns >= nr_cpu_ids if no cpus set.
584 */
585 static inline unsigned int cpumask_first(const struct cpumask *srcp)
586 {
587 return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
588 }
589
590 /**
591 * cpumask_next - get the next cpu in a cpumask
592 * @n: the cpu prior to the place to search (ie. return will be > @n)
593 * @srcp: the cpumask pointer
594 *
595 * Returns >= nr_cpu_ids if no further cpus set.
596 */
597 static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
598 {
599 /* -1 is a legal arg here. */
600 if (n != -1)
601 cpumask_check(n);
602 return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
603 }
604
605 /**
606 * cpumask_next_zero - get the next unset cpu in a cpumask
607 * @n: the cpu prior to the place to search (ie. return will be > @n)
608 * @srcp: the cpumask pointer
609 *
610 * Returns >= nr_cpu_ids if no further cpus unset.
611 */
612 static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
613 {
614 /* -1 is a legal arg here. */
615 if (n != -1)
616 cpumask_check(n);
617 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
618 }
619
620 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
621 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
622
623 #define for_each_cpu(cpu, mask) \
624 for ((cpu) = -1; \
625 (cpu) = cpumask_next((cpu), (mask)), \
626 (cpu) < nr_cpu_ids;)
627 #define for_each_cpu_and(cpu, mask, and) \
628 for ((cpu) = -1; \
629 (cpu) = cpumask_next_and((cpu), (mask), (and)), \
630 (cpu) < nr_cpu_ids;)
631 #endif /* SMP */
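
/*
 * Sketch of iterating a mask with the new pointer-based API; 'maskp'
 * and do_cpu() are illustrative names, not part of this header:
 *
 *	const struct cpumask *maskp = ...;
 *	unsigned int cpu;
 *
 *	if (cpumask_first(maskp) >= nr_cpu_ids)
 *		return;			// mask is empty
 *	for_each_cpu(cpu, maskp)
 *		do_cpu(cpu);
 */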
632
633 #define CPU_BITS_NONE \
634 { \
635 [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
636 }
637
638 #define CPU_BITS_CPU0 \
639 { \
640 [0] = 1UL \
641 }
642
643 /**
644 * cpumask_set_cpu - set a cpu in a cpumask
645 * @cpu: cpu number (< nr_cpu_ids)
646 * @dstp: the cpumask pointer
647 */
648 static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
649 {
650 set_bit(cpumask_check(cpu), cpumask_bits(dstp));
651 }
652
653 /**
654 * cpumask_clear_cpu - clear a cpu in a cpumask
655 * @cpu: cpu number (< nr_cpu_ids)
656 * @dstp: the cpumask pointer
657 */
658 static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
659 {
660 clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
661 }
662
663 /**
664 * cpumask_test_cpu - test for a cpu in a cpumask
665 * @cpu: cpu number (< nr_cpu_ids)
666 * @cpumask: the cpumask pointer
667 *
668 * No static inline type checking - see Subtlety (1) above.
669 */
670 #define cpumask_test_cpu(cpu, cpumask) \
671 test_bit(cpumask_check(cpu), (cpumask)->bits)
672
673 /**
674 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
675 * @cpu: cpu number (< nr_cpu_ids)
676 * @cpumask: the cpumask pointer
677 *
678 * test_and_set_bit wrapper for cpumasks.
679 */
680 static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
681 {
682 return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
683 }
684
685 /**
686 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
687 * @dstp: the cpumask pointer
688 */
689 static inline void cpumask_setall(struct cpumask *dstp)
690 {
691 bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
692 }
693
694 /**
695 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
696 * @dstp: the cpumask pointer
697 */
698 static inline void cpumask_clear(struct cpumask *dstp)
699 {
700 bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
701 }
702
703 /**
704 * cpumask_and - *dstp = *src1p & *src2p
705 * @dstp: the cpumask result
706 * @src1p: the first input
707 * @src2p: the second input
708 */
709 static inline void cpumask_and(struct cpumask *dstp,
710 const struct cpumask *src1p,
711 const struct cpumask *src2p)
712 {
713 bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
714 cpumask_bits(src2p), nr_cpumask_bits);
715 }
716
717 /**
718 * cpumask_or - *dstp = *src1p | *src2p
719 * @dstp: the cpumask result
720 * @src1p: the first input
721 * @src2p: the second input
722 */
723 static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
724 const struct cpumask *src2p)
725 {
726 bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
727 cpumask_bits(src2p), nr_cpumask_bits);
728 }
729
730 /**
731 * cpumask_xor - *dstp = *src1p ^ *src2p
732 * @dstp: the cpumask result
733 * @src1p: the first input
734 * @src2p: the second input
735 */
736 static inline void cpumask_xor(struct cpumask *dstp,
737 const struct cpumask *src1p,
738 const struct cpumask *src2p)
739 {
740 bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
741 cpumask_bits(src2p), nr_cpumask_bits);
742 }
743
744 /**
745 * cpumask_andnot - *dstp = *src1p & ~*src2p
746 * @dstp: the cpumask result
747 * @src1p: the first input
748 * @src2p: the second input
749 */
750 static inline void cpumask_andnot(struct cpumask *dstp,
751 const struct cpumask *src1p,
752 const struct cpumask *src2p)
753 {
754 bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
755 cpumask_bits(src2p), nr_cpumask_bits);
756 }
757
758 /**
759 * cpumask_complement - *dstp = ~*srcp
760 * @dstp: the cpumask result
761 * @srcp: the input to invert
762 */
763 static inline void cpumask_complement(struct cpumask *dstp,
764 const struct cpumask *srcp)
765 {
766 bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
767 nr_cpumask_bits);
768 }
769
770 /**
771 * cpumask_equal - *src1p == *src2p
772 * @src1p: the first input
773 * @src2p: the second input
774 */
775 static inline bool cpumask_equal(const struct cpumask *src1p,
776 const struct cpumask *src2p)
777 {
778 return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
779 nr_cpumask_bits);
780 }
781
782 /**
783 * cpumask_intersects - (*src1p & *src2p) != 0
784 * @src1p: the first input
785 * @src2p: the second input
786 */
787 static inline bool cpumask_intersects(const struct cpumask *src1p,
788 const struct cpumask *src2p)
789 {
790 return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
791 nr_cpumask_bits);
792 }
793
794 /**
795 * cpumask_subset - (*src1p & ~*src2p) == 0
796 * @src1p: the first input
797 * @src2p: the second input
798 */
799 static inline int cpumask_subset(const struct cpumask *src1p,
800 const struct cpumask *src2p)
801 {
802 return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
803 nr_cpumask_bits);
804 }
805
806 /**
807 * cpumask_empty - *srcp == 0
808  * @srcp: the cpumask to check that all cpus (< nr_cpu_ids) are clear.
809 */
810 static inline bool cpumask_empty(const struct cpumask *srcp)
811 {
812 return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
813 }
814
815 /**
816 * cpumask_full - *srcp == 0xFFFFFFFF...
817  * @srcp: the cpumask to check that all cpus (< nr_cpu_ids) are set.
818 */
819 static inline bool cpumask_full(const struct cpumask *srcp)
820 {
821 return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
822 }
823
824 /**
825 * cpumask_weight - Count of bits in *srcp
826 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
827 */
828 static inline unsigned int cpumask_weight(const struct cpumask *srcp)
829 {
830 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
831 }
832
833 /**
834 * cpumask_shift_right - *dstp = *srcp >> n
835 * @dstp: the cpumask result
836 * @srcp: the input to shift
837 * @n: the number of bits to shift by
838 */
839 static inline void cpumask_shift_right(struct cpumask *dstp,
840 const struct cpumask *srcp, int n)
841 {
842 bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
843 nr_cpumask_bits);
844 }
845
846 /**
847 * cpumask_shift_left - *dstp = *srcp << n
848 * @dstp: the cpumask result
849 * @srcp: the input to shift
850 * @n: the number of bits to shift by
851 */
852 static inline void cpumask_shift_left(struct cpumask *dstp,
853 const struct cpumask *srcp, int n)
854 {
855 bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
856 nr_cpumask_bits);
857 }
858
859 /**
860 * cpumask_copy - *dstp = *srcp
861 * @dstp: the result
862 * @srcp: the input cpumask
863 */
864 static inline void cpumask_copy(struct cpumask *dstp,
865 const struct cpumask *srcp)
866 {
867 bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
868 }
869
870 /**
871 * cpumask_any - pick a "random" cpu from *srcp
872 * @srcp: the input cpumask
873 *
874 * Returns >= nr_cpu_ids if no cpus set.
875 */
876 #define cpumask_any(srcp) cpumask_first(srcp)
877
878 /**
879  * cpumask_first_and - return the first cpu from *src1p & *src2p
880 * @src1p: the first input
881 * @src2p: the second input
882 *
883 * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
884 */
885 #define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
886
887 /**
888 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
889 * @mask1: the first input cpumask
890 * @mask2: the second input cpumask
891 *
892 * Returns >= nr_cpu_ids if no cpus set.
893 */
894 #define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
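
/*
 * For example, picking some online cpu out of a caller-supplied mask
 * ('maskp' is an illustrative name):
 *
 *	unsigned int cpu = cpumask_any_and(maskp, cpu_online_mask);
 *
 *	if (cpu >= nr_cpu_ids)
 *		return -ENODEV;		// no online cpu in *maskp
 */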
895
896 /**
897 * cpumask_of - the cpumask containing just a given cpu
898 * @cpu: the cpu (<= nr_cpu_ids)
899 */
900 #define cpumask_of(cpu) (get_cpu_mask(cpu))
901
902 /**
903 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
904 * @bitmap: the bitmap
905 *
906 * There are a few places where cpumask_var_t isn't appropriate and
907 * static cpumasks must be used (eg. very early boot), yet we don't
908 * expose the definition of 'struct cpumask'.
909 *
910 * This does the conversion, and can be used as a constant initializer.
911 */
912 #define to_cpumask(bitmap) \
913 ((struct cpumask *)(1 ? (bitmap) \
914 : (void *)sizeof(__check_is_bitmap(bitmap))))
915
916 static inline int __check_is_bitmap(const unsigned long *bitmap)
917 {
918 return 1;
919 }
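
/*
 * For example, a mask that must exist before any allocator is up can
 * be declared as a plain bitmap and wrapped on use (the names below
 * are illustrative):
 *
 *	static DECLARE_BITMAP(boot_cpu_bits, NR_CPUS) = CPU_BITS_CPU0;
 *	#define boot_cpu_mask to_cpumask(boot_cpu_bits)
 *
 * cpu_all_mask further down is built with exactly this pattern.
 */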
920
921 /**
922 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
923 *
924 * This will eventually be a runtime variable, depending on nr_cpu_ids.
925 */
926 static inline size_t cpumask_size(void)
927 {
928 /* FIXME: Once all cpumask assignments are eliminated, this
929 * can be nr_cpumask_bits */
930 return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
931 }
932
933 /*
934 * cpumask_var_t: struct cpumask for stack usage.
935 *
936 * Oh, the wicked games we play! In order to make kernel coding a
937 * little more difficult, we typedef cpumask_var_t to an array or a
938 * pointer: doing &mask on an array is a noop, so it still works.
939 *
940 * ie.
941 * cpumask_var_t tmpmask;
942 * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
943 * return -ENOMEM;
944 *
945 * ... use 'tmpmask' like a normal struct cpumask * ...
946 *
947 * free_cpumask_var(tmpmask);
948 */
949 #ifdef CONFIG_CPUMASK_OFFSTACK
950 typedef struct cpumask *cpumask_var_t;
951
952 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
953 void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
954 void free_cpumask_var(cpumask_var_t mask);
955 void free_bootmem_cpumask_var(cpumask_var_t mask);
956
957 #else
958 typedef struct cpumask cpumask_var_t[1];
959
960 static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
961 {
962 return true;
963 }
964
965 static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
966 {
967 }
968
969 static inline void free_cpumask_var(cpumask_var_t mask)
970 {
971 }
972
973 static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
974 {
975 }
976 #endif /* CONFIG_CPUMASK_OFFSTACK */
977
978 /* The pointer versions of the maps; these will become the primary versions. */
979 #define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map)
980 #define cpu_online_mask ((const struct cpumask *)&cpu_online_map)
981 #define cpu_present_mask ((const struct cpumask *)&cpu_present_map)
982 #define cpu_active_mask ((const struct cpumask *)&cpu_active_map)
983
984 /* It's common to want to use cpu_all_mask in struct member initializers,
985 * so it has to refer to an address rather than a pointer. */
986 extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
987 #define cpu_all_mask to_cpumask(cpu_all_bits)
988
989 /* First bits of cpu_bit_bitmap are in fact unset. */
990 #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
991
992 /* Wrappers for arch boot code to manipulate normally-constant masks */
993 static inline void set_cpu_possible(unsigned int cpu, bool possible)
994 {
995 if (possible)
996 cpumask_set_cpu(cpu, &cpu_possible_map);
997 else
998 cpumask_clear_cpu(cpu, &cpu_possible_map);
999 }
1000
1001 static inline void set_cpu_present(unsigned int cpu, bool present)
1002 {
1003 if (present)
1004 cpumask_set_cpu(cpu, &cpu_present_map);
1005 else
1006 cpumask_clear_cpu(cpu, &cpu_present_map);
1007 }
1008
1009 static inline void set_cpu_online(unsigned int cpu, bool online)
1010 {
1011 if (online)
1012 cpumask_set_cpu(cpu, &cpu_online_map);
1013 else
1014 cpumask_clear_cpu(cpu, &cpu_online_map);
1015 }
1016
1017 static inline void set_cpu_active(unsigned int cpu, bool active)
1018 {
1019 if (active)
1020 cpumask_set_cpu(cpu, &cpu_active_map);
1021 else
1022 cpumask_clear_cpu(cpu, &cpu_active_map);
1023 }
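
/*
 * Arch boot code typically uses the wrappers above roughly like this
 * ('detected_cpus' is an illustrative variable):
 *
 *	int i;
 *
 *	for (i = 0; i < detected_cpus; i++)
 *		set_cpu_possible(i, true);
 *	set_cpu_online(smp_processor_id(), true);
 */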
1024
1025 static inline void init_cpu_present(const struct cpumask *src)
1026 {
1027 cpumask_copy(&cpu_present_map, src);
1028 }
1029
1030 static inline void init_cpu_possible(const struct cpumask *src)
1031 {
1032 cpumask_copy(&cpu_possible_map, src);
1033 }
1034
1035 static inline void init_cpu_online(const struct cpumask *src)
1036 {
1037 cpumask_copy(&cpu_online_map, src);
1038 }
1039 #endif /* __LINUX_CPUMASK_H */