// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009 SUSE Linux Products GmbH
 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017 Facebook Inc.
 * Copyright (C) 2017 Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas. Percpu
 * areas are allocated in chunks which are divided into units. There is
 * a 1-to-1 mapping for units to possible cpus. These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space. Ie., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear
 * and even sparse. Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet. In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker. The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules. Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness. To make a percpu allocation memcg-aware, the
 * __GFP_ACCOUNT flag should be passed. All memcg-aware allocations share
 * one set of chunks, while all unaccounted allocations and allocations
 * performed by processes belonging to the root memory cgroup use the
 * second set.
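 *
 * For example (illustrative only), a caller that wants its percpu usage
 * charged to its memory cgroup would allocate with:
 *
 *	p = __alloc_percpu_gfp(sizeof(*p), __alignof__(*p),
 *			       GFP_KERNEL | __GFP_ACCOUNT);
 *
 * while a plain __alloc_percpu() lands in the unaccounted chunk set.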
 *
 * The allocator tries to allocate from the fullest chunk first. Each chunk
 * is managed by a bitmap with metadata blocks. The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation. Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap. The reverse mapping from page to chunk is stored in
 * the page's index. Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk
 * tracks the number of pages it is responsible for in nr_pages. Helper
 * functions are used to convert between bytes, bits, and blocks. All
 * hints are managed in bits unless explicitly stated.
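 *
 * As a worked example (assuming PCPU_MIN_ALLOC_SIZE is 4 bytes, its usual
 * value): a request for 10 bytes is rounded up to 12 bytes and consumes
 * 3 bits in the allocation map, and a chunk covering nr_pages pages
 * manages nr_pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE bits.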
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 *
 * An illustrative sketch of such arch setup follows this comment.
 */
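
/*
 * Illustrative sketch only (not part of this file): an SMP arch would
 * typically wire up the first chunk from its setup_per_cpu_areas() along
 * these lines, using the embed helper. The callbacks and sizes below are
 * placeholders, not recommendations.
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		unsigned long delta;
 *		unsigned int cpu;
 *		int rc;
 *
 *		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *					    PERCPU_DYNAMIC_RESERVE,
 *					    PAGE_SIZE, NULL,
 *					    arch_pcpu_fc_alloc,
 *					    arch_pcpu_fc_free);
 *		if (rc < 0)
 *			panic("Failed to initialize percpu areas.");
 *
 *		delta = (unsigned long)pcpu_base_addr -
 *			(unsigned long)__per_cpu_start;
 *		for_each_possible_cpu(cpu)
 *			__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 *	}
 */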

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/*
 * The slots are sorted by the size of the biggest continuous free area.
 * 1-31 bytes share the same slot.
 */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static int pcpu_free_slot __ro_after_init;
int pcpu_sidelined_slot __ro_after_init;
int pcpu_to_depopulate_slot __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists. Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk. This chunk reserves part of the first
 * chunk and serves it for reserved allocations. When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.
 * The reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock. This number is kept per unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously. We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_free_slot;
	return __pcpu_size_to_slot(size);
}
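
/*
 * For example (with PCPU_SLOT_BASE_SHIFT == 5): a 32-byte area maps to
 * slot 3 (fls(32) == 6), 64 bytes to slot 4, and 1024 bytes to slot 8,
 * while the max() clamps the smallest sizes to slot 1.
 */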

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets to address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}
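
/*
 * For example, with 4K pages and a 4 byte minimum allocation size,
 * PCPU_BITMAP_BLOCK_BITS is 1024, so chunk offset 2500 falls in block
 * index 2 at block offset 452, and pcpu_block_off_to_off(2, 452)
 * recovers 2500.
 */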

/**
 * pcpu_check_block_hint - check against the contig hint
 * @block: block of interest
 * @bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Check to see if the allocation can fit in the block's contig hint.
 * Note, a chunk uses the same hints as a block so this can also check against
 * the chunk's contig hint.
 */
static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
				  size_t align)
{
	int bit_off = ALIGN(block->contig_hint_start, align) -
		block->contig_hint_start;

	return bit_off + bits <= block->contig_hint;
}

/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit. However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint. First, does the scan hint exist. Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint). Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}

/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region. It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint. It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things. First is there a contig_hint to
		 * check. Second, have we checked this hint before by
		 * comparing the block_off. Third, is this the same as the
		 * right contig hint. In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment. This only returns if there is a valid area to be used for this
 * allocation. block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators. These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits. pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags. The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		if (move_front)
			list_move(&chunk->list, &pcpu_chunk_lists[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot. Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	/* leave isolated chunks in-place */
	if (chunk->isolated)
		return;

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}

static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	if (!chunk->isolated) {
		chunk->isolated = true;
		pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
	}
	list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
}

static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	if (chunk->isolated) {
		chunk->isolated = false;
		pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
		pcpu_chunk_relocate(chunk, -1);
	}
}

/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages now based on the premise
 * a md_block covers a page. The hint update functions recognize if a block
 * is made full or broken to calculate deltas for keeping track of free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk && !chunk->isolated)
		pcpu_nr_empty_pop_pages += nr;
}

/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}

/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area. The region [start, end) is
 * expected to be the entirety of the free area within a block. Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint. But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint. So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}
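
/*
 * A worked example of the rules above: in a 1024-bit block with
 * allocations at [0, 100) and [100, 228), the free region [228, 1024)
 * is the contig_hint (796 bits at 228). Freeing [0, 100) yields the
 * free region [0, 100): it is smaller than the contig_hint, starts
 * before contig_hint_start, and beats the empty scan_hint, so it is
 * remembered as scan_hint = 100 at offset 0 for later scans.
 */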

/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used. When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint. We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}

/*
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path as this is triggered
 * if we broke the contig_hint. In doing so, the scan_hint will be before
 * the contig_hint or after if the scan_hint == contig_hint. This cannot
 * be prevented on freeing as we want to find the largest area possibly
 * spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}

/*
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int rs, re, start;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	bitmap_for_each_clear_region(alloc_map, rs, re, start,
				     PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, rs, re);
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path. The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken. Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end). e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, -nr_empty_pages);

	if (pcpu_region_overlap(chunk_md->scan_hint_start,
				chunk_md->scan_hint_start +
				chunk_md->scan_hint,
				bit_off,
				bit_off + bits))
		chunk_md->scan_hint = 0;

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken. Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (pcpu_region_overlap(chunk_md->contig_hint_start,
				chunk_md->contig_hint_start +
				chunk_md->contig_hint,
				bit_off,
				bit_off + bits))
		pcpu_chunk_refresh_hint(chunk, false);
}

/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path. This avoids a blind block
 * refresh by making use of the block contig hints. If this fails, it scans
 * forward and backward to determine the extent of the free area. This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks. This tradeoff is to minimize iterating
 * over the block metadata to update chunk_md->contig_hint.
 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 * than the available space. If the contig hint is contained in one block, it
 * will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end). e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, then the scan to find the beginning/end of the
	 * larger free area can be avoided.
	 *
	 * start and end refer to beginning and end of the free area
	 * within their respective blocks. This is not necessarily
	 * the entire free area as it may span blocks past the beginning
	 * or end of the block.
	 */
	start = s_off;
	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
		start = s_block->contig_hint_start;
	} else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit returns the starting bit, so if the start bit
		 * is returned, that means there was no last bit and the
		 * remainder of the chunk is free.
		 */
		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
					  start);
		start = (start == l_bit) ? 0 : l_bit + 1;
	}

	end = e_off;
	if (e_off == e_block->contig_hint_start)
		end = e_block->contig_hint_start + e_block->contig_hint;
	else
		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
				    PCPU_BITMAP_BLOCK_BITS, end);

	/* update s_block */
	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;
	pcpu_block_update(s_block, start, e_off);

	/* freeing in the same block */
	if (s_index != e_index) {
		/* update e_block */
		if (end == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->scan_hint = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, nr_empty_pages);

	/*
	 * Refresh chunk metadata when the free makes a block free or spans
	 * across blocks. The contig_hint may be off by up to a page, but if
	 * the contig_hint is contained in a block, it will be accurate with
	 * the else condition below.
	 */
	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
		pcpu_chunk_refresh_hint(chunk, true);
	else
		pcpu_block_update(&chunk->chunk_md,
				  pcpu_block_off_to_off(s_index, start),
				  end);
}

/**
 * pcpu_is_populated - determines if the region is populated
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of area
 * @next_off: return value for the next offset to start searching
 *
 * For atomic allocations, check if the backing pages are populated.
 *
 * RETURNS:
 * Bool if the backing pages are populated.
 * next_index is to skip over unpopulated blocks in pcpu_find_block_fit.
 */
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
			      int *next_off)
{
	unsigned int page_start, page_end, rs, re;

	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	rs = page_start;
	bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
	if (rs >= page_end)
		return true;

	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
	return false;
}

/**
 * pcpu_find_block_fit - finds the block index to start searching
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE bytes)
 * @pop_only: use populated regions only
 *
 * Given a chunk and an allocation spec, find the offset to begin searching
 * for a free region. This iterates over the bitmap metadata blocks to
 * find an offset that will be guaranteed to fit the requirements. It is
 * not quite first fit as if the allocation does not fit in the contig hint
 * of a block or chunk, it is skipped. This errs on the side of caution
 * to prevent excess iteration. Poor alignment can cause the allocator to
 * skip over blocks and chunks that have valid free areas.
 *
 * RETURNS:
 * The offset in the bitmap to begin searching.
 * -1 if no offset is found.
 */
static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
			       size_t align, bool pop_only)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits, next_off;

	/*
	 * This is an optimization to prevent scanning by assuming if the
	 * allocation cannot fit in the global hint, there is memory pressure
	 * and creating a new chunk would happen soon.
	 */
	if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
		return -1;

	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
	bits = 0;
	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
						   &next_off))
			break;

		bit_off = next_off;
		bits = 0;
	}

	if (bit_off == pcpu_chunk_map_bits(chunk))
		return -1;

	return bit_off;
}

/*
 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
 * @map: the address to base the search on
 * @size: the bitmap size in bits
 * @start: the bitnumber to start searching at
 * @nr: the number of zeroed bits we're looking for
 * @align_mask: alignment mask for zero area
 * @largest_off: offset of the largest area skipped
 * @largest_bits: size of the largest area skipped
 *
 * The @align_mask should be one less than a power of 2.
 *
 * This is a modified version of bitmap_find_next_zero_area_off() to remember
 * the largest area that was skipped. This is imperfect, but in general is
 * good enough. The largest remembered region is the largest failed region
 * seen. This does not include anything we possibly skipped due to alignment.
 * pcpu_block_update_scan() does scan backwards to try and recover what was
 * lost to alignment. While this can cause scanning to miss earlier possible
 * free areas, smaller allocations will eventually fill those holes.
 */
static unsigned long pcpu_find_zero_area(unsigned long *map,
					 unsigned long size,
					 unsigned long start,
					 unsigned long nr,
					 unsigned long align_mask,
					 unsigned long *largest_off,
					 unsigned long *largest_bits)
{
	unsigned long index, end, i, area_off, area_bits;
again:
	index = find_next_zero_bit(map, size, start);

	/* Align allocation */
	index = __ALIGN_MASK(index, align_mask);
	area_off = index;

	end = index + nr;
	if (end > size)
		return end;
	i = find_next_bit(map, end, index);
	if (i < end) {
		area_bits = i - area_off;
		/* remember largest unused area with best alignment */
		if (area_bits > *largest_bits ||
		    (area_bits == *largest_bits && *largest_off &&
		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
			*largest_off = area_off;
			*largest_bits = area_bits;
		}

		start = i + 1;
		goto again;
	}
	return index;
}

/**
 * pcpu_alloc_area - allocates an area from a pcpu_chunk
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE)
 * @start: bit_off to start searching
 *
 * This function takes in a @start offset to begin searching to fit an
 * allocation of @alloc_bits with alignment @align. It needs to scan
 * the allocation map because if it fits within the block's contig hint,
 * @start will be block->first_free. This is an attempt to fill the
 * allocation prior to breaking the contig hint. The allocation and
 * boundary maps are updated accordingly if it confirms a valid
 * free area.
 *
 * RETURNS:
 * Allocated addr offset in @chunk on success.
 * -1 if no matching area is found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
			   size_t align, int start)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	size_t align_mask = (align) ? (align - 1) : 0;
	unsigned long area_off = 0, area_bits = 0;
	int bit_off, end, oslot;

	lockdep_assert_held(&pcpu_lock);

	oslot = pcpu_chunk_slot(chunk);

	/*
	 * Search to find a fit.
	 */
	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
		    pcpu_chunk_map_bits(chunk));
	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
				      align_mask, &area_off, &area_bits);
	if (bit_off >= end)
		return -1;

	if (area_bits)
		pcpu_block_update_scan(chunk, area_off, area_bits);

	/* update alloc map */
	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);

	/* update boundary map */
	set_bit(bit_off, chunk->bound_map);
	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
	set_bit(bit_off + alloc_bits, chunk->bound_map);

	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;

	/* update first free bit */
	if (bit_off == chunk_md->first_free)
		chunk_md->first_free = find_next_zero_bit(
					chunk->alloc_map,
					pcpu_chunk_map_bits(chunk),
					bit_off + alloc_bits);

	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);

	pcpu_chunk_relocate(chunk, oslot);

	return bit_off * PCPU_MIN_ALLOC_SIZE;
}

/**
 * pcpu_free_area - frees the corresponding offset
 * @chunk: chunk of interest
 * @off: addr offset into chunk
 *
 * This function determines the size of an allocation to free using
 * the boundary bitmap and clears the allocation map.
 *
 * RETURNS:
 * Number of freed bytes.
 */
static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits, end, oslot, freed;

	lockdep_assert_held(&pcpu_lock);
	pcpu_stats_area_dealloc(chunk);

	oslot = pcpu_chunk_slot(chunk);

	bit_off = off / PCPU_MIN_ALLOC_SIZE;

	/* find end index */
	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
			    bit_off + 1);
	bits = end - bit_off;
	bitmap_clear(chunk->alloc_map, bit_off, bits);

	freed = bits * PCPU_MIN_ALLOC_SIZE;

	/* update metadata */
	chunk->free_bytes += freed;

	/* update first free bit */
	chunk_md->first_free = min(chunk_md->first_free, bit_off);

	pcpu_block_update_hint_free(chunk, bit_off, bits);

	pcpu_chunk_relocate(chunk, oslot);

	return freed;
}

static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
{
	block->scan_hint = 0;
	block->contig_hint = nr_bits;
	block->left_free = nr_bits;
	block->right_free = nr_bits;
	block->first_free = 0;
	block->nr_bits = nr_bits;
}

static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
{
	struct pcpu_block_md *md_block;

	/* init the chunk's block */
	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));

	for (md_block = chunk->md_blocks;
	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
	     md_block++)
		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
}

/**
 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
 * @tmp_addr: the start of the region served
 * @map_size: size of the region served
 *
 * This is responsible for creating the chunks that serve the first chunk. The
 * base_addr is page aligned down of @tmp_addr while the region end is page
 * aligned up. Offsets are kept track of to determine the region served. All
 * this is done to appease the bitmap allocator in avoiding partial blocks.
 *
 * RETURNS:
 * Chunk serving the region at @tmp_addr of @map_size.
 */
static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
							 int map_size)
{
	struct pcpu_chunk *chunk;
	unsigned long aligned_addr, lcm_align;
	int start_offset, offset_bits, region_size, region_bits;
	size_t alloc_size;

	/* region calculations */
	aligned_addr = tmp_addr & PAGE_MASK;

	start_offset = tmp_addr - aligned_addr;

	/*
	 * Align the end of the region with the LCM of PAGE_SIZE and
	 * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of
	 * the other.
	 */
	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
	region_size = ALIGN(start_offset + map_size, lcm_align);

	/* allocate chunk */
	alloc_size = struct_size(chunk, populated,
				 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	INIT_LIST_HEAD(&chunk->list);

	chunk->base_addr = (void *)aligned_addr;
	chunk->start_offset = start_offset;
	chunk->end_offset = region_size - chunk->start_offset - map_size;

	chunk->nr_pages = region_size >> PAGE_SHIFT;
	region_bits = pcpu_chunk_map_bits(chunk);

	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk->alloc_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size =
		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk->bound_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk->md_blocks)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

#ifdef CONFIG_MEMCG_KMEM
	/* first chunk is free to use */
	chunk->obj_cgroups = NULL;
#endif
	pcpu_init_md_blocks(chunk);

	/* manage populated page bitmap */
	chunk->immutable = true;
	bitmap_fill(chunk->populated, chunk->nr_pages);
	chunk->nr_populated = chunk->nr_pages;
	chunk->nr_empty_pop_pages = chunk->nr_pages;

	chunk->free_bytes = map_size;

	if (chunk->start_offset) {
		/* hide the beginning of the bitmap */
		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map, 0, offset_bits);
		set_bit(0, chunk->bound_map);
		set_bit(offset_bits, chunk->bound_map);

		chunk->chunk_md.first_free = offset_bits;

		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
	}

	if (chunk->end_offset) {
		/* hide the end of the bitmap */
		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map,
			   pcpu_chunk_map_bits(chunk) - offset_bits,
			   offset_bits);
		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
			chunk->bound_map);
		set_bit(region_bits, chunk->bound_map);

		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
					     - offset_bits, offset_bits);
	}

	return chunk;
}

static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
{
	struct pcpu_chunk *chunk;
	int region_bits;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->nr_pages = pcpu_unit_pages;
	region_bits = pcpu_chunk_map_bits(chunk);

	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
					   sizeof(chunk->alloc_map[0]), gfp);
	if (!chunk->alloc_map)
		goto alloc_map_fail;

	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
					   sizeof(chunk->bound_map[0]), gfp);
	if (!chunk->bound_map)
		goto bound_map_fail;

	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
					   sizeof(chunk->md_blocks[0]), gfp);
	if (!chunk->md_blocks)
		goto md_blocks_fail;

#ifdef CONFIG_MEMCG_KMEM
	if (!mem_cgroup_kmem_disabled()) {
		chunk->obj_cgroups =
			pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
					sizeof(struct obj_cgroup *), gfp);
		if (!chunk->obj_cgroups)
			goto objcg_fail;
	}
#endif

	pcpu_init_md_blocks(chunk);

	/* init metadata */
	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;

	return chunk;

#ifdef CONFIG_MEMCG_KMEM
objcg_fail:
	pcpu_mem_free(chunk->md_blocks);
#endif
md_blocks_fail:
	pcpu_mem_free(chunk->bound_map);
bound_map_fail:
	pcpu_mem_free(chunk->alloc_map);
alloc_map_fail:
	pcpu_mem_free(chunk);

	return NULL;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
#ifdef CONFIG_MEMCG_KMEM
	pcpu_mem_free(chunk->obj_cgroups);
#endif
	pcpu_mem_free(chunk->md_blocks);
	pcpu_mem_free(chunk->bound_map);
	pcpu_mem_free(chunk->alloc_map);
	pcpu_mem_free(chunk);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk. Update
 * the bookkeeping information accordingly. Must be called after each
 * successful population.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
				 int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;
	pcpu_nr_populated += nr;

	pcpu_update_empty_pages(chunk, nr);
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly. Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	pcpu_nr_populated -= nr;

	pcpu_update_empty_pages(chunk, -nr);
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together. The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_post_unmap_tlb_flush	- flush tlb for the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to physical address
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end);
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end);
static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * This is an internal function that handles all but static allocations.
 * Static percpu address values should never be passed into the allocator.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the dynamic region (first chunk)? */
	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
		return pcpu_first_chunk;

	/* is it in the reserved region? */
	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
		return pcpu_reserved_chunk;

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped. Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space. Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

#ifdef CONFIG_MEMCG_KMEM
static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
				      struct obj_cgroup **objcgp)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
		obj_cgroup_put(objcg);
		return false;
	}

	*objcgp = objcg;
	return true;
}

static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
				       struct pcpu_chunk *chunk, int off,
				       size_t size)
{
	if (!objcg)
		return;

	if (likely(chunk && chunk->obj_cgroups)) {
		chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;

		rcu_read_lock();
		mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
				size * num_possible_cpus());
		rcu_read_unlock();
	} else {
		obj_cgroup_uncharge(objcg, size * num_possible_cpus());
		obj_cgroup_put(objcg);
	}
}

static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
{
	struct obj_cgroup *objcg;

	if (unlikely(!chunk->obj_cgroups))
		return;

	objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
	if (!objcg)
		return;
	chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;

	obj_cgroup_uncharge(objcg, size * num_possible_cpus());

	rcu_read_lock();
	mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
			-(size * num_possible_cpus()));
	rcu_read_unlock();

	obj_cgroup_put(objcg);
}

#else /* CONFIG_MEMCG_KMEM */
static bool
pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
{
	return true;
}

static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
				       struct pcpu_chunk *chunk, int off,
				       size_t size)
{
}

static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
{
}
#endif /* CONFIG_MEMCG_KMEM */
1708 * pcpu_alloc - the percpu allocator
1709 * @size: size of area to allocate in bytes
1710 * @align: alignment of area (max PAGE_SIZE)
1711 * @reserved: allocate from the reserved chunk if available
1712 * @gfp: allocation flags
1714 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
1715 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1716 * then no warning will be triggered on invalid or failed allocation
1720 * Percpu pointer to the allocated area on success, NULL on failure.
1722 static void __percpu
*pcpu_alloc(size_t size
, size_t align
, bool reserved
,
1728 struct obj_cgroup
*objcg
= NULL
;
1729 static int warn_limit
= 10;
1730 struct pcpu_chunk
*chunk
, *next
;
1732 int slot
, off
, cpu
, ret
;
1733 unsigned long flags
;
1735 size_t bits
, bit_align
;
1737 gfp
= current_gfp_context(gfp
);
1738 /* whitelisted flags that can be passed to the backing allocators */
1739 pcpu_gfp
= gfp
& (GFP_KERNEL
| __GFP_NORETRY
| __GFP_NOWARN
);
1740 is_atomic
= (gfp
& GFP_KERNEL
) != GFP_KERNEL
;
1741 do_warn
= !(gfp
& __GFP_NOWARN
);
1744 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1745 * therefore alignment must be a minimum of that many bytes.
1746 * An allocation may have internal fragmentation from rounding up
1747 * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes.
1749 if (unlikely(align
< PCPU_MIN_ALLOC_SIZE
))
1750 align
= PCPU_MIN_ALLOC_SIZE
;
1752 size
= ALIGN(size
, PCPU_MIN_ALLOC_SIZE
);
1753 bits
= size
>> PCPU_MIN_ALLOC_SHIFT
;
1754 bit_align
= align
>> PCPU_MIN_ALLOC_SHIFT
;
1756 if (unlikely(!size
|| size
> PCPU_MIN_UNIT_SIZE
|| align
> PAGE_SIZE
||
1757 !is_power_of_2(align
))) {
1758 WARN(do_warn
, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1763 if (unlikely(!pcpu_memcg_pre_alloc_hook(size
, gfp
, &objcg
)))
1768 * pcpu_balance_workfn() allocates memory under this mutex,
1769 * and it may wait for memory reclaim. Allow current task
1770 * to become OOM victim, in case of memory pressure.
1772 if (gfp
& __GFP_NOFAIL
) {
1773 mutex_lock(&pcpu_alloc_mutex
);
1774 } else if (mutex_lock_killable(&pcpu_alloc_mutex
)) {
1775 pcpu_memcg_post_alloc_hook(objcg
, NULL
, 0, size
);
1780 spin_lock_irqsave(&pcpu_lock
, flags
);
1782 /* serve reserved allocations from the reserved chunk if available */
1783 if (reserved
&& pcpu_reserved_chunk
) {
1784 chunk
= pcpu_reserved_chunk
;
1786 off
= pcpu_find_block_fit(chunk
, bits
, bit_align
, is_atomic
);
1788 err
= "alloc from reserved chunk failed";
1792 off
= pcpu_alloc_area(chunk
, bits
, bit_align
, off
);
1796 err
= "alloc from reserved chunk failed";
1801 /* search through normal chunks */
1802 for (slot
= pcpu_size_to_slot(size
); slot
<= pcpu_free_slot
; slot
++) {
1803 list_for_each_entry_safe(chunk
, next
, &pcpu_chunk_lists
[slot
],
1805 off
= pcpu_find_block_fit(chunk
, bits
, bit_align
,
1808 if (slot
< PCPU_SLOT_FAIL_THRESHOLD
)
1809 pcpu_chunk_move(chunk
, 0);
1813 off
= pcpu_alloc_area(chunk
, bits
, bit_align
, off
);
1815 pcpu_reintegrate_chunk(chunk
);
1821 spin_unlock_irqrestore(&pcpu_lock
, flags
);
1824 * No space left. Create a new chunk. We don't want multiple
1825 * tasks to create chunks simultaneously. Serialize and create iff
1826 * there's still no empty chunk after grabbing the mutex.
1829 err
= "atomic alloc failed, no space left";
1833 if (list_empty(&pcpu_chunk_lists
[pcpu_free_slot
])) {
1834 chunk
= pcpu_create_chunk(pcpu_gfp
);
1836 err
= "failed to allocate new chunk";
1840 spin_lock_irqsave(&pcpu_lock
, flags
);
1841 pcpu_chunk_relocate(chunk
, -1);
1843 spin_lock_irqsave(&pcpu_lock
, flags
);
1849 pcpu_stats_area_alloc(chunk
, size
);
1850 spin_unlock_irqrestore(&pcpu_lock
, flags
);
1852 /* populate if not all pages are already there */
1854 unsigned int page_start
, page_end
, rs
, re
;
1856 page_start
= PFN_DOWN(off
);
1857 page_end
= PFN_UP(off
+ size
);
1859 bitmap_for_each_clear_region(chunk
->populated
, rs
, re
,
1860 page_start
, page_end
) {
1861 WARN_ON(chunk
->immutable
);
1863 ret
= pcpu_populate_chunk(chunk
, rs
, re
, pcpu_gfp
);
1865 spin_lock_irqsave(&pcpu_lock
, flags
);
1867 pcpu_free_area(chunk
, off
);
1868 err
= "failed to populate";
1871 pcpu_chunk_populated(chunk
, rs
, re
);
1872 spin_unlock_irqrestore(&pcpu_lock
, flags
);
1875 mutex_unlock(&pcpu_alloc_mutex
);

	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size, gfp);

	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
				  chunk->base_addr, off, ptr);

	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);

	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

	if (!is_atomic && do_warn && warn_limit) {
		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("limit reached, disable warning\n");
	}
	if (is_atomic) {
		/* see the flag handling in pcpu_balance_workfn() */
		pcpu_atomic_alloc_failed = true;
		pcpu_schedule_balance_work();
	} else {
		mutex_unlock(&pcpu_alloc_mutex);
	}

	pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);

	return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail. If @gfp
 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
 * allocation requests.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area. Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true, GFP_KERNEL);
}

/**
 * pcpu_balance_free - manage the amount of free chunks
 * @empty_only: free chunks only if there are no populated pages
 *
 * If empty_only is %false, reclaim all fully free chunks regardless of the
 * number of populated pages. Otherwise, only reclaim chunks that have no
 * populated pages.
 *
 * CONTEXT:
 * pcpu_lock (can be dropped temporarily)
 */
static void pcpu_balance_free(bool empty_only)
{
	LIST_HEAD(to_free);
	struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
	struct pcpu_chunk *chunk, *next;

	lockdep_assert_held(&pcpu_lock);

	/*
	 * There's no reason to keep around multiple unused chunks and VM
	 * areas can be scarce. Destroy all free chunks except for one.
	 */
	list_for_each_entry_safe(chunk, next, free_head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
			continue;

		if (!empty_only || chunk->nr_empty_pop_pages == 0)
			list_move(&chunk->list, &to_free);
	}

	if (list_empty(&to_free))
		return;

	spin_unlock_irq(&pcpu_lock);
	list_for_each_entry_safe(chunk, next, &to_free, list) {
		unsigned int rs, re;

		bitmap_for_each_set_region(chunk->populated, rs, re, 0,
					   chunk->nr_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_depopulated(chunk, rs, re);
			spin_unlock_irq(&pcpu_lock);
		}
		pcpu_destroy_chunk(chunk);
		cond_resched();
	}
	spin_lock_irq(&pcpu_lock);
}

/**
 * pcpu_balance_populated - manage the amount of populated pages
 *
 * Maintain a certain amount of populated pages to satisfy atomic allocations.
 * It is possible that this is called when physical memory is scarce causing
 * OOM killer to be triggered. We should avoid doing so until an actual
 * allocation causes the failure as it is possible that requests can be
 * serviced from already backed regions.
 *
 * CONTEXT:
 * pcpu_lock (can be dropped temporarily)
 */
static void pcpu_balance_populated(void)
{
	/* gfp flags passed to underlying allocators */
	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
	struct pcpu_chunk *chunk;
	int slot, nr_to_pop, ret;

	lockdep_assert_held(&pcpu_lock);

	/*
	 * Ensure there are certain number of free populated pages for
	 * atomic allocs. Fill up from the most packed so that atomic
	 * allocs don't increase fragmentation. If atomic allocation
	 * failed previously, always populate the maximum amount. This
	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
	 * failing indefinitely; however, large atomic allocs are not
	 * something we support properly and can be highly unreliable and
	 * inefficient.
	 */
retry_pop:
	if (pcpu_atomic_alloc_failed) {
		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
		/* best effort anyway, don't worry about synchronization */
		pcpu_atomic_alloc_failed = false;
	} else {
		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
				  pcpu_nr_empty_pop_pages,
				  0, PCPU_EMPTY_POP_PAGES_HIGH);
	}

	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
		unsigned int nr_unpop = 0, rs, re;

		if (!nr_to_pop)
			break;

		list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
			nr_unpop = chunk->nr_pages - chunk->nr_populated;
			if (nr_unpop)
				break;
		}

		if (!nr_unpop)
			continue;

		/* @chunk can't go away while pcpu_alloc_mutex is held */
		bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
					     chunk->nr_pages) {
			int nr = min_t(int, re - rs, nr_to_pop);

			spin_unlock_irq(&pcpu_lock);
			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
			cond_resched();
			spin_lock_irq(&pcpu_lock);
			if (!ret) {
				nr_to_pop -= nr;
				pcpu_chunk_populated(chunk, rs, rs + nr);
			} else {
				nr_to_pop = 0;
			}

			if (!nr_to_pop)
				break;
		}
	}

	if (nr_to_pop) {
		/* ran out of chunks to populate, create a new one and retry */
		spin_unlock_irq(&pcpu_lock);
		chunk = pcpu_create_chunk(gfp);
		cond_resched();
		spin_lock_irq(&pcpu_lock);
		if (chunk) {
			pcpu_chunk_relocate(chunk, -1);
			goto retry_pop;
		}
	}
}

/**
 * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
 *
 * Scan over chunks in the depopulate list and try to release unused populated
 * pages back to the system. Depopulated chunks are sidelined to prevent
 * repopulating these pages unless required. Fully free chunks are reintegrated
 * and freed accordingly (1 is kept around). If we drop below the empty
 * populated pages threshold, reintegrate the chunk if it has empty free pages.
 * Each chunk is scanned in the reverse order to keep populated pages close to
 * the beginning of the chunk.
 *
 * CONTEXT:
 * pcpu_lock (can be dropped temporarily)
 */
static void pcpu_reclaim_populated(void)
{
	struct pcpu_chunk *chunk;
	struct pcpu_block_md *block;
	int freed_page_start, freed_page_end;
	int i, end;
	bool reintegrate;

	lockdep_assert_held(&pcpu_lock);

	/*
	 * Once a chunk is isolated to the to_depopulate list, the chunk is no
	 * longer discoverable to allocations which may populate pages. The only
	 * other accessor is the free path which only returns area back to the
	 * allocator not touching the populated bitmap.
	 */
	while (!list_empty(&pcpu_chunk_lists[pcpu_to_depopulate_slot])) {
		chunk = list_first_entry(&pcpu_chunk_lists[pcpu_to_depopulate_slot],
					 struct pcpu_chunk, list);
		WARN_ON(chunk->immutable);

		/*
		 * Scan chunk's pages in the reverse order to keep populated
		 * pages close to the beginning of the chunk.
		 */
		freed_page_start = chunk->nr_pages;
		freed_page_end = 0;
		reintegrate = false;
		for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
			/* no more work to do */
			if (chunk->nr_empty_pop_pages == 0)
				break;

			/* reintegrate chunk to prevent atomic alloc failures */
			if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
				reintegrate = true;
				goto end_chunk;
			}

			/*
			 * If the page is empty and populated, start or
			 * extend the (i, end) range. If i == 0, decrease
			 * i and perform the depopulation to cover the last
			 * (first) page in the chunk.
			 */
			block = chunk->md_blocks + i;
			if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&
			    test_bit(i, chunk->populated)) {
				if (end == -1)
					end = i;
				if (i > 0)
					continue;
				i--;
			}

			/* depopulate if there is an active range */
			if (end == -1)
				continue;

			spin_unlock_irq(&pcpu_lock);
			pcpu_depopulate_chunk(chunk, i + 1, end + 1);
			cond_resched();
			spin_lock_irq(&pcpu_lock);

			pcpu_chunk_depopulated(chunk, i + 1, end + 1);
			freed_page_start = min(freed_page_start, i + 1);
			freed_page_end = max(freed_page_end, end + 1);

			/* reset the range and continue */
			end = -1;
		}

end_chunk:
		/* batch tlb flush per chunk to amortize cost */
		if (freed_page_start < freed_page_end) {
			spin_unlock_irq(&pcpu_lock);
			pcpu_post_unmap_tlb_flush(chunk,
						  freed_page_start,
						  freed_page_end);
			cond_resched();
			spin_lock_irq(&pcpu_lock);
		}

		if (reintegrate || chunk->free_bytes == pcpu_unit_size)
			pcpu_reintegrate_chunk(chunk);
		else
			list_move_tail(&chunk->list,
				       &pcpu_chunk_lists[pcpu_sidelined_slot]);
	}
}

/**
 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @work: unused
 *
 * For each chunk type, manage the number of fully free chunks and the number of
 * populated pages. An important thing to consider is when pages are freed and
 * how they contribute to the global counts.
 */
static void pcpu_balance_workfn(struct work_struct *work)
{
	/*
	 * pcpu_balance_free() is called twice because the first time we may
	 * trim pages in the active pcpu_nr_empty_pop_pages which may cause us
	 * to grow other chunks. This then gives pcpu_reclaim_populated() time
	 * to move fully free chunks to the active list to be freed if
	 * appropriate.
	 */
	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	pcpu_balance_free(false);
	pcpu_reclaim_populated();
	pcpu_balance_populated();
	pcpu_balance_free(true);

	spin_unlock_irq(&pcpu_lock);
	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int size, off;
	bool need_balance = false;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	size = pcpu_free_area(chunk, off);

	pcpu_memcg_free_hook(chunk, off, size);

	/*
	 * If there are more than one fully free chunks, wake up grim reaper.
	 * If the chunk is isolated, it may be in the process of being
	 * reclaimed. Let reclaim manage cleaning up of that chunk.
	 */
	if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list)
			if (pos != chunk) {
				need_balance = true;
				break;
			}
	} else if (pcpu_should_reclaim_chunk(chunk)) {
		pcpu_isolate_chunk(chunk);
		need_balance = true;
	}

	trace_percpu_free_percpu(chunk->base_addr, off, ptr);

	spin_unlock_irqrestore(&pcpu_lock, flags);

	if (need_balance)
		pcpu_schedule_balance_work();
}
EXPORT_SYMBOL_GPL(free_percpu);

bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);
		void *va = (void *)addr;

		if (va >= start && va < start + static_size) {
			if (can_addr) {
				*can_addr = (unsigned long) (va - start);
				*can_addr += (unsigned long)
					per_cpu_ptr(base, get_boot_cpu_id());
			}
			return true;
		}
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area. Module
 * static percpu areas are not considered. For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
	return __is_kernel_percpu_address(addr, NULL);
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address. The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * percpu allocator has special setup for the first chunk, which currently
 * supports either embedding in linear address space or vmalloc mapping,
 * and, from the second one, the backing allocator (currently either vm or
 * km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into the
 * first chunk. But the current code reflects better how percpu allocator
 * actually works, and the verification can discover both bugs in percpu
 * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
 * code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 *
	 * The address check is against full chunk sizes. pcpu_base_addr
	 * points to the beginning of the first chunk including the
	 * static region. Assumes good intent as the first chunk may
	 * not be full (ie. < pcpu_unit_pages in size).
	 */
	first_low = (unsigned long)pcpu_base_addr +
		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
	first_high = (unsigned long)pcpu_base_addr +
		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units. The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS. It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(struct_size(ai, groups, nr_groups),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	memblock_free_early(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				pr_cont("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			pr_cont("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					pr_cont("%0*d ",
						cpu_width, gi->cpu_map[unit]);
				else
					pr_cont("%s ", empty_str);
		}
	}
	pr_cont("\n");
}

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area. This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk. This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation. This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk. The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size. This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas. Units which should be colocated are put into the
 * same group. Dynamic VM areas will be allocated according to these
 * groupings. If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * The first chunk will always contain a static and a dynamic region.
 * However, the static region is not managed by any chunk. If the first
 * chunk also contains a reserved region, it is served by two chunks -
 * one for the reserved region and one for the dynamic region. They
 * share the same vm, but use offset regions in the area allocation map.
 * The chunk serving the dynamic region is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 */
void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				   void *base_addr)
{
	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	size_t static_size, dyn_size;
	struct pcpu_chunk *chunk;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;
	int map_size;
	unsigned long tmp_addr;
	size_t alloc_size;

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("failed to initialize, %s\n", #cond);		\
		pr_emerg("cpu_possible_mask=%*pb\n",			\
			 cpumask_pr_args(cpu_possible_mask));		\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
#endif
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
	PCPU_SETUP_BUG_ON(!ai->dyn_size);
	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
	alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
	group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!group_offsets)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
	group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!group_sizes)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
	unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!unit_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
	unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!unit_off)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = struct_size(chunk, populated,
					     BITS_TO_LONGS(pcpu_unit_pages));

	pcpu_stats_save_ai(ai);

	/*
	 * Allocate chunk slots. The slots after the active slots are:
	 *   sidelined_slot - isolated, depopulated chunks
	 *   free_slot - fully free chunks
	 *   to_depopulate_slot - isolated, chunks to depopulate
	 */
	pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
	pcpu_free_slot = pcpu_sidelined_slot + 1;
	pcpu_to_depopulate_slot = pcpu_free_slot + 1;
	pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
	pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
					  sizeof(pcpu_chunk_lists[0]),
					  SMP_CACHE_BYTES);
	if (!pcpu_chunk_lists)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]));

	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_chunk_lists[i]);

	/*
	 * The end of the static region needs to be aligned with the
	 * minimum allocation size as this offsets the reserved and
	 * dynamic region. The first chunk ends page aligned by
	 * expanding the dynamic region, therefore the dynamic region
	 * can be shrunk to compensate while still staying above the
	 * base address.
	 */
	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
	dyn_size = ai->dyn_size - (static_size - ai->static_size);

	/*
	 * Initialize first chunk.
	 * If the reserved_size is non-zero, this initializes the reserved
	 * chunk. If the reserved_size is zero, the reserved chunk is NULL
	 * and the dynamic region is initialized here. The first chunk,
	 * pcpu_first_chunk, will always point to the chunk that serves
	 * the dynamic region.
	 */
	tmp_addr = (unsigned long)base_addr + static_size;
	map_size = ai->reserved_size ?: dyn_size;
	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);

	/* init dynamic chunk if necessary */
	if (ai->reserved_size) {
		pcpu_reserved_chunk = chunk;

		tmp_addr = (unsigned long)base_addr + static_size +
			   ai->reserved_size;
		map_size = dyn_size;
		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
	}

	/* link the first chunk in */
	pcpu_first_chunk = chunk;
	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* include all regions of the first chunk */
	pcpu_nr_populated += PFN_DOWN(size_sum);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(base_addr);

	/* we're done */
	pcpu_base_addr = base_addr;
}
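
/*
 * Illustrative sketch (editor's example): the expected arch-side calling
 * sequence, which pcpu_embed_first_chunk()/pcpu_page_first_chunk() below
 * wrap.  See setup_per_cpu_areas() near the end of this file for the
 * generic caller; steps elided with "..." are arch specific:
 *
 *	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, NULL);
 *	... allocate and map the first chunk, copy static data to each unit ...
 *	pcpu_setup_first_chunk(ai, base_addr);
 *	pcpu_free_alloc_info(ai);
 */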

const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warn("unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);

/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if needed by the arch config or the generic setup is going
 * to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group. The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned. On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	static struct cpumask mask __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, best_upa;	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));
	cpumask_clear(&mask);

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	/* determine the maximum # of units that can fit in an allocation */
	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
		upa--;
	max_upa = upa;

	cpumask_copy(&mask, cpu_possible_mask);

	/* group cpus according to their proximity */
	for (group = 0; !cpumask_empty(&mask); group++) {
		/* pop the group's first cpu */
		cpu = cpumask_first(&mask);
		group_map[cpu] = group;
		group_cnt[group]++;
		cpumask_clear_cpu(cpu, &mask);

		for_each_cpu(tcpu, &mask) {
			if (!cpu_distance_fn ||
			    (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
			     cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
				group_map[tcpu] = group;
				group_cnt[group]++;
				cpumask_clear_cpu(tcpu, &mask);
			}
		}
	}
	nr_groups = group;

	/*
	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
	 * Expand the unit_size until we use >= 75% of the units allocated.
	 * Related to atom_size, which could be much larger than the unit_size.
	 */
	last_allocs = INT_MAX;
	best_upa = 0;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3. The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	BUG_ON(!best_upa);
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group < nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back. The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */

#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area. Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size. Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space. Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size;
	unsigned long max_distance;
	int group, i, highest_group, rc = 0;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address & max_distance */
	highest_group = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_free(ptr);
		areas[group] = ptr;

		base = min(ptr, base);
		if (ptr > areas[highest_group])
			highest_group = group;
	}
	max_distance = areas[highest_group] - base;
	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
			max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free_areas;
#endif
	}

	/*
	 * Copy data and free unused parts. This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
	}

	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			free_fn(areas[group],
				ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free_early(__pa(areas), areas_size);
	return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator. Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc = 0;
	int upa;
	int nr_g0_units;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	upa = ai->alloc_size/ai->unit_size;
	nr_g0_units = roundup(num_possible_cpus(), upa);
	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
		pcpu_free_alloc_info(ai);
		return -EINVAL;
	}

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
	if (!pages)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      pages_size);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned int cpu = ai->groups[0].cpu_map[unit];
		for (i = 0; i < unit_pages; i++) {
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warn("failed to allocate %s page for cpu%u\n",
					psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_free(ptr);
			pages[j++] = virt_to_page(ptr);
		}
	}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
		unit_pages, psize_str, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	memblock_free_early(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup. This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location. As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else	/* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_free(fc);

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	pcpu_setup_first_chunk(ai, fc);
	pcpu_free_alloc_info(ai);
}

#endif	/* CONFIG_SMP */

/*
 * pcpu_nr_pages - calculate total number of populated backing pages
 *
 * This reflects the number of pages populated to back chunks. Metadata is
 * excluded in the number exposed in meminfo as the number of backing pages
 * scales with the number of cpus and can quickly outweigh the memory used for
 * metadata. It also keeps this calculation nice and simple.
 *
 * RETURNS:
 * Total number of populated backing pages in use by the allocator.
 */
unsigned long pcpu_nr_pages(void)
{
	return pcpu_nr_populated * pcpu_nr_units;
}

/*
 * Percpu allocator is initialized early during boot when neither slab or
 * workqueue is available. Plug async management until everything is up
 * and running.
 */
static int __init percpu_enable_async(void)
{
	pcpu_async_enabled = true;
	return 0;
}
subsys_initcall(percpu_enable_async);