// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  Ie., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware the __GFP_ACCOUNT
 * flag should be passed.  All memcg-aware allocations share one set of
 * chunks, while all unaccounted allocations, and allocations performed by
 * processes belonging to the root memory cgroup, use the second set.
 *
 * The allocator tries to allocate from the fullest chunk first.  Each chunk
 * is managed by a bitmap with metadata blocks.  The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.  All
 * hints are managed in bits unless explicitly stated.
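 *
 * As an illustrative example (not part of the original header): assuming
 * PCPU_MIN_ALLOC_SIZE of 4 bytes, a 64-byte request occupies
 *
 *	bits = 64 >> PCPU_MIN_ALLOC_SHIFT = 16
 *
 * bits in the allocation map, and assuming 4k pages so that
 * PCPU_BITMAP_BLOCK_BITS == 1024, chunk offset 2050 lands in metadata
 * block 2050 / 1024 == 2 at block offset 2050 & 1023 == 2.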
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"
/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +			\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
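
/*
 * Illustrative sketch only (not from the original source): with the default
 * SMP mapping above, the translation is a pure offset shuffle, e.g.
 *
 *	void __percpu *p = __addr_to_pcpu_ptr(addr);
 *	void *addr2      = __pcpu_ptr_to_addr(p);	// addr2 == addr
 *
 * per_cpu_ptr(p, cpu) later applies pcpu_unit_offsets[] so that the pointer
 * resolves to the unit backing that cpu.
 */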
static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;
/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per a unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;
/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}
/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}
static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
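
/*
 * Worked example (illustrative, assuming PCPU_SLOT_BASE_SHIFT == 5 as
 * defined above): a chunk with 1024 bytes of free space maps to slot
 * max(fls(1024) - 5 + 2, 1) == 8, while very small remainders bottom out
 * at slot 1, and a completely free chunk is parked in the last slot.
 */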
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}
/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}
static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}
/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}
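
/*
 * Illustrative example (assuming 4k pages so PCPU_BITMAP_BLOCK_BITS == 1024):
 * chunk offset 2050 decomposes as
 *
 *	index = pcpu_off_to_block_index(2050);	// 2
 *	off   = pcpu_off_to_block_off(2050);	// 2
 *
 * and pcpu_block_off_to_off(2, 2) recombines the pair back into 2050.
 */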
/**
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist.  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint).  Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}
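
/*
 * Sketch of the decision with illustrative numbers (not from the original
 * source): for a block with first_free == 0, scan_hint == 4 at
 * scan_hint_start == 10 and a contig_hint starting at 100, a request of
 * alloc_bits == 16 cannot fit in the scan_hint, so scanning may begin at
 * 10 + 4 == 14; a 4-bit request, which could still fit in the scan_hint,
 * falls back to first_free.
 */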
/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First is there a contig_hint to
		 * check.  Second, have we checked this hint before by
		 * comparing the block_off.  Third, is this the same as the
		 * right contig hint.  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}
/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}
/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				       \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			       \
	     (bit_off) += (bits),					       \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))
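
/*
 * Illustrative use of the iterators (a sketch, not from the original
 * source): walking every free run of a chunk under pcpu_lock looks like
 *
 *	int bit_off, bits;
 *
 *	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 *		pr_debug("free run at bit %d, %d bits long\n", bit_off, bits);
 */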
/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}
static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		struct list_head *pcpu_slot;

		pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
		if (move_front)
			list_move(&chunk->list, &pcpu_slot[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}
/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}
/**
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages now based on the premise
 * a md_block covers a page.  The hint update functions recognize if a block
 * is made full or broken to calculate deltas for keeping track of free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages += nr;
}
/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}
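
/*
 * Quick check of the predicate with concrete numbers (illustrative):
 * pcpu_region_overlap(0, 4, 2, 6) is true since [0, 4) and [2, 6) share
 * [2, 4), while pcpu_region_overlap(0, 4, 4, 8) is false because the two
 * regions only touch at 4, which is exclusive on both sides.
 */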
/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}
/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used.  When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}
/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path as this is triggered
 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 * be prevented on freeing as we want to find the largest area possibly
 * spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}
/*
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int rs, re, start;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	bitmap_for_each_clear_region(alloc_map, rs, re, start,
				     PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, rs, re);
}
/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
766 static void pcpu_block_update_hint_alloc(struct pcpu_chunk
*chunk
, int bit_off
,
769 struct pcpu_block_md
*chunk_md
= &chunk
->chunk_md
;
770 int nr_empty_pages
= 0;
771 struct pcpu_block_md
*s_block
, *e_block
, *block
;
772 int s_index
, e_index
; /* block indexes of the freed allocation */
773 int s_off
, e_off
; /* block offsets of the freed allocation */
776 * Calculate per block offsets.
777 * The calculation uses an inclusive range, but the resulting offsets
778 * are [start, end). e_index always points to the last block in the
781 s_index
= pcpu_off_to_block_index(bit_off
);
782 e_index
= pcpu_off_to_block_index(bit_off
+ bits
- 1);
783 s_off
= pcpu_off_to_block_off(bit_off
);
784 e_off
= pcpu_off_to_block_off(bit_off
+ bits
- 1) + 1;
786 s_block
= chunk
->md_blocks
+ s_index
;
787 e_block
= chunk
->md_blocks
+ e_index
;
791 * block->first_free must be updated if the allocation takes its place.
792 * If the allocation breaks the contig_hint, a scan is required to
795 if (s_block
->contig_hint
== PCPU_BITMAP_BLOCK_BITS
)
798 if (s_off
== s_block
->first_free
)
799 s_block
->first_free
= find_next_zero_bit(
800 pcpu_index_alloc_map(chunk
, s_index
),
801 PCPU_BITMAP_BLOCK_BITS
,
804 if (pcpu_region_overlap(s_block
->scan_hint_start
,
805 s_block
->scan_hint_start
+ s_block
->scan_hint
,
808 s_block
->scan_hint
= 0;
810 if (pcpu_region_overlap(s_block
->contig_hint_start
,
811 s_block
->contig_hint_start
+
812 s_block
->contig_hint
,
815 /* block contig hint is broken - scan to fix it */
817 s_block
->left_free
= 0;
818 pcpu_block_refresh_hint(chunk
, s_index
);
820 /* update left and right contig manually */
821 s_block
->left_free
= min(s_block
->left_free
, s_off
);
822 if (s_index
== e_index
)
823 s_block
->right_free
= min_t(int, s_block
->right_free
,
824 PCPU_BITMAP_BLOCK_BITS
- e_off
);
826 s_block
->right_free
= 0;
832 if (s_index
!= e_index
) {
833 if (e_block
->contig_hint
== PCPU_BITMAP_BLOCK_BITS
)
837 * When the allocation is across blocks, the end is along
838 * the left part of the e_block.
840 e_block
->first_free
= find_next_zero_bit(
841 pcpu_index_alloc_map(chunk
, e_index
),
842 PCPU_BITMAP_BLOCK_BITS
, e_off
);
844 if (e_off
== PCPU_BITMAP_BLOCK_BITS
) {
845 /* reset the block */
848 if (e_off
> e_block
->scan_hint_start
)
849 e_block
->scan_hint
= 0;
851 e_block
->left_free
= 0;
852 if (e_off
> e_block
->contig_hint_start
) {
853 /* contig hint is broken - scan to fix it */
854 pcpu_block_refresh_hint(chunk
, e_index
);
856 e_block
->right_free
=
857 min_t(int, e_block
->right_free
,
858 PCPU_BITMAP_BLOCK_BITS
- e_off
);
862 /* update in-between md_blocks */
863 nr_empty_pages
+= (e_index
- s_index
- 1);
864 for (block
= s_block
+ 1; block
< e_block
; block
++) {
865 block
->scan_hint
= 0;
866 block
->contig_hint
= 0;
867 block
->left_free
= 0;
868 block
->right_free
= 0;
873 pcpu_update_empty_pages(chunk
, -nr_empty_pages
);
875 if (pcpu_region_overlap(chunk_md
->scan_hint_start
,
876 chunk_md
->scan_hint_start
+
880 chunk_md
->scan_hint
= 0;
883 * The only time a full chunk scan is required is if the chunk
884 * contig hint is broken. Otherwise, it means a smaller space
885 * was used and therefore the chunk contig hint is still correct.
887 if (pcpu_region_overlap(chunk_md
->contig_hint_start
,
888 chunk_md
->contig_hint_start
+
889 chunk_md
->contig_hint
,
892 pcpu_chunk_refresh_hint(chunk
, false);
/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path.  This avoids a blind block
 * refresh by making use of the block contig hints.  If this fails, it scans
 * forward and backward to determine the extent of the free area.  This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks.  This tradeoff is to minimize iterating
 * over the block metadata to update chunk_md->contig_hint.
 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 * than the available space.  If the contig hint is contained in one block, it
 * will be accurate.
 */
913 static void pcpu_block_update_hint_free(struct pcpu_chunk
*chunk
, int bit_off
,
916 int nr_empty_pages
= 0;
917 struct pcpu_block_md
*s_block
, *e_block
, *block
;
918 int s_index
, e_index
; /* block indexes of the freed allocation */
919 int s_off
, e_off
; /* block offsets of the freed allocation */
920 int start
, end
; /* start and end of the whole free area */
923 * Calculate per block offsets.
924 * The calculation uses an inclusive range, but the resulting offsets
925 * are [start, end). e_index always points to the last block in the
928 s_index
= pcpu_off_to_block_index(bit_off
);
929 e_index
= pcpu_off_to_block_index(bit_off
+ bits
- 1);
930 s_off
= pcpu_off_to_block_off(bit_off
);
931 e_off
= pcpu_off_to_block_off(bit_off
+ bits
- 1) + 1;
933 s_block
= chunk
->md_blocks
+ s_index
;
934 e_block
= chunk
->md_blocks
+ e_index
;
937 * Check if the freed area aligns with the block->contig_hint.
938 * If it does, then the scan to find the beginning/end of the
939 * larger free area can be avoided.
941 * start and end refer to beginning and end of the free area
942 * within each their respective blocks. This is not necessarily
943 * the entire free area as it may span blocks past the beginning
944 * or end of the block.
947 if (s_off
== s_block
->contig_hint
+ s_block
->contig_hint_start
) {
948 start
= s_block
->contig_hint_start
;
951 * Scan backwards to find the extent of the free area.
952 * find_last_bit returns the starting bit, so if the start bit
953 * is returned, that means there was no last bit and the
954 * remainder of the chunk is free.
956 int l_bit
= find_last_bit(pcpu_index_alloc_map(chunk
, s_index
),
958 start
= (start
== l_bit
) ? 0 : l_bit
+ 1;
962 if (e_off
== e_block
->contig_hint_start
)
963 end
= e_block
->contig_hint_start
+ e_block
->contig_hint
;
965 end
= find_next_bit(pcpu_index_alloc_map(chunk
, e_index
),
966 PCPU_BITMAP_BLOCK_BITS
, end
);
969 e_off
= (s_index
== e_index
) ? end
: PCPU_BITMAP_BLOCK_BITS
;
970 if (!start
&& e_off
== PCPU_BITMAP_BLOCK_BITS
)
972 pcpu_block_update(s_block
, start
, e_off
);
974 /* freeing in the same block */
975 if (s_index
!= e_index
) {
977 if (end
== PCPU_BITMAP_BLOCK_BITS
)
979 pcpu_block_update(e_block
, 0, end
);
981 /* reset md_blocks in the middle */
982 nr_empty_pages
+= (e_index
- s_index
- 1);
983 for (block
= s_block
+ 1; block
< e_block
; block
++) {
984 block
->first_free
= 0;
985 block
->scan_hint
= 0;
986 block
->contig_hint_start
= 0;
987 block
->contig_hint
= PCPU_BITMAP_BLOCK_BITS
;
988 block
->left_free
= PCPU_BITMAP_BLOCK_BITS
;
989 block
->right_free
= PCPU_BITMAP_BLOCK_BITS
;
994 pcpu_update_empty_pages(chunk
, nr_empty_pages
);
997 * Refresh chunk metadata when the free makes a block free or spans
998 * across blocks. The contig_hint may be off by up to a page, but if
999 * the contig_hint is contained in a block, it will be accurate with
1000 * the else condition below.
1002 if (((end
- start
) >= PCPU_BITMAP_BLOCK_BITS
) || s_index
!= e_index
)
1003 pcpu_chunk_refresh_hint(chunk
, true);
1005 pcpu_block_update(&chunk
->chunk_md
,
1006 pcpu_block_off_to_off(s_index
, start
),
/**
 * pcpu_is_populated - determines if the region is populated
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of area
 * @next_off: return value for the next offset to start searching
 *
 * For atomic allocations, check if the backing pages are populated.
 *
 * RETURNS:
 * true if the backing pages are populated.
 * next_index is to skip over unpopulated blocks in pcpu_find_block_fit.
 */
1023 static bool pcpu_is_populated(struct pcpu_chunk
*chunk
, int bit_off
, int bits
,
1026 unsigned int page_start
, page_end
, rs
, re
;
1028 page_start
= PFN_DOWN(bit_off
* PCPU_MIN_ALLOC_SIZE
);
1029 page_end
= PFN_UP((bit_off
+ bits
) * PCPU_MIN_ALLOC_SIZE
);
1032 bitmap_next_clear_region(chunk
->populated
, &rs
, &re
, page_end
);
1036 *next_off
= re
* PAGE_SIZE
/ PCPU_MIN_ALLOC_SIZE
;
/**
 * pcpu_find_block_fit - finds the block index to start searching
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE bytes)
 * @pop_only: use populated regions only
 *
 * Given a chunk and an allocation spec, find the offset to begin searching
 * for a free region.  This iterates over the bitmap metadata blocks to
 * find an offset that will be guaranteed to fit the requirements.  It is
 * not quite first fit as if the allocation does not fit in the contig hint
 * of a block or chunk, it is skipped.  This errs on the side of caution
 * to prevent excess iteration.  Poor alignment can cause the allocator to
 * skip over blocks and chunks that have valid free areas.
 *
 * RETURNS:
 * The offset in the bitmap to begin searching.
 * -1 if no offset is found.
 */
1059 static int pcpu_find_block_fit(struct pcpu_chunk
*chunk
, int alloc_bits
,
1060 size_t align
, bool pop_only
)
1062 struct pcpu_block_md
*chunk_md
= &chunk
->chunk_md
;
1063 int bit_off
, bits
, next_off
;
1066 * Check to see if the allocation can fit in the chunk's contig hint.
1067 * This is an optimization to prevent scanning by assuming if it
1068 * cannot fit in the global hint, there is memory pressure and creating
1069 * a new chunk would happen soon.
1071 bit_off
= ALIGN(chunk_md
->contig_hint_start
, align
) -
1072 chunk_md
->contig_hint_start
;
1073 if (bit_off
+ alloc_bits
> chunk_md
->contig_hint
)
1076 bit_off
= pcpu_next_hint(chunk_md
, alloc_bits
);
1078 pcpu_for_each_fit_region(chunk
, alloc_bits
, align
, bit_off
, bits
) {
1079 if (!pop_only
|| pcpu_is_populated(chunk
, bit_off
, bits
,
1087 if (bit_off
== pcpu_chunk_map_bits(chunk
))
/**
 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
 * @map: the address to base the search on
 * @size: the bitmap size in bits
 * @start: the bitnumber to start searching at
 * @nr: the number of zeroed bits we're looking for
 * @align_mask: alignment mask for zero area
 * @largest_off: offset of the largest area skipped
 * @largest_bits: size of the largest area skipped
 *
 * The @align_mask should be one less than a power of 2.
 *
 * This is a modified version of bitmap_find_next_zero_area_off() to remember
 * the largest area that was skipped.  This is imperfect, but in general is
 * good enough.  The largest remembered region is the largest failed region
 * seen.  This does not include anything we possibly skipped due to alignment.
 * pcpu_block_update_scan() does scan backwards to try and recover what was
 * lost to alignment.  While this can cause scanning to miss earlier possible
 * free areas, smaller allocations will eventually fill those holes.
 */
1113 static unsigned long pcpu_find_zero_area(unsigned long *map
,
1115 unsigned long start
,
1117 unsigned long align_mask
,
1118 unsigned long *largest_off
,
1119 unsigned long *largest_bits
)
1121 unsigned long index
, end
, i
, area_off
, area_bits
;
1123 index
= find_next_zero_bit(map
, size
, start
);
1125 /* Align allocation */
1126 index
= __ALIGN_MASK(index
, align_mask
);
1132 i
= find_next_bit(map
, end
, index
);
1134 area_bits
= i
- area_off
;
1135 /* remember largest unused area with best alignment */
1136 if (area_bits
> *largest_bits
||
1137 (area_bits
== *largest_bits
&& *largest_off
&&
1138 (!area_off
|| __ffs(area_off
) > __ffs(*largest_off
)))) {
1139 *largest_off
= area_off
;
1140 *largest_bits
= area_bits
;
/**
 * pcpu_alloc_area - allocates an area from a pcpu_chunk
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE)
 * @start: bit_off to start searching
 *
 * This function takes in a @start offset to begin searching to fit an
 * allocation of @alloc_bits with alignment @align.  It needs to scan
 * the allocation map because if it fits within the block's contig hint,
 * @start will be block->first_free.  This is an attempt to fill the
 * allocation prior to breaking the contig hint.  The allocation and
 * boundary maps are updated accordingly if it confirms a valid
 * free area.
 *
 * RETURNS:
 * Allocated addr offset in @chunk on success.
 * -1 if no matching area is found.
 */
1168 static int pcpu_alloc_area(struct pcpu_chunk
*chunk
, int alloc_bits
,
1169 size_t align
, int start
)
1171 struct pcpu_block_md
*chunk_md
= &chunk
->chunk_md
;
1172 size_t align_mask
= (align
) ? (align
- 1) : 0;
1173 unsigned long area_off
= 0, area_bits
= 0;
1174 int bit_off
, end
, oslot
;
1176 lockdep_assert_held(&pcpu_lock
);
1178 oslot
= pcpu_chunk_slot(chunk
);
1181 * Search to find a fit.
1183 end
= min_t(int, start
+ alloc_bits
+ PCPU_BITMAP_BLOCK_BITS
,
1184 pcpu_chunk_map_bits(chunk
));
1185 bit_off
= pcpu_find_zero_area(chunk
->alloc_map
, end
, start
, alloc_bits
,
1186 align_mask
, &area_off
, &area_bits
);
1191 pcpu_block_update_scan(chunk
, area_off
, area_bits
);
1193 /* update alloc map */
1194 bitmap_set(chunk
->alloc_map
, bit_off
, alloc_bits
);
1196 /* update boundary map */
1197 set_bit(bit_off
, chunk
->bound_map
);
1198 bitmap_clear(chunk
->bound_map
, bit_off
+ 1, alloc_bits
- 1);
1199 set_bit(bit_off
+ alloc_bits
, chunk
->bound_map
);
1201 chunk
->free_bytes
-= alloc_bits
* PCPU_MIN_ALLOC_SIZE
;
1203 /* update first free bit */
1204 if (bit_off
== chunk_md
->first_free
)
1205 chunk_md
->first_free
= find_next_zero_bit(
1207 pcpu_chunk_map_bits(chunk
),
1208 bit_off
+ alloc_bits
);
1210 pcpu_block_update_hint_alloc(chunk
, bit_off
, alloc_bits
);
1212 pcpu_chunk_relocate(chunk
, oslot
);
1214 return bit_off
* PCPU_MIN_ALLOC_SIZE
;
/**
 * pcpu_free_area - frees the corresponding offset
 * @chunk: chunk of interest
 * @off: addr offset into chunk
 *
 * This function determines the size of an allocation to free using
 * the boundary bitmap and clears the allocation map.
 *
 * RETURNS:
 * Number of freed bytes.
 */
1228 static int pcpu_free_area(struct pcpu_chunk
*chunk
, int off
)
1230 struct pcpu_block_md
*chunk_md
= &chunk
->chunk_md
;
1231 int bit_off
, bits
, end
, oslot
, freed
;
1233 lockdep_assert_held(&pcpu_lock
);
1234 pcpu_stats_area_dealloc(chunk
);
1236 oslot
= pcpu_chunk_slot(chunk
);
1238 bit_off
= off
/ PCPU_MIN_ALLOC_SIZE
;
1240 /* find end index */
1241 end
= find_next_bit(chunk
->bound_map
, pcpu_chunk_map_bits(chunk
),
1243 bits
= end
- bit_off
;
1244 bitmap_clear(chunk
->alloc_map
, bit_off
, bits
);
1246 freed
= bits
* PCPU_MIN_ALLOC_SIZE
;
1248 /* update metadata */
1249 chunk
->free_bytes
+= freed
;
1251 /* update first free bit */
1252 chunk_md
->first_free
= min(chunk_md
->first_free
, bit_off
);
1254 pcpu_block_update_hint_free(chunk
, bit_off
, bits
);
1256 pcpu_chunk_relocate(chunk
, oslot
);
static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
{
	block->scan_hint = 0;
	block->contig_hint = nr_bits;
	block->left_free = nr_bits;
	block->right_free = nr_bits;
	block->first_free = 0;
	block->nr_bits = nr_bits;
}

static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
{
	struct pcpu_block_md *md_block;

	/* init the chunk's block */
	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));

	for (md_block = chunk->md_blocks;
	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
	     md_block++)
		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
}
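
/*
 * For intuition (illustrative, assuming PCPU_BITMAP_BLOCK_BITS == 1024): a
 * freshly initialized block reports
 *
 *	contig_hint == left_free == right_free == 1024, first_free == 0,
 *
 * i.e. one page-sized free run, which is the state the hint update and
 * empty page accounting code above starts from.
 */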
/**
 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
 * @tmp_addr: the start of the region served
 * @map_size: size of the region served
 *
 * This is responsible for creating the chunks that serve the first chunk.  The
 * base_addr is page aligned down of @tmp_addr while the region end is page
 * aligned up.  Offsets are kept track of to determine the region served. All
 * this is done to appease the bitmap allocator in avoiding partial blocks.
 *
 * RETURNS:
 * Chunk serving the region at @tmp_addr of @map_size.
 */
1297 static struct pcpu_chunk
* __init
pcpu_alloc_first_chunk(unsigned long tmp_addr
,
1300 struct pcpu_chunk
*chunk
;
1301 unsigned long aligned_addr
, lcm_align
;
1302 int start_offset
, offset_bits
, region_size
, region_bits
;
1305 /* region calculations */
1306 aligned_addr
= tmp_addr
& PAGE_MASK
;
1308 start_offset
= tmp_addr
- aligned_addr
;
1311 * Align the end of the region with the LCM of PAGE_SIZE and
1312 * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of
1315 lcm_align
= lcm(PAGE_SIZE
, PCPU_BITMAP_BLOCK_SIZE
);
1316 region_size
= ALIGN(start_offset
+ map_size
, lcm_align
);
1318 /* allocate chunk */
1319 alloc_size
= struct_size(chunk
, populated
,
1320 BITS_TO_LONGS(region_size
>> PAGE_SHIFT
));
1321 chunk
= memblock_alloc(alloc_size
, SMP_CACHE_BYTES
);
1323 panic("%s: Failed to allocate %zu bytes\n", __func__
,
1326 INIT_LIST_HEAD(&chunk
->list
);
1328 chunk
->base_addr
= (void *)aligned_addr
;
1329 chunk
->start_offset
= start_offset
;
1330 chunk
->end_offset
= region_size
- chunk
->start_offset
- map_size
;
1332 chunk
->nr_pages
= region_size
>> PAGE_SHIFT
;
1333 region_bits
= pcpu_chunk_map_bits(chunk
);
1335 alloc_size
= BITS_TO_LONGS(region_bits
) * sizeof(chunk
->alloc_map
[0]);
1336 chunk
->alloc_map
= memblock_alloc(alloc_size
, SMP_CACHE_BYTES
);
1337 if (!chunk
->alloc_map
)
1338 panic("%s: Failed to allocate %zu bytes\n", __func__
,
1342 BITS_TO_LONGS(region_bits
+ 1) * sizeof(chunk
->bound_map
[0]);
1343 chunk
->bound_map
= memblock_alloc(alloc_size
, SMP_CACHE_BYTES
);
1344 if (!chunk
->bound_map
)
1345 panic("%s: Failed to allocate %zu bytes\n", __func__
,
1348 alloc_size
= pcpu_chunk_nr_blocks(chunk
) * sizeof(chunk
->md_blocks
[0]);
1349 chunk
->md_blocks
= memblock_alloc(alloc_size
, SMP_CACHE_BYTES
);
1350 if (!chunk
->md_blocks
)
1351 panic("%s: Failed to allocate %zu bytes\n", __func__
,
1354 #ifdef CONFIG_MEMCG_KMEM
1355 /* first chunk isn't memcg-aware */
1356 chunk
->obj_cgroups
= NULL
;
1358 pcpu_init_md_blocks(chunk
);
1360 /* manage populated page bitmap */
1361 chunk
->immutable
= true;
1362 bitmap_fill(chunk
->populated
, chunk
->nr_pages
);
1363 chunk
->nr_populated
= chunk
->nr_pages
;
1364 chunk
->nr_empty_pop_pages
= chunk
->nr_pages
;
1366 chunk
->free_bytes
= map_size
;
1368 if (chunk
->start_offset
) {
1369 /* hide the beginning of the bitmap */
1370 offset_bits
= chunk
->start_offset
/ PCPU_MIN_ALLOC_SIZE
;
1371 bitmap_set(chunk
->alloc_map
, 0, offset_bits
);
1372 set_bit(0, chunk
->bound_map
);
1373 set_bit(offset_bits
, chunk
->bound_map
);
1375 chunk
->chunk_md
.first_free
= offset_bits
;
1377 pcpu_block_update_hint_alloc(chunk
, 0, offset_bits
);
1380 if (chunk
->end_offset
) {
1381 /* hide the end of the bitmap */
1382 offset_bits
= chunk
->end_offset
/ PCPU_MIN_ALLOC_SIZE
;
1383 bitmap_set(chunk
->alloc_map
,
1384 pcpu_chunk_map_bits(chunk
) - offset_bits
,
1386 set_bit((start_offset
+ map_size
) / PCPU_MIN_ALLOC_SIZE
,
1388 set_bit(region_bits
, chunk
->bound_map
);
1390 pcpu_block_update_hint_alloc(chunk
, pcpu_chunk_map_bits(chunk
)
1391 - offset_bits
, offset_bits
);
1397 static struct pcpu_chunk
*pcpu_alloc_chunk(enum pcpu_chunk_type type
, gfp_t gfp
)
1399 struct pcpu_chunk
*chunk
;
1402 chunk
= pcpu_mem_zalloc(pcpu_chunk_struct_size
, gfp
);
1406 INIT_LIST_HEAD(&chunk
->list
);
1407 chunk
->nr_pages
= pcpu_unit_pages
;
1408 region_bits
= pcpu_chunk_map_bits(chunk
);
1410 chunk
->alloc_map
= pcpu_mem_zalloc(BITS_TO_LONGS(region_bits
) *
1411 sizeof(chunk
->alloc_map
[0]), gfp
);
1412 if (!chunk
->alloc_map
)
1413 goto alloc_map_fail
;
1415 chunk
->bound_map
= pcpu_mem_zalloc(BITS_TO_LONGS(region_bits
+ 1) *
1416 sizeof(chunk
->bound_map
[0]), gfp
);
1417 if (!chunk
->bound_map
)
1418 goto bound_map_fail
;
1420 chunk
->md_blocks
= pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk
) *
1421 sizeof(chunk
->md_blocks
[0]), gfp
);
1422 if (!chunk
->md_blocks
)
1423 goto md_blocks_fail
;
1425 #ifdef CONFIG_MEMCG_KMEM
1426 if (pcpu_is_memcg_chunk(type
)) {
1427 chunk
->obj_cgroups
=
1428 pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk
) *
1429 sizeof(struct obj_cgroup
*), gfp
);
1430 if (!chunk
->obj_cgroups
)
1435 pcpu_init_md_blocks(chunk
);
1438 chunk
->free_bytes
= chunk
->nr_pages
* PAGE_SIZE
;
1442 #ifdef CONFIG_MEMCG_KMEM
1444 pcpu_mem_free(chunk
->md_blocks
);
1447 pcpu_mem_free(chunk
->bound_map
);
1449 pcpu_mem_free(chunk
->alloc_map
);
1451 pcpu_mem_free(chunk
);
static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;

#ifdef CONFIG_MEMCG_KMEM
	pcpu_mem_free(chunk->obj_cgroups);
#endif
	pcpu_mem_free(chunk->md_blocks);
	pcpu_mem_free(chunk->bound_map);
	pcpu_mem_free(chunk->alloc_map);
	pcpu_mem_free(chunk);
}
/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 *
 * If this is @for_alloc, do not increment pcpu_nr_empty_pop_pages because it
 * is to serve an allocation in that area.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
				 int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;
	pcpu_nr_populated += nr;

	pcpu_update_empty_pages(chunk, nr);
}
/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	pcpu_nr_populated -= nr;

	pcpu_update_empty_pages(chunk, -nr);
}
/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to physical address
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end);
static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
					    gfp_t gfp);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif
/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * This is an internal function that handles all but static allocations.
 * Static percpu address values should never be passed into the allocator.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the dynamic region (first chunk)? */
	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
		return pcpu_first_chunk;

	/* is it in the reserved region? */
	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
		return pcpu_reserved_chunk;

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}
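
/*
 * Illustrative sketch (not from the original source) of how the free path
 * uses this: free_percpu() first converts the percpu pointer back to a
 * kernel address and then locates its chunk, roughly
 *
 *	void *addr = __pcpu_ptr_to_addr(ptr);
 *	struct pcpu_chunk *chunk = pcpu_chunk_addr_search(addr);
 *	int off = addr - chunk->base_addr;
 */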
1582 #ifdef CONFIG_MEMCG_KMEM
1583 static enum pcpu_chunk_type
pcpu_memcg_pre_alloc_hook(size_t size
, gfp_t gfp
,
1584 struct obj_cgroup
**objcgp
)
1586 struct obj_cgroup
*objcg
;
1588 if (!memcg_kmem_enabled() || !(gfp
& __GFP_ACCOUNT
))
1589 return PCPU_CHUNK_ROOT
;
1591 objcg
= get_obj_cgroup_from_current();
1593 return PCPU_CHUNK_ROOT
;
1595 if (obj_cgroup_charge(objcg
, gfp
, size
* num_possible_cpus())) {
1596 obj_cgroup_put(objcg
);
1597 return PCPU_FAIL_ALLOC
;
1601 return PCPU_CHUNK_MEMCG
;
1604 static void pcpu_memcg_post_alloc_hook(struct obj_cgroup
*objcg
,
1605 struct pcpu_chunk
*chunk
, int off
,
1612 chunk
->obj_cgroups
[off
>> PCPU_MIN_ALLOC_SHIFT
] = objcg
;
1615 mod_memcg_state(obj_cgroup_memcg(objcg
), MEMCG_PERCPU_B
,
1616 size
* num_possible_cpus());
1619 obj_cgroup_uncharge(objcg
, size
* num_possible_cpus());
1620 obj_cgroup_put(objcg
);
1624 static void pcpu_memcg_free_hook(struct pcpu_chunk
*chunk
, int off
, size_t size
)
1626 struct obj_cgroup
*objcg
;
1628 if (!pcpu_is_memcg_chunk(pcpu_chunk_type(chunk
)))
1631 objcg
= chunk
->obj_cgroups
[off
>> PCPU_MIN_ALLOC_SHIFT
];
1632 chunk
->obj_cgroups
[off
>> PCPU_MIN_ALLOC_SHIFT
] = NULL
;
1634 obj_cgroup_uncharge(objcg
, size
* num_possible_cpus());
1637 mod_memcg_state(obj_cgroup_memcg(objcg
), MEMCG_PERCPU_B
,
1638 -(size
* num_possible_cpus()));
1641 obj_cgroup_put(objcg
);
1644 #else /* CONFIG_MEMCG_KMEM */
1645 static enum pcpu_chunk_type
1646 pcpu_memcg_pre_alloc_hook(size_t size
, gfp_t gfp
, struct obj_cgroup
**objcgp
)
1648 return PCPU_CHUNK_ROOT
;
1651 static void pcpu_memcg_post_alloc_hook(struct obj_cgroup
*objcg
,
1652 struct pcpu_chunk
*chunk
, int off
,
1657 static void pcpu_memcg_free_hook(struct pcpu_chunk
*chunk
, int off
, size_t size
)
1660 #endif /* CONFIG_MEMCG_KMEM */
/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.  If @gfp has __GFP_NOWARN
 * then no warning will be triggered on invalid or failed allocation
 * requests.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
1677 static void __percpu
*pcpu_alloc(size_t size
, size_t align
, bool reserved
,
1683 enum pcpu_chunk_type type
;
1684 struct list_head
*pcpu_slot
;
1685 struct obj_cgroup
*objcg
= NULL
;
1686 static int warn_limit
= 10;
1687 struct pcpu_chunk
*chunk
, *next
;
1689 int slot
, off
, cpu
, ret
;
1690 unsigned long flags
;
1692 size_t bits
, bit_align
;
1694 gfp
= current_gfp_context(gfp
);
1695 /* whitelisted flags that can be passed to the backing allocators */
1696 pcpu_gfp
= gfp
& (GFP_KERNEL
| __GFP_NORETRY
| __GFP_NOWARN
);
1697 is_atomic
= (gfp
& GFP_KERNEL
) != GFP_KERNEL
;
1698 do_warn
= !(gfp
& __GFP_NOWARN
);
1701 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1702 * therefore alignment must be a minimum of that many bytes.
1703 * An allocation may have internal fragmentation from rounding up
1704 * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes.
1706 if (unlikely(align
< PCPU_MIN_ALLOC_SIZE
))
1707 align
= PCPU_MIN_ALLOC_SIZE
;
1709 size
= ALIGN(size
, PCPU_MIN_ALLOC_SIZE
);
1710 bits
= size
>> PCPU_MIN_ALLOC_SHIFT
;
1711 bit_align
= align
>> PCPU_MIN_ALLOC_SHIFT
;
1713 if (unlikely(!size
|| size
> PCPU_MIN_UNIT_SIZE
|| align
> PAGE_SIZE
||
1714 !is_power_of_2(align
))) {
1715 WARN(do_warn
, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1720 type
= pcpu_memcg_pre_alloc_hook(size
, gfp
, &objcg
);
1721 if (unlikely(type
== PCPU_FAIL_ALLOC
))
1723 pcpu_slot
= pcpu_chunk_list(type
);
1727 * pcpu_balance_workfn() allocates memory under this mutex,
1728 * and it may wait for memory reclaim. Allow current task
1729 * to become OOM victim, in case of memory pressure.
1731 if (gfp
& __GFP_NOFAIL
) {
1732 mutex_lock(&pcpu_alloc_mutex
);
1733 } else if (mutex_lock_killable(&pcpu_alloc_mutex
)) {
1734 pcpu_memcg_post_alloc_hook(objcg
, NULL
, 0, size
);
1739 spin_lock_irqsave(&pcpu_lock
, flags
);
1741 /* serve reserved allocations from the reserved chunk if available */
1742 if (reserved
&& pcpu_reserved_chunk
) {
1743 chunk
= pcpu_reserved_chunk
;
1745 off
= pcpu_find_block_fit(chunk
, bits
, bit_align
, is_atomic
);
1747 err
= "alloc from reserved chunk failed";
1751 off
= pcpu_alloc_area(chunk
, bits
, bit_align
, off
);
1755 err
= "alloc from reserved chunk failed";
1760 /* search through normal chunks */
1761 for (slot
= pcpu_size_to_slot(size
); slot
< pcpu_nr_slots
; slot
++) {
1762 list_for_each_entry_safe(chunk
, next
, &pcpu_slot
[slot
], list
) {
1763 off
= pcpu_find_block_fit(chunk
, bits
, bit_align
,
1766 if (slot
< PCPU_SLOT_FAIL_THRESHOLD
)
1767 pcpu_chunk_move(chunk
, 0);
1771 off
= pcpu_alloc_area(chunk
, bits
, bit_align
, off
);
1778 spin_unlock_irqrestore(&pcpu_lock
, flags
);
1781 * No space left. Create a new chunk. We don't want multiple
1782 * tasks to create chunks simultaneously. Serialize and create iff
1783 * there's still no empty chunk after grabbing the mutex.
1786 err
= "atomic alloc failed, no space left";
1790 if (list_empty(&pcpu_slot
[pcpu_nr_slots
- 1])) {
1791 chunk
= pcpu_create_chunk(type
, pcpu_gfp
);
1793 err
= "failed to allocate new chunk";
1797 spin_lock_irqsave(&pcpu_lock
, flags
);
1798 pcpu_chunk_relocate(chunk
, -1);
1800 spin_lock_irqsave(&pcpu_lock
, flags
);
1806 pcpu_stats_area_alloc(chunk
, size
);
1807 spin_unlock_irqrestore(&pcpu_lock
, flags
);
1809 /* populate if not all pages are already there */
1811 unsigned int page_start
, page_end
, rs
, re
;
1813 page_start
= PFN_DOWN(off
);
1814 page_end
= PFN_UP(off
+ size
);
1816 bitmap_for_each_clear_region(chunk
->populated
, rs
, re
,
1817 page_start
, page_end
) {
1818 WARN_ON(chunk
->immutable
);
1820 ret
= pcpu_populate_chunk(chunk
, rs
, re
, pcpu_gfp
);
1822 spin_lock_irqsave(&pcpu_lock
, flags
);
1824 pcpu_free_area(chunk
, off
);
1825 err
= "failed to populate";
1828 pcpu_chunk_populated(chunk
, rs
, re
);
1829 spin_unlock_irqrestore(&pcpu_lock
, flags
);
1832 mutex_unlock(&pcpu_alloc_mutex
);
1835 if (pcpu_nr_empty_pop_pages
< PCPU_EMPTY_POP_PAGES_LOW
)
1836 pcpu_schedule_balance_work();
1838 /* clear the areas and return address relative to base address */
1839 for_each_possible_cpu(cpu
)
1840 memset((void *)pcpu_chunk_addr(chunk
, cpu
, 0) + off
, 0, size
);
1842 ptr
= __addr_to_pcpu_ptr(chunk
->base_addr
+ off
);
1843 kmemleak_alloc_percpu(ptr
, size
, gfp
);
1845 trace_percpu_alloc_percpu(reserved
, is_atomic
, size
, align
,
1846 chunk
->base_addr
, off
, ptr
);
1848 pcpu_memcg_post_alloc_hook(objcg
, chunk
, off
, size
);
1853 spin_unlock_irqrestore(&pcpu_lock
, flags
);
1855 trace_percpu_alloc_percpu_fail(reserved
, is_atomic
, size
, align
);
1857 if (!is_atomic
&& do_warn
&& warn_limit
) {
1858 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1859 size
, align
, is_atomic
, err
);
1862 pr_info("limit reached, disable warning\n");
	/* see the flag handling in pcpu_balance_workfn() */
1866 pcpu_atomic_alloc_failed
= true;
1867 pcpu_schedule_balance_work();
1869 mutex_unlock(&pcpu_alloc_mutex
);
1872 pcpu_memcg_post_alloc_hook(objcg
, NULL
, 0, size
);
/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.  If @gfp
 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
 * allocation requests.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
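
/*
 * Illustrative sketch (not part of the allocator): a caller that cannot
 * sleep might pair GFP_NOWAIT with __GFP_NOWARN and fall back gracefully
 * on failure.  The "refcnt" variable below is hypothetical.
 *
 *	unsigned int __percpu *refcnt;
 *
 *	refcnt = __alloc_percpu_gfp(sizeof(*refcnt), __alignof__(*refcnt),
 *				    GFP_NOWAIT | __GFP_NOWARN);
 *	if (!refcnt)
 *		return -ENOMEM;
 *	this_cpu_inc(*refcnt);
 */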
/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true, GFP_KERNEL);
}
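
/*
 * Illustrative sketch (not part of the allocator): the module loader is
 * the main user of the reserved region.  A hypothetical caller carving
 * out space for a module's static percpu variables might do:
 *
 *	void __percpu *mod_percpu;
 *
 *	mod_percpu = __alloc_reserved_percpu(mod_percpu_size, mod_percpu_align);
 *	if (!mod_percpu)
 *		return -ENOMEM;
 */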
/**
 * __pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @type: chunk type
 *
 * Reclaim all fully free chunks except for the first one.  This is also
 * responsible for maintaining the pool of empty populated pages.  However,
 * it is possible that this is called when physical memory is scarce causing
 * OOM killer to be triggered.  We should avoid doing so until an actual
 * allocation causes the failure as it is possible that requests can be
 * serviced from already backed regions.
 */
static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
{
	/* gfp flags passed to underlying allocators */
	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
	LIST_HEAD(to_free);
	struct list_head *pcpu_slot = pcpu_chunk_list(type);
	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;
	int slot, nr_to_pop, ret;

	/*
	 * There's no reason to keep around multiple unused chunks and VM
	 * areas can be scarce.  Destroy all free chunks except for one.
	 */
	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, free_head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &to_free);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &to_free, list) {
		unsigned int rs, re;

		bitmap_for_each_set_region(chunk->populated, rs, re, 0,
					   chunk->nr_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_depopulated(chunk, rs, re);
			spin_unlock_irq(&pcpu_lock);
		}
		pcpu_destroy_chunk(chunk);
	}

	/*
	 * Ensure there are certain number of free populated pages for
	 * atomic allocs.  Fill up from the most packed so that atomic
	 * allocs don't increase fragmentation.  If atomic allocation
	 * failed previously, always populate the maximum amount.  This
	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
	 * failing indefinitely; however, large atomic allocs are not
	 * something we support properly and can be highly unreliable and
	 * inefficient.
	 */
retry_pop:
	if (pcpu_atomic_alloc_failed) {
		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
		/* best effort anyway, don't worry about synchronization */
		pcpu_atomic_alloc_failed = false;
	} else {
		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
				  pcpu_nr_empty_pop_pages,
				  0, PCPU_EMPTY_POP_PAGES_HIGH);
	}

	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
		unsigned int nr_unpop = 0, rs, re;

		if (!nr_to_pop)
			break;

		spin_lock_irq(&pcpu_lock);
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			nr_unpop = chunk->nr_pages - chunk->nr_populated;
			if (nr_unpop)
				break;
		}
		spin_unlock_irq(&pcpu_lock);

		if (!nr_unpop)
			continue;

		/* @chunk can't go away while pcpu_alloc_mutex is held */
		bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
					     chunk->nr_pages) {
			int nr = min_t(int, re - rs, nr_to_pop);

			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
			if (!ret) {
				nr_to_pop -= nr;
				spin_lock_irq(&pcpu_lock);
				pcpu_chunk_populated(chunk, rs, rs + nr);
				spin_unlock_irq(&pcpu_lock);
			} else {
				nr_to_pop = 0;
			}

			if (!nr_to_pop)
				break;
		}
	}

	if (nr_to_pop) {
		/* ran out of chunks to populate, create a new one and retry */
		chunk = pcpu_create_chunk(type, gfp);
		if (chunk) {
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_relocate(chunk, -1);
			spin_unlock_irq(&pcpu_lock);
			goto retry_pop;
		}
	}

	mutex_unlock(&pcpu_alloc_mutex);
}
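
/*
 * Worked example (illustrative, assuming the usual thresholds of
 * PCPU_EMPTY_POP_PAGES_LOW = 2 and PCPU_EMPTY_POP_PAGES_HIGH = 4): if an
 * allocation leaves pcpu_nr_empty_pop_pages at 1, balance work is
 * scheduled and computes nr_to_pop = clamp(4 - 1, 0, 4) = 3, i.e. three
 * more empty pages get populated, starting from the most packed chunks.
 * After a failed atomic allocation the clamp is skipped and the full
 * PCPU_EMPTY_POP_PAGES_HIGH is populated instead.
 */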
/**
 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @work: unused
 *
 * Call __pcpu_balance_workfn() for each chunk type.
 */
static void pcpu_balance_workfn(struct work_struct *work)
{
	enum pcpu_chunk_type type;

	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
		__pcpu_balance_workfn(type);
}
/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int size, off;
	bool need_balance = false;
	struct list_head *pcpu_slot;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	size = pcpu_free_area(chunk, off);

	pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));

	pcpu_memcg_free_hook(chunk, off, size);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_bytes == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				need_balance = true;
				break;
			}
	}

	trace_percpu_free_percpu(chunk->base_addr, off, ptr);

	spin_unlock_irqrestore(&pcpu_lock, flags);

	if (need_balance)
		pcpu_schedule_balance_work();
}
EXPORT_SYMBOL_GPL(free_percpu);
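
/*
 * Illustrative sketch (not part of the allocator): a typical
 * alloc/use/free cycle for a dynamic per-cpu counter.  The names below
 * are hypothetical.
 *
 *	unsigned long __percpu *hits;
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	hits = alloc_percpu(unsigned long);
 *	if (!hits)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(*hits);			// hot path, no locking
 *
 *	for_each_possible_cpu(cpu)		// slow path, fold the counters
 *		sum += *per_cpu_ptr(hits, cpu);
 *
 *	free_percpu(hits);			// allowed from atomic context
 */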
bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);
		void *va = (void *)addr;

		if (va >= start && va < start + static_size) {
			if (can_addr) {
				*can_addr = (unsigned long) (va - start);
				*can_addr += (unsigned long)
					per_cpu_ptr(base, get_boot_cpu_id());
			}
			return true;
		}
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
	return __is_kernel_percpu_address(addr, NULL);
}
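
/*
 * Illustrative sketch (not part of the allocator): lockdep-style callers
 * use this to classify a pointer as a static object.  The per-cpu
 * variable below is hypothetical; note that the address tested is a
 * translated (per_cpu_ptr) address rather than the raw symbol address.
 *
 *	static DEFINE_PER_CPU(spinlock_t, demo_lock);
 *	unsigned long a = (unsigned long)per_cpu_ptr(&demo_lock, 0);
 *
 *	if (is_kernel_percpu_address(a))
 *		pr_debug("inside a CPU's copy of the static percpu area\n");
 */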
/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is a dereferenceable address obtained via one of the
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * The percpu allocator has special setup for the first chunk, which
 * currently supports either embedding in linear address space or vmalloc
 * mapping, and, from the second chunk on, the backing allocator (currently
 * either vm or km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into the
 * first chunk.  But the current code reflects better how the percpu
 * allocator actually works, and the verification can discover both bugs in
 * the percpu allocator itself and per_cpu_ptr_to_phys() callers.  So we
 * keep the current code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 *
	 * The address check is against full chunk sizes.  pcpu_base_addr
	 * points to the beginning of the first chunk including the
	 * static region.  Assumes good intent as the first chunk may
	 * not be full (ie. < pcpu_unit_pages in size).
	 */
	first_low = (unsigned long)pcpu_base_addr +
		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
	first_high = (unsigned long)pcpu_base_addr +
		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}
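
/*
 * Illustrative sketch (not part of the allocator): code handing one
 * CPU's copy of a per-cpu buffer to hardware or firmware needs its
 * physical address.  The names below are hypothetical.
 *
 *	u64 __percpu *bufs = alloc_percpu(u64);
 *	phys_addr_t phys;
 *
 *	if (!bufs)
 *		return -ENOMEM;
 *	phys = per_cpu_ptr_to_phys(per_cpu_ptr(bufs, cpu));
 */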
/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(struct_size(ai, groups, nr_groups),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	memblock_free_early(__pa(ai), ai->__ai_size);
}
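
/*
 * Illustrative sketch (not part of the allocator): an arch building its
 * own layout for two groups of two CPUs each could start from an ai like
 * the hypothetical one below, pointing the second group's cpu_map into
 * the shared array before filling in cpus, sizes and offsets and handing
 * the result to pcpu_setup_first_chunk().
 *
 *	struct pcpu_alloc_info *ai = pcpu_alloc_alloc_info(2, 4);
 *
 *	if (!ai)
 *		return -ENOMEM;
 *	ai->groups[0].nr_units = 2;
 *	ai->groups[1].cpu_map = ai->groups[0].cpu_map + 2;
 *	ai->groups[1].nr_units = 2;
 */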
/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				pr_cont("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			pr_cont("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					pr_cont("%0*d ",
						cpu_width, gi->cpu_map[unit]);
				else
					pr_cont("%s ", empty_str);
		}
	}
	pr_cont("\n");
}
/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * The first chunk will always contain a static and a dynamic region.
 * However, the static region is not managed by any chunk.  If the first
 * chunk also contains a reserved region, it is served by two chunks -
 * one for the reserved region and one for the dynamic region.  They
 * share the same vm, but use offset regions in the area allocation map.
 * The chunk serving the dynamic region is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 */
void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				   void *base_addr)
{
	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	size_t static_size, dyn_size;
	struct pcpu_chunk *chunk;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;
	int map_size;
	unsigned long tmp_addr;
	size_t alloc_size;
	enum pcpu_chunk_type type;

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("failed to initialize, %s\n", #cond);		\
		pr_emerg("cpu_possible_mask=%*pb\n",			\
			 cpumask_pr_args(cpu_possible_mask));		\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
#endif
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
	PCPU_SETUP_BUG_ON(!ai->dyn_size);
	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
	alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
	group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!group_offsets)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
	group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!group_sizes)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
	unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!unit_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
	unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!unit_off)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = struct_size(chunk, populated,
					     BITS_TO_LONGS(pcpu_unit_pages));

	pcpu_stats_save_ai(ai);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
					  sizeof(pcpu_chunk_lists[0]) *
					  PCPU_NR_CHUNK_TYPES,
					  SMP_CACHE_BYTES);
	if (!pcpu_chunk_lists)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]) *
		      PCPU_NR_CHUNK_TYPES);

	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
		for (i = 0; i < pcpu_nr_slots; i++)
			INIT_LIST_HEAD(&pcpu_chunk_list(type)[i]);

	/*
	 * The end of the static region needs to be aligned with the
	 * minimum allocation size as this offsets the reserved and
	 * dynamic region.  The first chunk ends page aligned by
	 * expanding the dynamic region, therefore the dynamic region
	 * can be shrunk to compensate while still staying above the
	 * configured sizes.
	 */
	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
	dyn_size = ai->dyn_size - (static_size - ai->static_size);

	/*
	 * Initialize first chunk.
	 * If the reserved_size is non-zero, this initializes the reserved
	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
	 * and the dynamic region is initialized here.  The first chunk,
	 * pcpu_first_chunk, will always point to the chunk that serves
	 * the dynamic region.
	 */
	tmp_addr = (unsigned long)base_addr + static_size;
	map_size = ai->reserved_size ?: dyn_size;
	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);

	/* init dynamic chunk if necessary */
	if (ai->reserved_size) {
		pcpu_reserved_chunk = chunk;

		tmp_addr = (unsigned long)base_addr + static_size +
			   ai->reserved_size;
		map_size = dyn_size;
		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
	}

	/* link the first chunk in */
	pcpu_first_chunk = chunk;
	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* include all regions of the first chunk */
	pcpu_nr_populated += PFN_DOWN(size_sum);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(base_addr);

	/* we're done */
	pcpu_base_addr = base_addr;
}
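
/*
 * Worked example (illustrative numbers): with ai->static_size = 64K,
 * ai->reserved_size = 8K and ai->dyn_size = 28K, size_sum is 100K and a
 * 128K unit_size leaves 28K unused at the end of each unit.  The
 * reserved chunk then manages offsets [64K, 72K) of every unit while
 * pcpu_first_chunk manages [72K, 100K), both relative to base_addr plus
 * the unit's offset.
 */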
#ifdef CONFIG_SMP

const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warn("unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
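
/*
 * Illustrative usage: the chosen first chunk allocator can be overridden
 * on the kernel command line, e.g. "percpu_alloc=page" to force the
 * page-by-page first chunk, or "percpu_alloc=embed" to request the
 * embedding allocator explicitly, provided the config supports them.
 */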
/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if needed by the arch config or the generic setup is going
 * to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	static struct cpumask mask __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, best_upa;	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));
	cpumask_clear(&mask);

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	/* determine the maximum # of units that can fit in an allocation */
	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
		upa--;
	max_upa = upa;

	cpumask_copy(&mask, cpu_possible_mask);

	/* group cpus according to their proximity */
	for (group = 0; !cpumask_empty(&mask); group++) {
		/* pop the group's first cpu */
		cpu = cpumask_first(&mask);
		group_map[cpu] = group;
		group_cnt[group]++;
		cpumask_clear_cpu(cpu, &mask);

		for_each_cpu(tcpu, &mask) {
			if (!cpu_distance_fn ||
			    (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
			     cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
				group_map[tcpu] = group;
				group_cnt[group]++;
				cpumask_clear_cpu(tcpu, &mask);
			}
		}
	}
	nr_groups = group;

	/*
	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
	 * Expand the unit_size until we use >= 75% of the units allocated.
	 * Related to atom_size, which could be much larger than the unit_size.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group < nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
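
/*
 * Worked example (illustrative numbers): on a machine with two NUMA
 * nodes of two CPUs each, atom_size = 2M and a size_sum that rounds up
 * to 768K, min_unit_size is 768K and alloc_size = roundup(768K, 2M) = 2M,
 * giving max_upa = 2 (2M % 2 == 0 and 2M / 2 = 1M is page aligned).
 * Each node forms one group of two CPUs, so upa = 2 wastes nothing and
 * wins: two 2M allocations, each split into two 1M units, one per CPU
 * of that node.
 */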
#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size;
	unsigned long max_distance;
	int group, i, highest_group, rc = 0;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address & max_distance */
	highest_group = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_free(ptr);
		areas[group] = ptr;

		base = min(ptr, base);
		if (ptr > areas[highest_group])
			highest_group = group;
	}
	max_distance = areas[highest_group] - base;
	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
			max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free_areas;
#endif
	}

	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
	}

	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			free_fn(areas[group],
				ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free_early(__pa(areas), areas_size);
	return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */
#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc = 0;
	int upa;
	int nr_g0_units;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	upa = ai->alloc_size / ai->unit_size;
	nr_g0_units = roundup(num_possible_cpus(), upa);
	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
		pcpu_free_alloc_info(ai);
		return -EINVAL;
	}

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
	if (!pages)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      pages_size);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned int cpu = ai->groups[0].cpu_map[unit];
		for (i = 0; i < unit_pages; i++) {
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warn("failed to allocate %s page for cpu%u\n",
					psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_free(ptr);
			pages[j++] = virt_to_page(ptr);
		}
	}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
		unit_pages, psize_str, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	memblock_free_early(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */
#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
#else	/* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_free(fc);

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	pcpu_setup_first_chunk(ai, fc);
	pcpu_free_alloc_info(ai);
}

#endif	/* CONFIG_SMP */
/*
 * pcpu_nr_pages - calculate total number of populated backing pages
 *
 * This reflects the number of pages populated to back chunks.  Metadata is
 * excluded in the number exposed in meminfo as the number of backing pages
 * scales with the number of cpus and can quickly outweigh the memory used for
 * metadata.  It also keeps this calculation nice and simple.
 *
 * RETURNS:
 * Total number of populated backing pages in use by the allocator.
 */
unsigned long pcpu_nr_pages(void)
{
	return pcpu_nr_populated * pcpu_nr_units;
}

/*
 * The percpu allocator is initialized early during boot when neither slab
 * nor workqueue is available.  Plug async management until everything is up
 * and running.
 */
static int __init percpu_enable_async(void)
{
	pcpu_async_enabled = true;
	return 0;
}
subsys_initcall(percpu_enable_async);