/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator, which can handle both static and dynamic
 * areas.  Percpu areas are allocated in chunks.  Each chunk consists of
 * a boot-time determined number of units, and the first chunk is used
 * for static percpu variables in the kernel image (special boot time
 * alloc/init handling is necessary as these areas need to be brought up
 * before allocation services are running).  Units grow as necessary and
 * all units grow or shrink in unison.  When a chunk is filled up,
 * another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  I.e.,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  Each map entry holds the byte offset at which an
 * area starts; the low bit is set while the area is in use and clear
 * while it is free, and an area's size is the distance to the next
 * entry's offset.  Allocation inside a chunk is done by scanning this
 * map sequentially and serving the first matching entry.  This is
 * mostly copied from the percpu_modalloc() allocator.  Chunks can be
 * determined from the address using the index field in the page
 * struct.  The index field contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
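/*
 * Illustrative sketch only (not part of this file): a typical arch-side
 * boot sequence built on the helpers below.  arch_pcpu_alloc() and
 * arch_pcpu_free() are made-up names standing in for the early allocator
 * callbacks an architecture would provide; the generic
 * setup_per_cpu_areas() at the end of this file does essentially this.
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		int rc;
 *
 *		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *					    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *					    NULL, arch_pcpu_alloc,
 *					    arch_pcpu_free);
 *		if (rc < 0)
 *			panic("percpu: failed to initialize first chunk\n");
 *	}
 */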
56 #include <linux/bitmap.h>
57 #include <linux/bootmem.h>
58 #include <linux/err.h>
59 #include <linux/list.h>
60 #include <linux/log2.h>
62 #include <linux/module.h>
63 #include <linux/mutex.h>
64 #include <linux/percpu.h>
65 #include <linux/pfn.h>
66 #include <linux/slab.h>
67 #include <linux/spinlock.h>
68 #include <linux/vmalloc.h>
69 #include <linux/workqueue.h>
70 #include <linux/kmemleak.h>
72 #include <asm/cacheflush.h>
73 #include <asm/sections.h>
74 #include <asm/tlbflush.h>
77 #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 share the same slot */
78 #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +			\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
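/*
 * Worked example (illustrative): with the default macros above, an area at
 * pcpu_base_addr + off in unit 0 is handed out as the percpu pointer
 * __per_cpu_start + off.  per_cpu_ptr() later adds __per_cpu_offset[cpu],
 * which the generic setup at the bottom of this file computes as
 * pcpu_base_addr - __per_cpu_start + pcpu_unit_offsets[cpu], landing on
 * pcpu_base_addr + pcpu_unit_offsets[cpu] + off, i.e. that CPU's copy of
 * the area.
 */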
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used before the sentry */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};
static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __read_mostly;
static unsigned int pcpu_high_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;
/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;
/*
 * Free path accesses and alters only the index data structures and can be
 * safely called from atomic context.  When memory needs to be returned to
 * the system, free path schedules reclaim_work.
 */
static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
	       addr < first_start + pcpu_reserved_chunk_limit;
}
static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
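/*
 * Worked example (illustrative): with PCPU_SLOT_BASE_SHIFT == 5,
 *
 *	__pcpu_size_to_slot(4)    == max(fls(4)    - 5 + 2, 1) == 1
 *	__pcpu_size_to_slot(32)   == max(fls(32)   - 5 + 2, 1) == 3
 *	__pcpu_size_to_slot(4096) == max(fls(4096) - 5 + 2, 1) == 10
 *
 * so free sizes of 1-31 bytes share the lowest slots, every further
 * power of two gets a slot of its own, and a completely free chunk
 * (free_size == pcpu_unit_size) always goes to the last slot.
 */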
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}
/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}
static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}
static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}
/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
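/*
 * Usage sketch (mirrors how pcpu_alloc() below populates missing pages;
 * rs and re are plain ints holding the region's start and end page index):
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end)
 *		ret = pcpu_populate_chunk(chunk, rs, re);
 */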
/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}
/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}
/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}
/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 3)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 3)
		new_alloc *= 2;

	return new_alloc;
}
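/*
 * Example (illustrative numbers): a chunk whose map currently uses 30
 * entries needs room for at least 33; starting from PCPU_DFL_MAP_ALLOC
 * (16) and doubling gives 16 -> 32 -> 64, so pcpu_need_to_extend()
 * returns 64 and pcpu_extend_area_map() below reallocates the map to
 * that many entries.
 */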
352 * pcpu_extend_area_map - extend area map of a chunk
353 * @chunk: chunk of interest
354 * @new_alloc: new target allocation length of the area map
356 * Extend area map of @chunk to have @new_alloc entries.
359 * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
362 * 0 on success, -errno on failure.
364 static int pcpu_extend_area_map(struct pcpu_chunk
*chunk
, int new_alloc
)
366 int *old
= NULL
, *new = NULL
;
367 size_t old_size
= 0, new_size
= new_alloc
* sizeof(new[0]);
370 new = pcpu_mem_zalloc(new_size
);
374 /* acquire pcpu_lock and switch to new area map */
375 spin_lock_irqsave(&pcpu_lock
, flags
);
377 if (new_alloc
<= chunk
->map_alloc
)
380 old_size
= chunk
->map_alloc
* sizeof(chunk
->map
[0]);
383 memcpy(new, old
, old_size
);
385 chunk
->map_alloc
= new_alloc
;
390 spin_unlock_irqrestore(&pcpu_lock
, flags
);
393 * pcpu_mem_free() might end up calling vfree() which uses
394 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
396 pcpu_mem_free(old
, old_size
);
397 pcpu_mem_free(new, new_size
);
403 * pcpu_alloc_area - allocate area from a pcpu_chunk
404 * @chunk: chunk of interest
405 * @size: wanted size in bytes
406 * @align: wanted align
408 * Try to allocate @size bytes area aligned at @align from @chunk.
409 * Note that this function only allocates the offset. It doesn't
410 * populate or map the area.
412 * @chunk->map must have at least two free slots.
418 * Allocated offset in @chunk on success, -1 if no matching area is
419 * found.
421 static int pcpu_alloc_area(struct pcpu_chunk
*chunk
, int size
, int align
)
423 int oslot
= pcpu_chunk_slot(chunk
);
426 bool seen_free
= false;
429 for (i
= chunk
->first_free
, p
= chunk
->map
+ i
; i
< chunk
->map_used
; i
++, p
++) {
437 /* extra for alignment requirement */
438 head
= ALIGN(off
, align
) - off
;
440 this_size
= (p
[1] & ~1) - off
;
441 if (this_size
< head
+ size
) {
443 chunk
->first_free
= i
;
446 max_contig
= max(this_size
, max_contig
);
451 * If head is small or the previous block is free,
452 * merge'em. Note that 'small' is defined as smaller
453 * than sizeof(int), which is very small but isn't too
454 * uncommon for percpu allocations.
456 if (head
&& (head
< sizeof(int) || !(p
[-1] & 1))) {
459 chunk
->free_size
-= head
;
461 max_contig
= max(*p
- p
[-1], max_contig
);
466 /* if tail is small, just keep it around */
467 tail
= this_size
- head
- size
;
468 if (tail
< sizeof(int)) {
470 size
= this_size
- head
;
473 /* split if warranted */
475 int nr_extra
= !!head
+ !!tail
;
477 /* insert new subblocks */
478 memmove(p
+ nr_extra
+ 1, p
+ 1,
479 sizeof(chunk
->map
[0]) * (chunk
->map_used
- i
));
480 chunk
->map_used
+= nr_extra
;
484 chunk
->first_free
= i
;
489 max_contig
= max(head
, max_contig
);
493 max_contig
= max(tail
, max_contig
);
498 chunk
->first_free
= i
+ 1;
500 /* update hint and mark allocated */
501 if (i
+ 1 == chunk
->map_used
)
502 chunk
->contig_hint
= max_contig
; /* fully scanned */
504 chunk
->contig_hint
= max(chunk
->contig_hint
,
507 chunk
->free_size
-= size
;
510 pcpu_chunk_relocate(chunk
, oslot
);
514 chunk
->contig_hint
= max_contig
; /* fully scanned */
515 pcpu_chunk_relocate(chunk
, oslot
);
517 /* tell the upper layer that this chunk has no matching area */
522 * pcpu_free_area - free area to a pcpu_chunk
523 * @chunk: chunk of interest
524 * @freeme: offset of area to free
526 * Free area starting from @freeme to @chunk. Note that this function
527 * only modifies the allocation map. It doesn't depopulate or unmap
528 * the area.
533 static void pcpu_free_area(struct pcpu_chunk
*chunk
, int freeme
)
535 int oslot
= pcpu_chunk_slot(chunk
);
541 freeme
|= 1; /* we are searching for <given offset, in use> pair */
546 unsigned k
= (i
+ j
) / 2;
550 else if (off
> freeme
)
555 BUG_ON(off
!= freeme
);
557 if (i
< chunk
->first_free
)
558 chunk
->first_free
= i
;
562 chunk
->free_size
+= (p
[1] & ~1) - off
;
564 /* merge with next? */
567 /* merge with previous? */
568 if (i
> 0 && !(p
[-1] & 1)) {
574 chunk
->map_used
-= to_free
;
575 memmove(p
+ 1, p
+ 1 + to_free
,
576 (chunk
->map_used
- i
) * sizeof(chunk
->map
[0]));
579 chunk
->contig_hint
= max(chunk
->map
[i
+ 1] - chunk
->map
[i
] - 1, chunk
->contig_hint
);
580 pcpu_chunk_relocate(chunk
, oslot
);
583 static struct pcpu_chunk
*pcpu_alloc_chunk(void)
585 struct pcpu_chunk
*chunk
;
587 chunk
= pcpu_mem_zalloc(pcpu_chunk_struct_size
);
591 chunk
->map
= pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC
*
592 sizeof(chunk
->map
[0]));
594 pcpu_mem_free(chunk
, pcpu_chunk_struct_size
);
598 chunk
->map_alloc
= PCPU_DFL_MAP_ALLOC
;
600 chunk
->map
[1] = pcpu_unit_size
| 1;
603 INIT_LIST_HEAD(&chunk
->list
);
604 chunk
->free_size
= pcpu_unit_size
;
605 chunk
->contig_hint
= pcpu_unit_size
;
610 static void pcpu_free_chunk(struct pcpu_chunk
*chunk
)
614 pcpu_mem_free(chunk
->map
, chunk
->map_alloc
* sizeof(chunk
->map
[0]));
615 pcpu_mem_free(chunk
, pcpu_chunk_struct_size
);
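/*
 * Illustrative note: a chunk fresh out of pcpu_alloc_chunk() above
 * describes one free area spanning the whole unit.  Its map holds
 * offsets with the low bit marking an entry in use, so map[0] == 0
 * (free area at offset 0) and map[1] == pcpu_unit_size | 1 (the in-use
 * sentry that terminates the map).
 */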
619 * Chunk management implementation.
621 * To allow different implementations, chunk alloc/free and
622 * [de]population are implemented in a separate file which is pulled
623 * into this file and compiled together. The following functions
624 * should be implemented.
626 * pcpu_populate_chunk - populate the specified range of a chunk
627 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
628 * pcpu_create_chunk - create a new chunk
629 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
630 * pcpu_addr_to_page - translate address to physical address
631 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif
647 * pcpu_chunk_addr_search - determine chunk containing specified address
648 * @addr: address for which the chunk needs to be determined.
651 * The address of the found chunk.
653 static struct pcpu_chunk
*pcpu_chunk_addr_search(void *addr
)
655 /* is it in the first chunk? */
656 if (pcpu_addr_in_first_chunk(addr
)) {
657 /* is it in the reserved area? */
658 if (pcpu_addr_in_reserved_chunk(addr
))
659 return pcpu_reserved_chunk
;
660 return pcpu_first_chunk
;
664 * The address is relative to unit0 which might be unused and
665 * thus unmapped. Offset the address to the unit space of the
666 * current processor before looking it up in the vmalloc
667 * space. Note that any possible cpu id can be used here, so
668 * there's no need to worry about preemption or cpu hotplug.
670 addr
+= pcpu_unit_offsets
[raw_smp_processor_id()];
671 return pcpu_get_page_chunk(pcpu_addr_to_page(addr
));
675 * pcpu_alloc - the percpu allocator
676 * @size: size of area to allocate in bytes
677 * @align: alignment of area (max PAGE_SIZE)
678 * @reserved: allocate from the reserved chunk if available
680 * Allocate percpu area of @size bytes aligned at @align.
683 * Does GFP_KERNEL allocation.
686 * Percpu pointer to the allocated area on success, NULL on failure.
688 static void __percpu
*pcpu_alloc(size_t size
, size_t align
, bool reserved
)
690 static int warn_limit
= 10;
691 struct pcpu_chunk
*chunk
;
693 int slot
, off
, new_alloc
, cpu
, ret
;
694 int page_start
, page_end
, rs
, re
;
699 * We want the lowest bit of offset available for in-use/free
700 * indicator, so force >= 16bit alignment and make size even.
702 if (unlikely(align
< 2))
705 size
= ALIGN(size
, 2);
707 if (unlikely(!size
|| size
> PCPU_MIN_UNIT_SIZE
|| align
> PAGE_SIZE
)) {
708 WARN(true, "illegal size (%zu) or align (%zu) for "
709 "percpu allocation\n", size
, align
);
713 spin_lock_irqsave(&pcpu_lock
, flags
);
715 /* serve reserved allocations from the reserved chunk if available */
716 if (reserved
&& pcpu_reserved_chunk
) {
717 chunk
= pcpu_reserved_chunk
;
719 if (size
> chunk
->contig_hint
) {
720 err
= "alloc from reserved chunk failed";
724 while ((new_alloc
= pcpu_need_to_extend(chunk
))) {
725 spin_unlock_irqrestore(&pcpu_lock
, flags
);
726 if (pcpu_extend_area_map(chunk
, new_alloc
) < 0) {
727 err
= "failed to extend area map of reserved chunk";
730 spin_lock_irqsave(&pcpu_lock
, flags
);
733 off
= pcpu_alloc_area(chunk
, size
, align
);
737 err
= "alloc from reserved chunk failed";
742 /* search through normal chunks */
743 for (slot
= pcpu_size_to_slot(size
); slot
< pcpu_nr_slots
; slot
++) {
744 list_for_each_entry(chunk
, &pcpu_slot
[slot
], list
) {
745 if (size
> chunk
->contig_hint
)
748 new_alloc
= pcpu_need_to_extend(chunk
);
750 spin_unlock_irqrestore(&pcpu_lock
, flags
);
751 if (pcpu_extend_area_map(chunk
,
753 err
= "failed to extend area map";
756 spin_lock_irqsave(&pcpu_lock
, flags
);
758 * pcpu_lock has been dropped, need to
759 * restart cpu_slot list walking.
764 off
= pcpu_alloc_area(chunk
, size
, align
);
770 spin_unlock_irqrestore(&pcpu_lock
, flags
);
773 * No space left. Create a new chunk. We don't want multiple
774 * tasks to create chunks simultaneously. Serialize and create iff
775 * there's still no empty chunk after grabbing the mutex.
777 mutex_lock(&pcpu_alloc_mutex
);
779 if (list_empty(&pcpu_slot
[pcpu_nr_slots
- 1])) {
780 chunk
= pcpu_create_chunk();
782 err
= "failed to allocate new chunk";
786 spin_lock_irqsave(&pcpu_lock
, flags
);
787 pcpu_chunk_relocate(chunk
, -1);
789 spin_lock_irqsave(&pcpu_lock
, flags
);
792 mutex_unlock(&pcpu_alloc_mutex
);
796 spin_unlock_irqrestore(&pcpu_lock
, flags
);
798 /* populate if not all pages are already there */
799 mutex_lock(&pcpu_alloc_mutex
);
800 page_start
= PFN_DOWN(off
);
801 page_end
= PFN_UP(off
+ size
);
803 pcpu_for_each_unpop_region(chunk
, rs
, re
, page_start
, page_end
) {
804 WARN_ON(chunk
->immutable
);
806 ret
= pcpu_populate_chunk(chunk
, rs
, re
);
808 spin_lock_irqsave(&pcpu_lock
, flags
);
810 mutex_unlock(&pcpu_alloc_mutex
);
811 pcpu_free_area(chunk
, off
);
812 err
= "failed to populate";
815 bitmap_set(chunk
->populated
, rs
, re
- rs
);
816 spin_unlock_irqrestore(&pcpu_lock
, flags
);
819 mutex_unlock(&pcpu_alloc_mutex
);
821 /* clear the areas and return address relative to base address */
822 for_each_possible_cpu(cpu
)
823 memset((void *)pcpu_chunk_addr(chunk
, cpu
, 0) + off
, 0, size
);
825 ptr
= __addr_to_pcpu_ptr(chunk
->base_addr
+ off
);
826 kmemleak_alloc_percpu(ptr
, size
);
830 spin_unlock_irqrestore(&pcpu_lock
, flags
);
833 pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
834 "%s\n", size
, align
, err
);
837 pr_info("PERCPU: limit reached, disable warning\n");
843 * __alloc_percpu - allocate dynamic percpu area
844 * @size: size of area to allocate in bytes
845 * @align: alignment of area (max PAGE_SIZE)
847 * Allocate zero-filled percpu area of @size bytes aligned at @align.
848 * Might sleep. Might trigger writeouts.
851 * Does GFP_KERNEL allocation.
854 * Percpu pointer to the allocated area on success, NULL on failure.
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
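/*
 * Usage sketch (illustrative; "struct my_stats" is a made-up example
 * type).  alloc_percpu(type) in <linux/percpu.h> is a convenience
 * wrapper around exactly this call:
 *
 *	struct my_stats { u64 packets; u64 bytes; };
 *	struct my_stats __percpu *stats;
 *
 *	stats = __alloc_percpu(sizeof(*stats), __alignof__(*stats));
 *	if (!stats)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(stats->packets);	(lockless fast path)
 *	...
 *	free_percpu(stats);
 */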
863 * __alloc_reserved_percpu - allocate reserved percpu area
864 * @size: size of area to allocate in bytes
865 * @align: alignment of area (max PAGE_SIZE)
867 * Allocate zero-filled percpu area of @size bytes aligned at @align
868 * from reserved percpu area if arch has set it up; otherwise,
869 * allocation is served from the same dynamic area. Might sleep.
870 * Might trigger writeouts.
873 * Does GFP_KERNEL allocation.
876 * Percpu pointer to the allocated area on success, NULL on failure.
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}
884 * pcpu_reclaim - reclaim fully free chunks, workqueue function
887 * Reclaim all fully free chunks except for the first one.
892 static void pcpu_reclaim(struct work_struct
*work
)
895 struct list_head
*head
= &pcpu_slot
[pcpu_nr_slots
- 1];
896 struct pcpu_chunk
*chunk
, *next
;
898 mutex_lock(&pcpu_alloc_mutex
);
899 spin_lock_irq(&pcpu_lock
);
901 list_for_each_entry_safe(chunk
, next
, head
, list
) {
902 WARN_ON(chunk
->immutable
);
904 /* spare the first one */
905 if (chunk
== list_first_entry(head
, struct pcpu_chunk
, list
))
908 list_move(&chunk
->list
, &todo
);
911 spin_unlock_irq(&pcpu_lock
);
913 list_for_each_entry_safe(chunk
, next
, &todo
, list
) {
916 pcpu_for_each_pop_region(chunk
, rs
, re
, 0, pcpu_unit_pages
) {
917 pcpu_depopulate_chunk(chunk
, rs
, re
);
918 bitmap_clear(chunk
->populated
, rs
, re
- rs
);
920 pcpu_destroy_chunk(chunk
);
923 mutex_unlock(&pcpu_alloc_mutex
);
927 * free_percpu - free percpu area
928 * @ptr: pointer to area to free
930 * Free percpu area @ptr.
933 * Can be called from atomic context.
935 void free_percpu(void __percpu
*ptr
)
938 struct pcpu_chunk
*chunk
;
945 kmemleak_free_percpu(ptr
);
947 addr
= __pcpu_ptr_to_addr(ptr
);
949 spin_lock_irqsave(&pcpu_lock
, flags
);
951 chunk
= pcpu_chunk_addr_search(addr
);
952 off
= addr
- chunk
->base_addr
;
954 pcpu_free_area(chunk
, off
);
956 /* if there are more than one fully free chunks, wake up grim reaper */
957 if (chunk
->free_size
== pcpu_unit_size
) {
958 struct pcpu_chunk
*pos
;
960 list_for_each_entry(pos
, &pcpu_slot
[pcpu_nr_slots
- 1], list
)
962 schedule_work(&pcpu_reclaim_work
);
967 spin_unlock_irqrestore(&pcpu_lock
, flags
);
969 EXPORT_SYMBOL_GPL(free_percpu
);
972 * is_kernel_percpu_address - test whether address is from static percpu area
973 * @addr: address to test
975 * Test whether @addr belongs to in-kernel static percpu area. Module
976 * static percpu areas are not considered. For those, use
977 * is_module_percpu_address().
980 * %true if @addr is from in-kernel static percpu area, %false otherwise.
982 bool is_kernel_percpu_address(unsigned long addr
)
985 const size_t static_size
= __per_cpu_end
- __per_cpu_start
;
986 void __percpu
*base
= __addr_to_pcpu_ptr(pcpu_base_addr
);
989 for_each_possible_cpu(cpu
) {
990 void *start
= per_cpu_ptr(base
, cpu
);
992 if ((void *)addr
>= start
&& (void *)addr
< start
+ static_size
)
996 /* on UP, can't distinguish from other static vars, always false */
1001 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
1002 * @addr: the address to be converted to physical address
1004 * Given @addr which is dereferenceable address obtained via one of
1005 * percpu access macros, this function translates it into its physical
1006 * address. The caller is responsible for ensuring @addr stays valid
1007 * until this function finishes.
1009 * percpu allocator has special setup for the first chunk, which currently
1010 * supports either embedding in linear address space or vmalloc mapping,
1011 * and, from the second one, the backing allocator (currently either vm or
1012 * km) provides translation.
1014 * The addr can be translated simply without checking if it falls into the
1015 * first chunk. But the current code reflects better how percpu allocator
1016 * actually works, and the verification can discover both bugs in percpu
1017 * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
1018 * code.
1021 * The physical address for @addr.
1023 phys_addr_t
per_cpu_ptr_to_phys(void *addr
)
1025 void __percpu
*base
= __addr_to_pcpu_ptr(pcpu_base_addr
);
1026 bool in_first_chunk
= false;
1027 unsigned long first_low
, first_high
;
1031 * The following test on unit_low/high isn't strictly
1032 * necessary but will speed up lookups of addresses which
1033 * aren't in the first chunk.
1035 first_low
= pcpu_chunk_addr(pcpu_first_chunk
, pcpu_low_unit_cpu
, 0);
1036 first_high
= pcpu_chunk_addr(pcpu_first_chunk
, pcpu_high_unit_cpu
,
1038 if ((unsigned long)addr
>= first_low
&&
1039 (unsigned long)addr
< first_high
) {
1040 for_each_possible_cpu(cpu
) {
1041 void *start
= per_cpu_ptr(base
, cpu
);
1043 if (addr
>= start
&& addr
< start
+ pcpu_unit_size
) {
1044 in_first_chunk
= true;
1050 if (in_first_chunk
) {
1051 if (!is_vmalloc_addr(addr
))
1054 return page_to_phys(vmalloc_to_page(addr
)) +
1055 offset_in_page(addr
);
1057 return page_to_phys(pcpu_addr_to_page(addr
)) +
1058 offset_in_page(addr
);
1062 * pcpu_alloc_alloc_info - allocate percpu allocation info
1063 * @nr_groups: the number of groups
1064 * @nr_units: the number of units
1066 * Allocate ai which is large enough for @nr_groups groups containing
1067 * @nr_units units. The returned ai's groups[0].cpu_map points to the
1068 * cpu_map array which is long enough for @nr_units and filled with
1069 * NR_CPUS. It's the caller's responsibility to initialize cpu_map
1070 * pointer of other groups.
1073 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1074 * failure.
1076 struct pcpu_alloc_info
* __init
pcpu_alloc_alloc_info(int nr_groups
,
1079 struct pcpu_alloc_info
*ai
;
1080 size_t base_size
, ai_size
;
1084 base_size
= ALIGN(sizeof(*ai
) + nr_groups
* sizeof(ai
->groups
[0]),
1085 __alignof__(ai
->groups
[0].cpu_map
[0]));
1086 ai_size
= base_size
+ nr_units
* sizeof(ai
->groups
[0].cpu_map
[0]);
1088 ptr
= memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size
), 0);
1094 ai
->groups
[0].cpu_map
= ptr
;
1096 for (unit
= 0; unit
< nr_units
; unit
++)
1097 ai
->groups
[0].cpu_map
[unit
] = NR_CPUS
;
1099 ai
->nr_groups
= nr_groups
;
1100 ai
->__ai_size
= PFN_ALIGN(ai_size
);
1106 * pcpu_free_alloc_info - free percpu allocation info
1107 * @ai: pcpu_alloc_info to free
1109 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1111 void __init
pcpu_free_alloc_info(struct pcpu_alloc_info
*ai
)
1113 memblock_free_early(__pa(ai
), ai
->__ai_size
);
1117 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1119 * @ai: allocation info to dump
1121 * Print out information about @ai using loglevel @lvl.
1123 static void pcpu_dump_alloc_info(const char *lvl
,
1124 const struct pcpu_alloc_info
*ai
)
1126 int group_width
= 1, cpu_width
= 1, width
;
1127 char empty_str
[] = "--------";
1128 int alloc
= 0, alloc_end
= 0;
1130 int upa
, apl
; /* units per alloc, allocs per line */
1136 v
= num_possible_cpus();
1139 empty_str
[min_t(int, cpu_width
, sizeof(empty_str
) - 1)] = '\0';
1141 upa
= ai
->alloc_size
/ ai
->unit_size
;
1142 width
= upa
* (cpu_width
+ 1) + group_width
+ 3;
1143 apl
= rounddown_pow_of_two(max(60 / width
, 1));
1145 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1146 lvl
, ai
->static_size
, ai
->reserved_size
, ai
->dyn_size
,
1147 ai
->unit_size
, ai
->alloc_size
/ ai
->atom_size
, ai
->atom_size
);
1149 for (group
= 0; group
< ai
->nr_groups
; group
++) {
1150 const struct pcpu_group_info
*gi
= &ai
->groups
[group
];
1151 int unit
= 0, unit_end
= 0;
1153 BUG_ON(gi
->nr_units
% upa
);
1154 for (alloc_end
+= gi
->nr_units
/ upa
;
1155 alloc
< alloc_end
; alloc
++) {
1156 if (!(alloc
% apl
)) {
1157 printk(KERN_CONT
"\n");
1158 printk("%spcpu-alloc: ", lvl
);
1160 printk(KERN_CONT
"[%0*d] ", group_width
, group
);
1162 for (unit_end
+= upa
; unit
< unit_end
; unit
++)
1163 if (gi
->cpu_map
[unit
] != NR_CPUS
)
1164 printk(KERN_CONT
"%0*d ", cpu_width
,
1167 printk(KERN_CONT
"%s ", empty_str
);
1170 printk(KERN_CONT
"\n");
1174 * pcpu_setup_first_chunk - initialize the first percpu chunk
1175 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1176 * @base_addr: mapped address
1178 * Initialize the first percpu chunk which contains the kernel static
1179 * percpu area. This function is to be called from arch percpu area
1180 * setup path.
1182 * @ai contains all information necessary to initialize the first
1183 * chunk and prime the dynamic percpu allocator.
1185 * @ai->static_size is the size of static percpu area.
1187 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1188 * reserve after the static area in the first chunk. This reserves
1189 * the first chunk such that it's available only through reserved
1190 * percpu allocation. This is primarily used to serve module percpu
1191 * static areas on architectures where the addressing model has
1192 * limited offset range for symbol relocations to guarantee module
1193 * percpu symbols fall inside the relocatable range.
1195 * @ai->dyn_size determines the number of bytes available for dynamic
1196 * allocation in the first chunk. The area between @ai->static_size +
1197 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1199 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1200 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1201 * @ai->dyn_size.
1203 * @ai->atom_size is the allocation atom size and used as alignment
1204 * for vm areas.
1206 * @ai->alloc_size is the allocation size and always multiple of
1207 * @ai->atom_size. This is larger than @ai->atom_size if
1208 * @ai->unit_size is larger than @ai->atom_size.
1210 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1211 * percpu areas. Units which should be colocated are put into the
1212 * same group. Dynamic VM areas will be allocated according to these
1213 * groupings. If @ai->nr_groups is zero, a single group containing
1214 * all units is assumed.
1216 * The caller should have mapped the first chunk at @base_addr and
1217 * copied static data to each unit.
1219 * If the first chunk ends up with both reserved and dynamic areas, it
1220 * is served by two chunks - one to serve the core static and reserved
1221 * areas and the other for the dynamic area. They share the same vm
1222 * and page map but uses different area allocation map to stay away
1223 * from each other. The latter chunk is circulated in the chunk slots
1224 * and available for dynamic allocation like any other chunks.
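 *
 * As an illustration (made-up numbers): with @ai->static_size = 64k,
 * @ai->reserved_size = 8k, @ai->dyn_size = 20k and a resulting
 * @ai->unit_size of 128k, the first 64k of every unit holds the copied
 * static area, the next 8k is handed out only through reserved
 * allocations, the 20k after that seeds the dynamic allocator and the
 * remaining 36k of the unit stays unused.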
1227 * 0 on success, -errno on failure.
1229 int __init
pcpu_setup_first_chunk(const struct pcpu_alloc_info
*ai
,
1232 static char cpus_buf
[4096] __initdata
;
1233 static int smap
[PERCPU_DYNAMIC_EARLY_SLOTS
] __initdata
;
1234 static int dmap
[PERCPU_DYNAMIC_EARLY_SLOTS
] __initdata
;
1235 size_t dyn_size
= ai
->dyn_size
;
1236 size_t size_sum
= ai
->static_size
+ ai
->reserved_size
+ dyn_size
;
1237 struct pcpu_chunk
*schunk
, *dchunk
= NULL
;
1238 unsigned long *group_offsets
;
1239 size_t *group_sizes
;
1240 unsigned long *unit_off
;
1245 cpumask_scnprintf(cpus_buf
, sizeof(cpus_buf
), cpu_possible_mask
);
1247 #define PCPU_SETUP_BUG_ON(cond) do { \
1248 if (unlikely(cond)) { \
1249 pr_emerg("PERCPU: failed to initialize, %s", #cond); \
1250 pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
1251 pcpu_dump_alloc_info(KERN_EMERG, ai); \
1257 PCPU_SETUP_BUG_ON(ai
->nr_groups
<= 0);
1259 PCPU_SETUP_BUG_ON(!ai
->static_size
);
1260 PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start
& ~PAGE_MASK
);
1262 PCPU_SETUP_BUG_ON(!base_addr
);
1263 PCPU_SETUP_BUG_ON((unsigned long)base_addr
& ~PAGE_MASK
);
1264 PCPU_SETUP_BUG_ON(ai
->unit_size
< size_sum
);
1265 PCPU_SETUP_BUG_ON(ai
->unit_size
& ~PAGE_MASK
);
1266 PCPU_SETUP_BUG_ON(ai
->unit_size
< PCPU_MIN_UNIT_SIZE
);
1267 PCPU_SETUP_BUG_ON(ai
->dyn_size
< PERCPU_DYNAMIC_EARLY_SIZE
);
1268 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai
) < 0);
1270 /* process group information and build config tables accordingly */
1271 group_offsets
= memblock_virt_alloc(ai
->nr_groups
*
1272 sizeof(group_offsets
[0]), 0);
1273 group_sizes
= memblock_virt_alloc(ai
->nr_groups
*
1274 sizeof(group_sizes
[0]), 0);
1275 unit_map
= memblock_virt_alloc(nr_cpu_ids
* sizeof(unit_map
[0]), 0);
1276 unit_off
= memblock_virt_alloc(nr_cpu_ids
* sizeof(unit_off
[0]), 0);
1278 for (cpu
= 0; cpu
< nr_cpu_ids
; cpu
++)
1279 unit_map
[cpu
] = UINT_MAX
;
1281 pcpu_low_unit_cpu
= NR_CPUS
;
1282 pcpu_high_unit_cpu
= NR_CPUS
;
1284 for (group
= 0, unit
= 0; group
< ai
->nr_groups
; group
++, unit
+= i
) {
1285 const struct pcpu_group_info
*gi
= &ai
->groups
[group
];
1287 group_offsets
[group
] = gi
->base_offset
;
1288 group_sizes
[group
] = gi
->nr_units
* ai
->unit_size
;
1290 for (i
= 0; i
< gi
->nr_units
; i
++) {
1291 cpu
= gi
->cpu_map
[i
];
1295 PCPU_SETUP_BUG_ON(cpu
> nr_cpu_ids
);
1296 PCPU_SETUP_BUG_ON(!cpu_possible(cpu
));
1297 PCPU_SETUP_BUG_ON(unit_map
[cpu
] != UINT_MAX
);
1299 unit_map
[cpu
] = unit
+ i
;
1300 unit_off
[cpu
] = gi
->base_offset
+ i
* ai
->unit_size
;
1302 /* determine low/high unit_cpu */
1303 if (pcpu_low_unit_cpu
== NR_CPUS
||
1304 unit_off
[cpu
] < unit_off
[pcpu_low_unit_cpu
])
1305 pcpu_low_unit_cpu
= cpu
;
1306 if (pcpu_high_unit_cpu
== NR_CPUS
||
1307 unit_off
[cpu
] > unit_off
[pcpu_high_unit_cpu
])
1308 pcpu_high_unit_cpu
= cpu
;
1311 pcpu_nr_units
= unit
;
1313 for_each_possible_cpu(cpu
)
1314 PCPU_SETUP_BUG_ON(unit_map
[cpu
] == UINT_MAX
);
1316 /* we're done parsing the input, undefine BUG macro and dump config */
1317 #undef PCPU_SETUP_BUG_ON
1318 pcpu_dump_alloc_info(KERN_DEBUG
, ai
);
1320 pcpu_nr_groups
= ai
->nr_groups
;
1321 pcpu_group_offsets
= group_offsets
;
1322 pcpu_group_sizes
= group_sizes
;
1323 pcpu_unit_map
= unit_map
;
1324 pcpu_unit_offsets
= unit_off
;
1326 /* determine basic parameters */
1327 pcpu_unit_pages
= ai
->unit_size
>> PAGE_SHIFT
;
1328 pcpu_unit_size
= pcpu_unit_pages
<< PAGE_SHIFT
;
1329 pcpu_atom_size
= ai
->atom_size
;
1330 pcpu_chunk_struct_size
= sizeof(struct pcpu_chunk
) +
1331 BITS_TO_LONGS(pcpu_unit_pages
) * sizeof(unsigned long);
1334 * Allocate chunk slots. The additional last slot is for
1335 * empty chunks.
1337 pcpu_nr_slots
= __pcpu_size_to_slot(pcpu_unit_size
) + 2;
1338 pcpu_slot
= memblock_virt_alloc(
1339 pcpu_nr_slots
* sizeof(pcpu_slot
[0]), 0);
1340 for (i
= 0; i
< pcpu_nr_slots
; i
++)
1341 INIT_LIST_HEAD(&pcpu_slot
[i
]);
1344 * Initialize static chunk. If reserved_size is zero, the
1345 * static chunk covers static area + dynamic allocation area
1346 * in the first chunk. If reserved_size is not zero, it
1347 * covers static area + reserved area (mostly used for module
1348 * static percpu allocation).
1350 schunk
= memblock_virt_alloc(pcpu_chunk_struct_size
, 0);
1351 INIT_LIST_HEAD(&schunk
->list
);
1352 schunk
->base_addr
= base_addr
;
1354 schunk
->map_alloc
= ARRAY_SIZE(smap
);
1355 schunk
->immutable
= true;
1356 bitmap_fill(schunk
->populated
, pcpu_unit_pages
);
1358 if (ai
->reserved_size
) {
1359 schunk
->free_size
= ai
->reserved_size
;
1360 pcpu_reserved_chunk
= schunk
;
1361 pcpu_reserved_chunk_limit
= ai
->static_size
+ ai
->reserved_size
;
1363 schunk
->free_size
= dyn_size
;
1364 dyn_size
= 0; /* dynamic area covered */
1366 schunk
->contig_hint
= schunk
->free_size
;
1369 schunk
->map
[1] = ai
->static_size
;
1370 schunk
->map_used
= 1;
1371 if (schunk
->free_size
)
1372 schunk
->map
[++schunk
->map_used
] = 1 | (ai
->static_size
+ schunk
->free_size
);
1374 schunk
->map
[1] |= 1;
1376 /* init dynamic chunk if necessary */
1378 dchunk
= memblock_virt_alloc(pcpu_chunk_struct_size
, 0);
1379 INIT_LIST_HEAD(&dchunk
->list
);
1380 dchunk
->base_addr
= base_addr
;
1382 dchunk
->map_alloc
= ARRAY_SIZE(dmap
);
1383 dchunk
->immutable
= true;
1384 bitmap_fill(dchunk
->populated
, pcpu_unit_pages
);
1386 dchunk
->contig_hint
= dchunk
->free_size
= dyn_size
;
1388 dchunk
->map
[1] = pcpu_reserved_chunk_limit
;
1389 dchunk
->map
[2] = (pcpu_reserved_chunk_limit
+ dchunk
->free_size
) | 1;
1390 dchunk
->map_used
= 2;
1393 /* link the first chunk in */
1394 pcpu_first_chunk
= dchunk
?: schunk
;
1395 pcpu_chunk_relocate(pcpu_first_chunk
, -1);
1398 pcpu_base_addr
= base_addr
;
1404 const char * const pcpu_fc_names
[PCPU_FC_NR
] __initconst
= {
1405 [PCPU_FC_AUTO
] = "auto",
1406 [PCPU_FC_EMBED
] = "embed",
1407 [PCPU_FC_PAGE
] = "page",
1410 enum pcpu_fc pcpu_chosen_fc __initdata
= PCPU_FC_AUTO
;
1412 static int __init
percpu_alloc_setup(char *str
)
1419 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1420 else if (!strcmp(str
, "embed"))
1421 pcpu_chosen_fc
= PCPU_FC_EMBED
;
1423 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1424 else if (!strcmp(str
, "page"))
1425 pcpu_chosen_fc
= PCPU_FC_PAGE
;
1428 pr_warning("PERCPU: unknown allocator %s specified\n", str
);
1432 early_param("percpu_alloc", percpu_alloc_setup
);
1435 * pcpu_embed_first_chunk() is used by the generic percpu setup.
1436 * Build it if needed by the arch config or the generic setup is going
1437 * to be used.
1439 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1440 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1441 #define BUILD_EMBED_FIRST_CHUNK
1444 /* build pcpu_page_first_chunk() iff needed by the arch config */
1445 #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1446 #define BUILD_PAGE_FIRST_CHUNK
1449 /* pcpu_build_alloc_info() is used by both embed and page first chunk */
1450 #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1452 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1453 * @reserved_size: the size of reserved percpu area in bytes
1454 * @dyn_size: minimum free size for dynamic allocation in bytes
1455 * @atom_size: allocation atom size
1456 * @cpu_distance_fn: callback to determine distance between cpus, optional
1458 * This function determines grouping of units, their mappings to cpus
1459 * and other parameters considering needed percpu size, allocation
1460 * atom size and distances between CPUs.
1462 * Groups are always multiples of atom size and CPUs which are of
1463 * LOCAL_DISTANCE both ways are grouped together and share space for
1464 * units in the same group. The returned configuration is guaranteed
1465 * to have CPUs on different nodes on different groups and >=75% usage
1466 * of allocated virtual address space.
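 *
 * For example (illustrative): on a two-node NUMA machine where
 * cpu_distance_fn() reports LOCAL_DISTANCE only between CPUs of the
 * same node, each node's CPUs form one group; units within a group
 * share atom_size-multiple allocations while the two groups may sit in
 * entirely different parts of the address space.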
1469 * On success, pointer to the new allocation_info is returned. On
1470 * failure, ERR_PTR value is returned.
1472 static struct pcpu_alloc_info
* __init
pcpu_build_alloc_info(
1473 size_t reserved_size
, size_t dyn_size
,
1475 pcpu_fc_cpu_distance_fn_t cpu_distance_fn
)
1477 static int group_map
[NR_CPUS
] __initdata
;
1478 static int group_cnt
[NR_CPUS
] __initdata
;
1479 const size_t static_size
= __per_cpu_end
- __per_cpu_start
;
1480 int nr_groups
= 1, nr_units
= 0;
1481 size_t size_sum
, min_unit_size
, alloc_size
;
1482 int upa
, max_upa
, uninitialized_var(best_upa
); /* units_per_alloc */
1483 int last_allocs
, group
, unit
;
1484 unsigned int cpu
, tcpu
;
1485 struct pcpu_alloc_info
*ai
;
1486 unsigned int *cpu_map
;
1488 /* this function may be called multiple times */
1489 memset(group_map
, 0, sizeof(group_map
));
1490 memset(group_cnt
, 0, sizeof(group_cnt
));
1492 /* calculate size_sum and ensure dyn_size is enough for early alloc */
1493 size_sum
= PFN_ALIGN(static_size
+ reserved_size
+
1494 max_t(size_t, dyn_size
, PERCPU_DYNAMIC_EARLY_SIZE
));
1495 dyn_size
= size_sum
- static_size
- reserved_size
;
1498 * Determine min_unit_size, alloc_size and max_upa such that
1499 * alloc_size is multiple of atom_size and is the smallest
1500 * which can accommodate 4k aligned segments which are equal to
1501 * or larger than min_unit_size.
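 *
 * Worked example (illustrative numbers): with a 2M atom_size and a
 * page-aligned size_sum of 300k, min_unit_size is 300k, alloc_size is
 * rounded up to 2M and upa starts at 2M / 300k = 6; 6 and 5 are
 * rejected because 2M does not divide evenly by them, so max_upa
 * becomes 4 and each unit ends up 512k.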
1503 min_unit_size
= max_t(size_t, size_sum
, PCPU_MIN_UNIT_SIZE
);
1505 alloc_size
= roundup(min_unit_size
, atom_size
);
1506 upa
= alloc_size
/ min_unit_size
;
1507 while (alloc_size
% upa
|| ((alloc_size
/ upa
) & ~PAGE_MASK
))
1511 /* group cpus according to their proximity */
1512 for_each_possible_cpu(cpu
) {
1515 for_each_possible_cpu(tcpu
) {
1518 if (group_map
[tcpu
] == group
&& cpu_distance_fn
&&
1519 (cpu_distance_fn(cpu
, tcpu
) > LOCAL_DISTANCE
||
1520 cpu_distance_fn(tcpu
, cpu
) > LOCAL_DISTANCE
)) {
1522 nr_groups
= max(nr_groups
, group
+ 1);
1526 group_map
[cpu
] = group
;
1531 * Expand unit size until address space usage goes over 75%
1532 * and then as much as possible without using more address
1535 last_allocs
= INT_MAX
;
1536 for (upa
= max_upa
; upa
; upa
--) {
1537 int allocs
= 0, wasted
= 0;
1539 if (alloc_size
% upa
|| ((alloc_size
/ upa
) & ~PAGE_MASK
))
1542 for (group
= 0; group
< nr_groups
; group
++) {
1543 int this_allocs
= DIV_ROUND_UP(group_cnt
[group
], upa
);
1544 allocs
+= this_allocs
;
1545 wasted
+= this_allocs
* upa
- group_cnt
[group
];
1549 * Don't accept if wastage is over 1/3. The
1550 * greater-than comparison ensures upa==1 always
1551 * passes the following check.
1553 if (wasted
> num_possible_cpus() / 3)
1556 /* and then don't consume more memory */
1557 if (allocs
> last_allocs
)
1559 last_allocs
= allocs
;
1564 /* allocate and fill alloc_info */
1565 for (group
= 0; group
< nr_groups
; group
++)
1566 nr_units
+= roundup(group_cnt
[group
], upa
);
1568 ai
= pcpu_alloc_alloc_info(nr_groups
, nr_units
);
1570 return ERR_PTR(-ENOMEM
);
1571 cpu_map
= ai
->groups
[0].cpu_map
;
1573 for (group
= 0; group
< nr_groups
; group
++) {
1574 ai
->groups
[group
].cpu_map
= cpu_map
;
1575 cpu_map
+= roundup(group_cnt
[group
], upa
);
1578 ai
->static_size
= static_size
;
1579 ai
->reserved_size
= reserved_size
;
1580 ai
->dyn_size
= dyn_size
;
1581 ai
->unit_size
= alloc_size
/ upa
;
1582 ai
->atom_size
= atom_size
;
1583 ai
->alloc_size
= alloc_size
;
1585 for (group
= 0, unit
= 0; group_cnt
[group
]; group
++) {
1586 struct pcpu_group_info
*gi
= &ai
->groups
[group
];
1589 * Initialize base_offset as if all groups are located
1590 * back-to-back. The caller should update this to
1591 * reflect actual allocation.
1593 gi
->base_offset
= unit
* ai
->unit_size
;
1595 for_each_possible_cpu(cpu
)
1596 if (group_map
[cpu
] == group
)
1597 gi
->cpu_map
[gi
->nr_units
++] = cpu
;
1598 gi
->nr_units
= roundup(gi
->nr_units
, upa
);
1599 unit
+= gi
->nr_units
;
1601 BUG_ON(unit
!= nr_units
);
1605 #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1607 #if defined(BUILD_EMBED_FIRST_CHUNK)
1609 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1610 * @reserved_size: the size of reserved percpu area in bytes
1611 * @dyn_size: minimum free size for dynamic allocation in bytes
1612 * @atom_size: allocation atom size
1613 * @cpu_distance_fn: callback to determine distance between cpus, optional
1614 * @alloc_fn: function to allocate percpu page
1615 * @free_fn: function to free percpu page
1617 * This is a helper to ease setting up embedded first percpu chunk and
1618 * can be called where pcpu_setup_first_chunk() is expected.
1620 * If this function is used to setup the first chunk, it is allocated
1621 * by calling @alloc_fn and used as-is without being mapped into
1622 * vmalloc area. Allocations are always whole multiples of @atom_size
1623 * aligned to @atom_size.
1625 * This enables the first chunk to piggy back on the linear physical
1626 * mapping which often uses larger page size. Please note that this
1627 * can result in very sparse cpu->unit mapping on NUMA machines thus
1628 * requiring large vmalloc address space. Don't use this allocator if
1629 * vmalloc space is not orders of magnitude larger than distances
1630 * between node memory addresses (ie. 32bit NUMA machines).
1632 * @dyn_size specifies the minimum dynamic area size.
1634 * If the needed size is smaller than the minimum or specified unit
1635 * size, the leftover is returned using @free_fn.
1638 * 0 on success, -errno on failure.
1640 int __init
pcpu_embed_first_chunk(size_t reserved_size
, size_t dyn_size
,
1642 pcpu_fc_cpu_distance_fn_t cpu_distance_fn
,
1643 pcpu_fc_alloc_fn_t alloc_fn
,
1644 pcpu_fc_free_fn_t free_fn
)
1646 void *base
= (void *)ULONG_MAX
;
1647 void **areas
= NULL
;
1648 struct pcpu_alloc_info
*ai
;
1649 size_t size_sum
, areas_size
, max_distance
;
1652 ai
= pcpu_build_alloc_info(reserved_size
, dyn_size
, atom_size
,
1657 size_sum
= ai
->static_size
+ ai
->reserved_size
+ ai
->dyn_size
;
1658 areas_size
= PFN_ALIGN(ai
->nr_groups
* sizeof(void *));
1660 areas
= memblock_virt_alloc_nopanic(areas_size
, 0);
1666 /* allocate, copy and determine base address */
1667 for (group
= 0; group
< ai
->nr_groups
; group
++) {
1668 struct pcpu_group_info
*gi
= &ai
->groups
[group
];
1669 unsigned int cpu
= NR_CPUS
;
1672 for (i
= 0; i
< gi
->nr_units
&& cpu
== NR_CPUS
; i
++)
1673 cpu
= gi
->cpu_map
[i
];
1674 BUG_ON(cpu
== NR_CPUS
);
1676 /* allocate space for the whole group */
1677 ptr
= alloc_fn(cpu
, gi
->nr_units
* ai
->unit_size
, atom_size
);
1680 goto out_free_areas
;
1682 /* kmemleak tracks the percpu allocations separately */
1686 base
= min(ptr
, base
);
1690 * Copy data and free unused parts. This should happen after all
1691 * allocations are complete; otherwise, we may end up with
1692 * overlapping groups.
1694 for (group
= 0; group
< ai
->nr_groups
; group
++) {
1695 struct pcpu_group_info
*gi
= &ai
->groups
[group
];
1696 void *ptr
= areas
[group
];
1698 for (i
= 0; i
< gi
->nr_units
; i
++, ptr
+= ai
->unit_size
) {
1699 if (gi
->cpu_map
[i
] == NR_CPUS
) {
1700 /* unused unit, free whole */
1701 free_fn(ptr
, ai
->unit_size
);
1704 /* copy and return the unused part */
1705 memcpy(ptr
, __per_cpu_load
, ai
->static_size
);
1706 free_fn(ptr
+ size_sum
, ai
->unit_size
- size_sum
);
1710 /* base address is now known, determine group base offsets */
1712 for (group
= 0; group
< ai
->nr_groups
; group
++) {
1713 ai
->groups
[group
].base_offset
= areas
[group
] - base
;
1714 max_distance
= max_t(size_t, max_distance
,
1715 ai
->groups
[group
].base_offset
);
1717 max_distance
+= ai
->unit_size
;
1719 /* warn if maximum distance is further than 75% of vmalloc space */
1720 if (max_distance
> VMALLOC_TOTAL
* 3 / 4) {
1721 pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1722 "space 0x%lx\n", max_distance
,
1724 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1725 /* and fail if we have fallback */
1731 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1732 PFN_DOWN(size_sum
), base
, ai
->static_size
, ai
->reserved_size
,
1733 ai
->dyn_size
, ai
->unit_size
);
1735 rc
= pcpu_setup_first_chunk(ai
, base
);
1739 for (group
= 0; group
< ai
->nr_groups
; group
++)
1741 free_fn(areas
[group
],
1742 ai
->groups
[group
].nr_units
* ai
->unit_size
);
1744 pcpu_free_alloc_info(ai
);
1746 memblock_free_early(__pa(areas
), areas_size
);
1749 #endif /* BUILD_EMBED_FIRST_CHUNK */
1751 #ifdef BUILD_PAGE_FIRST_CHUNK
1753 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1754 * @reserved_size: the size of reserved percpu area in bytes
1755 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1756 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1757 * @populate_pte_fn: function to populate pte
1759 * This is a helper to ease setting up page-remapped first percpu
1760 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1762 * This is the basic allocator. Static percpu area is allocated
1763 * page-by-page into vmalloc area.
1766 * 0 on success, -errno on failure.
1768 int __init
pcpu_page_first_chunk(size_t reserved_size
,
1769 pcpu_fc_alloc_fn_t alloc_fn
,
1770 pcpu_fc_free_fn_t free_fn
,
1771 pcpu_fc_populate_pte_fn_t populate_pte_fn
)
1773 static struct vm_struct vm
;
1774 struct pcpu_alloc_info
*ai
;
1778 struct page
**pages
;
1781 snprintf(psize_str
, sizeof(psize_str
), "%luK", PAGE_SIZE
>> 10);
1783 ai
= pcpu_build_alloc_info(reserved_size
, 0, PAGE_SIZE
, NULL
);
1786 BUG_ON(ai
->nr_groups
!= 1);
1787 BUG_ON(ai
->groups
[0].nr_units
!= num_possible_cpus());
1789 unit_pages
= ai
->unit_size
>> PAGE_SHIFT
;
1791 /* unaligned allocations can't be freed, round up to page size */
1792 pages_size
= PFN_ALIGN(unit_pages
* num_possible_cpus() *
1794 pages
= memblock_virt_alloc(pages_size
, 0);
1796 /* allocate pages */
1798 for (unit
= 0; unit
< num_possible_cpus(); unit
++)
1799 for (i
= 0; i
< unit_pages
; i
++) {
1800 unsigned int cpu
= ai
->groups
[0].cpu_map
[unit
];
1803 ptr
= alloc_fn(cpu
, PAGE_SIZE
, PAGE_SIZE
);
1805 pr_warning("PERCPU: failed to allocate %s page "
1806 "for cpu%u\n", psize_str
, cpu
);
1809 /* kmemleak tracks the percpu allocations separately */
1811 pages
[j
++] = virt_to_page(ptr
);
1814 /* allocate vm area, map the pages and copy static data */
1815 vm
.flags
= VM_ALLOC
;
1816 vm
.size
= num_possible_cpus() * ai
->unit_size
;
1817 vm_area_register_early(&vm
, PAGE_SIZE
);
1819 for (unit
= 0; unit
< num_possible_cpus(); unit
++) {
1820 unsigned long unit_addr
=
1821 (unsigned long)vm
.addr
+ unit
* ai
->unit_size
;
1823 for (i
= 0; i
< unit_pages
; i
++)
1824 populate_pte_fn(unit_addr
+ (i
<< PAGE_SHIFT
));
1826 /* pte already populated, the following shouldn't fail */
1827 rc
= __pcpu_map_pages(unit_addr
, &pages
[unit
* unit_pages
],
1830 panic("failed to map percpu area, err=%d\n", rc
);
1833 * FIXME: Archs with virtual cache should flush local
1834 * cache for the linear mapping here - something
1835 * equivalent to flush_cache_vmap() on the local cpu.
1836 * flush_cache_vmap() can't be used as most supporting
1837 * data structures are not set up yet.
1840 /* copy static data */
1841 memcpy((void *)unit_addr
, __per_cpu_load
, ai
->static_size
);
1844 /* we're ready, commit */
1845 pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1846 unit_pages
, psize_str
, vm
.addr
, ai
->static_size
,
1847 ai
->reserved_size
, ai
->dyn_size
);
1849 rc
= pcpu_setup_first_chunk(ai
, vm
.addr
);
1854 free_fn(page_address(pages
[j
]), PAGE_SIZE
);
1857 memblock_free_early(__pa(pages
), pages_size
);
1858 pcpu_free_alloc_info(ai
);
1861 #endif /* BUILD_PAGE_FIRST_CHUNK */
1863 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1865 * Generic SMP percpu area setup.
1867 * The embedding helper is used because its behavior closely resembles
1868 * the original non-dynamic generic percpu area setup. This is
1869 * important because many archs have addressing restrictions and might
1870 * fail if the percpu area is located far away from the previous
1871 * location. As an added bonus, in non-NUMA cases, embedding is
1872 * generally a good idea TLB-wise because percpu area can piggy back
1873 * on the physical linear memory mapping which uses large page
1874 * mappings on applicable archs.
1876 unsigned long __per_cpu_offset
[NR_CPUS
] __read_mostly
;
1877 EXPORT_SYMBOL(__per_cpu_offset
);
1879 static void * __init
pcpu_dfl_fc_alloc(unsigned int cpu
, size_t size
,
1882 return memblock_virt_alloc_from_nopanic(
1883 size
, align
, __pa(MAX_DMA_ADDRESS
));
1886 static void __init
pcpu_dfl_fc_free(void *ptr
, size_t size
)
1888 memblock_free_early(__pa(ptr
), size
);
1891 void __init
setup_per_cpu_areas(void)
1893 unsigned long delta
;
1898 * Always reserve area for module percpu variables. That's
1899 * what the legacy allocator did.
1901 rc
= pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE
,
1902 PERCPU_DYNAMIC_RESERVE
, PAGE_SIZE
, NULL
,
1903 pcpu_dfl_fc_alloc
, pcpu_dfl_fc_free
);
1905 panic("Failed to initialize percpu areas.");
1907 delta
= (unsigned long)pcpu_base_addr
- (unsigned long)__per_cpu_start
;
1908 for_each_possible_cpu(cpu
)
1909 __per_cpu_offset
[cpu
] = delta
+ pcpu_unit_offsets
[cpu
];
1911 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1913 #else /* CONFIG_SMP */
1916 * UP percpu area setup.
1918 * UP always uses km-based percpu allocator with identity mapping.
1919 * Static percpu variables are indistinguishable from the usual static
1920 * variables and don't require any special preparation.
1922 void __init
setup_per_cpu_areas(void)
1924 const size_t unit_size
=
1925 roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE
,
1926 PERCPU_DYNAMIC_RESERVE
));
1927 struct pcpu_alloc_info
*ai
;
1930 ai
= pcpu_alloc_alloc_info(1, 1);
1931 fc
= memblock_virt_alloc_from_nopanic(unit_size
,
1933 __pa(MAX_DMA_ADDRESS
));
1935 panic("Failed to allocate memory for percpu areas.");
1936 /* kmemleak tracks the percpu allocations separately */
1939 ai
->dyn_size
= unit_size
;
1940 ai
->unit_size
= unit_size
;
1941 ai
->atom_size
= unit_size
;
1942 ai
->alloc_size
= unit_size
;
1943 ai
->groups
[0].nr_units
= 1;
1944 ai
->groups
[0].cpu_map
[0] = 0;
1946 if (pcpu_setup_first_chunk(ai
, fc
) < 0)
1947 panic("Failed to initialize percpu areas.");
1949 pcpu_free_alloc_info(ai
);
1952 #endif /* CONFIG_SMP */
1955 * First and reserved chunks are initialized with temporary allocation
1956 * map in initdata so that they can be used before slab is online.
1957 * This function is called after slab is brought up and replaces those
1958 * with properly allocated maps.
1960 void __init
percpu_init_late(void)
1962 struct pcpu_chunk
*target_chunks
[] =
1963 { pcpu_first_chunk
, pcpu_reserved_chunk
, NULL
};
1964 struct pcpu_chunk
*chunk
;
1965 unsigned long flags
;
1968 for (i
= 0; (chunk
= target_chunks
[i
]); i
++) {
1970 const size_t size
= PERCPU_DYNAMIC_EARLY_SLOTS
* sizeof(map
[0]);
1972 BUILD_BUG_ON(size
> PAGE_SIZE
);
1974 map
= pcpu_mem_zalloc(size
);
1977 spin_lock_irqsave(&pcpu_lock
, flags
);
1978 memcpy(map
, chunk
->map
, size
);
1980 spin_unlock_irqrestore(&pcpu_lock
, flags
);