/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is percpu allocator which can handle both static and dynamic
 * areas.  Percpu areas are allocated in chunks in vmalloc area.  Each
 * chunk consists of a boot-time determined number of units and the
 * first chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling necessary as these areas
 * need to be brought up before allocation services are running).
 * Unit grows as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.  ie. in
 * vmalloc area
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and negative allocated.  Allocation inside a chunk is done
 * by scanning this map sequentially and serving the first matching
 * entry.  This is mostly copied from the percpu_modalloc() allocator.
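 *
 * For example, a chunk->map of { 64, -128, 256 } describes, in that
 * order, a 64 byte free area, a 128 byte allocated area and a 256
 * byte free area.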
 * Chunks can be determined from the address using the index field
 * in the page struct.  The index field contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif
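/*
 * With the default definitions above the two translations are exact
 * inverses: for any percpu pointer ptr,
 * __addr_to_pcpu_ptr(__pcpu_ptr_to_addr(ptr)) == ptr.
 */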
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};
static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

/* cpu -> unit map */
const int *pcpu_unit_map __read_mostly;
/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;
/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;
/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * Free path accesses and alters only the index data structures, so it
 * can be safely called from atomic context.  When memory needs to be
 * returned to the system, free path schedules reclaim_work which
 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 * reclaimed, releases both locks and frees the chunks.  Note that
 * it's necessary to grab both locks to remove a chunk from
 * circulation as allocation path might be referencing the chunk with
 * only pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}
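/*
 * E.g. a 1024 byte area has fls(1024) == 11 and thus maps to slot
 * 11 - 5 + 2 == 8; tiny areas whose raw result would be below 1 are
 * clamped into slot 1 by the max() above.
 */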
static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}
static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}
static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}
static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}
/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}
/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}
static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}
static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}
/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
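/*
 * Usage sketch: walk all populated page regions of a chunk and apply
 * process_region(), a placeholder name for whatever per-region
 * operation is needed.
 *
 *	int rs, re;
 *
 *	pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		process_region(chunk, rs, re);
 */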
/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}
/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}
/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}
/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	void *first_start = pcpu_first_chunk->vm->addr;

	/* is it in the first chunk? */
	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
		/* is it in the reserved area? */
		if (addr < first_start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_map[smp_processor_id()] * pcpu_unit_size;
	return pcpu_get_page_chunk(vmalloc_to_page(addr));
}
/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend area map of @chunk so that it can accommodate an allocation.
 * A single allocation can split an area into three areas, so this
 * function makes sure that @chunk->map has at least two extra slots.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
 * if area map is extended.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	spin_unlock_irq(&pcpu_lock);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
		spin_lock_irq(&pcpu_lock);
		return -ENOMEM;
	}

	/*
	 * Acquire pcpu_lock and switch to new area map.  Only free
	 * could have happened in between, so map_used couldn't have
	 * grown.
	 */
	spin_lock_irq(&pcpu_lock);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	return 1;
}
/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
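/*
 * For example, splitting a single 512 byte free block with @head=16
 * and @tail=32 turns the map entry { 512 } into { 16, 464, 32 }; the
 * caller then marks the 464 byte middle block allocated.
 */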
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}
/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
/**
 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
 * @chunk: chunk of interest
 * @bitmapp: output parameter for bitmap
 * @may_alloc: may allocate the array
 *
 * Returns pointer to array of pointers to struct page and bitmap,
 * both of which can be indexed with pcpu_page_idx().  The returned
 * array is cleared to zero and *@bitmapp is copied from
 * @chunk->populated.  Note that there is only one array and bitmap
 * and access exclusion is the caller's responsibility.
 *
 * CONTEXT:
 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
 * Otherwise, don't care.
 *
 * RETURNS:
 * Pointer to temp pages array on success, NULL on failure.
 */
static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
					       unsigned long **bitmapp,
					       bool may_alloc)
{
	static struct page **pages;
	static unsigned long *bitmap;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
	size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
			     sizeof(unsigned long);

	if (!pages || !bitmap) {
		if (may_alloc && !pages)
			pages = pcpu_mem_alloc(pages_size);
		if (may_alloc && !bitmap)
			bitmap = pcpu_mem_alloc(bitmap_size);
		if (!pages || !bitmap)
			return NULL;
	}

	memset(pages, 0, pages_size);
	bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);

	*bitmapp = bitmap;
	return pages;
}
/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}
/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep) {
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}
/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be an overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}
static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}
/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @populated: populated bitmap
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, unsigned long *populated,
			     int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}

	for (i = page_start; i < page_end; i++)
		__clear_bit(i, populated);
}
/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}
static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}
/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @populated: populated bitmap
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting corresponding bits in
 * @chunk->populated bitmap and whatever is necessary for reverse
 * lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, unsigned long *populated,
			  int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;
	}

	/* mapping successful, link chunk and mark populated */
	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
		__set_bit(i, populated);
	}

	return 0;

err:
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	return err;
}
/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  The cache is flushed before unmapping; the TLB flush
 * is left to vmalloc which handles it lazily.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	struct page **pages;
	unsigned long *populated;
	int rs, re;

	/* quick path, check whether it's empty already */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		if (rs == page_start && re == page_end)
			return;
		break;
	}

	/* immutable chunks can't be depopulated */
	WARN_ON(chunk->immutable);

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}
/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int free_end = page_start, unmap_end = page_start;
	struct page **pages;
	unsigned long *populated;
	unsigned int cpu;
	int rs, re, rc;

	/* quick path, check whether all pages are already there */
	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
		if (rs == page_start && re == page_end)
			goto clear;
		break;
	}

	/* need to allocate and map pages, this chunk can't be immutable */
	WARN_ON(chunk->immutable);

	pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
	if (!pages)
		return -ENOMEM;

	/* alloc and map */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_free;
		free_end = re;
	}

	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_map_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_unmap;
		unmap_end = re;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
clear:
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
	return 0;

err_unmap:
	pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);
	pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);
	return rc;
}
static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}
static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}
/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
		    pcpu_extend_area_map(chunk) < 0)
			goto fail_unlock;
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			switch (pcpu_extend_area_map(chunk)) {
			case 0:
				break;
			case 1:
				goto restart;	/* pcpu_lock dropped, restart */
			default:
				goto fail_unlock;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irq(&pcpu_lock);

	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto fail_unlock_mutex;

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irq(&pcpu_lock);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irq(&pcpu_lock);
		pcpu_free_area(chunk, off);
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to unit0 */
	return __addr_to_pcpu_ptr(chunk->vm->addr + off);

fail_unlock:
	spin_unlock_irq(&pcpu_lock);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	return NULL;
}
/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
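/*
 * Typical usage (sketch): allocate a dynamic percpu counter, bump it
 * on the local cpu and free it again.
 *
 *	int *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *
 *	if (cnt) {
 *		(*per_cpu_ptr(cnt, raw_smp_processor_id()))++;
 *		free_percpu(cnt);
 *	}
 */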
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}
/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		free_pcpu_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}
/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* if there are more than one fully free chunks, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);
/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes, 0 for none
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE
 * @base_addr: mapped address
 * @unit_map: cpu -> unit map, NULL for sequential mapping
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @dyn_size, if non-negative, determines the number of bytes
 * available for dynamic allocation in the first chunk.  Specifying
 * non-negative value makes percpu leave alone the area beyond
 * @static_size + @reserved_size + @dyn_size.
 *
 * @unit_size specifies unit size and must be aligned to PAGE_SIZE and
 * equal to or larger than @static_size + @reserved_size + if
 * non-negative, @dyn_size.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
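 *
 * For example, with a 64k static area, 8k reserved area and 24k
 * dynamic area in a 128k unit, each unit is laid out as
 *
 *	<-- static 64k --><-- reserved 8k --><-- dynamic 24k --><unused>
 *
 * with the static/reserved chunk managing the first 72k and the
 * dynamic chunk the remainder of the unit.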
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access.
 */
size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size,
				     ssize_t dyn_size, size_t unit_size,
				     void *base_addr, const int *unit_map)
{
	static struct vm_struct first_vm;
	static int smap[2], dmap[2];
	size_t size_sum = static_size + reserved_size +
			  (dyn_size >= 0 ? dyn_size : 0);
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned int cpu, tcpu;
	int i;

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
	BUG_ON(!static_size);
	BUG_ON(!base_addr);
	BUG_ON(unit_size < size_sum);
	BUG_ON(unit_size & ~PAGE_MASK);
	BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);

	/* determine number of units and verify and initialize pcpu_unit_map */
	if (unit_map) {
		int first_unit = INT_MAX, last_unit = INT_MIN;

		for_each_possible_cpu(cpu) {
			int unit = unit_map[cpu];

			BUG_ON(unit < 0);
			for_each_possible_cpu(tcpu) {
				if (tcpu == cpu)
					break;
				/* the mapping should be one-to-one */
				BUG_ON(unit_map[tcpu] == unit);
			}

			if (unit < first_unit) {
				pcpu_first_unit_cpu = cpu;
				first_unit = unit;
			}
			if (unit > last_unit) {
				pcpu_last_unit_cpu = cpu;
				last_unit = unit;
			}
		}
		pcpu_nr_units = last_unit + 1;
		pcpu_unit_map = unit_map;
	} else {
		int *identity_map;

		/* #units == #cpus, identity mapped */
		identity_map = alloc_bootmem(nr_cpu_ids *
					     sizeof(identity_map[0]));

		for_each_possible_cpu(cpu)
			identity_map[cpu] = cpu;

		pcpu_first_unit_cpu = 0;
		pcpu_nr_units = nr_cpu_ids;
		pcpu_last_unit_cpu = pcpu_nr_units - 1;
		pcpu_unit_map = identity_map;
	}

	/* determine basic parameters */
	pcpu_unit_pages = unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_chunk_size = pcpu_nr_units * pcpu_unit_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	if (dyn_size < 0)
		dyn_size = pcpu_unit_size - static_size - reserved_size;

	first_vm.flags = VM_ALLOC;
	first_vm.size = pcpu_chunk_size;
	first_vm.addr = base_addr;

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->vm = &first_vm;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->immutable = true;
	bitmap_fill(schunk->populated, pcpu_unit_pages);

	if (reserved_size) {
		schunk->free_size = reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = static_size + reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->vm = &first_vm;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->immutable = true;
		bitmap_fill(dchunk->populated, pcpu_unit_pages);

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = schunk->vm->addr;
	return pcpu_unit_size;
}
static size_t pcpu_calc_fc_sizes(size_t static_size, size_t reserved_size,
				 ssize_t *dyn_sizep)
{
	size_t size_sum;

	size_sum = PFN_ALIGN(static_size + reserved_size +
			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
	if (*dyn_sizep != 0)
		*dyn_sizep = size_sum - static_size - reserved_size;

	return size_sum;
}
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * as a contiguous area using bootmem allocator and used as-is without
 * being mapped into vmalloc area.  This enables the first chunk to
 * piggy back on the linear physical mapping which often uses larger
 * page size.
 *
 * When @dyn_size is positive, dynamic area might be larger than
 * specified to fill page alignment.  When @dyn_size is auto,
 * @dyn_size is just big enough to fill page alignment after static
 * and reserved areas.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned to the bootmem allocator.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access on success, -errno on failure.
 */
ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
				      ssize_t dyn_size)
{
	size_t size_sum, unit_size, chunk_size;
	void *base;
	unsigned int cpu;

	/* determine parameters and allocate */
	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);

	unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
	chunk_size = unit_size * nr_cpu_ids;

	base = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
				       __pa(MAX_DMA_ADDRESS));
	if (!base) {
		pr_warning("PERCPU: failed to allocate %zu bytes for "
			   "embedding\n", chunk_size);
		return -ENOMEM;
	}

	/* return the leftover and copy */
	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		void *ptr = base + cpu * unit_size;

		if (cpu_possible(cpu)) {
			free_bootmem(__pa(ptr + size_sum),
				     unit_size - size_sum);
			memcpy(ptr, __per_cpu_load, static_size);
		} else
			free_bootmem(__pa(ptr), unit_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
		size_sum >> PAGE_SHIFT, base, static_size);

	return pcpu_setup_first_chunk(static_size, reserved_size, dyn_size,
				      unit_size, base, NULL);
}
/**
 * pcpu_4k_first_chunk - map the first chunk using PAGE_SIZE pages
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up a page-mapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access on success, -errno on failure.
 */
ssize_t __init pcpu_4k_first_chunk(size_t static_size, size_t reserved_size,
				   pcpu_fc_alloc_fn_t alloc_fn,
				   pcpu_fc_free_fn_t free_fn,
				   pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	unit_pages = PFN_UP(max_t(size_t, static_size + reserved_size,
				  PCPU_MIN_UNIT_SIZE));

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * nr_cpu_ids * sizeof(pages[0]));
	pages = alloc_bootmem(pages_size);

	/* allocate pages */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < unit_pages; i++) {
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate "
					   "4k page for cpu%u\n", cpu);
				goto enomem;
			}
			pages[j++] = virt_to_page(ptr);
		}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = nr_cpu_ids * unit_pages << PAGE_SHIFT;
	vm_area_register_early(&vm, PAGE_SIZE);

	for_each_possible_cpu(cpu) {
		unsigned long unit_addr = (unsigned long)vm.addr +
			(cpu * unit_pages << PAGE_SHIFT);

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		ret = __pcpu_map_pages(unit_addr, &pages[cpu * unit_pages],
				       unit_pages);
		if (ret < 0)
			panic("failed to map percpu area, err=%zd\n", ret);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: %d 4k pages per cpu, static data %zu bytes\n",
		unit_pages, static_size);

	ret = pcpu_setup_first_chunk(static_size, reserved_size, -1,
				     unit_pages << PAGE_SHIFT, vm.addr, NULL);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pages), pages_size);
	return ret;
}
/*
 * Large page remapping first chunk setup helper
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
/**
 * pcpu_lpage_build_unit_map - build unit_map for large page remapping
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_sizep: in/out parameter for dynamic size, -1 for auto
 * @unit_sizep: out parameter for unit size
 * @lpage_size: the size of a large page
 * @unit_map: unit_map to be filled
 * @cpu_distance_fn: callback to determine distance between cpus
 *
 * This function builds cpu -> unit map and determines other parameters
 * considering needed percpu size, large page size and distances
 * between CPUs in NUMA.
 *
 * CPUs which are of LOCAL_DISTANCE both ways are grouped together and
 * may share units in the same large page.  The returned configuration
 * is guaranteed to have CPUs on different nodes on different large
 * pages and >=75% usage of allocated virtual address space.
 *
 * RETURNS:
 * On success, fills in @unit_map, sets *@dyn_sizep, *@unit_sizep and
 * returns the number of units to be allocated.  -errno on failure.
 */
int __init pcpu_lpage_build_unit_map(size_t static_size, size_t reserved_size,
				     ssize_t *dyn_sizep, size_t *unit_sizep,
				     size_t lpage_size, int *unit_map,
				     pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	int group_cnt_max = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs;
	unsigned int cpu, tcpu;
	int group, unit;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of lpage_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, dyn_sizep);
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, lpage_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
		group_cnt_max = max(group_cnt_max, group_cnt[group]);
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (group = 0; group_cnt[group]; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 25%.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	*unit_sizep = alloc_size / best_upa;

	/* assign units to cpus accordingly */
	unit = 0;
	for (group = 0; group_cnt[group]; group++) {
		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				unit_map[cpu] = unit++;
		unit = roundup(unit, best_upa);
	}

	return unit;	/* unit contains aligned number of units */
}
struct pcpul_ent {
	void		*ptr;
	void		*map_addr;
};

static size_t pcpul_size;
static size_t pcpul_lpage_size;
static int pcpul_nr_lpages;
static struct pcpul_ent *pcpul_map;
static bool __init pcpul_unit_to_cpu(int unit, const int *unit_map,
				     unsigned int *cpup)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		if (unit_map[cpu] == unit) {
			if (cpup)
				*cpup = cpu;
			return true;
		}

	return false;
}
static void __init pcpul_lpage_dump_cfg(const char *lvl, size_t static_size,
					size_t reserved_size, size_t dyn_size,
					size_t unit_size, size_t lpage_size,
					const int *unit_map, int nr_units)
{
	int width = 1, v = nr_units;
	char empty_str[] = "--------";
	int upl, lpl;	/* units per lpage, lpage per line */
	unsigned int cpu;
	int lpage, unit;

	while (v /= 10)
		width++;
	empty_str[min_t(int, width, sizeof(empty_str) - 1)] = '\0';

	upl = max_t(int, lpage_size / unit_size, 1);
	lpl = rounddown_pow_of_two(max_t(int, 60 / (upl * (width + 1) + 2), 1));

	printk("%spcpu-lpage: sta/res/dyn=%zu/%zu/%zu unit=%zu lpage=%zu", lvl,
	       static_size, reserved_size, dyn_size, unit_size, lpage_size);

	for (lpage = 0, unit = 0; unit < nr_units; unit++) {
		if (!(unit % upl)) {
			if (!(lpage++ % lpl)) {
				printk("\n");
				printk("%spcpu-lpage: ", lvl);
			} else
				printk("| ");
		}
		if (pcpul_unit_to_cpu(unit, unit_map, &cpu))
			printk("%0*d ", width, cpu);
		else
			printk("%s ", empty_str);
	}
	printk("\n");
}
/**
 * pcpu_lpage_first_chunk - remap the first percpu chunk using large page
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes
 * @unit_size: unit size in bytes
 * @lpage_size: the size of a large page
 * @unit_map: cpu -> unit mapping
 * @nr_units: the number of units
 * @alloc_fn: function to allocate percpu lpage, always called with lpage_size
 * @free_fn: function to free percpu memory, @size <= lpage_size
 * @map_fn: function to map percpu lpage, always called with lpage_size
 *
 * This allocator uses large page to build and map the first chunk.
 * Unlike other helpers, the caller should always specify @dyn_size
 * and @unit_size.  These parameters along with @unit_map and
 * @nr_units can be determined using pcpu_lpage_build_unit_map().
 * This two-stage initialization is to allow arch code to evaluate the
 * parameters before committing to them.
 *
 * Large pages are allocated as directed by @unit_map and other
 * parameters and mapped to vmalloc space.  Unused holes are returned
 * to the page allocator.  Note that these holes end up being actively
 * mapped twice - once in the linear physical mapping and once in the
 * vmalloc area for the first percpu chunk.  Depending on architecture,
 * this might cause problems when changing page attributes of the
 * returned area.  These double mapped areas can be detected using
 * pcpu_lpage_remapped().
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access on success, -errno on failure.
 */
ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size,
				      size_t dyn_size, size_t unit_size,
				      size_t lpage_size, const int *unit_map,
				      int nr_units,
				      pcpu_fc_alloc_fn_t alloc_fn,
				      pcpu_fc_free_fn_t free_fn,
				      pcpu_fc_map_fn_t map_fn)
{
	static struct vm_struct vm;
	size_t chunk_size = unit_size * nr_units;
	size_t map_size;
	unsigned int cpu;
	ssize_t ret;
	int i, j, unit;

	pcpul_lpage_dump_cfg(KERN_DEBUG, static_size, reserved_size, dyn_size,
			     unit_size, lpage_size, unit_map, nr_units);

	BUG_ON(chunk_size % lpage_size);

	pcpul_size = static_size + reserved_size + dyn_size;
	pcpul_lpage_size = lpage_size;
	pcpul_nr_lpages = chunk_size / lpage_size;

	/* allocate pointer array and alloc large pages */
	map_size = pcpul_nr_lpages * sizeof(pcpul_map[0]);
	pcpul_map = alloc_bootmem(map_size);

	/* allocate all pages */
	for (i = 0; i < pcpul_nr_lpages; i++) {
		size_t offset = i * lpage_size;
		int first_unit = offset / unit_size;
		int last_unit = (offset + lpage_size - 1) / unit_size;
		void *ptr;

		/* find out which cpu is mapped to this unit */
		for (unit = first_unit; unit <= last_unit; unit++)
			if (pcpul_unit_to_cpu(unit, unit_map, &cpu))
				goto found;
		continue;
	found:
		ptr = alloc_fn(cpu, lpage_size);
		if (!ptr) {
			pr_warning("PERCPU: failed to allocate large page "
				   "for cpu%u\n", cpu);
			goto enomem;
		}

		pcpul_map[i].ptr = ptr;
	}

	/* return unused holes */
	for (unit = 0; unit < nr_units; unit++) {
		size_t start = unit * unit_size;
		size_t end = start + unit_size;
		size_t off, next;

		/* don't free used part of occupied unit */
		if (pcpul_unit_to_cpu(unit, unit_map, NULL))
			start += pcpul_size;

		/* unit can span more than one page, punch the holes */
		for (off = start; off < end; off = next) {
			void *ptr = pcpul_map[off / lpage_size].ptr;
			next = min(roundup(off + 1, lpage_size), end);
			if (ptr)
				free_fn(ptr + off % lpage_size, next - off);
		}
	}

	/* allocate address, map and copy */
	vm.flags = VM_ALLOC;
	vm.size = chunk_size;
	vm_area_register_early(&vm, unit_size);

	for (i = 0; i < pcpul_nr_lpages; i++) {
		if (!pcpul_map[i].ptr)
			continue;
		pcpul_map[i].map_addr = vm.addr + i * lpage_size;
		map_fn(pcpul_map[i].ptr, lpage_size, pcpul_map[i].map_addr);
	}

	for_each_possible_cpu(cpu)
		memcpy(vm.addr + unit_map[cpu] * unit_size, __per_cpu_load,
		       static_size);

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", vm.addr, static_size);

	ret = pcpu_setup_first_chunk(static_size, reserved_size, dyn_size,
				     unit_size, vm.addr, unit_map);

	/*
	 * Sort pcpul_map array for pcpu_lpage_remapped().  Unmapped
	 * lpages are pushed to the end and trimmed.
	 */
	for (i = 0; i < pcpul_nr_lpages - 1; i++)
		for (j = i + 1; j < pcpul_nr_lpages; j++) {
			struct pcpul_ent tmp;

			if (!pcpul_map[j].ptr)
				continue;
			if (pcpul_map[i].ptr &&
			    pcpul_map[i].ptr < pcpul_map[j].ptr)
				continue;

			tmp = pcpul_map[i];
			pcpul_map[i] = pcpul_map[j];
			pcpul_map[j] = tmp;
		}

	while (pcpul_nr_lpages && !pcpul_map[pcpul_nr_lpages - 1].ptr)
		pcpul_nr_lpages--;

	return ret;

enomem:
	for (i = 0; i < pcpul_nr_lpages; i++)
		if (pcpul_map[i].ptr)
			free_fn(pcpul_map[i].ptr, lpage_size);
	free_bootmem(__pa(pcpul_map), map_size);
	return -ENOMEM;
}
/**
 * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
 * @kaddr: the kernel address in question
 *
 * Determine whether @kaddr falls in the pcpul recycled area.  This is
 * used by pageattr to detect VM aliases and break up the pcpu large
 * page mapping such that the same physical page is not mapped under
 * different attributes.
 *
 * The recycled area is always at the tail of a partially used large
 * page.
 *
 * RETURNS:
 * Address of corresponding remapped pcpu address if match is found;
 * otherwise, NULL.
 */
void *pcpu_lpage_remapped(void *kaddr)
{
	unsigned long lpage_mask = pcpul_lpage_size - 1;
	void *lpage_addr = (void *)((unsigned long)kaddr & ~lpage_mask);
	unsigned long offset = (unsigned long)kaddr & lpage_mask;
	int left = 0, right = pcpul_nr_lpages - 1;
	int pos;

	/* pcpul in use at all? */
	if (!pcpul_map)
		return NULL;

	/* okay, perform binary search */
	while (left <= right) {
		pos = (left + right) / 2;

		if (pcpul_map[pos].ptr < lpage_addr)
			left = pos + 1;
		else if (pcpul_map[pos].ptr > lpage_addr)
			right = pos - 1;
		else
			return pcpul_map[pos].map_addr + offset;
	}

	return NULL;
}
#endif /* CONFIG_NEED_MULTIPLE_NODES */
/*
 * Generic percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	ssize_t unit_size;
	unsigned long delta;
	unsigned int cpu;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
					   PERCPU_DYNAMIC_RESERVE);
	if (unit_size < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + cpu * unit_size;
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */