/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk
 * consists of a boot-time determined number of units and the first
 * chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  I.e.,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  Each entry records the byte offset at which an area
 * starts; the low bit of an entry is set while the area is in use and
 * clear while it is free.  Allocation inside a chunk is done by
 * scanning this map sequentially and serving the first matching
 * entry.  This is mostly copied from the percpu_modalloc() allocator.
 * Chunks can be determined from the address using the index field
 * in the page struct.  The index field contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
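
/*
 * Illustrative example of the chunk->map encoding used in this file:
 * in a chunk whose unit size is 12k, a single in-use 4k area at offset
 * 0 followed by free space is recorded as map[] = { 0 | 1, 4096,
 * 12288 | 1 } with map_used == 2 -- an in-use area starting at offset
 * 0, a free area starting at 4096, and the in-use sentry entry marking
 * the end of the unit.
 */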

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
#define PCPU_ATOMIC_MAP_MARGIN_LOW	32
#define PCPU_ATOMIC_MAP_MARGIN_HIGH	64
#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */

	int			map_used;	/* # of map entries used before the sentry */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct work_struct	map_extend_work;/* async ->map[] extension */

	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	int			nr_populated;	/* # of populated pages */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __read_mostly;
static unsigned int pcpu_high_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
static int pcpu_nr_empty_pop_pages;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

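/*
 * Example of the size -> slot mapping above: a chunk with 1024 bytes
 * free has fls(1024) == 11 and therefore sits in slot
 * 11 - PCPU_SLOT_BASE_SHIFT + 2 == 8, unless it is completely free, in
 * which case it goes to the last slot.
 */
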
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

/**
 * pcpu_count_occupied_pages - count the number of pages an area occupies
 * @chunk: chunk of interest
 * @i: index of the area in question
 *
 * Count the number of pages chunk's @i'th area occupies.  When the area's
 * start and/or end address isn't aligned to page boundary, the straddled
 * page is included in the count iff the rest of the page is free.
 */
static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
{
	int off = chunk->map[i] & ~1;
	int end = chunk->map[i + 1] & ~1;

	if (!PAGE_ALIGNED(off) && i > 0) {
		int prev = chunk->map[i - 1];

		if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
			off = round_down(off, PAGE_SIZE);
	}

	if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
		int next = chunk->map[i + 1];
		int nend = chunk->map[i + 2] & ~1;

		if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
			end = round_up(end, PAGE_SIZE);
	}

	return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 * @is_atomic: the allocation context
 *
 * Determine whether area map of @chunk needs to be extended.  If
 * @is_atomic, only the amount necessary for a new allocation is
 * considered; however, async extension is scheduled if the left amount is
 * low.  If !@is_atomic, it aims for more empty space.  Combined, this
 * ensures that the map is likely to have enough available space to
 * accommodate atomic allocations which can't extend maps directly.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
{
	int margin, new_alloc;

	if (is_atomic) {
		margin = 3;

		if (chunk->map_alloc <
		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
		    pcpu_async_enabled)
			schedule_work(&chunk->map_extend_work);
	} else {
		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
	}

	if (chunk->map_alloc >= chunk->map_used + margin)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + margin)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old);
	pcpu_mem_free(new);

	return 0;
}

static void pcpu_map_extend_workfn(struct work_struct *work)
{
	struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
						map_extend_work);
	int new_alloc;

	spin_lock_irq(&pcpu_lock);
	new_alloc = pcpu_need_to_extend(chunk, false);
	spin_unlock_irq(&pcpu_lock);

	if (new_alloc)
		pcpu_extend_area_map(chunk, new_alloc);
}

/**
 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 * @chunk: chunk the candidate area belongs to
 * @off: the offset to the start of the candidate area
 * @this_size: the size of the candidate area
 * @size: the size of the target allocation
 * @align: the alignment of the target allocation
 * @pop_only: only allocate from already populated region
 *
 * We're trying to allocate @size bytes aligned at @align.  @chunk's area
 * at @off sized @this_size is a candidate.  This function determines
 * whether the target allocation fits in the candidate area and returns the
 * number of bytes to pad after @off.  If the target area doesn't fit, -1
 * is returned.
 *
 * If @pop_only is %true, this function only considers the already
 * populated part of the candidate area.
 */
static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
			    int size, int align, bool pop_only)
{
	int cand_off = off;

	while (true) {
		int head = ALIGN(cand_off, align) - off;
		int page_start, page_end, rs, re;

		if (this_size < head + size)
			return -1;

		if (!pop_only)
			return head;

		/*
		 * If the first unpopulated page is beyond the end of the
		 * allocation, the whole allocation is populated;
		 * otherwise, retry from the end of the unpopulated area.
		 */
		page_start = PFN_DOWN(head + off);
		page_end = PFN_UP(head + off + size);

		rs = page_start;
		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
		if (rs >= page_end)
			return head;
		cand_off = re * PAGE_SIZE;
	}
}

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 * @pop_only: allocate only from the populated area
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
			   bool pop_only, int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;
	bool seen_free = false;
	int *p;

	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
		int head, tail;
		int this_size;

		off = *p;
		if (off & 1)
			continue;

		this_size = (p[1] & ~1) - off;

		head = pcpu_fit_in_area(chunk, off, this_size, size, align,
					pop_only);
		if (head < 0) {
			if (!seen_free) {
				chunk->first_free = i;
				seen_free = true;
			}
			max_contig = max(this_size, max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
			*p = off += head;
			if (p[-1] & 1)
				chunk->free_size -= head;
			else
				max_contig = max(*p - p[-1], max_contig);
			this_size -= head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = this_size - head - size;
		if (tail < sizeof(int)) {
			tail = 0;
			size = this_size - head;
		}

		/* split if warranted */
		if (head || tail) {
			int nr_extra = !!head + !!tail;

			/* insert new subblocks */
			memmove(p + nr_extra + 1, p + 1,
				sizeof(chunk->map[0]) * (chunk->map_used - i));
			chunk->map_used += nr_extra;

			if (head) {
				if (!seen_free) {
					chunk->first_free = i;
					seen_free = true;
				}
				*++p = off += head;
				++i;
				max_contig = max(head, max_contig);
			}
			if (tail) {
				p[1] = off + size;
				max_contig = max(tail, max_contig);
			}
		}

		if (!seen_free)
			chunk->first_free = i + 1;

		/* update hint and mark allocated */
		if (i + 1 == chunk->map_used)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= size;
		*p |= 1;

		*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
			   int *occ_pages_p)
{
	int oslot = pcpu_chunk_slot(chunk);
	int off = 0;
	unsigned i, j;
	int to_free = 0;
	int *p;

	freeme |= 1;	/* we are searching for <given offset, in use> pair */

	i = 0;
	j = chunk->map_used;
	while (i != j) {
		unsigned k = (i + j) / 2;
		off = chunk->map[k];
		if (off < freeme)
			i = k + 1;
		else if (off > freeme)
			j = k;
		else
			i = j = k;
	}
	BUG_ON(off != freeme);

	if (i < chunk->first_free)
		chunk->first_free = i;

	p = chunk->map + i;
	*p = off &= ~1;
	chunk->free_size += (p[1] & ~1) - off;

	*occ_pages_p = pcpu_count_occupied_pages(chunk, i);

	/* merge with next? */
	if (!(p[1] & 1))
		to_free++;
	/* merge with previous? */
	if (i > 0 && !(p[-1] & 1)) {
		to_free++;
		i--;
		p--;
	}
	if (to_free) {
		chunk->map_used -= to_free;
		memmove(p + 1, p + 1 + to_free,
			(chunk->map_used - i) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		pcpu_mem_free(chunk);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[0] = 0;
	chunk->map[1] = pcpu_unit_size | 1;
	chunk->map_used = 1;

	INIT_LIST_HEAD(&chunk->list);
	INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map);
	pcpu_mem_free(chunk);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;
	pcpu_nr_empty_pop_pages += nr;
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	pcpu_nr_empty_pop_pages -= nr;
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to physical address
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
	int occ_pages = 0;
	int slot, off, new_alloc, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;

	/*
	 * We want the lowest bit of offset available for in-use/free
	 * indicator, so force >= 16bit alignment and make size even.
	 */
	if (unlikely(align < 2))
		align = 2;

	size = ALIGN(size, 2);

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
		     size, align);
		return NULL;
	}

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (is_atomic ||
			    pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align, is_atomic,
				      &occ_pages);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk, is_atomic);
			if (new_alloc) {
				if (is_atomic)
					continue;
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align, is_atomic,
					      &occ_pages);
			if (off >= 0)
				goto area_found;
		}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * No space left.  Create a new chunk.  We don't want multiple
	 * tasks to create chunks simultaneously.  Serialize and create iff
	 * there's still no empty chunk after grabbing the mutex.
	 */
	if (is_atomic)
		goto fail;

	mutex_lock(&pcpu_alloc_mutex);

	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
		chunk = pcpu_create_chunk();
		if (!chunk) {
			mutex_unlock(&pcpu_alloc_mutex);
			err = "failed to allocate new chunk";
			goto fail;
		}

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_chunk_relocate(chunk, -1);
	} else {
		spin_lock_irqsave(&pcpu_lock, flags);
	}

	mutex_unlock(&pcpu_alloc_mutex);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate if not all pages are already there */
	if (!is_atomic) {
		int page_start, page_end, rs, re;

		mutex_lock(&pcpu_alloc_mutex);

		page_start = PFN_DOWN(off);
		page_end = PFN_UP(off + size);

		pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re);

			spin_lock_irqsave(&pcpu_lock, flags);
			if (ret) {
				mutex_unlock(&pcpu_alloc_mutex);
				pcpu_free_area(chunk, off, &occ_pages);
				err = "failed to populate";
				goto fail_unlock;
			}
			pcpu_chunk_populated(chunk, rs, re);
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}

		mutex_unlock(&pcpu_alloc_mutex);
	}

	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages -= occ_pages;

	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size, gfp);
	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	if (!is_atomic && warn_limit) {
		pr_warn("PERCPU: allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	if (is_atomic) {
		/* see the flag handling in pcpu_balance_workfn() */
		pcpu_atomic_alloc_failed = true;
		pcpu_schedule_balance_work();
	}
	return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

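/*
 * Typical usage sketch for the allocators above: a caller obtains a
 * zeroed percpu object and accesses the local copy through the regular
 * percpu accessors, e.g.
 *
 *	int __percpu *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *
 *	if (cnt)
 *		this_cpu_inc(*cnt);
 *	...
 *	free_percpu(cnt);
 */
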
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true, GFP_KERNEL);
}

/**
 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one and make sure
 * enough empty populated pages are available for atomic allocations.
 */
static void pcpu_balance_workfn(struct work_struct *work)
{
	LIST_HEAD(to_free);
	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;
	int slot, nr_to_pop, ret;

	/*
	 * There's no reason to keep around multiple unused chunks and VM
	 * areas can be scarce.  Destroy all free chunks except for one.
	 */
	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, free_head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &to_free);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &to_free, list) {
		int rs, re;

		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_depopulated(chunk, rs, re);
			spin_unlock_irq(&pcpu_lock);
		}
		pcpu_destroy_chunk(chunk);
	}

	/*
	 * Ensure there is a certain number of free populated pages for
	 * atomic allocs.  Fill up from the most packed so that atomic
	 * allocs don't increase fragmentation.  If atomic allocation
	 * failed previously, always populate the maximum amount.  This
	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
	 * failing indefinitely; however, large atomic allocs are not
	 * something we support properly and can be highly unreliable and
	 * inefficient.
	 */
retry_pop:
	if (pcpu_atomic_alloc_failed) {
		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
		/* best effort anyway, don't worry about synchronization */
		pcpu_atomic_alloc_failed = false;
	} else {
		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
				  pcpu_nr_empty_pop_pages,
				  0, PCPU_EMPTY_POP_PAGES_HIGH);
	}

	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
		int nr_unpop = 0, rs, re;

		if (!nr_to_pop)
			break;

		spin_lock_irq(&pcpu_lock);
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			nr_unpop = pcpu_unit_pages - chunk->nr_populated;
			if (nr_unpop)
				break;
		}
		spin_unlock_irq(&pcpu_lock);

		if (!nr_unpop)
			continue;

		/* @chunk can't go away while pcpu_alloc_mutex is held */
		pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
			int nr = min(re - rs, nr_to_pop);

			ret = pcpu_populate_chunk(chunk, rs, rs + nr);
			if (!ret) {
				nr_to_pop -= nr;
				spin_lock_irq(&pcpu_lock);
				pcpu_chunk_populated(chunk, rs, rs + nr);
				spin_unlock_irq(&pcpu_lock);
			} else {
				nr_to_pop = 0;
			}

			if (!nr_to_pop)
				break;
		}
	}

	if (nr_to_pop) {
		/* ran out of chunks to populate, create a new one and retry */
		chunk = pcpu_create_chunk();
		if (chunk) {
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_relocate(chunk, -1);
			spin_unlock_irq(&pcpu_lock);
			goto retry_pop;
		}
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off, occ_pages;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off, &occ_pages);

	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages += occ_pages;

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				pcpu_schedule_balance_work();
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		if ((void *)addr >= start && (void *)addr < start + static_size)
			return true;
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is a dereferenceable address obtained via one of the
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * The percpu allocator has a special setup for the first chunk, which
 * currently supports either embedding in linear address space or vmalloc
 * mapping, and, from the second one, the backing allocator (currently
 * either vm or km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into the
 * first chunk, but the current code reflects better how the percpu
 * allocator actually works, and the verification can discover both bugs in
 * the percpu allocator itself and per_cpu_ptr_to_phys() callers.  So we
 * keep the current code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
				     pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	memblock_free_early(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				pr_cont("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			pr_cont("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					pr_cont("%0*d ",
						cpu_width, gi->cpu_map[unit]);
				else
					pr_cont("%s ", empty_str);
		}
	}
	pr_cont("\n");
}

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
099a19d9
TH
1527 static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1528 static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
fd1e8a1f
TH
1529 size_t dyn_size = ai->dyn_size;
1530 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
edcb4639 1531 struct pcpu_chunk *schunk, *dchunk = NULL;
6563297c
TH
1532 unsigned long *group_offsets;
1533 size_t *group_sizes;
fb435d52 1534 unsigned long *unit_off;
fbf59bc9 1535 unsigned int cpu;
fd1e8a1f
TH
1536 int *unit_map;
1537 int group, unit, i;
fbf59bc9 1538
635b75fc
TH
1539#define PCPU_SETUP_BUG_ON(cond) do { \
1540 if (unlikely(cond)) { \
1541 pr_emerg("PERCPU: failed to initialize, %s", #cond); \
807de073
TH
1542 pr_emerg("PERCPU: cpu_possible_mask=%*pb\n", \
1543 cpumask_pr_args(cpu_possible_mask)); \
635b75fc
TH
1544 pcpu_dump_alloc_info(KERN_EMERG, ai); \
1545 BUG(); \
1546 } \
1547} while (0)
1548
2f39e637 1549 /* sanity checks */
635b75fc 1550 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
bbddff05 1551#ifdef CONFIG_SMP
635b75fc 1552 PCPU_SETUP_BUG_ON(!ai->static_size);
f09f1243 1553 PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
bbddff05 1554#endif
635b75fc 1555 PCPU_SETUP_BUG_ON(!base_addr);
f09f1243 1556 PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
635b75fc 1557 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
f09f1243 1558 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
635b75fc 1559 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
099a19d9 1560 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
9f645532 1561 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
8d408b4b 1562
6563297c 1563 /* process group information and build config tables accordingly */
999c17e3
SS
1564 group_offsets = memblock_virt_alloc(ai->nr_groups *
1565 sizeof(group_offsets[0]), 0);
1566 group_sizes = memblock_virt_alloc(ai->nr_groups *
1567 sizeof(group_sizes[0]), 0);
1568 unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
1569 unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
2f39e637 1570
fd1e8a1f 1571 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
ffe0d5a5 1572 unit_map[cpu] = UINT_MAX;
a855b84c
TH
1573
1574 pcpu_low_unit_cpu = NR_CPUS;
1575 pcpu_high_unit_cpu = NR_CPUS;
2f39e637 1576
fd1e8a1f
TH
1577 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1578 const struct pcpu_group_info *gi = &ai->groups[group];
2f39e637 1579
6563297c
TH
1580 group_offsets[group] = gi->base_offset;
1581 group_sizes[group] = gi->nr_units * ai->unit_size;
1582
fd1e8a1f
TH
1583 for (i = 0; i < gi->nr_units; i++) {
1584 cpu = gi->cpu_map[i];
1585 if (cpu == NR_CPUS)
1586 continue;
8d408b4b 1587
9f295664 1588 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
635b75fc
TH
1589 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1590 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
fbf59bc9 1591
fd1e8a1f 1592 unit_map[cpu] = unit + i;
fb435d52
TH
1593 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1594
a855b84c
TH
1595 /* determine low/high unit_cpu */
1596 if (pcpu_low_unit_cpu == NR_CPUS ||
1597 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1598 pcpu_low_unit_cpu = cpu;
1599 if (pcpu_high_unit_cpu == NR_CPUS ||
1600 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1601 pcpu_high_unit_cpu = cpu;
fd1e8a1f 1602 }
2f39e637 1603 }
fd1e8a1f
TH
1604 pcpu_nr_units = unit;
1605
1606 for_each_possible_cpu(cpu)
635b75fc
TH
1607 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1608
1609 /* we're done parsing the input, undefine BUG macro and dump config */
1610#undef PCPU_SETUP_BUG_ON
bcbea798 1611 pcpu_dump_alloc_info(KERN_DEBUG, ai);
fd1e8a1f 1612
6563297c
TH
1613 pcpu_nr_groups = ai->nr_groups;
1614 pcpu_group_offsets = group_offsets;
1615 pcpu_group_sizes = group_sizes;
fd1e8a1f 1616 pcpu_unit_map = unit_map;
fb435d52 1617 pcpu_unit_offsets = unit_off;
2f39e637
TH
1618
1619 /* determine basic parameters */
fd1e8a1f 1620 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
d9b55eeb 1621 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
6563297c 1622 pcpu_atom_size = ai->atom_size;
ce3141a2
TH
1623 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1624 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
cafe8816 1625
d9b55eeb
TH
1626 /*
1627 * Allocate chunk slots. The additional last slot is for
1628 * empty chunks.
1629 */
1630 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
999c17e3
SS
1631 pcpu_slot = memblock_virt_alloc(
1632 pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
fbf59bc9
TH
1633 for (i = 0; i < pcpu_nr_slots; i++)
1634 INIT_LIST_HEAD(&pcpu_slot[i]);
1635
edcb4639
TH
1636 /*
1637 * Initialize static chunk. If reserved_size is zero, the
1638 * static chunk covers static area + dynamic allocation area
1639 * in the first chunk. If reserved_size is not zero, it
1640 * covers static area + reserved area (mostly used for module
1641 * static percpu allocation).
1642 */
999c17e3 1643 schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
2441d15c 1644 INIT_LIST_HEAD(&schunk->list);
9c824b6a 1645 INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
bba174f5 1646 schunk->base_addr = base_addr;
61ace7fa
TH
1647 schunk->map = smap;
1648 schunk->map_alloc = ARRAY_SIZE(smap);
38a6be52 1649 schunk->immutable = true;
ce3141a2 1650 bitmap_fill(schunk->populated, pcpu_unit_pages);
b539b87f 1651 schunk->nr_populated = pcpu_unit_pages;
edcb4639 1652
fd1e8a1f
TH
1653 if (ai->reserved_size) {
1654 schunk->free_size = ai->reserved_size;
ae9e6bc9 1655 pcpu_reserved_chunk = schunk;
fd1e8a1f 1656 pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
edcb4639
TH
1657 } else {
1658 schunk->free_size = dyn_size;
1659 dyn_size = 0; /* dynamic area covered */
1660 }
2441d15c 1661 schunk->contig_hint = schunk->free_size;
fbf59bc9 1662
723ad1d9
AV
1663 schunk->map[0] = 1;
1664 schunk->map[1] = ai->static_size;
1665 schunk->map_used = 1;
61ace7fa 1666 if (schunk->free_size)
292c24a0
BH
1667 schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size;
1668 schunk->map[schunk->map_used] |= 1;
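	/*
	 * The map now describes the chunk as area start offsets: bit 0 of an
	 * entry is set when the area beginning at that offset is allocated.
	 * map[0] = 0|1 is the in-use static area, map[1] the free area that
	 * follows it (reserved or dynamic, see above), and the final entry
	 * marks the chunk end with bit 0 set so it is never handed out or
	 * merged into.
	 */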
61ace7fa 1669
edcb4639
TH
1670 /* init dynamic chunk if necessary */
1671 if (dyn_size) {
999c17e3 1672 dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
edcb4639 1673 INIT_LIST_HEAD(&dchunk->list);
9c824b6a 1674 INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
bba174f5 1675 dchunk->base_addr = base_addr;
edcb4639
TH
1676 dchunk->map = dmap;
1677 dchunk->map_alloc = ARRAY_SIZE(dmap);
38a6be52 1678 dchunk->immutable = true;
ce3141a2 1679 bitmap_fill(dchunk->populated, pcpu_unit_pages);
b539b87f 1680 dchunk->nr_populated = pcpu_unit_pages;
edcb4639
TH
1681
1682 dchunk->contig_hint = dchunk->free_size = dyn_size;
723ad1d9
AV
1683 dchunk->map[0] = 1;
1684 dchunk->map[1] = pcpu_reserved_chunk_limit;
1685 dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
1686 dchunk->map_used = 2;
edcb4639
TH
1687 }
1688
2441d15c 1689 /* link the first chunk in */
ae9e6bc9 1690 pcpu_first_chunk = dchunk ?: schunk;
b539b87f
TH
1691 pcpu_nr_empty_pop_pages +=
1692 pcpu_count_occupied_pages(pcpu_first_chunk, 1);
ae9e6bc9 1693 pcpu_chunk_relocate(pcpu_first_chunk, -1);
fbf59bc9
TH
1694
1695 /* we're done */
bba174f5 1696 pcpu_base_addr = base_addr;
fb435d52 1697 return 0;
fbf59bc9 1698}
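
/*
 * Illustrative layout of the first chunk set up above (reader aid only;
 * sizes come from @ai and, within a group, units sit unit_size apart):
 *
 *	group base                            group base + unit_size
 *	|                                     |
 *	v  unit 0 (cpu A)                     v  unit 1 (cpu B)        ...
 *	+--------+----------+-------------+   +--------+----------+----
 *	| static | reserved | dynamic     |   | static | reserved | ...
 *	+--------+----------+-------------+   +--------+----------+----
 *
 * With a non-zero reserved_size the static chunk manages the first two
 * regions and the dynamic chunk the third; with reserved_size == 0 a
 * single chunk covers static + dynamic.
 */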
66c3a757 1699
bbddff05
TH
1700#ifdef CONFIG_SMP
1701
17f3609c 1702const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
f58dc01b
TH
1703 [PCPU_FC_AUTO] = "auto",
1704 [PCPU_FC_EMBED] = "embed",
1705 [PCPU_FC_PAGE] = "page",
f58dc01b 1706};
66c3a757 1707
f58dc01b 1708enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
66c3a757 1709
f58dc01b
TH
1710static int __init percpu_alloc_setup(char *str)
1711{
5479c78a
CG
1712 if (!str)
1713 return -EINVAL;
1714
f58dc01b
TH
1715 if (0)
1716 /* nada */;
1717#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1718 else if (!strcmp(str, "embed"))
1719 pcpu_chosen_fc = PCPU_FC_EMBED;
1720#endif
1721#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1722 else if (!strcmp(str, "page"))
1723 pcpu_chosen_fc = PCPU_FC_PAGE;
f58dc01b
TH
1724#endif
1725 else
598d8091 1726 pr_warn("PERCPU: unknown allocator %s specified\n", str);
66c3a757 1727
f58dc01b 1728 return 0;
66c3a757 1729}
f58dc01b 1730early_param("percpu_alloc", percpu_alloc_setup);
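
/*
 * Usage note (illustrative): booting with "percpu_alloc=page" on a kernel
 * built with CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK selects the page-by-page
 * first chunk allocator below, "percpu_alloc=embed" selects the embedding
 * allocator, and any other value leaves pcpu_chosen_fc at PCPU_FC_AUTO and
 * triggers the pr_warn() above.
 */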
66c3a757 1731
3c9a024f
TH
1732/*
1733 * pcpu_embed_first_chunk() is used by the generic percpu setup.
1734 * Build it if needed by the arch config or the generic setup is going
1735 * to be used.
1736 */
08fc4580
TH
1737#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1738 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
3c9a024f
TH
1739#define BUILD_EMBED_FIRST_CHUNK
1740#endif
1741
1742/* build pcpu_page_first_chunk() iff needed by the arch config */
1743#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1744#define BUILD_PAGE_FIRST_CHUNK
1745#endif
1746
1747/* pcpu_build_alloc_info() is used by both embed and page first chunk */
1748#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1749/**
1750 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1751 * @reserved_size: the size of reserved percpu area in bytes
1752 * @dyn_size: minimum free size for dynamic allocation in bytes
1753 * @atom_size: allocation atom size
1754 * @cpu_distance_fn: callback to determine distance between cpus, optional
1755 *
1756 * This function determines grouping of units, their mappings to cpus
1757 * and other parameters considering needed percpu size, allocation
1758 * atom size and distances between CPUs.
1759 *
bffc4375 1760 * Groups are always multiples of atom size, and CPUs which are within
3c9a024f
TH
 1761 * LOCAL_DISTANCE of each other both ways are grouped together and share space for
1762 * units in the same group. The returned configuration is guaranteed
 1763 * to have CPUs on different nodes in different groups and >=75% usage
1764 * of allocated virtual address space.
1765 *
1766 * RETURNS:
1767 * On success, pointer to the new allocation_info is returned. On
1768 * failure, ERR_PTR value is returned.
1769 */
1770static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1771 size_t reserved_size, size_t dyn_size,
1772 size_t atom_size,
1773 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1774{
1775 static int group_map[NR_CPUS] __initdata;
1776 static int group_cnt[NR_CPUS] __initdata;
1777 const size_t static_size = __per_cpu_end - __per_cpu_start;
1778 int nr_groups = 1, nr_units = 0;
1779 size_t size_sum, min_unit_size, alloc_size;
1780 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
1781 int last_allocs, group, unit;
1782 unsigned int cpu, tcpu;
1783 struct pcpu_alloc_info *ai;
1784 unsigned int *cpu_map;
1785
1786 /* this function may be called multiple times */
1787 memset(group_map, 0, sizeof(group_map));
1788 memset(group_cnt, 0, sizeof(group_cnt));
1789
1790 /* calculate size_sum and ensure dyn_size is enough for early alloc */
1791 size_sum = PFN_ALIGN(static_size + reserved_size +
1792 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1793 dyn_size = size_sum - static_size - reserved_size;
1794
1795 /*
1796 * Determine min_unit_size, alloc_size and max_upa such that
1797 * alloc_size is multiple of atom_size and is the smallest
25985edc 1798 * which can accommodate 4k aligned segments which are equal to
3c9a024f
TH
1799 * or larger than min_unit_size.
1800 */
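	/*
	 * Worked example (illustrative numbers only): with size_sum = 90 KiB
	 * and atom_size = 2 MiB, min_unit_size = 90 KiB and alloc_size =
	 * 2 MiB; the loop below walks upa down from 22 until alloc_size is
	 * evenly divisible and the resulting unit size is page aligned,
	 * ending at max_upa = 16, i.e. 128 KiB units.
	 */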
1801 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1802
1803 alloc_size = roundup(min_unit_size, atom_size);
1804 upa = alloc_size / min_unit_size;
f09f1243 1805 while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
3c9a024f
TH
1806 upa--;
1807 max_upa = upa;
1808
1809 /* group cpus according to their proximity */
1810 for_each_possible_cpu(cpu) {
1811 group = 0;
1812 next_group:
1813 for_each_possible_cpu(tcpu) {
1814 if (cpu == tcpu)
1815 break;
1816 if (group_map[tcpu] == group && cpu_distance_fn &&
1817 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1818 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1819 group++;
1820 nr_groups = max(nr_groups, group + 1);
1821 goto next_group;
1822 }
1823 }
1824 group_map[cpu] = group;
1825 group_cnt[group]++;
1826 }
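	/*
	 * Example (hypothetical topology): with four possible CPUs, 0-1 on
	 * one node and 2-3 on another, and cpu_distance_fn() returning more
	 * than LOCAL_DISTANCE across the nodes, the loop above produces
	 * group_map = { 0, 0, 1, 1 }, group_cnt = { 2, 2 }, nr_groups = 2.
	 */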
1827
1828 /*
1829 * Expand unit size until address space usage goes over 75%
1830 * and then as much as possible without using more address
1831 * space.
1832 */
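	/*
	 * Continuing the hypothetical example (max_upa = 16, two groups of
	 * two CPUs): upa = 16, 8 and 4 are rejected because the number of
	 * wasted units exceeds a third of the possible CPUs, upa = 2 wastes
	 * nothing with two allocations, and upa = 1 would need four
	 * allocations, so best_upa ends up as 2, giving 1 MiB units.
	 */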
1833 last_allocs = INT_MAX;
1834 for (upa = max_upa; upa; upa--) {
1835 int allocs = 0, wasted = 0;
1836
f09f1243 1837 if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
3c9a024f
TH
1838 continue;
1839
1840 for (group = 0; group < nr_groups; group++) {
1841 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1842 allocs += this_allocs;
1843 wasted += this_allocs * upa - group_cnt[group];
1844 }
1845
1846 /*
1847 * Don't accept if wastage is over 1/3. The
1848 * greater-than comparison ensures upa==1 always
1849 * passes the following check.
1850 */
1851 if (wasted > num_possible_cpus() / 3)
1852 continue;
1853
1854 /* and then don't consume more memory */
1855 if (allocs > last_allocs)
1856 break;
1857 last_allocs = allocs;
1858 best_upa = upa;
1859 }
1860 upa = best_upa;
1861
1862 /* allocate and fill alloc_info */
1863 for (group = 0; group < nr_groups; group++)
1864 nr_units += roundup(group_cnt[group], upa);
1865
1866 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1867 if (!ai)
1868 return ERR_PTR(-ENOMEM);
1869 cpu_map = ai->groups[0].cpu_map;
1870
1871 for (group = 0; group < nr_groups; group++) {
1872 ai->groups[group].cpu_map = cpu_map;
1873 cpu_map += roundup(group_cnt[group], upa);
1874 }
1875
1876 ai->static_size = static_size;
1877 ai->reserved_size = reserved_size;
1878 ai->dyn_size = dyn_size;
1879 ai->unit_size = alloc_size / upa;
1880 ai->atom_size = atom_size;
1881 ai->alloc_size = alloc_size;
1882
1883 for (group = 0, unit = 0; group_cnt[group]; group++) {
1884 struct pcpu_group_info *gi = &ai->groups[group];
1885
1886 /*
1887 * Initialize base_offset as if all groups are located
1888 * back-to-back. The caller should update this to
1889 * reflect actual allocation.
1890 */
1891 gi->base_offset = unit * ai->unit_size;
1892
1893 for_each_possible_cpu(cpu)
1894 if (group_map[cpu] == group)
1895 gi->cpu_map[gi->nr_units++] = cpu;
1896 gi->nr_units = roundup(gi->nr_units, upa);
1897 unit += gi->nr_units;
1898 }
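	/*
	 * In the hypothetical example this yields two groups of two units
	 * each: group 0 (cpus 0-1) at base_offset 0 and group 1 (cpus 2-3)
	 * at base_offset 2 MiB with ai->unit_size = 1 MiB; the caller later
	 * rebases the offsets to the real allocations.
	 */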
1899 BUG_ON(unit != nr_units);
1900
1901 return ai;
1902}
1903#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1904
1905#if defined(BUILD_EMBED_FIRST_CHUNK)
66c3a757
TH
1906/**
1907 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
66c3a757 1908 * @reserved_size: the size of reserved percpu area in bytes
4ba6ce25 1909 * @dyn_size: minimum free size for dynamic allocation in bytes
c8826dd5
TH
1910 * @atom_size: allocation atom size
1911 * @cpu_distance_fn: callback to determine distance between cpus, optional
 1912 * @alloc_fn: function to allocate percpu area
25985edc 1913 * @free_fn: function to free percpu area
66c3a757
TH
1914 *
1915 * This is a helper to ease setting up embedded first percpu chunk and
1916 * can be called where pcpu_setup_first_chunk() is expected.
1917 *
1918 * If this function is used to setup the first chunk, it is allocated
c8826dd5
TH
1919 * by calling @alloc_fn and used as-is without being mapped into
1920 * vmalloc area. Allocations are always whole multiples of @atom_size
1921 * aligned to @atom_size.
1922 *
1923 * This enables the first chunk to piggy back on the linear physical
1924 * mapping which often uses larger page size. Please note that this
1925 * can result in very sparse cpu->unit mapping on NUMA machines thus
1926 * requiring large vmalloc address space. Don't use this allocator if
1927 * vmalloc space is not orders of magnitude larger than distances
1928 * between node memory addresses (ie. 32bit NUMA machines).
66c3a757 1929 *
4ba6ce25 1930 * @dyn_size specifies the minimum dynamic area size.
66c3a757
TH
1931 *
1932 * If the needed size is smaller than the minimum or specified unit
c8826dd5 1933 * size, the leftover is returned using @free_fn.
66c3a757
TH
1934 *
1935 * RETURNS:
fb435d52 1936 * 0 on success, -errno on failure.
66c3a757 1937 */
4ba6ce25 1938int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
c8826dd5
TH
1939 size_t atom_size,
1940 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1941 pcpu_fc_alloc_fn_t alloc_fn,
1942 pcpu_fc_free_fn_t free_fn)
66c3a757 1943{
c8826dd5
TH
1944 void *base = (void *)ULONG_MAX;
1945 void **areas = NULL;
fd1e8a1f 1946 struct pcpu_alloc_info *ai;
6ea529a2 1947 size_t size_sum, areas_size, max_distance;
c8826dd5 1948 int group, i, rc;
66c3a757 1949
c8826dd5
TH
1950 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1951 cpu_distance_fn);
fd1e8a1f
TH
1952 if (IS_ERR(ai))
1953 return PTR_ERR(ai);
66c3a757 1954
fd1e8a1f 1955 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
c8826dd5 1956 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
fa8a7094 1957
999c17e3 1958 areas = memblock_virt_alloc_nopanic(areas_size, 0);
c8826dd5 1959 if (!areas) {
fb435d52 1960 rc = -ENOMEM;
c8826dd5 1961 goto out_free;
fa8a7094 1962 }
66c3a757 1963
c8826dd5
TH
1964 /* allocate, copy and determine base address */
1965 for (group = 0; group < ai->nr_groups; group++) {
1966 struct pcpu_group_info *gi = &ai->groups[group];
1967 unsigned int cpu = NR_CPUS;
1968 void *ptr;
1969
1970 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1971 cpu = gi->cpu_map[i];
1972 BUG_ON(cpu == NR_CPUS);
1973
1974 /* allocate space for the whole group */
1975 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1976 if (!ptr) {
1977 rc = -ENOMEM;
1978 goto out_free_areas;
1979 }
f528f0b8
CM
1980 /* kmemleak tracks the percpu allocations separately */
1981 kmemleak_free(ptr);
c8826dd5 1982 areas[group] = ptr;
fd1e8a1f 1983
c8826dd5 1984 base = min(ptr, base);
42b64281
TH
1985 }
1986
1987 /*
1988 * Copy data and free unused parts. This should happen after all
1989 * allocations are complete; otherwise, we may end up with
1990 * overlapping groups.
1991 */
1992 for (group = 0; group < ai->nr_groups; group++) {
1993 struct pcpu_group_info *gi = &ai->groups[group];
1994 void *ptr = areas[group];
c8826dd5
TH
1995
1996 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1997 if (gi->cpu_map[i] == NR_CPUS) {
1998 /* unused unit, free whole */
1999 free_fn(ptr, ai->unit_size);
2000 continue;
2001 }
2002 /* copy and return the unused part */
2003 memcpy(ptr, __per_cpu_load, ai->static_size);
2004 free_fn(ptr + size_sum, ai->unit_size - size_sum);
2005 }
fa8a7094 2006 }
66c3a757 2007
c8826dd5 2008 /* base address is now known, determine group base offsets */
6ea529a2
TH
2009 max_distance = 0;
2010 for (group = 0; group < ai->nr_groups; group++) {
c8826dd5 2011 ai->groups[group].base_offset = areas[group] - base;
1a0c3298
TH
2012 max_distance = max_t(size_t, max_distance,
2013 ai->groups[group].base_offset);
6ea529a2
TH
2014 }
2015 max_distance += ai->unit_size;
2016
2017 /* warn if maximum distance is further than 75% of vmalloc space */
8a092171 2018 if (max_distance > VMALLOC_TOTAL * 3 / 4) {
598d8091
JP
2019 pr_warn("PERCPU: max_distance=0x%zx too large for vmalloc space 0x%lx\n",
2020 max_distance, VMALLOC_TOTAL);
6ea529a2
TH
2021#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2022 /* and fail if we have fallback */
2023 rc = -EINVAL;
2024 goto out_free;
2025#endif
2026 }
c8826dd5 2027
004018e2 2028 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
fd1e8a1f
TH
2029 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
2030 ai->dyn_size, ai->unit_size);
d4b95f80 2031
fb435d52 2032 rc = pcpu_setup_first_chunk(ai, base);
c8826dd5
TH
2033 goto out_free;
2034
2035out_free_areas:
2036 for (group = 0; group < ai->nr_groups; group++)
f851c8d8
MH
2037 if (areas[group])
2038 free_fn(areas[group],
2039 ai->groups[group].nr_units * ai->unit_size);
c8826dd5 2040out_free:
fd1e8a1f 2041 pcpu_free_alloc_info(ai);
c8826dd5 2042 if (areas)
999c17e3 2043 memblock_free_early(__pa(areas), areas_size);
fb435d52 2044 return rc;
d4b95f80 2045}
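
/*
 * Sketch of a @cpu_distance_fn an architecture might pass in (illustrative
 * only; the function name is hypothetical and it assumes the cpu-to-node
 * mapping is already valid this early, which is arch specific - x86 does
 * something similar with early_cpu_to_node()):
 *
 *	static int __init example_pcpu_cpu_distance(unsigned int from,
 *						    unsigned int to)
 *	{
 *		if (cpu_to_node(from) == cpu_to_node(to))
 *			return LOCAL_DISTANCE;
 *		return REMOTE_DISTANCE;
 *	}
 *
 * Anything above LOCAL_DISTANCE makes pcpu_build_alloc_info() place the two
 * CPUs in different groups.
 */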
3c9a024f 2046#endif /* BUILD_EMBED_FIRST_CHUNK */
d4b95f80 2047
3c9a024f 2048#ifdef BUILD_PAGE_FIRST_CHUNK
d4b95f80 2049/**
00ae4064 2050 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
d4b95f80
TH
2051 * @reserved_size: the size of reserved percpu area in bytes
2052 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
25985edc 2053 * @free_fn: function to free percpu page, always called with PAGE_SIZE
d4b95f80
TH
2054 * @populate_pte_fn: function to populate pte
2055 *
00ae4064
TH
2056 * This is a helper to ease setting up page-remapped first percpu
2057 * chunk and can be called where pcpu_setup_first_chunk() is expected.
d4b95f80
TH
2058 *
2059 * This is the basic allocator. Static percpu area is allocated
2060 * page-by-page into vmalloc area.
2061 *
2062 * RETURNS:
fb435d52 2063 * 0 on success, -errno on failure.
d4b95f80 2064 */
fb435d52
TH
2065int __init pcpu_page_first_chunk(size_t reserved_size,
2066 pcpu_fc_alloc_fn_t alloc_fn,
2067 pcpu_fc_free_fn_t free_fn,
2068 pcpu_fc_populate_pte_fn_t populate_pte_fn)
d4b95f80 2069{
8f05a6a6 2070 static struct vm_struct vm;
fd1e8a1f 2071 struct pcpu_alloc_info *ai;
00ae4064 2072 char psize_str[16];
ce3141a2 2073 int unit_pages;
d4b95f80 2074 size_t pages_size;
ce3141a2 2075 struct page **pages;
fb435d52 2076 int unit, i, j, rc;
d4b95f80 2077
00ae4064
TH
2078 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2079
4ba6ce25 2080 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
fd1e8a1f
TH
2081 if (IS_ERR(ai))
2082 return PTR_ERR(ai);
2083 BUG_ON(ai->nr_groups != 1);
2084 BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
2085
2086 unit_pages = ai->unit_size >> PAGE_SHIFT;
d4b95f80
TH
2087
2088 /* unaligned allocations can't be freed, round up to page size */
fd1e8a1f
TH
2089 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2090 sizeof(pages[0]));
999c17e3 2091 pages = memblock_virt_alloc(pages_size, 0);
d4b95f80 2092
8f05a6a6 2093 /* allocate pages */
d4b95f80 2094 j = 0;
fd1e8a1f 2095 for (unit = 0; unit < num_possible_cpus(); unit++)
ce3141a2 2096 for (i = 0; i < unit_pages; i++) {
fd1e8a1f 2097 unsigned int cpu = ai->groups[0].cpu_map[unit];
d4b95f80
TH
2098 void *ptr;
2099
3cbc8565 2100 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
d4b95f80 2101 if (!ptr) {
598d8091
JP
2102 pr_warn("PERCPU: failed to allocate %s page for cpu%u\n",
2103 psize_str, cpu);
d4b95f80
TH
2104 goto enomem;
2105 }
f528f0b8
CM
2106 /* kmemleak tracks the percpu allocations separately */
2107 kmemleak_free(ptr);
ce3141a2 2108 pages[j++] = virt_to_page(ptr);
d4b95f80
TH
2109 }
2110
8f05a6a6
TH
2111 /* allocate vm area, map the pages and copy static data */
2112 vm.flags = VM_ALLOC;
fd1e8a1f 2113 vm.size = num_possible_cpus() * ai->unit_size;
8f05a6a6
TH
2114 vm_area_register_early(&vm, PAGE_SIZE);
2115
fd1e8a1f 2116 for (unit = 0; unit < num_possible_cpus(); unit++) {
1d9d3257 2117 unsigned long unit_addr =
fd1e8a1f 2118 (unsigned long)vm.addr + unit * ai->unit_size;
8f05a6a6 2119
ce3141a2 2120 for (i = 0; i < unit_pages; i++)
8f05a6a6
TH
2121 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
2122
2123 /* pte already populated, the following shouldn't fail */
fb435d52
TH
2124 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2125 unit_pages);
2126 if (rc < 0)
2127 panic("failed to map percpu area, err=%d\n", rc);
66c3a757 2128
8f05a6a6
TH
2129 /*
2130 * FIXME: Archs with virtual cache should flush local
2131 * cache for the linear mapping here - something
2132 * equivalent to flush_cache_vmap() on the local cpu.
2133 * flush_cache_vmap() can't be used as most supporting
2134 * data structures are not set up yet.
2135 */
2136
2137 /* copy static data */
fd1e8a1f 2138 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
66c3a757
TH
2139 }
2140
2141 /* we're ready, commit */
1d9d3257 2142 pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
fd1e8a1f
TH
2143 unit_pages, psize_str, vm.addr, ai->static_size,
2144 ai->reserved_size, ai->dyn_size);
d4b95f80 2145
fb435d52 2146 rc = pcpu_setup_first_chunk(ai, vm.addr);
d4b95f80
TH
2147 goto out_free_ar;
2148
2149enomem:
2150 while (--j >= 0)
ce3141a2 2151 free_fn(page_address(pages[j]), PAGE_SIZE);
fb435d52 2152 rc = -ENOMEM;
d4b95f80 2153out_free_ar:
999c17e3 2154 memblock_free_early(__pa(pages), pages_size);
fd1e8a1f 2155 pcpu_free_alloc_info(ai);
fb435d52 2156 return rc;
d4b95f80 2157}
3c9a024f 2158#endif /* BUILD_PAGE_FIRST_CHUNK */
d4b95f80 2159
bbddff05 2160#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
e74e3962 2161/*
bbddff05 2162 * Generic SMP percpu area setup.
e74e3962
TH
2163 *
2164 * The embedding helper is used because its behavior closely resembles
2165 * the original non-dynamic generic percpu area setup. This is
2166 * important because many archs have addressing restrictions and might
2167 * fail if the percpu area is located far away from the previous
2168 * location. As an added bonus, in non-NUMA cases, embedding is
2169 * generally a good idea TLB-wise because percpu area can piggy back
2170 * on the physical linear memory mapping which uses large page
2171 * mappings on applicable archs.
2172 */
e74e3962
TH
2173unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2174EXPORT_SYMBOL(__per_cpu_offset);
2175
c8826dd5
TH
2176static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2177 size_t align)
2178{
999c17e3
SS
2179 return memblock_virt_alloc_from_nopanic(
2180 size, align, __pa(MAX_DMA_ADDRESS));
c8826dd5 2181}
66c3a757 2182
c8826dd5
TH
2183static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2184{
999c17e3 2185 memblock_free_early(__pa(ptr), size);
c8826dd5
TH
2186}
2187
e74e3962
TH
2188void __init setup_per_cpu_areas(void)
2189{
e74e3962
TH
2190 unsigned long delta;
2191 unsigned int cpu;
fb435d52 2192 int rc;
e74e3962
TH
2193
2194 /*
2195 * Always reserve area for module percpu variables. That's
2196 * what the legacy allocator did.
2197 */
fb435d52 2198 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
c8826dd5
TH
2199 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2200 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
fb435d52 2201 if (rc < 0)
bbddff05 2202 panic("Failed to initialize percpu areas.");
e74e3962
TH
2203
2204 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2205 for_each_possible_cpu(cpu)
fb435d52 2206 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
66c3a757 2207}
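
/*
 * Reader aid: after the loop above, a static percpu variable 'x' is reached
 * on CPU n at
 *
 *	(char *)&x + __per_cpu_offset[n]
 *	    == pcpu_base_addr + pcpu_unit_offsets[n]
 *	       + ((char *)&x - (char *)__per_cpu_start)
 *
 * which is roughly what the generic per_cpu_ptr()/per_cpu() accessors
 * compute through per_cpu_offset().
 */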
bbddff05
TH
2208#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
2209
2210#else /* CONFIG_SMP */
2211
2212/*
2213 * UP percpu area setup.
2214 *
2215 * UP always uses km-based percpu allocator with identity mapping.
2216 * Static percpu variables are indistinguishable from the usual static
2217 * variables and don't require any special preparation.
2218 */
2219void __init setup_per_cpu_areas(void)
2220{
2221 const size_t unit_size =
2222 roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
2223 PERCPU_DYNAMIC_RESERVE));
2224 struct pcpu_alloc_info *ai;
2225 void *fc;
2226
2227 ai = pcpu_alloc_alloc_info(1, 1);
999c17e3
SS
2228 fc = memblock_virt_alloc_from_nopanic(unit_size,
2229 PAGE_SIZE,
2230 __pa(MAX_DMA_ADDRESS));
bbddff05
TH
2231 if (!ai || !fc)
2232 panic("Failed to allocate memory for percpu areas.");
100d13c3
CM
2233 /* kmemleak tracks the percpu allocations separately */
2234 kmemleak_free(fc);
bbddff05
TH
2235
2236 ai->dyn_size = unit_size;
2237 ai->unit_size = unit_size;
2238 ai->atom_size = unit_size;
2239 ai->alloc_size = unit_size;
2240 ai->groups[0].nr_units = 1;
2241 ai->groups[0].cpu_map[0] = 0;
2242
2243 if (pcpu_setup_first_chunk(ai, fc) < 0)
2244 panic("Failed to initialize percpu areas.");
2245}
2246
2247#endif /* CONFIG_SMP */
099a19d9
TH
2248
2249/*
 2250 * First and reserved chunks are initialized with temporary allocation
 2251 * maps in initdata so that they can be used before slab is online.
2252 * This function is called after slab is brought up and replaces those
2253 * with properly allocated maps.
2254 */
2255void __init percpu_init_late(void)
2256{
2257 struct pcpu_chunk *target_chunks[] =
2258 { pcpu_first_chunk, pcpu_reserved_chunk, NULL };
2259 struct pcpu_chunk *chunk;
2260 unsigned long flags;
2261 int i;
2262
2263 for (i = 0; (chunk = target_chunks[i]); i++) {
2264 int *map;
2265 const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
2266
2267 BUILD_BUG_ON(size > PAGE_SIZE);
2268
90459ce0 2269 map = pcpu_mem_zalloc(size);
099a19d9
TH
2270 BUG_ON(!map);
2271
2272 spin_lock_irqsave(&pcpu_lock, flags);
2273 memcpy(map, chunk->map, size);
2274 chunk->map = map;
2275 spin_unlock_irqrestore(&pcpu_lock, flags);
2276 }
2277}
1a4d7607
TH
2278
2279/*
 2280 * Percpu allocator is initialized early during boot when neither slab nor
2281 * workqueue is available. Plug async management until everything is up
2282 * and running.
2283 */
2284static int __init percpu_enable_async(void)
2285{
2286 pcpu_async_enabled = true;
2287 return 0;
2288}
2289subsys_initcall(percpu_enable_async);