// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009 SUSE Linux Products GmbH
 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017 Facebook Inc.
 * Copyright (C) 2017 Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware, the
 * __GFP_ACCOUNT flag should be passed.  All memcg-aware allocations share
 * one set of chunks, while all unaccounted allocations and allocations
 * performed by processes belonging to the root memory cgroup use the
 * second set.
 *
 * The allocator tries to allocate from the fullest chunk first.  Each chunk
 * is managed by a bitmap with metadata blocks.  The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between the bytes, bits, and blocks.
 * All hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
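
/*
 * Illustrative sketch of the byte/bit bookkeeping above, assuming the
 * common values PCPU_MIN_ALLOC_SIZE == 4 bytes and PAGE_SIZE == 4096:
 * a 512-byte allocation occupies 512 / 4 = 128 bits in the allocation
 * map, and a chunk with nr_pages == 2 is tracked by 2 * 4096 / 4 = 2048
 * map bits.  The helpers further down (pcpu_off_to_block_index() and
 * friends) perform these conversions.
 */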

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/*
 * The slots are sorted by the size of the biggest continuous free area.
 * 1-31 bytes share the same slot.
 */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
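
/*
 * A quick sanity check on the default mapping above (illustrative): the
 * two macros are inverses, so for any percpu pointer p,
 * __addr_to_pcpu_ptr(__pcpu_ptr_to_addr(p)) == p.  The translation is a
 * constant shift by the distance between pcpu_base_addr and
 * __per_cpu_start, relocating addresses between the kernel's static
 * percpu section and the first chunk's runtime placement.
 */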

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static int pcpu_free_slot __ro_after_init;
int pcpu_sidelined_slot __ro_after_init;
int pcpu_to_depopulate_slot __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.
 * The reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and this count is incremented/decremented by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}
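
/*
 * Worked example of the slot math above (illustrative): a chunk whose
 * largest contiguous free area is 1024 bytes has fls(1024) == 11 and
 * lands in slot 11 - PCPU_SLOT_BASE_SHIFT + 2 == 8, while a 64-byte
 * area (fls == 7) lands in slot 4.  The max() with 1 keeps tiny sizes
 * in slot 1, leaving slot 0 for chunks with no usable free space (see
 * pcpu_chunk_slot()).
 */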

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_free_slot;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}
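
/*
 * Worked example for the three helpers above (illustrative, assuming
 * PCPU_BITMAP_BLOCK_BITS == 1024 as on a 4K-page build): chunk offset
 * 2500 maps to block index 2500 / 1024 == 2 and block offset
 * 2500 & 1023 == 452, and pcpu_block_off_to_off(2, 452) recovers
 * 2 * 1024 + 452 == 2500.
 */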

/**
 * pcpu_check_block_hint - check against the contig hint
 * @block: block of interest
 * @bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Check to see if the allocation can fit in the block's contig hint.
 * Note, a chunk uses the same hints as a block so this can also check against
 * the chunk's contig hint.
 */
static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
				  size_t align)
{
	int bit_off = ALIGN(block->contig_hint_start, align) -
		      block->contig_hint_start;

	return bit_off + bits <= block->contig_hint;
}
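
/*
 * E.g. (illustrative): with contig_hint_start == 5, contig_hint == 20,
 * and align == 4, ALIGN(5, 4) == 8, so bit_off == 3 bits are lost to
 * alignment and a request fits only if 3 + bits <= 20.
 */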

/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan_hint exist?  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint)?  Third, is the allocation request
	 * larger than the scan_hint?
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}
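
/*
 * Numeric sketch (illustrative): with scan_hint_start == 10,
 * scan_hint == 4, contig_hint_start == 50, and alloc_bits == 8, the
 * 4-bit scan_hint region cannot hold 8 bits, so scanning resumes at
 * 10 + 4 == 14 instead of first_free.
 */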

/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First, is there a contig_hint
		 * to check?  Second, have we checked this hint before by
		 * comparing the block_off?  Third, is this the same as the
		 * right contig hint?  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		if (move_front)
			list_move(&chunk->list, &pcpu_chunk_lists[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	/* leave isolated chunks in-place */
	if (chunk->isolated)
		return;

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}

static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	if (!chunk->isolated) {
		chunk->isolated = true;
		pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
	}
	list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
}

static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	if (chunk->isolated) {
		chunk->isolated = false;
		pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
		pcpu_chunk_relocate(chunk, -1);
	}
}

/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages now based on the premise
 * a md_block covers a page.  The hint update functions recognize if a block
 * is made full or broken to calculate deltas for keeping track of free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk && !chunk->isolated)
		pcpu_nr_empty_pop_pages += nr;
}

/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}
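
/*
 * E.g. (illustrative): [2, 5) and [4, 8) overlap because 2 < 8 and
 * 4 < 5, while [2, 5) and [5, 8) do not, since the intervals are
 * half-open and only share the boundary point.
 */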

/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}

/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used.  When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path as this is triggered
 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 * be prevented on freeing as we want to find the largest area possibly
 * spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}

/**
 * pcpu_block_refresh_hint - updates the hints of a single metadata block
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int rs, re, start;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	bitmap_for_each_clear_region(alloc_map, rs, re, start,
				     PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, rs, re);
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the allocated area */
	int s_off, e_off;	/* block offsets of the allocated area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, -nr_empty_pages);

	if (pcpu_region_overlap(chunk_md->scan_hint_start,
				chunk_md->scan_hint_start +
				chunk_md->scan_hint,
				bit_off,
				bit_off + bits))
		chunk_md->scan_hint = 0;

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken.  Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (pcpu_region_overlap(chunk_md->contig_hint_start,
				chunk_md->contig_hint_start +
				chunk_md->contig_hint,
				bit_off,
				bit_off + bits))
		pcpu_chunk_refresh_hint(chunk, false);
}

/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path.  This avoids a blind block
 * refresh by making use of the block contig hints.  If this fails, it scans
 * forward and backward to determine the extent of the free area.  This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks.  This tradeoff is to minimize iterating
 * over the block metadata to update chunk_md->contig_hint.
 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 * than the available space.  If the contig hint is contained in one block, it
 * will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, then the scan to find the beginning/end of the
	 * larger free area can be avoided.
	 *
	 * start and end refer to beginning and end of the free area
	 * within each their respective blocks.  This is not necessarily
	 * the entire free area as it may span blocks past the beginning
	 * or end of the block.
	 */
	start = s_off;
	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
		start = s_block->contig_hint_start;
	} else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit returns the starting bit, so if the start bit
		 * is returned, that means there was no last bit and the
		 * remainder of the chunk is free.
		 */
		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
					  start);
		start = (start == l_bit) ? 0 : l_bit + 1;
	}

	end = e_off;
	if (e_off == e_block->contig_hint_start)
		end = e_block->contig_hint_start + e_block->contig_hint;
	else
		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
				    PCPU_BITMAP_BLOCK_BITS, end);

	/* update s_block */
	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;
	pcpu_block_update(s_block, start, e_off);

	/* the free spans multiple blocks */
	if (s_index != e_index) {
		/* update e_block */
		if (end == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->scan_hint = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, nr_empty_pages);

	/*
	 * Refresh chunk metadata when the free makes a block free or spans
	 * across blocks.  The contig_hint may be off by up to a page, but if
	 * the contig_hint is contained in a block, it will be accurate with
	 * the else condition below.
	 */
	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
		pcpu_chunk_refresh_hint(chunk, true);
	else
		pcpu_block_update(&chunk->chunk_md,
				  pcpu_block_off_to_off(s_index, start),
				  end);
}

/**
 * pcpu_is_populated - determines if the region is populated
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of area
 * @next_off: return value for the next offset to start searching
 *
 * For atomic allocations, check if the backing pages are populated.
 *
 * RETURNS:
 * Bool if the backing pages are populated.
 * *next_off is set to skip over unpopulated blocks in pcpu_find_block_fit.
 */
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
			      int *next_off)
{
	unsigned int page_start, page_end, rs, re;

	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	rs = page_start;
	bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
	if (rs >= page_end)
		return true;

	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
	return false;
}
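
/*
 * Worked example of the page math above (illustrative, assuming
 * PCPU_MIN_ALLOC_SIZE == 4 and PAGE_SIZE == 4096): bit_off == 1024 and
 * bits == 512 describe the byte range [4096, 6144), so page_start ==
 * PFN_DOWN(4096) == 1 and page_end == PFN_UP(6144) == 2, and only page 1
 * must be populated for the region to qualify.
 */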

/**
 * pcpu_find_block_fit - finds the block index to start searching
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE bytes)
 * @pop_only: use populated regions only
 *
 * Given a chunk and an allocation spec, find the offset to begin searching
 * for a free region.  This iterates over the bitmap metadata blocks to
 * find an offset that will be guaranteed to fit the requirements.  It is
 * not quite first fit as if the allocation does not fit in the contig hint
 * of a block or chunk, it is skipped.  This errs on the side of caution
 * to prevent excess iteration.  Poor alignment can cause the allocator to
 * skip over blocks and chunks that have valid free areas.
 *
 * RETURNS:
 * The offset in the bitmap to begin searching.
 * -1 if no offset is found.
 */
static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
			       size_t align, bool pop_only)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits, next_off;

	/*
	 * This is an optimization to prevent scanning by assuming if the
	 * allocation cannot fit in the global hint, there is memory pressure
	 * and creating a new chunk would happen soon.
	 */
	if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
		return -1;

	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
	bits = 0;
	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
						   &next_off))
			break;

		bit_off = next_off;
		bits = 0;
	}

	if (bit_off == pcpu_chunk_map_bits(chunk))
		return -1;

	return bit_off;
}

/*
 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
 * @map: the address to base the search on
 * @size: the bitmap size in bits
 * @start: the bitnumber to start searching at
 * @nr: the number of zeroed bits we're looking for
 * @align_mask: alignment mask for zero area
 * @largest_off: offset of the largest area skipped
 * @largest_bits: size of the largest area skipped
 *
 * The @align_mask should be one less than a power of 2.
 *
 * This is a modified version of bitmap_find_next_zero_area_off() to remember
 * the largest area that was skipped.  This is imperfect, but in general is
 * good enough.  The largest remembered region is the largest failed region
 * seen.  This does not include anything we possibly skipped due to alignment.
 * pcpu_block_update_scan() does scan backwards to try and recover what was
 * lost to alignment.  While this can cause scanning to miss earlier possible
 * free areas, smaller allocations will eventually fill those holes.
 */
static unsigned long pcpu_find_zero_area(unsigned long *map,
					 unsigned long size,
					 unsigned long start,
					 unsigned long nr,
					 unsigned long align_mask,
					 unsigned long *largest_off,
					 unsigned long *largest_bits)
{
	unsigned long index, end, i, area_off, area_bits;
again:
	index = find_next_zero_bit(map, size, start);

	/* Align allocation */
	index = __ALIGN_MASK(index, align_mask);
	area_off = index;

	end = index + nr;
	if (end > size)
		return end;
	i = find_next_bit(map, end, index);
	if (i < end) {
		area_bits = i - area_off;
		/* remember largest unused area with best alignment */
		if (area_bits > *largest_bits ||
		    (area_bits == *largest_bits && *largest_off &&
		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
			*largest_off = area_off;
			*largest_bits = area_bits;
		}

		start = i + 1;
		goto again;
	}
	return index;
}
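
/*
 * Alignment sketch (illustrative): for a 4-bit alignment, align_mask is
 * 3, and __ALIGN_MASK(5, 3) == (5 + 3) & ~3 == 8, so a zero run found
 * at bit 5 is bumped up to bit 8 before the fit is checked.
 */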

/**
 * pcpu_alloc_area - allocates an area from a pcpu_chunk
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE)
 * @start: bit_off to start searching
 *
 * This function takes in a @start offset to begin searching to fit an
 * allocation of @alloc_bits with alignment @align.  It needs to scan
 * the allocation map because if it fits within the block's contig hint,
 * @start will be block->first_free.  This is an attempt to fill the
 * allocation prior to breaking the contig hint.  The allocation and
 * boundary maps are updated accordingly if it confirms a valid
 * free area.
 *
 * RETURNS:
 * Allocated addr offset in @chunk on success.
 * -1 if no matching area is found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
			   size_t align, int start)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	size_t align_mask = (align) ? (align - 1) : 0;
	unsigned long area_off = 0, area_bits = 0;
	int bit_off, end, oslot;

	lockdep_assert_held(&pcpu_lock);

	oslot = pcpu_chunk_slot(chunk);

	/*
	 * Search to find a fit.
	 */
	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
		    pcpu_chunk_map_bits(chunk));
	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
				      align_mask, &area_off, &area_bits);
	if (bit_off >= end)
		return -1;

	if (area_bits)
		pcpu_block_update_scan(chunk, area_off, area_bits);

	/* update alloc map */
	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);

	/* update boundary map */
	set_bit(bit_off, chunk->bound_map);
	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
	set_bit(bit_off + alloc_bits, chunk->bound_map);

	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;

	/* update first free bit */
	if (bit_off == chunk_md->first_free)
		chunk_md->first_free = find_next_zero_bit(
					chunk->alloc_map,
					pcpu_chunk_map_bits(chunk),
					bit_off + alloc_bits);

	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);

	pcpu_chunk_relocate(chunk, oslot);

	return bit_off * PCPU_MIN_ALLOC_SIZE;
}

/**
 * pcpu_free_area - frees the corresponding offset
 * @chunk: chunk of interest
 * @off: addr offset into chunk
 *
 * This function determines the size of an allocation to free using
 * the boundary bitmap and clears the allocation map.
 *
 * RETURNS:
 * Number of freed bytes.
 */
static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits, end, oslot, freed;

	lockdep_assert_held(&pcpu_lock);
	pcpu_stats_area_dealloc(chunk);

	oslot = pcpu_chunk_slot(chunk);

	bit_off = off / PCPU_MIN_ALLOC_SIZE;

	/* find end index */
	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
			    bit_off + 1);
	bits = end - bit_off;
	bitmap_clear(chunk->alloc_map, bit_off, bits);

	freed = bits * PCPU_MIN_ALLOC_SIZE;

	/* update metadata */
	chunk->free_bytes += freed;

	/* update first free bit */
	chunk_md->first_free = min(chunk_md->first_free, bit_off);

	pcpu_block_update_hint_free(chunk, bit_off, bits);

	pcpu_chunk_relocate(chunk, oslot);

	return freed;
}
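
/*
 * Boundary map sketch (illustrative): pcpu_alloc_area() marks a 128-bit
 * area at bit 256 by setting bound_map bits 256 and 384 and clearing the
 * bits in between.  A later pcpu_free_area() at the same offset only
 * needs find_next_bit() from bit 257 to land on 384 and recover the
 * allocation size, which is why the boundary map is never updated on
 * free.
 */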

static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
{
	block->scan_hint = 0;
	block->contig_hint = nr_bits;
	block->left_free = nr_bits;
	block->right_free = nr_bits;
	block->first_free = 0;
	block->nr_bits = nr_bits;
}

static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
{
	struct pcpu_block_md *md_block;

	/* init the chunk's block */
	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));

	for (md_block = chunk->md_blocks;
	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
	     md_block++)
		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
}

/**
 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
 * @tmp_addr: the start of the region served
 * @map_size: size of the region served
 *
 * This is responsible for creating the chunks that serve the first chunk.
 * The base_addr is @tmp_addr aligned down to a page boundary, while the
 * region end is aligned up.  Offsets are kept track of to determine the
 * region served.  All this is done to appease the bitmap allocator in
 * avoiding partial blocks.
 *
 * RETURNS:
 * Chunk serving the region at @tmp_addr of @map_size.
 */
1341 | static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, | |
1342 | int map_size) | |
1343 | { | |
1344 | struct pcpu_chunk *chunk; | |
1345 | unsigned long aligned_addr, lcm_align; | |
1346 | int start_offset, offset_bits, region_size, region_bits; | |
1347 | size_t alloc_size; | |
1348 | ||
1349 | /* region calculations */ | |
1350 | aligned_addr = tmp_addr & PAGE_MASK; | |
1351 | ||
1352 | start_offset = tmp_addr - aligned_addr; | |
1353 | ||
1354 | /* | |
1355 | * Align the end of the region with the LCM of PAGE_SIZE and | |
1356 | * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of | |
1357 | * the other. | |
1358 | */ | |
1359 | lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE); | |
1360 | region_size = ALIGN(start_offset + map_size, lcm_align); | |
1361 | ||
1362 | /* allocate chunk */ | |
1363 | alloc_size = struct_size(chunk, populated, | |
1364 | BITS_TO_LONGS(region_size >> PAGE_SHIFT)); | |
1365 | chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES); | |
1366 | if (!chunk) | |
1367 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
1368 | alloc_size); | |
1369 | ||
1370 | INIT_LIST_HEAD(&chunk->list); | |
1371 | ||
1372 | chunk->base_addr = (void *)aligned_addr; | |
1373 | chunk->start_offset = start_offset; | |
1374 | chunk->end_offset = region_size - chunk->start_offset - map_size; | |
1375 | ||
1376 | chunk->nr_pages = region_size >> PAGE_SHIFT; | |
1377 | region_bits = pcpu_chunk_map_bits(chunk); | |
1378 | ||
1379 | alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]); | |
1380 | chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); | |
1381 | if (!chunk->alloc_map) | |
1382 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
1383 | alloc_size); | |
1384 | ||
1385 | alloc_size = | |
1386 | BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]); | |
1387 | chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); | |
1388 | if (!chunk->bound_map) | |
1389 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
1390 | alloc_size); | |
1391 | ||
1392 | alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]); | |
1393 | chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES); | |
1394 | if (!chunk->md_blocks) | |
1395 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
1396 | alloc_size); | |
1397 | ||
1398 | #ifdef CONFIG_MEMCG_KMEM | |
1399 | /* first chunk is free to use */ | |
1400 | chunk->obj_cgroups = NULL; | |
1401 | #endif | |
1402 | pcpu_init_md_blocks(chunk); | |
1403 | ||
1404 | /* manage populated page bitmap */ | |
1405 | chunk->immutable = true; | |
1406 | bitmap_fill(chunk->populated, chunk->nr_pages); | |
1407 | chunk->nr_populated = chunk->nr_pages; | |
1408 | chunk->nr_empty_pop_pages = chunk->nr_pages; | |
1409 | ||
1410 | chunk->free_bytes = map_size; | |
1411 | ||
1412 | if (chunk->start_offset) { | |
1413 | /* hide the beginning of the bitmap */ | |
1414 | offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; | |
1415 | bitmap_set(chunk->alloc_map, 0, offset_bits); | |
1416 | set_bit(0, chunk->bound_map); | |
1417 | set_bit(offset_bits, chunk->bound_map); | |
1418 | ||
1419 | chunk->chunk_md.first_free = offset_bits; | |
1420 | ||
1421 | pcpu_block_update_hint_alloc(chunk, 0, offset_bits); | |
1422 | } | |
1423 | ||
1424 | if (chunk->end_offset) { | |
1425 | /* hide the end of the bitmap */ | |
1426 | offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; | |
1427 | bitmap_set(chunk->alloc_map, | |
1428 | pcpu_chunk_map_bits(chunk) - offset_bits, | |
1429 | offset_bits); | |
1430 | set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE, | |
1431 | chunk->bound_map); | |
1432 | set_bit(region_bits, chunk->bound_map); | |
1433 | ||
1434 | pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) | |
1435 | - offset_bits, offset_bits); | |
1436 | } | |
1437 | ||
1438 | return chunk; | |
1439 | } | |
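 | ||
 | /* | |
 |  * Worked example of the region math above (all numbers are made up; | |
 |  * assumes PAGE_SIZE == 4096 and lcm_align == 4096): | |
 |  * | |
 |  *   tmp_addr     = 0x1000a340 (static region ends mid-page) | |
 |  *   aligned_addr = 0x1000a000 (tmp_addr & PAGE_MASK) | |
 |  *   start_offset = 0x340 | |
 |  *   map_size     = 0x5000 | |
 |  *   region_size  = ALIGN(0x340 + 0x5000, 0x1000) = 0x6000 | |
 |  *   end_offset   = 0x6000 - 0x340 - 0x5000 = 0xcc0 | |
 |  * | |
 |  * The chunk then hides [0, 0x340) and [0x5340, 0x6000) in its alloc_map | |
 |  * so the bitmap allocator only hands out the 0x5000 bytes served. | |
 |  */ | |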
1440 | ||
1441 | static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp) | |
1442 | { | |
1443 | struct pcpu_chunk *chunk; | |
1444 | int region_bits; | |
1445 | ||
1446 | chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); | |
1447 | if (!chunk) | |
1448 | return NULL; | |
1449 | ||
1450 | INIT_LIST_HEAD(&chunk->list); | |
1451 | chunk->nr_pages = pcpu_unit_pages; | |
1452 | region_bits = pcpu_chunk_map_bits(chunk); | |
1453 | ||
1454 | chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * | |
1455 | sizeof(chunk->alloc_map[0]), gfp); | |
1456 | if (!chunk->alloc_map) | |
1457 | goto alloc_map_fail; | |
1458 | ||
1459 | chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * | |
1460 | sizeof(chunk->bound_map[0]), gfp); | |
1461 | if (!chunk->bound_map) | |
1462 | goto bound_map_fail; | |
1463 | ||
1464 | chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * | |
1465 | sizeof(chunk->md_blocks[0]), gfp); | |
1466 | if (!chunk->md_blocks) | |
1467 | goto md_blocks_fail; | |
1468 | ||
1469 | #ifdef CONFIG_MEMCG_KMEM | |
1470 | if (!mem_cgroup_kmem_disabled()) { | |
1471 | chunk->obj_cgroups = | |
1472 | pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) * | |
1473 | sizeof(struct obj_cgroup *), gfp); | |
1474 | if (!chunk->obj_cgroups) | |
1475 | goto objcg_fail; | |
1476 | } | |
1477 | #endif | |
1478 | ||
1479 | pcpu_init_md_blocks(chunk); | |
1480 | ||
1481 | /* init metadata */ | |
1482 | chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; | |
1483 | ||
1484 | return chunk; | |
1485 | ||
1486 | #ifdef CONFIG_MEMCG_KMEM | |
1487 | objcg_fail: | |
1488 | pcpu_mem_free(chunk->md_blocks); | |
1489 | #endif | |
1490 | md_blocks_fail: | |
1491 | pcpu_mem_free(chunk->bound_map); | |
1492 | bound_map_fail: | |
1493 | pcpu_mem_free(chunk->alloc_map); | |
1494 | alloc_map_fail: | |
1495 | pcpu_mem_free(chunk); | |
1496 | ||
1497 | return NULL; | |
1498 | } | |
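 | ||
 | /* | |
 |  * The failure labels above unwind in reverse order of allocation. A | |
 |  * minimal sketch of the same idiom (hypothetical names): | |
 |  * | |
 |  *   a = alloc_a(); | |
 |  *   if (!a) | |
 |  *           return NULL; | |
 |  *   b = alloc_b(); | |
 |  *   if (!b) | |
 |  *           goto fail_b; | |
 |  *   return make_result(a, b); | |
 |  * fail_b: | |
 |  *   free_a(a); | |
 |  *   return NULL; | |
 |  */ | |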
1499 | ||
1500 | static void pcpu_free_chunk(struct pcpu_chunk *chunk) | |
1501 | { | |
1502 | if (!chunk) | |
1503 | return; | |
1504 | #ifdef CONFIG_MEMCG_KMEM | |
1505 | pcpu_mem_free(chunk->obj_cgroups); | |
1506 | #endif | |
1507 | pcpu_mem_free(chunk->md_blocks); | |
1508 | pcpu_mem_free(chunk->bound_map); | |
1509 | pcpu_mem_free(chunk->alloc_map); | |
1510 | pcpu_mem_free(chunk); | |
1511 | } | |
1512 | ||
1513 | /** | |
1514 | * pcpu_chunk_populated - post-population bookkeeping | |
1515 | * @chunk: pcpu_chunk which got populated | |
1516 | * @page_start: the start page | |
1517 | * @page_end: the end page | |
1518 | * | |
1519 | * Pages in [@page_start,@page_end) have been populated to @chunk. Update | |
1520 | * the bookkeeping information accordingly. Must be called after each | |
1521 | * successful population. | |
1522 | */ | |
1523 | static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, | |
1524 | int page_end) | |
1525 | { | |
1526 | int nr = page_end - page_start; | |
1527 | ||
1528 | lockdep_assert_held(&pcpu_lock); | |
1529 | ||
1530 | bitmap_set(chunk->populated, page_start, nr); | |
1531 | chunk->nr_populated += nr; | |
1532 | pcpu_nr_populated += nr; | |
1533 | ||
1534 | pcpu_update_empty_pages(chunk, nr); | |
1535 | } | |
1536 | ||
1537 | /** | |
1538 | * pcpu_chunk_depopulated - post-depopulation bookkeeping | |
1539 | * @chunk: pcpu_chunk which got depopulated | |
1540 | * @page_start: the start page | |
1541 | * @page_end: the end page | |
1542 | * | |
1543 | * Pages in [@page_start,@page_end) have been depopulated from @chunk. | |
1544 | * Update the bookkeeping information accordingly. Must be called after | |
1545 | * each successful depopulation. | |
1546 | */ | |
1547 | static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, | |
1548 | int page_start, int page_end) | |
1549 | { | |
1550 | int nr = page_end - page_start; | |
1551 | ||
1552 | lockdep_assert_held(&pcpu_lock); | |
1553 | ||
1554 | bitmap_clear(chunk->populated, page_start, nr); | |
1555 | chunk->nr_populated -= nr; | |
1556 | pcpu_nr_populated -= nr; | |
1557 | ||
1558 | pcpu_update_empty_pages(chunk, -nr); | |
1559 | } | |
1560 | ||
1561 | /* | |
1562 | * Chunk management implementation. | |
1563 | * | |
1564 | * To allow different implementations, chunk alloc/free and | |
1565 | * [de]population are implemented in a separate file which is pulled | |
1566 | * into this file and compiled together. The following functions | |
1567 | * should be implemented. | |
1568 | * | |
1569 | * pcpu_populate_chunk - populate the specified range of a chunk | |
1570 | * pcpu_depopulate_chunk - depopulate the specified range of a chunk | |
1571 | * pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk | |
1572 | * pcpu_create_chunk - create a new chunk | |
1573 | * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop | |
1574 | * pcpu_addr_to_page - translate address to the corresponding struct page | |
1575 | * pcpu_verify_alloc_info - check alloc_info is acceptable during init | |
1576 | */ | |
1577 | static int pcpu_populate_chunk(struct pcpu_chunk *chunk, | |
1578 | int page_start, int page_end, gfp_t gfp); | |
1579 | static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, | |
1580 | int page_start, int page_end); | |
1581 | static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, | |
1582 | int page_start, int page_end); | |
1583 | static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp); | |
1584 | static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); | |
1585 | static struct page *pcpu_addr_to_page(void *addr); | |
1586 | static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); | |
1587 | ||
1588 | #ifdef CONFIG_NEED_PER_CPU_KM | |
1589 | #include "percpu-km.c" | |
1590 | #else | |
1591 | #include "percpu-vm.c" | |
1592 | #endif | |
1593 | ||
1594 | /** | |
1595 | * pcpu_chunk_addr_search - determine chunk containing specified address | |
1596 | * @addr: address for which the chunk needs to be determined. | |
1597 | * | |
1598 | * This is an internal function that handles all but static allocations. | |
1599 | * Static percpu address values should never be passed into the allocator. | |
1600 | * | |
1601 | * RETURNS: | |
1602 | * The address of the found chunk. | |
1603 | */ | |
1604 | static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) | |
1605 | { | |
1606 | /* is it in the dynamic region (first chunk)? */ | |
1607 | if (pcpu_addr_in_chunk(pcpu_first_chunk, addr)) | |
1608 | return pcpu_first_chunk; | |
1609 | ||
1610 | /* is it in the reserved region? */ | |
1611 | if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr)) | |
1612 | return pcpu_reserved_chunk; | |
1613 | ||
1614 | /* | |
1615 | * The address is relative to unit0 which might be unused and | |
1616 | * thus unmapped. Offset the address to the unit space of the | |
1617 | * current processor before looking it up in the vmalloc | |
1618 | * space. Note that any possible cpu id can be used here, so | |
1619 | * there's no need to worry about preemption or cpu hotplug. | |
1620 | */ | |
1621 | addr += pcpu_unit_offsets[raw_smp_processor_id()]; | |
1622 | return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); | |
1623 | } | |
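 | ||
 | /* | |
 |  * Sketch of the offsetting above with made-up numbers (ignoring the | |
 |  * group base offset): if this cpu maps to unit 2 and pcpu_unit_size is | |
 |  * 256KB, then pcpu_unit_offsets[cpu] == 512KB; adding it moves @addr | |
 |  * from the possibly unmapped unit 0 into a unit that is certainly | |
 |  * mapped before the vmalloc page lookup. | |
 |  */ | |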
1624 | ||
1625 | #ifdef CONFIG_MEMCG_KMEM | |
1626 | static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, | |
1627 | struct obj_cgroup **objcgp) | |
1628 | { | |
1629 | struct obj_cgroup *objcg; | |
1630 | ||
1631 | if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT)) | |
1632 | return true; | |
1633 | ||
1634 | objcg = get_obj_cgroup_from_current(); | |
1635 | if (!objcg) | |
1636 | return true; | |
1637 | ||
1638 | if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) { | |
1639 | obj_cgroup_put(objcg); | |
1640 | return false; | |
1641 | } | |
1642 | ||
1643 | *objcgp = objcg; | |
1644 | return true; | |
1645 | } | |
1646 | ||
1647 | static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, | |
1648 | struct pcpu_chunk *chunk, int off, | |
1649 | size_t size) | |
1650 | { | |
1651 | if (!objcg) | |
1652 | return; | |
1653 | ||
1654 | if (likely(chunk && chunk->obj_cgroups)) { | |
1655 | chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg; | |
1656 | ||
1657 | rcu_read_lock(); | |
1658 | mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, | |
1659 | size * num_possible_cpus()); | |
1660 | rcu_read_unlock(); | |
1661 | } else { | |
1662 | obj_cgroup_uncharge(objcg, size * num_possible_cpus()); | |
1663 | obj_cgroup_put(objcg); | |
1664 | } | |
1665 | } | |
1666 | ||
1667 | static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) | |
1668 | { | |
1669 | struct obj_cgroup *objcg; | |
1670 | ||
1671 | if (unlikely(!chunk->obj_cgroups)) | |
1672 | return; | |
1673 | ||
1674 | objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT]; | |
1675 | if (!objcg) | |
1676 | return; | |
1677 | chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL; | |
1678 | ||
1679 | obj_cgroup_uncharge(objcg, size * num_possible_cpus()); | |
1680 | ||
1681 | rcu_read_lock(); | |
1682 | mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, | |
1683 | -(size * num_possible_cpus())); | |
1684 | rcu_read_unlock(); | |
1685 | ||
1686 | obj_cgroup_put(objcg); | |
1687 | } | |
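 | ||
 | /* | |
 |  * Accounting example (numbers illustrative only): a 64-byte accounted | |
 |  * allocation on a machine with 8 possible CPUs charges 8 * 64 = 512 | |
 |  * bytes to the object cgroup and MEMCG_PERCPU_B; the same amount is | |
 |  * uncharged again in the free hook above. | |
 |  */ | |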
1688 | ||
1689 | #else /* CONFIG_MEMCG_KMEM */ | |
1690 | static bool | |
1691 | pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp) | |
1692 | { | |
1693 | return true; | |
1694 | } | |
1695 | ||
1696 | static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, | |
1697 | struct pcpu_chunk *chunk, int off, | |
1698 | size_t size) | |
1699 | { | |
1700 | } | |
1701 | ||
1702 | static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) | |
1703 | { | |
1704 | } | |
1705 | #endif /* CONFIG_MEMCG_KMEM */ | |
1706 | ||
1707 | /** | |
1708 | * pcpu_alloc - the percpu allocator | |
1709 | * @size: size of area to allocate in bytes | |
1710 | * @align: alignment of area (max PAGE_SIZE) | |
1711 | * @reserved: allocate from the reserved chunk if available | |
1712 | * @gfp: allocation flags | |
1713 | * | |
1714 | * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't | |
1715 | * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN | |
1716 | * then no warning will be triggered on invalid or failed allocation | |
1717 | * requests. | |
1718 | * | |
1719 | * RETURNS: | |
1720 | * Percpu pointer to the allocated area on success, NULL on failure. | |
1721 | */ | |
1722 | static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, | |
1723 | gfp_t gfp) | |
1724 | { | |
1725 | gfp_t pcpu_gfp; | |
1726 | bool is_atomic; | |
1727 | bool do_warn; | |
1728 | struct obj_cgroup *objcg = NULL; | |
1729 | static int warn_limit = 10; | |
1730 | struct pcpu_chunk *chunk, *next; | |
1731 | const char *err; | |
1732 | int slot, off, cpu, ret; | |
1733 | unsigned long flags; | |
1734 | void __percpu *ptr; | |
1735 | size_t bits, bit_align; | |
1736 | ||
1737 | gfp = current_gfp_context(gfp); | |
1738 | /* whitelisted flags that can be passed to the backing allocators */ | |
1739 | pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); | |
1740 | is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; | |
1741 | do_warn = !(gfp & __GFP_NOWARN); | |
1742 | ||
1743 | /* | |
1744 | * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE, | |
1745 | * therefore the alignment must be at least that many bytes. | |
1746 | * An allocation may have internal fragmentation of up to | |
1747 | * PCPU_MIN_ALLOC_SIZE - 1 bytes from this rounding. | |
1748 | */ | |
1749 | if (unlikely(align < PCPU_MIN_ALLOC_SIZE)) | |
1750 | align = PCPU_MIN_ALLOC_SIZE; | |
1751 | ||
1752 | size = ALIGN(size, PCPU_MIN_ALLOC_SIZE); | |
1753 | bits = size >> PCPU_MIN_ALLOC_SHIFT; | |
1754 | bit_align = align >> PCPU_MIN_ALLOC_SHIFT; | |
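 | /* | |
 |  * e.g. with PCPU_MIN_ALLOC_SIZE == 4 (an assumption here): a request | |
 |  * of size 21 and align 8 becomes size 24, bits 6, bit_align 2. | |
 |  */ | |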
1755 | ||
1756 | if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || | |
1757 | !is_power_of_2(align))) { | |
1758 | WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", | |
1759 | size, align); | |
1760 | return NULL; | |
1761 | } | |
1762 | ||
1763 | if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg))) | |
1764 | return NULL; | |
1765 | ||
1766 | if (!is_atomic) { | |
1767 | /* | |
1768 | * pcpu_balance_workfn() allocates memory under this mutex, | |
1769 | * and it may wait for memory reclaim. Allow current task | |
1770 | * to become OOM victim, in case of memory pressure. | |
1771 | */ | |
1772 | if (gfp & __GFP_NOFAIL) { | |
1773 | mutex_lock(&pcpu_alloc_mutex); | |
1774 | } else if (mutex_lock_killable(&pcpu_alloc_mutex)) { | |
1775 | pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); | |
1776 | return NULL; | |
1777 | } | |
1778 | } | |
1779 | ||
1780 | spin_lock_irqsave(&pcpu_lock, flags); | |
1781 | ||
1782 | /* serve reserved allocations from the reserved chunk if available */ | |
1783 | if (reserved && pcpu_reserved_chunk) { | |
1784 | chunk = pcpu_reserved_chunk; | |
1785 | ||
1786 | off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); | |
1787 | if (off < 0) { | |
1788 | err = "alloc from reserved chunk failed"; | |
1789 | goto fail_unlock; | |
1790 | } | |
1791 | ||
1792 | off = pcpu_alloc_area(chunk, bits, bit_align, off); | |
1793 | if (off >= 0) | |
1794 | goto area_found; | |
1795 | ||
1796 | err = "alloc from reserved chunk failed"; | |
1797 | goto fail_unlock; | |
1798 | } | |
1799 | ||
1800 | restart: | |
1801 | /* search through normal chunks */ | |
1802 | for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) { | |
1803 | list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot], | |
1804 | list) { | |
1805 | off = pcpu_find_block_fit(chunk, bits, bit_align, | |
1806 | is_atomic); | |
1807 | if (off < 0) { | |
1808 | if (slot < PCPU_SLOT_FAIL_THRESHOLD) | |
1809 | pcpu_chunk_move(chunk, 0); | |
1810 | continue; | |
1811 | } | |
1812 | ||
1813 | off = pcpu_alloc_area(chunk, bits, bit_align, off); | |
1814 | if (off >= 0) { | |
1815 | pcpu_reintegrate_chunk(chunk); | |
1816 | goto area_found; | |
1817 | } | |
1818 | } | |
1819 | } | |
1820 | ||
1821 | spin_unlock_irqrestore(&pcpu_lock, flags); | |
1822 | ||
1823 | /* | |
1824 | * No space left. Create a new chunk. We don't want multiple | |
1825 | * tasks to create chunks simultaneously. Serialize and create iff | |
1826 | * there's still no empty chunk after grabbing the mutex. | |
1827 | */ | |
1828 | if (is_atomic) { | |
1829 | err = "atomic alloc failed, no space left"; | |
1830 | goto fail; | |
1831 | } | |
1832 | ||
1833 | if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) { | |
1834 | chunk = pcpu_create_chunk(pcpu_gfp); | |
1835 | if (!chunk) { | |
1836 | err = "failed to allocate new chunk"; | |
1837 | goto fail; | |
1838 | } | |
1839 | ||
1840 | spin_lock_irqsave(&pcpu_lock, flags); | |
1841 | pcpu_chunk_relocate(chunk, -1); | |
1842 | } else { | |
1843 | spin_lock_irqsave(&pcpu_lock, flags); | |
1844 | } | |
1845 | ||
1846 | goto restart; | |
1847 | ||
1848 | area_found: | |
1849 | pcpu_stats_area_alloc(chunk, size); | |
1850 | spin_unlock_irqrestore(&pcpu_lock, flags); | |
1851 | ||
1852 | /* populate if not all pages are already there */ | |
1853 | if (!is_atomic) { | |
1854 | unsigned int page_start, page_end, rs, re; | |
1855 | ||
1856 | page_start = PFN_DOWN(off); | |
1857 | page_end = PFN_UP(off + size); | |
1858 | ||
1859 | bitmap_for_each_clear_region(chunk->populated, rs, re, | |
1860 | page_start, page_end) { | |
1861 | WARN_ON(chunk->immutable); | |
1862 | ||
1863 | ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); | |
1864 | ||
1865 | spin_lock_irqsave(&pcpu_lock, flags); | |
1866 | if (ret) { | |
1867 | pcpu_free_area(chunk, off); | |
1868 | err = "failed to populate"; | |
1869 | goto fail_unlock; | |
1870 | } | |
1871 | pcpu_chunk_populated(chunk, rs, re); | |
1872 | spin_unlock_irqrestore(&pcpu_lock, flags); | |
1873 | } | |
1874 | ||
1875 | mutex_unlock(&pcpu_alloc_mutex); | |
1876 | } | |
1877 | ||
1878 | if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) | |
1879 | pcpu_schedule_balance_work(); | |
1880 | ||
1881 | /* clear the areas and return address relative to base address */ | |
1882 | for_each_possible_cpu(cpu) | |
1883 | memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); | |
1884 | ||
1885 | ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); | |
1886 | kmemleak_alloc_percpu(ptr, size, gfp); | |
1887 | ||
1888 | trace_percpu_alloc_percpu(reserved, is_atomic, size, align, | |
1889 | chunk->base_addr, off, ptr); | |
1890 | ||
1891 | pcpu_memcg_post_alloc_hook(objcg, chunk, off, size); | |
1892 | ||
1893 | return ptr; | |
1894 | ||
1895 | fail_unlock: | |
1896 | spin_unlock_irqrestore(&pcpu_lock, flags); | |
1897 | fail: | |
1898 | trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); | |
1899 | ||
1900 | if (!is_atomic && do_warn && warn_limit) { | |
1901 | pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", | |
1902 | size, align, is_atomic, err); | |
1903 | dump_stack(); | |
1904 | if (!--warn_limit) | |
1905 | pr_info("limit reached, disable warning\n"); | |
1906 | } | |
1907 | if (is_atomic) { | |
1908 | /* see the flag handling in pcpu_balance_workfn() */ | |
1909 | pcpu_atomic_alloc_failed = true; | |
1910 | pcpu_schedule_balance_work(); | |
1911 | } else { | |
1912 | mutex_unlock(&pcpu_alloc_mutex); | |
1913 | } | |
1914 | ||
1915 | pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); | |
1916 | ||
1917 | return NULL; | |
1918 | } | |
1919 | ||
1920 | /** | |
1921 | * __alloc_percpu_gfp - allocate dynamic percpu area | |
1922 | * @size: size of area to allocate in bytes | |
1923 | * @align: alignment of area (max PAGE_SIZE) | |
1924 | * @gfp: allocation flags | |
1925 | * | |
1926 | * Allocate zero-filled percpu area of @size bytes aligned at @align. If | |
1927 | * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can | |
1928 | * be called from any context but is a lot more likely to fail. If @gfp | |
1929 | * has __GFP_NOWARN then no warning will be triggered on invalid or failed | |
1930 | * allocation requests. | |
1931 | * | |
1932 | * RETURNS: | |
1933 | * Percpu pointer to the allocated area on success, NULL on failure. | |
1934 | */ | |
1935 | void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) | |
1936 | { | |
1937 | return pcpu_alloc(size, align, false, gfp); | |
1938 | } | |
1939 | EXPORT_SYMBOL_GPL(__alloc_percpu_gfp); | |
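 | ||
 | /* | |
 |  * Typical usage (sketch only; error handling trimmed, names are | |
 |  * hypothetical): | |
 |  * | |
 |  *   unsigned long __percpu *cnt; | |
 |  *   unsigned long total = 0; | |
 |  *   int cpu; | |
 |  * | |
 |  *   cnt = __alloc_percpu_gfp(sizeof(*cnt), __alignof__(*cnt), GFP_KERNEL); | |
 |  *   if (!cnt) | |
 |  *           return -ENOMEM; | |
 |  *   this_cpu_inc(*cnt);              // fast path on the local cpu | |
 |  *   for_each_possible_cpu(cpu)       // slow path: fold all units | |
 |  *           total += *per_cpu_ptr(cnt, cpu); | |
 |  *   free_percpu(cnt); | |
 |  */ | |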
1940 | ||
1941 | /** | |
1942 | * __alloc_percpu - allocate dynamic percpu area | |
1943 | * @size: size of area to allocate in bytes | |
1944 | * @align: alignment of area (max PAGE_SIZE) | |
1945 | * | |
1946 | * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL). | |
1947 | */ | |
1948 | void __percpu *__alloc_percpu(size_t size, size_t align) | |
1949 | { | |
1950 | return pcpu_alloc(size, align, false, GFP_KERNEL); | |
1951 | } | |
1952 | EXPORT_SYMBOL_GPL(__alloc_percpu); | |
1953 | ||
1954 | /** | |
1955 | * __alloc_reserved_percpu - allocate reserved percpu area | |
1956 | * @size: size of area to allocate in bytes | |
1957 | * @align: alignment of area (max PAGE_SIZE) | |
1958 | * | |
1959 | * Allocate zero-filled percpu area of @size bytes aligned at @align | |
1960 | * from reserved percpu area if arch has set it up; otherwise, | |
1961 | * allocation is served from the same dynamic area. Might sleep. | |
1962 | * Might trigger writeouts. | |
1963 | * | |
1964 | * CONTEXT: | |
1965 | * Does GFP_KERNEL allocation. | |
1966 | * | |
1967 | * RETURNS: | |
1968 | * Percpu pointer to the allocated area on success, NULL on failure. | |
1969 | */ | |
1970 | void __percpu *__alloc_reserved_percpu(size_t size, size_t align) | |
1971 | { | |
1972 | return pcpu_alloc(size, align, true, GFP_KERNEL); | |
1973 | } | |
1974 | ||
1975 | /** | |
1976 | * pcpu_balance_free - manage the amount of free chunks | |
1977 | * @empty_only: free chunks only if there are no populated pages | |
1978 | * | |
1979 | * If empty_only is %false, reclaim all fully free chunks regardless of the | |
1980 | * number of populated pages. Otherwise, only reclaim chunks that have no | |
1981 | * populated pages. | |
1982 | * | |
1983 | * CONTEXT: | |
1984 | * pcpu_lock (can be dropped temporarily) | |
1985 | */ | |
1986 | static void pcpu_balance_free(bool empty_only) | |
1987 | { | |
1988 | LIST_HEAD(to_free); | |
1989 | struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot]; | |
1990 | struct pcpu_chunk *chunk, *next; | |
1991 | ||
1992 | lockdep_assert_held(&pcpu_lock); | |
1993 | ||
1994 | /* | |
1995 | * There's no reason to keep around multiple unused chunks and VM | |
1996 | * areas can be scarce. Destroy all free chunks except for one. | |
1997 | */ | |
1998 | list_for_each_entry_safe(chunk, next, free_head, list) { | |
1999 | WARN_ON(chunk->immutable); | |
2000 | ||
2001 | /* spare the first one */ | |
2002 | if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) | |
2003 | continue; | |
2004 | ||
2005 | if (!empty_only || chunk->nr_empty_pop_pages == 0) | |
2006 | list_move(&chunk->list, &to_free); | |
2007 | } | |
2008 | ||
2009 | if (list_empty(&to_free)) | |
2010 | return; | |
2011 | ||
2012 | spin_unlock_irq(&pcpu_lock); | |
2013 | list_for_each_entry_safe(chunk, next, &to_free, list) { | |
2014 | unsigned int rs, re; | |
2015 | ||
2016 | bitmap_for_each_set_region(chunk->populated, rs, re, 0, | |
2017 | chunk->nr_pages) { | |
2018 | pcpu_depopulate_chunk(chunk, rs, re); | |
2019 | spin_lock_irq(&pcpu_lock); | |
2020 | pcpu_chunk_depopulated(chunk, rs, re); | |
2021 | spin_unlock_irq(&pcpu_lock); | |
2022 | } | |
2023 | pcpu_destroy_chunk(chunk); | |
2024 | cond_resched(); | |
2025 | } | |
2026 | spin_lock_irq(&pcpu_lock); | |
2027 | } | |
2028 | ||
2029 | /** | |
2030 | * pcpu_balance_populated - manage the amount of populated pages | |
2031 | * | |
2032 | * Maintain a certain amount of populated pages to satisfy atomic allocations. | |
2033 | * It is possible that this is called when physical memory is scarce, causing | |
2034 | * the OOM killer to be triggered. We should avoid doing so until an actual | |
2035 | * allocation causes the failure, as it is possible that requests can be | |
2036 | * serviced from already backed regions. | |
2037 | * | |
2038 | * CONTEXT: | |
2039 | * pcpu_lock (can be dropped temporarily) | |
2040 | */ | |
2041 | static void pcpu_balance_populated(void) | |
2042 | { | |
2043 | /* gfp flags passed to underlying allocators */ | |
2044 | const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; | |
2045 | struct pcpu_chunk *chunk; | |
2046 | int slot, nr_to_pop, ret; | |
2047 | ||
2048 | lockdep_assert_held(&pcpu_lock); | |
2049 | ||
2050 | /* | |
2051 | * Ensure there is a certain number of free populated pages for | |
2052 | * atomic allocs. Fill up from the most packed so that atomic | |
2053 | * allocs don't increase fragmentation. If atomic allocation | |
2054 | * failed previously, always populate the maximum amount. This | |
2055 | * should prevent atomic allocs larger than PAGE_SIZE from | |
2056 | * failing indefinitely; however, large atomic allocs are not | |
2057 | * something we support properly and can be highly unreliable and | |
2058 | * inefficient. | |
2059 | */ | |
2060 | retry_pop: | |
2061 | if (pcpu_atomic_alloc_failed) { | |
2062 | nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; | |
2063 | /* best effort anyway, don't worry about synchronization */ | |
2064 | pcpu_atomic_alloc_failed = false; | |
2065 | } else { | |
2066 | nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - | |
2067 | pcpu_nr_empty_pop_pages, | |
2068 | 0, PCPU_EMPTY_POP_PAGES_HIGH); | |
2069 | } | |
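 | /* | |
 |  * e.g. assuming PCPU_EMPTY_POP_PAGES_HIGH == 4: with one empty | |
 |  * populated page left, nr_to_pop = clamp(4 - 1, 0, 4) = 3. | |
 |  */ | |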
2070 | ||
2071 | for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) { | |
2072 | unsigned int nr_unpop = 0, rs, re; | |
2073 | ||
2074 | if (!nr_to_pop) | |
2075 | break; | |
2076 | ||
2077 | list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) { | |
2078 | nr_unpop = chunk->nr_pages - chunk->nr_populated; | |
2079 | if (nr_unpop) | |
2080 | break; | |
2081 | } | |
2082 | ||
2083 | if (!nr_unpop) | |
2084 | continue; | |
2085 | ||
2086 | /* @chunk can't go away while pcpu_alloc_mutex is held */ | |
2087 | bitmap_for_each_clear_region(chunk->populated, rs, re, 0, | |
2088 | chunk->nr_pages) { | |
2089 | int nr = min_t(int, re - rs, nr_to_pop); | |
2090 | ||
2091 | spin_unlock_irq(&pcpu_lock); | |
2092 | ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); | |
2093 | cond_resched(); | |
2094 | spin_lock_irq(&pcpu_lock); | |
2095 | if (!ret) { | |
2096 | nr_to_pop -= nr; | |
2097 | pcpu_chunk_populated(chunk, rs, rs + nr); | |
2098 | } else { | |
2099 | nr_to_pop = 0; | |
2100 | } | |
2101 | ||
2102 | if (!nr_to_pop) | |
2103 | break; | |
2104 | } | |
2105 | } | |
2106 | ||
2107 | if (nr_to_pop) { | |
2108 | /* ran out of chunks to populate, create a new one and retry */ | |
2109 | spin_unlock_irq(&pcpu_lock); | |
2110 | chunk = pcpu_create_chunk(gfp); | |
2111 | cond_resched(); | |
2112 | spin_lock_irq(&pcpu_lock); | |
2113 | if (chunk) { | |
2114 | pcpu_chunk_relocate(chunk, -1); | |
2115 | goto retry_pop; | |
2116 | } | |
2117 | } | |
2118 | } | |
2119 | ||
2120 | /** | |
2121 | * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages | |
2122 | * | |
2123 | * Scan over chunks in the depopulate list and try to release unused populated | |
2124 | * pages back to the system. Depopulated chunks are sidelined to prevent | |
2125 | * repopulating these pages unless required. Fully free chunks are reintegrated | |
2126 | * and freed accordingly (one is kept around). If we drop below the empty | |
2127 | * populated pages threshold, reintegrate the chunk if it has empty free pages. | |
2128 | * Each chunk is scanned in reverse order to keep populated pages close to | |
2129 | * the beginning of the chunk. | |
2130 | * | |
2131 | * CONTEXT: | |
2132 | * pcpu_lock (can be dropped temporarily) | |
2133 | * | |
2134 | */ | |
2135 | static void pcpu_reclaim_populated(void) | |
2136 | { | |
2137 | struct pcpu_chunk *chunk; | |
2138 | struct pcpu_block_md *block; | |
2139 | int freed_page_start, freed_page_end; | |
2140 | int i, end; | |
2141 | bool reintegrate; | |
2142 | ||
2143 | lockdep_assert_held(&pcpu_lock); | |
2144 | ||
2145 | /* | |
2146 | * Once a chunk is isolated to the to_depopulate list, the chunk is no | |
2147 | * longer discoverable to allocations which may populate pages. The only | |
2148 | * other accessor is the free path, which only returns an area back to the | |
2149 | * allocator without touching the populated bitmap. | |
2150 | */ | |
2151 | while (!list_empty(&pcpu_chunk_lists[pcpu_to_depopulate_slot])) { | |
2152 | chunk = list_first_entry(&pcpu_chunk_lists[pcpu_to_depopulate_slot], | |
2153 | struct pcpu_chunk, list); | |
2154 | WARN_ON(chunk->immutable); | |
2155 | ||
2156 | /* | |
2157 | * Scan the chunk's pages in reverse order to keep populated | |
2158 | * pages close to the beginning of the chunk. | |
2159 | */ | |
2160 | freed_page_start = chunk->nr_pages; | |
2161 | freed_page_end = 0; | |
2162 | reintegrate = false; | |
2163 | for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) { | |
2164 | /* no more work to do */ | |
2165 | if (chunk->nr_empty_pop_pages == 0) | |
2166 | break; | |
2167 | ||
2168 | /* reintegrate chunk to prevent atomic alloc failures */ | |
2169 | if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) { | |
2170 | reintegrate = true; | |
2171 | goto end_chunk; | |
2172 | } | |
2173 | ||
2174 | /* | |
2175 | * If the page is empty and populated, start or | |
2176 | * extend the (i, end) range. If i == 0, decrease | |
2177 | * i and perform the depopulation to cover the last | |
2178 | * (first) page in the chunk. | |
2179 | */ | |
2180 | block = chunk->md_blocks + i; | |
2181 | if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS && | |
2182 | test_bit(i, chunk->populated)) { | |
2183 | if (end == -1) | |
2184 | end = i; | |
2185 | if (i > 0) | |
2186 | continue; | |
2187 | i--; | |
2188 | } | |
2189 | ||
2190 | /* depopulate if there is an active range */ | |
2191 | if (end == -1) | |
2192 | continue; | |
2193 | ||
2194 | spin_unlock_irq(&pcpu_lock); | |
2195 | pcpu_depopulate_chunk(chunk, i + 1, end + 1); | |
2196 | cond_resched(); | |
2197 | spin_lock_irq(&pcpu_lock); | |
2198 | ||
2199 | pcpu_chunk_depopulated(chunk, i + 1, end + 1); | |
2200 | freed_page_start = min(freed_page_start, i + 1); | |
2201 | freed_page_end = max(freed_page_end, end + 1); | |
2202 | ||
2203 | /* reset the range and continue */ | |
2204 | end = -1; | |
2205 | } | |
2206 | ||
2207 | end_chunk: | |
2208 | /* batch tlb flush per chunk to amortize cost */ | |
2209 | if (freed_page_start < freed_page_end) { | |
2210 | spin_unlock_irq(&pcpu_lock); | |
2211 | pcpu_post_unmap_tlb_flush(chunk, | |
2212 | freed_page_start, | |
2213 | freed_page_end); | |
2214 | cond_resched(); | |
2215 | spin_lock_irq(&pcpu_lock); | |
2216 | } | |
2217 | ||
2218 | if (reintegrate || chunk->free_bytes == pcpu_unit_size) | |
2219 | pcpu_reintegrate_chunk(chunk); | |
2220 | else | |
2221 | list_move_tail(&chunk->list, | |
2222 | &pcpu_chunk_lists[pcpu_sidelined_slot]); | |
2223 | } | |
2224 | } | |
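 | ||
 | /* | |
 |  * Reverse-scan example (page states are illustrative): for a 4-page | |
 |  * chunk where only pages 1 and 2 are empty and populated, the loop | |
 |  * walks i = 3, 2, 1, 0, extending the range to (i, end) = (0, 2); when | |
 |  * it reaches the in-use page 0 it depopulates (i + 1, end + 1), i.e. | |
 |  * pages [1, 3), keeping populated pages at the chunk's beginning. | |
 |  */ | |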
2225 | ||
2226 | /** | |
2227 | * pcpu_balance_workfn - manage the amount of free chunks and populated pages | |
2228 | * @work: unused | |
2229 | * | |
2230 | * Manage the number of fully free chunks and the number of populated pages. | |
2231 | * An important thing to consider is when pages are freed and how they | |
2232 | * contribute to the global counts. | |
2233 | */ | |
2234 | static void pcpu_balance_workfn(struct work_struct *work) | |
2235 | { | |
2236 | /* | |
2237 | * pcpu_balance_free() is called twice because the first time we may | |
2238 | * trim pages in the active pcpu_nr_empty_pop_pages which may cause us | |
2239 | * to grow other chunks. This then gives pcpu_reclaim_populated() time | |
2240 | * to move fully free chunks to the active list to be freed if | |
2241 | * appropriate. | |
2242 | */ | |
2243 | mutex_lock(&pcpu_alloc_mutex); | |
2244 | spin_lock_irq(&pcpu_lock); | |
2245 | ||
2246 | pcpu_balance_free(false); | |
2247 | pcpu_reclaim_populated(); | |
2248 | pcpu_balance_populated(); | |
2249 | pcpu_balance_free(true); | |
2250 | ||
2251 | spin_unlock_irq(&pcpu_lock); | |
2252 | mutex_unlock(&pcpu_alloc_mutex); | |
2253 | } | |
2254 | ||
2255 | /** | |
2256 | * free_percpu - free percpu area | |
2257 | * @ptr: pointer to area to free | |
2258 | * | |
2259 | * Free percpu area @ptr. | |
2260 | * | |
2261 | * CONTEXT: | |
2262 | * Can be called from atomic context. | |
2263 | */ | |
2264 | void free_percpu(void __percpu *ptr) | |
2265 | { | |
2266 | void *addr; | |
2267 | struct pcpu_chunk *chunk; | |
2268 | unsigned long flags; | |
2269 | int size, off; | |
2270 | bool need_balance = false; | |
2271 | ||
2272 | if (!ptr) | |
2273 | return; | |
2274 | ||
2275 | kmemleak_free_percpu(ptr); | |
2276 | ||
2277 | addr = __pcpu_ptr_to_addr(ptr); | |
2278 | ||
2279 | spin_lock_irqsave(&pcpu_lock, flags); | |
2280 | ||
2281 | chunk = pcpu_chunk_addr_search(addr); | |
2282 | off = addr - chunk->base_addr; | |
2283 | ||
2284 | size = pcpu_free_area(chunk, off); | |
2285 | ||
2286 | pcpu_memcg_free_hook(chunk, off, size); | |
2287 | ||
2288 | /* | |
2289 | * If there is more than one fully free chunk, wake up the grim reaper. | |
2290 | * If the chunk is isolated, it may be in the process of being | |
2291 | * reclaimed. Let reclaim manage cleaning up of that chunk. | |
2292 | */ | |
2293 | if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) { | |
2294 | struct pcpu_chunk *pos; | |
2295 | ||
2296 | list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list) | |
2297 | if (pos != chunk) { | |
2298 | need_balance = true; | |
2299 | break; | |
2300 | } | |
2301 | } else if (pcpu_should_reclaim_chunk(chunk)) { | |
2302 | pcpu_isolate_chunk(chunk); | |
2303 | need_balance = true; | |
2304 | } | |
2305 | ||
2306 | trace_percpu_free_percpu(chunk->base_addr, off, ptr); | |
2307 | ||
2308 | spin_unlock_irqrestore(&pcpu_lock, flags); | |
2309 | ||
2310 | if (need_balance) | |
2311 | pcpu_schedule_balance_work(); | |
2312 | } | |
2313 | EXPORT_SYMBOL_GPL(free_percpu); | |
2314 | ||
2315 | bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) | |
2316 | { | |
2317 | #ifdef CONFIG_SMP | |
2318 | const size_t static_size = __per_cpu_end - __per_cpu_start; | |
2319 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); | |
2320 | unsigned int cpu; | |
2321 | ||
2322 | for_each_possible_cpu(cpu) { | |
2323 | void *start = per_cpu_ptr(base, cpu); | |
2324 | void *va = (void *)addr; | |
2325 | ||
2326 | if (va >= start && va < start + static_size) { | |
2327 | if (can_addr) { | |
2328 | *can_addr = (unsigned long) (va - start); | |
2329 | *can_addr += (unsigned long) | |
2330 | per_cpu_ptr(base, get_boot_cpu_id()); | |
2331 | } | |
2332 | return true; | |
2333 | } | |
2334 | } | |
2335 | #endif | |
2336 | /* on UP, can't distinguish from other static vars, always false */ | |
2337 | return false; | |
2338 | } | |
2339 | ||
2340 | /** | |
2341 | * is_kernel_percpu_address - test whether address is from static percpu area | |
2342 | * @addr: address to test | |
2343 | * | |
2344 | * Test whether @addr belongs to the in-kernel static percpu area. Module | |
2345 | * static percpu areas are not considered. For those, use | |
2346 | * is_module_percpu_address(). | |
2347 | * | |
2348 | * RETURNS: | |
2349 | * %true if @addr is from the in-kernel static percpu area, %false otherwise. | |
2350 | */ | |
2351 | bool is_kernel_percpu_address(unsigned long addr) | |
2352 | { | |
2353 | return __is_kernel_percpu_address(addr, NULL); | |
2354 | } | |
2355 | ||
2356 | /** | |
2357 | * per_cpu_ptr_to_phys - convert translated percpu address to physical address | |
2358 | * @addr: the address to be converted to physical address | |
2359 | * | |
2360 | * Given @addr, which is a dereferenceable address obtained via one of | |
2361 | * the percpu access macros, this function translates it into its physical | |
2362 | * address. The caller is responsible for ensuring @addr stays valid | |
2363 | * until this function finishes. | |
2364 | * | |
2365 | * The percpu allocator has special setup for the first chunk, which currently | |
2366 | * supports either embedding in the linear address space or vmalloc mapping, | |
2367 | * and, from the second chunk on, the backing allocator (currently either vm | |
2368 | * or km) provides the translation. | |
2369 | * | |
2370 | * The address could be translated simply without checking if it falls into | |
2371 | * the first chunk, but the current code better reflects how the percpu | |
2372 | * allocator actually works, and the verification can discover bugs both in | |
2373 | * the percpu allocator itself and in per_cpu_ptr_to_phys() callers. So we | |
2374 | * keep the current code. | |
2375 | * | |
2376 | * RETURNS: | |
2377 | * The physical address for @addr. | |
2378 | */ | |
2379 | phys_addr_t per_cpu_ptr_to_phys(void *addr) | |
2380 | { | |
2381 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); | |
2382 | bool in_first_chunk = false; | |
2383 | unsigned long first_low, first_high; | |
2384 | unsigned int cpu; | |
2385 | ||
2386 | /* | |
2387 | * The following test on unit_low/high isn't strictly | |
2388 | * necessary but will speed up lookups of addresses which | |
2389 | * aren't in the first chunk. | |
2390 | * | |
2391 | * The address check is against full chunk sizes. pcpu_base_addr | |
2392 | * points to the beginning of the first chunk including the | |
2393 | * static region. Assumes good intent as the first chunk may | |
2394 | * not be full (i.e., < pcpu_unit_pages in size). | |
2395 | */ | |
2396 | first_low = (unsigned long)pcpu_base_addr + | |
2397 | pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); | |
2398 | first_high = (unsigned long)pcpu_base_addr + | |
2399 | pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); | |
2400 | if ((unsigned long)addr >= first_low && | |
2401 | (unsigned long)addr < first_high) { | |
2402 | for_each_possible_cpu(cpu) { | |
2403 | void *start = per_cpu_ptr(base, cpu); | |
2404 | ||
2405 | if (addr >= start && addr < start + pcpu_unit_size) { | |
2406 | in_first_chunk = true; | |
2407 | break; | |
2408 | } | |
2409 | } | |
2410 | } | |
2411 | ||
2412 | if (in_first_chunk) { | |
2413 | if (!is_vmalloc_addr(addr)) | |
2414 | return __pa(addr); | |
2415 | else | |
2416 | return page_to_phys(vmalloc_to_page(addr)) + | |
2417 | offset_in_page(addr); | |
2418 | } else | |
2419 | return page_to_phys(pcpu_addr_to_page(addr)) + | |
2420 | offset_in_page(addr); | |
2421 | } | |
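 | ||
 | /* | |
 |  * Usage sketch (hypothetical names): | |
 |  * | |
 |  *   void *va = per_cpu_ptr(pcpu_var, cpu); | |
 |  *   phys_addr_t pa = per_cpu_ptr_to_phys(va); | |
 |  * | |
 |  * pcpu_var must stay allocated until the call returns. | |
 |  */ | |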
2422 | ||
2423 | /** | |
2424 | * pcpu_alloc_alloc_info - allocate percpu allocation info | |
2425 | * @nr_groups: the number of groups | |
2426 | * @nr_units: the number of units | |
2427 | * | |
2428 | * Allocate ai which is large enough for @nr_groups groups containing | |
2429 | * @nr_units units. The returned ai's groups[0].cpu_map points to the | |
2430 | * cpu_map array which is long enough for @nr_units and filled with | |
2431 | * NR_CPUS. It's the caller's responsibility to initialize the cpu_map | |
2432 | * pointers of the other groups. | |
2433 | * | |
2434 | * RETURNS: | |
2435 | * Pointer to the allocated pcpu_alloc_info on success, NULL on | |
2436 | * failure. | |
2437 | */ | |
2438 | struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, | |
2439 | int nr_units) | |
2440 | { | |
2441 | struct pcpu_alloc_info *ai; | |
2442 | size_t base_size, ai_size; | |
2443 | void *ptr; | |
2444 | int unit; | |
2445 | ||
2446 | base_size = ALIGN(struct_size(ai, groups, nr_groups), | |
2447 | __alignof__(ai->groups[0].cpu_map[0])); | |
2448 | ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); | |
2449 | ||
2450 | ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE); | |
2451 | if (!ptr) | |
2452 | return NULL; | |
2453 | ai = ptr; | |
2454 | ptr += base_size; | |
2455 | ||
2456 | ai->groups[0].cpu_map = ptr; | |
2457 | ||
2458 | for (unit = 0; unit < nr_units; unit++) | |
2459 | ai->groups[0].cpu_map[unit] = NR_CPUS; | |
2460 | ||
2461 | ai->nr_groups = nr_groups; | |
2462 | ai->__ai_size = PFN_ALIGN(ai_size); | |
2463 | ||
2464 | return ai; | |
2465 | } | |
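 | ||
 | /* | |
 |  * Memory layout sketch of the allocation above: the ai struct, its | |
 |  * groups[] and the shared cpu_map all live in one page-aligned block: | |
 |  * | |
 |  *   [ pcpu_alloc_info | groups[0..nr_groups-1] | cpu_map[0..nr_units-1] ] | |
 |  * | |
 |  * groups[0].cpu_map points at the shared array; callers slice it up by | |
 |  * advancing the cpu_map pointers of the later groups (see | |
 |  * pcpu_build_alloc_info()). | |
 |  */ | |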
2466 | ||
2467 | /** | |
2468 | * pcpu_free_alloc_info - free percpu allocation info | |
2469 | * @ai: pcpu_alloc_info to free | |
2470 | * | |
2471 | * Free @ai which was allocated by pcpu_alloc_alloc_info(). | |
2472 | */ | |
2473 | void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) | |
2474 | { | |
2475 | memblock_free_early(__pa(ai), ai->__ai_size); | |
2476 | } | |
2477 | ||
2478 | /** | |
2479 | * pcpu_dump_alloc_info - print out information about pcpu_alloc_info | |
2480 | * @lvl: loglevel | |
2481 | * @ai: allocation info to dump | |
2482 | * | |
2483 | * Print out information about @ai using loglevel @lvl. | |
2484 | */ | |
2485 | static void pcpu_dump_alloc_info(const char *lvl, | |
2486 | const struct pcpu_alloc_info *ai) | |
2487 | { | |
2488 | int group_width = 1, cpu_width = 1, width; | |
2489 | char empty_str[] = "--------"; | |
2490 | int alloc = 0, alloc_end = 0; | |
2491 | int group, v; | |
2492 | int upa, apl; /* units per alloc, allocs per line */ | |
2493 | ||
2494 | v = ai->nr_groups; | |
2495 | while (v /= 10) | |
2496 | group_width++; | |
2497 | ||
2498 | v = num_possible_cpus(); | |
2499 | while (v /= 10) | |
2500 | cpu_width++; | |
2501 | empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; | |
2502 | ||
2503 | upa = ai->alloc_size / ai->unit_size; | |
2504 | width = upa * (cpu_width + 1) + group_width + 3; | |
2505 | apl = rounddown_pow_of_two(max(60 / width, 1)); | |
2506 | ||
2507 | printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", | |
2508 | lvl, ai->static_size, ai->reserved_size, ai->dyn_size, | |
2509 | ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); | |
2510 | ||
2511 | for (group = 0; group < ai->nr_groups; group++) { | |
2512 | const struct pcpu_group_info *gi = &ai->groups[group]; | |
2513 | int unit = 0, unit_end = 0; | |
2514 | ||
2515 | BUG_ON(gi->nr_units % upa); | |
2516 | for (alloc_end += gi->nr_units / upa; | |
2517 | alloc < alloc_end; alloc++) { | |
2518 | if (!(alloc % apl)) { | |
2519 | pr_cont("\n"); | |
2520 | printk("%spcpu-alloc: ", lvl); | |
2521 | } | |
2522 | pr_cont("[%0*d] ", group_width, group); | |
2523 | ||
2524 | for (unit_end += upa; unit < unit_end; unit++) | |
2525 | if (gi->cpu_map[unit] != NR_CPUS) | |
2526 | pr_cont("%0*d ", | |
2527 | cpu_width, gi->cpu_map[unit]); | |
2528 | else | |
2529 | pr_cont("%s ", empty_str); | |
2530 | } | |
2531 | } | |
2532 | pr_cont("\n"); | |
2533 | } | |
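 | ||
 | /* | |
 |  * Sample output (all values illustrative), for 4 cpus in one group, | |
 |  * unit_size 64K, alloc_size 256K and atom_size 4K: | |
 |  * | |
 |  *   pcpu-alloc: s8192 r8192 d28672 u65536 alloc=64*4096 | |
 |  *   pcpu-alloc: [0] 0 1 2 3 | |
 |  */ | |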
2534 | ||
2535 | /** | |
2536 | * pcpu_setup_first_chunk - initialize the first percpu chunk | |
2537 | * @ai: pcpu_alloc_info describing how the percpu area is shaped | |
2538 | * @base_addr: mapped address | |
2539 | * | |
2540 | * Initialize the first percpu chunk which contains the kernel static | |
2541 | * percpu area. This function is to be called from arch percpu area | |
2542 | * setup path. | |
2543 | * | |
2544 | * @ai contains all information necessary to initialize the first | |
2545 | * chunk and prime the dynamic percpu allocator. | |
2546 | * | |
2547 | * @ai->static_size is the size of static percpu area. | |
2548 | * | |
2549 | * @ai->reserved_size, if non-zero, specifies the amount of bytes to | |
2550 | * reserve after the static area in the first chunk. This area is | |
2551 | * reserved such that it's available only through reserved | |
2552 | * percpu allocation. This is primarily used to serve module percpu | |
2553 | * static areas on architectures where the addressing model has | |
2554 | * limited offset range for symbol relocations to guarantee module | |
2555 | * percpu symbols fall inside the relocatable range. | |
2556 | * | |
2557 | * @ai->dyn_size determines the number of bytes available for dynamic | |
2558 | * allocation in the first chunk. The area between @ai->static_size + | |
2559 | * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. | |
2560 | * | |
2561 | * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE | |
2562 | * and equal to or larger than @ai->static_size + @ai->reserved_size + | |
2563 | * @ai->dyn_size. | |
2564 | * | |
2565 | * @ai->atom_size is the allocation atom size and used as alignment | |
2566 | * for vm areas. | |
2567 | * | |
2568 | * @ai->alloc_size is the allocation size and always multiple of | |
2569 | * @ai->atom_size. This is larger than @ai->atom_size if | |
2570 | * @ai->unit_size is larger than @ai->atom_size. | |
2571 | * | |
2572 | * @ai->nr_groups and @ai->groups describe virtual memory layout of | |
2573 | * percpu areas. Units which should be colocated are put into the | |
2574 | * same group. Dynamic VM areas will be allocated according to these | |
2575 | * groupings. If @ai->nr_groups is zero, a single group containing | |
2576 | * all units is assumed. | |
2577 | * | |
2578 | * The caller should have mapped the first chunk at @base_addr and | |
2579 | * copied static data to each unit. | |
2580 | * | |
2581 | * The first chunk will always contain a static and a dynamic region. | |
2582 | * However, the static region is not managed by any chunk. If the first | |
2583 | * chunk also contains a reserved region, it is served by two chunks - | |
2584 | * one for the reserved region and one for the dynamic region. They | |
2585 | * share the same vm, but use offset regions in the area allocation map. | |
2586 | * The chunk serving the dynamic region is circulated in the chunk slots | |
2587 | * and available for dynamic allocation like any other chunk. | |
2588 | */ | |
2589 | void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |
2590 | void *base_addr) | |
2591 | { | |
2592 | size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; | |
2593 | size_t static_size, dyn_size; | |
2594 | struct pcpu_chunk *chunk; | |
2595 | unsigned long *group_offsets; | |
2596 | size_t *group_sizes; | |
2597 | unsigned long *unit_off; | |
2598 | unsigned int cpu; | |
2599 | int *unit_map; | |
2600 | int group, unit, i; | |
2601 | int map_size; | |
2602 | unsigned long tmp_addr; | |
2603 | size_t alloc_size; | |
2604 | ||
2605 | #define PCPU_SETUP_BUG_ON(cond) do { \ | |
2606 | if (unlikely(cond)) { \ | |
2607 | pr_emerg("failed to initialize, %s\n", #cond); \ | |
2608 | pr_emerg("cpu_possible_mask=%*pb\n", \ | |
2609 | cpumask_pr_args(cpu_possible_mask)); \ | |
2610 | pcpu_dump_alloc_info(KERN_EMERG, ai); \ | |
2611 | BUG(); \ | |
2612 | } \ | |
2613 | } while (0) | |
2614 | ||
2615 | /* sanity checks */ | |
2616 | PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); | |
2617 | #ifdef CONFIG_SMP | |
2618 | PCPU_SETUP_BUG_ON(!ai->static_size); | |
2619 | PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); | |
2620 | #endif | |
2621 | PCPU_SETUP_BUG_ON(!base_addr); | |
2622 | PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); | |
2623 | PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); | |
2624 | PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); | |
2625 | PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); | |
2626 | PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); | |
2627 | PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); | |
2628 | PCPU_SETUP_BUG_ON(!ai->dyn_size); | |
2629 | PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); | |
2630 | PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || | |
2631 | IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); | |
2632 | PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); | |
2633 | ||
2634 | /* process group information and build config tables accordingly */ | |
2635 | alloc_size = ai->nr_groups * sizeof(group_offsets[0]); | |
2636 | group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES); | |
2637 | if (!group_offsets) | |
2638 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
2639 | alloc_size); | |
2640 | ||
2641 | alloc_size = ai->nr_groups * sizeof(group_sizes[0]); | |
2642 | group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES); | |
2643 | if (!group_sizes) | |
2644 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
2645 | alloc_size); | |
2646 | ||
2647 | alloc_size = nr_cpu_ids * sizeof(unit_map[0]); | |
2648 | unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); | |
2649 | if (!unit_map) | |
2650 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
2651 | alloc_size); | |
2652 | ||
2653 | alloc_size = nr_cpu_ids * sizeof(unit_off[0]); | |
2654 | unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES); | |
2655 | if (!unit_off) | |
2656 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
2657 | alloc_size); | |
2658 | ||
2659 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) | |
2660 | unit_map[cpu] = UINT_MAX; | |
2661 | ||
2662 | pcpu_low_unit_cpu = NR_CPUS; | |
2663 | pcpu_high_unit_cpu = NR_CPUS; | |
2664 | ||
2665 | for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { | |
2666 | const struct pcpu_group_info *gi = &ai->groups[group]; | |
2667 | ||
2668 | group_offsets[group] = gi->base_offset; | |
2669 | group_sizes[group] = gi->nr_units * ai->unit_size; | |
2670 | ||
2671 | for (i = 0; i < gi->nr_units; i++) { | |
2672 | cpu = gi->cpu_map[i]; | |
2673 | if (cpu == NR_CPUS) | |
2674 | continue; | |
2675 | ||
2676 | PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); | |
2677 | PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); | |
2678 | PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); | |
2679 | ||
2680 | unit_map[cpu] = unit + i; | |
2681 | unit_off[cpu] = gi->base_offset + i * ai->unit_size; | |
2682 | ||
2683 | /* determine low/high unit_cpu */ | |
2684 | if (pcpu_low_unit_cpu == NR_CPUS || | |
2685 | unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) | |
2686 | pcpu_low_unit_cpu = cpu; | |
2687 | if (pcpu_high_unit_cpu == NR_CPUS || | |
2688 | unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) | |
2689 | pcpu_high_unit_cpu = cpu; | |
2690 | } | |
2691 | } | |
2692 | pcpu_nr_units = unit; | |
2693 | ||
2694 | for_each_possible_cpu(cpu) | |
2695 | PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); | |
2696 | ||
2697 | /* we're done parsing the input, undefine BUG macro and dump config */ | |
2698 | #undef PCPU_SETUP_BUG_ON | |
2699 | pcpu_dump_alloc_info(KERN_DEBUG, ai); | |
2700 | ||
2701 | pcpu_nr_groups = ai->nr_groups; | |
2702 | pcpu_group_offsets = group_offsets; | |
2703 | pcpu_group_sizes = group_sizes; | |
2704 | pcpu_unit_map = unit_map; | |
2705 | pcpu_unit_offsets = unit_off; | |
2706 | ||
2707 | /* determine basic parameters */ | |
2708 | pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; | |
2709 | pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; | |
2710 | pcpu_atom_size = ai->atom_size; | |
2711 | pcpu_chunk_struct_size = struct_size(chunk, populated, | |
2712 | BITS_TO_LONGS(pcpu_unit_pages)); | |
2713 | ||
2714 | pcpu_stats_save_ai(ai); | |
2715 | ||
2716 | /* | |
2717 | * Allocate chunk slots. The slots after the active slots are: | |
2718 | * sidelined_slot - isolated, depopulated chunks | |
2719 | * free_slot - fully free chunks | |
2720 | * to_depopulate_slot - isolated, chunks to depopulate | |
2721 | */ | |
2722 | pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1; | |
2723 | pcpu_free_slot = pcpu_sidelined_slot + 1; | |
2724 | pcpu_to_depopulate_slot = pcpu_free_slot + 1; | |
2725 | pcpu_nr_slots = pcpu_to_depopulate_slot + 1; | |
2726 | pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots * | |
2727 | sizeof(pcpu_chunk_lists[0]), | |
2728 | SMP_CACHE_BYTES); | |
2729 | if (!pcpu_chunk_lists) | |
2730 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
2731 | pcpu_nr_slots * sizeof(pcpu_chunk_lists[0])); | |
2732 | ||
2733 | for (i = 0; i < pcpu_nr_slots; i++) | |
2734 | INIT_LIST_HEAD(&pcpu_chunk_lists[i]); | |
2735 | ||
2736 | /* | |
2737 | * The end of the static region needs to be aligned with the | |
2738 | * minimum allocation size as this offsets the reserved and | |
2739 | * dynamic region. The first chunk ends page aligned by | |
2740 | * expanding the dynamic region, therefore the dynamic region | |
2741 | * can be shrunk to compensate while still staying above the | |
2742 | * configured sizes. | |
2743 | */ | |
2744 | static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); | |
2745 | dyn_size = ai->dyn_size - (static_size - ai->static_size); | |
2746 | ||
2747 | /* | |
2748 | * Initialize first chunk. | |
2749 | * If the reserved_size is non-zero, this initializes the reserved | |
2750 | * chunk. If the reserved_size is zero, the reserved chunk is NULL | |
2751 | * and the dynamic region is initialized here. The first chunk, | |
2752 | * pcpu_first_chunk, will always point to the chunk that serves | |
2753 | * the dynamic region. | |
2754 | */ | |
2755 | tmp_addr = (unsigned long)base_addr + static_size; | |
2756 | map_size = ai->reserved_size ?: dyn_size; | |
2757 | chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); | |
2758 | ||
2759 | /* init dynamic chunk if necessary */ | |
2760 | if (ai->reserved_size) { | |
2761 | pcpu_reserved_chunk = chunk; | |
2762 | ||
2763 | tmp_addr = (unsigned long)base_addr + static_size + | |
2764 | ai->reserved_size; | |
2765 | map_size = dyn_size; | |
2766 | chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); | |
2767 | } | |
2768 | ||
2769 | /* link the first chunk in */ | |
2770 | pcpu_first_chunk = chunk; | |
2771 | pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; | |
2772 | pcpu_chunk_relocate(pcpu_first_chunk, -1); | |
2773 | ||
2774 | /* include all regions of the first chunk */ | |
2775 | pcpu_nr_populated += PFN_DOWN(size_sum); | |
2776 | ||
2777 | pcpu_stats_chunk_alloc(); | |
2778 | trace_percpu_create_chunk(base_addr); | |
2779 | ||
2780 | /* we're done */ | |
2781 | pcpu_base_addr = base_addr; | |
2782 | } | |
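 | ||
 | /* | |
 |  * First chunk split example (sizes illustrative): with an 8K static | |
 |  * region, an 8K reserved region and a 28K dynamic region, | |
 |  * pcpu_alloc_first_chunk() runs twice: at base + 8K with map_size 8K | |
 |  * (becoming pcpu_reserved_chunk) and at base + 16K with map_size 28K | |
 |  * (becoming pcpu_first_chunk). Only the latter circulates in the | |
 |  * chunk slots and serves dynamic allocations. | |
 |  */ | |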
2783 | ||
2784 | #ifdef CONFIG_SMP | |
2785 | ||
2786 | const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { | |
2787 | [PCPU_FC_AUTO] = "auto", | |
2788 | [PCPU_FC_EMBED] = "embed", | |
2789 | [PCPU_FC_PAGE] = "page", | |
2790 | }; | |
2791 | ||
2792 | enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; | |
2793 | ||
2794 | static int __init percpu_alloc_setup(char *str) | |
2795 | { | |
2796 | if (!str) | |
2797 | return -EINVAL; | |
2798 | ||
2799 | if (0) | |
2800 | /* nada */; | |
2801 | #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK | |
2802 | else if (!strcmp(str, "embed")) | |
2803 | pcpu_chosen_fc = PCPU_FC_EMBED; | |
2804 | #endif | |
2805 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | |
2806 | else if (!strcmp(str, "page")) | |
2807 | pcpu_chosen_fc = PCPU_FC_PAGE; | |
2808 | #endif | |
2809 | else | |
2810 | pr_warn("unknown allocator %s specified\n", str); | |
2811 | ||
2812 | return 0; | |
2813 | } | |
2814 | early_param("percpu_alloc", percpu_alloc_setup); | |
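 | ||
 | /* | |
 |  * e.g. booting with "percpu_alloc=page" on the kernel command line | |
 |  * selects the page first chunk allocator on arches that support it. | |
 |  */ | |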
2815 | ||
2816 | /* | |
2817 | * pcpu_embed_first_chunk() is used by the generic percpu setup. | |
2818 | * Build it if needed by the arch config or the generic setup is going | |
2819 | * to be used. | |
2820 | */ | |
2821 | #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ | |
2822 | !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) | |
2823 | #define BUILD_EMBED_FIRST_CHUNK | |
2824 | #endif | |
2825 | ||
2826 | /* build pcpu_page_first_chunk() iff needed by the arch config */ | |
2827 | #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) | |
2828 | #define BUILD_PAGE_FIRST_CHUNK | |
2829 | #endif | |
2830 | ||
2831 | /* pcpu_build_alloc_info() is used by both embed and page first chunk */ | |
2832 | #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) | |
2833 | /** | |
2834 | * pcpu_build_alloc_info - build alloc_info considering distances between CPUs | |
2835 | * @reserved_size: the size of reserved percpu area in bytes | |
2836 | * @dyn_size: minimum free size for dynamic allocation in bytes | |
2837 | * @atom_size: allocation atom size | |
2838 | * @cpu_distance_fn: callback to determine distance between cpus, optional | |
2839 | * | |
2840 | * This function determines grouping of units, their mappings to cpus | |
2841 | * and other parameters considering needed percpu size, allocation | |
2842 | * atom size and distances between CPUs. | |
2843 | * | |
2844 | * Groups are always multiples of atom size and CPUs which are of | |
2845 | * LOCAL_DISTANCE both ways are grouped together and share space for | |
2846 | * units in the same group. The returned configuration is guaranteed | |
2847 | * to have CPUs on different nodes in different groups and >=75% usage | |
2848 | * of allocated virtual address space. | |
2849 | * | |
2850 | * RETURNS: | |
2851 | * On success, pointer to the new allocation_info is returned. On | |
2852 | * failure, ERR_PTR value is returned. | |
2853 | */ | |
2854 | static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info( | |
2855 | size_t reserved_size, size_t dyn_size, | |
2856 | size_t atom_size, | |
2857 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn) | |
2858 | { | |
2859 | static int group_map[NR_CPUS] __initdata; | |
2860 | static int group_cnt[NR_CPUS] __initdata; | |
2861 | static struct cpumask mask __initdata; | |
2862 | const size_t static_size = __per_cpu_end - __per_cpu_start; | |
2863 | int nr_groups = 1, nr_units = 0; | |
2864 | size_t size_sum, min_unit_size, alloc_size; | |
2865 | int upa, max_upa, best_upa; /* units_per_alloc */ | |
2866 | int last_allocs, group, unit; | |
2867 | unsigned int cpu, tcpu; | |
2868 | struct pcpu_alloc_info *ai; | |
2869 | unsigned int *cpu_map; | |
2870 | ||
2871 | /* this function may be called multiple times */ | |
2872 | memset(group_map, 0, sizeof(group_map)); | |
2873 | memset(group_cnt, 0, sizeof(group_cnt)); | |
2874 | cpumask_clear(&mask); | |
2875 | ||
2876 | /* calculate size_sum and ensure dyn_size is enough for early alloc */ | |
2877 | size_sum = PFN_ALIGN(static_size + reserved_size + | |
2878 | max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); | |
2879 | dyn_size = size_sum - static_size - reserved_size; | |
2880 | ||
2881 | /* | |
2882 | * Determine min_unit_size, alloc_size and max_upa such that | |
2883 | * alloc_size is a multiple of atom_size and is the smallest size | |
2884 | * that can accommodate 4k-aligned segments which are equal to | |
2885 | * or larger than min_unit_size. | |
2886 | */ | |
2887 | min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); | |
2888 | ||
2889 | /* determine the maximum # of units that can fit in an allocation */ | |
2890 | alloc_size = roundup(min_unit_size, atom_size); | |
2891 | upa = alloc_size / min_unit_size; | |
2892 | while (alloc_size % upa || (offset_in_page(alloc_size / upa))) | |
2893 | upa--; | |
2894 | max_upa = upa; | |
2895 | ||
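| /* | |
| * Worked example with hypothetical numbers: for size_sum = 44k and | |
| * atom_size = 2M, min_unit_size = 44k and alloc_size = 2M. upa | |
| * starts at 2M / 44k = 46 and is decremented until it divides | |
| * alloc_size with a page-aligned quotient, i.e. until it divides | |
| * 2M / 4k = 512; that gives max_upa = 32 and 64k units. | |
| */ | |
| ||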
2896 | cpumask_copy(&mask, cpu_possible_mask); | |
2897 | ||
2898 | /* group cpus according to their proximity */ | |
2899 | for (group = 0; !cpumask_empty(&mask); group++) { | |
2900 | /* pop the group's first cpu */ | |
2901 | cpu = cpumask_first(&mask); | |
2902 | group_map[cpu] = group; | |
2903 | group_cnt[group]++; | |
2904 | cpumask_clear_cpu(cpu, &mask); | |
2905 | ||
2906 | for_each_cpu(tcpu, &mask) { | |
2907 | if (!cpu_distance_fn || | |
2908 | (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE && | |
2909 | cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) { | |
2910 | group_map[tcpu] = group; | |
2911 | group_cnt[group]++; | |
2912 | cpumask_clear_cpu(tcpu, &mask); | |
2913 | } | |
2914 | } | |
2915 | } | |
2916 | nr_groups = group; | |
2917 | ||
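| /* | |
| * Continuing the example: on a two-node machine with cpus 0-3 on | |
| * node 0 and cpus 4-7 on node 1, the loop above yields group_map = | |
| * { 0, 0, 0, 0, 1, 1, 1, 1 }, group_cnt = { 4, 4 }, nr_groups = 2. | |
| * Without a cpu_distance_fn, all cpus land in a single group. | |
| */ | |
| ||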
2918 | /* | |
2919 | * Wasted space is caused by a ratio imbalance of upa to group_cnt. | |
2920 | * Expand the unit_size until we use >= 75% of the units allocated. | |
2921 | * This is related to atom_size, which can be much larger than unit_size. | |
2922 | */ | |
2923 | last_allocs = INT_MAX; | |
2924 | best_upa = 0; | |
2925 | for (upa = max_upa; upa; upa--) { | |
2926 | int allocs = 0, wasted = 0; | |
2927 | ||
2928 | if (alloc_size % upa || (offset_in_page(alloc_size / upa))) | |
2929 | continue; | |
2930 | ||
2931 | for (group = 0; group < nr_groups; group++) { | |
2932 | int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); | |
2933 | allocs += this_allocs; | |
2934 | wasted += this_allocs * upa - group_cnt[group]; | |
2935 | } | |
2936 | ||
2937 | /* | |
2938 | * Don't accept if wastage is over 1/3 of the possible CPUs. The | |
2939 | * greater-than comparison ensures upa==1 always | |
2940 | * passes the following check. | |
2941 | */ | |
2942 | if (wasted > num_possible_cpus() / 3) | |
2943 | continue; | |
2944 | ||
2945 | /* and then don't consume more memory */ | |
2946 | if (allocs > last_allocs) | |
2947 | break; | |
2948 | last_allocs = allocs; | |
2949 | best_upa = upa; | |
2950 | } | |
2951 | BUG_ON(!best_upa); | |
2952 | upa = best_upa; | |
2953 | ||
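| /* | |
| * In the running example (8 possible cpus in two groups of 4, 2M | |
| * allocations), upa = 32, 16 and 8 waste more than 8 / 3 = 2 units | |
| * and are rejected, upa = 4 wastes nothing with 2 allocations, and | |
| * upa = 2 would double the allocation count, terminating the loop; | |
| * so best_upa = 4 and the unit size becomes 512k. | |
| */ | |
| ||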
2954 | /* allocate and fill alloc_info */ | |
2955 | for (group = 0; group < nr_groups; group++) | |
2956 | nr_units += roundup(group_cnt[group], upa); | |
2957 | ||
2958 | ai = pcpu_alloc_alloc_info(nr_groups, nr_units); | |
2959 | if (!ai) | |
2960 | return ERR_PTR(-ENOMEM); | |
2961 | cpu_map = ai->groups[0].cpu_map; | |
2962 | ||
2963 | for (group = 0; group < nr_groups; group++) { | |
2964 | ai->groups[group].cpu_map = cpu_map; | |
2965 | cpu_map += roundup(group_cnt[group], upa); | |
2966 | } | |
2967 | ||
2968 | ai->static_size = static_size; | |
2969 | ai->reserved_size = reserved_size; | |
2970 | ai->dyn_size = dyn_size; | |
2971 | ai->unit_size = alloc_size / upa; | |
2972 | ai->atom_size = atom_size; | |
2973 | ai->alloc_size = alloc_size; | |
2974 | ||
2975 | for (group = 0, unit = 0; group < nr_groups; group++) { | |
2976 | struct pcpu_group_info *gi = &ai->groups[group]; | |
2977 | ||
2978 | /* | |
2979 | * Initialize base_offset as if all groups are located | |
2980 | * back-to-back. The caller should update this to | |
2981 | * reflect actual allocation. | |
2982 | */ | |
2983 | gi->base_offset = unit * ai->unit_size; | |
2984 | ||
2985 | for_each_possible_cpu(cpu) | |
2986 | if (group_map[cpu] == group) | |
2987 | gi->cpu_map[gi->nr_units++] = cpu; | |
2988 | gi->nr_units = roundup(gi->nr_units, upa); | |
2989 | unit += gi->nr_units; | |
2990 | } | |
2991 | BUG_ON(unit != nr_units); | |
2992 | ||
2993 | return ai; | |
2994 | } | |
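| ||
| /* | |
| * For the running example, the returned pcpu_alloc_info describes | |
| * two groups of 4 units each (nr_units = 8) with unit_size = 512k | |
| * and base_offsets laid out back-to-back; callers such as | |
| * pcpu_embed_first_chunk() rewrite base_offset once the real group | |
| * addresses are known. | |
| */ | |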
2995 | #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ | |
2996 | ||
2997 | #if defined(BUILD_EMBED_FIRST_CHUNK) | |
2998 | /** | |
2999 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem | |
3000 | * @reserved_size: the size of reserved percpu area in bytes | |
3001 | * @dyn_size: minimum free size for dynamic allocation in bytes | |
3002 | * @atom_size: allocation atom size | |
3003 | * @cpu_distance_fn: callback to determine distance between cpus, optional | |
3004 | * @alloc_fn: function to allocate percpu memory | |
3005 | * @free_fn: function to free percpu memory | |
3006 | * | |
3007 | * This is a helper to ease setting up the embedded first percpu chunk | |
3008 | * and can be called where pcpu_setup_first_chunk() is expected. | |
3009 | * | |
3010 | * If this function is used to set up the first chunk, it is allocated | |
3011 | * by calling @alloc_fn and used as-is without being mapped into | |
3012 | * vmalloc area. Allocations are always whole multiples of @atom_size | |
3013 | * aligned to @atom_size. | |
3014 | * | |
3015 | * This enables the first chunk to piggyback on the linear physical | |
3016 | * mapping, which often uses a larger page size. Note that this can | |
3017 | * result in a very sparse cpu->unit mapping on NUMA machines, thus | |
3018 | * requiring a large vmalloc address space. Don't use this allocator | |
3019 | * if vmalloc space is not orders of magnitude larger than the | |
3020 | * distances between node memory addresses (i.e. 32-bit NUMA machines). | |
3021 | * | |
3022 | * @dyn_size specifies the minimum dynamic area size. | |
3023 | * | |
3024 | * If the needed size is smaller than the minimum or specified unit | |
3025 | * size, the leftover is returned using @free_fn. | |
3026 | * | |
3027 | * RETURNS: | |
3028 | * 0 on success, -errno on failure. | |
3029 | */ | |
3030 | int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, | |
3031 | size_t atom_size, | |
3032 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn, | |
3033 | pcpu_fc_alloc_fn_t alloc_fn, | |
3034 | pcpu_fc_free_fn_t free_fn) | |
3035 | { | |
3036 | void *base = (void *)ULONG_MAX; | |
3037 | void **areas = NULL; | |
3038 | struct pcpu_alloc_info *ai; | |
3039 | size_t size_sum, areas_size; | |
3040 | unsigned long max_distance; | |
3041 | int group, i, highest_group, rc = 0; | |
3042 | ||
3043 | ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, | |
3044 | cpu_distance_fn); | |
3045 | if (IS_ERR(ai)) | |
3046 | return PTR_ERR(ai); | |
3047 | ||
3048 | size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; | |
3049 | areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); | |
3050 | ||
3051 | areas = memblock_alloc(areas_size, SMP_CACHE_BYTES); | |
3052 | if (!areas) { | |
3053 | rc = -ENOMEM; | |
3054 | goto out_free; | |
3055 | } | |
3056 | ||
3057 | /* allocate, copy and determine base address & max_distance */ | |
3058 | highest_group = 0; | |
3059 | for (group = 0; group < ai->nr_groups; group++) { | |
3060 | struct pcpu_group_info *gi = &ai->groups[group]; | |
3061 | unsigned int cpu = NR_CPUS; | |
3062 | void *ptr; | |
3063 | ||
3064 | for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) | |
3065 | cpu = gi->cpu_map[i]; | |
3066 | BUG_ON(cpu == NR_CPUS); | |
3067 | ||
3068 | /* allocate space for the whole group */ | |
3069 | ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); | |
3070 | if (!ptr) { | |
3071 | rc = -ENOMEM; | |
3072 | goto out_free_areas; | |
3073 | } | |
3074 | /* kmemleak tracks the percpu allocations separately */ | |
3075 | kmemleak_free(ptr); | |
3076 | areas[group] = ptr; | |
3077 | ||
3078 | base = min(ptr, base); | |
3079 | if (ptr > areas[highest_group]) | |
3080 | highest_group = group; | |
3081 | } | |
3082 | max_distance = areas[highest_group] - base; | |
3083 | max_distance += ai->unit_size * ai->groups[highest_group].nr_units; | |
3084 | ||
3085 | /* warn if the maximum distance exceeds 75% of the vmalloc space */ | |
3086 | if (max_distance > VMALLOC_TOTAL * 3 / 4) { | |
3087 | pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", | |
3088 | max_distance, VMALLOC_TOTAL); | |
3089 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | |
3090 | /* and fail if a fallback allocator is available */ | |
3091 | rc = -EINVAL; | |
3092 | goto out_free_areas; | |
3093 | #endif | |
3094 | } | |
3095 | ||
3096 | /* | |
3097 | * Copy data and free unused parts. This should happen after all | |
3098 | * allocations are complete; otherwise, we may end up with | |
3099 | * overlapping groups. | |
3100 | */ | |
3101 | for (group = 0; group < ai->nr_groups; group++) { | |
3102 | struct pcpu_group_info *gi = &ai->groups[group]; | |
3103 | void *ptr = areas[group]; | |
3104 | ||
3105 | for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { | |
3106 | if (gi->cpu_map[i] == NR_CPUS) { | |
3107 | /* unused unit, free whole */ | |
3108 | free_fn(ptr, ai->unit_size); | |
3109 | continue; | |
3110 | } | |
3111 | /* copy and return the unused part */ | |
3112 | memcpy(ptr, __per_cpu_load, ai->static_size); | |
3113 | free_fn(ptr + size_sum, ai->unit_size - size_sum); | |
3114 | } | |
3115 | } | |
3116 | ||
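| /* | |
| * E.g. with unit_size = 512k and size_sum = 44k, each used unit | |
| * keeps its first 44k (static + reserved + dynamic) and returns the | |
| * remaining 468k through @free_fn; unused units are freed whole. | |
| */ | |
| ||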
3117 | /* base address is now known, determine group base offsets */ | |
3118 | for (group = 0; group < ai->nr_groups; group++) | |
3119 | ai->groups[group].base_offset = areas[group] - base; | |
3121 | ||
3122 | pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", | |
3123 | PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, | |
3124 | ai->dyn_size, ai->unit_size); | |
3125 | ||
3126 | pcpu_setup_first_chunk(ai, base); | |
3127 | goto out_free; | |
3128 | ||
3129 | out_free_areas: | |
3130 | for (group = 0; group < ai->nr_groups; group++) | |
3131 | if (areas[group]) | |
3132 | free_fn(areas[group], | |
3133 | ai->groups[group].nr_units * ai->unit_size); | |
3134 | out_free: | |
3135 | pcpu_free_alloc_info(ai); | |
3136 | if (areas) | |
3137 | memblock_free_early(__pa(areas), areas_size); | |
3138 | return rc; | |
3139 | } | |
3140 | #endif /* BUILD_EMBED_FIRST_CHUNK */ | |
3141 | ||
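| /* | |
| * Sketch of how an arch might call the embed helper (function names | |
| * are illustrative; see arch/x86/kernel/setup_percpu.c for a real | |
| * caller). A PMD-sized atom lets the first chunk ride on hugepage | |
| * mappings: | |
| * | |
| *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, | |
| *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE, | |
| *				    my_cpu_distance_fn, my_alloc_fn, | |
| *				    my_free_fn); | |
| *	if (rc < 0) | |
| *		panic("cannot initialize percpu area (err=%d)", rc); | |
| */ | |
| ||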
3142 | #ifdef BUILD_PAGE_FIRST_CHUNK | |
3143 | /** | |
3144 | * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages | |
3145 | * @reserved_size: the size of reserved percpu area in bytes | |
3146 | * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE | |
3147 | * @free_fn: function to free percpu page, always called with PAGE_SIZE | |
3148 | * @populate_pte_fn: function to populate pte | |
3149 | * | |
3150 | * This is a helper to ease setting up the page-remapped first percpu | |
3151 | * chunk and can be called where pcpu_setup_first_chunk() is expected. | |
3152 | * | |
3153 | * This is the basic allocator. The static percpu area is allocated | |
3154 | * page-by-page into the vmalloc area. | |
3155 | * | |
3156 | * RETURNS: | |
3157 | * 0 on success, -errno on failure. | |
3158 | */ | |
3159 | int __init pcpu_page_first_chunk(size_t reserved_size, | |
3160 | pcpu_fc_alloc_fn_t alloc_fn, | |
3161 | pcpu_fc_free_fn_t free_fn, | |
3162 | pcpu_fc_populate_pte_fn_t populate_pte_fn) | |
3163 | { | |
3164 | static struct vm_struct vm; | |
3165 | struct pcpu_alloc_info *ai; | |
3166 | char psize_str[16]; | |
3167 | int unit_pages; | |
3168 | size_t pages_size; | |
3169 | struct page **pages; | |
3170 | int unit, i, j, rc = 0; | |
3171 | int upa; | |
3172 | int nr_g0_units; | |
3173 | ||
3174 | snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); | |
3175 | ||
3176 | ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); | |
3177 | if (IS_ERR(ai)) | |
3178 | return PTR_ERR(ai); | |
3179 | BUG_ON(ai->nr_groups != 1); | |
3180 | upa = ai->alloc_size / ai->unit_size; | |
3181 | nr_g0_units = roundup(num_possible_cpus(), upa); | |
3182 | if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) { | |
3183 | pcpu_free_alloc_info(ai); | |
3184 | return -EINVAL; | |
3185 | } | |
3186 | ||
3187 | unit_pages = ai->unit_size >> PAGE_SHIFT; | |
3188 | ||
3189 | /* unaligned allocations can't be freed, round up to page size */ | |
3190 | pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * | |
3191 | sizeof(pages[0])); | |
3192 | pages = memblock_alloc(pages_size, SMP_CACHE_BYTES); | |
3193 | if (!pages) | |
3194 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
3195 | pages_size); | |
3196 | ||
3197 | /* allocate pages */ | |
3198 | j = 0; | |
3199 | for (unit = 0; unit < num_possible_cpus(); unit++) { | |
3200 | unsigned int cpu = ai->groups[0].cpu_map[unit]; | |
3201 | for (i = 0; i < unit_pages; i++) { | |
3202 | void *ptr; | |
3203 | ||
3204 | ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); | |
3205 | if (!ptr) { | |
3206 | pr_warn("failed to allocate %s page for cpu%u\n", | |
3207 | psize_str, cpu); | |
3208 | goto enomem; | |
3209 | } | |
3210 | /* kmemleak tracks the percpu allocations separately */ | |
3211 | kmemleak_free(ptr); | |
3212 | pages[j++] = virt_to_page(ptr); | |
3213 | } | |
3214 | } | |
3215 | ||
3216 | /* allocate vm area, map the pages and copy static data */ | |
3217 | vm.flags = VM_ALLOC; | |
3218 | vm.size = num_possible_cpus() * ai->unit_size; | |
3219 | vm_area_register_early(&vm, PAGE_SIZE); | |
3220 | ||
3221 | for (unit = 0; unit < num_possible_cpus(); unit++) { | |
3222 | unsigned long unit_addr = | |
3223 | (unsigned long)vm.addr + unit * ai->unit_size; | |
3224 | ||
3225 | for (i = 0; i < unit_pages; i++) | |
3226 | populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); | |
3227 | ||
3228 | /* pte already populated, the following shouldn't fail */ | |
3229 | rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], | |
3230 | unit_pages); | |
3231 | if (rc < 0) | |
3232 | panic("failed to map percpu area, err=%d\n", rc); | |
3233 | ||
3234 | /* | |
3235 | * FIXME: Archs with virtual cache should flush local | |
3236 | * cache for the linear mapping here - something | |
3237 | * equivalent to flush_cache_vmap() on the local cpu. | |
3238 | * flush_cache_vmap() can't be used as most supporting | |
3239 | * data structures are not set up yet. | |
3240 | */ | |
3241 | ||
3242 | /* copy static data */ | |
3243 | memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); | |
3244 | } | |
3245 | ||
3246 | /* we're ready, commit */ | |
3247 | pr_info("%d %s pages/cpu s%zu r%zu d%zu\n", | |
3248 | unit_pages, psize_str, ai->static_size, | |
3249 | ai->reserved_size, ai->dyn_size); | |
3250 | ||
3251 | pcpu_setup_first_chunk(ai, vm.addr); | |
3252 | goto out_free_ar; | |
3253 | ||
3254 | enomem: | |
3255 | while (--j >= 0) | |
3256 | free_fn(page_address(pages[j]), PAGE_SIZE); | |
3257 | rc = -ENOMEM; | |
3258 | out_free_ar: | |
3259 | memblock_free_early(__pa(pages), pages_size); | |
3260 | pcpu_free_alloc_info(ai); | |
3261 | return rc; | |
3262 | } | |
3263 | #endif /* BUILD_PAGE_FIRST_CHUNK */ | |
3264 | ||
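| /* | |
| * Sketch of callbacks an arch might pass to pcpu_page_first_chunk() | |
| * (illustrative only; assumes memblock is up and cpu_to_node() is | |
| * already valid): | |
| * | |
| *	static void * __init my_pcpu_alloc(unsigned int cpu, | |
| *					   size_t size, size_t align) | |
| *	{ | |
| *		return memblock_alloc_try_nid(size, align, | |
| *					      __pa(MAX_DMA_ADDRESS), | |
| *					      MEMBLOCK_ALLOC_ACCESSIBLE, | |
| *					      cpu_to_node(cpu)); | |
| *	} | |
| * | |
| *	static void __init my_pcpu_free(void *ptr, size_t size) | |
| *	{ | |
| *		memblock_free_early(__pa(ptr), size); | |
| *	} | |
| */ | |
| ||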
3265 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | |
3266 | /* | |
3267 | * Generic SMP percpu area setup. | |
3268 | * | |
3269 | * The embedding helper is used because its behavior closely resembles | |
3270 | * the original non-dynamic generic percpu area setup. This is | |
3271 | * important because many archs have addressing restrictions and might | |
3272 | * fail if the percpu area is located far away from the previous | |
3273 | * location. As an added bonus, in non-NUMA cases, embedding is | |
3274 | * generally a good idea TLB-wise because the percpu area can piggyback | |
3275 | * on the physical linear memory mapping, which uses large page | |
3276 | * mappings on applicable archs. | |
3277 | */ | |
3278 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; | |
3279 | EXPORT_SYMBOL(__per_cpu_offset); | |
3280 | ||
3281 | static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, | |
3282 | size_t align) | |
3283 | { | |
3284 | return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS)); | |
3285 | } | |
3286 | ||
3287 | static void __init pcpu_dfl_fc_free(void *ptr, size_t size) | |
3288 | { | |
3289 | memblock_free_early(__pa(ptr), size); | |
3290 | } | |
3291 | ||
3292 | void __init setup_per_cpu_areas(void) | |
3293 | { | |
3294 | unsigned long delta; | |
3295 | unsigned int cpu; | |
3296 | int rc; | |
3297 | ||
3298 | /* | |
3299 | * Always reserve area for module percpu variables. That's | |
3300 | * what the legacy allocator did. | |
3301 | */ | |
3302 | rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, | |
3303 | PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, | |
3304 | pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); | |
3305 | if (rc < 0) | |
3306 | panic("Failed to initialize percpu areas."); | |
3307 | ||
3308 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; | |
3309 | for_each_possible_cpu(cpu) | |
3310 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; | |
3311 | } | |
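| ||
| /* | |
| * With the offsets above in place, per_cpu_ptr() and friends resolve | |
| * a percpu pointer roughly as (simplified sketch): | |
| * | |
| *	ptr = (void *)((unsigned long)pcp_ptr + __per_cpu_offset[cpu]); | |
| * | |
| * see SHIFT_PERCPU_PTR() and per_cpu_offset() for the real macros. | |
| */ | |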
3312 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ | |
3313 | ||
3314 | #else /* CONFIG_SMP */ | |
3315 | ||
3316 | /* | |
3317 | * UP percpu area setup. | |
3318 | * | |
3319 | * UP always uses the km-based percpu allocator with identity mapping. | |
3320 | * Static percpu variables are indistinguishable from the usual static | |
3321 | * variables and don't require any special preparation. | |
3322 | */ | |
3323 | void __init setup_per_cpu_areas(void) | |
3324 | { | |
3325 | const size_t unit_size = | |
3326 | roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, | |
3327 | PERCPU_DYNAMIC_RESERVE)); | |
3328 | struct pcpu_alloc_info *ai; | |
3329 | void *fc; | |
3330 | ||
3331 | ai = pcpu_alloc_alloc_info(1, 1); | |
3332 | fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); | |
3333 | if (!ai || !fc) | |
3334 | panic("Failed to allocate memory for percpu areas."); | |
3335 | /* kmemleak tracks the percpu allocations separately */ | |
3336 | kmemleak_free(fc); | |
3337 | ||
3338 | ai->dyn_size = unit_size; | |
3339 | ai->unit_size = unit_size; | |
3340 | ai->atom_size = unit_size; | |
3341 | ai->alloc_size = unit_size; | |
3342 | ai->groups[0].nr_units = 1; | |
3343 | ai->groups[0].cpu_map[0] = 0; | |
3344 | ||
3345 | pcpu_setup_first_chunk(ai, fc); | |
3346 | pcpu_free_alloc_info(ai); | |
3347 | } | |
3348 | ||
3349 | #endif /* CONFIG_SMP */ | |
3350 | ||
3351 | /* | |
3352 | * pcpu_nr_pages - calculate total number of populated backing pages | |
3353 | * | |
3354 | * This reflects the number of pages populated to back chunks. Metadata is | |
3355 | * excluded from the number exposed in meminfo as the number of backing pages | |
3356 | * scales with the number of cpus and can quickly outweigh the memory used for | |
3357 | * metadata. It also keeps this calculation nice and simple. | |
3358 | * | |
3359 | * RETURNS: | |
3360 | * Total number of populated backing pages in use by the allocator. | |
3361 | */ | |
3362 | unsigned long pcpu_nr_pages(void) | |
3363 | { | |
3364 | return pcpu_nr_populated * pcpu_nr_units; | |
3365 | } | |
3366 | ||
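| /* | |
| * This feeds the "Percpu:" line in /proc/meminfo. For example, with | |
| * pcpu_nr_units = 16 and pcpu_nr_populated = 12, 192 backing pages | |
| * (768 kB with 4k pages) would be reported. | |
| */ | |
| ||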
3367 | /* | |
3368 | * The percpu allocator is initialized early during boot when neither slab nor | |
3369 | * workqueue is available. Plug async management until everything is up | |
3370 | * and running. | |
3371 | */ | |
3372 | static int __init percpu_enable_async(void) | |
3373 | { | |
3374 | pcpu_async_enabled = true; | |
3375 | return 0; | |
3376 | } | |
3377 | subsys_initcall(percpu_enable_async); |