/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009 SUSE Linux Products GmbH
 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 license.
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.
 *
 * The chunk which owns an address can be determined from the address
 * using the index field in the page struct, which contains a pointer
 * to the chunk.
 *
 * Chunks are organized into lists according to free size, and the
 * allocator tries to allocate from the fullest chunk first.  Each chunk
 * maintains a maximum contiguous area size hint which is guaranteed to
 * be equal to or larger than the maximum contiguous area in the chunk.
 * This helps prevent the allocator from iterating over chunks
 * unnecessarily.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

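/*
 * Editor's illustrative sketch (not part of this file): a static percpu
 * variable lives in the <Static> region of the first chunk and is
 * reached through the cpu -> unit mapping described above.
 *
 *	DEFINE_PER_CPU(int, hits);
 *
 *	this_cpu_inc(hits);		increment this cpu's copy
 *	n = per_cpu(hits, cpu);		read a specific cpu's copy
 */
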
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
#define PCPU_SLOT_BASE_SHIFT		5

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

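/*
 * Editor's note, a worked example of the default SMP translation above
 * (addresses made up for illustration): with pcpu_base_addr ==
 * 0xffffe000 and __per_cpu_start == 0xc1500000, the chunk address
 * 0xffffe123 translates to the percpu pointer 0xc1500123
 * (0xffffe123 - 0xffffe000 + 0xc1500000), and __pcpu_ptr_to_addr()
 * performs exactly the inverse arithmetic.
 */
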
static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_slot __ro_after_init;		/* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

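/*
 * Editor's note, a worked example: a chunk with 512 bytes free has
 * fls(512) == 10 and therefore sits in slot max(10 - 5 + 2, 1) == 7,
 * while a fully free chunk (free_bytes == pcpu_unit_size) is always
 * filed in the last slot, pcpu_nr_slots - 1.
 */
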
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || chunk->contig_bits == 0)
		return 0;

	return pcpu_size_to_slot(chunk->free_bytes);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(bitmap, end, *rs);
	*re = find_next_bit(bitmap, end, *rs + 1);
}

static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end)
{
	*rs = find_next_bit(bitmap, end, *rs);
	*re = find_next_zero_bit(bitmap, end, *rs + 1);
}

/*
 * Bitmap region iterators.  Iterate over the bitmap between
 * [@start, @end).  @rs and @re should be integer variables and will be
 * set to the start and end index of the current region.
 */
#define pcpu_for_each_unpop_region(bitmap, rs, re, start, end)		     \
	for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
	     (rs) < (re);						     \
	     (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(bitmap, rs, re, start, end)		     \
	for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end));   \
	     (rs) < (re);						     \
	     (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end)))

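/*
 * Editor's illustrative sketch of the iterators above: counting the
 * pages of a chunk which still need to be populated.  @rs and @re must
 * be plain ints as required by the macros.
 *
 *	int rs, re, nr_unpop = 0;
 *
 *	pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
 *				   chunk->nr_pages)
 *		nr_unpop += re - rs;
 */
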
/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

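/*
 * Editor's note, a worked example of the helpers above: assuming
 * PCPU_BITMAP_BLOCK_BITS == 1024 (4K pages, 4 byte minimum allocation),
 * the in-chunk bit offset 2500 lands in metadata block 2500 / 1024 == 2
 * at offset 2500 & 1023 == 452 within that block.
 */
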
/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_cnt_pop_pages - counts populated backing pages in range
 * @chunk: chunk of interest
 * @bit_off: start offset
 * @bits: size of area to check
 *
 * Calculates the number of populated pages in the region
 * [page_start, page_end).  This is used to keep track of how many empty
 * populated pages are available and to decide if async work should be
 * scheduled.
 *
 * RETURNS:
 * The nr of populated pages.
 */
static inline int pcpu_cnt_pop_pages(struct pcpu_chunk *chunk, int bit_off,
				     int bits)
{
	int page_start = PFN_UP(bit_off * PCPU_MIN_ALLOC_SIZE);
	int page_end = PFN_DOWN((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	if (page_start >= page_end)
		return 0;

	/*
	 * bitmap_weight counts the number of bits set in a bitmap up to
	 * the specified number of bits.  This is counting the populated
	 * pages up to page_end and then subtracting the populated pages
	 * up to page_start to count the populated pages in
	 * [page_start, page_end).
	 */
	return bitmap_weight(chunk->populated, page_end) -
	       bitmap_weight(chunk->populated, page_start);
}

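/*
 * Editor's note, a worked example of the bitmap_weight() subtraction
 * used above: with populated == 0b1111 (pages 0-3 populated),
 * page_start == 1 and page_end == 3,
 * bitmap_weight(populated, 3) - bitmap_weight(populated, 1) == 3 - 1,
 * i.e. the two populated pages 1 and 2.
 */
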
/**
 * pcpu_chunk_update - updates the chunk metadata given a free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * This updates the chunk's contig hint and starting offset given a free area.
 * It chooses the best starting offset if the contig hints are equal.
 */
static void pcpu_chunk_update(struct pcpu_chunk *chunk, int bit_off, int bits)
{
	if (bits > chunk->contig_bits) {
		chunk->contig_bits_start = bit_off;
		chunk->contig_bits = bits;
	} else if (bits == chunk->contig_bits && chunk->contig_bits_start &&
		   (!bit_off ||
		    __ffs(bit_off) > __ffs(chunk->contig_bits_start))) {
		/* use the start with the best alignment */
		chunk->contig_bits_start = bit_off;
	}
}

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 *
 * Iterates over the chunk to find the largest free area.
 *
 * Updates:
 *      chunk->contig_bits
 *      chunk->contig_bits_start
 *      nr_empty_pop_pages
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk)
{
	int bits, nr_empty_pop_pages;
	int rs, re;	/* region start, region end */

	/* clear metadata */
	chunk->contig_bits = 0;

	bits = nr_empty_pop_pages = 0;
	pcpu_for_each_unpop_region(chunk->alloc_map, rs, re, chunk->first_bit,
				   pcpu_chunk_map_bits(chunk)) {
		bits = re - rs;

		pcpu_chunk_update(chunk, rs, bits);

		nr_empty_pop_pages += pcpu_cnt_pop_pages(chunk, rs, bits);
	}

	/*
	 * Keep track of nr_empty_pop_pages.
	 *
	 * The chunk maintains the previous number of free pages it held,
	 * so the delta is used to update the global counter.  The reserved
	 * chunk is not part of the free page count as its pages are
	 * populated at init and are special to serving reserved
	 * allocations.
	 */
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages +=
			(nr_empty_pop_pages - chunk->nr_empty_pop_pages);

	chunk->nr_empty_pop_pages = nr_empty_pop_pages;
}

/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == PCPU_BITMAP_BLOCK_BITS)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (block->contig_hint_start && contig == block->contig_hint &&
		   (!start || __ffs(start) > __ffs(block->contig_hint_start))) {
		/* use the start with the best alignment */
		block->contig_hint_start = start;
	}
}

/**
 * pcpu_block_refresh_hint - rescans a block and updates its hints
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	int rs, re;	/* region start, region end */

	/* clear hints */
	block->contig_hint = 0;
	block->left_free = block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	pcpu_for_each_unpop_region(alloc_map, rs, re, block->first_free,
				   PCPU_BITMAP_BLOCK_BITS) {
		pcpu_block_update(block, rs, re);
	}
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the allocated area */
	int s_off, e_off;	/* block offsets of the allocated area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/* update s_block */
	pcpu_block_refresh_hint(chunk, s_index);

	/* the allocation spans more than one block */
	if (s_index != e_index) {
		/* update e_block */
		pcpu_block_refresh_hint(chunk, e_index);

		/* update in-between md_blocks */
		for (block = s_block + 1; block < e_block; block++) {
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	pcpu_chunk_refresh_hint(chunk);
}

/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/* update s_block */
	pcpu_block_refresh_hint(chunk, s_index);

	/* the freed area spans more than one block */
	if (s_index != e_index) {
		/* update e_block */
		pcpu_block_refresh_hint(chunk, e_index);

		/* reset md_blocks in the middle */
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	pcpu_chunk_refresh_hint(chunk);
}

/**
 * pcpu_is_populated - determines if the region is populated
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of area
 * @next_off: return value for the next offset to start searching
 *
 * For atomic allocations, check if the backing pages are populated.
 *
 * RETURNS:
 * Bool if the backing pages are populated.
 * next_off is set so that pcpu_find_block_fit() can skip over
 * unpopulated blocks.
 */
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
			      int *next_off)
{
	int page_start, page_end, rs, re;

	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	rs = page_start;
	pcpu_next_unpop(chunk->populated, &rs, &re, page_end);
	if (rs >= page_end)
		return true;

	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
	return false;
}

/**
 * pcpu_find_block_fit - finds the block index to start searching
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE bytes)
 * @pop_only: use populated regions only
 *
 * RETURNS:
 * The offset in the bitmap to begin searching.
 * -1 if no offset is found.
 */
static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
			       size_t align, bool pop_only)
{
	int bit_off, bits;
	int re;	/* region end */

	/*
	 * Check to see if the allocation can fit in the chunk's contig hint.
	 * This is an optimization to prevent scanning by assuming that if
	 * it cannot fit in the global hint, there is memory pressure and
	 * creating a new chunk would happen soon.
	 */
	bit_off = ALIGN(chunk->contig_bits_start, align) -
		  chunk->contig_bits_start;
	if (bit_off + alloc_bits > chunk->contig_bits)
		return -1;

	pcpu_for_each_unpop_region(chunk->alloc_map, bit_off, re,
				   chunk->first_bit,
				   pcpu_chunk_map_bits(chunk)) {
		bits = re - bit_off;

		/* check alignment */
		bits -= ALIGN(bit_off, align) - bit_off;
		bit_off = ALIGN(bit_off, align);
		if (bits < alloc_bits)
			continue;

		bits = alloc_bits;
		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
						   &bit_off))
			break;

		bits = 0;
	}

	if (bit_off == pcpu_chunk_map_bits(chunk))
		return -1;

	return bit_off;
}

/**
 * pcpu_alloc_area - allocates an area from a pcpu_chunk
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE)
 * @start: bit_off to start searching
 *
 * This function takes in a @start offset to begin searching to fit an
 * allocation of @alloc_bits with alignment @align.  If it confirms a
 * valid free area, it then updates the allocation and boundary maps
 * accordingly.
 *
 * RETURNS:
 * Allocated addr offset in @chunk on success.
 * -1 if no matching area is found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
			   size_t align, int start)
{
	size_t align_mask = (align) ? (align - 1) : 0;
	int bit_off, end, oslot;

	lockdep_assert_held(&pcpu_lock);

	oslot = pcpu_chunk_slot(chunk);

	/*
	 * Search to find a fit.
	 */
	end = start + alloc_bits;
	bit_off = bitmap_find_next_zero_area(chunk->alloc_map, end, start,
					     alloc_bits, align_mask);
	if (bit_off >= end)
		return -1;

	/* update alloc map */
	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);

	/* update boundary map */
	set_bit(bit_off, chunk->bound_map);
	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
	set_bit(bit_off + alloc_bits, chunk->bound_map);

	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;

	/* update first free bit */
	if (bit_off == chunk->first_bit)
		chunk->first_bit = find_next_zero_bit(
					chunk->alloc_map,
					pcpu_chunk_map_bits(chunk),
					bit_off + alloc_bits);

	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);

	pcpu_chunk_relocate(chunk, oslot);

	return bit_off * PCPU_MIN_ALLOC_SIZE;
}

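/*
 * Editor's note, an illustrative view of the two bitmaps kept in sync
 * above: after back-to-back allocations of 2 and 3 units starting at
 * bit 0, alloc_map has bits 0-4 set while bound_map has bits 0, 2 and
 * 5 set.  A set bound_map bit marks the start of an area (and the end
 * of the previous one), which is how pcpu_free_area() below recovers
 * the size of an allocation from its offset alone.
 */
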
/**
 * pcpu_free_area - frees the corresponding offset
 * @chunk: chunk of interest
 * @off: addr offset into chunk
 *
 * This function determines the size of an allocation to free using
 * the boundary bitmap and clears the allocation map.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int off)
{
	int bit_off, bits, end, oslot;

	lockdep_assert_held(&pcpu_lock);
	pcpu_stats_area_dealloc(chunk);

	oslot = pcpu_chunk_slot(chunk);

	bit_off = off / PCPU_MIN_ALLOC_SIZE;

	/* find end index */
	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
			    bit_off + 1);
	bits = end - bit_off;
	bitmap_clear(chunk->alloc_map, bit_off, bits);

	/* update metadata */
	chunk->free_bytes += bits * PCPU_MIN_ALLOC_SIZE;

	/* update first free bit */
	chunk->first_bit = min(chunk->first_bit, bit_off);

	pcpu_block_update_hint_free(chunk, bit_off, bits);

	pcpu_chunk_relocate(chunk, oslot);
}

static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
{
	struct pcpu_block_md *md_block;

	for (md_block = chunk->md_blocks;
	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
	     md_block++) {
		md_block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
		md_block->left_free = PCPU_BITMAP_BLOCK_BITS;
		md_block->right_free = PCPU_BITMAP_BLOCK_BITS;
	}
}

/**
 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
 * @tmp_addr: the start of the region served
 * @map_size: size of the region served
 *
 * This is responsible for creating the chunks that serve the first chunk.
 * The base_addr is @tmp_addr aligned down to a page boundary, while the
 * region end is aligned up.  Offsets are kept track of to determine the
 * region served.  All this is done to appease the bitmap allocator in
 * avoiding partial blocks.
 *
 * RETURNS:
 * Chunk serving the region at @tmp_addr of @map_size.
 */
static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
							 int map_size)
{
	struct pcpu_chunk *chunk;
	unsigned long aligned_addr, lcm_align;
	int start_offset, offset_bits, region_size, region_bits;

	/* region calculations */
	aligned_addr = tmp_addr & PAGE_MASK;

	start_offset = tmp_addr - aligned_addr;

	/*
	 * Align the end of the region with the LCM of PAGE_SIZE and
	 * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
	 * the other.
	 */
	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
	region_size = ALIGN(start_offset + map_size, lcm_align);

	/* allocate chunk */
	chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) +
				    BITS_TO_LONGS(region_size >> PAGE_SHIFT),
				    0);

	INIT_LIST_HEAD(&chunk->list);

	chunk->base_addr = (void *)aligned_addr;
	chunk->start_offset = start_offset;
	chunk->end_offset = region_size - chunk->start_offset - map_size;

	chunk->nr_pages = region_size >> PAGE_SHIFT;
	region_bits = pcpu_chunk_map_bits(chunk);

	chunk->alloc_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits) *
					       sizeof(chunk->alloc_map[0]), 0);
	chunk->bound_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits + 1) *
					       sizeof(chunk->bound_map[0]), 0);
	chunk->md_blocks = memblock_virt_alloc(pcpu_chunk_nr_blocks(chunk) *
					       sizeof(chunk->md_blocks[0]), 0);
	pcpu_init_md_blocks(chunk);

	/* manage populated page bitmap */
	chunk->immutable = true;
	bitmap_fill(chunk->populated, chunk->nr_pages);
	chunk->nr_populated = chunk->nr_pages;
	chunk->nr_empty_pop_pages =
		pcpu_cnt_pop_pages(chunk, start_offset / PCPU_MIN_ALLOC_SIZE,
				   map_size / PCPU_MIN_ALLOC_SIZE);

	chunk->contig_bits = map_size / PCPU_MIN_ALLOC_SIZE;
	chunk->free_bytes = map_size;

	if (chunk->start_offset) {
		/* hide the beginning of the bitmap */
		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map, 0, offset_bits);
		set_bit(0, chunk->bound_map);
		set_bit(offset_bits, chunk->bound_map);

		chunk->first_bit = offset_bits;

		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
	}

	if (chunk->end_offset) {
		/* hide the end of the bitmap */
		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map,
			   pcpu_chunk_map_bits(chunk) - offset_bits,
			   offset_bits);
		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
			chunk->bound_map);
		set_bit(region_bits, chunk->bound_map);

		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
					     - offset_bits, offset_bits);
	}

	return chunk;
}

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;
	int region_bits;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->nr_pages = pcpu_unit_pages;
	region_bits = pcpu_chunk_map_bits(chunk);

	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
					   sizeof(chunk->alloc_map[0]));
	if (!chunk->alloc_map)
		goto alloc_map_fail;

	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
					   sizeof(chunk->bound_map[0]));
	if (!chunk->bound_map)
		goto bound_map_fail;

	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
					   sizeof(chunk->md_blocks[0]));
	if (!chunk->md_blocks)
		goto md_blocks_fail;

	pcpu_init_md_blocks(chunk);

	/* init metadata */
	chunk->contig_bits = region_bits;
	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;

	return chunk;

md_blocks_fail:
	pcpu_mem_free(chunk->bound_map);
bound_map_fail:
	pcpu_mem_free(chunk->alloc_map);
alloc_map_fail:
	pcpu_mem_free(chunk);

	return NULL;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->bound_map);
	pcpu_mem_free(chunk->alloc_map);
	pcpu_mem_free(chunk);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 * @for_alloc: if this is to populate for allocation
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 *
 * If this is @for_alloc, do not increment pcpu_nr_empty_pop_pages because it
 * is to serve an allocation in that area.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
				 int page_end, bool for_alloc)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;

	if (!for_alloc) {
		chunk->nr_empty_pop_pages += nr;
		pcpu_nr_empty_pop_pages += nr;
	}
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	chunk->nr_empty_pop_pages -= nr;
	pcpu_nr_empty_pop_pages -= nr;
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the corresponding page
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * This is an internal function that handles all but static allocations.
 * Static percpu address values should never be passed into the allocator.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the dynamic region (first chunk)? */
	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
		return pcpu_first_chunk;

	/* is it in the reserved region? */
	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
		return pcpu_reserved_chunk;

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
	int slot, off, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;
	size_t bits, bit_align;

	/*
	 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
	 * therefore alignment must be a minimum of that many bytes.
	 * An allocation may have internal fragmentation from rounding up
	 * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes.
	 */
	if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
		align = PCPU_MIN_ALLOC_SIZE;

	size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
	bits = size >> PCPU_MIN_ALLOC_SHIFT;
	bit_align = align >> PCPU_MIN_ALLOC_SHIFT;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
		     !is_power_of_2(align))) {
		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
		     size, align);
		return NULL;
	}

	if (!is_atomic)
		mutex_lock(&pcpu_alloc_mutex);

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
		if (off < 0) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		off = pcpu_alloc_area(chunk, bits, bit_align, off);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			off = pcpu_find_block_fit(chunk, bits, bit_align,
						  is_atomic);
			if (off < 0)
				continue;

			off = pcpu_alloc_area(chunk, bits, bit_align, off);
			if (off >= 0)
				goto area_found;
		}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * No space left.  Create a new chunk.  We don't want multiple
	 * tasks to create chunks simultaneously.  Serialize and create iff
	 * there's still no empty chunk after grabbing the mutex.
	 */
	if (is_atomic) {
		err = "atomic alloc failed, no space left";
		goto fail;
	}

	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
		chunk = pcpu_create_chunk();
		if (!chunk) {
			err = "failed to allocate new chunk";
			goto fail;
		}

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_chunk_relocate(chunk, -1);
	} else {
		spin_lock_irqsave(&pcpu_lock, flags);
	}

	goto restart;

area_found:
	pcpu_stats_area_alloc(chunk, size);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate if not all pages are already there */
	if (!is_atomic) {
		int page_start, page_end, rs, re;

		page_start = PFN_DOWN(off);
		page_end = PFN_UP(off + size);

		pcpu_for_each_unpop_region(chunk->populated, rs, re,
					   page_start, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re);

			spin_lock_irqsave(&pcpu_lock, flags);
			if (ret) {
				pcpu_free_area(chunk, off);
				err = "failed to populate";
				goto fail_unlock;
			}
			pcpu_chunk_populated(chunk, rs, re, true);
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}

		mutex_unlock(&pcpu_alloc_mutex);
	}

	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size, gfp);

	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
				  chunk->base_addr, off, ptr);

	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

	if (!is_atomic && warn_limit) {
		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("limit reached, disable warning\n");
	}
	if (is_atomic) {
		/* see the flag handling in pcpu_balance_workfn() */
		pcpu_atomic_alloc_failed = true;
		pcpu_schedule_balance_work();
	} else {
		mutex_unlock(&pcpu_alloc_mutex);
	}
	return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

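/*
 * Editor's illustrative sketch (caller-side usage, not part of this
 * file): a typical dynamic percpu round trip using the API above.
 *
 *	int __percpu *cnt;
 *	int cpu, total = 0;
 *
 *	cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *	if (!cnt)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(*cnt);
 *	for_each_possible_cpu(cpu)
 *		total += *per_cpu_ptr(cnt, cpu);
 *
 *	free_percpu(cnt);
 */
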
edcb4639 TH |
1279 | /** |
1280 | * __alloc_reserved_percpu - allocate reserved percpu area | |
1281 | * @size: size of area to allocate in bytes | |
1282 | * @align: alignment of area (max PAGE_SIZE) | |
1283 | * | |
9329ba97 TH |
1284 | * Allocate zero-filled percpu area of @size bytes aligned at @align |
1285 | * from reserved percpu area if arch has set it up; otherwise, | |
1286 | * allocation is served from the same dynamic area. Might sleep. | |
1287 | * Might trigger writeouts. | |
edcb4639 | 1288 | * |
ccea34b5 TH |
1289 | * CONTEXT: |
1290 | * Does GFP_KERNEL allocation. | |
1291 | * | |
edcb4639 TH |
1292 | * RETURNS: |
1293 | * Percpu pointer to the allocated area on success, NULL on failure. | |
1294 | */ | |
43cf38eb | 1295 | void __percpu *__alloc_reserved_percpu(size_t size, size_t align) |
edcb4639 | 1296 | { |
5835d96e | 1297 | return pcpu_alloc(size, align, true, GFP_KERNEL); |
edcb4639 TH |
1298 | } |
1299 | ||
a56dbddf | 1300 | /** |
1a4d7607 | 1301 | * pcpu_balance_workfn - manage the amount of free chunks and populated pages |
a56dbddf TH |
1302 | * @work: unused |
1303 | * | |
1304 | * Reclaim all fully free chunks except for the first one. | |
1305 | */ | |
fe6bd8c3 | 1306 | static void pcpu_balance_workfn(struct work_struct *work) |
fbf59bc9 | 1307 | { |
fe6bd8c3 TH |
1308 | LIST_HEAD(to_free); |
1309 | struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1]; | |
a56dbddf | 1310 | struct pcpu_chunk *chunk, *next; |
1a4d7607 | 1311 | int slot, nr_to_pop, ret; |
a56dbddf | 1312 | |
1a4d7607 TH |
1313 | /* |
1314 | * There's no reason to keep around multiple unused chunks and VM | |
1315 | * areas can be scarce. Destroy all free chunks except for one. | |
1316 | */ | |
ccea34b5 TH |
1317 | mutex_lock(&pcpu_alloc_mutex); |
1318 | spin_lock_irq(&pcpu_lock); | |
a56dbddf | 1319 | |
fe6bd8c3 | 1320 | list_for_each_entry_safe(chunk, next, free_head, list) { |
a56dbddf TH |
1321 | WARN_ON(chunk->immutable); |
1322 | ||
1323 | /* spare the first one */ | |
fe6bd8c3 | 1324 | if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) |
a56dbddf TH |
1325 | continue; |
1326 | ||
fe6bd8c3 | 1327 | list_move(&chunk->list, &to_free); |
a56dbddf TH |
1328 | } |
1329 | ||
ccea34b5 | 1330 | spin_unlock_irq(&pcpu_lock); |
a56dbddf | 1331 | |
fe6bd8c3 | 1332 | list_for_each_entry_safe(chunk, next, &to_free, list) { |
a93ace48 | 1333 | int rs, re; |
dca49645 | 1334 | |
91e914c5 DZF |
1335 | pcpu_for_each_pop_region(chunk->populated, rs, re, 0, |
1336 | chunk->nr_pages) { | |
a93ace48 | 1337 | pcpu_depopulate_chunk(chunk, rs, re); |
b539b87f TH |
1338 | spin_lock_irq(&pcpu_lock); |
1339 | pcpu_chunk_depopulated(chunk, rs, re); | |
1340 | spin_unlock_irq(&pcpu_lock); | |
a93ace48 | 1341 | } |
6081089f | 1342 | pcpu_destroy_chunk(chunk); |
a56dbddf | 1343 | } |
971f3918 | 1344 | |
1a4d7607 TH |
1345 | /* |
1346 | * Ensure there are a certain number of free populated pages for | |
1347 | * atomic allocs. Fill up from the most packed so that atomic | |
1348 | * allocs don't increase fragmentation. If atomic allocation | |
1349 | * failed previously, always populate the maximum amount. This | |
1350 | * should prevent atomic allocs larger than PAGE_SIZE from failing | |
1351 | * indefinitely; however, large atomic allocs are not | |
1352 | * something we support properly and can be highly unreliable and | |
1353 | * inefficient. | |
1354 | */ | |
1355 | retry_pop: | |
1356 | if (pcpu_atomic_alloc_failed) { | |
1357 | nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; | |
1358 | /* best effort anyway, don't worry about synchronization */ | |
1359 | pcpu_atomic_alloc_failed = false; | |
1360 | } else { | |
1361 | nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - | |
1362 | pcpu_nr_empty_pop_pages, | |
1363 | 0, PCPU_EMPTY_POP_PAGES_HIGH); | |
1364 | } | |
1365 | ||
1366 | for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { | |
1367 | int nr_unpop = 0, rs, re; | |
1368 | ||
1369 | if (!nr_to_pop) | |
1370 | break; | |
1371 | ||
1372 | spin_lock_irq(&pcpu_lock); | |
1373 | list_for_each_entry(chunk, &pcpu_slot[slot], list) { | |
8ab16c43 | 1374 | nr_unpop = chunk->nr_pages - chunk->nr_populated; |
1a4d7607 TH |
1375 | if (nr_unpop) |
1376 | break; | |
1377 | } | |
1378 | spin_unlock_irq(&pcpu_lock); | |
1379 | ||
1380 | if (!nr_unpop) | |
1381 | continue; | |
1382 | ||
1383 | /* @chunk can't go away while pcpu_alloc_mutex is held */ | |
91e914c5 DZF |
1384 | pcpu_for_each_unpop_region(chunk->populated, rs, re, 0, |
1385 | chunk->nr_pages) { | |
1a4d7607 TH |
1386 | int nr = min(re - rs, nr_to_pop); |
1387 | ||
1388 | ret = pcpu_populate_chunk(chunk, rs, rs + nr); | |
1389 | if (!ret) { | |
1390 | nr_to_pop -= nr; | |
1391 | spin_lock_irq(&pcpu_lock); | |
40064aec | 1392 | pcpu_chunk_populated(chunk, rs, rs + nr, false); |
1a4d7607 TH |
1393 | spin_unlock_irq(&pcpu_lock); |
1394 | } else { | |
1395 | nr_to_pop = 0; | |
1396 | } | |
1397 | ||
1398 | if (!nr_to_pop) | |
1399 | break; | |
1400 | } | |
1401 | } | |
1402 | ||
1403 | if (nr_to_pop) { | |
1404 | /* ran out of chunks to populate, create a new one and retry */ | |
1405 | chunk = pcpu_create_chunk(); | |
1406 | if (chunk) { | |
1407 | spin_lock_irq(&pcpu_lock); | |
1408 | pcpu_chunk_relocate(chunk, -1); | |
1409 | spin_unlock_irq(&pcpu_lock); | |
1410 | goto retry_pop; | |
1411 | } | |
1412 | } | |
1413 | ||
971f3918 | 1414 | mutex_unlock(&pcpu_alloc_mutex); |
fbf59bc9 TH |
1415 | } |
1416 | ||
1417 | /** | |
1418 | * free_percpu - free percpu area | |
1419 | * @ptr: pointer to area to free | |
1420 | * | |
ccea34b5 TH |
1421 | * Free percpu area @ptr. |
1422 | * | |
1423 | * CONTEXT: | |
1424 | * Can be called from atomic context. | |
fbf59bc9 | 1425 | */ |
43cf38eb | 1426 | void free_percpu(void __percpu *ptr) |
fbf59bc9 | 1427 | { |
129182e5 | 1428 | void *addr; |
fbf59bc9 | 1429 | struct pcpu_chunk *chunk; |
ccea34b5 | 1430 | unsigned long flags; |
40064aec | 1431 | int off; |
fbf59bc9 TH |
1432 | |
1433 | if (!ptr) | |
1434 | return; | |
1435 | ||
f528f0b8 CM |
1436 | kmemleak_free_percpu(ptr); |
1437 | ||
129182e5 AM |
1438 | addr = __pcpu_ptr_to_addr(ptr); |
1439 | ||
ccea34b5 | 1440 | spin_lock_irqsave(&pcpu_lock, flags); |
fbf59bc9 TH |
1441 | |
1442 | chunk = pcpu_chunk_addr_search(addr); | |
bba174f5 | 1443 | off = addr - chunk->base_addr; |
fbf59bc9 | 1444 | |
40064aec | 1445 | pcpu_free_area(chunk, off); |
fbf59bc9 | 1446 | |
a56dbddf | 1447 | /* if there is more than one fully free chunk, wake up the grim reaper */ | |
40064aec | 1448 | if (chunk->free_bytes == pcpu_unit_size) { |
fbf59bc9 TH |
1449 | struct pcpu_chunk *pos; |
1450 | ||
a56dbddf | 1451 | list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) |
fbf59bc9 | 1452 | if (pos != chunk) { |
1a4d7607 | 1453 | pcpu_schedule_balance_work(); |
fbf59bc9 TH |
1454 | break; |
1455 | } | |
1456 | } | |
1457 | ||
df95e795 DZ |
1458 | trace_percpu_free_percpu(chunk->base_addr, off, ptr); |
1459 | ||
ccea34b5 | 1460 | spin_unlock_irqrestore(&pcpu_lock, flags); |
fbf59bc9 TH |
1461 | } |
1462 | EXPORT_SYMBOL_GPL(free_percpu); | |
1463 | ||
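/*
 * Sketch: the usual lifetime of a dynamic percpu object.  Readers walk
 * every possible CPU's copy with per_cpu_ptr() before handing the area
 * back to free_percpu().  The names below are hypothetical.
 */
static unsigned long example_counter_sum_and_free(unsigned long __percpu *counter)
{
	unsigned long sum = 0;
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(counter, cpu);

	free_percpu(counter);	/* safe from atomic context as well */
	return sum;
}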
383776fa | 1464 | bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) |
10fad5e4 | 1465 | { |
bbddff05 | 1466 | #ifdef CONFIG_SMP |
10fad5e4 TH |
1467 | const size_t static_size = __per_cpu_end - __per_cpu_start; |
1468 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); | |
1469 | unsigned int cpu; | |
1470 | ||
1471 | for_each_possible_cpu(cpu) { | |
1472 | void *start = per_cpu_ptr(base, cpu); | |
383776fa | 1473 | void *va = (void *)addr; |
10fad5e4 | 1474 | |
383776fa | 1475 | if (va >= start && va < start + static_size) { |
8ce371f9 | 1476 | if (can_addr) { |
383776fa | 1477 | *can_addr = (unsigned long) (va - start); |
8ce371f9 PZ |
1478 | *can_addr += (unsigned long) |
1479 | per_cpu_ptr(base, get_boot_cpu_id()); | |
1480 | } | |
10fad5e4 | 1481 | return true; |
383776fa TG |
1482 | } |
1483 | } | |
bbddff05 TH |
1484 | #endif |
1485 | /* on UP, can't distinguish from other static vars, always false */ | |
10fad5e4 TH |
1486 | return false; |
1487 | } | |
1488 | ||
383776fa TG |
1489 | /** |
1490 | * is_kernel_percpu_address - test whether address is from static percpu area | |
1491 | * @addr: address to test | |
1492 | * | |
1493 | * Test whether @addr belongs to the in-kernel static percpu area. Module | |
1494 | * static percpu areas are not considered. For those, use | |
1495 | * is_module_percpu_address(). | |
1496 | * | |
1497 | * RETURNS: | |
1498 | * %true if @addr is from in-kernel static percpu area, %false otherwise. | |
1499 | */ | |
1500 | bool is_kernel_percpu_address(unsigned long addr) | |
1501 | { | |
1502 | return __is_kernel_percpu_address(addr, NULL); | |
1503 | } | |
1504 | ||
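/*
 * Sketch: a lockdep-style "is this object statically allocated?" check
 * combines this helper with the module-percpu test (illustrative only;
 * see static_obj() in the lockdep code for the real thing).
 */
static bool example_is_static_percpu(void *obj)
{
	unsigned long addr = (unsigned long)obj;

	if (is_kernel_percpu_address(addr))
		return true;			/* in-kernel static percpu */
	return is_module_percpu_address(addr);	/* module static percpu */
}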
3b034b0d VG |
1505 | /** |
1506 | * per_cpu_ptr_to_phys - convert translated percpu address to physical address | |
1507 | * @addr: the address to be converted to physical address | |
1508 | * | |
1509 | * Given @addr, which is a dereferenceable address obtained via one of | |
1510 | * the percpu access macros, this function translates it into its physical | |
1511 | * address. The caller is responsible for ensuring @addr stays valid | |
1512 | * until this function finishes. | |
1513 | * | |
67589c71 DY |
1514 | * The percpu allocator has a special setup for the first chunk, which | |
1515 | * currently supports either embedding in the linear address space or a | |
1516 | * vmalloc mapping, and, from the second chunk on, the backing allocator | |
1517 | * (currently either vm or km) provides the translation. | |
1518 | * | |
bffc4375 | 1519 | * The addr could be translated simply, without checking whether it falls |
67589c71 DY |
1520 | * into the first chunk. But the current code better reflects how the | |
1521 | * percpu allocator actually works, and the verification can discover | |
1522 | * bugs both in the percpu allocator itself and in per_cpu_ptr_to_phys() | |
1523 | * callers. So we keep the current code. | |
1524 | * | |
3b034b0d VG |
1525 | * RETURNS: |
1526 | * The physical address for @addr. | |
1527 | */ | |
1528 | phys_addr_t per_cpu_ptr_to_phys(void *addr) | |
1529 | { | |
9983b6f0 TH |
1530 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); |
1531 | bool in_first_chunk = false; | |
a855b84c | 1532 | unsigned long first_low, first_high; |
9983b6f0 TH |
1533 | unsigned int cpu; |
1534 | ||
1535 | /* | |
a855b84c | 1536 | * The following test on unit_low/high isn't strictly |
9983b6f0 TH |
1537 | * necessary but will speed up lookups of addresses which |
1538 | * aren't in the first chunk. | |
c0ebfdc3 DZF |
1539 | * |
1540 | * The address check is against full chunk sizes. pcpu_base_addr | |
1541 | * points to the beginning of the first chunk including the | |
1542 | * static region. Assumes good intent as the first chunk may | |
1543 | * not be full (i.e. < pcpu_unit_pages in size). | |
9983b6f0 | 1544 | */ |
c0ebfdc3 DZF |
1545 | first_low = (unsigned long)pcpu_base_addr + |
1546 | pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); | |
1547 | first_high = (unsigned long)pcpu_base_addr + | |
1548 | pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); | |
a855b84c TH |
1549 | if ((unsigned long)addr >= first_low && |
1550 | (unsigned long)addr < first_high) { | |
9983b6f0 TH |
1551 | for_each_possible_cpu(cpu) { |
1552 | void *start = per_cpu_ptr(base, cpu); | |
1553 | ||
1554 | if (addr >= start && addr < start + pcpu_unit_size) { | |
1555 | in_first_chunk = true; | |
1556 | break; | |
1557 | } | |
1558 | } | |
1559 | } | |
1560 | ||
1561 | if (in_first_chunk) { | |
eac522ef | 1562 | if (!is_vmalloc_addr(addr)) |
020ec653 TH |
1563 | return __pa(addr); |
1564 | else | |
9f57bd4d ES |
1565 | return page_to_phys(vmalloc_to_page(addr)) + |
1566 | offset_in_page(addr); | |
020ec653 | 1567 | } else |
9f57bd4d ES |
1568 | return page_to_phys(pcpu_addr_to_page(addr)) + |
1569 | offset_in_page(addr); | |
3b034b0d VG |
1570 | } |
1571 | ||
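/*
 * Sketch (hypothetical caller): obtain the physical address of one
 * CPU's copy of a dynamic percpu object, e.g. to hand it to firmware
 * or a device that wants a phys_addr_t.
 */
static phys_addr_t example_percpu_phys(u64 __percpu *obj, unsigned int cpu)
{
	return per_cpu_ptr_to_phys(per_cpu_ptr(obj, cpu));
}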
fbf59bc9 | 1572 | /** |
fd1e8a1f TH |
1573 | * pcpu_alloc_alloc_info - allocate percpu allocation info |
1574 | * @nr_groups: the number of groups | |
1575 | * @nr_units: the number of units | |
1576 | * | |
1577 | * Allocate ai which is large enough for @nr_groups groups containing | |
1578 | * @nr_units units. The returned ai's groups[0].cpu_map points to the | |
1579 | * cpu_map array which is long enough for @nr_units and filled with | |
1580 | * NR_CPUS. It's the caller's responsibility to initialize the cpu_map | |
1581 | * pointers of the other groups. | |
1582 | * | |
1583 | * RETURNS: | |
1584 | * Pointer to the allocated pcpu_alloc_info on success, NULL on | |
1585 | * failure. | |
1586 | */ | |
1587 | struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, | |
1588 | int nr_units) | |
1589 | { | |
1590 | struct pcpu_alloc_info *ai; | |
1591 | size_t base_size, ai_size; | |
1592 | void *ptr; | |
1593 | int unit; | |
1594 | ||
1595 | base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]), | |
1596 | __alignof__(ai->groups[0].cpu_map[0])); | |
1597 | ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); | |
1598 | ||
999c17e3 | 1599 | ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0); |
fd1e8a1f TH |
1600 | if (!ptr) |
1601 | return NULL; | |
1602 | ai = ptr; | |
1603 | ptr += base_size; | |
1604 | ||
1605 | ai->groups[0].cpu_map = ptr; | |
1606 | ||
1607 | for (unit = 0; unit < nr_units; unit++) | |
1608 | ai->groups[0].cpu_map[unit] = NR_CPUS; | |
1609 | ||
1610 | ai->nr_groups = nr_groups; | |
1611 | ai->__ai_size = PFN_ALIGN(ai_size); | |
1612 | ||
1613 | return ai; | |
1614 | } | |
1615 | ||
1616 | /** | |
1617 | * pcpu_free_alloc_info - free percpu allocation info | |
1618 | * @ai: pcpu_alloc_info to free | |
1619 | * | |
1620 | * Free @ai which was allocated by pcpu_alloc_alloc_info(). | |
1621 | */ | |
1622 | void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) | |
1623 | { | |
999c17e3 | 1624 | memblock_free_early(__pa(ai), ai->__ai_size); |
fd1e8a1f TH |
1625 | } |
1626 | ||
fd1e8a1f TH |
1627 | /** |
1628 | * pcpu_dump_alloc_info - print out information about pcpu_alloc_info | |
1629 | * @lvl: loglevel | |
1630 | * @ai: allocation info to dump | |
1631 | * | |
1632 | * Print out information about @ai using loglevel @lvl. | |
1633 | */ | |
1634 | static void pcpu_dump_alloc_info(const char *lvl, | |
1635 | const struct pcpu_alloc_info *ai) | |
033e48fb | 1636 | { |
fd1e8a1f | 1637 | int group_width = 1, cpu_width = 1, width; |
033e48fb | 1638 | char empty_str[] = "--------"; |
fd1e8a1f TH |
1639 | int alloc = 0, alloc_end = 0; |
1640 | int group, v; | |
1641 | int upa, apl; /* units per alloc, allocs per line */ | |
1642 | ||
1643 | v = ai->nr_groups; | |
1644 | while (v /= 10) | |
1645 | group_width++; | |
033e48fb | 1646 | |
fd1e8a1f | 1647 | v = num_possible_cpus(); |
033e48fb | 1648 | while (v /= 10) |
fd1e8a1f TH |
1649 | cpu_width++; |
1650 | empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; | |
033e48fb | 1651 | |
fd1e8a1f TH |
1652 | upa = ai->alloc_size / ai->unit_size; |
1653 | width = upa * (cpu_width + 1) + group_width + 3; | |
1654 | apl = rounddown_pow_of_two(max(60 / width, 1)); | |
033e48fb | 1655 | |
fd1e8a1f TH |
1656 | printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", |
1657 | lvl, ai->static_size, ai->reserved_size, ai->dyn_size, | |
1658 | ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); | |
033e48fb | 1659 | |
fd1e8a1f TH |
1660 | for (group = 0; group < ai->nr_groups; group++) { |
1661 | const struct pcpu_group_info *gi = &ai->groups[group]; | |
1662 | int unit = 0, unit_end = 0; | |
1663 | ||
1664 | BUG_ON(gi->nr_units % upa); | |
1665 | for (alloc_end += gi->nr_units / upa; | |
1666 | alloc < alloc_end; alloc++) { | |
1667 | if (!(alloc % apl)) { | |
1170532b | 1668 | pr_cont("\n"); |
fd1e8a1f TH |
1669 | printk("%spcpu-alloc: ", lvl); |
1670 | } | |
1170532b | 1671 | pr_cont("[%0*d] ", group_width, group); |
fd1e8a1f TH |
1672 | |
1673 | for (unit_end += upa; unit < unit_end; unit++) | |
1674 | if (gi->cpu_map[unit] != NR_CPUS) | |
1170532b JP |
1675 | pr_cont("%0*d ", |
1676 | cpu_width, gi->cpu_map[unit]); | |
fd1e8a1f | 1677 | else |
1170532b | 1678 | pr_cont("%s ", empty_str); |
033e48fb | 1679 | } |
033e48fb | 1680 | } |
1170532b | 1681 | pr_cont("\n"); |
033e48fb | 1682 | } |
033e48fb | 1683 | |
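/*
 * For reference, the dump above prints lines of the following shape in
 * dmesg (the numbers are illustrative, from a made-up 8-cpu box):
 *
 *   pcpu-alloc: s151552 r8192 d28672 u262144 alloc=1*2097152
 *   pcpu-alloc: [0] 0 1 2 3 4 5 6 7
 *
 * i.e. the static/reserved/dynamic/unit sizes and the allocation
 * geometry, followed by one bracketed group per allocation with its
 * cpu numbers (dashes mark unused units).
 */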
fbf59bc9 | 1684 | /** |
8d408b4b | 1685 | * pcpu_setup_first_chunk - initialize the first percpu chunk |
fd1e8a1f | 1686 | * @ai: pcpu_alloc_info describing how the percpu area is shaped |
38a6be52 | 1687 | * @base_addr: mapped address |
8d408b4b TH |
1688 | * |
1689 | * Initialize the first percpu chunk which contains the kernel static | |
1690 | * percpu area. This function is to be called from the arch percpu | |
38a6be52 | 1691 | * area setup path. |
8d408b4b | 1692 | * |
fd1e8a1f TH |
1693 | * @ai contains all information necessary to initialize the first |
1694 | * chunk and prime the dynamic percpu allocator. | |
1695 | * | |
1696 | * @ai->static_size is the size of static percpu area. | |
1697 | * | |
1698 | * @ai->reserved_size, if non-zero, specifies the number of bytes to | |
edcb4639 TH |
1699 | * reserve after the static area in the first chunk. This reserves |
1700 | * that part of the first chunk so it's available only through reserved | |
1701 | * percpu allocation. This is primarily used to serve module percpu | |
1702 | * static areas on architectures where the addressing model has | |
1703 | * limited offset range for symbol relocations to guarantee module | |
1704 | * percpu symbols fall inside the relocatable range. | |
1705 | * | |
fd1e8a1f TH |
1706 | * @ai->dyn_size determines the number of bytes available for dynamic |
1707 | * allocation in the first chunk. The area between @ai->static_size + | |
1708 | * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. | |
6074d5b0 | 1709 | * |
fd1e8a1f TH |
1710 | * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE |
1711 | * and equal to or larger than @ai->static_size + @ai->reserved_size + | |
1712 | * @ai->dyn_size. | |
8d408b4b | 1713 | * |
fd1e8a1f TH |
1714 | * @ai->atom_size is the allocation atom size and used as alignment |
1715 | * for vm areas. | |
8d408b4b | 1716 | * |
fd1e8a1f TH |
1717 | * @ai->alloc_size is the allocation size and is always a multiple of | |
1718 | * @ai->atom_size. This is larger than @ai->atom_size if | |
1719 | * @ai->unit_size is larger than @ai->atom_size. | |
1720 | * | |
1721 | * @ai->nr_groups and @ai->groups describe virtual memory layout of | |
1722 | * percpu areas. Units which should be colocated are put into the | |
1723 | * same group. Dynamic VM areas will be allocated according to these | |
1724 | * groupings. If @ai->nr_groups is zero, a single group containing | |
1725 | * all units is assumed. | |
8d408b4b | 1726 | * |
38a6be52 TH |
1727 | * The caller should have mapped the first chunk at @base_addr and |
1728 | * copied static data to each unit. | |
fbf59bc9 | 1729 | * |
c0ebfdc3 DZF |
1730 | * The first chunk will always contain a static and a dynamic region. |
1731 | * However, the static region is not managed by any chunk. If the first | |
1732 | * chunk also contains a reserved region, it is served by two chunks - | |
1733 | * one for the reserved region and one for the dynamic region. They | |
1734 | * share the same vm, but use offset regions in the area allocation map. | |
1735 | * The chunk serving the dynamic region is circulated in the chunk slots | |
1736 | * and available for dynamic allocation like any other chunk. | |
edcb4639 | 1737 | * |
fbf59bc9 | 1738 | * RETURNS: |
fb435d52 | 1739 | * 0 on success, -errno on failure. |
fbf59bc9 | 1740 | */ |
fb435d52 TH |
1741 | int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, |
1742 | void *base_addr) | |
fbf59bc9 | 1743 | { |
b9c39442 | 1744 | size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; |
d2f3c384 | 1745 | size_t static_size, dyn_size; |
0c4169c3 | 1746 | struct pcpu_chunk *chunk; |
6563297c TH |
1747 | unsigned long *group_offsets; |
1748 | size_t *group_sizes; | |
fb435d52 | 1749 | unsigned long *unit_off; |
fbf59bc9 | 1750 | unsigned int cpu; |
fd1e8a1f TH |
1751 | int *unit_map; |
1752 | int group, unit, i; | |
c0ebfdc3 DZF |
1753 | int map_size; |
1754 | unsigned long tmp_addr; | |
fbf59bc9 | 1755 | |
635b75fc TH |
1756 | #define PCPU_SETUP_BUG_ON(cond) do { \ |
1757 | if (unlikely(cond)) { \ | |
870d4b12 JP |
1758 | pr_emerg("failed to initialize, %s\n", #cond); \ |
1759 | pr_emerg("cpu_possible_mask=%*pb\n", \ | |
807de073 | 1760 | cpumask_pr_args(cpu_possible_mask)); \ |
635b75fc TH |
1761 | pcpu_dump_alloc_info(KERN_EMERG, ai); \ |
1762 | BUG(); \ | |
1763 | } \ | |
1764 | } while (0) | |
1765 | ||
2f39e637 | 1766 | /* sanity checks */ |
635b75fc | 1767 | PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); |
bbddff05 | 1768 | #ifdef CONFIG_SMP |
635b75fc | 1769 | PCPU_SETUP_BUG_ON(!ai->static_size); |
f09f1243 | 1770 | PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); |
bbddff05 | 1771 | #endif |
635b75fc | 1772 | PCPU_SETUP_BUG_ON(!base_addr); |
f09f1243 | 1773 | PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); |
635b75fc | 1774 | PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); |
f09f1243 | 1775 | PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); |
635b75fc | 1776 | PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); |
ca460b3c | 1777 | PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); |
099a19d9 | 1778 | PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); |
fb29a2cc | 1779 | PCPU_SETUP_BUG_ON(!ai->dyn_size); |
d2f3c384 | 1780 | PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); |
ca460b3c DZF |
1781 | PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || |
1782 | IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); | |
9f645532 | 1783 | PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); |
8d408b4b | 1784 | |
6563297c | 1785 | /* process group information and build config tables accordingly */ |
999c17e3 SS |
1786 | group_offsets = memblock_virt_alloc(ai->nr_groups * |
1787 | sizeof(group_offsets[0]), 0); | |
1788 | group_sizes = memblock_virt_alloc(ai->nr_groups * | |
1789 | sizeof(group_sizes[0]), 0); | |
1790 | unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); | |
1791 | unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); | |
2f39e637 | 1792 | |
fd1e8a1f | 1793 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
ffe0d5a5 | 1794 | unit_map[cpu] = UINT_MAX; |
a855b84c TH |
1795 | |
1796 | pcpu_low_unit_cpu = NR_CPUS; | |
1797 | pcpu_high_unit_cpu = NR_CPUS; | |
2f39e637 | 1798 | |
fd1e8a1f TH |
1799 | for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { |
1800 | const struct pcpu_group_info *gi = &ai->groups[group]; | |
2f39e637 | 1801 | |
6563297c TH |
1802 | group_offsets[group] = gi->base_offset; |
1803 | group_sizes[group] = gi->nr_units * ai->unit_size; | |
1804 | ||
fd1e8a1f TH |
1805 | for (i = 0; i < gi->nr_units; i++) { |
1806 | cpu = gi->cpu_map[i]; | |
1807 | if (cpu == NR_CPUS) | |
1808 | continue; | |
8d408b4b | 1809 | |
9f295664 | 1810 | PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); |
635b75fc TH |
1811 | PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); |
1812 | PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); | |
fbf59bc9 | 1813 | |
fd1e8a1f | 1814 | unit_map[cpu] = unit + i; |
fb435d52 TH |
1815 | unit_off[cpu] = gi->base_offset + i * ai->unit_size; |
1816 | ||
a855b84c TH |
1817 | /* determine low/high unit_cpu */ |
1818 | if (pcpu_low_unit_cpu == NR_CPUS || | |
1819 | unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) | |
1820 | pcpu_low_unit_cpu = cpu; | |
1821 | if (pcpu_high_unit_cpu == NR_CPUS || | |
1822 | unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) | |
1823 | pcpu_high_unit_cpu = cpu; | |
fd1e8a1f | 1824 | } |
2f39e637 | 1825 | } |
fd1e8a1f TH |
1826 | pcpu_nr_units = unit; |
1827 | ||
1828 | for_each_possible_cpu(cpu) | |
635b75fc TH |
1829 | PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); |
1830 | ||
1831 | /* we're done parsing the input, undefine BUG macro and dump config */ | |
1832 | #undef PCPU_SETUP_BUG_ON | |
bcbea798 | 1833 | pcpu_dump_alloc_info(KERN_DEBUG, ai); |
fd1e8a1f | 1834 | |
6563297c TH |
1835 | pcpu_nr_groups = ai->nr_groups; |
1836 | pcpu_group_offsets = group_offsets; | |
1837 | pcpu_group_sizes = group_sizes; | |
fd1e8a1f | 1838 | pcpu_unit_map = unit_map; |
fb435d52 | 1839 | pcpu_unit_offsets = unit_off; |
2f39e637 TH |
1840 | |
1841 | /* determine basic parameters */ | |
fd1e8a1f | 1842 | pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; |
d9b55eeb | 1843 | pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; |
6563297c | 1844 | pcpu_atom_size = ai->atom_size; |
ce3141a2 TH |
1845 | pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + |
1846 | BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); | |
cafe8816 | 1847 | |
30a5b536 DZ |
1848 | pcpu_stats_save_ai(ai); |
1849 | ||
d9b55eeb TH |
1850 | /* |
1851 | * Allocate chunk slots. The additional last slot is for | |
1852 | * empty chunks. | |
1853 | */ | |
1854 | pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; | |
999c17e3 SS |
1855 | pcpu_slot = memblock_virt_alloc( |
1856 | pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); | |
fbf59bc9 TH |
1857 | for (i = 0; i < pcpu_nr_slots; i++) |
1858 | INIT_LIST_HEAD(&pcpu_slot[i]); | |
1859 | ||
d2f3c384 DZF |
1860 | /* |
1861 | * The end of the static region needs to be aligned with the | |
1862 | * minimum allocation size as this offsets the reserved and | |
1863 | * dynamic region. The first chunk ends page aligned by | |
1864 | * expanding the dynamic region, therefore the dynamic region | |
1865 | * can be shrunk to compensate while still staying above the | |
1866 | * configured sizes. | |
1867 | */ | |
1868 | static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); | |
1869 | dyn_size = ai->dyn_size - (static_size - ai->static_size); | |
1870 | ||
edcb4639 | 1871 | /* |
c0ebfdc3 DZF |
1872 | * Initialize first chunk. |
1873 | * If the reserved_size is non-zero, this initializes the reserved | |
1874 | * chunk. If the reserved_size is zero, the reserved chunk is NULL | |
1875 | * and the dynamic region is initialized here. The first chunk, | |
1876 | * pcpu_first_chunk, will always point to the chunk that serves | |
1877 | * the dynamic region. | |
edcb4639 | 1878 | */ |
d2f3c384 DZF |
1879 | tmp_addr = (unsigned long)base_addr + static_size; |
1880 | map_size = ai->reserved_size ?: dyn_size; | |
40064aec | 1881 | chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); |
61ace7fa | 1882 | |
edcb4639 | 1883 | /* init dynamic chunk if necessary */ |
b9c39442 | 1884 | if (ai->reserved_size) { |
0c4169c3 | 1885 | pcpu_reserved_chunk = chunk; |
b9c39442 | 1886 | |
d2f3c384 | 1887 | tmp_addr = (unsigned long)base_addr + static_size + |
c0ebfdc3 | 1888 | ai->reserved_size; |
d2f3c384 | 1889 | map_size = dyn_size; |
40064aec | 1890 | chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); |
edcb4639 TH |
1891 | } |
1892 | ||
2441d15c | 1893 | /* link the first chunk in */ |
0c4169c3 | 1894 | pcpu_first_chunk = chunk; |
0cecf50c | 1895 | pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; |
ae9e6bc9 | 1896 | pcpu_chunk_relocate(pcpu_first_chunk, -1); |
fbf59bc9 | 1897 | |
30a5b536 | 1898 | pcpu_stats_chunk_alloc(); |
df95e795 | 1899 | trace_percpu_create_chunk(base_addr); |
30a5b536 | 1900 | |
fbf59bc9 | 1901 | /* we're done */ |
bba174f5 | 1902 | pcpu_base_addr = base_addr; |
fb435d52 | 1903 | return 0; |
fbf59bc9 | 1904 | } |
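/*
 * Worked example with made-up sizes: static 148K, reserved 8K, dynamic
 * 28K and unit_size 256K.  pcpu_alloc_first_chunk() is then called at
 * base + 148K covering the 8K reserved region (pcpu_reserved_chunk)
 * and again at base + 156K covering the 28K dynamic region
 * (pcpu_first_chunk); the remaining 72K of each unit stays unused.
 */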
66c3a757 | 1905 | |
bbddff05 TH |
1906 | #ifdef CONFIG_SMP |
1907 | ||
17f3609c | 1908 | const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { |
f58dc01b TH |
1909 | [PCPU_FC_AUTO] = "auto", |
1910 | [PCPU_FC_EMBED] = "embed", | |
1911 | [PCPU_FC_PAGE] = "page", | |
f58dc01b | 1912 | }; |
66c3a757 | 1913 | |
f58dc01b | 1914 | enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; |
66c3a757 | 1915 | |
f58dc01b TH |
1916 | static int __init percpu_alloc_setup(char *str) |
1917 | { | |
5479c78a CG |
1918 | if (!str) |
1919 | return -EINVAL; | |
1920 | ||
f58dc01b TH |
1921 | if (0) |
1922 | /* nada */; | |
1923 | #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK | |
1924 | else if (!strcmp(str, "embed")) | |
1925 | pcpu_chosen_fc = PCPU_FC_EMBED; | |
1926 | #endif | |
1927 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | |
1928 | else if (!strcmp(str, "page")) | |
1929 | pcpu_chosen_fc = PCPU_FC_PAGE; | |
f58dc01b TH |
1930 | #endif |
1931 | else | |
870d4b12 | 1932 | pr_warn("unknown allocator %s specified\n", str); |
66c3a757 | 1933 | |
f58dc01b | 1934 | return 0; |
66c3a757 | 1935 | } |
f58dc01b | 1936 | early_param("percpu_alloc", percpu_alloc_setup); |
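/*
 * Example: booting with "percpu_alloc=page" on the kernel command line
 * selects the page-remapped first chunk (when built in), while
 * "percpu_alloc=embed" picks the embedding allocator explicitly; an
 * unknown value falls back to the default with the warning above.
 */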
66c3a757 | 1937 | |
3c9a024f TH |
1938 | /* |
1939 | * pcpu_embed_first_chunk() is used by the generic percpu setup. | |
1940 | * Build it if it is needed by the arch config or if the generic setup | |
1941 | * is going to be used. | |
1942 | */ | |
08fc4580 TH |
1943 | #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ |
1944 | !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) | |
3c9a024f TH |
1945 | #define BUILD_EMBED_FIRST_CHUNK |
1946 | #endif | |
1947 | ||
1948 | /* build pcpu_page_first_chunk() iff needed by the arch config */ | |
1949 | #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) | |
1950 | #define BUILD_PAGE_FIRST_CHUNK | |
1951 | #endif | |
1952 | ||
1953 | /* pcpu_build_alloc_info() is used by both embed and page first chunk */ | |
1954 | #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) | |
1955 | /** | |
1956 | * pcpu_build_alloc_info - build alloc_info considering distances between CPUs | |
1957 | * @reserved_size: the size of reserved percpu area in bytes | |
1958 | * @dyn_size: minimum free size for dynamic allocation in bytes | |
1959 | * @atom_size: allocation atom size | |
1960 | * @cpu_distance_fn: callback to determine distance between cpus, optional | |
1961 | * | |
1962 | * This function determines the grouping of units, their mappings to | |
1963 | * cpus and other parameters, considering the needed percpu size, the | |
1964 | * allocation atom size and the distances between CPUs. | |
1965 | * | |
bffc4375 | 1966 | * Groups are always multiples of atom size, and CPUs which are within |
3c9a024f TH |
1967 | * LOCAL_DISTANCE of each other both ways are grouped together and | |
1968 | * share space for units in the same group. The returned configuration | |
1969 | * is guaranteed to have CPUs on different nodes in different groups | |
1970 | * and >=75% usage of the allocated virtual address space. | |
1971 | * | |
1972 | * RETURNS: | |
1973 | * On success, pointer to the new allocation_info is returned. On | |
1974 | * failure, ERR_PTR value is returned. | |
1975 | */ | |
1976 | static struct pcpu_alloc_info * __init pcpu_build_alloc_info( | |
1977 | size_t reserved_size, size_t dyn_size, | |
1978 | size_t atom_size, | |
1979 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn) | |
1980 | { | |
1981 | static int group_map[NR_CPUS] __initdata; | |
1982 | static int group_cnt[NR_CPUS] __initdata; | |
1983 | const size_t static_size = __per_cpu_end - __per_cpu_start; | |
1984 | int nr_groups = 1, nr_units = 0; | |
1985 | size_t size_sum, min_unit_size, alloc_size; | |
1986 | int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ | |
1987 | int last_allocs, group, unit; | |
1988 | unsigned int cpu, tcpu; | |
1989 | struct pcpu_alloc_info *ai; | |
1990 | unsigned int *cpu_map; | |
1991 | ||
1992 | /* this function may be called multiple times */ | |
1993 | memset(group_map, 0, sizeof(group_map)); | |
1994 | memset(group_cnt, 0, sizeof(group_cnt)); | |
1995 | ||
1996 | /* calculate size_sum and ensure dyn_size is enough for early alloc */ | |
1997 | size_sum = PFN_ALIGN(static_size + reserved_size + | |
1998 | max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); | |
1999 | dyn_size = size_sum - static_size - reserved_size; | |
2000 | ||
2001 | /* | |
2002 | * Determine min_unit_size, alloc_size and max_upa such that | |
2003 | * alloc_size is a multiple of atom_size and is the smallest size | |
25985edc | 2004 | * which can accommodate 4k-aligned segments which are equal to |
3c9a024f TH |
2005 | * or larger than min_unit_size. |
2006 | */ | |
2007 | min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); | |
2008 | ||
9c015162 | 2009 | /* determine the maximum # of units that can fit in an allocation */ |
3c9a024f TH |
2010 | alloc_size = roundup(min_unit_size, atom_size); |
2011 | upa = alloc_size / min_unit_size; | |
f09f1243 | 2012 | while (alloc_size % upa || (offset_in_page(alloc_size / upa))) |
3c9a024f TH |
2013 | upa--; |
2014 | max_upa = upa; | |
2015 | ||
2016 | /* group cpus according to their proximity */ | |
2017 | for_each_possible_cpu(cpu) { | |
2018 | group = 0; | |
2019 | next_group: | |
2020 | for_each_possible_cpu(tcpu) { | |
2021 | if (cpu == tcpu) | |
2022 | break; | |
2023 | if (group_map[tcpu] == group && cpu_distance_fn && | |
2024 | (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || | |
2025 | cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { | |
2026 | group++; | |
2027 | nr_groups = max(nr_groups, group + 1); | |
2028 | goto next_group; | |
2029 | } | |
2030 | } | |
2031 | group_map[cpu] = group; | |
2032 | group_cnt[group]++; | |
2033 | } | |
2034 | ||
2035 | /* | |
9c015162 DZF |
2036 | * Wasted space is caused by a ratio imbalance of upa to group_cnt. |
2037 | * Expand the unit_size until we use >= 75% of the units allocated. | |
2038 | * Related to atom_size, which could be much larger than the unit_size. | |
3c9a024f TH |
2039 | */ |
2040 | last_allocs = INT_MAX; | |
2041 | for (upa = max_upa; upa; upa--) { | |
2042 | int allocs = 0, wasted = 0; | |
2043 | ||
f09f1243 | 2044 | if (alloc_size % upa || (offset_in_page(alloc_size / upa))) |
3c9a024f TH |
2045 | continue; |
2046 | ||
2047 | for (group = 0; group < nr_groups; group++) { | |
2048 | int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); | |
2049 | allocs += this_allocs; | |
2050 | wasted += this_allocs * upa - group_cnt[group]; | |
2051 | } | |
2052 | ||
2053 | /* | |
2054 | * Don't accept if wastage is over 1/3. The | |
2055 | * greater-than comparison ensures upa==1 always | |
2056 | * passes the following check. | |
2057 | */ | |
2058 | if (wasted > num_possible_cpus() / 3) | |
2059 | continue; | |
2060 | ||
2061 | /* and then don't consume more memory */ | |
2062 | if (allocs > last_allocs) | |
2063 | break; | |
2064 | last_allocs = allocs; | |
2065 | best_upa = upa; | |
2066 | } | |
2067 | upa = best_upa; | |
2068 | ||
2069 | /* allocate and fill alloc_info */ | |
2070 | for (group = 0; group < nr_groups; group++) | |
2071 | nr_units += roundup(group_cnt[group], upa); | |
2072 | ||
2073 | ai = pcpu_alloc_alloc_info(nr_groups, nr_units); | |
2074 | if (!ai) | |
2075 | return ERR_PTR(-ENOMEM); | |
2076 | cpu_map = ai->groups[0].cpu_map; | |
2077 | ||
2078 | for (group = 0; group < nr_groups; group++) { | |
2079 | ai->groups[group].cpu_map = cpu_map; | |
2080 | cpu_map += roundup(group_cnt[group], upa); | |
2081 | } | |
2082 | ||
2083 | ai->static_size = static_size; | |
2084 | ai->reserved_size = reserved_size; | |
2085 | ai->dyn_size = dyn_size; | |
2086 | ai->unit_size = alloc_size / upa; | |
2087 | ai->atom_size = atom_size; | |
2088 | ai->alloc_size = alloc_size; | |
2089 | ||
2090 | for (group = 0, unit = 0; group_cnt[group]; group++) { | |
2091 | struct pcpu_group_info *gi = &ai->groups[group]; | |
2092 | ||
2093 | /* | |
2094 | * Initialize base_offset as if all groups are located | |
2095 | * back-to-back. The caller should update this to | |
2096 | * reflect actual allocation. | |
2097 | */ | |
2098 | gi->base_offset = unit * ai->unit_size; | |
2099 | ||
2100 | for_each_possible_cpu(cpu) | |
2101 | if (group_map[cpu] == group) | |
2102 | gi->cpu_map[gi->nr_units++] = cpu; | |
2103 | gi->nr_units = roundup(gi->nr_units, upa); | |
2104 | unit += gi->nr_units; | |
2105 | } | |
2106 | BUG_ON(unit != nr_units); | |
2107 | ||
2108 | return ai; | |
2109 | } | |
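/*
 * Worked example of the upa search above, with illustrative numbers:
 * for atom_size = 2M and min_unit_size = 188K, alloc_size =
 * roundup(188K, 2M) = 2M and the initial upa is 2M / 188K = 10.
 * 10 and 9 both leave a remainder (or a unit size that isn't page
 * aligned), so max_upa settles at 8, for a unit_size of 2M / 8 = 256K.
 */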
2110 | #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ | |
2111 | ||
2112 | #if defined(BUILD_EMBED_FIRST_CHUNK) | |
66c3a757 TH |
2113 | /** |
2114 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem | |
66c3a757 | 2115 | * @reserved_size: the size of reserved percpu area in bytes |
4ba6ce25 | 2116 | * @dyn_size: minimum free size for dynamic allocation in bytes |
c8826dd5 TH |
2117 | * @atom_size: allocation atom size |
2118 | * @cpu_distance_fn: callback to determine distance between cpus, optional | |
2119 | * @alloc_fn: function to allocate percpu page | |
25985edc | 2120 | * @free_fn: function to free percpu page |
66c3a757 TH |
2121 | * |
2122 | * This is a helper to ease setting up an embedded first percpu chunk and | |
2123 | * can be called where pcpu_setup_first_chunk() is expected. | |
2124 | * | |
2125 | * If this function is used to set up the first chunk, it is allocated | |
c8826dd5 TH |
2126 | * by calling @alloc_fn and used as-is without being mapped into |
2127 | * vmalloc area. Allocations are always whole multiples of @atom_size | |
2128 | * aligned to @atom_size. | |
2129 | * | |
2130 | * This enables the first chunk to piggy back on the linear physical | |
2131 | * mapping, which often uses a larger page size. Please note that this | |
2132 | * can result in a very sparse cpu->unit mapping on NUMA machines, thus | |
2133 | * requiring a large vmalloc address space. Don't use this allocator if | |
2134 | * vmalloc space is not orders of magnitude larger than distances | |
2135 | * between node memory addresses (ie. 32bit NUMA machines). | |
66c3a757 | 2136 | * |
4ba6ce25 | 2137 | * @dyn_size specifies the minimum dynamic area size. |
66c3a757 TH |
2138 | * |
2139 | * If the needed size is smaller than the minimum or specified unit | |
c8826dd5 | 2140 | * size, the leftover is returned using @free_fn. |
66c3a757 TH |
2141 | * |
2142 | * RETURNS: | |
fb435d52 | 2143 | * 0 on success, -errno on failure. |
66c3a757 | 2144 | */ |
4ba6ce25 | 2145 | int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, |
c8826dd5 TH |
2146 | size_t atom_size, |
2147 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn, | |
2148 | pcpu_fc_alloc_fn_t alloc_fn, | |
2149 | pcpu_fc_free_fn_t free_fn) | |
66c3a757 | 2150 | { |
c8826dd5 TH |
2151 | void *base = (void *)ULONG_MAX; |
2152 | void **areas = NULL; | |
fd1e8a1f | 2153 | struct pcpu_alloc_info *ai; |
93c76b6b | 2154 | size_t size_sum, areas_size; |
2155 | unsigned long max_distance; | |
9b739662 | 2156 | int group, i, highest_group, rc; |
66c3a757 | 2157 | |
c8826dd5 TH |
2158 | ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, |
2159 | cpu_distance_fn); | |
fd1e8a1f TH |
2160 | if (IS_ERR(ai)) |
2161 | return PTR_ERR(ai); | |
66c3a757 | 2162 | |
fd1e8a1f | 2163 | size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; |
c8826dd5 | 2164 | areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); |
fa8a7094 | 2165 | |
999c17e3 | 2166 | areas = memblock_virt_alloc_nopanic(areas_size, 0); |
c8826dd5 | 2167 | if (!areas) { |
fb435d52 | 2168 | rc = -ENOMEM; |
c8826dd5 | 2169 | goto out_free; |
fa8a7094 | 2170 | } |
66c3a757 | 2171 | |
9b739662 | 2172 | /* allocate, copy and determine base address & max_distance */ |
2173 | highest_group = 0; | |
c8826dd5 TH |
2174 | for (group = 0; group < ai->nr_groups; group++) { |
2175 | struct pcpu_group_info *gi = &ai->groups[group]; | |
2176 | unsigned int cpu = NR_CPUS; | |
2177 | void *ptr; | |
2178 | ||
2179 | for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) | |
2180 | cpu = gi->cpu_map[i]; | |
2181 | BUG_ON(cpu == NR_CPUS); | |
2182 | ||
2183 | /* allocate space for the whole group */ | |
2184 | ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); | |
2185 | if (!ptr) { | |
2186 | rc = -ENOMEM; | |
2187 | goto out_free_areas; | |
2188 | } | |
f528f0b8 CM |
2189 | /* kmemleak tracks the percpu allocations separately */ |
2190 | kmemleak_free(ptr); | |
c8826dd5 | 2191 | areas[group] = ptr; |
fd1e8a1f | 2192 | |
c8826dd5 | 2193 | base = min(ptr, base); |
9b739662 | 2194 | if (ptr > areas[highest_group]) |
2195 | highest_group = group; | |
2196 | } | |
2197 | max_distance = areas[highest_group] - base; | |
2198 | max_distance += ai->unit_size * ai->groups[highest_group].nr_units; | |
2199 | ||
2200 | /* warn if maximum distance is further than 75% of vmalloc space */ | |
2201 | if (max_distance > VMALLOC_TOTAL * 3 / 4) { | |
2202 | pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", | |
2203 | max_distance, VMALLOC_TOTAL); | |
2204 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | |
2205 | /* and fail if we have fallback */ | |
2206 | rc = -EINVAL; | |
2207 | goto out_free_areas; | |
2208 | #endif | |
42b64281 TH |
2209 | } |
2210 | ||
2211 | /* | |
2212 | * Copy data and free unused parts. This should happen after all | |
2213 | * allocations are complete; otherwise, we may end up with | |
2214 | * overlapping groups. | |
2215 | */ | |
2216 | for (group = 0; group < ai->nr_groups; group++) { | |
2217 | struct pcpu_group_info *gi = &ai->groups[group]; | |
2218 | void *ptr = areas[group]; | |
c8826dd5 TH |
2219 | |
2220 | for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { | |
2221 | if (gi->cpu_map[i] == NR_CPUS) { | |
2222 | /* unused unit, free whole */ | |
2223 | free_fn(ptr, ai->unit_size); | |
2224 | continue; | |
2225 | } | |
2226 | /* copy and return the unused part */ | |
2227 | memcpy(ptr, __per_cpu_load, ai->static_size); | |
2228 | free_fn(ptr + size_sum, ai->unit_size - size_sum); | |
2229 | } | |
fa8a7094 | 2230 | } |
66c3a757 | 2231 | |
c8826dd5 | 2232 | /* base address is now known, determine group base offsets */ |
6ea529a2 | 2233 | for (group = 0; group < ai->nr_groups; group++) { |
c8826dd5 | 2234 | ai->groups[group].base_offset = areas[group] - base; |
6ea529a2 | 2235 | } |
c8826dd5 | 2236 | |
870d4b12 | 2237 | pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", |
fd1e8a1f TH |
2238 | PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, |
2239 | ai->dyn_size, ai->unit_size); | |
d4b95f80 | 2240 | |
fb435d52 | 2241 | rc = pcpu_setup_first_chunk(ai, base); |
c8826dd5 TH |
2242 | goto out_free; |
2243 | ||
2244 | out_free_areas: | |
2245 | for (group = 0; group < ai->nr_groups; group++) | |
f851c8d8 MH |
2246 | if (areas[group]) |
2247 | free_fn(areas[group], | |
2248 | ai->groups[group].nr_units * ai->unit_size); | |
c8826dd5 | 2249 | out_free: |
fd1e8a1f | 2250 | pcpu_free_alloc_info(ai); |
c8826dd5 | 2251 | if (areas) |
999c17e3 | 2252 | memblock_free_early(__pa(areas), areas_size); |
fb435d52 | 2253 | return rc; |
d4b95f80 | 2254 | } |
3c9a024f | 2255 | #endif /* BUILD_EMBED_FIRST_CHUNK */ |
d4b95f80 | 2256 | |
3c9a024f | 2257 | #ifdef BUILD_PAGE_FIRST_CHUNK |
d4b95f80 | 2258 | /** |
00ae4064 | 2259 | * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages |
d4b95f80 TH |
2260 | * @reserved_size: the size of reserved percpu area in bytes |
2261 | * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE | |
25985edc | 2262 | * @free_fn: function to free percpu page, always called with PAGE_SIZE |
d4b95f80 TH |
2263 | * @populate_pte_fn: function to populate pte |
2264 | * | |
00ae4064 TH |
2265 | * This is a helper to ease setting up a page-remapped first percpu |
2266 | * chunk and can be called where pcpu_setup_first_chunk() is expected. | |
d4b95f80 TH |
2267 | * |
2268 | * This is the basic allocator. The static percpu area is allocated | |
2269 | * page-by-page into the vmalloc area. | |
2270 | * | |
2271 | * RETURNS: | |
fb435d52 | 2272 | * 0 on success, -errno on failure. |
d4b95f80 | 2273 | */ |
fb435d52 TH |
2274 | int __init pcpu_page_first_chunk(size_t reserved_size, |
2275 | pcpu_fc_alloc_fn_t alloc_fn, | |
2276 | pcpu_fc_free_fn_t free_fn, | |
2277 | pcpu_fc_populate_pte_fn_t populate_pte_fn) | |
d4b95f80 | 2278 | { |
8f05a6a6 | 2279 | static struct vm_struct vm; |
fd1e8a1f | 2280 | struct pcpu_alloc_info *ai; |
00ae4064 | 2281 | char psize_str[16]; |
ce3141a2 | 2282 | int unit_pages; |
d4b95f80 | 2283 | size_t pages_size; |
ce3141a2 | 2284 | struct page **pages; |
fb435d52 | 2285 | int unit, i, j, rc; |
8f606604 | 2286 | int upa; |
2287 | int nr_g0_units; | |
d4b95f80 | 2288 | |
00ae4064 TH |
2289 | snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); |
2290 | ||
4ba6ce25 | 2291 | ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); |
fd1e8a1f TH |
2292 | if (IS_ERR(ai)) |
2293 | return PTR_ERR(ai); | |
2294 | BUG_ON(ai->nr_groups != 1); | |
8f606604 | 2295 | upa = ai->alloc_size/ai->unit_size; |
2296 | nr_g0_units = roundup(num_possible_cpus(), upa); | |
2297 | if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) { | |
2298 | pcpu_free_alloc_info(ai); | |
2299 | return -EINVAL; | |
2300 | } | |
fd1e8a1f TH |
2301 | |
2302 | unit_pages = ai->unit_size >> PAGE_SHIFT; | |
d4b95f80 TH |
2303 | |
2304 | /* unaligned allocations can't be freed, round up to page size */ | |
fd1e8a1f TH |
2305 | pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * |
2306 | sizeof(pages[0])); | |
999c17e3 | 2307 | pages = memblock_virt_alloc(pages_size, 0); |
d4b95f80 | 2308 | |
8f05a6a6 | 2309 | /* allocate pages */ |
d4b95f80 | 2310 | j = 0; |
8f606604 | 2311 | for (unit = 0; unit < num_possible_cpus(); unit++) { |
2312 | unsigned int cpu = ai->groups[0].cpu_map[unit]; | |
ce3141a2 | 2313 | for (i = 0; i < unit_pages; i++) { |
d4b95f80 TH |
2314 | void *ptr; |
2315 | ||
3cbc8565 | 2316 | ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); |
d4b95f80 | 2317 | if (!ptr) { |
870d4b12 | 2318 | pr_warn("failed to allocate %s page for cpu%u\n", |
8f606604 | 2319 | psize_str, cpu); |
d4b95f80 TH |
2320 | goto enomem; |
2321 | } | |
f528f0b8 CM |
2322 | /* kmemleak tracks the percpu allocations separately */ |
2323 | kmemleak_free(ptr); | |
ce3141a2 | 2324 | pages[j++] = virt_to_page(ptr); |
d4b95f80 | 2325 | } |
8f606604 | 2326 | } |
d4b95f80 | 2327 | |
8f05a6a6 TH |
2328 | /* allocate vm area, map the pages and copy static data */ |
2329 | vm.flags = VM_ALLOC; | |
fd1e8a1f | 2330 | vm.size = num_possible_cpus() * ai->unit_size; |
8f05a6a6 TH |
2331 | vm_area_register_early(&vm, PAGE_SIZE); |
2332 | ||
fd1e8a1f | 2333 | for (unit = 0; unit < num_possible_cpus(); unit++) { |
1d9d3257 | 2334 | unsigned long unit_addr = |
fd1e8a1f | 2335 | (unsigned long)vm.addr + unit * ai->unit_size; |
8f05a6a6 | 2336 | |
ce3141a2 | 2337 | for (i = 0; i < unit_pages; i++) |
8f05a6a6 TH |
2338 | populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); |
2339 | ||
2340 | /* pte already populated, the following shouldn't fail */ | |
fb435d52 TH |
2341 | rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], |
2342 | unit_pages); | |
2343 | if (rc < 0) | |
2344 | panic("failed to map percpu area, err=%d\n", rc); | |
66c3a757 | 2345 | |
8f05a6a6 TH |
2346 | /* |
2347 | * FIXME: Archs with virtual cache should flush local | |
2348 | * cache for the linear mapping here - something | |
2349 | * equivalent to flush_cache_vmap() on the local cpu. | |
2350 | * flush_cache_vmap() can't be used as most supporting | |
2351 | * data structures are not set up yet. | |
2352 | */ | |
2353 | ||
2354 | /* copy static data */ | |
fd1e8a1f | 2355 | memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); |
66c3a757 TH |
2356 | } |
2357 | ||
2358 | /* we're ready, commit */ | |
870d4b12 | 2359 | pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n", |
fd1e8a1f TH |
2360 | unit_pages, psize_str, vm.addr, ai->static_size, |
2361 | ai->reserved_size, ai->dyn_size); | |
d4b95f80 | 2362 | |
fb435d52 | 2363 | rc = pcpu_setup_first_chunk(ai, vm.addr); |
d4b95f80 TH |
2364 | goto out_free_ar; |
2365 | ||
2366 | enomem: | |
2367 | while (--j >= 0) | |
ce3141a2 | 2368 | free_fn(page_address(pages[j]), PAGE_SIZE); |
fb435d52 | 2369 | rc = -ENOMEM; |
d4b95f80 | 2370 | out_free_ar: |
999c17e3 | 2371 | memblock_free_early(__pa(pages), pages_size); |
fd1e8a1f | 2372 | pcpu_free_alloc_info(ai); |
fb435d52 | 2373 | return rc; |
d4b95f80 | 2374 | } |
3c9a024f | 2375 | #endif /* BUILD_PAGE_FIRST_CHUNK */ |
d4b95f80 | 2376 | |
bbddff05 | 2377 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA |
e74e3962 | 2378 | /* |
bbddff05 | 2379 | * Generic SMP percpu area setup. |
e74e3962 TH |
2380 | * |
2381 | * The embedding helper is used because its behavior closely resembles | |
2382 | * the original non-dynamic generic percpu area setup. This is | |
2383 | * important because many archs have addressing restrictions and might | |
2384 | * fail if the percpu area is located far away from the previous | |
2385 | * location. As an added bonus, in non-NUMA cases, embedding is | |
2386 | * generally a good idea TLB-wise because percpu area can piggy back | |
2387 | * on the physical linear memory mapping which uses large page | |
2388 | * mappings on applicable archs. | |
2389 | */ | |
e74e3962 TH |
2390 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; |
2391 | EXPORT_SYMBOL(__per_cpu_offset); | |
2392 | ||
c8826dd5 TH |
2393 | static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, |
2394 | size_t align) | |
2395 | { | |
999c17e3 SS |
2396 | return memblock_virt_alloc_from_nopanic( |
2397 | size, align, __pa(MAX_DMA_ADDRESS)); | |
c8826dd5 | 2398 | } |
66c3a757 | 2399 | |
c8826dd5 TH |
2400 | static void __init pcpu_dfl_fc_free(void *ptr, size_t size) |
2401 | { | |
999c17e3 | 2402 | memblock_free_early(__pa(ptr), size); |
c8826dd5 TH |
2403 | } |
2404 | ||
e74e3962 TH |
2405 | void __init setup_per_cpu_areas(void) |
2406 | { | |
e74e3962 TH |
2407 | unsigned long delta; |
2408 | unsigned int cpu; | |
fb435d52 | 2409 | int rc; |
e74e3962 TH |
2410 | |
2411 | /* | |
2412 | * Always reserve area for module percpu variables. That's | |
2413 | * what the legacy allocator did. | |
2414 | */ | |
fb435d52 | 2415 | rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, |
c8826dd5 TH |
2416 | PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, |
2417 | pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); | |
fb435d52 | 2418 | if (rc < 0) |
bbddff05 | 2419 | panic("Failed to initialize percpu areas."); |
e74e3962 TH |
2420 | |
2421 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; | |
2422 | for_each_possible_cpu(cpu) | |
fb435d52 | 2423 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; |
66c3a757 | 2424 | } |
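/*
 * Conceptually, after this loop a static percpu variable for a given
 * cpu is reached by adding that cpu's __per_cpu_offset[] slot to the
 * variable's link-time address; this is what the per_cpu()/
 * per_cpu_ptr() accessors boil down to, roughly:
 *
 *	ptr_for_cpu = (void *)&var + __per_cpu_offset[cpu];
 */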
bbddff05 TH |
2425 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ |
2426 | ||
2427 | #else /* CONFIG_SMP */ | |
2428 | ||
2429 | /* | |
2430 | * UP percpu area setup. | |
2431 | * | |
2432 | * UP always uses the km-based percpu allocator with an identity mapping. | |
2433 | * Static percpu variables are indistinguishable from the usual static | |
2434 | * variables and don't require any special preparation. | |
2435 | */ | |
2436 | void __init setup_per_cpu_areas(void) | |
2437 | { | |
2438 | const size_t unit_size = | |
2439 | roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, | |
2440 | PERCPU_DYNAMIC_RESERVE)); | |
2441 | struct pcpu_alloc_info *ai; | |
2442 | void *fc; | |
2443 | ||
2444 | ai = pcpu_alloc_alloc_info(1, 1); | |
999c17e3 SS |
2445 | fc = memblock_virt_alloc_from_nopanic(unit_size, |
2446 | PAGE_SIZE, | |
2447 | __pa(MAX_DMA_ADDRESS)); | |
bbddff05 TH |
2448 | if (!ai || !fc) |
2449 | panic("Failed to allocate memory for percpu areas."); | |
100d13c3 CM |
2450 | /* kmemleak tracks the percpu allocations separately */ |
2451 | kmemleak_free(fc); | |
bbddff05 TH |
2452 | |
2453 | ai->dyn_size = unit_size; | |
2454 | ai->unit_size = unit_size; | |
2455 | ai->atom_size = unit_size; | |
2456 | ai->alloc_size = unit_size; | |
2457 | ai->groups[0].nr_units = 1; | |
2458 | ai->groups[0].cpu_map[0] = 0; | |
2459 | ||
2460 | if (pcpu_setup_first_chunk(ai, fc) < 0) | |
2461 | panic("Failed to initialize percpu areas."); | |
2462 | } | |
2463 | ||
2464 | #endif /* CONFIG_SMP */ | |
099a19d9 | 2465 | |
1a4d7607 TH |
2466 | /* |
2467 | * The percpu allocator is initialized early during boot, when neither | |
2468 | * slab nor workqueue is available. Plug async management until | |
2469 | * everything is up and running. | |
2470 | */ | |
2471 | static int __init percpu_enable_async(void) | |
2472 | { | |
2473 | pcpu_async_enabled = true; | |
2474 | return 0; | |
2475 | } | |
2476 | subsys_initcall(percpu_enable_async); |