/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009	SUSE Linux Products GmbH
 * Copyright (C) 2009	Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in the vmalloc
 * area.  Each chunk consists of a boot-time determined number of
 * units and the first chunk is used for static percpu variables in
 * the kernel image (special boot time alloc/init handling is
 * necessary as these areas need to be brought up before allocation
 * services are running).  Units grow as necessary and all units grow
 * or shrink in unison.  When a chunk is filled up, another chunk is
 * allocated, again in the vmalloc area.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of a single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative one an allocated region.  Allocation inside
 * a chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif

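/*
 * For illustration (hypothetical addresses): if pcpu_base_addr were
 * 0xe0000000 and __per_cpu_start were 0xc1000000, the default mapping
 * above would turn the vmalloc address 0xe0000100 into the percpu
 * pointer 0xc1000100 and back - a constant offset either way.
 */
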
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The size of the
 * reserved region is kept in pcpu_reserved_chunk_limit.  When the
 * reserved area doesn't exist, the following variables contain NULL
 * and 0 respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * The free path accesses and alters only the index data structures,
 * so it can be safely called from atomic context.  When memory needs
 * to be returned to the system, the free path schedules reclaim_work
 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks the chunks
 * to be reclaimed, releases both locks and frees the chunks.  Note
 * that it's necessary to grab both locks to remove a chunk from
 * circulation as the allocation path might be referencing the chunk
 * with only pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

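/*
 * For illustration: with PCPU_SLOT_BASE_SHIFT == 5 the expression in
 * __pcpu_size_to_slot() works out to max(fls(size) - 3, 1), so e.g.
 * free sizes 32-63 map to slot 3, 64-127 to slot 4 and 128-255 to
 * slot 5, while a fully free chunk (free_size == pcpu_unit_size) is
 * special-cased into the last slot by pcpu_size_to_slot().
 */
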
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

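/*
 * Example (hypothetical numbers): with pcpu_unit_pages == 8 and
 * pcpu_unit_map[2] == 2, page 3 of cpu 2's unit has page index
 * 2 * 8 + 3 == 19, and its address is the chunk base plus
 * pcpu_unit_offsets[2] plus 3 << PAGE_SHIFT.
 */
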
static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

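/*
 * For illustration, assuming a chunk whose populated bitmap is
 * 0b00111100 (pages 2-5 populated): pcpu_for_each_pop_region(chunk,
 * rs, re, 0, 8) visits the single region [2, 6) while
 * pcpu_for_each_unpop_region() visits [0, 2) and then [6, 8).
 */
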
/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	/* is it in the first chunk? */
	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
		/* is it in the reserved area? */
		if (addr < first_start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[smp_processor_id()];
	return pcpu_get_page_chunk(vmalloc_to_page(addr));
}

/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend the area map of @chunk so that it can accommodate an
 * allocation.  A single allocation can split an area into three
 * areas, so this function makes sure that @chunk->map has at least
 * two extra slots.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
 * if the area map is extended.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	spin_unlock_irq(&pcpu_lock);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
		spin_lock_irq(&pcpu_lock);
		return -ENOMEM;
	}

	/*
	 * Acquire pcpu_lock and switch to the new area map.  Only a
	 * free could have happened in between, so map_used couldn't
	 * have grown.
	 */
	spin_lock_irq(&pcpu_lock);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	return 1;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

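/*
 * Worked example: take a 512 byte free block at map index i and an
 * allocation that needs head == 64 (e.g. due to alignment) and
 * tail == 320, i.e. an allocation of 128 bytes.  pcpu_split_block()
 * rewrites map[i..i+2] from { 512, ... } to { 64, 128, 320 }; the
 * caller then marks the middle entry negative to record the
 * allocation.
 */
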
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

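/*
 * Continuing the worked example from pcpu_split_block(): freeing the
 * 128 byte area turns { 64, -128, 320 } back into { 64, 128, 320 },
 * and the two merge passes in pcpu_free_area() then collapse it into
 * the single free entry { 512 }.
 */
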
/**
 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
 * @chunk: chunk of interest
 * @bitmapp: output parameter for bitmap
 * @may_alloc: may allocate the array
 *
 * Returns pointer to array of pointers to struct page and bitmap,
 * both of which can be indexed with pcpu_page_idx().  The returned
 * array is cleared to zero and *@bitmapp is copied from
 * @chunk->populated.  Note that there is only one array and bitmap
 * and access exclusion is the caller's responsibility.
 *
 * CONTEXT:
 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
 * Otherwise, don't care.
 *
 * RETURNS:
 * Pointer to temp pages array on success, NULL on failure.
 */
static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
					       unsigned long **bitmapp,
					       bool may_alloc)
{
	static struct page **pages;
	static unsigned long *bitmap;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
	size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
			     sizeof(unsigned long);

	if (!pages || !bitmap) {
		if (may_alloc && !pages)
			pages = pcpu_mem_alloc(pages_size);
		if (may_alloc && !bitmap)
			bitmap = pcpu_mem_alloc(bitmap_size);
		if (!pages || !bitmap)
			return NULL;
	}

	memset(pages, 0, pages_size);
	bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);

	*bitmapp = bitmap;
	return pages;
}

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}

/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep) {
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be an overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @populated: populated bitmap
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, unsigned long *populated,
			     int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}

	for (i = page_start; i < page_end; i++)
		__clear_bit(i, populated);
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @populated: populated bitmap
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting corresponding bits in
 * @chunk->populated bitmap and whatever is necessary for reverse
 * lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, unsigned long *populated,
			  int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;
	}

	/* mapping successful, link chunk and mark populated */
	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
		__set_bit(i, populated);
	}

	return 0;

err:
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	return err;
}

841 | ||
842 | /** | |
843 | * pcpu_post_map_flush - flush cache after mapping | |
844 | * @chunk: pcpu_chunk the regions to be flushed belong to | |
845 | * @page_start: page index of the first page to be flushed | |
846 | * @page_end: page index of the last page to be flushed + 1 | |
847 | * | |
848 | * Pages [@page_start,@page_end) of @chunk have been mapped. Flush | |
849 | * cache. | |
850 | * | |
851 | * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once | |
852 | * for the whole region. | |
853 | */ | |
854 | static void pcpu_post_map_flush(struct pcpu_chunk *chunk, | |
855 | int page_start, int page_end) | |
856 | { | |
2f39e637 TH |
857 | flush_cache_vmap( |
858 | pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), | |
859 | pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); | |
c8a51be4 TH |
860 | } |
861 | ||
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  The vcache is flushed before unmapping; the TLB
 * flush is left to vmalloc's lazy handling.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	struct page **pages;
	unsigned long *populated;
	int rs, re;

	/* quick path, check whether it's empty already */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		if (rs == page_start && re == page_end)
			return;
		break;
	}

	/* immutable chunks can't be depopulated */
	WARN_ON(chunk->immutable);

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int free_end = page_start, unmap_end = page_start;
	struct page **pages;
	unsigned long *populated;
	unsigned int cpu;
	int rs, re, rc;

	/* quick path, check whether all pages are already there */
	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
		if (rs == page_start && re == page_end)
			goto clear;
		break;
	}

	/* need to allocate and map pages, this chunk can't be immutable */
	WARN_ON(chunk->immutable);

	pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
	if (!pages)
		return -ENOMEM;

	/* alloc and map */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_free;
		free_end = re;
	}

	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_map_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_unmap;
		unmap_end = re;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
clear:
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
	return 0;

err_unmap:
	pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);
	pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);
	return rc;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		return NULL;
	}
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;
	chunk->base_addr = chunk->vm->addr;

	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
		    pcpu_extend_area_map(chunk) < 0)
			goto fail_unlock;
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			switch (pcpu_extend_area_map(chunk)) {
			case 0:
				break;
			case 1:
				goto restart;	/* pcpu_lock dropped, restart */
			default:
				goto fail_unlock;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irq(&pcpu_lock);

	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto fail_unlock_mutex;

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irq(&pcpu_lock);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irq(&pcpu_lock);
		pcpu_free_area(chunk, off);
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	return __addr_to_pcpu_ptr(chunk->base_addr + off);

fail_unlock:
	spin_unlock_irq(&pcpu_lock);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

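/*
 * Example usage (illustrative only, not part of this file): a set of
 * per-cpu counters, each cpu bumping its own copy.
 *
 *	int *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *
 *	if (cnt)
 *		(*per_cpu_ptr(cnt, smp_processor_id()))++;
 *	...
 *	free_percpu(cnt);
 */
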
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		free_pcpu_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

static inline size_t pcpu_calc_fc_sizes(size_t static_size,
					size_t reserved_size,
					ssize_t *dyn_sizep)
{
	size_t size_sum;

	size_sum = PFN_ALIGN(static_size + reserved_size +
			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
	if (*dyn_sizep != 0)
		*dyn_sizep = size_sum - static_size - reserved_size;

	return size_sum;
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize the
 * cpu_map pointers of the other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	free_bootmem(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is a multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

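	/*
	 * Sizing example (hypothetical numbers): if size_sum is 320k
	 * and atom_size is 2M, alloc_size becomes 2M and the loop
	 * above walks upa down from 6 until 2M divides evenly into
	 * page aligned units, settling on upa == 4, i.e. four 512k
	 * units per 2M allocation.
	 */
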
	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
		group_cnt_max = max(group_cnt_max, group_cnt[group]);
	}

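	/*
	 * E.g. four possible cpus on two NUMA nodes (0,1 on node A
	 * and 2,3 on node B, with inter-node distance exceeding
	 * LOCAL_DISTANCE) end up with group_map == { 0, 0, 1, 1 }
	 * and group_cnt == { 2, 2 }.
	 */
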
1378 | /* | |
1379 | * Expand unit size until address space usage goes over 75% | |
1380 | * and then as much as possible without using more address | |
1381 | * space. | |
1382 | */ | |
1383 | last_allocs = INT_MAX; | |
1384 | for (upa = max_upa; upa; upa--) { | |
1385 | int allocs = 0, wasted = 0; | |
1386 | ||
1387 | if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | |
1388 | continue; | |
1389 | ||
fd1e8a1f | 1390 | for (group = 0; group < nr_groups; group++) { |
033e48fb TH |
1391 | int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); |
1392 | allocs += this_allocs; | |
1393 | wasted += this_allocs * upa - group_cnt[group]; | |
1394 | } | |
1395 | ||
1396 | /* | |
1397 | * Don't accept if wastage is over 25%: wasted > cpus/3
1398 | * means wasted / (cpus + wasted) > 1/4. The greater-than
1399 | * comparison ensures upa==1 always passes the following check.
1400 | */ | |
1401 | if (wasted > num_possible_cpus() / 3) | |
1402 | continue; | |
1403 | ||
1404 | /* and then don't consume more memory */ | |
1405 | if (allocs > last_allocs) | |
1406 | break; | |
1407 | last_allocs = allocs; | |
1408 | best_upa = upa; | |
1409 | } | |
fd1e8a1f TH |
1410 | upa = best_upa; |
1411 | ||
1412 | /* allocate and fill alloc_info */ | |
1413 | for (group = 0; group < nr_groups; group++) | |
1414 | nr_units += roundup(group_cnt[group], upa); | |
1415 | ||
1416 | ai = pcpu_alloc_alloc_info(nr_groups, nr_units); | |
1417 | if (!ai) | |
1418 | return ERR_PTR(-ENOMEM); | |
1419 | cpu_map = ai->groups[0].cpu_map; | |
1420 | ||
1421 | for (group = 0; group < nr_groups; group++) { | |
1422 | ai->groups[group].cpu_map = cpu_map; | |
1423 | cpu_map += roundup(group_cnt[group], upa); | |
1424 | } | |
1425 | ||
1426 | ai->static_size = static_size; | |
1427 | ai->reserved_size = reserved_size; | |
1428 | ai->dyn_size = dyn_size; | |
1429 | ai->unit_size = alloc_size / upa; | |
1430 | ai->atom_size = atom_size; | |
1431 | ai->alloc_size = alloc_size; | |
1432 | ||
1433 | for (group = 0, unit = 0; group_cnt[group]; group++) { | |
1434 | struct pcpu_group_info *gi = &ai->groups[group]; | |
1435 | ||
1436 | /* | |
1437 | * Initialize base_offset as if all groups are located | |
1438 | * back-to-back. The caller should update this to | |
1439 | * reflect actual allocation. | |
1440 | */ | |
1441 | gi->base_offset = unit * ai->unit_size; | |
033e48fb | 1442 | |
033e48fb TH |
1443 | for_each_possible_cpu(cpu) |
1444 | if (group_map[cpu] == group) | |
fd1e8a1f TH |
1445 | gi->cpu_map[gi->nr_units++] = cpu; |
1446 | gi->nr_units = roundup(gi->nr_units, upa); | |
1447 | unit += gi->nr_units; | |
033e48fb | 1448 | } |
fd1e8a1f | 1449 | BUG_ON(unit != nr_units); |
033e48fb | 1450 | |
fd1e8a1f | 1451 | return ai; |
033e48fb TH |
1452 | } |
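/*
 * Illustrative sketch, not part of the allocator: the max_upa
 * derivation used above, pulled out as a stand-alone helper.  The
 * function name is hypothetical; the logic mirrors the loop in
 * pcpu_build_alloc_info() - shrink upa until alloc_size divides
 * evenly into page-aligned units.
 */
static int __init pcpu_example_max_upa(size_t min_unit_size,
				       size_t atom_size)
{
	size_t alloc_size = roundup(min_unit_size, atom_size);
	int upa = alloc_size / min_unit_size;

	/* each unit (alloc_size / upa) must be a whole number of pages */
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	return upa;
}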
1453 | ||
fd1e8a1f TH |
1454 | /** |
1455 | * pcpu_dump_alloc_info - print out information about pcpu_alloc_info | |
1456 | * @lvl: loglevel | |
1457 | * @ai: allocation info to dump | |
1458 | * | |
1459 | * Print out information about @ai using loglevel @lvl. | |
1460 | */ | |
1461 | static void pcpu_dump_alloc_info(const char *lvl, | |
1462 | const struct pcpu_alloc_info *ai) | |
033e48fb | 1463 | { |
fd1e8a1f | 1464 | int group_width = 1, cpu_width = 1, width; |
033e48fb | 1465 | char empty_str[] = "--------"; |
fd1e8a1f TH |
1466 | int alloc = 0, alloc_end = 0; |
1467 | int group, v; | |
1468 | int upa, apl; /* units per alloc, allocs per line */ | |
1469 | ||
1470 | v = ai->nr_groups; | |
1471 | while (v /= 10) | |
1472 | group_width++; | |
033e48fb | 1473 | |
fd1e8a1f | 1474 | v = num_possible_cpus(); |
033e48fb | 1475 | while (v /= 10) |
fd1e8a1f TH |
1476 | cpu_width++; |
1477 | empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; | |
033e48fb | 1478 | |
fd1e8a1f TH |
1479 | upa = ai->alloc_size / ai->unit_size; |
1480 | width = upa * (cpu_width + 1) + group_width + 3; | |
1481 | apl = rounddown_pow_of_two(max(60 / width, 1)); | |
033e48fb | 1482 | |
fd1e8a1f TH |
1483 | printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", |
1484 | lvl, ai->static_size, ai->reserved_size, ai->dyn_size, | |
1485 | ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); | |
033e48fb | 1486 | |
fd1e8a1f TH |
1487 | for (group = 0; group < ai->nr_groups; group++) { |
1488 | const struct pcpu_group_info *gi = &ai->groups[group]; | |
1489 | int unit = 0, unit_end = 0; | |
1490 | ||
1491 | BUG_ON(gi->nr_units % upa); | |
1492 | for (alloc_end += gi->nr_units / upa; | |
1493 | alloc < alloc_end; alloc++) { | |
1494 | if (!(alloc % apl)) { | |
033e48fb | 1495 | printk("\n"); |
fd1e8a1f TH |
1496 | printk("%spcpu-alloc: ", lvl); |
1497 | } | |
1498 | printk("[%0*d] ", group_width, group); | |
1499 | ||
1500 | for (unit_end += upa; unit < unit_end; unit++) | |
1501 | if (gi->cpu_map[unit] != NR_CPUS) | |
1502 | printk("%0*d ", cpu_width, | |
1503 | gi->cpu_map[unit]); | |
1504 | else | |
1505 | printk("%s ", empty_str); | |
033e48fb | 1506 | } |
033e48fb TH |
1507 | } |
1508 | printk("\n"); | |
1509 | } | |
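/*
 * Example of the dump format above, with hypothetical values for a
 * 2-group, 4-cpu machine (s/r/d/u are static, reserved, dynamic and
 * unit sizes; alloc is alloc_size expressed as multiples of atom_size):
 *
 *   pcpu-alloc: s8192 r8192 d16384 u32768 alloc=8*4096
 *   pcpu-alloc: [0] 0 [0] 1 [1] 2 [1] 3
 *
 * Unused units would show as '--' style markers via empty_str.
 */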
033e48fb | 1510 | |
fbf59bc9 | 1511 | /** |
8d408b4b | 1512 | * pcpu_setup_first_chunk - initialize the first percpu chunk |
fd1e8a1f | 1513 | * @ai: pcpu_alloc_info describing how the percpu area is shaped
38a6be52 | 1514 | * @base_addr: mapped address |
8d408b4b TH |
1515 | * |
1516 | * Initialize the first percpu chunk which contains the kernel static | |
1517 | * percpu area. This function is to be called from the arch percpu area
38a6be52 | 1518 | * setup path. |
8d408b4b | 1519 | * |
fd1e8a1f TH |
1520 | * @ai contains all information necessary to initialize the first |
1521 | * chunk and prime the dynamic percpu allocator. | |
1522 | * | |
1523 | * @ai->static_size is the size of static percpu area. | |
1524 | * | |
1525 | * @ai->reserved_size, if non-zero, specifies the number of bytes to
edcb4639 TH |
1526 | * reserve after the static area in the first chunk. This reserves |
1527 | * the first chunk such that it's available only through reserved | |
1528 | * percpu allocation. This is primarily used to serve module percpu | |
1529 | * static areas on architectures where the addressing model has | |
1530 | * limited offset range for symbol relocations to guarantee module | |
1531 | * percpu symbols fall inside the relocatable range. | |
1532 | * | |
fd1e8a1f TH |
1533 | * @ai->dyn_size determines the number of bytes available for dynamic |
1534 | * allocation in the first chunk. The area between @ai->static_size + | |
1535 | * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. | |
6074d5b0 | 1536 | * |
fd1e8a1f TH |
1537 | * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE |
1538 | * and equal to or larger than @ai->static_size + @ai->reserved_size + | |
1539 | * @ai->dyn_size. | |
1540 | * | |
1541 | * @ai->atom_size is the allocation atom size and used as alignment | |
1542 | * for vm areas. | |
1543 | * | |
1544 | * @ai->alloc_size is the allocation size and is always a multiple of
1545 | * @ai->atom_size. This is larger than @ai->atom_size if | |
1546 | * @ai->unit_size is larger than @ai->atom_size. | |
1547 | * | |
1548 | * @ai->nr_groups and @ai->groups describe virtual memory layout of | |
1549 | * percpu areas. Units which should be colocated are put into the | |
1550 | * same group. Dynamic VM areas will be allocated according to these | |
1551 | * groupings. If @ai->nr_groups is zero, a single group containing | |
1552 | * all units is assumed. | |
8d408b4b | 1553 | * |
38a6be52 TH |
1554 | * The caller should have mapped the first chunk at @base_addr and |
1555 | * copied static data to each unit. | |
fbf59bc9 | 1556 | * |
edcb4639 TH |
1557 | * If the first chunk ends up with both reserved and dynamic areas, it |
1558 | * is served by two chunks - one to serve the core static and reserved | |
1559 | * areas and the other for the dynamic area. They share the same vm | |
1560 | * and page map but use different area allocation maps to stay away
1561 | * from each other. The latter chunk is circulated in the chunk slots | |
1562 | * and available for dynamic allocation like any other chunk.
1563 | * | |
fbf59bc9 | 1564 | * RETURNS: |
fb435d52 | 1565 | * 0 on success, -errno on failure. |
fbf59bc9 | 1566 | */ |
fb435d52 TH |
1567 | int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, |
1568 | void *base_addr) | |
fbf59bc9 | 1569 | { |
edcb4639 | 1570 | static int smap[2], dmap[2]; |
fd1e8a1f TH |
1571 | size_t dyn_size = ai->dyn_size; |
1572 | size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; | |
edcb4639 | 1573 | struct pcpu_chunk *schunk, *dchunk = NULL; |
fb435d52 | 1574 | unsigned long *unit_off; |
fd1e8a1f TH |
1575 | unsigned int cpu; |
1576 | int *unit_map; | |
1577 | int group, unit, i; | |
fbf59bc9 | 1578 | |
2f39e637 | 1579 | /* sanity checks */ |
edcb4639 TH |
1580 | BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || |
1581 | ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); | |
fd1e8a1f TH |
1582 | BUG_ON(ai->nr_groups <= 0); |
1583 | BUG_ON(!ai->static_size); | |
38a6be52 | 1584 | BUG_ON(!base_addr); |
fd1e8a1f TH |
1585 | BUG_ON(ai->unit_size < size_sum); |
1586 | BUG_ON(ai->unit_size & ~PAGE_MASK); | |
1587 | BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); | |
1588 | ||
1589 | pcpu_dump_alloc_info(KERN_DEBUG, ai); | |
8d408b4b | 1590 | |
fb435d52 | 1591 | /* determine number of units and initialize unit_map and base */ |
fd1e8a1f | 1592 | unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0])); |
fb435d52 | 1593 | unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0])); |
2f39e637 | 1594 | |
fd1e8a1f TH |
1595 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
1596 | unit_map[cpu] = NR_CPUS; | |
1597 | pcpu_first_unit_cpu = NR_CPUS; | |
2f39e637 | 1598 | |
fd1e8a1f TH |
1599 | for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { |
1600 | const struct pcpu_group_info *gi = &ai->groups[group]; | |
2f39e637 | 1601 | |
fd1e8a1f TH |
1602 | for (i = 0; i < gi->nr_units; i++) { |
1603 | cpu = gi->cpu_map[i]; | |
1604 | if (cpu == NR_CPUS) | |
1605 | continue; | |
2f39e637 | 1606 | |
fd1e8a1f TH |
1607 | BUG_ON(cpu > nr_cpu_ids || !cpu_possible(cpu)); |
1608 | BUG_ON(unit_map[cpu] != NR_CPUS); | |
1609 | ||
1610 | unit_map[cpu] = unit + i; | |
fb435d52 TH |
1611 | unit_off[cpu] = gi->base_offset + i * ai->unit_size; |
1612 | ||
fd1e8a1f TH |
1613 | if (pcpu_first_unit_cpu == NR_CPUS) |
1614 | pcpu_first_unit_cpu = cpu; | |
1615 | } | |
2f39e637 | 1616 | } |
fd1e8a1f TH |
1617 | pcpu_last_unit_cpu = cpu; |
1618 | pcpu_nr_units = unit; | |
1619 | ||
1620 | for_each_possible_cpu(cpu) | |
1621 | BUG_ON(unit_map[cpu] == NR_CPUS); | |
1622 | ||
1623 | pcpu_unit_map = unit_map; | |
fb435d52 | 1624 | pcpu_unit_offsets = unit_off; |
2f39e637 TH |
1625 | |
1626 | /* determine basic parameters */ | |
fd1e8a1f | 1627 | pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; |
d9b55eeb | 1628 | pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; |
2f39e637 | 1629 | pcpu_chunk_size = pcpu_nr_units * pcpu_unit_size; |
ce3141a2 TH |
1630 | pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + |
1631 | BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); | |
fbf59bc9 | 1632 | |
d9b55eeb TH |
1633 | /* |
1634 | * Allocate chunk slots. The additional last slot is for | |
1635 | * empty chunks. | |
1636 | */ | |
1637 | pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; | |
fbf59bc9 TH |
1638 | pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0])); |
1639 | for (i = 0; i < pcpu_nr_slots; i++) | |
1640 | INIT_LIST_HEAD(&pcpu_slot[i]); | |
1641 | ||
edcb4639 TH |
1642 | /* |
1643 | * Initialize static chunk. If reserved_size is zero, the | |
1644 | * static chunk covers static area + dynamic allocation area | |
1645 | * in the first chunk. If reserved_size is not zero, it | |
1646 | * covers static area + reserved area (mostly used for module | |
1647 | * static percpu allocation). | |
1648 | */ | |
2441d15c TH |
1649 | schunk = alloc_bootmem(pcpu_chunk_struct_size); |
1650 | INIT_LIST_HEAD(&schunk->list); | |
bba174f5 | 1651 | schunk->base_addr = base_addr; |
61ace7fa TH |
1652 | schunk->map = smap; |
1653 | schunk->map_alloc = ARRAY_SIZE(smap); | |
38a6be52 | 1654 | schunk->immutable = true; |
ce3141a2 | 1655 | bitmap_fill(schunk->populated, pcpu_unit_pages); |
edcb4639 | 1656 | |
fd1e8a1f TH |
1657 | if (ai->reserved_size) { |
1658 | schunk->free_size = ai->reserved_size; | |
ae9e6bc9 | 1659 | pcpu_reserved_chunk = schunk; |
fd1e8a1f | 1660 | pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size; |
edcb4639 TH |
1661 | } else { |
1662 | schunk->free_size = dyn_size; | |
1663 | dyn_size = 0; /* dynamic area covered */ | |
1664 | } | |
2441d15c | 1665 | schunk->contig_hint = schunk->free_size; |
fbf59bc9 | 1666 | |
fd1e8a1f | 1667 | schunk->map[schunk->map_used++] = -ai->static_size; |
61ace7fa TH |
1668 | if (schunk->free_size) |
1669 | schunk->map[schunk->map_used++] = schunk->free_size; | |
1670 | ||
edcb4639 TH |
1671 | /* init dynamic chunk if necessary */ |
1672 | if (dyn_size) { | |
ce3141a2 | 1673 | dchunk = alloc_bootmem(pcpu_chunk_struct_size); |
edcb4639 | 1674 | INIT_LIST_HEAD(&dchunk->list); |
bba174f5 | 1675 | dchunk->base_addr = base_addr; |
edcb4639 TH |
1676 | dchunk->map = dmap; |
1677 | dchunk->map_alloc = ARRAY_SIZE(dmap); | |
38a6be52 | 1678 | dchunk->immutable = true; |
ce3141a2 | 1679 | bitmap_fill(dchunk->populated, pcpu_unit_pages); |
edcb4639 TH |
1680 | |
1681 | dchunk->contig_hint = dchunk->free_size = dyn_size; | |
1682 | dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit; | |
1683 | dchunk->map[dchunk->map_used++] = dchunk->free_size; | |
1684 | } | |
1685 | ||
2441d15c | 1686 | /* link the first chunk in */ |
ae9e6bc9 TH |
1687 | pcpu_first_chunk = dchunk ?: schunk; |
1688 | pcpu_chunk_relocate(pcpu_first_chunk, -1); | |
fbf59bc9 TH |
1689 | |
1690 | /* we're done */ | |
bba174f5 | 1691 | pcpu_base_addr = base_addr; |
fb435d52 | 1692 | return 0; |
fbf59bc9 | 1693 | } |
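/*
 * Sketch of the first-chunk state this function leaves behind,
 * assuming both reserved and dynamic areas are configured:
 *
 *   schunk (static + reserved): map = { -static_size, reserved_size }
 *   dchunk (dynamic):           map = { -(static_size + reserved_size),
 *                                       dyn_size }
 *
 * Negative map entries mark allocated regions and positive entries
 * free ones, per the map convention described at the top of the file.
 */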
66c3a757 | 1694 | |
f58dc01b TH |
1695 | const char *pcpu_fc_names[PCPU_FC_NR] __initdata = { |
1696 | [PCPU_FC_AUTO] = "auto", | |
1697 | [PCPU_FC_EMBED] = "embed", | |
1698 | [PCPU_FC_PAGE] = "page", | |
1699 | [PCPU_FC_LPAGE] = "lpage", | |
1700 | }; | |
1701 | ||
1702 | enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; | |
1703 | ||
1704 | static int __init percpu_alloc_setup(char *str) | |
1705 | { | |
1706 | if (0) | |
1707 | /* nada */; | |
1708 | #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK | |
1709 | else if (!strcmp(str, "embed")) | |
1710 | pcpu_chosen_fc = PCPU_FC_EMBED; | |
1711 | #endif | |
1712 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | |
1713 | else if (!strcmp(str, "page")) | |
1714 | pcpu_chosen_fc = PCPU_FC_PAGE; | |
1715 | #endif | |
1716 | #ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK | |
1717 | else if (!strcmp(str, "lpage")) | |
1718 | pcpu_chosen_fc = PCPU_FC_LPAGE; | |
1719 | #endif | |
1720 | else | |
1721 | pr_warning("PERCPU: unknown allocator %s specified\n", str); | |
1722 | ||
1723 | return 0; | |
1724 | } | |
1725 | early_param("percpu_alloc", percpu_alloc_setup); | |
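/*
 * Example: on architectures that build the corresponding helpers,
 * booting with "percpu_alloc=page" on the kernel command line makes
 * the arch setup code use pcpu_page_first_chunk() instead of the
 * default auto selection; "embed" and "lpage" work analogously.
 */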
1726 | ||
08fc4580 TH |
1727 | #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ |
1728 | !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) | |
66c3a757 TH |
1729 | /** |
1730 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem | |
66c3a757 TH |
1731 | * @reserved_size: the size of reserved percpu area in bytes |
1732 | * @dyn_size: free size for dynamic allocation in bytes, -1 for auto | |
66c3a757 TH |
1733 | * |
1734 | * This is a helper to ease setting up the embedded first percpu chunk
1735 | * and can be called where pcpu_setup_first_chunk() is expected.
1736 | * | |
1737 | * If this function is used to setup the first chunk, it is allocated | |
1738 | * as a contiguous area using bootmem allocator and used as-is without | |
1739 | * being mapped into vmalloc area. This enables the first chunk to | |
1740 | * piggy back on the linear physical mapping which often uses larger | |
1741 | * page size. | |
1742 | * | |
1743 | * When @dyn_size is positive, dynamic area might be larger than | |
788e5abc TH |
1744 | * specified to fill page alignment. When @dyn_size is auto, |
1745 | * @dyn_size is just big enough to fill page alignment after static | |
1746 | * and reserved areas. | |
66c3a757 TH |
1747 | * |
1748 | * If the needed size is smaller than the minimum or specified unit | |
1749 | * size, the leftover is returned to the bootmem allocator. | |
1750 | * | |
1751 | * RETURNS: | |
fb435d52 | 1752 | * 0 on success, -errno on failure. |
66c3a757 | 1753 | */ |
fb435d52 | 1754 | int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size) |
66c3a757 | 1755 | { |
fd1e8a1f TH |
1756 | struct pcpu_alloc_info *ai; |
1757 | size_t size_sum, chunk_size; | |
ce3141a2 | 1758 | void *base; |
fd1e8a1f | 1759 | int unit; |
fb435d52 | 1760 | int rc; |
66c3a757 | 1761 | |
fd1e8a1f TH |
1762 | ai = pcpu_build_alloc_info(reserved_size, dyn_size, PAGE_SIZE, NULL); |
1763 | if (IS_ERR(ai)) | |
1764 | return PTR_ERR(ai); | |
1765 | BUG_ON(ai->nr_groups != 1); | |
1766 | BUG_ON(ai->groups[0].nr_units != num_possible_cpus()); | |
66c3a757 | 1767 | |
fd1e8a1f TH |
1768 | size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; |
1769 | chunk_size = ai->unit_size * num_possible_cpus(); | |
fa8a7094 | 1770 | |
ce3141a2 TH |
1771 | base = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE, |
1772 | __pa(MAX_DMA_ADDRESS)); | |
1773 | if (!base) { | |
fa8a7094 TH |
1774 | pr_warning("PERCPU: failed to allocate %zu bytes for " |
1775 | "embedding\n", chunk_size); | |
fb435d52 | 1776 | rc = -ENOMEM; |
fd1e8a1f | 1777 | goto out_free_ai; |
fa8a7094 | 1778 | } |
66c3a757 TH |
1779 | |
1780 | /* return the leftover and copy */ | |
fd1e8a1f TH |
1781 | for (unit = 0; unit < num_possible_cpus(); unit++) { |
1782 | void *ptr = base + unit * ai->unit_size; | |
1783 | ||
1784 | free_bootmem(__pa(ptr + size_sum), ai->unit_size - size_sum); | |
1785 | memcpy(ptr, __per_cpu_load, ai->static_size); | |
66c3a757 TH |
1786 | } |
1787 | ||
1788 | /* we're ready, commit */ | |
004018e2 | 1789 | pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", |
fd1e8a1f TH |
1790 | PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, |
1791 | ai->dyn_size, ai->unit_size); | |
d4b95f80 | 1792 | |
fb435d52 | 1793 | rc = pcpu_setup_first_chunk(ai, base); |
fd1e8a1f TH |
1794 | out_free_ai: |
1795 | pcpu_free_alloc_info(ai); | |
fb435d52 | 1796 | return rc; |
d4b95f80 | 1797 | } |
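/*
 * Typical use, as in the generic setup_per_cpu_areas() at the end of
 * this file: pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 * PERCPU_DYNAMIC_RESERVE) reserves module percpu space and sizes the
 * dynamic area with the default reserve.
 */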
08fc4580 TH |
1798 | #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK || |
1799 | !CONFIG_HAVE_SETUP_PER_CPU_AREA */ | |
d4b95f80 | 1800 | |
08fc4580 | 1801 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK |
d4b95f80 | 1802 | /** |
00ae4064 | 1803 | * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages |
d4b95f80 TH |
1804 | * @reserved_size: the size of reserved percpu area in bytes |
1805 | * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE | |
1806 | * @free_fn: function to free percpu page, always called with PAGE_SIZE
1807 | * @populate_pte_fn: function to populate pte | |
1808 | * | |
00ae4064 TH |
1809 | * This is a helper to ease setting up the page-remapped first percpu
1810 | * chunk and can be called where pcpu_setup_first_chunk() is expected.
d4b95f80 TH |
1811 | * |
1812 | * This is the basic allocator. The static percpu area is allocated
1813 | * page-by-page into the vmalloc area.
1814 | * | |
1815 | * RETURNS: | |
fb435d52 | 1816 | * 0 on success, -errno on failure. |
d4b95f80 | 1817 | */ |
fb435d52 TH |
1818 | int __init pcpu_page_first_chunk(size_t reserved_size, |
1819 | pcpu_fc_alloc_fn_t alloc_fn, | |
1820 | pcpu_fc_free_fn_t free_fn, | |
1821 | pcpu_fc_populate_pte_fn_t populate_pte_fn) | |
d4b95f80 | 1822 | { |
8f05a6a6 | 1823 | static struct vm_struct vm; |
fd1e8a1f | 1824 | struct pcpu_alloc_info *ai; |
00ae4064 | 1825 | char psize_str[16]; |
ce3141a2 | 1826 | int unit_pages; |
d4b95f80 | 1827 | size_t pages_size; |
ce3141a2 | 1828 | struct page **pages; |
fb435d52 | 1829 | int unit, i, j, rc; |
d4b95f80 | 1830 | |
00ae4064 TH |
1831 | snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); |
1832 | ||
fd1e8a1f TH |
1833 | ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL); |
1834 | if (IS_ERR(ai)) | |
1835 | return PTR_ERR(ai); | |
1836 | BUG_ON(ai->nr_groups != 1); | |
1837 | BUG_ON(ai->groups[0].nr_units != num_possible_cpus()); | |
1838 | ||
1839 | unit_pages = ai->unit_size >> PAGE_SHIFT; | |
d4b95f80 TH |
1840 | |
1841 | /* unaligned allocations can't be freed, round up to page size */ | |
fd1e8a1f TH |
1842 | pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * |
1843 | sizeof(pages[0])); | |
ce3141a2 | 1844 | pages = alloc_bootmem(pages_size); |
d4b95f80 | 1845 | |
8f05a6a6 | 1846 | /* allocate pages */ |
d4b95f80 | 1847 | j = 0; |
fd1e8a1f | 1848 | for (unit = 0; unit < num_possible_cpus(); unit++) |
ce3141a2 | 1849 | for (i = 0; i < unit_pages; i++) { |
fd1e8a1f | 1850 | unsigned int cpu = ai->groups[0].cpu_map[unit]; |
d4b95f80 TH |
1851 | void *ptr; |
1852 | ||
3cbc8565 | 1853 | ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); |
d4b95f80 | 1854 | if (!ptr) { |
00ae4064 TH |
1855 | pr_warning("PERCPU: failed to allocate %s page " |
1856 | "for cpu%u\n", psize_str, cpu); | |
d4b95f80 TH |
1857 | goto enomem; |
1858 | } | |
ce3141a2 | 1859 | pages[j++] = virt_to_page(ptr); |
d4b95f80 TH |
1860 | } |
1861 | ||
8f05a6a6 TH |
1862 | /* allocate vm area, map the pages and copy static data */ |
1863 | vm.flags = VM_ALLOC; | |
fd1e8a1f | 1864 | vm.size = num_possible_cpus() * ai->unit_size; |
8f05a6a6 TH |
1865 | vm_area_register_early(&vm, PAGE_SIZE); |
1866 | ||
fd1e8a1f | 1867 | for (unit = 0; unit < num_possible_cpus(); unit++) { |
1d9d3257 | 1868 | unsigned long unit_addr = |
fd1e8a1f | 1869 | (unsigned long)vm.addr + unit * ai->unit_size; |
8f05a6a6 | 1870 | |
ce3141a2 | 1871 | for (i = 0; i < unit_pages; i++) |
8f05a6a6 TH |
1872 | populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); |
1873 | ||
1874 | /* pte already populated, the following shouldn't fail */ | |
fb435d52 TH |
1875 | rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], |
1876 | unit_pages); | |
1877 | if (rc < 0) | |
1878 | panic("failed to map percpu area, err=%d\n", rc); | |
8f05a6a6 TH |
1879 | |
1880 | /* | |
1881 | * FIXME: Archs with virtual cache should flush local | |
1882 | * cache for the linear mapping here - something | |
1883 | * equivalent to flush_cache_vmap() on the local cpu. | |
1884 | * flush_cache_vmap() can't be used as most supporting | |
1885 | * data structures are not set up yet. | |
1886 | */ | |
1887 | ||
1888 | /* copy static data */ | |
fd1e8a1f | 1889 | memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); |
8f05a6a6 TH |
1890 | } |
1891 | ||
d4b95f80 | 1892 | /* we're ready, commit */ |
1d9d3257 | 1893 | pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n", |
fd1e8a1f TH |
1894 | unit_pages, psize_str, vm.addr, ai->static_size, |
1895 | ai->reserved_size, ai->dyn_size); | |
d4b95f80 | 1896 | |
fb435d52 | 1897 | rc = pcpu_setup_first_chunk(ai, vm.addr); |
d4b95f80 TH |
1898 | goto out_free_ar; |
1899 | ||
1900 | enomem: | |
1901 | while (--j >= 0) | |
ce3141a2 | 1902 | free_fn(page_address(pages[j]), PAGE_SIZE); |
fb435d52 | 1903 | rc = -ENOMEM; |
d4b95f80 | 1904 | out_free_ar: |
ce3141a2 | 1905 | free_bootmem(__pa(pages), pages_size); |
fd1e8a1f | 1906 | pcpu_free_alloc_info(ai); |
fb435d52 | 1907 | return rc; |
d4b95f80 | 1908 | } |
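/*
 * Minimal sketch of the allocation callbacks an arch might pass to
 * pcpu_page_first_chunk().  The function names are hypothetical and a
 * real NUMA-aware arch would allocate node-local memory for @cpu
 * instead of plain bootmem; populate_pte_fn is arch-specific and not
 * sketched here.
 */
static void * __init pcpu_example_alloc(unsigned int cpu, size_t size,
					size_t align)
{
	/* @cpu ignored in this sketch - no node-local placement */
	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_example_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}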
08fc4580 | 1909 | #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */ |
d4b95f80 | 1910 | |
08fc4580 | 1911 | #ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK |
8c4bfc6e | 1912 | struct pcpul_ent { |
8c4bfc6e | 1913 | void *ptr; |
a530b795 | 1914 | void *map_addr; |
8c4bfc6e TH |
1915 | }; |
1916 | ||
1917 | static size_t pcpul_size; | |
a530b795 TH |
1918 | static size_t pcpul_lpage_size; |
1919 | static int pcpul_nr_lpages; | |
8c4bfc6e | 1920 | static struct pcpul_ent *pcpul_map; |
a530b795 | 1921 | |
fd1e8a1f | 1922 | static bool __init pcpul_unit_to_cpu(int unit, const struct pcpu_alloc_info *ai, |
a530b795 TH |
1923 | unsigned int *cpup) |
1924 | { | |
fd1e8a1f | 1925 | int group, cunit; |
a530b795 | 1926 | |
fd1e8a1f TH |
1927 | for (group = 0, cunit = 0; group < ai->nr_groups; group++) { |
1928 | const struct pcpu_group_info *gi = &ai->groups[group]; | |
1929 | ||
1930 | if (unit < cunit + gi->nr_units) { | |
a530b795 | 1931 | if (cpup) |
fd1e8a1f | 1932 | *cpup = gi->cpu_map[unit - cunit]; |
a530b795 TH |
1933 | return true; |
1934 | } | |
fd1e8a1f TH |
1935 | cunit += gi->nr_units; |
1936 | } | |
a530b795 TH |
1937 | |
1938 | return false; | |
1939 | } | |
1940 | ||
fd1e8a1f TH |
1941 | static int __init pcpul_cpu_to_unit(int cpu, const struct pcpu_alloc_info *ai) |
1942 | { | |
1943 | int group, unit, i; | |
1944 | ||
1945 | for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { | |
1946 | const struct pcpu_group_info *gi = &ai->groups[group]; | |
1947 | ||
1948 | for (i = 0; i < gi->nr_units; i++) | |
1949 | if (gi->cpu_map[i] == cpu) | |
1950 | return unit + i; | |
1951 | } | |
1952 | BUG(); | |
1953 | } | |
1954 | ||
8c4bfc6e TH |
1955 | /** |
1956 | * pcpu_lpage_first_chunk - remap the first percpu chunk using large page | |
fd1e8a1f | 1957 | * @ai: pcpu_alloc_info |
8c4bfc6e TH |
1958 | * @alloc_fn: function to allocate percpu lpage, always called with lpage_size |
1959 | * @free_fn: function to free percpu memory, @size <= lpage_size | |
1960 | * @map_fn: function to map percpu lpage, always called with lpage_size | |
1961 | * | |
a530b795 | 1962 | * This allocator uses large page to build and map the first chunk. |
fd1e8a1f TH |
1963 | * Unlike other helpers, the caller should provide fully initialized |
1964 | * @ai. This can be done using pcpu_build_alloc_info(). This
1965 | * two-stage initialization allows arch code to evaluate the
a530b795 TH |
1966 | * parameters before committing to them.
1967 | * | |
1968 | * Large pages are allocated as directed by @unit_map and other | |
1969 | * parameters and mapped to vmalloc space. Unused holes are returned | |
1970 | * to the page allocator. Note that these holes end up being actively | |
1971 | * mapped twice - once in the physical mapping and once in the vmalloc
1972 | * area for the first percpu chunk. Depending on architecture, this
1973 | * might cause problems when changing page attributes of the returned area.
1974 | * These double mapped areas can be detected using | |
1975 | * pcpu_lpage_remapped(). | |
8c4bfc6e TH |
1976 | * |
1977 | * RETURNS: | |
fb435d52 | 1978 | * 0 on success, -errno on failure. |
8c4bfc6e | 1979 | */ |
fb435d52 TH |
1980 | int __init pcpu_lpage_first_chunk(const struct pcpu_alloc_info *ai, |
1981 | pcpu_fc_alloc_fn_t alloc_fn, | |
1982 | pcpu_fc_free_fn_t free_fn, | |
1983 | pcpu_fc_map_fn_t map_fn) | |
8c4bfc6e | 1984 | { |
a530b795 | 1985 | static struct vm_struct vm; |
fd1e8a1f TH |
1986 | const size_t lpage_size = ai->atom_size; |
1987 | size_t chunk_size, map_size; | |
8c4bfc6e | 1988 | unsigned int cpu; |
fb435d52 | 1989 | int i, j, unit, nr_units, rc; |
8c4bfc6e | 1990 | |
fd1e8a1f TH |
1991 | nr_units = 0; |
1992 | for (i = 0; i < ai->nr_groups; i++) | |
1993 | nr_units += ai->groups[i].nr_units; | |
8c4bfc6e | 1994 | |
fd1e8a1f | 1995 | chunk_size = ai->unit_size * nr_units; |
a530b795 TH |
1996 | BUG_ON(chunk_size % lpage_size); |
1997 | ||
fd1e8a1f | 1998 | pcpul_size = ai->static_size + ai->reserved_size + ai->dyn_size; |
a530b795 TH |
1999 | pcpul_lpage_size = lpage_size; |
2000 | pcpul_nr_lpages = chunk_size / lpage_size; | |
8c4bfc6e TH |
2001 | |
2002 | /* allocate pointer array and alloc large pages */ | |
a530b795 | 2003 | map_size = pcpul_nr_lpages * sizeof(pcpul_map[0]); |
8c4bfc6e TH |
2004 | pcpul_map = alloc_bootmem(map_size); |
2005 | ||
a530b795 TH |
2006 | /* allocate all pages */ |
2007 | for (i = 0; i < pcpul_nr_lpages; i++) { | |
2008 | size_t offset = i * lpage_size; | |
fd1e8a1f TH |
2009 | int first_unit = offset / ai->unit_size; |
2010 | int last_unit = (offset + lpage_size - 1) / ai->unit_size; | |
8c4bfc6e TH |
2011 | void *ptr; |
2012 | ||
a530b795 TH |
2013 | /* find out which cpu is mapped to this unit */ |
2014 | for (unit = first_unit; unit <= last_unit; unit++) | |
fd1e8a1f | 2015 | if (pcpul_unit_to_cpu(unit, ai, &cpu)) |
a530b795 TH |
2016 | goto found; |
2017 | continue; | |
2018 | found: | |
3cbc8565 | 2019 | ptr = alloc_fn(cpu, lpage_size, lpage_size); |
8c4bfc6e TH |
2020 | if (!ptr) { |
2021 | pr_warning("PERCPU: failed to allocate large page " | |
2022 | "for cpu%u\n", cpu); | |
2023 | goto enomem; | |
2024 | } | |
2025 | ||
a530b795 TH |
2026 | pcpul_map[i].ptr = ptr; |
2027 | } | |
8c4bfc6e | 2028 | |
a530b795 TH |
2029 | /* return unused holes */ |
2030 | for (unit = 0; unit < nr_units; unit++) { | |
fd1e8a1f TH |
2031 | size_t start = unit * ai->unit_size; |
2032 | size_t end = start + ai->unit_size; | |
a530b795 TH |
2033 | size_t off, next; |
2034 | ||
2035 | /* don't free used part of occupied unit */ | |
fd1e8a1f | 2036 | if (pcpul_unit_to_cpu(unit, ai, NULL)) |
a530b795 TH |
2037 | start += pcpul_size; |
2038 | ||
2039 | /* unit can span more than one page, punch the holes */ | |
2040 | for (off = start; off < end; off = next) { | |
2041 | void *ptr = pcpul_map[off / lpage_size].ptr; | |
2042 | next = min(roundup(off + 1, lpage_size), end); | |
2043 | if (ptr) | |
2044 | free_fn(ptr + off % lpage_size, next - off); | |
2045 | } | |
8c4bfc6e TH |
2046 | } |
2047 | ||
a530b795 TH |
2048 | /* allocate address, map and copy */ |
2049 | vm.flags = VM_ALLOC; | |
2050 | vm.size = chunk_size; | |
fd1e8a1f | 2051 | vm_area_register_early(&vm, ai->unit_size); |
a530b795 TH |
2052 | |
2053 | for (i = 0; i < pcpul_nr_lpages; i++) { | |
2054 | if (!pcpul_map[i].ptr) | |
2055 | continue; | |
2056 | pcpul_map[i].map_addr = vm.addr + i * lpage_size; | |
2057 | map_fn(pcpul_map[i].ptr, lpage_size, pcpul_map[i].map_addr); | |
2058 | } | |
8c4bfc6e TH |
2059 | |
2060 | for_each_possible_cpu(cpu) | |
fd1e8a1f TH |
2061 | memcpy(vm.addr + pcpul_cpu_to_unit(cpu, ai) * ai->unit_size, |
2062 | __per_cpu_load, ai->static_size); | |
8c4bfc6e TH |
2063 | |
2064 | /* we're ready, commit */ | |
004018e2 | 2065 | pr_info("PERCPU: large pages @%p s%zu r%zu d%zu u%zu\n", |
fd1e8a1f TH |
2066 | vm.addr, ai->static_size, ai->reserved_size, ai->dyn_size, |
2067 | ai->unit_size); | |
8c4bfc6e | 2068 | |
fb435d52 | 2069 | rc = pcpu_setup_first_chunk(ai, vm.addr); |
a530b795 TH |
2070 | |
2071 | /* | |
2072 | * Sort pcpul_map array for pcpu_lpage_remapped(). Unmapped | |
2073 | * lpages are pushed to the end and trimmed. | |
2074 | */ | |
2075 | for (i = 0; i < pcpul_nr_lpages - 1; i++) | |
2076 | for (j = i + 1; j < pcpul_nr_lpages; j++) { | |
2077 | struct pcpul_ent tmp; | |
2078 | ||
2079 | if (!pcpul_map[j].ptr) | |
2080 | continue; | |
2081 | if (pcpul_map[i].ptr && | |
2082 | pcpul_map[i].ptr < pcpul_map[j].ptr) | |
2083 | continue; | |
2084 | ||
2085 | tmp = pcpul_map[i]; | |
2086 | pcpul_map[i] = pcpul_map[j]; | |
2087 | pcpul_map[j] = tmp; | |
2088 | } | |
2089 | ||
2090 | while (pcpul_nr_lpages && !pcpul_map[pcpul_nr_lpages - 1].ptr) | |
2091 | pcpul_nr_lpages--; | |
8c4bfc6e | 2092 | |
fb435d52 | 2093 | return rc; |
8c4bfc6e TH |
2094 | |
2095 | enomem: | |
a530b795 TH |
2096 | for (i = 0; i < pcpul_nr_lpages; i++) |
2097 | if (pcpul_map[i].ptr) | |
2098 | free_fn(pcpul_map[i].ptr, lpage_size); | |
8c4bfc6e TH |
2099 | free_bootmem(__pa(pcpul_map), map_size); |
2100 | return -ENOMEM; | |
2101 | } | |
2102 | ||
2103 | /** | |
2104 | * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area | |
2105 | * @kaddr: the kernel address in question | |
2106 | * | |
2107 | * Determine whether @kaddr falls in the pcpul recycled area. This is | |
2108 | * used by pageattr to detect VM aliases and break up the pcpu large | |
2109 | * page mapping such that the same physical page is not mapped under | |
2110 | * different attributes. | |
2111 | * | |
2112 | * The recycled area is always at the tail of a partially used large | |
2113 | * page. | |
2114 | * | |
2115 | * RETURNS: | |
2116 | * Address of corresponding remapped pcpu address if match is found; | |
2117 | * otherwise, NULL. | |
2118 | */ | |
2119 | void *pcpu_lpage_remapped(void *kaddr) | |
2120 | { | |
a530b795 TH |
2121 | unsigned long lpage_mask = pcpul_lpage_size - 1; |
2122 | void *lpage_addr = (void *)((unsigned long)kaddr & ~lpage_mask); | |
2123 | unsigned long offset = (unsigned long)kaddr & lpage_mask; | |
2124 | int left = 0, right = pcpul_nr_lpages - 1; | |
8c4bfc6e TH |
2125 | int pos; |
2126 | ||
2127 | /* pcpul in use at all? */ | |
2128 | if (!pcpul_map) | |
2129 | return NULL; | |
2130 | ||
2131 | /* okay, perform binary search */ | |
2132 | while (left <= right) { | |
2133 | pos = (left + right) / 2; | |
2134 | ||
2135 | if (pcpul_map[pos].ptr < lpage_addr) | |
2136 | left = pos + 1; | |
2137 | else if (pcpul_map[pos].ptr > lpage_addr) | |
2138 | right = pos - 1; | |
a530b795 TH |
2139 | else |
2140 | return pcpul_map[pos].map_addr + offset; | |
8c4bfc6e TH |
2141 | } |
2142 | ||
2143 | return NULL; | |
2144 | } | |
08fc4580 | 2145 | #endif /* CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK */ |
8c4bfc6e | 2146 | |
e74e3962 TH |
2147 | /* |
2148 | * Generic percpu area setup. | |
2149 | * | |
2150 | * The embedding helper is used because its behavior closely resembles | |
2151 | * the original non-dynamic generic percpu area setup. This is | |
2152 | * important because many archs have addressing restrictions and might | |
2153 | * fail if the percpu area is located far away from the previous | |
2154 | * location. As an added bonus, in non-NUMA cases, embedding is | |
2155 | * generally a good idea TLB-wise because the percpu area can piggy back
2156 | * on the physical linear memory mapping which uses large page | |
2157 | * mappings on applicable archs. | |
2158 | */ | |
2159 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | |
2160 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; | |
2161 | EXPORT_SYMBOL(__per_cpu_offset); | |
2162 | ||
2163 | void __init setup_per_cpu_areas(void) | |
2164 | { | |
e74e3962 TH |
2165 | unsigned long delta; |
2166 | unsigned int cpu; | |
fb435d52 | 2167 | int rc; |
e74e3962 TH |
2168 | |
2169 | /* | |
2170 | * Always reserve area for module percpu variables. That's | |
2171 | * what the legacy allocator did. | |
2172 | */ | |
fb435d52 TH |
2173 | rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, |
2174 | PERCPU_DYNAMIC_RESERVE); | |
2175 | if (rc < 0) | |
e74e3962 TH |
2176 | panic("Failed to initialize percpu areas.");
2177 | ||
2178 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; | |
2179 | for_each_possible_cpu(cpu) | |
fb435d52 | 2180 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; |
e74e3962 TH |
2181 | } |
2182 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ |
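/*
 * Illustrative note: with the offsets established above, a percpu
 * variable for @cpu is reached by adding that cpu's offset to the
 * variable's link-time address, conceptually:
 *
 *   ptr = (void *)&per_cpu_var + __per_cpu_offset[cpu];
 *
 * which is roughly what the generic per_cpu()/per_cpu_ptr() accessors
 * expand to on archs using this setup.
 */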