/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in vmalloc
 * area.  Each chunk consists of num_possible_cpus() units and the
 * first chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.  ie. in
 * vmalloc area
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  Percpu access can be done by configuring
 * percpu base registers pcpu_unit_size apart.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative one an allocated region.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.
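 *
 * For example (illustrative numbers, not taken from a real chunk): a
 * fresh 4k unit starts with map = { 4096 }.  After a 512-byte
 * allocation at offset 1024, the map reads { 1024, -512, 2560 } and
 * free_size drops to 3584.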
 *
 * Chunks can be determined from the address using the index field
 * in the page struct.  The index field contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following.
 *
 * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif
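
/*
 * Translation sketch (hypothetical addresses): if pcpu_base_addr were
 * 0xf8000000 and __per_cpu_start 0xc1000000, the vmalloc address
 * 0xf8000040 would translate to percpu pointer 0xc1000040 and back.
 * The two macros are exact inverses by construction.
 */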

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	struct page		**page;		/* points to page array */
	struct page		*page_ar[];	/* #cpus * UNIT_PAGES */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The offset up to
 * which the reserved area extends is kept in
 * pcpu_reserved_chunk_limit.  When the reserved area doesn't exist,
 * the following variables contain NULL and 0 respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks and chunk->page arrays.
 * The latter is a spinlock and protects the index data structures -
 * chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * Free path accesses and alters only the index data structures, so it
 * can be safely called from atomic context.  When memory needs to be
 * returned to the system, free path schedules reclaim_work which
 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 * reclaimed, releases both locks and frees the chunks.  Note that
 * it's necessary to grab both locks to remove a chunk from
 * circulation as the allocation path might be referencing the chunk
 * with only pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

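/*
 * Slot computation example (illustrative): for size == 512, fls(512)
 * == 10 and __pcpu_size_to_slot() returns max(10 - 5 + 2, 1) == 7.
 * Completely free chunks always go to the last slot regardless of
 * size.
 */
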
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return cpu * pcpu_unit_pages + page_idx;
}

static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
				      unsigned int cpu, int page_idx)
{
	return &chunk->page[pcpu_page_idx(cpu, page_idx)];
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}

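/*
 * Address computation example (illustrative): with pcpu_unit_pages ==
 * 8 and 4k pages, page 2 of cpu 1 lives at vm->addr + (1 * 8 + 2) *
 * 4k == vm->addr + 40k.
 */
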
static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
				     int page_idx)
{
	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	void *first_start = pcpu_first_chunk->vm->addr;

	/* is it in the first chunk? */
	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
		/* is it in the reserved area? */
		if (addr < first_start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	return pcpu_get_page_chunk(vmalloc_to_page(addr));
}

/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend area map of @chunk so that it can accommodate an allocation.
 * A single allocation can split an area into three areas, so this
 * function makes sure that @chunk->map has at least two extra slots.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
 * if area map is extended.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	spin_unlock_irq(&pcpu_lock);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
		spin_lock_irq(&pcpu_lock);
		return -ENOMEM;
	}

	/*
	 * Acquire pcpu_lock and switch to new area map.  Only free
	 * could have happened inbetween, so map_used couldn't have
	 * grown.
	 */
	spin_lock_irq(&pcpu_lock);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	return 1;
}

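/*
 * Sizing example (illustrative): a chunk whose map_used has reached
 * 31 needs at least 33 slots, so new_alloc doubles from
 * PCPU_DFL_MAP_ALLOC (16) through 32 to 64.
 */
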
/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

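/*
 * Split example (illustrative): splitting the single free block of
 * map = { 4096 } at i == 0 with head == 64 and tail == 3520 leaves
 * map = { 64, 512, 3520 } with map_used == 3.
 */
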
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

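/*
 * Scan example (illustrative): allocating 512 bytes with 1024-byte
 * alignment from map = { -512, 3584 } hits i == 1 with head == 512
 * and tail == 2560; after the split and negation the map reads
 * { -512, 512, -512, 2560 } and the function returns offset 1024.
 */
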
/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

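/*
 * Merge example (illustrative): freeing the middle area of map =
 * { 1024, -512, 2560 } flips it positive and merges with both
 * neighbours, leaving map = { 4096 } with map_used == 1.
 */
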
/**
 * pcpu_unmap - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 * @flush_tlb: whether to flush tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * The vcache is flushed before unmapping and, if @flush_tlb is true,
 * the tlb is flushed afterwards.
 */
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
		       bool flush_tlb)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/* unmap must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	/*
	 * Each flushing trial can be very expensive, issue flush on
	 * the whole region at once rather than doing it for each cpu.
	 * This could be an overkill but is more scalable.
	 */
	flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
			   pcpu_chunk_addr(chunk, last, page_end));

	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* ditto as flush_cache_vunmap() */
	if (flush_tlb)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  If @flush is true, vcache is flushed before unmapping
 * and tlb after.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
				  bool flush)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int unmap_start = -1;
	int uninitialized_var(unmap_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			if (!*pagep)
				continue;

			__free_page(*pagep);

			/*
			 * If it's partial depopulation, it might get
			 * populated or depopulated again.  Mark the
			 * page gone.
			 */
			*pagep = NULL;

			unmap_start = unmap_start < 0 ? i : unmap_start;
			unmap_end = i + 1;
		}
	}

	if (unmap_start >= 0)
		pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}

/**
 * pcpu_map - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.
 * vcache is flushed afterwards.
 */
static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;
	int err;

	/* map must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       pcpu_chunk_pagep(chunk, cpu, page_start),
				       page_end - page_start);
		if (err < 0)
			return err;
	}

	/* flush at once, please read comments in pcpu_unmap() */
	flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
			 pcpu_chunk_addr(chunk, last, page_end));
	return 0;
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int map_start = -1;
	int uninitialized_var(map_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		if (pcpu_chunk_page_occupied(chunk, i)) {
			if (map_start >= 0) {
				if (pcpu_map(chunk, map_start, map_end))
					goto err;
				map_start = -1;
			}
			continue;
		}

		map_start = map_start < 0 ? i : map_start;
		map_end = i + 1;

		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			*pagep = alloc_pages_node(cpu_to_node(cpu),
						  alloc_mask, 0);
			if (!*pagep)
				goto err;
			pcpu_set_page_chunk(*pagep, chunk);
		}
	}

	if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
		goto err;

	for_each_possible_cpu(cpu)
		memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
		       size);

	return 0;
err:
	/* likely under heavy memory pressure, give memory back */
	pcpu_depopulate_chunk(chunk, off, size, true);
	return -ENOMEM;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;
	chunk->page = chunk->page_ar;

	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
		    pcpu_extend_area_map(chunk) < 0)
			goto fail_unlock;
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			switch (pcpu_extend_area_map(chunk)) {
			case 0:
				break;
			case 1:
				goto restart;	/* pcpu_lock dropped, restart */
			default:
				goto fail_unlock;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irq(&pcpu_lock);

	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto fail_unlock_mutex;

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irq(&pcpu_lock);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irq(&pcpu_lock);
		pcpu_free_area(chunk, off);
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	return __addr_to_pcpu_ptr(chunk->vm->addr + off);

fail_unlock:
	spin_unlock_irq(&pcpu_lock);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

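/*
 * Usage sketch (illustrative, not part of the original file): a
 * caller typically pairs __alloc_percpu() with per_cpu_ptr() and
 * free_percpu():
 *
 *	int *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *	if (cnt)
 *		(*per_cpu_ptr(cnt, raw_smp_processor_id()))++;
 *	free_percpu(cnt);
 */
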
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);
	mutex_unlock(&pcpu_alloc_mutex);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
		free_pcpu_chunk(chunk);
	}
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

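/*
 * First chunk layout sketch (illustrative sizes): with static_size ==
 * 24k, reserved_size == 8k and dyn_size == 12k, each unit of the
 * first chunk looks like
 *
 *	| static 24k | reserved 8k | dynamic 12k | padding to unit_size |
 *
 * schunk below serves the static + reserved part and dchunk the
 * dynamic part; they share the same vm area and page map.
 */
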
/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @get_page_fn: callback to fetch page pointer
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
 * @base_addr: mapped address, NULL for auto
 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.  The first two parameters are mandatory.  The rest are
 * optional.
 *
 * @get_page_fn() should return pointer to percpu page given cpu
 * number and page number.  It should at least return enough pages to
 * cover the static area.  The returned pages for static area should
 * have been initialized with valid data.  If @unit_size is specified,
 * it can also return pages after the static area.  NULL return
 * indicates end of pages for the cpu.  Note that @get_page_fn() must
 * return the same number of pages for all cpus.
 *
 * @reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @dyn_size, if non-negative, determines the number of bytes
 * available for dynamic allocation in the first chunk.  Specifying
 * non-negative value makes percpu leave alone the area beyond
 * @static_size + @reserved_size + @dyn_size.
 *
 * @unit_size, if non-negative, specifies unit size and must be
 * aligned to PAGE_SIZE and equal to or larger than @static_size +
 * @reserved_size + if non-negative, @dyn_size.
 *
 * Non-null @base_addr means that the caller already allocated virtual
 * region for the first chunk and mapped it.  percpu must not mess
 * with the chunk.  Note that @base_addr with 0 @unit_size or non-NULL
 * @populate_pte_fn doesn't make any sense.
 *
 * @populate_pte_fn is used to populate the pagetable.  NULL means the
 * caller already populated the pagetable.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access.
 */
size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				     size_t static_size, size_t reserved_size,
				     ssize_t dyn_size, ssize_t unit_size,
				     void *base_addr,
				     pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct first_vm;
	static int smap[2], dmap[2];
	size_t size_sum = static_size + reserved_size +
			  (dyn_size >= 0 ? dyn_size : 0);
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned int cpu;
	int nr_pages;
	int err, i;

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
	BUG_ON(!static_size);
	if (unit_size >= 0) {
		BUG_ON(unit_size < size_sum);
		BUG_ON(unit_size & ~PAGE_MASK);
		BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
	} else
		BUG_ON(base_addr);
	BUG_ON(base_addr && populate_pte_fn);

	if (unit_size >= 0)
		pcpu_unit_pages = unit_size >> PAGE_SHIFT;
	else
		pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
					PFN_UP(size_sum));

	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);

	if (dyn_size < 0)
		dyn_size = pcpu_unit_size - static_size - reserved_size;

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->vm = &first_vm;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->page = schunk->page_ar;

	if (reserved_size) {
		schunk->free_size = reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = static_size + reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->vm = &first_vm;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->page = schunk->page_ar;	/* share page map with schunk */

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* allocate vm address */
	first_vm.flags = VM_ALLOC;
	first_vm.size = pcpu_chunk_size;

	if (!base_addr)
		vm_area_register_early(&first_vm, PAGE_SIZE);
	else {
		/*
		 * Pages already mapped.  No need to remap into
		 * vmalloc area.  In this case the first chunks can't
		 * be mapped or unmapped by percpu and are marked
		 * immutable.
		 */
		first_vm.addr = base_addr;
		schunk->immutable = true;
		if (dchunk)
			dchunk->immutable = true;
	}

	/* assign pages */
	nr_pages = -1;
	for_each_possible_cpu(cpu) {
		for (i = 0; i < pcpu_unit_pages; i++) {
			struct page *page = get_page_fn(cpu, i);

			if (!page)
				break;
			*pcpu_chunk_pagep(schunk, cpu, i) = page;
		}

		BUG_ON(i < PFN_UP(static_size));

		if (nr_pages < 0)
			nr_pages = i;
		else
			BUG_ON(nr_pages != i);
	}

	/* map them */
	if (populate_pte_fn) {
		for_each_possible_cpu(cpu)
			for (i = 0; i < nr_pages; i++)
				populate_pte_fn(pcpu_chunk_addr(schunk,
								cpu, i));

		err = pcpu_map(schunk, 0, nr_pages);
		if (err)
			panic("failed to setup static percpu area, err=%d\n",
			      err);
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
	return pcpu_unit_size;
}

1192 | |
1193 | /* | |
1194 | * Embedding first chunk setup helper. | |
1195 | */ | |
1196 | static void *pcpue_ptr __initdata; | |
1197 | static size_t pcpue_size __initdata; | |
1198 | static size_t pcpue_unit_size __initdata; | |
1199 | ||
1200 | static struct page * __init pcpue_get_page(unsigned int cpu, int pageno) | |
1201 | { | |
1202 | size_t off = (size_t)pageno << PAGE_SHIFT; | |
1203 | ||
1204 | if (off >= pcpue_size) | |
1205 | return NULL; | |
1206 | ||
1207 | return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off); | |
1208 | } | |
1209 | ||
1210 | /** | |
1211 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem | |
1212 | * @static_size: the size of static percpu area in bytes | |
1213 | * @reserved_size: the size of reserved percpu area in bytes | |
1214 | * @dyn_size: free size for dynamic allocation in bytes, -1 for auto | |
66c3a757 TH |
1215 | * |
1216 | * This is a helper to ease setting up embedded first percpu chunk and | |
1217 | * can be called where pcpu_setup_first_chunk() is expected. | |
1218 | * | |
1219 | * If this function is used to setup the first chunk, it is allocated | |
1220 | * as a contiguous area using bootmem allocator and used as-is without | |
1221 | * being mapped into vmalloc area. This enables the first chunk to | |
1222 | * piggy back on the linear physical mapping which often uses larger | |
1223 | * page size. | |
1224 | * | |
1225 | * When @dyn_size is positive, dynamic area might be larger than | |
788e5abc TH |
1226 | * specified to fill page alignment. When @dyn_size is auto, |
1227 | * @dyn_size is just big enough to fill page alignment after static | |
1228 | * and reserved areas. | |
66c3a757 TH |
1229 | * |
1230 | * If the needed size is smaller than the minimum or specified unit | |
1231 | * size, the leftover is returned to the bootmem allocator. | |
1232 | * | |
1233 | * RETURNS: | |
1234 | * The determined pcpu_unit_size which can be used to initialize | |
1235 | * percpu access on success, -errno on failure. | |
1236 | */ | |
1237 | ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, | |
788e5abc | 1238 | ssize_t dyn_size) |
66c3a757 | 1239 | { |
fa8a7094 | 1240 | size_t chunk_size; |
66c3a757 TH |
1241 | unsigned int cpu; |
1242 | ||
1243 | /* determine parameters and allocate */ | |
1244 | pcpue_size = PFN_ALIGN(static_size + reserved_size + | |
1245 | (dyn_size >= 0 ? dyn_size : 0)); | |
1246 | if (dyn_size != 0) | |
1247 | dyn_size = pcpue_size - static_size - reserved_size; | |
1248 | ||
788e5abc | 1249 | pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE); |
fa8a7094 TH |
1250 | chunk_size = pcpue_unit_size * num_possible_cpus(); |
1251 | ||
1252 | pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE, | |
1253 | __pa(MAX_DMA_ADDRESS)); | |
1254 | if (!pcpue_ptr) { | |
1255 | pr_warning("PERCPU: failed to allocate %zu bytes for " | |
1256 | "embedding\n", chunk_size); | |
66c3a757 | 1257 | return -ENOMEM; |
fa8a7094 | 1258 | } |
66c3a757 TH |
1259 | |
1260 | /* return the leftover and copy */ | |
1261 | for_each_possible_cpu(cpu) { | |
1262 | void *ptr = pcpue_ptr + cpu * pcpue_unit_size; | |
1263 | ||
1264 | free_bootmem(__pa(ptr + pcpue_size), | |
1265 | pcpue_unit_size - pcpue_size); | |
1266 | memcpy(ptr, __per_cpu_load, static_size); | |
1267 | } | |
1268 | ||
1269 | /* we're ready, commit */ | |
1270 | pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", | |
1271 | pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size); | |
1272 | ||
1273 | return pcpu_setup_first_chunk(pcpue_get_page, static_size, | |
1274 | reserved_size, dyn_size, | |
1275 | pcpue_unit_size, pcpue_ptr, NULL); | |
1276 | } | |
e74e3962 | 1277 | |
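/*
 * Embed sizing example (illustrative): with static_size == 30k,
 * reserved_size == 8k, dyn_size == -1 and 4k pages, pcpue_size
 * becomes PFN_ALIGN(38k) == 40k, dyn_size ends up 2k and
 * pcpue_unit_size is max(40k, PCPU_MIN_UNIT_SIZE).
 */
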
/*
 * 4k page first chunk setup helper.
 */
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_unit_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
	if (pageno < pcpu4k_unit_pages)
		return pcpu4k_pages[cpu * pcpu4k_unit_pages + pageno];
	return NULL;
}

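/*
 * Page array layout (illustrative): pcpu4k_pages is indexed
 * [cpu * pcpu4k_unit_pages + pageno], so with two possible cpus and
 * three unit pages it holds { c0p0, c0p1, c0p2, c1p0, c1p1, c1p2 }.
 */
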
/**
 * pcpu_4k_first_chunk - map the first chunk using PAGE_SIZE pages
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up the first percpu chunk and can
 * be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access on success, -errno on failure.
 */
ssize_t __init pcpu_4k_first_chunk(size_t static_size, size_t reserved_size,
				   pcpu_fc_alloc_fn_t alloc_fn,
				   pcpu_fc_free_fn_t free_fn,
				   pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_unit_pages = PFN_UP(max_t(size_t, static_size + reserved_size,
					 PCPU_MIN_UNIT_SIZE));

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_unit_pages * num_possible_cpus() *
			       sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate pages */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_unit_pages; i++) {
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate "
					   "4k page for cpu%u\n", cpu);
				goto enomem;
			}
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * pcpu4k_unit_pages << PAGE_SHIFT;
	vm_area_register_early(&vm, PAGE_SIZE);

	for_each_possible_cpu(cpu) {
		unsigned long unit_addr = (unsigned long)vm.addr +
			(cpu * pcpu4k_unit_pages << PAGE_SHIFT);

		for (i = 0; i < pcpu4k_unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		ret = __pcpu_map_pages(unit_addr,
				       &pcpu4k_pages[cpu * pcpu4k_unit_pages],
				       pcpu4k_unit_pages);
		if (ret < 0)
			panic("failed to map percpu area, err=%zd\n", ret);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: %d 4k pages per cpu, static data %zu bytes\n",
		pcpu4k_unit_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     reserved_size, -1,
				     pcpu4k_unit_pages << PAGE_SHIFT, vm.addr,
				     NULL);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pcpu4k_pages[j]), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}

/*
 * Generic percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	ssize_t unit_size;
	unsigned long delta;
	unsigned int cpu;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
					   PERCPU_DYNAMIC_RESERVE);
	if (unit_size < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + cpu * unit_size;
}
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */