// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu-km.c - kernel memory based chunk allocation
 *
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * Chunks are allocated as contiguous kernel memory using gfp
 * allocation. This is to be used on nommu architectures.
 *
 * To use percpu-km,
 *
 * - define CONFIG_NEED_PER_CPU_KM in the arch Kconfig (a Kconfig
 *   sketch follows this comment).
 *
 * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined. It's
 *   not compatible with PER_CPU_KM. EMBED_FIRST_CHUNK should work
 *   fine.
 *
 * - NUMA is not supported. When setting up the first chunk,
 *   @cpu_distance_fn should be NULL or report all CPUs to be nearer
 *   than or at LOCAL_DISTANCE.
 *
 * - It's best if the chunk size is a power-of-two multiple of
 *   PAGE_SIZE. Because each chunk is allocated as a contiguous
 *   kernel memory block using alloc_pages(), memory will be wasted if
 *   the chunk size is not a power-of-two number of pages. The
 *   percpu-km code will whine about it.
 */
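
/*
 * A minimal sketch of the Kconfig side, assuming the symbol is wired up
 * the way mm/Kconfig does it; the exact dependencies shown here are
 * illustrative rather than authoritative:
 *
 *	config NEED_PER_CPU_KM
 *		depends on !SMP
 *		bool
 *		default y
 */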

#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#error "contiguous percpu allocation is incompatible with paged first chunk"
#endif

#include <linux/log2.h>
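
/*
 * Each chunk is backed by one contiguous allocation made at chunk
 * creation time and is marked fully populated up front, so there is
 * never anything to populate or depopulate page by page.
 */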
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	return 0;
}

static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	/* nada */
}
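
/*
 * Allocate the whole group's worth of pages as a single high-order
 * block, point each page back at the chunk for address translation,
 * then mark the entire chunk populated under pcpu_lock.
 */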
static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
					    gfp_t gfp)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	unsigned long flags;
	int i;

	chunk = pcpu_alloc_chunk(type, gfp);
	if (!chunk)
		return NULL;

	pages = alloc_pages(gfp, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	chunk->base_addr = page_address(pages);

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_populated(chunk, 0, nr_pages);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}
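
/* Undo pcpu_create_chunk(): free the high-order block, then the chunk. */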
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}
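
/*
 * Chunk memory comes straight from the page allocator and lives in the
 * kernel's linear mapping, so a plain virt_to_page() is enough.
 */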
static struct page *pcpu_addr_to_page(void *addr)
{
	return virt_to_page(addr);
}
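
/*
 * Sanity-check the first-chunk layout against percpu-km's limits. As a
 * worked example of the waste arithmetic: with 4 units of 3 pages each,
 * nr_pages = 12 and roundup_pow_of_two(12) = 16, so each chunk carries
 * 4 unused pages and the warning below fires.
 */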
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	size_t nr_pages, alloc_pages;

	/* all units must be in a single group */
	if (ai->nr_groups != 1) {
		pr_crit("can't handle more than one group\n");
		return -EINVAL;
	}

	nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
	alloc_pages = roundup_pow_of_two(nr_pages);

	if (alloc_pages > nr_pages)
		pr_warn("wasting %zu pages per chunk\n",
			alloc_pages - nr_pages);

	return 0;
}