/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_PERCPU_INTERNAL_H
#define _MM_PERCPU_INTERNAL_H

#include <linux/types.h>
#include <linux/percpu.h>

/*
 * pcpu_block_md is the metadata block struct.
 * Each chunk's bitmap is split into a number of full blocks.
 * All units are in terms of bits.
 *
 * The scan hint is the largest known contiguous area before the contig hint.
 * It is not necessarily the actual largest contig hint though. There is an
 * invariant that the scan_hint_start > contig_hint_start iff
 * scan_hint == contig_hint. This is necessary because when scanning forward,
 * we don't know if a new contig hint would be better than the current one.
 */
struct pcpu_block_md {
	int	scan_hint;	/* scan hint for block */
	int	scan_hint_start; /* block relative starting
				    position of the scan hint */
	int	contig_hint;	/* contig hint for block */
	int	contig_hint_start; /* block relative starting
				      position of the contig hint */
	int	left_free;	/* size of free space along
				   the left side of the block */
	int	right_free;	/* size of free space along
				   the right side of the block */
	int	first_free;	/* block position of first free */
	int	nr_bits;	/* total bits responsible for */
};
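
/*
 * Reader's illustration (not part of the original source): in a block of
 * nr_bits = 1024 where only bits [0, 100) and [200, 500) are free,
 * first_free = 0, left_free = 100, right_free = 0, and the largest free
 * run is the contig hint (contig_hint = 300, contig_hint_start = 200).
 * The smaller 100-bit run seen earlier in the scan may be kept as the
 * scan hint (scan_hint = 100, scan_hint_start = 0), which is consistent
 * with the invariant above since scan_hint != contig_hint and
 * scan_hint_start < contig_hint_start.
 */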

struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int			nr_alloc;	/* # of allocations */
	size_t			max_alloc_size; /* largest allocation size */
#endif

	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_bytes;	/* free bytes in the chunk */
	struct pcpu_block_md	chunk_md;
	void			*base_addr;	/* base address of this chunk */

	unsigned long		*alloc_map;	/* allocation map */
	unsigned long		*bound_map;	/* boundary map */
	struct pcpu_block_md	*md_blocks;	/* metadata blocks */

	void			*data;		/* chunk data */
	bool			immutable;	/* no [de]population allowed */
	bool			isolated;	/* isolated from active chunk
						   slots */
	int			start_offset;	/* the overlap with the previous
						   region to have a page aligned
						   base_addr */
	int			end_offset;	/* additional area required to
						   have the region end page
						   aligned */
#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup	**obj_cgroups;	/* vector of object cgroups */
#endif

	int			nr_pages;	/* # of pages served by this chunk */
	int			nr_populated;	/* # of populated pages */
	int			nr_empty_pop_pages; /* # of empty populated pages */
	unsigned long		populated[];	/* populated bitmap */
};

extern spinlock_t pcpu_lock;

extern struct list_head *pcpu_chunk_lists;
extern int pcpu_nr_slots;
extern int pcpu_sidelined_slot;
extern int pcpu_to_depopulate_slot;
extern int pcpu_nr_empty_pop_pages;

extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;

/**
 * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bitmap blocks used.
 */
static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
{
	return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
}
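
/*
 * Reader's example (an assumption, not part of the original source):
 * assuming PCPU_BITMAP_BLOCK_SIZE is defined as PAGE_SIZE in
 * <linux/percpu.h>, a chunk serving 8 pages gets
 * 8 * PAGE_SIZE / PAGE_SIZE = 8 metadata blocks, i.e. one pcpu_block_md
 * per page.
 */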

/**
 * pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
 * @pages: number of physical pages
 *
 * This conversion is from physical pages to the number of bits
 * required in the bitmap.
 */
static inline int pcpu_nr_pages_to_map_bits(int pages)
{
	return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
}
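
/*
 * Reader's example (an assumption, not part of the original source):
 * assuming 4KiB pages and a PCPU_MIN_ALLOC_SIZE of 4 bytes, one page
 * needs 4096 / 4 = 1024 bits in the allocation map.
 */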

/**
 * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bits in the bitmap.
 */
static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
{
	return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
}

/**
 * pcpu_obj_full_size - helper to calculate size of each accounted object
 * @size: size of area to allocate in bytes
 *
 * For each accounted object there is an extra space which is used to store
 * obj_cgroup membership. Charge it too.
 */
static inline size_t pcpu_obj_full_size(size_t size)
{
	size_t extra_size = 0;

#ifdef CONFIG_MEMCG_KMEM
	extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
#endif

	return size * num_possible_cpus() + extra_size;
}
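
/*
 * Reader's example (an assumption, not part of the original source): with
 * CONFIG_MEMCG_KMEM enabled, 8-byte pointers, a PCPU_MIN_ALLOC_SIZE of 4
 * and 4 possible CPUs, a 64-byte request is charged
 * 64 / 4 * 8 = 128 bytes of obj_cgroup bookkeeping plus 64 * 4 = 256 bytes
 * of per-CPU data, i.e. 384 bytes in total.
 */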

#ifdef CONFIG_PERCPU_STATS

#include <linux/spinlock.h>

struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};

extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;

/*
 * For debug purposes. We don't care about the flexible array.
 */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
	memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));

	/* initialize min_alloc_size to unit_size */
	pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
}

/*
 * pcpu_stats_area_alloc - increment area allocation stats
 * @chunk: the location of the area being allocated
 * @size: size of area to allocate in bytes
 *
 * CONTEXT:
 * pcpu_lock.
 */
static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
	lockdep_assert_held(&pcpu_lock);

	pcpu_stats.nr_alloc++;
	pcpu_stats.nr_cur_alloc++;
	pcpu_stats.nr_max_alloc =
		max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
	pcpu_stats.min_alloc_size =
		min(pcpu_stats.min_alloc_size, size);
	pcpu_stats.max_alloc_size =
		max(pcpu_stats.max_alloc_size, size);

	chunk->nr_alloc++;
	chunk->max_alloc_size = max(chunk->max_alloc_size, size);
}

/*
 * pcpu_stats_area_dealloc - decrement allocation stats
 * @chunk: the location of the area being deallocated
 *
 * CONTEXT:
 * pcpu_lock.
 */
static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	pcpu_stats.nr_dealloc++;
	pcpu_stats.nr_cur_alloc--;

	chunk->nr_alloc--;
}

/*
 * pcpu_stats_chunk_alloc - increment chunk stats
 */
static inline void pcpu_stats_chunk_alloc(void)
{
	unsigned long flags;
	spin_lock_irqsave(&pcpu_lock, flags);

	pcpu_stats.nr_chunks++;
	pcpu_stats.nr_max_chunks =
		max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);

	spin_unlock_irqrestore(&pcpu_lock, flags);
}

/*
 * pcpu_stats_chunk_dealloc - decrement chunk stats
 */
static inline void pcpu_stats_chunk_dealloc(void)
{
	unsigned long flags;
	spin_lock_irqsave(&pcpu_lock, flags);

	pcpu_stats.nr_chunks--;

	spin_unlock_irqrestore(&pcpu_lock, flags);
}

#else

static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}

#endif /* !CONFIG_PERCPU_STATS */

#endif