// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save the
 * allocation and free stacks for each object, but storing two stack traces
 * per object requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per
 * object for that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them one after
 * another in a contiguous block of memory.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

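/*
 * Example usage (an illustrative sketch, not part of this file; the 64-entry
 * buffer is an arbitrary size): a debugging tool typically captures a trace
 * with stack_trace_save() and keeps only the handle returned by the depot:
 *
 *	unsigned long entries[64];
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 */
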
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};

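/*
 * Worked example for the handle layout above (assuming the common PAGE_SHIFT
 * of 12 and a 32-bit depot_stack_handle_t): STACK_ALLOC_OFFSET_BITS =
 * 2 + 12 - 4 = 10 and STACK_ALLOC_INDEX_BITS = 32 - 1 - 10 = 21, so a handle
 * packs a 21-bit slab index, a 10-bit offset in 16-byte units and one "valid"
 * bit, with the number of slabs further capped at STACK_ALLOC_SLABS_CAP
 * (8192).
 */
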
struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	depot_offset += required_size;

	return stack;
}

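/*
 * Sizing example for depot_alloc_stack() above (assuming a typical 64-bit
 * build with no debug padding): for a 16-frame trace, struct_size() yields
 * 24 bytes of struct stack_record header plus 16 * 8 bytes of entries =
 * 152 bytes, which ALIGN() rounds up to 160 bytes at the
 * 1 << STACK_ALLOC_ALIGN = 16-byte granularity, so one 16 KiB slab holds
 * roughly a hundred such records.
 */
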
#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

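/*
 * Size note (assuming the default CONFIG_STACK_HASH_ORDER of 20 and 64-bit
 * pointers): the hash table declared below has 1 << 20 buckets, i.e. 8 MiB
 * of struct stack_record pointers, which is why the "stack_depot_disable"
 * boot parameter below exists.
 */
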
static bool stack_depot_disable;
static struct stack_record **stack_table;

static int __init is_stack_depot_disabled(char *str)
{
	int ret;

	ret = kstrtobool(str, &stack_depot_disable);
	if (!ret && stack_depot_disable) {
		pr_info("Stack Depot is disabled\n");
		stack_table = NULL;
	}
	return 0;
}
early_param("stack_depot_disable", is_stack_depot_disabled);

int __init stack_depot_init(void)
{
	if (!stack_depot_disable) {
		size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
		int i;

		stack_table = memblock_alloc(size, size);
		if (!stack_table) {
			/* Guard against early-boot allocation failure. */
			pr_err("Stack Depot hash table allocation failed, disabling\n");
			stack_depot_disable = true;
			return -ENOMEM;
		}
		for (i = 0; i < STACK_HASH_SIZE; i++)
			stack_table[i] = NULL;
	}
	return 0;
}

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:	Stack depot handle which was returned from
 *		stack_depot_save().
 * @entries:	Pointer to store the entries address
 *
 * Return: The number of trace entries for this stack.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

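/*
 * Example (an illustrative sketch): turning a stored handle back into a
 * printable trace with the generic stacktrace helpers:
 *
 *	unsigned long *entries;
 *	unsigned int nr_entries;
 *
 *	nr_entries = stack_depot_fetch(handle, &entries);
 *	stack_trace_print(entries, nr_entries, 0);
 */
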
/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	if (unlikely(nr_entries == 0) || stack_depot_disable)
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be
	 * initialized. If so, allocate the memory - we won't be able to do
	 * that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

unsigned int filter_irq_stacks(unsigned long *entries,
			       unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);
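
/*
 * Example (an illustrative sketch): a caller that only wants the portion of
 * an interrupt-time trace above the IRQ entry point can trim it before
 * deduplicating:
 *
 *	nr_entries = filter_irq_stacks(entries, nr_entries);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 */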