// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
 * and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot; they are stored one after another
 * in a contiguous memory allocation.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
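
/*
 * A minimal usage sketch (illustrative only, not part of the depot itself):
 * saving the same trace twice yields the same handle, which is what the
 * deduplication described above buys us. The 64-frame buffer is an arbitrary
 * choice for this example.
 *
 *	unsigned long entries[64];
 *	unsigned int nr_entries;
 *	depot_stack_handle_t h1, h2;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	h1 = stack_depot_save(entries, nr_entries, GFP_KERNEL);
 *	h2 = stack_depot_save(entries, nr_entries, GFP_KERNEL);
 *
 * Here h1 == h2: the second call found the record stored by the first.
 */
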
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
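
/*
 * Worked example of the handle bit budget, assuming 4 KiB pages
 * (PAGE_SHIFT == 12) and a 32-bit depot_stack_handle_t:
 *
 *	STACK_ALLOC_OFFSET_BITS = 2 + 12 - 4  = 10
 *	STACK_ALLOC_INDEX_BITS  = 32 - 1 - 10 = 21
 *
 * 2^21 possible slab indexes exceed STACK_ALLOC_SLABS_CAP, so the 8192-slab
 * cap applies. Offsets are stored in 16-byte units (1 << STACK_ALLOC_ALIGN),
 * so 10 offset bits address exactly one 16 KiB slab, and the depot tops out
 * at 8192 * 16 KiB = 128 MiB of stack storage.
 */
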
/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	depot_offset += required_size;

	return stack;
}

#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c
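
/*
 * Rough footprint, under stated assumptions: with CONFIG_STACK_HASH_ORDER
 * at its usual maximum of 20 and 8-byte pointers, the bucket array is
 * (1 << 20) * 8 bytes = 8 MiB, allocated once at boot by stack_depot_init()
 * below.
 */
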
static bool stack_depot_disable;
static struct stack_record **stack_table;

static int __init is_stack_depot_disabled(char *str)
{
	int ret;

	ret = kstrtobool(str, &stack_depot_disable);
	if (!ret && stack_depot_disable) {
		pr_info("Stack Depot is disabled\n");
		stack_table = NULL;
	}
	return 0;
}
early_param("stack_depot_disable", is_stack_depot_disabled);

int __init stack_depot_init(void)
{
	if (!stack_depot_disable) {
		size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
		int i;

		stack_table = memblock_alloc(size, size);
		for (i = 0; i < STACK_HASH_SIZE; i++)
			stack_table[i] = NULL;
	}
	return 0;
}

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}
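
/*
 * Note on the length argument above: jhash2() counts u32 words, so on a
 * 64-bit kernel a trace of N frames hashes 2 * N words (a 16-frame trace
 * hashes 32 words); on a 32-bit kernel it is N words.
 */
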
/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					     unsigned long *entries, int size,
					     u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle: Stack depot handle which was returned from
 *	stack_depot_save().
 * @entries: Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
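
/*
 * Illustrative fetch-side pattern (not part of this file): turning a saved
 * handle back into a printable trace with the stack_trace_print() helper
 * from <linux/stacktrace.h>.
 *
 *	unsigned long *entries;
 *	unsigned int nr_entries;
 *
 *	nr_entries = stack_depot_fetch(handle, &entries);
 *	if (nr_entries)
 *		stack_trace_print(entries, nr_entries, 0);
 */
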
/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Return: The handle of the stack struct stored in depot
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	if (unlikely(nr_entries == 0) || stack_depot_disable)
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab need to be initialized.
	 * If so, allocate the memory - we won't be able to do that under the
	 * lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);
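
/*
 * Illustrative caller pattern (not part of this file): stack_depot_save()
 * returns 0 when the depot is full or the slab preallocation fails, so a
 * zero handle must be treated as "no trace recorded", not as a valid id.
 *
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 *	if (!handle)
 *		pr_debug("stack depot full or out of memory\n");
 */
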
static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

unsigned int filter_irq_stacks(unsigned long *entries,
					     unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);
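
/*
 * Illustrative use, mirroring callers such as KASAN: trim everything below
 * the IRQ entry point before saving, so traces taken from the same interrupt
 * handler hash to a single record instead of one per interrupted function.
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	nr_entries = filter_irq_stacks(entries, nr_entries);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 */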