/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them one after
 * another in a contiguous memory allocation. A usage sketch follows this
 * header.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 */
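
/*
 * A minimal usage sketch, kept out of compilation with #if 0. The helper
 * name (example_record_stack) and the fixed 16-entry buffer are
 * hypothetical; the sketch only illustrates the intended round trip:
 * capture a trace, store it once, keep the 32-bit handle.
 */
#if 0
static depot_stack_handle_t example_record_stack(gfp_t flags)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 0,
	};

	save_stack_trace(&trace);		/* fill entries[] */
	return depot_save_stack(&trace, flags);	/* 0 means "not stored" */
}
#endif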

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 1024
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};
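
/*
 * Worked example of the bit budget above, assuming PAGE_SHIFT == 12 and a
 * 32-bit depot_stack_handle_t: a slab is 4 pages (16KB) and records are
 * 16-byte aligned, so an offset needs 2 + 12 - 4 = 10 bits. One bit is
 * reserved so that a valid handle is never zero, leaving 32 - 1 - 10 = 21
 * bits for the slab index, which STACK_ALLOC_SLABS_CAP then limits to
 * 1024 slabs (16MB of stack storage in total).
 */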

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[1];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_SPINLOCK(depot_lock);
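
/*
 * The variables above form the bump-allocator state: depot_index and
 * depot_offset locate the next free byte inside stack_slabs[], while
 * next_slab_inited records whether stack_slabs[depot_index + 1] is already
 * backed by memory, telling depot_save_stack() when it must preallocate
 * pages before taking depot_lock.
 */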

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
	} else {
		stack_slabs[depot_index + 1] = *prealloc;
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in depot_save_stack().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	*prealloc = NULL;
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	int required_size = offsetof(struct stack_record, entries) +
		sizeof(unsigned long) * size;
	struct stack_record *stack;

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in depot_save_stack() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, size * sizeof(unsigned long));
	depot_offset += required_size;

	return stack;
}

#define STACK_HASH_ORDER 20
#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

static struct stack_record *stack_table[STACK_HASH_SIZE] = {
	[0 ...	STACK_HASH_SIZE - 1] = NULL
};
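
/*
 * With STACK_HASH_ORDER == 20 the table above has 2^20 buckets; at 8 bytes
 * per pointer on a 64-bit kernel that is a fixed 8MB, traded for fast
 * lookups of frequently repeated traces.
 */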

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      size * sizeof(unsigned long) / sizeof(u32),
		      STACK_HASH_SEED);
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !memcmp(entries, found->entries,
			    size * sizeof(unsigned long))) {
			return found;
		}
	}
	return NULL;
}

void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
{
	union handle_parts parts = { .handle = handle };
	void *slab = stack_slabs[parts.slabindex];
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack = slab + offset;

	trace->nr_entries = trace->max_entries = stack->size;
	trace->entries = stack->entries;
	trace->skip = 0;
}
EXPORT_SYMBOL_GPL(depot_fetch_stack);
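
/*
 * Consumer-side sketch, also kept out of compilation. A report path would
 * turn a stored handle back into a printable trace; the helper name is
 * hypothetical.
 */
#if 0
static void example_print_stack(depot_stack_handle_t handle)
{
	struct stack_trace trace;

	depot_fetch_stack(handle, &trace);
	print_stack_trace(&trace, 0);	/* second argument: indentation */
}
#endif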

/**
 * depot_save_stack - save stack in a stack depot.
 * @trace - the stacktrace to save.
 * @alloc_flags - flags for allocating additional memory if required.
 *
 * Returns the handle of the stack struct stored in depot.
 */
depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
				      gfp_t alloc_flags)
{
	u32 hash;
	depot_stack_handle_t retval = 0;
	struct stack_record *found = NULL, **bucket;
	unsigned long flags;
	struct page *page = NULL;
	void *prealloc = NULL;

	if (unlikely(trace->nr_entries == 0))
		goto fast_exit;

	hash = hash_stack(trace->entries, trace->nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), trace->entries,
			   trace->nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be
	 * initialized. If so, allocate the memory - we won't be able to do
	 * that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in
		 * atomic contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(trace->entries, trace->nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(depot_save_stack);