// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond))                                          \
			WRITE_ONCE(kfence_enabled, false);                     \
		__cond;                                                        \
	})

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;

static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	if (!num) /* Using 0 to indicate KFENCE is disabled. */
		WRITE_ONCE(kfence_enabled, false);
	else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */

	*((unsigned long *)kp->arg) = num;
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
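/*
 * Usage note: e.g. booting with "kfence.sample_interval=100" samples an
 * allocation roughly every 100 milliseconds; writing 0 to
 * /sys/module/kfence/parameters/sample_interval disables KFENCE at runtime.
 * Re-enabling after boot is rejected by param_set_sample_interval() above.
 */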

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __ro_after_init;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

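/*
 * Pool layout (set up in kfence_init_pool() below): 2 leading guard pages,
 * then one object page followed by one guard page per object. Object i is
 * therefore backed by pool page 2 * (i + 1), which is the mapping that
 * addr_to_metadata() and metadata_to_pageaddr() rely on.
 */
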
/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

/*
 * The static key to set up a KFENCE allocation; or if static keys are not used
 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_SKIP_INCOMPAT,
	KFENCE_COUNTER_SKIP_CAPACITY,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED] = "currently allocated",
	[KFENCE_COUNTER_ALLOCS] = "total allocations",
	[KFENCE_COUNTER_FREES] = "total frees",
	[KFENCE_COUNTER_ZOMBIES] = "zombie allocations",
	[KFENCE_COUNTER_BUGS] = "total bugs",
	[KFENCE_COUNTER_SKIP_INCOMPAT] = "skipped allocations (incompatible)",
	[KFENCE_COUNTER_SKIP_CAPACITY] = "skipped allocations (capacity)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	/* The checks do not affect performance; only called from slow-paths. */

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * May be an invalid index if called with an address at the edge of
	 * __kfence_pool, in which case we would report an "invalid access"
	 * error.
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
		      unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	if (stack_entries) {
		memcpy(track->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		/*
		 * Skip over 1 (this) function; noinline ensures we do not
		 * accidentally skip over the caller by never inlining.
		 */
		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	}
	track->num_stack_entries = num_stack_entries;
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

	/*
	 * Pairs with READ_ONCE() in
	 * kfence_shutdown_cache(),
	 * kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

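/*
 * Note: KFENCE_CANARY_PATTERN() (from kfence.h) derives the expected canary
 * value from the low bits of the byte's address, so canary data that is
 * copied or shifted to a different address no longer matches and is flagged
 * by check_canary_byte().
 */
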
/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
	kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
			    KFENCE_ERROR_CORRUPTION);
	return false;
}

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	lockdep_assert_held(&meta->lock);

	/*
	 * We'll iterate over each canary byte per-side until fn() returns
	 * false. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
				  unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct page *page;
	void *addr;

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
		return NULL;
	}

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization, prandom_u32_max()
	 * will always return zero. We still benefit from enabling KFENCE as early
	 * as possible, even when the RNG is not yet available, as this will allow
	 * KFENCE to detect bugs due to earlier allocations. The only downside is
	 * that the out-of-bounds accesses detected are deterministic for such
	 * allocations.
	 */
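	/*
	 * A left-aligned object borders the preceding guard page, so accesses
	 * below the object fault immediately; a right-aligned object borders
	 * the following guard page and catches accesses past its end.
	 * Randomizing the side allows both kinds of out-of-bounds bugs to be
	 * detected via page faults.
	 */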
	if (prandom_u32_max(2)) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	for_each_canary(meta, set_canary_byte);

	/* Set required struct page fields. */
	page = virt_to_page(meta->addr);
	page->slab_cache = cache;
	if (IS_ENABLED(CONFIG_SLUB))
		page->objects = 1;
	if (IS_ENABLED(CONFIG_SLAB))
		page->s_mem = addr;

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	/* Memory initialization. */

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
		memzero_explicit(addr, meta->size);

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

static bool __init kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!__kfence_pool)
		return false;

	if (!arch_kfence_init_pool())
		goto err;

	pages = virt_to_page(addr);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
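		/*
		 * Skip the extra leading guard page (i == 0) and all
		 * odd-indexed (guard) pages; only even pages >= 2 back objects.
		 */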
		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			goto err;

		__SetPageSlab(&pages[i]);
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			goto err;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto err;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * The pool is live and will never be deallocated from this point on.
	 * Remove the pool object from the kmemleak object tree, as it would
	 * otherwise overlap with allocations returned by kfence_alloc(), which
	 * are registered with kmemleak through the slab post-alloc hook.
	 */
	kmemleak_free(__kfence_pool);

	return true;

err:
	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations object_seqops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
	return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init kfence_debugfs_init(void)
{
	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);

/* === Allocation Gate Timer ================================================ */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	if (sysctl_hung_task_timeout_secs) {
		/*
		 * During low activity with no allocations we might wait a
		 * while; let's avoid the hung task warning.
		 */
		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
					sysctl_hung_task_timeout_secs * HZ / 2);
	} else {
		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
	}

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

void __init kfence_init(void)
{
	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
		static_branch_enable(&kfence_allocation_key);
	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

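/*
 * Note: __kfence_alloc() is reached via the kfence_alloc() wrapper (see
 * include/linux/kfence.h), which the slab allocators call on allocation;
 * kfence_allocation_key gates that call, so the common, non-sampled case
 * avoids this slower path entirely.
 */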
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
	size_t num_stack_entries;

	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	if (atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries);
}

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock we can still
		 * report this as an OOB -- the report will simply show the
		 * stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}