mm/kasan/common.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

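/*
 * Capture the current call stack, truncate it at the IRQ entry frame (so
 * interrupted call chains don't produce endless unique traces), and store
 * it in the stack depot, returning a compact handle for later reporting.
 */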
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        nr_entries = filter_irq_stacks(entries, nr_entries);
        return stack_depot_save(entries, nr_entries, flags);
}

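/*
 * Record the current pid and stack trace in a kasan_track slot; this is
 * what later shows up as the "Allocated by"/"Freed by" information in
 * KASAN reports.
 */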
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = kasan_save_stack(flags);
}

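/*
 * kasan_depth is a per-task nesting counter: the report path only produces
 * bug reports while it is zero, so callers bracket accesses to memory that
 * may legitimately be poisoned. A typical caller pattern (sketch, not from
 * this file):
 *
 *        kasan_disable_current();
 *        ...touch possibly-poisoned memory...
 *        kasan_enable_current();
 */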
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void kasan_unpoison_range(const void *address, size_t size)
{
        unpoison_range(address, size);
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        unpoison_range(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address. Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        unpoison_range(base, watermark - base);
}

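/*
 * Page allocator hook: pick one tag for the whole high-order allocation,
 * store it in every constituent struct page, and unpoison the mapped range.
 */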
void kasan_alloc_pages(struct page *page, unsigned int order)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return;

        tag = random_tag();
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
        unpoison_range(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                poison_range(page_address(page),
                             PAGE_SIZE << order,
                             KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        if (!IS_ENABLED(CONFIG_KASAN_GENERIC))
                return 0;

        return
                object_size <= 64 - 16 ? 16 :
                object_size <= 128 - 32 ? 32 :
                object_size <= 512 - 64 ? 64 :
                object_size <= 4096 - 128 ? 128 :
                object_size <= (1 << 14) - 256 ? 256 :
                object_size <= (1 << 15) - 512 ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
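/*
 * Examples of the mapping above: a 32-byte object gets a 16-byte redzone,
 * a 100-byte object gets a 64-byte redzone, and a 4000-byte object gets a
 * 256-byte redzone.
 */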
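/*
 * Cache creation hook: reserve room after the object for allocation
 * metadata (and, for generic KASAN, free metadata for caches where it
 * cannot be stored in the freed object itself), then grow *size so the
 * adaptive redzone fits. If the metadata no longer fits once the size is
 * clamped to KMALLOC_MAX_SIZE, the size is reverted and SLAB_KASAN is not
 * set for this cache.
 */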
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags)
{
        unsigned int orig_size = *size;
        unsigned int redzone_size;
        int redzone_adjust;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
            (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
             cache->object_size < sizeof(struct kasan_free_meta))) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }

        redzone_size = optimal_redzone(cache->object_size);
        redzone_adjust = redzone_size - (*size - cache->object_size);
        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
                      max(*size, cache->object_size + redzone_size));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
            *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
               (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
                                              const void *object)
{
        return (void *)reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
                                            const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)reset_tag(object) + cache->kasan_info.free_meta_offset;
}

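/*
 * Called when a new slab page is allocated: reset the KASAN tag stored in
 * each struct page and poison the whole page range; individual objects are
 * unpoisoned later as they are handed out.
 */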
void kasan_poison_slab(struct page *page)
{
        unsigned long i;

        for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
        poison_range(page_address(page), page_size(page),
                     KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        unpoison_range(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        poison_range(object,
                     round_up(cache->object_size, KASAN_GRANULE_SIZE),
                     KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next to
 *    each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
                     bool init, bool keep_tag)
{
        /*
         * 1. When an object is kmalloc()'ed, two hooks are called:
         *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
         *    tag only in the first one.
         * 2. We reuse the same tag for krealloc'ed objects.
         */
        if (keep_tag)
                return get_tag(object);

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : random_tag();

        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
        /* For SLAB assign tags based on the object index in the freelist. */
        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
        /*
         * For SLUB assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? random_tag() : get_tag(object);
#endif
}

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
                                        const void *object)
{
        struct kasan_alloc_meta *alloc_meta;

        if (!(cache->flags & SLAB_KASAN))
                return (void *)object;

        alloc_meta = kasan_get_alloc_meta(cache, object);
        __memset(alloc_meta, 0, sizeof(*alloc_meta));

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
                object = set_tag(object, assign_tag(cache, object, true, false));

        return (void *)object;
}

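/*
 * Common free path: reject pointers that don't point to the start of an
 * object, skip poisoning for SLAB_TYPESAFE_BY_RCU caches (legal use after
 * free within the grace period), and detect double-frees via the shadow
 * state before poisoning the object and recording the free stack. With
 * generic KASAN the object is additionally placed in the quarantine and
 * true is returned so the slab allocator does not release it immediately.
 */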
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                              unsigned long ip, bool quarantine)
{
        u8 tag;
        void *tagged_object;
        unsigned long rounded_up_size;

        tag = get_tag(object);
        tagged_object = object;
        object = reset_tag(object);

        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        if (check_invalid_free(tagged_object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
        poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);

        if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
            unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        kasan_set_free_info(cache, object, tag);

        quarantine_put(cache, object);

        return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
        return __kasan_slab_free(cache, object, ip, true);
}

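/*
 * Common allocation path for kasan_slab_alloc() and kasan_kmalloc(): the
 * first 'size' bytes of the object are unpoisoned and the remainder of the
 * in-object area (up to cache->object_size, rounded to KASAN_GRANULE_SIZE)
 * is poisoned as a redzone, so accesses past the requested size are caught.
 * The allocation stack is recorded in the object's alloc metadata.
 */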
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                             size_t size, gfp_t flags, bool keep_tag)
{
        unsigned long redzone_start;
        unsigned long redzone_end;
        u8 tag = 0xff;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        redzone_start = round_up((unsigned long)(object + size),
                                 KASAN_GRANULE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                               KASAN_GRANULE_SIZE);

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
                tag = assign_tag(cache, object, false, keep_tag);

        /* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
        unpoison_range(set_tag(object, tag), size);
        poison_range((void *)redzone_start, redzone_end - redzone_start,
                     KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);

        return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
                                     gfp_t flags)
{
        return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                  size_t size, gfp_t flags)
{
        return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

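/*
 * Large kmalloc() allocations are backed directly by the page allocator:
 * unpoison the requested 'size' bytes and poison the tail of the (compound)
 * page as a redzone.
 */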
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
                                        gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                 KASAN_GRANULE_SIZE);
        redzone_end = (unsigned long)ptr + page_size(page);

        unpoison_range(ptr, size);
        poison_range((void *)redzone_start, redzone_end - redzone_start,
                     KASAN_PAGE_REDZONE);

        return (void *)ptr;
}

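/*
 * krealloc() may operate on either a slab object or a page_alloc-backed
 * buffer; dispatch on PageSlab() and reuse the matching allocation hook,
 * keeping the existing tag.
 */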
void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                return kasan_kmalloc_large(object, size, flags);
        else
                return __kasan_kmalloc(page->slab_cache, object, size,
                                       flags, true);
}

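/*
 * Poison memory that is being freed through kfree(): page_alloc-backed
 * buffers must be freed at the page start (anything else is reported as an
 * invalid free), while slab objects go through __kasan_slab_free() without
 * being quarantined here.
 */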
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page))) {
                if (ptr != page_address(page)) {
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
                poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
        } else {
                __kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (ptr != page_address(virt_to_head_page(ptr)))
                kasan_report_invalid_free(ptr, ip);
        /* The object will be poisoned by page_alloc. */
}