// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}
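/*
 * The handle returned by kasan_save_stack() refers to a deduplicated
 * copy of the trace in the stack depot, which keeps the per-object
 * tracks stored below small.
 */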
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
void kasan_unpoison_range(const void *address, size_t size)
{
	unpoison_range(address, size);
}
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	unpoison_range(base, size);
}
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}
/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	unpoison_range(base, watermark - base);
}
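/*
 * Note: the mask trick above relies on kernel stacks being THREAD_SIZE
 * aligned, so clearing the low bits of any stack address yields the
 * stack base.
 */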
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	unpoison_range(page_address(page), PAGE_SIZE << order);
}
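/*
 * Every page of the high-order block above gets the same random tag,
 * so a pointer derived from any page in the block matches on access
 * under the tag-based modes.
 */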
void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		poison_range(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}
/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
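/*
 * Example: a 128-byte object falls through the first two cases
 * (128 > 64 - 16 and 128 > 128 - 32) and matches 128 <= 512 - 64,
 * so it gets a 64-byte redzone.
 */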
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}
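/*
 * Resulting per-object layout in the cache, roughly:
 *   [object | kasan_alloc_meta | kasan_free_meta (optional) | padding]
 * with the metadata living inside the redzone area past object_size.
 */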
size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	return (void *)reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)reset_tag(object) + cache->kasan_info.free_meta_offset;
}
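/*
 * Both getters strip the pointer tag via reset_tag() first: the metadata
 * offsets are relative to the untagged (canonical) object address.
 */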
void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	poison_range(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE);
}
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	unpoison_range(object, cache->object_size);
}
void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	poison_range(object,
			round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE);
}
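/*
 * Poisoning works at KASAN_GRANULE_SIZE granularity, hence the round_up
 * of object_size before the whole object is marked as redzone.
 */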
/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}
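/*
 * Example flow for kmalloc(): kasan_slab_alloc() runs first with
 * keep_tag == false and picks the tag; kasan_kmalloc() then runs with
 * keep_tag == true and simply reuses it.
 */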
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	__memset(alloc_meta, 0, sizeof(*alloc_meta));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
		object = set_tag(object, assign_tag(cache, object, true, false));

	return (void *)object;
}
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (check_invalid_free(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
	poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(cache, object);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}
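/*
 * A true return tells the slab allocator to skip the actual free: the
 * object now sits in the KASAN quarantine and is released later by
 * quarantine_reduce().
 */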
bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_GRANULE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) || IS_ENABLED(CONFIG_KASAN_HW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
	unpoison_range(set_tag(object, tag), size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}
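/*
 * Shadow layout produced above for a kmalloc'ed object:
 *   [accessible, size bytes][redzone up to object_size, granule-aligned]
 * Accesses past 'size' hit KASAN_KMALLOC_REDZONE and are reported.
 */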
void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}
void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	unpoison_range(ptr, size);
	poison_range((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE);

	return (void *)ptr;
}
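/*
 * For page_alloc-backed (large) kmalloc there is no per-object metadata;
 * the redzone simply covers the unused tail of the final page(s).
 */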
void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}
void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}