// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        nr_entries = filter_irq_stacks(entries, nr_entries);
        return stack_depot_save(entries, nr_entries, flags);
}

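/*
 * A kasan_track pairs the pid of the task that touched an object with a
 * stack depot handle obtained via kasan_save_stack(). The depot deduplicates
 * identical traces, so only a compact handle needs to be stored per
 * alloc/free event.
 */
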
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = kasan_save_stack(flags);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void __kasan_unpoison_range(const void *address, size_t size)
{
        unpoison_range(address, size);
}

#if CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        void *base = task_stack_page(task);

        unpoison_range(base, THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address. Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        unpoison_range(base, watermark - base);
}
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
        if (kasan_stack_collection_enabled())
                return SLAB_KASAN;
        return 0;
}

void __kasan_alloc_pages(struct page *page, unsigned int order)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return;

        tag = random_tag();
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
        unpoison_range(page_address(page), PAGE_SIZE << order);
}

void __kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                poison_range(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

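/*
 * Worked example for the policy above: a 128-byte object falls through the
 * first two checks (128 > 64 - 16 and 128 > 128 - 32) and matches
 * 128 <= 512 - 64, so it gets a 64-byte redzone; a 4096-byte object gets a
 * 256-byte one.
 */
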
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                          slab_flags_t *flags)
{
        unsigned int ok_size;
        unsigned int optimal_size;

        /*
         * SLAB_KASAN is used to mark caches as ones that are sanitized by
         * KASAN. Currently this flag is used in two places:
         * 1. In slab_ksize() when calculating the size of the accessible
         *    memory within the object.
         * 2. In slab_common.c to prevent merging of sanitized caches.
         */
        *flags |= SLAB_KASAN;

        if (!kasan_stack_collection_enabled())
                return;

        ok_size = *size;

        /* Add alloc meta into redzone. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /*
         * If alloc meta doesn't fit, don't add it.
         * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
         * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
         * larger sizes.
         */
        if (*size > KMALLOC_MAX_SIZE) {
                cache->kasan_info.alloc_meta_offset = 0;
                *size = ok_size;
                /* Continue, since free meta might still fit. */
        }

        /* Only the generic mode uses free meta or flexible redzones. */
        if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
                cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
                return;
        }

        /*
         * Add free meta into redzone when it's not possible to store
         * it in the object. This is the case when:
         * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
         *    be touched after it was freed, or
         * 2. Object has a constructor, which means it's expected to
         *    retain its content until the next allocation, or
         * 3. Object is too small.
         * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
         */
        if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                ok_size = *size;

                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);

                /* If free meta doesn't fit, don't add it. */
                if (*size > KMALLOC_MAX_SIZE) {
                        cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
                        *size = ok_size;
                }
        }

        /* Calculate size with optimal redzone. */
        optimal_size = cache->object_size + optimal_redzone(cache->object_size);
        /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
        if (optimal_size > KMALLOC_MAX_SIZE)
                optimal_size = KMALLOC_MAX_SIZE;
        /* Use optimal size if the size with added metas is not large enough. */
        if (*size < optimal_size)
                *size = optimal_size;
}

size_t __kasan_metadata_size(struct kmem_cache *cache)
{
        if (!kasan_stack_collection_enabled())
                return 0;
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
               (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
                                              const void *object)
{
        if (!cache->kasan_info.alloc_meta_offset)
                return NULL;
        return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
                                            const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
                return NULL;
        return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif

void __kasan_poison_slab(struct page *page)
{
        unsigned long i;

        for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
        poison_range(page_address(page), page_size(page),
                     KASAN_KMALLOC_REDZONE);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        unpoison_range(object, cache->object_size);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        poison_range(object, cache->object_size, KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
                        bool init, bool keep_tag)
{
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return 0xff;

        /*
         * 1. When an object is kmalloc()'ed, two hooks are called:
         *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
         *    tag only in the first one.
         * 2. We reuse the same tag for krealloc'ed objects.
         */
        if (keep_tag)
                return get_tag(object);

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : random_tag();

        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
        /* For SLAB assign tags based on the object index in the freelist. */
        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
        /*
         * For SLUB assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? random_tag() : get_tag(object);
#endif
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
                                                const void *object)
{
        struct kasan_alloc_meta *alloc_meta;

        if (kasan_stack_collection_enabled()) {
                alloc_meta = kasan_get_alloc_meta(cache, object);
                if (alloc_meta)
                        __memset(alloc_meta, 0, sizeof(*alloc_meta));
        }

        /* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
        object = set_tag(object, assign_tag(cache, object, true, false));

        return (void *)object;
}

static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
                                unsigned long ip, bool quarantine)
{
        u8 tag;
        void *tagged_object;

        tag = get_tag(object);
        tagged_object = object;
        object = kasan_reset_tag(object);

        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        if (check_invalid_free(tagged_object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        poison_range(object, cache->object_size, KASAN_KMALLOC_FREE);

        if (!kasan_stack_collection_enabled())
                return false;

        if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
                return false;

        kasan_set_free_info(cache, object, tag);

        return quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
        return ____kasan_slab_free(cache, object, ip, true);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        /*
         * Even though this function is only called for kmem_cache_alloc and
         * kmalloc backed mempool allocations, those allocations can still be
         * !PageSlab() when the size provided to kmalloc is larger than
         * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
         */
        if (unlikely(!PageSlab(page))) {
                if (ptr != page_address(page)) {
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
                poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
        } else {
                ____kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
}

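/*
 * Note that the slab branch above passes quarantine=false: the element is
 * not returned to the slab allocator but kept by the mempool for later
 * reuse, so the object is only poisoned and not diverted into the
 * quarantine.
 */
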
static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
        struct kasan_alloc_meta *alloc_meta;

        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (alloc_meta)
                kasan_set_track(&alloc_meta->alloc_track, flags);
}

static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags, bool keep_tag)
{
        unsigned long redzone_start;
        unsigned long redzone_end;
        u8 tag;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_GRANULE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_GRANULE_SIZE);
        tag = assign_tag(cache, object, false, keep_tag);

        /* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
        unpoison_range(set_tag(object, tag), size);
        poison_range((void *)redzone_start, redzone_end - redzone_start,
                     KASAN_KMALLOC_REDZONE);

        if (kasan_stack_collection_enabled())
                set_alloc_info(cache, (void *)object, flags);

        return set_tag(object, tag);
}

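/*
 * Example of the redzone math above, assuming the generic mode's 8-byte
 * KASAN_GRANULE_SIZE: for kmalloc(10) served from a 32-byte slot, bytes
 * [0, 10) of the object are unpoisoned and [16, 32) are poisoned as
 * KASAN_KMALLOC_REDZONE; the partial granule [10, 16) is encoded in its
 * shadow byte, so accesses past offset 10 are still caught.
 */
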
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
                                        void *object, gfp_t flags)
{
        return ____kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                        size_t size, gfp_t flags)
{
        return ____kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
                                                gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_GRANULE_SIZE);
        redzone_end = (unsigned long)ptr + page_size(page);

        unpoison_range(ptr, size);
        poison_range((void *)redzone_start, redzone_end - redzone_start,
                     KASAN_PAGE_REDZONE);

        return (void *)ptr;
}

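/*
 * For these page_alloc-backed allocations there is no slab redzone layout:
 * everything between the end of the requested size and the end of the last
 * allocated page becomes the redzone, poisoned as KASAN_PAGE_REDZONE.
 */
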
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                return __kasan_kmalloc_large(object, size, flags);
        else
                return ____kasan_kmalloc(page->slab_cache, object, size,
                                                flags, true);
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (ptr != page_address(virt_to_head_page(ptr)))
                kasan_report_invalid_free(ptr, ip);
        /* The object will be poisoned by kasan_free_pages(). */
}