// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}
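
/*
 * Note: stack_depot_save() deduplicates traces, so repeated allocations
 * from the same call path all reference one depot entry through the same
 * compact handle; this is what keeps per-object alloc/free tracks cheap.
 */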

void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
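/*
 * For example, assuming THREAD_SIZE == 16384 (a power of two, as the mask
 * above requires): a watermark of 0xffffc90000247e38 is rounded down to
 * base = 0xffffc90000244000, and the 0x3e38 bytes from the stack base up
 * to the watermark are unpoisoned.
 */
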
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
	if (kasan_stack_collection_enabled())
		return SLAB_KASAN;
	return 0;
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = kasan_random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}
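
/*
 * All 1 << order pages of the allocation share one random tag, so that a
 * pointer derived from any page in the range matches the per-page tag;
 * e.g. for order == 2, the same tag is stored in all four struct pages.
 */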

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_FREE_PAGE, init);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
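
/*
 * For example: optimal_redzone(48) == 16, optimal_redzone(128) == 64, and
 * optimal_redzone(4096) == 256, so object plus redzone stays within the
 * next power-of-two size class where possible.
 */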

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	/*
	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
	 * KASAN. Currently this flag is used in two places:
	 * 1. In slab_ksize() when calculating the size of the accessible
	 *    memory within the object.
	 * 2. In slab_common.c to prevent merging of sanitized caches.
	 */
	*flags |= SLAB_KASAN;

	if (!kasan_stack_collection_enabled())
		return;

	ok_size = *size;

	/* Add alloc meta into redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	/* Only the generic mode uses free meta or flexible redzones. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		return;
	}

	/*
	 * Add free meta into redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. Object is too small.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}
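
/*
 * The resulting generic-mode layout is roughly (illustrative, assuming a
 * 64-byte object with neither a constructor nor SLAB_TYPESAFE_BY_RCU):
 *
 *   [ object | kasan_alloc_meta | padding up to the optimal size ]
 *
 * with free meta stored inside the freed object itself, since none of the
 * three conditions above apply.
 */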

void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}

size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
	       (cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif

void __kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_KMALLOC_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
			    const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}
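
/*
 * In the tag-based modes the tag lives in the top byte of the pointer
 * (e.g. via arm64 Top Byte Ignore for SW_TAGS); 0xff (KASAN_TAG_KERNEL)
 * is the native "match-all" tag, which is why the generic mode, which
 * does not use tags, simply returns it above.
 */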

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		alloc_meta = kasan_get_alloc_meta(cache, object);
		if (alloc_meta)
			__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				       unsigned long ip, bool quarantine, bool init)
{
	u8 tag;
	void *tagged_object;

	if (!kasan_arch_is_ready())
		return false;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_KMALLOC_FREE, init);

	if (IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)
		return false;

	if (kasan_stack_collection_enabled())
		kasan_set_free_info(cache, object, tag);

	return kasan_quarantine_put(cache, object);
}
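
/*
 * A true return value tells the slab allocator that KASAN took ownership
 * of the object: either the free was invalid and was reported, or the
 * object went into the generic-mode quarantine, which delays its reuse so
 * that later use-after-free accesses still hit poisoned memory.
 */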

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
		       unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}

static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	/*
	 * The object will be poisoned by kasan_free_pages() or
	 * kasan_slab_free_mempool().
	 */

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!PageSlab(page))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
	} else {
		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
	}
}

static void set_alloc_info(struct kmem_cache *cache, void *object,
			   gfp_t flags, bool is_kmalloc)
{
	struct kasan_alloc_meta *alloc_meta;

	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
		return;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
				       void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, false);

	return tagged_object;
}

static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				      const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				 KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
			       KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, true);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}
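
/*
 * Worked example (generic mode, KASAN_GRANULE_SIZE == 8): kmalloc(20)
 * served from the kmalloc-32 cache leaves bytes [0, 20) accessible,
 * partially poisons the tail of granule [16, 24) starting at byte 20,
 * and poisons granule [24, 32) as KASAN_KMALLOC_REDZONE.
 */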

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
				    size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
					  gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_alloc_pages() for
	 * alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				 KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	page = virt_to_head_page(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!PageSlab(page)))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
}

bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report((unsigned long)address, 1, false, ip);
		return false;
	}
	return true;
}
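
/*
 * __kasan_check_byte() backs kasan_check_byte(), which callers such as
 * ksize() use to validate a pointer (reporting a bug if the byte is not
 * accessible) before deliberately touching memory beyond the originally
 * requested size.
 */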