/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

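/*
 * Each shadow byte tracks KASAN_SHADOW_SCALE_SIZE (8) bytes of real
 * memory via the fixed translation
 *
 *	shadow = (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
 *			+ KASAN_SHADOW_OFFSET;
 *
 * A shadow value of 0 means all 8 bytes are accessible, a value n in
 * 1..7 means only the first n bytes are, and negative values are poison
 * markers (KASAN_KMALLOC_REDZONE, KASAN_KMALLOC_FREE, ...).  E.g.
 * unpoisoning 13 bytes above clears the first shadow byte and sets the
 * second one to 13 & KASAN_SHADOW_MASK == 5.
 */
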
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;

	kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

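/*
 * This exploits the encoding directly: for a 1-byte access, the byte's
 * offset within its granule (addr & KASAN_SHADOW_MASK) must lie below
 * the shadow value to fall in the accessible prefix.  A zero shadow
 * byte, the common case, means the whole granule is accessible and no
 * comparison is needed; any negative (poison) value always fails the
 * signed comparison.
 */
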
static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If single shadow byte covers 2-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

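/*
 * This and the 4- and 8-byte variants below share a trick: a single u16
 * load fetches both shadow bytes the access could possibly touch, so the
 * fast path costs one load and one branch.  A non-zero u16 does not by
 * itself mean a bad access -- the second shadow byte may describe the
 * next, untouched granule -- hence the slow path rechecks the last
 * accessed byte and, only when the access crosses a granule boundary,
 * the first shadow byte too.
 */
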
static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If single shadow byte covers 4-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If single shadow byte covers 8-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover a 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}

static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}

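/*
 * memory_is_zero() is effectively a private memchr_inv() for the shadow:
 * unaligned head bytes are scanned individually, the bulk is scanned as
 * 64-bit words, then the tail byte by byte.  It returns the address of
 * the first non-zero shadow byte, or 0 if the whole range is clean.
 */
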
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

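/*
 * A non-zero result from memory_is_zero() is a real violation unless the
 * only non-zero byte is the final shadow byte and the access still ends
 * within that granule's accessible prefix (a partially accessible last
 * granule).  Those are exactly the two conditions rechecked above before
 * the access is declared poisoned.
 */
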
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

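/*
 * The definitions above replace the kernel's normal memset/memmove/memcpy
 * when KASAN is enabled: every range is validated before the real,
 * uninstrumented __mem*() implementation runs.  This catches, e.g., a
 * memcpy() that overruns a heap object even though the copy itself is
 * done in optimized assembly the compiler never instruments.
 */
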
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}

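/*
 * Worked example: a 100-byte object falls through the first two branches
 * (100 > 64 - 16 and 100 > 128 - 32) and matches "object_size <= 512 - 64",
 * giving a 64-byte redzone.  Each threshold is "class size minus redzone",
 * so object plus redzone still fits the intended allocation class.
 */
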
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	int orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

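/*
 * The resulting object layout is:
 *
 *	| object | kasan_alloc_meta | [kasan_free_meta] | redzone |
 *
 * Free metadata is reserved here only when it cannot live inside the dead
 * object itself (RCU-freed caches, caches with constructors, or objects
 * smaller than struct kasan_free_meta).  Note that redzone_adjust only
 * ever grows *size: if the metadata already exceeds the optimal redzone,
 * no extra padding is added.
 */
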
void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

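/*
 * Rather than storing whole stack traces in object metadata,
 * depot_save_stack() deduplicates each trace in the global stackdepot and
 * returns a compact depot_stack_handle_t; set_track() below records that
 * handle together with the current pid.  The ULONG_MAX test strips the
 * end-of-trace marker that save_stack_trace() may append.
 */
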
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
	s8 shadow_byte;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_double_free(cache, object,
				__builtin_return_address(1));
		return true;
	}

	kasan_poison_slab_free(cache, object);

	if (unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}

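/*
 * Returning true above tells the slab allocator not to reuse the object
 * yet: it has been poisoned with KASAN_KMALLOC_FREE and queued in the
 * quarantine, which delays reuse so that use-after-free accesses keep
 * hitting poisoned shadow.  Note the free stack is recorded in the alloc
 * meta's free_track, while the free meta provides the quarantine's list
 * linkage.
 */
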
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);

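/*
 * Worked example: kmalloc(13) from a 32-byte cache with the object at an
 * 8-byte-aligned address p.  kasan_unpoison_shadow(p, 13) yields shadow
 * bytes {0, 5}; redzone_start = round_up(p + 13, 8) = p + 16 and
 * redzone_end = p + 32, so [p + 16, p + 32) becomes KASAN_KMALLOC_REDZONE
 * and any access at p + 13 or beyond is reported.
 */
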
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

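/*
 * Module space is not backed by the shadow populated at early boot, so
 * it is vmalloc'ed on demand here: one shadow byte per
 * KASAN_SHADOW_SCALE_SIZE module bytes, rounded up to whole pages, at
 * the address dictated by the fixed kasan_mem_to_shadow() translation.
 * Setting VM_KASAN on the module's vm_struct lets kasan_free_shadow()
 * below vfree() this shadow when the module area is released.
 */
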
void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)				\
	void __asan_load##size(unsigned long addr)		\
	{							\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}							\
	EXPORT_SYMBOL(__asan_load##size);			\
	__alias(__asan_load##size)				\
	void __asan_load##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_load##size##_noabort);		\
	void __asan_store##size(unsigned long addr)		\
	{							\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}							\
	EXPORT_SYMBOL(__asan_store##size);			\
	__alias(__asan_store##size)				\
	void __asan_store##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

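/*
 * These expansions are the compiler's entry points: with CONFIG_KASAN the
 * compiler precedes every 1/2/4/8/16-byte load or store with a call to
 * the matching __asan_loadN()/__asan_storeN().  Newer GCC versions emit
 * the *_noabort flavour for kernel code; the __alias() above makes both
 * names resolve to the same body.
 */
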
void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_info("WARNING: KASAN doesn't support memory hot-add\n");
	pr_info("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif