/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/* Enable reporting bugs after kasan_disable_current() */
void kasan_enable_current(void)
{
        current->kasan_depth++;
}

/* Disable reporting bugs for current task */
void kasan_disable_current(void)
{
        current->kasan_depth--;
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
        kasan_poison_shadow(address, size, 0);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
                *shadow = size & KASAN_SHADOW_MASK;
        }
}

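/*
 * Worked example (note added for exposition; KASAN_SHADOW_SCALE_SIZE is 8,
 * so KASAN_SHADOW_MASK is 7): kasan_unpoison_shadow(p, 13) with p granule
 * aligned first memsets only the fully covered shadow byte, shadow[0] = 0
 * (all 8 bytes accessible); the tail write then sets shadow[1] = 13 & 7 = 5,
 * meaning only the first 5 bytes of the second granule are accessible.
 */
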
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address.  Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
        const void *sp = __builtin_frame_address(0);
        size_t size = watermark - sp;

        if (WARN_ON(sp > watermark))
                return;
        kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */

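/*
 * Shadow byte semantics relied on below (note added for exposition):
 * 0 means the whole 8-byte granule is accessible, a value 1..7 means only
 * that many leading bytes are accessible, and negative values are poison
 * markers such as KASAN_KMALLOC_REDZONE or KASAN_FREE_PAGE.
 */
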
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
                                                unsigned long size)
{
        u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

        /*
         * An access that crosses an 8-byte (shadow granule) boundary
         * maps into 2 shadow bytes, so we need to check them both.
         */
        if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
                return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

        return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        /* An unaligned 16-byte access maps into 3 shadow bytes. */
        if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                return *shadow_addr || memory_is_poisoned_1(addr + 15);

        return *shadow_addr;
}

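/*
 * Boundary example for memory_is_poisoned_2_4_8() (added for exposition):
 * a 4-byte access with addr & 7 == 6 ends at offset (6 + 3) & 7 == 1,
 * which is < size - 1 == 3, so the access spans two granules and both
 * shadow bytes are checked.
 */
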
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_nonzero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_nonzero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_nonzero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_nonzero(start, (end - start) % 8);
}

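/*
 * Scan strategy above (note added for exposition): check bytes up to the
 * first 8-byte aligned address, then whole u64 words, then the remaining
 * tail. A nonzero word is re-scanned byte-wise so the returned address
 * points at the first nonzero shadow byte.
 */
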
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                case 4:
                case 8:
                        return memory_is_poisoned_2_4_8(addr, size);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}

static __always_inline void check_memory_region_inline(unsigned long addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
{
        if (unlikely(size == 0))
                return;

        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                kasan_report(addr, size, write, ret_ip);
                return;
        }

        if (likely(!memory_is_poisoned(addr, size)))
                return;

        kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
                                size_t size, bool write,
                                unsigned long ret_ip)
{
        check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
        check_memory_region((unsigned long)addr, len, true, _RET_IP_);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memcpy(dest, src, len);
}

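/*
 * Design note (added for exposition): the kernel's mem*() are replaced
 * above so that every bulk fill/copy is bounds-checked, then forwarded to
 * the uninstrumented __memset/__memmove/__memcpy variants that the
 * architecture provides for KASAN builds.
 */
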
void kasan_alloc_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
        int rz =
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
        return rz;
}

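/*
 * Example (added for exposition): a 100-byte object fails both
 * 100 <= 64 - 16 and 100 <= 128 - 32, but satisfies 100 <= 512 - 64,
 * so it gets a 64-byte redzone; a 48-byte object satisfies 48 <= 64 - 16
 * and gets the minimum 16 bytes.
 */
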
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
                        unsigned long *flags)
{
        int redzone_adjust;
        int orig_size = *size;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }
        redzone_adjust = optimal_redzone(cache->object_size) -
                (*size - cache->object_size);

        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
                                        optimal_redzone(cache->object_size)));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
                        *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}

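/*
 * Resulting object layout (sketch, added for exposition):
 *   [ object | kasan_alloc_meta | (kasan_free_meta) | redzone padding ]
 * free_meta is appended only when it cannot live inside the freed object
 * itself (SLAB_TYPESAFE_BY_RCU, a constructor, or an object smaller than
 * struct kasan_free_meta); everything past object_size is poisoned.
 */
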
void kasan_cache_shrink(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
                (ptr >= (unsigned long)&__softirqentry_text_start &&
                 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
        int i;

        if (!trace->nr_entries)
                return;
        for (i = 0; i < trace->nr_entries; i++)
                if (in_irqentry_text(trace->entries[i])) {
                        /* Include the irqentry function into the stack. */
                        trace->nr_entries = i + 1;
                        break;
                }
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = KASAN_STACK_DEPTH,
                .skip = 0
        };

        save_stack_trace(&trace);
        filter_irq_stacks(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        return depot_save_stack(&trace, flags);
}

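/*
 * Rationale for the trimming (note added for exposition): frames below
 * the irq entry point belong to whatever task happened to be interrupted
 * and are unrelated to the report, so dropping them makes stack depot
 * deduplication much more effective.
 */
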
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
        struct kasan_alloc_meta *alloc_info;

        if (!(cache->flags & SLAB_KASAN))
                return;

        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
        kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return;

        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
        s8 shadow_byte;

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
                kasan_report_double_free(cache, object,
                                __builtin_return_address(1));
                return true;
        }

        kasan_poison_slab_free(cache, object);

        if (unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
        quarantine_put(get_free_info(cache, object), cache);
        return true;
}

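/*
 * Double-free check above (note added for exposition): an object in the
 * allocated state has a first shadow byte of 0 or a small positive
 * partial-granule size (< KASAN_SHADOW_SCALE_SIZE). A negative byte
 * (already freed or a redzone) or one >= 8 therefore indicates an invalid
 * or double free.
 */
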
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
                   gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(object, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);

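/*
 * Example (sketch, added for exposition): size == 10 in a cache with
 * object_size == 32 leaves shadow[0] == 0 and shadow[1] == 10 & 7 == 2,
 * then poisons bytes 16..31 (redzone_start rounds object + 10 up to
 * object + 16) with KASAN_KMALLOC_REDZONE.
 */
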
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                kasan_kmalloc_large(object, size, flags);
        else
                kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page)))
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
                kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
        struct page *page = virt_to_page(ptr);

        kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                        KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
                        PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL | __GFP_ZERO,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

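/*
 * Note added for exposition: module mappings live outside the linear
 * mapping, so their shadow (1/8th of the region, rounded up to page size)
 * is vmalloc'ed on demand above; VM_KASAN marks the area so that
 * kasan_free_shadow() releases the shadow together with the module region.
 */
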
static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(global->beg, global->size);

        kasan_poison_shadow(global->beg + aligned_size,
                global->size_with_redzone - aligned_size,
                KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)                                    \
        void __asan_load##size(unsigned long addr)                      \
        {                                                               \
                check_memory_region_inline(addr, size, false, _RET_IP_);\
        }                                                               \
        EXPORT_SYMBOL(__asan_load##size);                               \
        __alias(__asan_load##size)                                      \
        void __asan_load##size##_noabort(unsigned long);                \
        EXPORT_SYMBOL(__asan_load##size##_noabort);                     \
        void __asan_store##size(unsigned long addr)                     \
        {                                                               \
                check_memory_region_inline(addr, size, true, _RET_IP_); \
        }                                                               \
        EXPORT_SYMBOL(__asan_store##size);                              \
        __alias(__asan_store##size)                                     \
        void __asan_store##size##_noabort(unsigned long);               \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

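/*
 * Expansion example (added for exposition): DEFINE_ASAN_LOAD_STORE(4)
 * emits __asan_load4()/__asan_store4() plus exported *_noabort aliases;
 * with outline instrumentation the compiler inserts a call to one of
 * these before every 4-byte memory access it instruments.
 */
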
/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
        /*
         * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
         * by redzones, so we simply round up size to simplify logic.
         */
        kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
                            KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
        kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

#ifdef CONFIG_MEMORY_HOTPLUG
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        struct memory_notify *mem_data = data;
        unsigned long nr_shadow_pages, start_kaddr, shadow_start;
        unsigned long shadow_end, shadow_size;

        nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
        start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
        shadow_size = nr_shadow_pages << PAGE_SHIFT;
        shadow_end = shadow_start + shadow_size;

        if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
                WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
                return NOTIFY_BAD;

        switch (action) {
        case MEM_GOING_ONLINE: {
                void *ret;

                ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
                                        shadow_end, GFP_KERNEL,
                                        PAGE_KERNEL, VM_NO_GUARD,
                                        pfn_to_nid(mem_data->start_pfn),
                                        __builtin_return_address(0));
                if (!ret)
                        return NOTIFY_BAD;

                kmemleak_ignore(ret);
                return NOTIFY_OK;
        }
        case MEM_OFFLINE:
                vfree((void *)shadow_start);
        }

        return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

module_init(kasan_memhotplug_init);
#endif