// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE reporting.
 *
 * Copyright (C) 2020, Google LLC.
 */
#include <linux/stdarg.h>

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/math.h>
#include <linux/printk.h>
#include <linux/sched/debug.h>
#include <linux/seq_file.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <trace/events/error_report.h>

#include <asm/kfence.h>

#include "kfence.h"
24 /* May be overridden by <asm/kfence.h>. */
25 #ifndef ARCH_FUNC_PREFIX
26 #define ARCH_FUNC_PREFIX ""
29 extern bool no_hash_pointers
;
31 /* Helper function to either print to a seq_file or to console. */
33 static void seq_con_printf(struct seq_file
*seq
, const char *fmt
, ...)
39 seq_vprintf(seq
, fmt
, args
);
46 * Get the number of stack entries to skip to get out of MM internals. @type is
47 * optional, and if set to NULL, assumes an allocation or free stack.
49 static int get_stack_skipnr(const unsigned long stack_entries
[], int num_entries
,
50 const enum kfence_error_type
*type
)
53 int skipnr
, fallback
= 0;
56 /* Depending on error type, find different stack entries. */
58 case KFENCE_ERROR_UAF
:
59 case KFENCE_ERROR_OOB
:
60 case KFENCE_ERROR_INVALID
:
62 * kfence_handle_page_fault() may be called with pt_regs
63 * set to NULL; in that case we'll simply show the full
67 case KFENCE_ERROR_CORRUPTION
:
68 case KFENCE_ERROR_INVALID_FREE
:
73 for (skipnr
= 0; skipnr
< num_entries
; skipnr
++) {
74 int len
= scnprintf(buf
, sizeof(buf
), "%ps", (void *)stack_entries
[skipnr
]);
76 if (str_has_prefix(buf
, ARCH_FUNC_PREFIX
"kfence_") ||
77 str_has_prefix(buf
, ARCH_FUNC_PREFIX
"__kfence_") ||
78 !strncmp(buf
, ARCH_FUNC_PREFIX
"__slab_free", len
)) {
80 * In case of tail calls from any of the below
81 * to any of the above.
83 fallback
= skipnr
+ 1;
86 /* Also the *_bulk() variants by only checking prefixes. */
87 if (str_has_prefix(buf
, ARCH_FUNC_PREFIX
"kfree") ||
88 str_has_prefix(buf
, ARCH_FUNC_PREFIX
"kmem_cache_free") ||
89 str_has_prefix(buf
, ARCH_FUNC_PREFIX
"__kmalloc") ||
90 str_has_prefix(buf
, ARCH_FUNC_PREFIX
"kmem_cache_alloc"))
93 if (fallback
< num_entries
)
97 return skipnr
< num_entries
? skipnr
: 0;
100 static void kfence_print_stack(struct seq_file
*seq
, const struct kfence_metadata
*meta
,
103 const struct kfence_track
*track
= show_alloc
? &meta
->alloc_track
: &meta
->free_track
;
104 u64 ts_sec
= track
->ts_nsec
;
105 unsigned long rem_nsec
= do_div(ts_sec
, NSEC_PER_SEC
);
107 /* Timestamp matches printk timestamp format. */
108 seq_con_printf(seq
, "%s by task %d on cpu %d at %lu.%06lus:\n",
109 show_alloc
? "allocated" : "freed", track
->pid
,
110 track
->cpu
, (unsigned long)ts_sec
, rem_nsec
/ 1000);
112 if (track
->num_stack_entries
) {
113 /* Skip allocation/free internals stack. */
114 int i
= get_stack_skipnr(track
->stack_entries
, track
->num_stack_entries
, NULL
);
116 /* stack_trace_seq_print() does not exist; open code our own. */
117 for (; i
< track
->num_stack_entries
; i
++)
118 seq_con_printf(seq
, " %pS\n", (void *)track
->stack_entries
[i
]);
120 seq_con_printf(seq
, " no %s stack\n", show_alloc
? "allocation" : "deallocation");
124 void kfence_print_object(struct seq_file
*seq
, const struct kfence_metadata
*meta
)
126 const int size
= abs(meta
->size
);
127 const unsigned long start
= meta
->addr
;
128 const struct kmem_cache
*const cache
= meta
->cache
;
130 lockdep_assert_held(&meta
->lock
);
132 if (meta
->state
== KFENCE_OBJECT_UNUSED
) {
133 seq_con_printf(seq
, "kfence-#%td unused\n", meta
- kfence_metadata
);
137 seq_con_printf(seq
, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n",
138 meta
- kfence_metadata
, (void *)start
, (void *)(start
+ size
- 1),
139 size
, (cache
&& cache
->name
) ? cache
->name
: "<destroyed>");
141 kfence_print_stack(seq
, meta
, true);
143 if (meta
->state
== KFENCE_OBJECT_FREED
) {
144 seq_con_printf(seq
, "\n");
145 kfence_print_stack(seq
, meta
, false);
150 * Show bytes at @addr that are different from the expected canary values, up to
153 static void print_diff_canary(unsigned long address
, size_t bytes_to_show
,
154 const struct kfence_metadata
*meta
)
156 const unsigned long show_until_addr
= address
+ bytes_to_show
;
159 /* Do not show contents of object nor read into following guard page. */
160 end
= (const u8
*)(address
< meta
->addr
? min(show_until_addr
, meta
->addr
)
161 : min(show_until_addr
, PAGE_ALIGN(address
)));
164 for (cur
= (const u8
*)address
; cur
< end
; cur
++) {
165 if (*cur
== KFENCE_CANARY_PATTERN(cur
))
167 else if (no_hash_pointers
)
168 pr_cont(" 0x%02x", *cur
);
169 else /* Do not leak kernel memory in non-debug builds. */
/* Return a human-readable string for the access direction of a report. */
static const char *get_access_type(bool is_write)
{
	return is_write ? "write" : "read";
}
180 void kfence_report_error(unsigned long address
, bool is_write
, struct pt_regs
*regs
,
181 const struct kfence_metadata
*meta
, enum kfence_error_type type
)
183 unsigned long stack_entries
[KFENCE_STACK_DEPTH
] = { 0 };
184 const ptrdiff_t object_index
= meta
? meta
- kfence_metadata
: -1;
185 int num_stack_entries
;
189 num_stack_entries
= stack_trace_save_regs(regs
, stack_entries
, KFENCE_STACK_DEPTH
, 0);
191 num_stack_entries
= stack_trace_save(stack_entries
, KFENCE_STACK_DEPTH
, 1);
192 skipnr
= get_stack_skipnr(stack_entries
, num_stack_entries
, &type
);
195 /* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */
196 if (WARN_ON(type
!= KFENCE_ERROR_INVALID
&& !meta
))
200 lockdep_assert_held(&meta
->lock
);
202 * Because we may generate reports in printk-unfriendly parts of the
203 * kernel, such as scheduler code, the use of printk() could deadlock.
204 * Until such time that all printing code here is safe in all parts of
205 * the kernel, accept the risk, and just get our message out (given the
206 * system might already behave unpredictably due to the memory error).
207 * As such, also disable lockdep to hide warnings, and avoid disabling
208 * lockdep for the rest of the kernel.
212 pr_err("==================================================================\n");
213 /* Print report header. */
215 case KFENCE_ERROR_OOB
: {
216 const bool left_of_object
= address
< meta
->addr
;
218 pr_err("BUG: KFENCE: out-of-bounds %s in %pS\n\n", get_access_type(is_write
),
219 (void *)stack_entries
[skipnr
]);
220 pr_err("Out-of-bounds %s at 0x%p (%luB %s of kfence-#%td):\n",
221 get_access_type(is_write
), (void *)address
,
222 left_of_object
? meta
->addr
- address
: address
- meta
->addr
,
223 left_of_object
? "left" : "right", object_index
);
226 case KFENCE_ERROR_UAF
:
227 pr_err("BUG: KFENCE: use-after-free %s in %pS\n\n", get_access_type(is_write
),
228 (void *)stack_entries
[skipnr
]);
229 pr_err("Use-after-free %s at 0x%p (in kfence-#%td):\n",
230 get_access_type(is_write
), (void *)address
, object_index
);
232 case KFENCE_ERROR_CORRUPTION
:
233 pr_err("BUG: KFENCE: memory corruption in %pS\n\n", (void *)stack_entries
[skipnr
]);
234 pr_err("Corrupted memory at 0x%p ", (void *)address
);
235 print_diff_canary(address
, 16, meta
);
236 pr_cont(" (in kfence-#%td):\n", object_index
);
238 case KFENCE_ERROR_INVALID
:
239 pr_err("BUG: KFENCE: invalid %s in %pS\n\n", get_access_type(is_write
),
240 (void *)stack_entries
[skipnr
]);
241 pr_err("Invalid %s at 0x%p:\n", get_access_type(is_write
),
244 case KFENCE_ERROR_INVALID_FREE
:
245 pr_err("BUG: KFENCE: invalid free in %pS\n\n", (void *)stack_entries
[skipnr
]);
246 pr_err("Invalid free of 0x%p (in kfence-#%td):\n", (void *)address
,
251 /* Print stack trace and object info. */
252 stack_trace_print(stack_entries
+ skipnr
, num_stack_entries
- skipnr
, 0);
256 kfence_print_object(NULL
, meta
);
259 /* Print report footer. */
261 if (no_hash_pointers
&& regs
)
264 dump_stack_print_info(KERN_ERR
);
265 trace_error_report_end(ERROR_DETECTOR_KFENCE
, address
);
266 pr_err("==================================================================\n");
271 panic("panic_on_warn set ...\n");
273 /* We encountered a memory safety error, taint the kernel! */
274 add_taint(TAINT_BAD_PAGE
, LOCKDEP_STILL_OK
);