/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <asm/sections.h>

/*
 * Checks if a given pointer and length is contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;
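
	/*
	 * Illustrative example: with a stack of [S, S + THREAD_SIZE) and
	 * obj = S - 8, len = 16, the first check passes (obj + len > S),
	 * but obj < S here, so the copy straddles the stack base and is
	 * rejected as BAD_STACK.
	 */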

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}
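
/*
 * A minimal sketch (an assumption, not part of this file) of what an
 * architecture may provide for arch_within_stack_frames(): x86 with
 * CONFIG_FRAME_POINTER walks the chain of saved frame pointers and
 * accepts the object only when it fits between two adjacent frame
 * records, past the saved frame pointer and return address:
 *
 *	const void *oldframe = __builtin_frame_address(1);
 *	const void *frame = oldframe ?
 *			*(const void * const *)oldframe : NULL;
 *
 *	while (stack <= frame && frame < stackend) {
 *		if (obj + len <= frame)
 *			return obj >= oldframe + 2 * sizeof(void *) ?
 *					GOOD_FRAME : BAD_STACK;
 *		oldframe = frame;
 *		frame = *(const void * const *)frame;
 *	}
 *	return BAD_STACK;
 *
 * Architectures without frame walking return NOT_STACK (0), which makes
 * check_stack_object() above fall through to the GOOD_STACK result.
 */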

static void report_usercopy(const void *ptr, unsigned long len,
			    bool to_user, const char *type)
{
	pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to", ptr, type ? : "unknown", len);
	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
		     unsigned long high)
{
	unsigned long check_low = (unsigned long)ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}
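
/*
 * Illustrative example: both ranges are half-open, so
 * overlaps(ptr = (void *)0x1000, n = 0x10, low = 0x100f, high = 0x2000)
 * is true, while the same call with low = 0x1010 is false, because
 * [0x1000, 0x1010) and [0x1010, 0x2000) merely touch.
 */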

/* Is this address range in the kernel text area? */
static inline const char *check_kernel_text_object(const void *ptr,
						   unsigned long n)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		return "<kernel text>";

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This is usually
	 * the case when there is a separate linear physical memory mapping,
	 * in which case __pa() is not just the reverse of __va(). This can
	 * be detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return NULL;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		return "<linear kernel text>";

	return NULL;
}
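
/*
 * Example (an assumption about one user of this check): on arm64 the
 * kernel executes from its own image mapping while the same physical
 * pages are also visible through the linear map; lm_alias(x), defined
 * as __va(__pa_symbol(x)), yields that second address, so both views
 * of the kernel text get rejected here.
 */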

static inline const char *check_bogus_address(const void *ptr, unsigned long n)
{
	/* Reject if object wraps past end of memory. */
	if ((unsigned long)ptr + (n - 1) < (unsigned long)ptr)
		return "<wrapped address>";

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		return "<null>";

	return NULL;
}
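
/*
 * Worked example: on a 64-bit kernel, ptr = 0xfffffffffffffff8 with
 * n = 16 gives ptr + (n - 1) = 0x7, which is below ptr, so the range
 * wraps and is rejected. Using n - 1 instead of n keeps a copy that
 * ends exactly at the top of the address space from being falsely
 * rejected.
 */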

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline const char *check_page_span(const void *ptr, unsigned long n,
					  struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			return "<rodata>";
		return NULL;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return NULL;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return NULL;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return NULL;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return NULL;

	/*
	 * Accept if the range is entirely either Reserved (i.e. special
	 * or device memory) or CMA. Otherwise, reject, since the object
	 * spans several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		return "<spans multiple pages>";

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			return "<spans Reserved and non-Reserved pages>";
		if (is_cma && !is_migrate_cma_page(page))
			return "<spans CMA and non-CMA pages>";
	}
#endif

	return NULL;
}
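
/*
 * Example (illustrative): a large kmalloc() allocation, which is served
 * by the page allocator rather than slab, is backed by a compound page
 * (allocated with __GFP_COMP), so a copy crossing a page boundary inside
 * it still resolves to a single head page via virt_to_head_page() and is
 * accepted by the endpage == page test above.
 */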

static inline const char *check_heap_object(const void *ptr, unsigned long n,
					    bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return NULL;

	/*
	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
	 * highmem page or fall back to virt_to_page(). The following
	 * is effectively a highmem-aware virt_to_head_page().
	 */
	page = compound_head(kmap_to_page((void *)ptr));

	/* Check slab allocator for flags and size. */
	if (PageSlab(page))
		return __check_heap_object(ptr, n, page);

	/* Verify object does not incorrectly span multiple pages. */
	return check_page_span(ptr, n, page, to_user);
}
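
/*
 * Note (an assumption about the allocator side, not defined in this
 * file): __check_heap_object() is supplied by the slab implementation
 * (mm/slab.c or mm/slub.c) and returns the name of the offending cache
 * when [ptr, ptr + n) does not fit within a single allocated object,
 * or NULL when the copy is acceptable.
 */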

/*
 * Validates that the given object is:
 * - not bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	const char *err;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	err = check_bogus_address(ptr, n);
	if (err)
		goto report;

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		err = "<process stack>";
		goto report;
	}

	/* Check for bad heap object. */
	err = check_heap_object(ptr, n, to_user);
	if (err)
		goto report;

	/* Check for object in kernel to avoid text exposure. */
	err = check_kernel_text_object(ptr, n);
	if (!err)
		return;

report:
	report_usercopy(ptr, n, to_user, err);
}
EXPORT_SYMBOL(__check_object_size);
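
/*
 * Usage sketch (illustrative; the real call sites live in the uaccess
 * headers, and example_copy_to_user() is a hypothetical name): the copy
 * helpers run this check before touching memory, roughly as follows.
 *
 *	static inline unsigned long
 *	example_copy_to_user(void __user *to, const void *from,
 *			     unsigned long n)
 *	{
 *		check_object_size(from, n, true);
 *		return raw_copy_to_user(to, from, n);
 *	}
 *
 * check_object_size() forwards to __check_object_size() only when n is
 * not a compile-time constant, so fixed-size copies skip the runtime
 * checks entirely.
 */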