/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks by adding "unconst" to all the const copies,
 * and by making sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst = 0;
static volatile size_t cache_size = 1024;
static struct kmem_cache *bad_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + 0;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[32];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	return trick_compiler(buf);
}

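/*
 * Perform a stack-based usercopy: either from a local buffer (expected to
 * succeed), from a pointer into a callee's expired stack frame (bad_frame),
 * or from a pointer that starts just inside the top of the thread stack so
 * the copy runs past its end. The bad cases should trip hardened usercopy
 * and Oops.
 */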
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

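	/*
	 * Allocate a user page to copy to/from. On failure vm_mmap()
	 * returns a negative errno cast to unsigned long, which is never
	 * a valid userspace address, hence the TASK_SIZE check.
	 */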
	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There isn't a safe way to not be protected by usercopy
		 * if we're going to write to another thread's stack.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

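/*
 * Copy between a user page and a kmalloc() object of "size" bytes: once
 * with the correct size (expected to succeed) and once with twice the
 * size, which overruns the heap object and should trip hardened usercopy.
 */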
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user((void __user *)user_addr, one, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

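		/* "one" is only "size" bytes, so 2 * size overruns it. */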
		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(one, (void __user *)user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

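/*
 * Copy between a user page and a slab object, either from a kmalloc cache
 * (which carries SLAB_USERCOPY and is allowed) or from "bad_cache", which
 * was created without the flag and should trip hardened usercopy.
 */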
static void do_usercopy_heap_flag(bool to_user)
{
	unsigned long user_addr;
	unsigned char *good_buf = NULL;
	unsigned char *bad_buf = NULL;

	/* Make sure cache was prepared. */
	if (!bad_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate one buffer from each cache (kmalloc will have the
	 * SLAB_USERCOPY flag already, but "bad_cache" won't).
	 */
	good_buf = kmalloc(cache_size, GFP_KERNEL);
	bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
	if (!good_buf || !bad_buf) {
		pr_warn("Failed to allocate buffers from caches\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}

	memset(good_buf, 'A', cache_size);
	memset(bad_buf, 'B', cache_size);

	if (to_user) {
		pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
		if (copy_to_user((void __user *)user_addr, good_buf,
				 cache_size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
		if (copy_to_user((void __user *)user_addr, bad_buf,
				 cache_size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
		if (copy_from_user(good_buf, (void __user *)user_addr,
				   cache_size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
		if (copy_from_user(bad_buf, (void __user *)user_addr,
				   cache_size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
	if (bad_buf)
		kmem_cache_free(bad_cache, bad_buf);
	kfree(good_buf);
}

/* Callable tests. */
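/*
 * These are invoked by the lkdtm core as individual crash types; with
 * debugfs mounted they can typically be triggered from userspace, e.g.:
 *   echo USERCOPY_HEAP_SIZE_TO > /sys/kernel/debug/provoke-crash/DIRECT
 */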
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
	do_usercopy_heap_size(true);
}

void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
	do_usercopy_heap_size(false);
}

void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
{
	do_usercopy_heap_flag(true);
}

void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
{
	do_usercopy_heap_flag(false);
}

void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

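/*
 * Copy to userspace from kernel rodata (allowed) and then from kernel
 * text (vm_mmap's code), which hardened usercopy should reject with an
 * Oops.
 */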
void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata\n");
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text\n");
	if (copy_to_user((void __user *)user_addr, vm_mmap,
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare cache that lacks SLAB_USERCOPY flag. */
	bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
				      0, NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(bad_cache);
}