/*
 * Linux Kernel Dump Test Module for testing kernel crash conditions:
 * induces system failures at predefined crashpoints and under predefined
 * operational conditions in order to evaluate the reliability of kernel
 * sanity checking and crash dumps obtained using different dumping
 * solutions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Ankita Garg <ankita@in.ibm.com>
 *
 * It is adapted from the Linux Kernel Dump Test Tool by
 * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
 *
 * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
 *
 * See Documentation/fault-injection/provoke-crashes.txt for instructions
 */
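
/*
 * Typical usage (see provoke-crashes.txt above for the full details):
 * pick a crash type and write its name to the DIRECT debugfs file, e.g.:
 *
 *      # mount -t debugfs none /sys/kernel/debug
 *      # echo EXCEPTION > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * Reading any of the provoke-crash files lists the available crash types.
 */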
#define pr_fmt(fmt) "lkdtm: " fmt

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/kprobes.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_IDE
#include <linux/ide.h>
#endif

#include "lkdtm.h"

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set.  Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
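/*
 * For example, with CONFIG_FRAME_WARN=1024 and a 16kB THREAD_SIZE,
 * REC_STACK_SIZE is 512 bytes and REC_NUM_DEFAULT is (16384 / 512) * 2,
 * i.e. 64 recursions -- roughly twice what the stack can hold.
 */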

#define DEFAULT_COUNT 10
#define EXEC_SIZE 64

enum cname {
        CN_INVALID,
        CN_INT_HARDWARE_ENTRY,
        CN_INT_HW_IRQ_EN,
        CN_INT_TASKLET_ENTRY,
        CN_FS_DEVRW,
        CN_MEM_SWAPOUT,
        CN_TIMERADD,
        CN_SCSI_DISPATCH_CMD,
        CN_IDE_CORE_CP,
        CN_DIRECT,
};

enum ctype {
        CT_NONE,
        CT_PANIC,
        CT_BUG,
        CT_WARNING,
        CT_EXCEPTION,
        CT_LOOP,
        CT_OVERFLOW,
        CT_CORRUPT_STACK,
        CT_UNALIGNED_LOAD_STORE_WRITE,
        CT_OVERWRITE_ALLOCATION,
        CT_WRITE_AFTER_FREE,
        CT_READ_AFTER_FREE,
        CT_WRITE_BUDDY_AFTER_FREE,
        CT_READ_BUDDY_AFTER_FREE,
        CT_SOFTLOCKUP,
        CT_HARDLOCKUP,
        CT_SPINLOCKUP,
        CT_HUNG_TASK,
        CT_EXEC_DATA,
        CT_EXEC_STACK,
        CT_EXEC_KMALLOC,
        CT_EXEC_VMALLOC,
        CT_EXEC_RODATA,
        CT_EXEC_USERSPACE,
        CT_ACCESS_USERSPACE,
        CT_WRITE_RO,
        CT_WRITE_RO_AFTER_INIT,
        CT_WRITE_KERN,
        CT_ATOMIC_UNDERFLOW,
        CT_ATOMIC_OVERFLOW,
        CT_USERCOPY_HEAP_SIZE_TO,
        CT_USERCOPY_HEAP_SIZE_FROM,
        CT_USERCOPY_HEAP_FLAG_TO,
        CT_USERCOPY_HEAP_FLAG_FROM,
        CT_USERCOPY_STACK_FRAME_TO,
        CT_USERCOPY_STACK_FRAME_FROM,
        CT_USERCOPY_STACK_BEYOND,
};

static char *cp_name[] = {
        "INT_HARDWARE_ENTRY",
        "INT_HW_IRQ_EN",
        "INT_TASKLET_ENTRY",
        "FS_DEVRW",
        "MEM_SWAPOUT",
        "TIMERADD",
        "SCSI_DISPATCH_CMD",
        "IDE_CORE_CP",
        "DIRECT",
};

static char *cp_type[] = {
        "PANIC",
        "BUG",
        "WARNING",
        "EXCEPTION",
        "LOOP",
        "OVERFLOW",
        "CORRUPT_STACK",
        "UNALIGNED_LOAD_STORE_WRITE",
        "OVERWRITE_ALLOCATION",
        "WRITE_AFTER_FREE",
        "READ_AFTER_FREE",
        "WRITE_BUDDY_AFTER_FREE",
        "READ_BUDDY_AFTER_FREE",
        "SOFTLOCKUP",
        "HARDLOCKUP",
        "SPINLOCKUP",
        "HUNG_TASK",
        "EXEC_DATA",
        "EXEC_STACK",
        "EXEC_KMALLOC",
        "EXEC_VMALLOC",
        "EXEC_RODATA",
        "EXEC_USERSPACE",
        "ACCESS_USERSPACE",
        "WRITE_RO",
        "WRITE_RO_AFTER_INIT",
        "WRITE_KERN",
        "ATOMIC_UNDERFLOW",
        "ATOMIC_OVERFLOW",
        "USERCOPY_HEAP_SIZE_TO",
        "USERCOPY_HEAP_SIZE_FROM",
        "USERCOPY_HEAP_FLAG_TO",
        "USERCOPY_HEAP_FLAG_FROM",
        "USERCOPY_STACK_FRAME_TO",
        "USERCOPY_STACK_FRAME_FROM",
        "USERCOPY_STACK_BEYOND",
};
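
/*
 * Note: cp_name[] and cp_type[] must stay in sync with enum cname and
 * enum ctype above: parse_cp_type() and the *_to_str() helpers below
 * rely on entry i of each array naming enum value i + 1, since value 0
 * is reserved for CN_INVALID/CT_NONE.
 */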

static struct jprobe lkdtm;

static int lkdtm_parse_commandline(void);
static void lkdtm_handler(void);

static char *cpoint_name;
static char *cpoint_type;
static int cpoint_count = DEFAULT_COUNT;
static int recur_count = REC_NUM_DEFAULT;
static int alloc_size = 1024;
static size_t cache_size;

static enum cname cpoint = CN_INVALID;
static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(count_lock);
static DEFINE_SPINLOCK(lock_me_up);

static u8 data_area[EXEC_SIZE];
static struct kmem_cache *bad_cache;

static const unsigned char test_text[] = "This is a test.\n";
static const unsigned long rodata = 0xAA55AA55;
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;

module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
module_param(cpoint_type, charp, 0444);
MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
                 "hitting the crash point");
module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
                 "crash point is to be hit to trigger action");
module_param(alloc_size, int, 0644);
MODULE_PARM_DESC(alloc_size, " Size of allocation for user copy tests "\
                 "(from 1 to PAGE_SIZE)");

static unsigned int jp_do_irq(unsigned int irq)
{
        lkdtm_handler();
        jprobe_return();
        return 0;
}

static irqreturn_t jp_handle_irq_event(unsigned int irq,
                                       struct irqaction *action)
{
        lkdtm_handler();
        jprobe_return();
        return 0;
}

static void jp_tasklet_action(struct softirq_action *a)
{
        lkdtm_handler();
        jprobe_return();
}

static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
        lkdtm_handler();
        jprobe_return();
}

struct scan_control;

static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
                                             struct zone *zone,
                                             struct scan_control *sc)
{
        lkdtm_handler();
        jprobe_return();
        return 0;
}

static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
                            const enum hrtimer_mode mode)
{
        lkdtm_handler();
        jprobe_return();
        return 0;
}

static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
        lkdtm_handler();
        jprobe_return();
        return 0;
}

#ifdef CONFIG_IDE
static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
                                struct block_device *bdev, unsigned int cmd,
                                unsigned long arg)
{
        lkdtm_handler();
        jprobe_return();
        return 0;
}
#endif

/* Return the crash type matching the name, or CT_NONE if it is invalid */
static enum ctype parse_cp_type(const char *what, size_t count)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
                if (!strcmp(what, cp_type[i]))
                        return i + 1;
        }

        return CT_NONE;
}

static const char *cp_type_to_str(enum ctype type)
{
        if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
                return "None";

        return cp_type[type - 1];
}

static const char *cp_name_to_str(enum cname name)
{
        if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
                return "INVALID";

        return cp_name[name - 1];
}


static int lkdtm_parse_commandline(void)
{
        int i;
        unsigned long flags;

        if (cpoint_count < 1 || recur_count < 1)
                return -EINVAL;

        spin_lock_irqsave(&count_lock, flags);
        count = cpoint_count;
        spin_unlock_irqrestore(&count_lock, flags);

        /* No special parameters */
        if (!cpoint_type && !cpoint_name)
                return 0;

        /* Neither or both of these need to be set */
        if (!cpoint_type || !cpoint_name)
                return -EINVAL;

        cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
        if (cptype == CT_NONE)
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
                if (!strcmp(cpoint_name, cp_name[i])) {
                        cpoint = i + 1;
                        return 0;
                }
        }

        /* Could not find a valid crash point */
        return -EINVAL;
}

static int recursive_loop(int remaining)
{
        char buf[REC_STACK_SIZE];

        /* Make sure compiler does not optimize this away. */
        memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
        if (!remaining)
                return 0;
        else
                return recursive_loop(remaining - 1);
}

static void do_nothing(void)
{
        return;
}

/* Must immediately follow do_nothing for size calculations to work out. */
static void do_overwritten(void)
{
        pr_info("do_overwritten wasn't overwritten!\n");
        return;
}

static noinline void corrupt_stack(void)
{
        /* Use default char array length that triggers stack protection. */
        char data[8];

        memset((void *)data, 0, 64);
}

static noinline void execute_location(void *dst, bool write)
{
        void (*func)(void) = dst;

        pr_info("attempting ok execution at %p\n", do_nothing);
        do_nothing();

        if (write) {
                memcpy(dst, do_nothing, EXEC_SIZE);
                flush_icache_range((unsigned long)dst,
                                   (unsigned long)dst + EXEC_SIZE);
        }
        pr_info("attempting bad execution at %p\n", func);
        func();
}

static void execute_user_location(void *dst)
{
        /* Intentionally crossing kernel/user memory boundary. */
        void (*func)(void) = dst;

        pr_info("attempting ok execution at %p\n", do_nothing);
        do_nothing();

        if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
                return;
        flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
        pr_info("attempting bad execution at %p\n", func);
        func();
}

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
        return stack + 0;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
        unsigned char buf[32];
        int i;

        /* Exercise stack to avoid everything living in registers. */
        for (i = 0; i < sizeof(buf); i++) {
                buf[i] = value & 0xff;
        }

        return trick_compiler(buf);
}

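/*
 * Used by the CT_USERCOPY_STACK_* tests below: "to_user" picks the copy
 * direction, and "bad_frame" picks whether the bad pointer comes from a
 * callee's dead stack frame (the FRAME_TO/FRAME_FROM tests) or from just
 * inside the end of the thread stack, so that the copy runs past it (the
 * STACK_BEYOND test).
 */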
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
        unsigned long user_addr;
        unsigned char good_stack[32];
        unsigned char *bad_stack;
        int i;

        /* Exercise stack to avoid everything living in registers. */
        for (i = 0; i < sizeof(good_stack); i++)
                good_stack[i] = test_text[i % sizeof(test_text)];

        /* This is a pointer to outside our current stack frame. */
        if (bad_frame) {
                bad_stack = do_usercopy_stack_callee(alloc_size);
        } else {
                /* Put start address just inside stack. */
                bad_stack = task_stack_page(current) + THREAD_SIZE;
                bad_stack -= sizeof(unsigned long);
        }

        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                return;
        }

        if (to_user) {
                pr_info("attempting good copy_to_user of local stack\n");
                if (copy_to_user((void __user *)user_addr, good_stack,
                                 sizeof(good_stack))) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_to_user of distant stack\n");
                if (copy_to_user((void __user *)user_addr, bad_stack,
                                 sizeof(good_stack))) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
                /*
                 * There isn't a safe way to not be protected by usercopy
                 * if we're going to write to another thread's stack.
                 */
                if (!bad_frame)
                        goto free_user;

                pr_info("attempting good copy_from_user of local stack\n");
                if (copy_from_user(good_stack, (void __user *)user_addr,
                                   sizeof(good_stack))) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_from_user of distant stack\n");
                if (copy_from_user(bad_stack, (void __user *)user_addr,
                                   sizeof(good_stack))) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
}

static void do_usercopy_heap_size(bool to_user)
{
        unsigned long user_addr;
        unsigned char *one, *two;
        size_t size = clamp_t(int, alloc_size, 1, PAGE_SIZE);

        one = kmalloc(size, GFP_KERNEL);
        two = kmalloc(size, GFP_KERNEL);
        if (!one || !two) {
                pr_warn("Failed to allocate kernel memory\n");
                goto free_kernel;
        }

        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                goto free_kernel;
        }

        memset(one, 'A', size);
        memset(two, 'B', size);

        if (to_user) {
                pr_info("attempting good copy_to_user of correct size\n");
                if (copy_to_user((void __user *)user_addr, one, size)) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_to_user of too large size\n");
                if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
                pr_info("attempting good copy_from_user of correct size\n");
                if (copy_from_user(one, (void __user *)user_addr,
                                   size)) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_from_user of too large size\n");
                if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
        kfree(one);
        kfree(two);
}

static void do_usercopy_heap_flag(bool to_user)
{
        unsigned long user_addr;
        unsigned char *good_buf = NULL;
        unsigned char *bad_buf = NULL;

        /* Make sure cache was prepared. */
        if (!bad_cache) {
                pr_warn("Failed to allocate kernel cache\n");
                return;
        }

        /*
         * Allocate one buffer from each cache (kmalloc will have the
         * SLAB_USERCOPY flag already, but "bad_cache" won't).
         */
        good_buf = kmalloc(cache_size, GFP_KERNEL);
        bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
        if (!good_buf || !bad_buf) {
                pr_warn("Failed to allocate buffers from caches\n");
                goto free_alloc;
        }

        /* Allocate user memory we'll poke at. */
        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                goto free_alloc;
        }

        memset(good_buf, 'A', cache_size);
        memset(bad_buf, 'B', cache_size);

        if (to_user) {
                pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
                if (copy_to_user((void __user *)user_addr, good_buf,
                                 cache_size)) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
                if (copy_to_user((void __user *)user_addr, bad_buf,
                                 cache_size)) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
                pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
                if (copy_from_user(good_buf, (void __user *)user_addr,
                                   cache_size)) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
                if (copy_from_user(bad_buf, (void __user *)user_addr,
                                   cache_size)) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
        if (bad_buf)
                kmem_cache_free(bad_cache, bad_buf);
        kfree(good_buf);
}

static void lkdtm_do_action(enum ctype which)
{
        switch (which) {
        case CT_PANIC:
                panic("dumptest");
                break;
        case CT_BUG:
                BUG();
                break;
        case CT_WARNING:
                WARN_ON(1);
                break;
        case CT_EXCEPTION:
                *((int *) 0) = 0;
                break;
        case CT_LOOP:
                for (;;)
                        ;
                break;
        case CT_OVERFLOW:
                (void) recursive_loop(recur_count);
                break;
        case CT_CORRUPT_STACK:
                corrupt_stack();
                break;
        case CT_UNALIGNED_LOAD_STORE_WRITE: {
                static u8 data[5] __attribute__((aligned(4))) = {1, 2,
                                                                 3, 4, 5};
                u32 *p;
                u32 val = 0x12345678;

                p = (u32 *)(data + 1);
                if (*p == 0)
                        val = 0x87654321;
                *p = val;
                break;
        }
        case CT_OVERWRITE_ALLOCATION: {
                size_t len = 1020;
                u32 *data = kmalloc(len, GFP_KERNEL);

                /* Skip the test (rather than oops) if allocation fails. */
                if (!data)
                        break;
                data[1024 / sizeof(u32)] = 0x12345678;
                kfree(data);
                break;
        }
        case CT_WRITE_AFTER_FREE: {
                int *base, *again;
                size_t len = 1024;
                /*
                 * The slub allocator uses the first word to store the free
                 * pointer in some configurations. Use the middle of the
                 * allocation to avoid running into the freelist
                 */
                size_t offset = (len / sizeof(*base)) / 2;

                base = kmalloc(len, GFP_KERNEL);
                if (!base)
                        break;
                pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
                pr_info("Attempting bad write to freed memory at %p\n",
                        &base[offset]);
                kfree(base);
                base[offset] = 0x0abcdef0;
                /* Attempt to notice the overwrite. */
                again = kmalloc(len, GFP_KERNEL);
                kfree(again);
                if (again != base)
                        pr_info("Hmm, didn't get the same memory range.\n");

                break;
        }
        case CT_READ_AFTER_FREE: {
                int *base, *val, saw;
                size_t len = 1024;
                /*
                 * The slub allocator uses the first word to store the free
                 * pointer in some configurations. Use the middle of the
                 * allocation to avoid running into the freelist
                 */
                size_t offset = (len / sizeof(*base)) / 2;

                base = kmalloc(len, GFP_KERNEL);
                if (!base)
                        break;

                val = kmalloc(len, GFP_KERNEL);
                if (!val) {
                        kfree(base);
                        break;
                }

                *val = 0x12345678;
                base[offset] = *val;
                pr_info("Value in memory before free: %x\n", base[offset]);

                kfree(base);

                pr_info("Attempting bad read from freed memory\n");
                saw = base[offset];
                if (saw != *val) {
                        /* Good! Poisoning happened, so declare a win. */
                        pr_info("Memory correctly poisoned (%x)\n", saw);
                        BUG();
                }
                pr_info("Memory was not poisoned\n");

                kfree(val);
                break;
        }
        case CT_WRITE_BUDDY_AFTER_FREE: {
                unsigned long p = __get_free_page(GFP_KERNEL);

                if (!p)
                        break;
                pr_info("Writing to the buddy page before free\n");
                memset((void *)p, 0x3, PAGE_SIZE);
                free_page(p);
                schedule();
                pr_info("Attempting bad write to the buddy page after free\n");
                memset((void *)p, 0x78, PAGE_SIZE);
                /* Attempt to notice the overwrite. */
                p = __get_free_page(GFP_KERNEL);
                free_page(p);
                schedule();

                break;
        }
        case CT_READ_BUDDY_AFTER_FREE: {
                unsigned long p = __get_free_page(GFP_KERNEL);
                int saw, *val;
                int *base;

                if (!p)
                        break;

                val = kmalloc(1024, GFP_KERNEL);
                if (!val) {
                        free_page(p);
                        break;
                }

                base = (int *)p;

                *val = 0x12345678;
                base[0] = *val;
                pr_info("Value in memory before free: %x\n", base[0]);
                free_page(p);
                pr_info("Attempting to read from freed memory\n");
                saw = base[0];
                if (saw != *val) {
                        /* Good! Poisoning happened, so declare a win. */
                        pr_info("Memory correctly poisoned (%x)\n", saw);
                        BUG();
                }
                pr_info("Buddy page was not poisoned\n");

                kfree(val);
                break;
        }
        case CT_SOFTLOCKUP:
                preempt_disable();
                for (;;)
                        cpu_relax();
                break;
        case CT_HARDLOCKUP:
                local_irq_disable();
                for (;;)
                        cpu_relax();
                break;
        case CT_SPINLOCKUP:
                /* Must be called twice to trigger. */
                spin_lock(&lock_me_up);
                /* Let sparse know we intended to exit holding the lock. */
                __release(&lock_me_up);
                break;
        case CT_HUNG_TASK:
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
                break;
        case CT_EXEC_DATA:
                execute_location(data_area, true);
                break;
        case CT_EXEC_STACK: {
                u8 stack_area[EXEC_SIZE];

                execute_location(stack_area, true);
                break;
        }
        case CT_EXEC_KMALLOC: {
                u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);

                /* Skip the test (rather than oops) if allocation fails. */
                if (!kmalloc_area)
                        break;
                execute_location(kmalloc_area, true);
                kfree(kmalloc_area);
                break;
        }
        case CT_EXEC_VMALLOC: {
                u32 *vmalloc_area = vmalloc(EXEC_SIZE);

                if (!vmalloc_area)
                        break;
                execute_location(vmalloc_area, true);
                vfree(vmalloc_area);
                break;
        }
        case CT_EXEC_RODATA:
                execute_location(lkdtm_rodata_do_nothing, false);
                break;
        case CT_EXEC_USERSPACE: {
                unsigned long user_addr;

                user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                                    PROT_READ | PROT_WRITE | PROT_EXEC,
                                    MAP_ANONYMOUS | MAP_PRIVATE, 0);
                if (user_addr >= TASK_SIZE) {
                        pr_warn("Failed to allocate user memory\n");
                        return;
                }
                execute_user_location((void *)user_addr);
                vm_munmap(user_addr, PAGE_SIZE);
                break;
        }
        case CT_ACCESS_USERSPACE: {
                unsigned long user_addr, tmp = 0;
                unsigned long *ptr;

                user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                                    PROT_READ | PROT_WRITE | PROT_EXEC,
                                    MAP_ANONYMOUS | MAP_PRIVATE, 0);
                if (user_addr >= TASK_SIZE) {
                        pr_warn("Failed to allocate user memory\n");
                        return;
                }

                if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
                        pr_warn("copy_to_user failed\n");
                        vm_munmap(user_addr, PAGE_SIZE);
                        return;
                }

                ptr = (unsigned long *)user_addr;

                pr_info("attempting bad read at %p\n", ptr);
                tmp = *ptr;
                tmp += 0xc0dec0de;

                pr_info("attempting bad write at %p\n", ptr);
                *ptr = tmp;

                vm_munmap(user_addr, PAGE_SIZE);

                break;
        }
        case CT_WRITE_RO: {
                /* Explicitly cast away "const" for the test. */
                unsigned long *ptr = (unsigned long *)&rodata;

                pr_info("attempting bad rodata write at %p\n", ptr);
                *ptr ^= 0xabcd1234;

                break;
        }
        case CT_WRITE_RO_AFTER_INIT: {
                unsigned long *ptr = &ro_after_init;

                /*
                 * Verify we were written to during init. Since an Oops
                 * is considered a "success", a failure is to just skip the
                 * real test.
                 */
                if ((*ptr & 0xAA) != 0xAA) {
                        pr_info("%p was NOT written during init!?\n", ptr);
                        break;
                }

                pr_info("attempting bad ro_after_init write at %p\n", ptr);
                *ptr ^= 0xabcd1234;

                break;
        }
        case CT_WRITE_KERN: {
                size_t size;
                unsigned char *ptr;

                size = (unsigned long)do_overwritten -
                       (unsigned long)do_nothing;
                ptr = (unsigned char *)do_overwritten;

                pr_info("attempting bad %zu byte write at %p\n", size, ptr);
                memcpy(ptr, (unsigned char *)do_nothing, size);
                flush_icache_range((unsigned long)ptr,
                                   (unsigned long)(ptr + size));

                do_overwritten();
                break;
        }
        case CT_ATOMIC_UNDERFLOW: {
                atomic_t under = ATOMIC_INIT(INT_MIN);

                pr_info("attempting good atomic increment\n");
                atomic_inc(&under);
                atomic_dec(&under);

                pr_info("attempting bad atomic underflow\n");
                atomic_dec(&under);
                break;
        }
        case CT_ATOMIC_OVERFLOW: {
                atomic_t over = ATOMIC_INIT(INT_MAX);

                pr_info("attempting good atomic decrement\n");
                atomic_dec(&over);
                atomic_inc(&over);

                pr_info("attempting bad atomic overflow\n");
                atomic_inc(&over);

                return;
        }
        case CT_USERCOPY_HEAP_SIZE_TO:
                do_usercopy_heap_size(true);
                break;
        case CT_USERCOPY_HEAP_SIZE_FROM:
                do_usercopy_heap_size(false);
                break;
        case CT_USERCOPY_HEAP_FLAG_TO:
                do_usercopy_heap_flag(true);
                break;
        case CT_USERCOPY_HEAP_FLAG_FROM:
                do_usercopy_heap_flag(false);
                break;
        case CT_USERCOPY_STACK_FRAME_TO:
                do_usercopy_stack(true, true);
                break;
        case CT_USERCOPY_STACK_FRAME_FROM:
                do_usercopy_stack(false, true);
                break;
        case CT_USERCOPY_STACK_BEYOND:
                do_usercopy_stack(true, false);
                break;
        case CT_NONE:
        default:
                break;
        }
}
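
/*
 * Example cadence: with the default cpoint_count of 10, lkdtm_handler()
 * below lets the probed function run nine times and fires the configured
 * crash action on the tenth hit, then re-arms the counter.
 */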
static void lkdtm_handler(void)
{
        unsigned long flags;
        bool do_it = false;

        spin_lock_irqsave(&count_lock, flags);
        count--;
        pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
                cp_name_to_str(cpoint), cp_type_to_str(cptype), count);

        if (count == 0) {
                do_it = true;
                count = cpoint_count;
        }
        spin_unlock_irqrestore(&count_lock, flags);

        if (do_it)
                lkdtm_do_action(cptype);
}

static int lkdtm_register_cpoint(enum cname which)
{
        int ret;

        cpoint = CN_INVALID;
        if (lkdtm.entry != NULL)
                unregister_jprobe(&lkdtm);

        switch (which) {
        case CN_DIRECT:
                lkdtm_do_action(cptype);
                return 0;
        case CN_INT_HARDWARE_ENTRY:
                lkdtm.kp.symbol_name = "do_IRQ";
                lkdtm.entry = (kprobe_opcode_t *) jp_do_irq;
                break;
        case CN_INT_HW_IRQ_EN:
                lkdtm.kp.symbol_name = "handle_IRQ_event";
                lkdtm.entry = (kprobe_opcode_t *) jp_handle_irq_event;
                break;
        case CN_INT_TASKLET_ENTRY:
                lkdtm.kp.symbol_name = "tasklet_action";
                lkdtm.entry = (kprobe_opcode_t *) jp_tasklet_action;
                break;
        case CN_FS_DEVRW:
                lkdtm.kp.symbol_name = "ll_rw_block";
                lkdtm.entry = (kprobe_opcode_t *) jp_ll_rw_block;
                break;
        case CN_MEM_SWAPOUT:
                lkdtm.kp.symbol_name = "shrink_inactive_list";
                lkdtm.entry = (kprobe_opcode_t *) jp_shrink_inactive_list;
                break;
        case CN_TIMERADD:
                lkdtm.kp.symbol_name = "hrtimer_start";
                lkdtm.entry = (kprobe_opcode_t *) jp_hrtimer_start;
                break;
        case CN_SCSI_DISPATCH_CMD:
                lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
                lkdtm.entry = (kprobe_opcode_t *) jp_scsi_dispatch_cmd;
                break;
        case CN_IDE_CORE_CP:
#ifdef CONFIG_IDE
                lkdtm.kp.symbol_name = "generic_ide_ioctl";
                lkdtm.entry = (kprobe_opcode_t *) jp_generic_ide_ioctl;
#else
                pr_info("Crash point not available\n");
                return -EINVAL;
#endif
                break;
        default:
                pr_info("Invalid Crash Point\n");
                return -EINVAL;
        }

        cpoint = which;
        ret = register_jprobe(&lkdtm);
        if (ret < 0) {
                pr_info("Couldn't register jprobe\n");
                cpoint = CN_INVALID;
        }

        return ret;
}

static ssize_t do_register_entry(enum cname which, struct file *f,
                const char __user *user_buf, size_t count, loff_t *off)
{
        char *buf;
        int err;

        if (count >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        if (copy_from_user(buf, user_buf, count)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        /* NUL-terminate and strip the trailing newline */
        buf[count] = '\0';
        strim(buf);

        cptype = parse_cp_type(buf, count);
        free_page((unsigned long) buf);

        if (cptype == CT_NONE)
                return -EINVAL;

        err = lkdtm_register_cpoint(which);
        if (err < 0)
                return err;

        *off += count;

        return count;
}

/* Generic read callback that just prints out the available crash types */
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
                size_t count, loff_t *off)
{
        char *buf;
        int i, n, out;

        buf = (char *)__get_free_page(GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
        for (i = 0; i < ARRAY_SIZE(cp_type); i++)
                n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
        buf[n] = '\0';

        out = simple_read_from_buffer(user_buf, count, off,
                                      buf, n);
        free_page((unsigned long) buf);

        return out;
}

static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
{
        return 0;
}


static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
}

static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
}

static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
}

static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
}

static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
}

static ssize_t timeradd_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_TIMERADD, f, buf, count, off);
}

static ssize_t scsi_dispatch_cmd_entry(struct file *f,
                const char __user *buf, size_t count, loff_t *off)
{
        return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
}

static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
{
        return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
}

/* Special entry to just crash directly. Available without KPROBEs */
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
                size_t count, loff_t *off)
{
        enum ctype type;
        char *buf;

        if (count >= PAGE_SIZE)
                return -EINVAL;
        if (count < 1)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        if (copy_from_user(buf, user_buf, count)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        /* NUL-terminate and strip the trailing newline */
        buf[count] = '\0';
        strim(buf);

        type = parse_cp_type(buf, count);
        free_page((unsigned long) buf);
        if (type == CT_NONE)
                return -EINVAL;

        pr_info("Performing direct entry %s\n", cp_type_to_str(type));
        lkdtm_do_action(type);
        *off += count;

        return count;
}

struct crash_entry {
        const char *name;
        const struct file_operations fops;
};

static const struct crash_entry crash_entries[] = {
        {"DIRECT", {.read = lkdtm_debugfs_read,
                    .llseek = generic_file_llseek,
                    .open = lkdtm_debugfs_open,
                    .write = direct_entry} },
        {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
                    .llseek = generic_file_llseek,
                    .open = lkdtm_debugfs_open,
                    .write = int_hardware_entry} },
        {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
                    .llseek = generic_file_llseek,
                    .open = lkdtm_debugfs_open,
                    .write = int_hw_irq_en} },
        {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
                    .llseek = generic_file_llseek,
                    .open = lkdtm_debugfs_open,
                    .write = int_tasklet_entry} },
        {"FS_DEVRW", {.read = lkdtm_debugfs_read,
                    .llseek = generic_file_llseek,
                    .open = lkdtm_debugfs_open,
                    .write = fs_devrw_entry} },
        {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
                    .llseek = generic_file_llseek,
                    .open = lkdtm_debugfs_open,
                    .write = mem_swapout_entry} },
        {"TIMERADD", {.read = lkdtm_debugfs_read,
                    .llseek = generic_file_llseek,
                    .open = lkdtm_debugfs_open,
                    .write = timeradd_entry} },
        {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
                    .llseek = generic_file_llseek,
                    .open = lkdtm_debugfs_open,
                    .write = scsi_dispatch_cmd_entry} },
        {"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
                    .llseek = generic_file_llseek,
                    .open = lkdtm_debugfs_open,
                    .write = ide_core_cp_entry} },
};
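
/*
 * Example: each file above accepts a crash type name, so writing
 *
 *      echo BUG > /sys/kernel/debug/provoke-crash/INT_HARDWARE_ENTRY
 *
 * arms a BUG() that fires once the interrupt crash point has been hit
 * cpoint_count times.
 */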

static struct dentry *lkdtm_debugfs_root;

static int __init lkdtm_module_init(void)
{
        int ret = -EINVAL;
        int n_debugfs_entries = 1; /* Assume only the direct entry */
        int i;

        /* Make sure we can write to __ro_after_init values during __init */
        ro_after_init |= 0xAA;

        /* Prepare cache that lacks SLAB_USERCOPY flag. */
        cache_size = clamp_t(int, alloc_size, 1, PAGE_SIZE);
        bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
                                      0, NULL);

        /* Register debugfs interface */
        lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
        if (!lkdtm_debugfs_root) {
                pr_err("creating root dir failed\n");
                return -ENODEV;
        }

#ifdef CONFIG_KPROBES
        n_debugfs_entries = ARRAY_SIZE(crash_entries);
#endif

        for (i = 0; i < n_debugfs_entries; i++) {
                const struct crash_entry *cur = &crash_entries[i];
                struct dentry *de;

                de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
                                         NULL, &cur->fops);
                if (de == NULL) {
                        pr_err("could not create %s\n", cur->name);
                        goto out_err;
                }
        }

        if (lkdtm_parse_commandline() == -EINVAL) {
                pr_info("Invalid command\n");
                goto out_err;
        }

        if (cpoint != CN_INVALID && cptype != CT_NONE) {
                ret = lkdtm_register_cpoint(cpoint);
                if (ret < 0) {
                        pr_info("Invalid crash point %d\n", cpoint);
                        goto out_err;
                }
                pr_info("Crash point %s of type %s registered\n",
                        cpoint_name, cpoint_type);
        } else {
                pr_info("No crash points registered, enable through debugfs\n");
        }

        return 0;

out_err:
        debugfs_remove_recursive(lkdtm_debugfs_root);
        return ret;
}

static void __exit lkdtm_module_exit(void)
{
        debugfs_remove_recursive(lkdtm_debugfs_root);

        kmem_cache_destroy(bad_cache);

        unregister_jprobe(&lkdtm);
        pr_info("Crash point unregistered\n");
}

module_init(lkdtm_module_init);
module_exit(lkdtm_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kprobe module for testing crash dumps");