/*
 * Kernel Probes (KProbes)
 *
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * Based on arch/ppc64/kernel/kprobes.c
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/kdebug.h>

#include <asm/cacheflush.h>
#include <asm/ocd.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
static unsigned long kprobe_status;
static struct pt_regs jprobe_saved_regs;

/* Functions that must not be probed via kretprobes; empty on AVR32. */
struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        int ret = 0;

        if ((unsigned long)p->addr & 0x01) {
                printk(KERN_ERR "Attempt to register kprobe at an unaligned address\n");
                ret = -EINVAL;
        }

        /* XXX: Might be a good idea to check if p->addr is a valid
         * kernel address as well (see the sketch after this function)... */

        if (!ret) {
                pr_debug("copy kprobe at %p\n", p->addr);
                memcpy(p->ainsn.insn, p->addr,
                       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
                p->opcode = *p->addr;
        }

        return ret;
}
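
/*
 * A minimal sketch of the address check hinted at above, assuming
 * kernel_text_address() (from kernel/extable.c) is acceptable for the
 * purpose. Illustrative only and compiled out; the helper name
 * kprobe_addr_is_kernel_text() is hypothetical, not part of this file.
 */
#if 0
static int kprobe_addr_is_kernel_text(struct kprobe *p)
{
        /* Reject addresses outside the kernel/module text ranges. */
        return kernel_text_address((unsigned long)p->addr);
}
#endif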

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        pr_debug("arming kprobe at %p\n", p->addr);
        *p->addr = BREAKPOINT_INSTRUCTION;
        flush_icache_range((unsigned long)p->addr,
                           (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        pr_debug("disarming kprobe at %p\n", p->addr);
        *p->addr = p->opcode;
        flush_icache_range((unsigned long)p->addr,
                           (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long dc;

        pr_debug("preparing to singlestep over %p (PC=%08lx)\n",
                 p->addr, regs->pc);

        BUG_ON(!(sysreg_read(SR) & SYSREG_BIT(SR_D)));

        dc = __mfdr(DBGREG_DC);
        dc |= DC_SS;
        __mtdr(DBGREG_DC, dc);

        /*
         * We must run the instruction from its original location
         * since it may actually reference PC.
         *
         * TODO: Do the instruction replacement directly in icache.
         */
        *p->addr = p->opcode;
        flush_icache_range((unsigned long)p->addr,
                           (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long dc;

        pr_debug("resuming execution at PC=%08lx\n", regs->pc);

        dc = __mfdr(DBGREG_DC);
        dc &= ~DC_SS;
        __mtdr(DBGREG_DC, dc);

        *p->addr = BREAKPOINT_INSTRUCTION;
        flush_icache_range((unsigned long)p->addr,
                           (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
        __get_cpu_var(current_kprobe) = p;
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        void *addr = (void *)regs->pc;
        int ret = 0;

        pr_debug("kprobe_handler: kprobe_running=%p\n",
                 kprobe_running());

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing.
         */
        preempt_disable();

        /* Check that we're not recursing */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kprobe_status == KPROBE_HIT_SS) {
                                printk(KERN_ERR "FIXME: kprobe hit while single-stepping!\n");
                                goto no_kprobe;
                        }

                        printk(KERN_ERR "FIXME: kprobe hit while handling another kprobe\n");
                        goto no_kprobe;
                } else {
                        p = kprobe_running();
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
                /* If it's not ours, it can't be a delete race (we hold the lock). */
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p)
                goto no_kprobe;

        kprobe_status = KPROBE_HIT_ACTIVE;
        set_current_kprobe(p);
        if (p->pre_handler && p->pre_handler(p, regs))
                /* handler has already set things up, so skip ss setup */
                return 1;

ss_probe:
        prepare_singlestep(p, regs);
        kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();

        pr_debug("post_kprobe_handler, cur=%p\n", cur);

        if (!cur)
                return 0;

        if (cur->post_handler) {
                kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs);
        reset_current_kprobe();
        preempt_enable_no_resched();

        return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();

        pr_debug("kprobe_fault_handler: trapnr=%d\n", trapnr);

        if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                return 1;

        if (kprobe_status & KPROBE_HIT_SS) {
                resume_execution(cur, regs);
                preempt_enable_no_resched();
        }
        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        pr_debug("kprobe_exceptions_notify: val=%lu, data=%p\n",
                 val, data);

        switch (val) {
        case DIE_BREAKPOINT:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_SSTEP:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        default:
                break;
        }

        return ret;
}

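/*
 * For context: kprobe_exceptions_notify() is called back from the die
 * notifier chain when the CPU reports a breakpoint or single-step event.
 * A minimal sketch of how such a callback gets hooked up, assuming
 * register_die_notifier() from <linux/kdebug.h>; the real registration for
 * kprobes lives in the generic core (kernel/kprobes.c), not in this file,
 * so this block is illustrative only and compiled out.
 */
#if 0
static struct notifier_block kprobe_exceptions_nb = {
        .notifier_call  = kprobe_exceptions_notify,
        .priority       = 0x7fffffff,   /* run as early as possible */
};

static int __init kprobe_notifier_example_init(void)    /* hypothetical */
{
        return register_die_notifier(&kprobe_exceptions_nb);
}
#endif
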
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);

        memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));

        /*
         * TODO: We should probably save some of the stack here as
         * well, since gcc may pass arguments on the stack for certain
         * functions (lots of arguments, large aggregates, varargs)
         */

        /* setup return addr to the jprobe handler routine */
        regs->pc = (unsigned long)jp->entry;
        return 1;
}

void __kprobes jprobe_return(void)
{
        asm volatile("breakpoint" ::: "memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        /*
         * FIXME - we should ideally be validating that we got here 'cos
         * of the "trap" in jprobe_return() above, before restoring the
         * saved regs...
         */
        memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
        return 1;
}
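
/*
 * Taken together, setjmp_pre_handler(), jprobe_return() and
 * longjmp_break_handler() implement jprobes here: the pre-handler redirects
 * the PC to the jprobe entry, the entry runs with the original register
 * state, and jprobe_return() traps back so the saved registers are restored.
 * A minimal usage sketch, compiled out; the probed symbol "my_target" and
 * the handler are hypothetical, and a jprobe entry must match the probed
 * function's signature and end with jprobe_return(). Such code would
 * normally live in a separate module.
 */
#if 0
static long jprobe_entry_example(unsigned long arg)
{
        printk(KERN_INFO "my_target() called with arg=%lu\n", arg);
        jprobe_return();        /* mandatory: trap back and restore regs */
        return 0;               /* never reached */
}

static struct jprobe example_jprobe = {
        .entry  = (void *)jprobe_entry_example,
        .kp     = {
                .symbol_name = "my_target",
        },
};

/* register_jprobe(&example_jprobe) / unregister_jprobe(&example_jprobe) */
#endif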

int __init arch_init_kprobes(void)
{
        printk(KERN_INFO "KPROBES: Enabling monitor mode (MM|DBE)...\n");
        __mtdr(DBGREG_DC, DC_MM | DC_DBE);

        /* TODO: Register kretprobe trampoline */
        return 0;
}
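
/*
 * A minimal usage sketch of the generic kprobes API that the handlers above
 * back on AVR32: registering a probe arms a breakpoint via arch_arm_kprobe(),
 * hits are dispatched through kprobe_handler(), and the original instruction
 * is single-stepped before post_kprobe_handler() runs. Illustrative only and
 * compiled out; such code normally lives in a separate module, and the probed
 * symbol name below is hypothetical.
 */
#if 0
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
        pr_debug("pre_handler: probe at %p hit, PC=%08lx\n", p->addr, regs->pc);
        return 0;       /* 0: let the kprobes core single-step the insn */
}

static void example_post(struct kprobe *p, struct pt_regs *regs,
                         unsigned long flags)
{
        pr_debug("post_handler: single-step over %p done\n", p->addr);
}

static struct kprobe example_kprobe = {
        .symbol_name    = "some_kernel_function",       /* hypothetical target */
        .pre_handler    = example_pre,
        .post_handler   = example_post,
};

/* register_kprobe(&example_kprobe) / unregister_kprobe(&example_kprobe) */
#endif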