/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>

struct ubc_context {
        unsigned long pc;
        unsigned long state;
};

/* Per cpu ubc channel state */
static DEFINE_PER_CPU(struct ubc_context, ubc_ctx[HBP_NUM]);

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

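/*
 * Boot-time initialization of UBC channel 0: clear the match conditions
 * and program the control register so that a channel match raises a
 * break interrupt.
 */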
static int __init ubc_init(void)
{
        __raw_writel(0, UBC_CAMR0);
        __raw_writel(0, UBC_CBR0);
        __raw_writel(0, UBC_CBCR);

        __raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR0);

        /* dummy read for write posting */
        (void)__raw_readl(UBC_CRR0);

        return 0;
}
arch_initcall(ubc_init);

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct ubc_context *ubc_ctx;
        int i;

        for (i = 0; i < HBP_NUM; i++) {
                struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

                if (!*slot) {
                        *slot = bp;
                        break;
                }
        }

        if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
                return -EBUSY;

        ubc_ctx = &__get_cpu_var(ubc_ctx[i]);

        ubc_ctx->pc = info->address;
        ubc_ctx->state = info->len | info->type;

        __raw_writel(UBC_CBR_CE | ubc_ctx->state, UBC_CBR0);
        __raw_writel(ubc_ctx->pc, UBC_CAR0);

        return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search for the debug address register it uses and then we
 * disable it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct ubc_context *ubc_ctx;
        int i;

        for (i = 0; i < HBP_NUM; i++) {
                struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

                if (*slot == bp) {
                        *slot = NULL;
                        break;
                }
        }

        if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
                return;

        ubc_ctx = &__get_cpu_var(ubc_ctx[i]);
        ubc_ctx->pc = 0;
        ubc_ctx->state &= ~(info->len | info->type);

        /* Write back the disabled channel state and clear the match address */
        __raw_writel(ubc_ctx->state, UBC_CBR0);
        __raw_writel(ubc_ctx->pc, UBC_CAR0);
}

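/*
 * Translate the arch-specific breakpoint length encoding into a size in
 * bytes.
 */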
static int get_hbp_len(u16 hbp_len)
{
        unsigned int len_in_bytes = 0;

        switch (hbp_len) {
        case SH_BREAKPOINT_LEN_1:
                len_in_bytes = 1;
                break;
        case SH_BREAKPOINT_LEN_2:
                len_in_bytes = 2;
                break;
        case SH_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
        case SH_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
        }
        return len_in_bytes;
}

/*
 * Check for virtual address in user space.
 */
int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
{
        unsigned int len;

        len = get_hbp_len(hbp_len);

        return (va <= TASK_SIZE - len);
}

/*
 * Check for virtual address in kernel space.
 */
static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
{
        unsigned int len;

        len = get_hbp_len(hbp_len);

        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Store a breakpoint's encoded address, length, and type.
 */
static int arch_store_info(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        /*
         * User-space requests will always have the address field populated.
         * For kernel addresses, either the address or the symbol name can
         * be specified.
         */
        if (info->name)
                info->address = (unsigned long)kallsyms_lookup_name(info->name);
        if (info->address) {
                info->asid = get_asid();
                return 0;
        }

        return -EINVAL;
}

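/*
 * Translate the arch-specific length/type encodings into the generic
 * hw_breakpoint flags.
 */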
int arch_bp_generic_fields(int sh_len, int sh_type,
                           int *gen_len, int *gen_type)
{
        /* Len */
        switch (sh_len) {
        case SH_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
        case SH_BREAKPOINT_LEN_2:
                *gen_len = HW_BREAKPOINT_LEN_2;
                break;
        case SH_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
        case SH_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        /* Type */
        switch (sh_type) {
        case SH_BREAKPOINT_READ:
                *gen_type = HW_BREAKPOINT_R;
                break;
        case SH_BREAKPOINT_WRITE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case SH_BREAKPOINT_RW:
                *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

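/*
 * Decode the generic perf attributes (bp_addr/bp_len/bp_type) into the
 * arch-specific encoding used to program the UBC.
 */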
static int arch_build_bp_info(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        info->address = bp->attr.bp_addr;

        /* Len */
        switch (bp->attr.bp_len) {
        case HW_BREAKPOINT_LEN_1:
                info->len = SH_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
                info->len = SH_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
                info->len = SH_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
                info->len = SH_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        /* Type */
        switch (bp->attr.bp_type) {
        case HW_BREAKPOINT_R:
                info->type = SH_BREAKPOINT_READ;
                break;
        case HW_BREAKPOINT_W:
                info->type = SH_BREAKPOINT_WRITE;
                break;
        case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
                info->type = SH_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp,
                                  struct task_struct *tsk)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        unsigned int align;
        int ret;

        ret = arch_build_bp_info(bp);
        if (ret)
                return ret;

        ret = -EINVAL;

        switch (info->len) {
        case SH_BREAKPOINT_LEN_1:
                align = 0;
                break;
        case SH_BREAKPOINT_LEN_2:
                align = 1;
                break;
        case SH_BREAKPOINT_LEN_4:
                align = 3;
                break;
        case SH_BREAKPOINT_LEN_8:
                align = 7;
                break;
        default:
                return ret;
        }

        ret = arch_store_info(bp);

        if (ret < 0)
                return ret;

        /*
         * Check that the low-order bits of the address are appropriate
         * for the alignment implied by len.
         */
        if (info->address & align)
                return -EINVAL;

        /* Check that the virtual address is in the proper range */
        if (tsk) {
                if (!arch_check_va_in_userspace(info->address, info->len))
                        return -EFAULT;
        } else {
                if (!arch_check_va_in_kernelspace(info->address, info->len))
                        return -EFAULT;
        }

        return 0;
}

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
        int i;
        struct thread_struct *t = &tsk->thread;

        for (i = 0; i < HBP_NUM; i++) {
                unregister_hw_breakpoint(t->ptrace_bps[i]);
                t->ptrace_bps[i] = NULL;
        }
}

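/*
 * Handle a UBC break: disable channel 0 and dispatch the event to the
 * breakpoints installed on this CPU.
 */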
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
        int cpu, i, rc = NOTIFY_STOP;
        struct perf_event *bp;
        unsigned long val;

        val = __raw_readl(UBC_CBR0);
        __raw_writel(val & ~UBC_CBR_CE, UBC_CBR0);

        cpu = get_cpu();
        for (i = 0; i < HBP_NUM; i++) {
                /*
                 * The counter may be concurrently released but that can only
                 * occur from a call_rcu() path. We can then safely fetch
                 * the breakpoint, use its callback, touch its counter
                 * while we are in an rcu_read_lock() path.
                 */
                rcu_read_lock();

                bp = per_cpu(bp_per_reg[i], cpu);
                if (bp) {
                        rc = NOTIFY_DONE;
                } else {
                        rcu_read_unlock();
                        break;
                }

                perf_bp_event(bp, args->regs);

                rcu_read_unlock();
        }

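        /*
         * Re-arm the channel unless the breakpoint is owned by ptrace;
         * ptrace breakpoints are left disabled here.
         */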
        if (bp && bp->overflow_handler != ptrace_triggered) {
                struct arch_hw_breakpoint *info = counter_arch_bp(bp);

                __raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR0);
                __raw_writel(info->address, UBC_CAR0);
        }

        put_cpu();

        return rc;
}

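/*
 * Breakpoint trap handler: give the die notifier chain (and thus the UBC
 * handler above) a chance to consume the event, otherwise deliver SIGTRAP
 * to the current task.
 */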
BUILD_TRAP_HANDLER(breakpoint)
{
        unsigned long ex = lookup_exception_vector();
        siginfo_t info;
        int err;
        TRAP_HANDLER_DECL;

        err = notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
        if (err == NOTIFY_STOP)
                return;

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        force_sig_info(SIGTRAP, &info, current);
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                              unsigned long val, void *data)
{
        struct die_args *args = data;

        if (val != DIE_BREAKPOINT)
                return NOTIFY_DONE;

        /*
         * If the breakpoint hasn't been triggered by the UBC, it's
         * probably from a debugger, so don't do anything more here.
         */
        if (args->trapnr != 0x1e0)
                return NOTIFY_DONE;

        return hw_breakpoint_handler(data);
}

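/*
 * perf PMU callbacks; nothing to do for the UBC yet.
 */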
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
        /* TODO */
}

void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
{
        /* TODO */
}