/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
12 #include <linux/init.h>
13 #include <linux/perf_event.h>
14 #include <linux/hw_breakpoint.h>
15 #include <linux/percpu.h>
16 #include <linux/kallsyms.h>
17 #include <linux/notifier.h>
18 #include <linux/kprobes.h>
19 #include <linux/kdebug.h>
21 #include <asm/hw_breakpoint.h>
22 #include <asm/mmu_context.h>
23 #include <asm/ptrace.h>
30 /* Per cpu ubc channel state */
31 static DEFINE_PER_CPU(struct ubc_context
, ubc_ctx
[HBP_NUM
]);
34 * Stores the breakpoints currently in use on each breakpoint address
35 * register for each cpus
37 static DEFINE_PER_CPU(struct perf_event
*, bp_per_reg
[HBP_NUM
]);
39 static int __init
ubc_init(void)
41 __raw_writel(0, UBC_CAMR0
);
42 __raw_writel(0, UBC_CBR0
);
43 __raw_writel(0, UBC_CBCR
);
45 __raw_writel(UBC_CRR_BIE
| UBC_CRR_PCB
, UBC_CRR0
);
47 /* dummy read for write posting */
48 (void)__raw_readl(UBC_CRR0
);
52 arch_initcall(ubc_init
);
55 * Install a perf counter breakpoint.
57 * We seek a free UBC channel and use it for this breakpoint.
59 * Atomic: we hold the counter->ctx->lock and we only handle variables
60 * and registers local to this cpu.
62 int arch_install_hw_breakpoint(struct perf_event
*bp
)
64 struct arch_hw_breakpoint
*info
= counter_arch_bp(bp
);
65 struct ubc_context
*ubc_ctx
;
68 for (i
= 0; i
< HBP_NUM
; i
++) {
69 struct perf_event
**slot
= &__get_cpu_var(bp_per_reg
[i
]);
77 if (WARN_ONCE(i
== HBP_NUM
, "Can't find any breakpoint slot"))
80 ubc_ctx
= &__get_cpu_var(ubc_ctx
[i
]);
82 ubc_ctx
->pc
= info
->address
;
83 ubc_ctx
->state
= info
->len
| info
->type
;
85 __raw_writel(UBC_CBR_CE
| ubc_ctx
->state
, UBC_CBR0
);
86 __raw_writel(ubc_ctx
->pc
, UBC_CAR0
);
92 * Uninstall the breakpoint contained in the given counter.
94 * First we search the debug address register it uses and then we disable
97 * Atomic: we hold the counter->ctx->lock and we only handle variables
98 * and registers local to this cpu.
100 void arch_uninstall_hw_breakpoint(struct perf_event
*bp
)
102 struct arch_hw_breakpoint
*info
= counter_arch_bp(bp
);
103 struct ubc_context
*ubc_ctx
;
106 for (i
= 0; i
< HBP_NUM
; i
++) {
107 struct perf_event
**slot
= &__get_cpu_var(bp_per_reg
[i
]);
115 if (WARN_ONCE(i
== HBP_NUM
, "Can't find any breakpoint slot"))
118 ubc_ctx
= &__get_cpu_var(ubc_ctx
[i
]);
120 ubc_ctx
->state
&= ~(info
->len
| info
->type
);
122 __raw_writel(ubc_ctx
->pc
, UBC_CBR0
);
123 __raw_writel(ubc_ctx
->state
, UBC_CAR0
);
126 static int get_hbp_len(u16 hbp_len
)
128 unsigned int len_in_bytes
= 0;
131 case SH_BREAKPOINT_LEN_1
:
134 case SH_BREAKPOINT_LEN_2
:
137 case SH_BREAKPOINT_LEN_4
:
140 case SH_BREAKPOINT_LEN_8
:
148 * Check for virtual address in user space.
150 int arch_check_va_in_userspace(unsigned long va
, u16 hbp_len
)
154 len
= get_hbp_len(hbp_len
);
156 return (va
<= TASK_SIZE
- len
);
160 * Check for virtual address in kernel space.
162 static int arch_check_va_in_kernelspace(unsigned long va
, u8 hbp_len
)
166 len
= get_hbp_len(hbp_len
);
168 return (va
>= TASK_SIZE
) && ((va
+ len
- 1) >= TASK_SIZE
);
172 * Store a breakpoint's encoded address, length, and type.
174 static int arch_store_info(struct perf_event
*bp
)
176 struct arch_hw_breakpoint
*info
= counter_arch_bp(bp
);
179 * User-space requests will always have the address field populated
180 * For kernel-addresses, either the address or symbol name can be
184 info
->address
= (unsigned long)kallsyms_lookup_name(info
->name
);
186 info
->asid
= get_asid();
193 int arch_bp_generic_fields(int sh_len
, int sh_type
,
194 int *gen_len
, int *gen_type
)
198 case SH_BREAKPOINT_LEN_1
:
199 *gen_len
= HW_BREAKPOINT_LEN_1
;
201 case SH_BREAKPOINT_LEN_2
:
202 *gen_len
= HW_BREAKPOINT_LEN_2
;
204 case SH_BREAKPOINT_LEN_4
:
205 *gen_len
= HW_BREAKPOINT_LEN_4
;
207 case SH_BREAKPOINT_LEN_8
:
208 *gen_len
= HW_BREAKPOINT_LEN_8
;
216 case SH_BREAKPOINT_READ
:
217 *gen_type
= HW_BREAKPOINT_R
;
218 case SH_BREAKPOINT_WRITE
:
219 *gen_type
= HW_BREAKPOINT_W
;
221 case SH_BREAKPOINT_RW
:
222 *gen_type
= HW_BREAKPOINT_W
| HW_BREAKPOINT_R
;
231 static int arch_build_bp_info(struct perf_event
*bp
)
233 struct arch_hw_breakpoint
*info
= counter_arch_bp(bp
);
235 info
->address
= bp
->attr
.bp_addr
;
238 switch (bp
->attr
.bp_len
) {
239 case HW_BREAKPOINT_LEN_1
:
240 info
->len
= SH_BREAKPOINT_LEN_1
;
242 case HW_BREAKPOINT_LEN_2
:
243 info
->len
= SH_BREAKPOINT_LEN_2
;
245 case HW_BREAKPOINT_LEN_4
:
246 info
->len
= SH_BREAKPOINT_LEN_4
;
248 case HW_BREAKPOINT_LEN_8
:
249 info
->len
= SH_BREAKPOINT_LEN_8
;
256 switch (bp
->attr
.bp_type
) {
257 case HW_BREAKPOINT_R
:
258 info
->type
= SH_BREAKPOINT_READ
;
260 case HW_BREAKPOINT_W
:
261 info
->type
= SH_BREAKPOINT_WRITE
;
263 case HW_BREAKPOINT_W
| HW_BREAKPOINT_R
:
264 info
->type
= SH_BREAKPOINT_RW
;
274 * Validate the arch-specific HW Breakpoint register settings
276 int arch_validate_hwbkpt_settings(struct perf_event
*bp
,
277 struct task_struct
*tsk
)
279 struct arch_hw_breakpoint
*info
= counter_arch_bp(bp
);
283 ret
= arch_build_bp_info(bp
);
290 case SH_BREAKPOINT_LEN_1
:
293 case SH_BREAKPOINT_LEN_2
:
296 case SH_BREAKPOINT_LEN_4
:
299 case SH_BREAKPOINT_LEN_8
:
306 ret
= arch_store_info(bp
);
312 * Check that the low-order bits of the address are appropriate
313 * for the alignment implied by len.
315 if (info
->address
& align
)
318 /* Check that the virtual address is in the proper range */
320 if (!arch_check_va_in_userspace(info
->address
, info
->len
))
323 if (!arch_check_va_in_kernelspace(info
->address
, info
->len
))
331 * Release the user breakpoints used by ptrace
333 void flush_ptrace_hw_breakpoint(struct task_struct
*tsk
)
336 struct thread_struct
*t
= &tsk
->thread
;
338 for (i
= 0; i
< HBP_NUM
; i
++) {
339 unregister_hw_breakpoint(t
->ptrace_bps
[i
]);
340 t
->ptrace_bps
[i
] = NULL
;
344 static int __kprobes
hw_breakpoint_handler(struct die_args
*args
)
346 int cpu
, i
, rc
= NOTIFY_STOP
;
347 struct perf_event
*bp
;
350 val
= __raw_readl(UBC_CBR0
);
351 __raw_writel(val
& ~UBC_CBR_CE
, UBC_CBR0
);
354 for (i
= 0; i
< HBP_NUM
; i
++) {
356 * The counter may be concurrently released but that can only
357 * occur from a call_rcu() path. We can then safely fetch
358 * the breakpoint, use its callback, touch its counter
359 * while we are in an rcu_read_lock() path.
363 bp
= per_cpu(bp_per_reg
[i
], cpu
);
371 perf_bp_event(bp
, args
->regs
);
376 if (bp
&& bp
->overflow_handler
!= ptrace_triggered
) {
377 struct arch_hw_breakpoint
*info
= counter_arch_bp(bp
);
379 __raw_writel(UBC_CBR_CE
| info
->len
| info
->type
, UBC_CBR0
);
380 __raw_writel(info
->address
, UBC_CAR0
);
388 BUILD_TRAP_HANDLER(breakpoint
)
390 unsigned long ex
= lookup_exception_vector();
395 err
= notify_die(DIE_BREAKPOINT
, "breakpoint", regs
, 0, ex
, SIGTRAP
);
396 if (err
== NOTIFY_STOP
)
399 /* Deliver the signal to userspace */
400 info
.si_signo
= SIGTRAP
;
402 info
.si_code
= TRAP_HWBKPT
;
403 force_sig_info(SIGTRAP
, &info
, current
);
407 * Handle debug exception notifications.
409 int __kprobes
hw_breakpoint_exceptions_notify(struct notifier_block
*unused
,
410 unsigned long val
, void *data
)
412 struct die_args
*args
= data
;
414 if (val
!= DIE_BREAKPOINT
)
418 * If the breakpoint hasn't been triggered by the UBC, it's
419 * probably from a debugger, so don't do anything more here.
421 if (args
->trapnr
!= 0x1e0)
424 return hw_breakpoint_handler(data
);
/* PMU read hook for breakpoint events — nothing to do on SH. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
/* PMU unthrottle hook for breakpoint events — nothing to do on SH. */
void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
{
	/* TODO */
}