/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 *
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
        if (type == TYPE_DATA)
                return HBP_NUM;
        return 0;               /* no instruction breakpoints available */
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slot = this_cpu_ptr(&bp_per_reg);

        *slot = bp;

        /*
         * Do not install DABR values if the instruction must be single-stepped.
         * If so, DABR will be populated in single_step_dabr_instruction().
         */
        if (current->thread.last_hit_ubp != bp)
                __set_breakpoint(info);

        return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        struct perf_event **slot = this_cpu_ptr(&bp_per_reg);

        if (*slot != bp) {
                WARN_ONCE(1, "Can't find the breakpoint");
                return;
        }

        *slot = NULL;
        hw_breakpoint_disable();
}

/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
        /*
         * If the breakpoint is unregistered between a hw_breakpoint_handler()
         * and the single_step_dabr_instruction(), then cleanup the breakpoint
         * restoration variables to prevent dangling pointers.
         * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
         */
        if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
                bp->ctx->task->thread.last_hit_ubp = NULL;
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        return is_kernel_addr(info->address);
}

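/*
 * Translate the arch-specific breakpoint type bits (HW_BRK_TYPE_*) into
 * the generic perf flags (HW_BREAKPOINT_R/W); at least one of read or
 * write must be requested.
 */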
int arch_bp_generic_fields(int type, int *gen_bp_type)
{
        *gen_bp_type = 0;
        if (type & HW_BRK_TYPE_READ)
                *gen_bp_type |= HW_BREAKPOINT_R;
        if (type & HW_BRK_TYPE_WRITE)
                *gen_bp_type |= HW_BREAKPOINT_W;
        if (*gen_bp_type == 0)
                return -EINVAL;
        return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
        int ret = -EINVAL, length_max;
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        if (!bp)
                return ret;

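        /* Translate the generic perf attributes into the arch-specific breakpoint type. */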
        info->type = HW_BRK_TYPE_TRANSLATE;
        if (bp->attr.bp_type & HW_BREAKPOINT_R)
                info->type |= HW_BRK_TYPE_READ;
        if (bp->attr.bp_type & HW_BREAKPOINT_W)
                info->type |= HW_BRK_TYPE_WRITE;
        if (info->type == HW_BRK_TYPE_TRANSLATE)
                /* must set at least read or write */
                return ret;
        if (!(bp->attr.exclude_user))
                info->type |= HW_BRK_TYPE_USER;
        if (!(bp->attr.exclude_kernel))
                info->type |= HW_BRK_TYPE_KERNEL;
        if (!(bp->attr.exclude_hv))
                info->type |= HW_BRK_TYPE_HYP;
        info->address = bp->attr.bp_addr;
        info->len = bp->attr.bp_len;

        /*
         * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
         * and breakpoint addresses are aligned to nearest double-word
         * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the
         * 'symbolsize' should satisfy the check below.
         */
        length_max = 8; /* DABR */
        if (cpu_has_feature(CPU_FTR_DAWR)) {
                length_max = 512; /* 64 doublewords */
                /* DAWR region can't cross a 512-byte boundary */
                if ((bp->attr.bp_addr >> 10) !=
                    ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
                        return -EINVAL;
        }
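        /*
         * For example, with the 8-byte DABR window, a watchpoint whose
         * address falls at byte offset 6 within its doubleword can cover
         * at most 2 bytes.
         */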
        if (info->len >
            (length_max - (info->address & HW_BREAKPOINT_ALIGN)))
                return -EINVAL;
        return 0;
}

/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
        struct arch_hw_breakpoint *info;

        if (likely(!tsk->thread.last_hit_ubp))
                return;

        info = counter_arch_bp(tsk->thread.last_hit_ubp);
        regs->msr &= ~MSR_SE;
        __set_breakpoint(info);
        tsk->thread.last_hit_ubp = NULL;
}

/*
 * Handle debug exception notifications.
 */
int hw_breakpoint_handler(struct die_args *args)
{
        int rc = NOTIFY_STOP;
        struct perf_event *bp;
        struct pt_regs *regs = args->regs;
        int stepped = 1;
        struct arch_hw_breakpoint *info;
        unsigned int instr;
        unsigned long dar = regs->dar;

        /* Disable breakpoints during exception handling */
        hw_breakpoint_disable();

        /*
         * The counter may be concurrently released but that can only
         * occur from a call_rcu() path. We can then safely fetch
         * the breakpoint, use its callback, touch its counter
         * while we are in an rcu_read_lock() path.
         */
        rcu_read_lock();

        bp = __this_cpu_read(bp_per_reg);
        if (!bp)
                goto out;
        info = counter_arch_bp(bp);

        /*
         * Return early after invoking user-callback function without restoring
         * DABR if the breakpoint is from ptrace which always operates in
         * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
         * generated in do_dabr().
         */
        if (bp->overflow_handler == ptrace_triggered) {
                perf_bp_event(bp, regs);
                rc = NOTIFY_DONE;
                goto out;
        }

        /*
         * Verify if dar lies within the address range occupied by the symbol
         * being watched to filter extraneous exceptions.  If it doesn't,
         * we still need to single-step the instruction, but we don't
         * generate an event.
         */
        info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
        if (!((bp->attr.bp_addr <= dar) &&
              (dar - bp->attr.bp_addr < bp->attr.bp_len)))
                info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;

        /* Do not emulate user-space instructions, instead single-step them */
        if (user_mode(regs)) {
                current->thread.last_hit_ubp = bp;
                regs->msr |= MSR_SE;
                goto out;
        }

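        /*
         * In kernel mode, read the trapping instruction and emulate it in
         * software so that the breakpoint can simply be re-installed below.
         */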
        stepped = 0;
        instr = 0;
        if (!__get_user_inatomic(instr, (unsigned int *) regs->nip))
                stepped = emulate_step(regs, instr);

        /*
         * emulate_step() could not execute it. We've failed in reliably
         * handling the hw-breakpoint. Unregister it and throw a warning
         * message to let the user know about it.
         */
        if (!stepped) {
                WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
                        "0x%lx will be disabled.", info->address);
                perf_event_disable_inatomic(bp);
                goto out;
        }
        /*
         * As a policy, the callback is invoked in a 'trigger-after-execute'
         * fashion
         */
        if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
                perf_bp_event(bp, regs);

        __set_breakpoint(info);
out:
        rcu_read_unlock();
        return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);

/*
 * Handle single-step exceptions following a DABR hit.
 */
static int single_step_dabr_instruction(struct die_args *args)
{
        struct pt_regs *regs = args->regs;
        struct perf_event *bp = NULL;
        struct arch_hw_breakpoint *info;

        bp = current->thread.last_hit_ubp;
        /*
         * Check if we are single-stepping as a result of a
         * previous HW Breakpoint exception
         */
        if (!bp)
                return NOTIFY_DONE;

        info = counter_arch_bp(bp);

        /*
         * We shall invoke the user-defined callback function in the single
         * stepping handler to conform to 'trigger-after-execute' semantics
         */
        if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
                perf_bp_event(bp, regs);

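        /*
         * Single-stepping is done; re-install the breakpoint that was held
         * off while the original instruction executed.
         */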
        __set_breakpoint(info);
        current->thread.last_hit_ubp = NULL;

        /*
         * If the process was being single-stepped by ptrace, let the
         * other single-step actions occur (e.g. generate SIGTRAP).
         */
        if (test_thread_flag(TIF_SINGLESTEP))
                return NOTIFY_DONE;

        return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);

/*
 * Handle debug exception notifications.
 */
int hw_breakpoint_exceptions_notify(
                struct notifier_block *unused, unsigned long val, void *data)
{
        int ret = NOTIFY_DONE;

        switch (val) {
        case DIE_DABR_MATCH:
                ret = hw_breakpoint_handler(data);
                break;
        case DIE_SSTEP:
                ret = single_step_dabr_instruction(data);
                break;
        }

        return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
        struct thread_struct *t = &tsk->thread;

        unregister_hw_breakpoint(t->ptrace_bps[0]);
        t->ptrace_bps[0] = NULL;
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
        /* TODO */
}