/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>	/* kzalloc/kfree */
#include <linux/init.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
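/*
 * Note: task_bp_pinned[] works as a histogram (see toggle_bp_task_slot()
 * below): task_bp_pinned[n] is the number of tasks that currently pin
 * n + 1 breakpoints on this cpu. For example, two tasks each pinning two
 * breakpoints on a cpu give that cpu a task_bp_pinned[1] of 2.
 */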

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Report the maximum number of pinned breakpoints a task
 * can have in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
{
	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
		slots->pinned += max_task_bp_pinned(cpu);
		slots->flexible = per_cpu(nr_bp_flexible, cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned, cpu);
		nr += max_task_bp_pinned(cpu);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible, cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * Add/remove a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
	int count = 0;
	struct perf_event *bp;
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	unsigned int *task_bp_pinned;
	struct list_head *list;
	unsigned long flags;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return;

	list = &ctx->event_list;

	spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			count++;
	}

	spin_unlock_irqrestore(&ctx->lock, flags);

	if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
		return;

	task_bp_pinned = per_cpu(task_bp_pinned, cpu);
	if (enable) {
		task_bp_pinned[count]++;
		if (count > 0)
			task_bp_pinned[count-1]--;
	} else {
		task_bp_pinned[count]--;
		if (count > 0)
			task_bp_pinned[count-1]++;
	}
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
 */
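/*
 * Worked example (illustrative only, assuming HBP_NUM == 4 as on x86): if
 * a cpu holds 2 pinned cpu-wide breakpoints and its most loaded task pins
 * 1 more, slots.pinned == 3. A new pinned breakpoint would take the 4th
 * and last debug register, which the check below only allows when
 * slots.flexible == 0, since flexible counters must keep at least one
 * register to rotate on.
 */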
int reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	int ret = 0;

	mutex_lock(&nr_bp_mutex);

	fetch_bp_busy_slots(&slots, bp->cpu);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
		ret = -ENOSPC;
		goto end;
	}

	toggle_bp_slot(bp, true);

end:
	mutex_unlock(&nr_bp_mutex);

	return ret;
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	toggle_bp_slot(bp, false);

	mutex_unlock(&nr_bp_mutex);
}

int __register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	if (!bp->attr.disabled)
		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);

	return ret;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	bp->callback = perf_bp_event;

	return __register_perf_hw_breakpoint(bp);
}

/*
 * Register a breakpoint bound to a task and a given cpu.
 * If cpu is -1, the breakpoint is active for the task on every cpu.
 * If pid is -1, the breakpoint is active for every task on the given
 * cpu.
 */
static struct perf_event *
register_user_hw_breakpoint_cpu(unsigned long addr,
				int len,
				int type,
				perf_callback_t triggered,
				pid_t pid,
				int cpu,
				bool active)
{
	struct perf_event_attr *attr;
	struct perf_event *bp;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return ERR_PTR(-ENOMEM);

	attr->type = PERF_TYPE_BREAKPOINT;
	attr->size = sizeof(*attr);
	attr->bp_addr = addr;
	attr->bp_len = len;
	attr->bp_type = type;
	/*
	 * Such breakpoints are used by debuggers to trigger signals when
	 * we hit the expected memory op. We can't miss such events, they
	 * must be pinned.
	 */
	attr->pinned = 1;

	if (!active)
		attr->disabled = 1;

	bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered);
	kfree(attr);

	return bp;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @addr: is the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
register_user_hw_breakpoint(unsigned long addr,
			    int len,
			    int type,
			    perf_callback_t triggered,
			    struct task_struct *tsk,
			    bool active)
{
	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
					       tsk->pid, -1, active);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

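/*
 * Example usage (an illustrative sketch, not part of this file:
 * "my_triggered" is a placeholder perf_callback_t supplied by the caller,
 * and the HW_BREAKPOINT_* constants come from <linux/hw_breakpoint.h>):
 *
 *	struct perf_event *bp;
 *
 *	bp = register_user_hw_breakpoint(addr, HW_BREAKPOINT_LEN_4,
 *					 HW_BREAKPOINT_W, my_triggered,
 *					 tsk, true);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 *	...
 *	unregister_hw_breakpoint(bp);
 */
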
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @addr: is the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
modify_user_hw_breakpoint(struct perf_event *bp,
			  unsigned long addr,
			  int len,
			  int type,
			  perf_callback_t triggered,
			  struct task_struct *tsk,
			  bool active)
{
	/*
	 * FIXME: do it without unregistering
	 * - We don't want to lose our slot
	 * - If the new bp is incorrect, don't lose the older one
	 */
	unregister_hw_breakpoint(bp);

	return register_user_hw_breakpoint(addr, len, type, triggered,
					   tsk, active);
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

static struct perf_event *
register_kernel_hw_breakpoint_cpu(unsigned long addr,
				  int len,
				  int type,
				  perf_callback_t triggered,
				  int cpu,
				  bool active)
{
	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
					       -1, cpu, active);
}

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @addr: is the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @active: should we activate it while registering it
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event **
register_wide_hw_breakpoint(unsigned long addr,
			    int len,
			    int type,
			    perf_callback_t triggered,
			    bool active)
{
	struct perf_event **cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return ERR_PTR(-ENOMEM);

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = register_kernel_hw_breakpoint_cpu(addr, len, type,
					triggered, cpu, active);

		*pevent = bp;

		if (IS_ERR(bp) || !bp) {
			/* don't return ERR_PTR(0) == NULL if bp is NULL */
			err = bp ? PTR_ERR(bp) : -ENOMEM;
			goto fail;
		}
	}

	return cpu_events;

fail:
	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent) || !*pevent)
			break;
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
	/* return the error if any */
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

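/*
 * Example usage (an illustrative sketch, not part of this file:
 * "my_triggered" is a placeholder callback and "some_kernel_var" a
 * placeholder symbol to watch):
 *
 *	struct perf_event **cpu_bps;
 *
 *	cpu_bps = register_wide_hw_breakpoint((unsigned long)&some_kernel_var,
 *					      HW_BREAKPOINT_LEN_4,
 *					      HW_BREAKPOINT_W,
 *					      my_triggered, true);
 *	if (IS_ERR(cpu_bps))
 *		return PTR_ERR(cpu_bps);
 *	...
 *	unregister_wide_hw_breakpoint(cpu_bps);
 */
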
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);

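/*
 * Breakpoints are exposed to perf as a pmu: when perf schedules a
 * breakpoint event in or out of a cpu, the callbacks below install or
 * remove it from that cpu's debug registers through the arch helpers.
 */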
struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
	.unthrottle	= hw_breakpoint_pmu_unthrottle
};