/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
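
/*
 * Note on the indexing convention, as used by max_task_bp_pinned() and
 * toggle_bp_task_slot() below: task_bp_pinned[n] counts the tasks that
 * currently have n + 1 breakpoints pinned on this cpu. For example,
 * when a task goes from two pinned breakpoints to three,
 * task_bp_pinned[2] is incremented and task_bp_pinned[1] is decremented.
 */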

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
        unsigned int pinned;
        unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
        int i;
        unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);

        for (i = HBP_NUM - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }

        return 0;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
{
        if (cpu >= 0) {
                slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
                slots->pinned += max_task_bp_pinned(cpu);
                slots->flexible = per_cpu(nr_bp_flexible, cpu);

                return;
        }

        for_each_online_cpu(cpu) {
                unsigned int nr;

                nr = per_cpu(nr_cpu_bp_pinned, cpu);
                nr += max_task_bp_pinned(cpu);

                if (nr > slots->pinned)
                        slots->pinned = nr;

                nr = per_cpu(nr_bp_flexible, cpu);

                if (nr > slots->flexible)
                        slots->flexible = nr;
        }
}

/*
 * Add or remove a pinned breakpoint for the given task in our
 * constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
        int count = 0;
        struct perf_event *bp;
        struct perf_event_context *ctx = tsk->perf_event_ctxp;
        unsigned int *tsk_pinned;
        struct list_head *list;
        unsigned long flags;

        if (WARN_ONCE(!ctx, "No perf context for this task"))
                return;

        list = &ctx->event_list;

        spin_lock_irqsave(&ctx->lock, flags);

        /*
         * The current breakpoint counter is not included in the list
         * at the open() callback time
         */
        list_for_each_entry(bp, list, event_entry) {
                if (bp->attr.type == PERF_TYPE_BREAKPOINT)
                        count++;
        }

        spin_unlock_irqrestore(&ctx->lock, flags);

        if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
                return;

        tsk_pinned = per_cpu(task_bp_pinned, cpu);
        if (enable) {
                tsk_pinned[count]++;
                if (count > 0)
                        tsk_pinned[count-1]--;
        } else {
                tsk_pinned[count]--;
                if (count > 0)
                        tsk_pinned[count-1]++;
        }
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        /* Pinned counter task profiling */
        if (tsk) {
                if (cpu >= 0) {
                        toggle_bp_task_slot(tsk, cpu, enable);
                        return;
                }

                for_each_online_cpu(cpu)
                        toggle_bp_task_slot(tsk, cpu, enable);
                return;
        }

        /* Pinned counter cpu profiling */
        if (enable)
                per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
        else
                per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't use up every debug register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 *  == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register free (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
 */
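/*
 * Worked example for the check below (illustrative; assumes an arch
 * with HBP_NUM == 4 debug registers, as on x86): suppose a cpu carries
 * 2 pinned cpu-wide breakpoints and its most loaded task has 1 pinned
 * breakpoint, so slots.pinned == 3. If any flexible counter exists,
 * the "+ (!!slots.flexible)" term reserves one register for it:
 * 3 + 1 == HBP_NUM, and reserve_bp_slot() refuses the new breakpoint
 * with -ENOSPC. With no flexible counter, the new breakpoint may take
 * the fourth and last register.
 */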
int reserve_bp_slot(struct perf_event *bp)
{
        struct bp_busy_slots slots = {0};
        int ret = 0;

        mutex_lock(&nr_bp_mutex);

        fetch_bp_busy_slots(&slots, bp->cpu);

        /* Flexible counters need to keep at least one slot */
        if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
                ret = -ENOSPC;
                goto end;
        }

        toggle_bp_slot(bp, true);

end:
        mutex_unlock(&nr_bp_mutex);

        return ret;
}

void release_bp_slot(struct perf_event *bp)
{
        mutex_lock(&nr_bp_mutex);

        toggle_bp_slot(bp, false);

        mutex_unlock(&nr_bp_mutex);
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = reserve_bp_slot(bp);
        if (ret)
                return ret;

        /*
         * Ptrace breakpoints can be temporary perf events only
         * meant to reserve a slot. In this case, they are created disabled
         * and we don't want to check the params right now (as we put a
         * null addr). But perf tools create events as disabled and we want
         * to check the params for them.
         * This is a quick hack that will be removed soon, once we remove
         * the tmp breakpoints from ptrace.
         */
        if (!bp->attr.disabled || !bp->overflow_handler)
                ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);

        return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            struct task_struct *tsk)
{
        return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
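
/*
 * Usage sketch (illustrative only, not part of this file): arming a
 * 4-byte write watchpoint on a traced task. The hw_breakpoint_init()
 * helper and the HW_BREAKPOINT_* constants come from
 * <linux/hw_breakpoint.h>; "addr", "tsk" and the "triggered" callback
 * (a perf_overflow_handler_t) are assumed to be supplied by the
 * caller, e.g. a ptrace request.
 *
 *      struct perf_event_attr attr;
 *      struct perf_event *bp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = addr;
 *      attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W;
 *
 *      bp = register_user_hw_breakpoint(&attr, triggered, tsk);
 *      if (IS_ERR(bp))
 *              return PTR_ERR(bp);
 */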

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
struct perf_event *
modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
        pid_t pid = bp->ctx->task->pid;
        perf_overflow_handler_t triggered = bp->overflow_handler;

        /*
         * FIXME: do it without unregistering
         * - We don't want to lose our slot
         * - If the new bp is incorrect, don't lose the older one
         * Note: the task pid and overflow handler are sampled above,
         * before the unregistration, so we don't touch a released event.
         */
        unregister_hw_breakpoint(bp);

        return perf_event_create_kernel_counter(attr, -1, pid, triggered);
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
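
/*
 * Usage sketch (illustrative only): moving an existing breakpoint to a
 * new address. "new_addr" is an assumption of this sketch. Since the
 * current implementation re-registers the event, the returned pointer
 * must replace the old "bp".
 *
 *      struct perf_event_attr attr = bp->attr;
 *
 *      attr.bp_addr = new_addr;
 *      bp = modify_user_hw_breakpoint(bp, &attr);
 *      if (IS_ERR(bp))
 *              return PTR_ERR(bp);
 */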

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
        if (!bp)
                return;
        perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered)
{
        struct perf_event **cpu_events, **pevent, *bp;
        long err;
        int cpu;

        cpu_events = alloc_percpu(typeof(*cpu_events));
        if (!cpu_events)
                return ERR_PTR(-ENOMEM);

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

                *pevent = bp;

                if (IS_ERR(bp)) {
                        err = PTR_ERR(bp);
                        goto fail;
                }
        }

        return cpu_events;

fail:
        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                if (IS_ERR(*pevent))
                        break;
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
        /* return the error if any */
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
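
/*
 * Usage sketch (illustrative only, patterned after the in-tree sample
 * in samples/hw_breakpoint/): a kernel-wide read/write watchpoint on a
 * kernel symbol. "ksym_name" and the "wide_bp_handler" callback (a
 * perf_overflow_handler_t) are assumptions of this sketch.
 *
 *      struct perf_event **wide_bp;
 *      struct perf_event_attr attr;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = kallsyms_lookup_name(ksym_name);
 *      attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *      wide_bp = register_wide_hw_breakpoint(&attr, wide_bp_handler);
 *      if (IS_ERR(wide_bp))
 *              return PTR_ERR(wide_bp);
 *
 *      (and later, e.g. on module exit:)
 *      unregister_wide_hw_breakpoint(wide_bp);
 */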

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
{
        int cpu;
        struct perf_event **pevent;

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
        .notifier_call = hw_breakpoint_exceptions_notify,
        /* we need to be notified first */
        .priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
        return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);

struct pmu perf_ops_bp = {
        .enable         = arch_install_hw_breakpoint,
        .disable        = arch_uninstall_hw_breakpoint,
        .read           = hw_breakpoint_pmu_read,
        .unthrottle     = hw_breakpoint_pmu_unthrottle
};