/*
 * Support for MMIO probes.
 * Borrows much code from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mmiotrace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long addr; /* the requested address */
	pteval_t old_presence; /* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when linked into kmmio_page_table.
	 */
	int count;

	bool scheduled_for_release;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long addr)
{
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return NULL;
	addr &= page_level_mask(l);

	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}

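/*
 * Worked example (illustrative, the address is made up): with
 * KMMIO_PAGE_HASH_BITS == 4 there are 16 buckets. An access at, say,
 * 0xffffc90000001234 inside a 4K mapping is masked by page_level_mask()
 * to its page base 0xffffc90000001000 before hashing, so every address
 * within one (possibly huge) page lands in the same bucket.
 */
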
/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * This is basically a dynamic stabbing problem. The existing prio tree
 * code could be used, but better-suited structures exist, e.g.:
 * "The Interval Skip List: A Data Structure for Finding All Intervals
 * That Overlap a Point" (might be simple), or
 * "Space Efficient Dynamic Stabbing with Fast Queries" - Mikkel Thorup.
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;

	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
{
	struct list_head *head;
	struct kmmio_fault_page *f;
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return NULL;
	addr &= page_level_mask(l);
	head = kmmio_page_list(addr);
	list_for_each_entry_rcu(f, head, list) {
		if (f->addr == addr)
			return f;
	}
	return NULL;
}

static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmdval_t v = pmd_val(*pmd);

	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else /* presume this has been called with clear==true previously */
		v |= *old;
	set_pmd(pmd, __pmd(v));
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);

	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else /* presume this has been called with clear==true previously */
		v |= *old;
	set_pte_atomic(pte, __pte(v));
}

static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->addr, &level);

	if (!pte) {
		pr_err("no pte for addr 0x%08lx\n", f->addr);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(f->addr);
	return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. RCU read lock is assumed held, so the struct
 * will not disappear unexpectedly. Furthermore, the caller must guarantee
 * that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming on the other hand is allowed, and may occur when a fault
 * and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;

	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
	if (f->armed) {
		pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
			   f->addr, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
		  f->addr);
	f->armed = true;
	return ret;
}

/* Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);

	WARN_ONCE(ret < 0,
		  KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
	f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could be executing inside a
 * kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate
 * and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */
	unsigned long page_base = addr;
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return -EINVAL;
	page_base &= page_level_mask(l);

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(page_base);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		if (page_base == ctx->addr) {
			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(), the
			 * page really not being present is the most common.
			 */
			pr_debug("secondary hit for 0x%08lx CPU %d.\n",
				 addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
					addr, smp_processor_id());
		} else {
			/*
			 * Prevent overwriting an already in-flight context.
			 * This should not happen, let's hope disarming at
			 * least prevents a panic.
			 */
			pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
				 smp_processor_id(), addr);
			pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(page_base);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = page_base;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}

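/*
 * Summary of one trace event, as implemented above: the armed page faults,
 * kmmio_handler() runs the pre_handler, disarms the page (restoring the
 * present bit) and sets TF; the faulting instruction then executes for
 * real; the resulting debug trap reaches post_kmmio_handler() below, which
 * runs the post_handler and re-arms the page if it is still registered.
 */
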
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		/*
		 * Debug traps without an active context are due to either
		 * something external causing them (e.g. using a debugger
		 * while mmio tracing is enabled), or erroneous behaviour.
		 */
		pr_warning("unexpected debug trap on CPU %d.\n",
			   smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Prevent racing against release_kmmio_fault_page(). */
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set; in that case, continue the remaining processing
	 * of do_debug, as if this were not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long addr)
{
	struct kmmio_fault_page *f;

	f = get_kmmio_fault_page(addr);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->addr = addr;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->addr));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long addr,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	f = get_kmmio_fault_page(addr);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		if (!f->scheduled_for_release) {
			f->release_next = *release_list;
			*release_list = f;
			f->scheduled_for_release = true;
		}
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. Such events can only result from
 * programming mistakes that access addresses before the beginning or
 * past the end of a mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	unsigned int l;
	pte_t *pte;

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}

	pte = lookup_address(p->addr, &l);
	if (!pte) {
		ret = -EINVAL;
		goto out;
	}

	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("Unable to set page fault.\n");
		size += page_level_size(l);
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
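
/*
 * A minimal usage sketch (illustrative only, not part of this file; the
 * in-tree client is mmiotrace in kernel/trace/trace_mmiotrace.c). The
 * handler, probe, and address names below are made up for the example:
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *			   unsigned long addr)
 *	{
 *		pr_info("MMIO access at 0x%08lx\n", addr);
 *	}
 *
 *	static struct kmmio_probe my_probe = {
 *		.addr = mmio_vaddr,	(an ioremap()'d virtual address)
 *		.len = PAGE_SIZE,
 *		.pre_handler = my_pre,
 *	};
 *
 *	ret = register_kmmio_probe(&my_probe);
 */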

static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *f = dr->release_list;

	while (f) {
		struct kmmio_fault_page *next = f->release_next;

		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			*prevp = f->release_next;
			f->release_next = NULL;
			f->scheduled_for_release = false;
		}
		f = *prevp;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);

	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * may you actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after a second RCU
 *    grace period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;
	unsigned int l;
	pte_t *pte;

	pte = lookup_address(p->addr, &l);
	if (!pte)
		return;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += page_level_size(l);
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	if (!release_list)
		return;

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, the kmmio page fault handler
	 * might not find the respective kmmio_fault_page and determine
	 * it's not a kmmio fault, when it actually is. This would lead to
	 * madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
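
/*
 * Teardown sketch following the rule above (illustrative; my_probe is the
 * hypothetical probe from the registration example): the probe struct may
 * only be reused or freed after a grace period, e.g.
 *
 *	unregister_kmmio_probe(&my_probe);
 *	synchronize_rcu();	(wait for in-flight handlers to finish)
 *	(now my_probe may safely be freed or reused)
 */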

static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;
	unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
			/*
			 * Reset the BS bit in dr6 (pointed by args->err) to
			 * denote completion of processing.
			 */
			*dr6_p &= ~DR_STEP;
			return NOTIFY_STOP;
		}

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}