/* Support for MMIO probes.
 * Benefits from much code from kprobes
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/errno.h>
#include <asm/debugreg.h>
#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page; /* location of the fault page */
	pteval_t old_presence; /* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when linked into kmmio_page_table.
	 */
	int count;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * This is basically a dynamic stabbing problem. We could use the existing
 * prio tree code, but there are possibly better implementations:
 * "The Interval Skip List: A Data Structure for Finding All Intervals That
 * Overlap a Point" (might be simple), or
 * "Space Efficient Dynamic Stabbing with Fast Queries" - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

98
0fd0e3da 99/* You must be holding RCU read lock. */
8b7d89d0
PP
100static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
101{
0fd0e3da 102 struct list_head *head;
0492e1bb 103 struct kmmio_fault_page *f;
8b7d89d0
PP
104
105 page &= PAGE_MASK;
0fd0e3da 106 head = kmmio_page_list(page);
0492e1bb
SB
107 list_for_each_entry_rcu(f, head, list) {
108 if (f->page == page)
109 return f;
8b7d89d0 110 }
8b7d89d0
PP
111 return NULL;
112}
113
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmdval_t v = pmd_val(*pmd);
	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else	/* presume this has been called with clear==true previously */
		v |= *old;
	set_pmd(pmd, __pmd(v));
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);
	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else	/* presume this has been called with clear==true previously */
		v |= *old;
	set_pte_atomic(pte, __pte(v));
}

static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->page, &level);

	if (!pte) {
		pr_err("no pte for page 0x%08lx\n", f->page);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(f->page);
	return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. RCU read lock is assumed held, so the struct
 * will not disappear unexpectedly. Furthermore, the caller must guarantee
 * that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming on the other hand is allowed, and may occur when a fault
 * and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
	if (f->armed) {
		pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
			   f->page, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
		  f->page);
	f->armed = true;
	return ret;
}

/** Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);
	WARN_ONCE(ret < 0,
			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
	f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could be executing especially
 * within a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate
 * and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		if (addr == ctx->addr) {
			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(); the
			 * page really not being present is the most common.
			 */
			pr_debug("secondary hit for 0x%08lx CPU %d.\n",
				 addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
					addr, smp_processor_id());
		} else {
			/*
			 * Prevent overwriting an already in-flight context.
			 * This should not happen, let's hope disarming at
			 * least prevents a panic.
			 */
			pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
				 smp_processor_id(), addr);
			pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now we set the present bit in the PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		/*
		 * Debug traps without an active context are due to either
		 * something external causing them (e.g. using a debugger
		 * while mmio tracing is enabled), or erroneous behaviour.
		 */
		pr_warning("unexpected debug trap on CPU %d.\n",
			   smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Prevent racing against release_kmmio_fault_page(). */
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->page));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		f->release_next = *release_list;
		*release_list = f;
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. The events may result only from programming
 * mistakes by accessing addresses before the beginning or past the end of a
 * mapping.
 */
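/*
 * Illustrative example (not from the original source): with PAGE_SIZE 4096,
 * a probe at p->addr == 0xd0001800 with p->len == 0x1000 gives
 * size_lim = 0x1000 + 0x800 = 0x1800, so the loop below arms the two pages
 * at 0xd0001000 and 0xd0002000. The first 0x800 bytes of the first page and
 * the last 0x800 bytes of the second lie outside the intended mapping.
 */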
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
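
/*
 * Usage sketch (illustrative, not part of the original file): a client such
 * as mmiotrace registers a probe over an ioremapped region. The handler
 * names and the iomem/maplen variables below are hypothetical; the handler
 * signatures follow the pre_handler/post_handler call sites above.
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *				unsigned long addr)
 *	{
 *		pr_info("mmio access at 0x%08lx\n", addr);
 *	}
 *
 *	static void my_post(struct kmmio_probe *p, unsigned long condition,
 *				struct pt_regs *regs)
 *	{
 *		pr_info("mmio access completed\n");
 *	}
 *
 *	static struct kmmio_probe probe = {
 *		.addr = (unsigned long)iomem,	// start of traced mapping
 *		.len = maplen,			// length of the mapping
 *		.pre_handler = my_pre,
 *		.post_handler = my_post,
 *	};
 *
 *	if (register_kmmio_probe(&probe))	// 0 on success, -EEXIST if
 *		goto fail;			// the address is already probed
 */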

static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *f = dr->release_list;
	while (f) {
		struct kmmio_fault_page *next = f->release_next;
		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			*prevp = f->release_next;
		}
		f = f->release_next;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);

	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after an RCU grace period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, kmmio page fault handler might
	 * not find the respective kmmio_fault_page and determine it's not
	 * a kmmio fault, when it actually is. This would lead to madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
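
/*
 * Teardown sketch (illustrative, not part of the original file), following
 * the rule above that a synchronize_rcu() must separate unregistering from
 * releasing the struct; "probe" is the hypothetical object registered in
 * the earlier usage sketch.
 *
 *	unregister_kmmio_probe(&probe);
 *	synchronize_rcu();	// wait until no callback can still be running
 *	// only now may the struct kmmio_probe be freed or reused
 */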

static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;
	unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
			/*
			 * Reset the BS bit in dr6 (pointed by args->err) to
			 * denote completion of processing
			 */
			*dr6_p &= ~DR_STEP;
			return NOTIFY_STOP;
		}

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}