git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git / blame - arch/arm/mm/fault.c
signal: Ensure every siginfo we send has all bits initialized
1da177e4
LT
1/*
2 * linux/arch/arm/mm/fault.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 * Modifications for ARM processor (c) 1995-2004 Russell King
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
0ea9365a 11#include <linux/extable.h>
1da177e4 12#include <linux/signal.h>
1da177e4 13#include <linux/mm.h>
67306da6 14#include <linux/hardirq.h>
1da177e4 15#include <linux/init.h>
25ce1dd7 16#include <linux/kprobes.h>
33fa9b13 17#include <linux/uaccess.h>
252d4c27 18#include <linux/page-flags.h>
3f07c014 19#include <linux/sched/signal.h>
b17b0153 20#include <linux/sched/debug.h>
65cec8e3 21#include <linux/highmem.h>
7ada189f 22#include <linux/perf_event.h>
1da177e4 23
1da177e4 24#include <asm/pgtable.h>
9f97da78
DH
25#include <asm/system_misc.h>
26#include <asm/system_info.h>
1da177e4 27#include <asm/tlbflush.h>
1da177e4
LT
28
29#include "fault.h"
30
09529f7a 31#ifdef CONFIG_MMU
25ce1dd7
NP
32
33#ifdef CONFIG_KPROBES
34static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
35{
36 int ret = 0;
37
38 if (!user_mode(regs)) {
39 /* kprobe_running() needs smp_processor_id() */
40 preempt_disable();
41 if (kprobe_running() && kprobe_fault_handler(regs, fsr))
42 ret = 1;
43 preempt_enable();
44 }
45
46 return ret;
47}
48#else
49static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
50{
51 return 0;
52}
53#endif
54
1da177e4
LT
55/*
56 * This is useful to dump out the page tables associated with
57 * 'addr' in mm 'mm'.
58 */
59void show_pte(struct mm_struct *mm, unsigned long addr)
60{
61 pgd_t *pgd;
62
63 if (!mm)
64 mm = &init_mm;
65
4ed89f22 66 pr_alert("pgd = %p\n", mm->pgd);
1da177e4 67 pgd = pgd_offset(mm, addr);
4ed89f22 68 pr_alert("[%08lx] *pgd=%08llx",
29a38193 69 addr, (long long)pgd_val(*pgd));
1da177e4
LT
70
71 do {
516295e5 72 pud_t *pud;
1da177e4
LT
73 pmd_t *pmd;
74 pte_t *pte;
75
76 if (pgd_none(*pgd))
77 break;
78
79 if (pgd_bad(*pgd)) {
4ed89f22 80 pr_cont("(bad)");
1da177e4
LT
81 break;
82 }
83
516295e5
RK
84 pud = pud_offset(pgd, addr);
85 if (PTRS_PER_PUD != 1)
4ed89f22 86 pr_cont(", *pud=%08llx", (long long)pud_val(*pud));
516295e5
RK
87
88 if (pud_none(*pud))
89 break;
90
91 if (pud_bad(*pud)) {
4ed89f22 92 pr_cont("(bad)");
516295e5
RK
93 break;
94 }
95
96 pmd = pmd_offset(pud, addr);
da46c79a 97 if (PTRS_PER_PMD != 1)
4ed89f22 98 pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));
1da177e4
LT
99
100 if (pmd_none(*pmd))
101 break;
102
103 if (pmd_bad(*pmd)) {
4ed89f22 104 pr_cont("(bad)");
1da177e4
LT
105 break;
106 }
107
1da177e4 108 /* We must not map this if we have highmem enabled */
252d4c27
NP
109 if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
110 break;
111
1da177e4 112 pte = pte_offset_map(pmd, addr);
4ed89f22 113 pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
f7b8156d 114#ifndef CONFIG_ARM_LPAE
4ed89f22 115 pr_cont(", *ppte=%08llx",
29a38193 116 (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
f7b8156d 117#endif
1da177e4 118 pte_unmap(pte);
1da177e4
LT
119 } while(0);
120
4ed89f22 121 pr_cont("\n");
1da177e4 122}
09529f7a
CM
123#else /* CONFIG_MMU */
124void show_pte(struct mm_struct *mm, unsigned long addr)
125{ }
126#endif /* CONFIG_MMU */
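For reference, the pr_alert()/pr_cont() calls above are what produce the page-table dump seen in ARM oops reports. For a NULL pointer dereference that hits an empty first-level entry, the output follows the format strings above and looks roughly like this (the addresses are illustrative, not from a real crash):

	pgd = c0004000
	[00000010] *pgd=00000000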
1da177e4
LT
127
128/*
129 * Oops. The kernel tried to access some page that wasn't present.
130 */
131static void
132__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
133 struct pt_regs *regs)
134{
135 /*
136 * Are we prepared to handle this kernel fault?
137 */
138 if (fixup_exception(regs))
139 return;
140
141 /*
142 * No handler, we'll have to terminate things with extreme prejudice.
143 */
144 bust_spinlocks(1);
4ed89f22
RK
145 pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
146 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
147 "paging request", addr);
1da177e4
LT
148
149 show_pte(mm, addr);
150 die("Oops", regs, fsr);
151 bust_spinlocks(0);
152 do_exit(SIGKILL);
153}
154
155/*
156 * Something tried to access memory that isn't in our memory map..
157 * User mode accesses just cause a SIGSEGV
158 */
159static void
160__do_user_fault(struct task_struct *tsk, unsigned long addr,
2d137c24 161 unsigned int fsr, unsigned int sig, int code,
162 struct pt_regs *regs)
1da177e4
LT
163{
164 struct siginfo si;
165
3eb0f519
EB
166 clear_siginfo(&si);
167
1da177e4 168#ifdef CONFIG_DEBUG_USER
f5274c2d
JM
169 if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
170 ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
2d137c24 171 printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
172 tsk->comm, sig, addr, fsr);
1da177e4
LT
173 show_pte(tsk->mm, addr);
174 show_regs(regs);
175 }
176#endif
177
178 tsk->thread.address = addr;
179 tsk->thread.error_code = fsr;
180 tsk->thread.trap_no = 14;
2d137c24 181 si.si_signo = sig;
1da177e4
LT
182 si.si_errno = 0;
183 si.si_code = code;
184 si.si_addr = (void __user *)addr;
2d137c24 185 force_sig_info(sig, &si, tsk);
1da177e4
LT
186}
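The si_signo, si_code and si_addr values filled in above are exactly what the faulting process observes in a SA_SIGINFO handler. A minimal userspace sketch (not part of this file; it assumes an ordinary hosted Linux environment) that prints them:

	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void segv_handler(int sig, siginfo_t *si, void *ctx)
	{
		/* si_code is SEGV_MAPERR for an unmapped address and SEGV_ACCERR
		 * when a mapping exists but forbids the access; si_addr is the
		 * faulting address passed to __do_user_fault().
		 * (printf() is not async-signal-safe, but fine for a one-shot demo.) */
		(void)ctx;
		printf("sig=%d si_code=%d si_addr=%p\n", sig, si->si_code, si->si_addr);
		_exit(0);
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = segv_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGSEGV, &sa, NULL);

		*(volatile int *)16 = 1;	/* fault on an unmapped address */
		return 0;
	}

Faulting on an unmapped address as above reports SEGV_MAPERR; the SEGV_ACCERR case is sketched after access_error() below.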
187
e5beac37 188void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1da177e4 189{
e5beac37
RK
190 struct task_struct *tsk = current;
191 struct mm_struct *mm = tsk->active_mm;
192
1da177e4
LT
193 /*
194 * If we are in kernel mode at this point, we
195 * have no context to handle this fault with.
196 */
197 if (user_mode(regs))
2d137c24 198 __do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
1da177e4
LT
199 else
200 __do_kernel_fault(mm, addr, fsr, regs);
201}
202
09529f7a 203#ifdef CONFIG_MMU
5c72fc5c
NP
204#define VM_FAULT_BADMAP 0x010000
205#define VM_FAULT_BADACCESS 0x020000
1da177e4 206
d374bf14
RK
207/*
208 * Check that the permissions on the VMA allow for the fault which occurred.
209 * If we encountered a write fault, we must have write permission, otherwise
210 * we allow any permission.
211 */
212static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
213{
214 unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
215
216 if (fsr & FSR_WRITE)
217 mask = VM_WRITE;
df297bf6
RK
218 if (fsr & FSR_LNX_PF)
219 mask = VM_EXEC;
d374bf14
RK
220
221 return vma->vm_flags & mask ? false : true;
222}
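The effect of access_error() is easy to trigger from userspace: a store to a mapping that exists but lacks VM_WRITE fails this check, __do_page_fault() returns VM_FAULT_BADACCESS, and do_page_fault() turns that into SIGSEGV with SEGV_ACCERR rather than SEGV_MAPERR. An illustrative sketch, assuming standard Linux userspace:

	#include <sys/mman.h>

	int main(void)
	{
		/* Read-only anonymous mapping: the VMA exists but lacks VM_WRITE. */
		char *p = mmap(NULL, 4096, PROT_READ,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;

		p[0] = 'x';	/* write fault here -> SIGSEGV with SEGV_ACCERR */
		return 0;
	}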
223
224static int __kprobes
1da177e4 225__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
8878a539 226 unsigned int flags, struct task_struct *tsk)
1da177e4
LT
227{
228 struct vm_area_struct *vma;
d374bf14 229 int fault;
1da177e4
LT
230
231 vma = find_vma(mm, addr);
232 fault = VM_FAULT_BADMAP;
d374bf14 233 if (unlikely(!vma))
1da177e4 234 goto out;
d374bf14 235 if (unlikely(vma->vm_start > addr))
1da177e4
LT
236 goto check_stack;
237
238 /*
239 * Ok, we have a good vm_area for this
240 * memory access, so we can handle it.
241 */
242good_area:
d374bf14
RK
243 if (access_error(fsr, vma)) {
244 fault = VM_FAULT_BADACCESS;
1da177e4 245 goto out;
d374bf14 246 }
1da177e4 247
dcddffd4 248 return handle_mm_fault(vma, addr & PAGE_MASK, flags);
1da177e4 249
1da177e4 250check_stack:
9b61a4d1
RK
251 /* Don't allow expansion below FIRST_USER_ADDRESS */
252 if (vma->vm_flags & VM_GROWSDOWN &&
253 addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
1da177e4
LT
254 goto good_area;
255out:
256 return fault;
257}
258
785d3cd2 259static int __kprobes
1da177e4
LT
260do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
261{
262 struct task_struct *tsk;
263 struct mm_struct *mm;
2d137c24 264 int fault, sig, code;
759496ba 265 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1da177e4 266
25ce1dd7
NP
267 if (notify_page_fault(regs, fsr))
268 return 0;
269
1da177e4
LT
270 tsk = current;
271 mm = tsk->mm;
272
02fe2845
RK
273 /* Enable interrupts if they were enabled in the parent context. */
274 if (interrupts_enabled(regs))
275 local_irq_enable();
276
1da177e4
LT
277 /*
278 * If we're in an interrupt or have no user
279 * context, we must not take the fault..
280 */
70ffdb93 281 if (faulthandler_disabled() || !mm)
1da177e4
LT
282 goto no_context;
283
759496ba
JW
284 if (user_mode(regs))
285 flags |= FAULT_FLAG_USER;
286 if (fsr & FSR_WRITE)
287 flags |= FAULT_FLAG_WRITE;
288
840ff6a4
RK
289 /*
290 * As per x86, we may deadlock here. However, since the kernel only
291 * validly references user space from well defined areas of the code,
292 * we can bug out early if this is from code which shouldn't.
293 */
294 if (!down_read_trylock(&mm->mmap_sem)) {
295 if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
296 goto no_context;
8878a539 297retry:
840ff6a4 298 down_read(&mm->mmap_sem);
bf456992
RK
299 } else {
300 /*
301 * The above down_read_trylock() might have succeeded in
302 * which case, we'll have missed the might_sleep() from
303 * down_read()
304 */
305 might_sleep();
1d212712
ID
306#ifdef CONFIG_DEBUG_VM
307 if (!user_mode(regs) &&
308 !search_exception_tables(regs->ARM_pc))
309 goto no_context;
310#endif
840ff6a4
RK
311 }
312
8878a539
KC
313 fault = __do_page_fault(mm, addr, fsr, flags, tsk);
314
315 /* If we need to retry but a fatal signal is pending, handle the
316 * signal first. We do not need to release the mmap_sem because
317 * it would already be released in __lock_page_or_retry in
318 * mm/filemap.c. */
746a272e
MR
319 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
320 if (!user_mode(regs))
321 goto no_context;
8878a539 322 return 0;
746a272e 323 }
8878a539
KC
324
325 /*
326 * Major/minor page fault accounting is only done on the
327 * initial attempt. If we go through a retry, it is extremely
328 * likely that the page will be found in page cache at that point.
329 */
1da177e4 330
a8b0ca17 331 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
dff2aa7a 332 if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
8878a539
KC
333 if (fault & VM_FAULT_MAJOR) {
334 tsk->maj_flt++;
335 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
336 regs, addr);
337 } else {
338 tsk->min_flt++;
339 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
340 regs, addr);
341 }
342 if (fault & VM_FAULT_RETRY) {
343 /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
344 * of starvation. */
345 flags &= ~FAULT_FLAG_ALLOW_RETRY;
45cac65b 346 flags |= FAULT_FLAG_TRIED;
8878a539
KC
347 goto retry;
348 }
349 }
350
351 up_read(&mm->mmap_sem);
7ada189f 352
1da177e4 353 /*
0e8fb931 354 * Handle the "normal" case first - VM_FAULT_MAJOR
1da177e4 355 */
5c72fc5c 356 if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
1da177e4
LT
357 return 0;
358
87134102
JW
359 /*
360 * If we are in kernel mode at this point, we
361 * have no context to handle this fault with.
362 */
363 if (!user_mode(regs))
364 goto no_context;
365
b42c6344
RK
366 if (fault & VM_FAULT_OOM) {
367 /*
368 * We ran out of memory, call the OOM killer, and return to
369 * userspace (which will retry the fault, or kill us if we
370 * got oom-killed)
371 */
372 pagefault_out_of_memory();
373 return 0;
374 }
375
83c54070 376 if (fault & VM_FAULT_SIGBUS) {
2d137c24 377 /*
378 * We had some memory, but were unable to
379 * successfully fix up this page fault.
380 */
381 sig = SIGBUS;
382 code = BUS_ADRERR;
83c54070 383 } else {
2d137c24 384 /*
385 * Something tried to access memory that
386 * isn't in our memory map..
387 */
388 sig = SIGSEGV;
389 code = fault == VM_FAULT_BADACCESS ?
390 SEGV_ACCERR : SEGV_MAPERR;
1da177e4 391 }
1da177e4 392
2d137c24 393 __do_user_fault(tsk, addr, fsr, sig, code, regs);
394 return 0;
1da177e4
LT
395
396no_context:
397 __do_kernel_fault(mm, addr, fsr, regs);
398 return 0;
399}
09529f7a
CM
400#else /* CONFIG_MMU */
401static int
402do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
403{
404 return 0;
405}
406#endif /* CONFIG_MMU */
1da177e4
LT
407
408/*
409 * First Level Translation Fault Handler
410 *
411 * We enter here because the first level page table doesn't contain
412 * a valid entry for the address.
413 *
414 * If the address is in kernel space (>= TASK_SIZE), then we are
415 * probably faulting in the vmalloc() area.
416 *
417 * If the init_task's first level page tables contain the relevant
418 * entry, we copy it to this task. If not, we send the process
419 * a signal, fix up the exception, or oops the kernel.
420 *
421 * NOTE! We MUST NOT take any locks for this case. We may be in an
422 * interrupt or a critical region, and should only copy the information
423 * from the master page table, nothing more.
424 */
09529f7a 425#ifdef CONFIG_MMU
785d3cd2 426static int __kprobes
1da177e4
LT
427do_translation_fault(unsigned long addr, unsigned int fsr,
428 struct pt_regs *regs)
429{
1da177e4
LT
430 unsigned int index;
431 pgd_t *pgd, *pgd_k;
516295e5 432 pud_t *pud, *pud_k;
1da177e4
LT
433 pmd_t *pmd, *pmd_k;
434
435 if (addr < TASK_SIZE)
436 return do_page_fault(addr, fsr, regs);
437
5e27fb78
A
438 if (user_mode(regs))
439 goto bad_area;
440
1da177e4
LT
441 index = pgd_index(addr);
442
1da177e4
LT
443 pgd = cpu_get_pgd() + index;
444 pgd_k = init_mm.pgd + index;
445
446 if (pgd_none(*pgd_k))
447 goto bad_area;
1da177e4
LT
448 if (!pgd_present(*pgd))
449 set_pgd(pgd, *pgd_k);
450
516295e5
RK
451 pud = pud_offset(pgd, addr);
452 pud_k = pud_offset(pgd_k, addr);
453
454 if (pud_none(*pud_k))
455 goto bad_area;
456 if (!pud_present(*pud))
457 set_pud(pud, *pud_k);
458
459 pmd = pmd_offset(pud, addr);
460 pmd_k = pmd_offset(pud_k, addr);
1da177e4 461
f7b8156d
CM
462#ifdef CONFIG_ARM_LPAE
463 /*
464 * Only one hardware entry per PMD with LPAE.
465 */
466 index = 0;
467#else
33a9c41b
KS
468 /*
469 * On ARM one Linux PGD entry contains two hardware entries (see page
470 * tables layout in pgtable.h). We normally guarantee that we always
471 * fill both L1 entries. But create_mapping() doesn't follow the rule.
472 * It can create individual L1 entries, so here we have to check
473 * pmd_none() on the entry that really corresponds to the address, not
474 * on the first of the pair.
475 */
476 index = (addr >> SECTION_SHIFT) & 1;
f7b8156d 477#endif
33a9c41b 478 if (pmd_none(pmd_k[index]))
1da177e4
LT
479 goto bad_area;
480
481 copy_pmd(pmd, pmd_k);
482 return 0;
483
484bad_area:
e5beac37 485 do_bad_area(addr, fsr, regs);
1da177e4
LT
486 return 0;
487}
09529f7a
CM
488#else /* CONFIG_MMU */
489static int
490do_translation_fault(unsigned long addr, unsigned int fsr,
491 struct pt_regs *regs)
492{
493 return 0;
494}
495#endif /* CONFIG_MMU */
1da177e4
LT
496
497/*
498 * Some section permission faults need to be handled gracefully.
499 * They can happen due to a __{get,put}_user during an oops.
500 */
809e660f 501#ifndef CONFIG_ARM_LPAE
1da177e4
LT
502static int
503do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
504{
e5beac37 505 do_bad_area(addr, fsr, regs);
1da177e4
LT
506 return 0;
507}
809e660f 508#endif /* CONFIG_ARM_LPAE */
1da177e4
LT
509
510/*
511 * This abort handler always returns "fault".
512 */
513static int
514do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
515{
516 return 1;
517}
518
136848d4 519struct fsr_info {
1da177e4
LT
520 int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
521 int sig;
cfb0810e 522 int code;
1da177e4 523 const char *name;
1da177e4
LT
524};
525
136848d4 526/* FSR definition */
f7b8156d
CM
527#ifdef CONFIG_ARM_LPAE
528#include "fsr-3level.c"
529#else
136848d4 530#include "fsr-2level.c"
f7b8156d 531#endif
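The fsr_info[] and ifsr_info[] tables themselves live in the included fsr-2level.c / fsr-3level.c files and are indexed by the fault-status bits extracted with fsr_fs(). Each slot has the struct fsr_info shape declared above; an illustrative entry (a hypothetical example, not the verbatim upstream table) would look like:

	/* Hypothetical example of one table slot; the real arrays are the
	 * fsr_info[]/ifsr_info[] definitions in fsr-2level.c and fsr-3level.c:
	 * handler, default signal, si_code, and the name printed by
	 * do_DataAbort()'s "Unhandled fault" message. */
	static const struct fsr_info example_entry = {
		.fn   = do_translation_fault,
		.sig  = SIGSEGV,
		.code = SEGV_MAPERR,
		.name = "section translation fault",
	};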
136848d4 532
1da177e4
LT
533void __init
534hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
6338a6aa 535 int sig, int code, const char *name)
1da177e4 536{
6338a6aa
KS
537 if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
538 BUG();
539
540 fsr_info[nr].fn = fn;
541 fsr_info[nr].sig = sig;
542 fsr_info[nr].code = code;
543 fsr_info[nr].name = name;
1da177e4
LT
544}
545
546/*
547 * Dispatch a data abort to the relevant handler.
548 */
c6089061 549asmlinkage void
1da177e4
LT
550do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
551{
c88d6aa7 552 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
cfb0810e 553 struct siginfo info;
1da177e4 554
df297bf6 555 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
1da177e4
LT
556 return;
557
4ed89f22 558 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
1da177e4 559 inf->name, fsr, addr);
6d021b72 560 show_pte(current->mm, addr);
cfb0810e 561
3eb0f519 562 clear_siginfo(&info);
cfb0810e
RK
563 info.si_signo = inf->sig;
564 info.si_errno = 0;
565 info.si_code = inf->code;
566 info.si_addr = (void __user *)addr;
1eeb66a1 567 arm_notify_die("", regs, &info, fsr, 0);
1da177e4
LT
568}
569
3a4b5dca
WD
570void __init
571hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
572 int sig, int code, const char *name)
573{
574 if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
575 BUG();
576
577 ifsr_info[nr].fn = fn;
578 ifsr_info[nr].sig = sig;
579 ifsr_info[nr].code = code;
580 ifsr_info[nr].name = name;
581}
582
c6089061 583asmlinkage void
4fb28474 584do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1da177e4 585{
d25ef8b8
KS
586 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
587 struct siginfo info;
588
589 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
590 return;
591
4ed89f22 592 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
d25ef8b8
KS
593 inf->name, ifsr, addr);
594
3eb0f519 595 clear_siginfo(&info);
d25ef8b8
KS
596 info.si_signo = inf->sig;
597 info.si_errno = 0;
598 info.si_code = inf->code;
599 info.si_addr = (void __user *)addr;
600 arm_notify_die("", regs, &info, ifsr, 0);
1da177e4
LT
601}
602
9254970c
LS
603/*
604 * Abort handler to be used only during first unmasking of asynchronous aborts
605 * on the boot CPU. This makes sure that the machine will not die if the
606 * firmware/bootloader left an imprecise abort pending for us to trip over.
607 */
608static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
609 struct pt_regs *regs)
610{
611 pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
612 "first unmask, this is most likely caused by a "
613 "firmware/bootloader bug.\n", fsr);
614
615 return 0;
616}
617
618void __init early_abt_enable(void)
619{
97a98ae5 620 fsr_info[FSR_FS_AEA].fn = early_abort_handler;
9254970c 621 local_abt_enable();
97a98ae5 622 fsr_info[FSR_FS_AEA].fn = do_bad;
9254970c
LS
623}
624
f7b8156d 625#ifndef CONFIG_ARM_LPAE
993bf4ec
KS
626static int __init exceptions_init(void)
627{
628 if (cpu_architecture() >= CPU_ARCH_ARMv6) {
629 hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
630 "I-cache maintenance fault");
631 }
632
b8ab5397
KS
633 if (cpu_architecture() >= CPU_ARCH_ARMv7) {
634 /*
635 * TODO: Access flag faults introduced in ARMv6K.
636 * Runtime check for 'K' extension is needed
637 */
638 hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
639 "section access flag fault");
640 hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
641 "section access flag fault");
642 }
643
993bf4ec
KS
644 return 0;
645}
646
647arch_initcall(exceptions_init);
f7b8156d 648#endif