/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/mm/fault.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Modified by Cort Dougan and Paul Mackerras.
 *
 * Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/magic.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <mm/mmu_decl.h>

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 11))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
	return 0;
}
#endif

/*
 * Check whether the instruction at regs->nip is a store using
 * an update addressing form which will update r1.
 */
static int store_updates_sp(struct pt_regs *regs)
{
	unsigned int inst;

	if (get_user(inst, (unsigned int __user *)regs->nip))
		return 0;
	/* check for 1 in the rA field */
	if (((inst >> 16) & 0x1f) != 1)
		return 0;
	/* check major opcode */
	switch (inst >> 26) {
	case 37:	/* stwu */
	case 39:	/* stbu */
	case 45:	/* sthu */
	case 53:	/* stfsu */
	case 55:	/* stfdu */
		return 1;
	case 62:	/* std or stdu */
		return (inst & 3) == 1;
	case 31:
		/* check minor opcode */
		switch ((inst >> 1) & 0x3ff) {
		case 181:	/* stdux */
		case 183:	/* stwux */
		case 247:	/* stbux */
		case 439:	/* sthux */
		case 695:	/* stfsux */
		case 759:	/* stfdux */
			return 1;
		}
	}
	return 0;
}

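/*
 * Worked example: the common function prologue "stwu r1,-16(r1)"
 * assembles to 0x9421fff0; (inst >> 26) == 37 (the stwu major opcode)
 * and ((inst >> 16) & 0x1f) == 1 (rA == r1), so store_updates_sp()
 * returns 1 and the faulting access is treated as a valid stack push.
 */
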
/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault,
 *  - 0 for any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
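/*
 * For example: a store to an unmapped user page on a classic 64-bit
 * processor arrives here from the 0x300 data storage interrupt, with
 * DSISR_ISSTORE set in error_code and the faulting address in
 * 'address'.
 */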
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
			    unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	siginfo_t info;
	int code = SEGV_MAPERR;
	int is_write = 0, ret;
	int trap = TRAP(regs);
	int is_exec = trap == 0x400;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	/*
	 * Fortunately the bit assignments in SRR1 for an instruction
	 * fault and DSISR for a data fault are mostly the same for the
	 * bits we are interested in.  But there are some bits which
	 * indicate errors in DSISR but can validly be set in SRR1.
	 */
	if (trap == 0x400)
		error_code &= 0x48200000;
	else
		is_write = error_code & DSISR_ISSTORE;
#else
	is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */

	if (notify_page_fault(regs))
		return 0;

	if (unlikely(debugger_fault_handler(regs)))
		return 0;

	/* On a kernel SLB miss we can only check for a valid exception entry */
	if (!user_mode(regs) && (address >= TASK_SIZE))
		return SIGSEGV;

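	/*
	 * DABR is the PowerPC Data Address Breakpoint Register, i.e. a
	 * hardware watchpoint (typically set via ptrace).  A match is
	 * reported through do_dabr() as a debug trap rather than being
	 * handled as an ordinary page fault.
	 */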
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
			     defined(CONFIG_PPC_BOOK3S_64))
	if (error_code & DSISR_DABRMATCH) {
		/* DABR match */
		do_dabr(regs, address, error_code);
		return 0;
	}
#endif

	if (in_atomic() || mm == NULL) {
		if (!user_mode(regs))
			return SIGSEGV;
		/* in_atomic() in user mode is really bad,
		   as is current->mm == NULL. */
		printk(KERN_EMERG "Page fault in user mode with "
		       "in_atomic() = %d mm = %p\n", in_atomic(), mm);
		printk(KERN_EMERG "NIP = %lx  MSR = %lx\n",
		       regs->nip, regs->msr);
		die("Weird page fault", regs, SIGSEGV);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
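	/*
	 * Concrete case: a buggy code path that holds mmap_sem
	 * dereferences a bad pointer directly (not via the user access
	 * helpers).  The trylock below fails, regs->nip is not in the
	 * exception table, and we branch to bad_area_nosemaphore to
	 * OOPS cleanly instead of deadlocking on down_read().
	 */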
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->nip))
			goto bad_area_nosemaphore;

		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	/*
	 * N.B. The POWER/Open ABI allows programs to access up to
	 * 288 bytes below the stack pointer.
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1.  Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {
		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			goto bad_area;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
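		/*
		 * For example: a stray stw to an address 8kB below r1
		 * is rejected here, but the same address written by
		 * "stwu r1,-8192(r1)" is allowed to grow the stack,
		 * since the store leaves r1 pointing at it.
		 */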
		if (address + 2048 < uregs->gpr[1]
		    && (!user_mode(regs) || !store_updates_sp(regs)))
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
	if (error_code & 0x95700000)
		/* an error such as lwarx to I/O controller space,
		   address matching DABR, eciwx, etc. */
		goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
	/* The MPC8xx sometimes needs to load an invalid/non-present TLB
	 * entry.  These must be invalidated separately, as the Linux mm
	 * doesn't do it for us.
	 */
	if (error_code & 0x40000000)		/* no translation? */
		_tlbil_va(address, 0, 0, 0);

	/* The MPC8xx seems to always set 0x80000000, which is
	 * "undefined".  Of those that can be set, this is the only
	 * one which seems bad.
	 */
	if (error_code & 0x10000000)
		/* Guarded storage error. */
		goto bad_area;
#endif /* CONFIG_8xx */

	if (is_exec) {
#ifdef CONFIG_PPC_STD_MMU
		/* A protection fault on exec goes straight to failure on
		 * hash-based MMUs, as they either don't support per-page
		 * execute permission or, if they do, it's handled already
		 * at the hash level.  This test would probably have to
		 * be removed if we changed the way this works to make hash
		 * processors use the same I/D cache coherency mechanism
		 * as embedded.
		 */
		if (error_code & DSISR_PROTFAULT)
			goto bad_area;
#endif /* CONFIG_PPC_STD_MMU */

		/*
		 * Allow execution from readable areas if the MMU does not
		 * provide separate controls over reading and executing.
		 *
		 * Note: this code used not to be enabled for 4xx/BookE.
		 * It is now, as I/D cache coherency for these is done at
		 * set_pte_at() time and I see no reason why the test
		 * below wouldn't be valid on those processors.  This -may-
		 * break programs compiled with a really old ABI though.
		 */
		if (!(vma->vm_flags & VM_EXEC) &&
		    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
		     !(vma->vm_flags & (VM_READ | VM_WRITE))))
			goto bad_area;
	/* a write */
	} else if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	/* a read */
	} else {
		/* protection fault */
		if (error_code & 0x08000000)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(ret & VM_FAULT_ERROR)) {
		if (ret & VM_FAULT_OOM)
			goto out_of_memory;
		else if (ret & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (ret & VM_FAULT_MAJOR) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
			      regs, address);
#ifdef CONFIG_PPC_SMLPAR
		if (firmware_has_feature(FW_FEATURE_CMO)) {
			preempt_disable();
			get_lppaca()->page_ins += (1 << PAGE_FACTOR);
			preempt_enable();
		}
#endif
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
			      regs, address);
	}
	up_read(&mm->mmap_sem);
	return 0;

bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses cause a SIGSEGV */
	if (user_mode(regs)) {
		_exception(SIGSEGV, regs, code, address);
		return 0;
	}

	if (is_exec && (error_code & DSISR_PROTFAULT)
	    && printk_ratelimit())
		printk(KERN_CRIT "kernel tried to execute NX-protected"
		       " page (%lx) - exploit attempt? (uid: %d)\n",
		       address, current_uid());

	return SIGSEGV;

/*
 * We ran out of memory, or something else happened that prevented us
 * from handling the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		return SIGKILL;
	pagefault_out_of_memory();
	return 0;

do_sigbus:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return 0;
	}
	return SIGBUS;
}

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *entry;
	unsigned long *stackend;

	/* Are we prepared to handle this fault? */
	if ((entry = search_exception_tables(regs->nip)) != NULL) {
		regs->nip = entry->fixup;
		return;
	}

	/* kernel has accessed a bad area */
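
	/*
	 * regs->trap is the exception vector we came in through:
	 * 0x300 (data storage) and 0x380 (data SLB miss) faults report
	 * the offending address in DAR, while 0x400 (instruction
	 * storage) and 0x480 (instruction SLB miss) are fetch failures
	 * at regs->nip.
	 */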
	switch (regs->trap) {
	case 0x300:
	case 0x380:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"data at address 0x%08lx\n", regs->dar);
		break;
	case 0x400:
	case 0x480:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"instruction fetch\n");
		break;
	default:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"unknown fault\n");
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

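	/*
	 * STACK_END_MAGIC (from <linux/magic.h>) is planted at the
	 * lowest word of a task's kernel stack when the task is
	 * created; if it has been overwritten, the stack most likely
	 * overflowed and the register state above is suspect.
	 */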
	stackend = end_of_stack(current);
	if (current != &init_task && *stackend != STACK_END_MAGIC)
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}