/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2012 Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

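/*
 * Deliver a memory fault signal (SIGSEGV or SIGBUS) carrying the
 * faulting address to the current task.
 */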
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
	force_sig_fault(si_signo, si_code, (void __user *)address);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	pr_alert("pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	pr_alert("[%08lx] *pgd=%0*llx", addr, (u32)(sizeof(*pgd) * 2),
		 (u64)pgd_val(*pgd));

	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			pr_cont("(bad)");
			break;
		}

		p4d = p4d_offset(pgd, addr);
		if (PTRS_PER_P4D != 1)
			pr_cont(", *p4d=%0*llx", (u32)(sizeof(*p4d) * 2),
				(u64)p4d_val(*p4d));

		if (p4d_none(*p4d))
			break;

		if (p4d_bad(*p4d)) {
			pr_cont("(bad)");
			break;
		}

		pud = pud_offset(p4d, addr);
		if (PTRS_PER_PUD != 1)
			pr_cont(", *pud=%0*llx", (u32)(sizeof(*pud) * 2),
				(u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			pr_cont("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			pr_cont(", *pmd=%0*llx", (u32)(sizeof(*pmd) * 2),
				(u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_cont("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		pr_cont(", *pte=%0*llx", (u32)(sizeof(*pte) * 2),
			(u64)pte_val(*pte));
	} while (0);

	pr_cont("\n");
}

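/*
 * Copy the kernel mapping for 'address' from the reference page table
 * (init_mm.pgd) into the given 'pgd', down to the PMD level. Returns
 * the synchronized PMD, or NULL if the entries were already in sync,
 * i.e. the fault has some other cause and was not handled here.
 */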
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

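/*
 * Upper bound of the kernel address range handled by vmalloc_fault():
 * with the store queue API enabled, mappings beyond VMALLOC_END (up to
 * the end of the P3 area) must be faulted in as well.
 */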
#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	pr_alert("BUG: unable to handle kernel %s at %08lx\n",
		 address < PAGE_SIZE ? "NULL pointer dereference"
				     : "paging request",
		 address);
	pr_alert("PC:");
	printk_address(regs->pc, 1, KERN_ALERT);

	show_pte(NULL, address);
}

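/*
 * A fault in kernel mode with no user context to blame: try exception
 * table fixups and trapped I/O emulation first, then oops and kill the
 * task.
 */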
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

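/*
 * Like __bad_area_nosemaphore(), but called with mmap_lock held for
 * read; the lock is dropped before the signal is delivered.
 */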
static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	mmap_read_unlock(mm);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

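/*
 * handle_mm_fault() reported VM_FAULT_SIGBUS: drop mmap_lock and send
 * SIGBUS, or treat it as a kernel fault if there is no user context.
 */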
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address);
}

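/*
 * Deal with the non-trivial outcomes of handle_mm_fault(): pending
 * fatal signals, lock release, OOM, SIGBUS and SIGSEGV. Returns 1 if
 * the fault was fully handled here, 0 if the caller should continue.
 */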
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	/* Release mmap_lock first if necessary */
	if (!(fault & VM_FAULT_RETRY))
		mmap_read_unlock(current->mm);

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			no_context(regs, error_code, address);
			return 1;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

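/*
 * Check the faulting access against the permissions of the VMA it hit.
 * Returns non-zero if the access violates the VMA's protections.
 */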
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!vma_is_accessible(vma)))
		return 1;

	return 0;
}

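/* Anything at or above TASK_SIZE is a kernel address, never user memory. */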
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (kprobe_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(kprobe_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	mmap_read_lock(mm);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	mmap_read_unlock(mm);
}