// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/extable.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

extern void die(const char *str, struct pt_regs *regs, long err);

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));

	do {
		pmd_t *pmd;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			pr_alert("(bad)");
			break;
		}

		pmd = pmd_offset(pgd, addr);
#if PTRS_PER_PMD != 1
		pr_alert(", *pmd=%08lx", pmd_val(*pmd));
#endif

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_alert("(bad)");
			break;
		}

		if (IS_ENABLED(CONFIG_HIGHMEM)) {
			pte_t *pte;

			/* We must not map this if we have highmem enabled */
			pte = pte_offset_map(pmd, addr);
			pr_alert(", *pte=%08lx", pte_val(*pte));
			pte_unmap(pte);
		}
	} while (0);

	pr_alert("\n");
}
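
/*
 * Example of the dump above, with hypothetical table values for
 * illustration only -- the exact columns depend on the configuration
 * (the *pmd field is compiled out when PTRS_PER_PMD == 1, and the
 * *pte field is only printed when CONFIG_HIGHMEM is enabled):
 *
 *   pgd = c0004000
 *   [bf000000] *pgd=07a0c04b, *pte=07e5f30f
 */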

void do_page_fault(unsigned long entry, unsigned long addr,
		   unsigned int error_code, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	siginfo_t info;
	int fault;
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	clear_siginfo(&info);
	error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
	tsk = current;
	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (addr >= TASK_SIZE) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;

		if (addr >= TASK_SIZE && addr < VMALLOC_END
		    && (entry == ENTRY_PTE_NOT_PRESENT))
			goto vmalloc_fault;
		else
			goto no_context;
	}
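
	/*
	 * To summarize the dispatch above: a user-mode access to a kernel
	 * address always ends in SIGSEGV; a kernel-mode access in the
	 * vmalloc range with a not-present PTE is satisfied lazily from
	 * init_mm.pgd (see vmalloc_fault below); any other kernel-mode
	 * access goes to the exception-table fixup path (no_context).
	 */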

	/* Send a signal to the task for handling the unaligned access. */
	if (entry == ENTRY_GENERAL_EXCPETION
	    && error_code == ETYPE_ALIGNMENT_CHECK) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto no_context;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (!user_mode(regs) &&
		    !search_exception_tables(instruction_pointer(regs)))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
		if (IS_ENABLED(CONFIG_DEBUG_VM)) {
			if (!user_mode(regs) &&
			    !search_exception_tables(instruction_pointer(regs)))
				goto no_context;
		}
	}

	vma = find_vma(mm, addr);

	if (unlikely(!vma))
		goto bad_area;

	if (vma->vm_start <= addr)
		goto good_area;

	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;

	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */
	if (entry == ENTRY_PTE_NOT_PRESENT) {
		if (error_code & ITYPE_mskINST)
			mask = VM_EXEC;
		else {
			mask = VM_READ | VM_WRITE;
			if (vma->vm_flags & VM_WRITE)
				flags |= FAULT_FLAG_WRITE;
		}
	} else if (entry == ENTRY_TLB_MISC) {
		switch (error_code & ITYPE_mskETYPE) {
		case RD_PROT:
			/* read from a page without read permission */
			mask = VM_READ;
			break;
		case WRT_PROT:
			/* write to a page without write permission */
			mask = VM_WRITE;
			flags |= FAULT_FLAG_WRITE;
			break;
		case NOEXEC:
			/* instruction fetch from a non-executable page */
			mask = VM_EXEC;
			break;
		case PAGE_MODIFY:
			/* first write to a clean, writable page */
			mask = VM_WRITE;
			flags |= FAULT_FLAG_WRITE;
			break;
		case ACC_BIT:
			/* accessed-bit faults are not expected to reach here */
			BUG();
		default:
			break;
		}
	}
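
	/*
	 * 'mask' now holds the access rights that the faulting access
	 * requires; reject the access if this vma does not grant them.
	 */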
	if (!(vma->vm_flags & mask))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else
			goto bad_area;
	}

	/*
	 * Major/minor page fault accounting is only done on the initial
	 * attempt. If we go through a retry, it is extremely likely that the
	 * page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}
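
	/*
	 * Fault handled. Note that after one retry FAULT_FLAG_ALLOW_RETRY
	 * has been cleared and FAULT_FLAG_TRIED set, so handle_mm_fault()
	 * cannot ask us to retry a second time and we cannot loop here
	 * forever.
	 */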
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.address = addr;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = entry;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)addr;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 * when it accesses user-memory. When it fails in one
	 * of those points, we find it in a table and do a jump
	 * to some fixup code that loads an appropriate error
	 * code)
	 */
	{
		const struct exception_table_entry *entry;

		entry = search_exception_tables(instruction_pointer(regs));
		if (entry) {
			/* Adjust the instruction pointer in the stackframe */
			instruction_pointer(regs) = entry->fixup;
			return;
		}
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

	return;

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	/* Send a SIGBUS */
	tsk->thread.address = addr;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = entry;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)addr;
	force_sig_info(SIGBUS, &info, tsk);

	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in an unluckily timed irq
		 * (like inside schedule() between switch_mm and
		 * switch_to...).
		 */
		unsigned int index = pgd_index(addr);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

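		/*
		 * NDS32_SR_L1_PPTB holds the physical base address of the
		 * current level-1 page table; __va() converts it to a
		 * kernel virtual address so the pgd entry can be read.
		 */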
		pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;

		pud = pud_offset(pgd, addr);
		pud_k = pud_offset(pgd_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;

		if (!pmd_present(*pmd))
			set_pmd(pmd, *pmd_k);
		else
			BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

		/*
		 * Since the vmalloc area is global, we don't
		 * need to copy individual PTEs; it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}