// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
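
/*
 * With the default macros above, the memory-image part of the file is a
 * linear view of the kernel's direct mapping: virtual address v lands at
 * file offset kc_vaddr_to_offset(v) past the ELF header area, i.e. at
 * v - PAGE_OFFSET. Architectures whose dump layout differs supply their
 * own definitions of these two macros.
 */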

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
static int kcore_need_update = 1;

void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	write_lock(&kclist_lock);
	list_add_tail(&new->list, &kclist_head);
	write_unlock(&kclist_lock);
}
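
/*
 * Example (illustrative only): callers embed a kcore_list in storage that
 * outlives the entry and register it once, the way proc_kcore_init() does
 * for the vmalloc range below:
 *
 *	static struct kcore_list kcore_foo;	// hypothetical region
 *
 *	kclist_add(&kcore_foo, (void *)FOO_START, FOO_END - FOO_START,
 *		   KCORE_VMALLOC);
 *
 * kclist_add() links the entry in place rather than copying it.
 */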

static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}
	*elf_buflen =	sizeof(struct elfhdr) +
			(*nphdr + 2)*sizeof(struct elf_phdr) +
			3 * ((sizeof(struct elf_note)) +
			     roundup(sizeof(CORE_STR), 4)) +
			roundup(sizeof(struct elf_prstatus), 4) +
			roundup(sizeof(struct elf_prpsinfo), 4) +
			roundup(arch_task_struct_size, 4);
	*elf_buflen = PAGE_ALIGN(*elf_buflen);
	return size + *elf_buflen;
}
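
/*
 * The file layout this computes is:
 *
 *	[ELF header][program headers][notes][pad to PAGE_ALIGN]
 *	[memory image addressed via kc_vaddr_to_offset()]
 *
 * so the total size is the page-aligned header buffer (*elf_buflen) plus
 * the highest offset covered by any registered region.
 */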

static void free_kclist_ents(struct list_head *head)
{
	struct kcore_list *tmp, *pos;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del(&pos->list);
		kfree(pos);
	}
}
/*
 * Replace all KCORE_RAM/KCORE_VMEMMAP entries with the passed-in list.
 */
static void __kcore_update_ram(struct list_head *list)
{
	int nphdr;
	size_t size;
	struct kcore_list *tmp, *pos;
	LIST_HEAD(garbage);

	write_lock(&kclist_lock);
	if (kcore_need_update) {
		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
			if (pos->type == KCORE_RAM
				|| pos->type == KCORE_VMEMMAP)
				list_move(&pos->list, &garbage);
		}
		list_splice_tail(list, &kclist_head);
	} else
		list_splice(list, &garbage);
	kcore_need_update = 0;
	proc_root_kcore->size = get_kcore_size(&nphdr, &size);
	write_unlock(&kclist_lock);

	free_kclist_ents(&garbage);
}
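
/*
 * Stale entries are moved onto a private garbage list while kclist_lock
 * is held and only kfree()d after it is dropped, so readers of the list
 * never observe a freed entry.
 */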

#ifdef CONFIG_HIGHMEM
/*
 * With CONFIG_HIGHMEM we can treat [0...max_low_pfn) as one continuous
 * range of memory: holes in low memory are small, and highmem itself is
 * special because part of memory is _invisible_ to the kernel's direct
 * mapping, so only the low range is registered.
 */
static int kcore_update_ram(void)
{
	LIST_HEAD(head);
	struct kcore_list *ent;
	int ret = 0;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, &head);
	__kcore_update_ram(&head);
	return ret;
}
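
/*
 * Note that in this variant highmem pages are simply absent from the
 * dump; only the direct-mapped low memory is exposed through /proc/kcore.
 */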

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate the vmemmap address range for the given System RAM pfns and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (needed because we page-align the range) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}

#endif
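
/*
 * Registering the vmemmap range alongside each RAM chunk lets tools that
 * interpret /proc/kcore (gdb, crash, drgn and the like) read the struct
 * page array describing the dumped memory as well.
 */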

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);
	if (!memmap_valid_within(pfn, p, page_zone(p)))
		return 1;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	if (!virt_addr_valid(ent->addr))
		goto free_out;

	/* cut off the not-mapped tail (taken from the ppc32 code) */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid(), so we know this address
	 * is a valid pointer; therefore we can compare against it to decide
	 * whether we need to trim.
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_update_ram(void)
{
	int nid, ret;
	unsigned long end_pfn;
	LIST_HEAD(head);

	/* Not initialized yet: find the max pfn and update now. */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;
		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
	if (ret) {
		free_kclist_ents(&head);
		return -ENOMEM;
	}
	__kcore_update_ram(&head);
	return ret;
}
#endif /* CONFIG_HIGHMEM */
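
/*
 * walk_system_ram_range() invokes kclist_add_private() once per block of
 * "System RAM" in the resource tree, so the rebuilt list mirrors the
 * machine's current physical memory map.
 */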

/*****************************************************************************/
/*
 * determine the size of an ELF note
 */
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup((strlen(en->name) + 1), 4);
	sz += roundup(en->datasz, 4);

	return sz;
} /* end notesize() */

/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
	struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	bufp = (char*) roundup((unsigned long)bufp,4);
	DUMP_WRITE(men->data, men->datasz);
	bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

	return bufp;
} /* end storenote() */
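
/*
 * On-disk layout of each note emitted by storenote():
 *
 *	struct elf_note { n_namesz, n_descsz, n_type }
 *	name bytes ("CORE\0"), padded to a 4-byte boundary
 *	descriptor bytes, padded to a 4-byte boundary
 *
 * which is exactly what notesize() accounts for above.
 */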

/*
 * store an ELF coredump header in the supplied buffer
 * nphdr is the number of elf_phdr to insert
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
	struct elf_phdr *nhdr, *phdr;
	struct elfhdr *elf;
	struct memelfnote notes[3];
	off_t offset = 0;
	struct kcore_list *m;

	/* setup ELF header */
	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	offset += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS]	= ELF_CLASS;
	elf->e_ident[EI_DATA]	= ELF_DATA;
	elf->e_ident[EI_VERSION]= EV_CURRENT;
	elf->e_ident[EI_OSABI]	= ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type	= ET_CORE;
	elf->e_machine	= ELF_ARCH;
	elf->e_version	= EV_CURRENT;
	elf->e_entry	= 0;
	elf->e_phoff	= sizeof(struct elfhdr);
	elf->e_shoff	= 0;
	elf->e_flags	= ELF_CORE_EFLAGS;
	elf->e_ehsize	= sizeof(struct elfhdr);
	elf->e_phentsize= sizeof(struct elf_phdr);
	elf->e_phnum	= nphdr;
	elf->e_shentsize= 0;
	elf->e_shnum	= 0;
	elf->e_shstrndx	= 0;

	/* setup ELF PT_NOTE program header */
	nhdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	nhdr->p_type	= PT_NOTE;
	nhdr->p_offset	= 0;
	nhdr->p_vaddr	= 0;
	nhdr->p_paddr	= 0;
	nhdr->p_filesz	= 0;
	nhdr->p_memsz	= 0;
	nhdr->p_flags	= 0;
	nhdr->p_align	= 0;

	/* setup ELF PT_LOAD program header for every area */
	list_for_each_entry(m, &kclist_head, list) {
		phdr = (struct elf_phdr *) bufp;
		bufp += sizeof(struct elf_phdr);
		offset += sizeof(struct elf_phdr);

		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
		phdr->p_vaddr	= (size_t)m->addr;
		if (m->type == KCORE_RAM || m->type == KCORE_TEXT)
			phdr->p_paddr	= __pa(m->addr);
		else
			phdr->p_paddr	= (elf_addr_t)-1;
		phdr->p_filesz	= phdr->p_memsz	= m->size;
		phdr->p_align	= PAGE_SIZE;
	}

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */
	nhdr->p_offset	= offset;

	/* set up the process status */
	notes[0].name = CORE_STR;
	notes[0].type = NT_PRSTATUS;
	notes[0].datasz = sizeof(struct elf_prstatus);
	notes[0].data = &prstatus;

	memset(&prstatus, 0, sizeof(struct elf_prstatus));

	nhdr->p_filesz	= notesize(&notes[0]);
	bufp = storenote(&notes[0], bufp);

	/* set up the process info */
	notes[1].name	= CORE_STR;
	notes[1].type	= NT_PRPSINFO;
	notes[1].datasz	= sizeof(struct elf_prpsinfo);
	notes[1].data	= &prpsinfo;

	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
	prpsinfo.pr_state	= 0;
	prpsinfo.pr_sname	= 'R';
	prpsinfo.pr_zomb	= 0;

	strcpy(prpsinfo.pr_fname, "vmlinux");
	strlcpy(prpsinfo.pr_psargs, saved_command_line, sizeof(prpsinfo.pr_psargs));

	nhdr->p_filesz	+= notesize(&notes[1]);
	bufp = storenote(&notes[1], bufp);

	/* set up the task structure */
	notes[2].name	= CORE_STR;
	notes[2].type	= NT_TASKSTRUCT;
	notes[2].datasz	= arch_task_struct_size;
	notes[2].data	= current;

	nhdr->p_filesz	+= notesize(&notes[2]);
	bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */
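
/*
 * The result is an ET_CORE image whose PT_LOAD segments mirror
 * kclist_head; for example (illustrative), something like
 *
 *	readelf -h -l /proc/kcore
 *	gdb /path/to/vmlinux /proc/kcore
 *
 * can be used from userspace to inspect the header and segments built
 * here.
 */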

/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	char *buf = file->private_data;
	ssize_t acc = 0;
	size_t size, tsz;
	size_t elf_buflen;
	int nphdr;
	unsigned long start;

	read_lock(&kclist_lock);
	size = get_kcore_size(&nphdr, &elf_buflen);

	if (buflen == 0 || *fpos >= size) {
		read_unlock(&kclist_lock);
		return 0;
	}

	/* trim buflen to not go beyond EOF */
	if (buflen > size - *fpos)
		buflen = size - *fpos;

	/* construct an ELF core header if we'll need some of it */
	if (*fpos < elf_buflen) {
		char *elf_buf;

		tsz = elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
		if (!elf_buf) {
			read_unlock(&kclist_lock);
			return -ENOMEM;
		}
		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
		read_unlock(&kclist_lock);
		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if the buffer is already filled */
		if (buflen == 0)
			return acc;
	} else
		read_unlock(&kclist_lock);

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - elf_buflen);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	while (buflen) {
		struct kcore_list *m;

		read_lock(&kclist_lock);
		list_for_each_entry(m, &kclist_head, list) {
			if (start >= m->addr && start < (m->addr+m->size))
				break;
		}
		read_unlock(&kclist_lock);

		if (&m->list == &kclist_head) {
			if (clear_user(buffer, tsz))
				return -EFAULT;
		} else if (m->type == KCORE_VMALLOC) {
			vread(buf, (char *)start, tsz);
			/* we have to zero-fill the user buffer even if nothing was read */
			if (copy_to_user(buffer, buf, tsz))
				return -EFAULT;
		} else if (m->type == KCORE_USER) {
			/* User page is handled prior to normal kernel page: */
			if (copy_to_user(buffer, (char *)start, tsz))
				return -EFAULT;
		} else {
			if (kern_addr_valid(start)) {
				/*
				 * Using a bounce buffer to bypass the
				 * hardened usercopy kernel text checks.
				 */
				if (probe_kernel_read(buf, (void *) start, tsz)) {
					if (clear_user(buffer, tsz))
						return -EFAULT;
				} else {
					if (copy_to_user(buffer, buf, tsz))
						return -EFAULT;
				}
			} else {
				if (clear_user(buffer, tsz))
					return -EFAULT;
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

	return acc;
}
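
/*
 * Worked example of the offset translation above, assuming the default
 * kc_offset_to_vaddr(): a read at file position elf_buflen + N is served
 * from virtual address PAGE_OFFSET + N, in chunks of at most one page
 * (the first chunk is trimmed to the page boundary) so that no chunk
 * crosses a page boundary.
 */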

static int open_kcore(struct inode *inode, struct file *filp)
{
	if (kernel_is_locked_down("/proc/kcore"))
		return -EPERM;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}
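
/*
 * The PAGE_SIZE allocation in open_kcore() is the bounce buffer that
 * read_kcore() copies kernel memory through; it lives for the lifetime
 * of the open file and is freed in release_kcore() below.
 */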

static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations proc_kcore_operations = {
	.read		= read_kcore,
	.open		= open_kcore,
	.release	= release_kcore,
	.llseek		= default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		write_lock(&kclist_lock);
		kcore_need_update = 1;
		write_unlock(&kclist_lock);
	}
	return NOTIFY_OK;
}

static struct notifier_block kcore_callback_nb __meminitdata = {
	.notifier_call = kcore_callback,
	.priority = 0,
};
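
/*
 * Memory hot-add/remove only marks the cached list stale here; the next
 * open_kcore() rebuilds it via kcore_update_ram(), keeping the notifier
 * itself cheap.
 */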

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, the kernel text is mapped through a special segment rather
 * than the direct-map area, so it needs its own TEXT entry.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * The modules range gets its own entry only when it does not coincide
 * with the vmalloc range.
 */
struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
				      &proc_kcore_operations);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	register_hotmemory_notifier(&kcore_callback_nb);

	return 0;
}
fs_initcall(proc_kcore_init);