// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c		kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */
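
/*
 * /proc/kcore exposes kernel virtual memory as an ELF core file, so it can
 * be examined with an ELF-aware debugger; for example, something along the
 * lines of:
 *
 *	gdb vmlinux /proc/kcore
 *
 * gives a live, read-only view of kernel memory.
 */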

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

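/*
 * A kernel virtual address maps to the "memory" part of /proc/kcore at
 * offset kc_vaddr_to_offset(vaddr) plus the size of the ELF header area;
 * read_kcore() uses kc_offset_to_vaddr() for the reverse translation.
 * Architectures may override these macros.
 */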
#ifndef	kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

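/*
 * kclist_lock protects kclist_head and kcore_need_update.  Entries added
 * with kclist_add() stay on the list for the lifetime of the kernel; only
 * the KCORE_RAM/KCORE_VMEMMAP entries are rebuilt (see __kcore_update_ram())
 * once kcore_need_update has been set, e.g. after memory hotplug.
 */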
static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
static int kcore_need_update = 1;

void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	write_lock(&kclist_lock);
	list_add_tail(&new->list, &kclist_head);
	write_unlock(&kclist_lock);
}

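/*
 * Compute the total size of /proc/kcore: the page-aligned size of the ELF
 * header area (returned in *elf_buflen) plus the highest file offset covered
 * by any entry on kclist_head.  *nphdr is set to the number of program
 * headers needed: one PT_NOTE plus one PT_LOAD per entry.
 */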
static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}
	*elf_buflen =	sizeof(struct elfhdr) +
			(*nphdr + 2)*sizeof(struct elf_phdr) +
			3 * ((sizeof(struct elf_note)) +
			     roundup(sizeof(CORE_STR), 4)) +
			roundup(sizeof(struct elf_prstatus), 4) +
			roundup(sizeof(struct elf_prpsinfo), 4) +
			roundup(arch_task_struct_size, 4);
	*elf_buflen = PAGE_ALIGN(*elf_buflen);
	return size + *elf_buflen;
}

static void free_kclist_ents(struct list_head *head)
{
	struct kcore_list *tmp, *pos;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del(&pos->list);
		kfree(pos);
	}
}
/*
 * Replace all KCORE_RAM/KCORE_VMEMMAP information with passed list.
 */
static void __kcore_update_ram(struct list_head *list)
{
	int nphdr;
	size_t size;
	struct kcore_list *tmp, *pos;
	LIST_HEAD(garbage);

	write_lock(&kclist_lock);
	if (kcore_need_update) {
		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
			if (pos->type == KCORE_RAM
				|| pos->type == KCORE_VMEMMAP)
				list_move(&pos->list, &garbage);
		}
		list_splice_tail(list, &kclist_head);
	} else
		list_splice(list, &garbage);
	kcore_need_update = 0;
	proc_root_kcore->size = get_kcore_size(&nphdr, &size);
	write_unlock(&kclist_lock);

	free_kclist_ents(&garbage);
}

#ifdef CONFIG_HIGHMEM
/*
 * With CONFIG_HIGHMEM part of memory is not directly mapped (it is
 * _invisible_ to the kernel), so only low memory is dumped here.  Treat
 * [0...max_low_pfn) as one contiguous KCORE_RAM range; any memory holes
 * in that range are small enough to ignore.
 */
static int kcore_update_ram(void)
{
	LIST_HEAD(head);
	struct kcore_list *ent;
	int ret = 0;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, &head);
	__kcore_update_ram(&head);
	return ret;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (because we have to page-align the range) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}

#endif

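/*
 * Callback for walk_system_ram_range(): register a KCORE_RAM entry for the
 * directly-mapped range [pfn, pfn + nr_pages), clipped against the end of
 * the address space and the start of the vmalloc area, together with (under
 * SPARSEMEM_VMEMMAP) the vmemmap that describes it.  A non-zero return
 * aborts the walk, and kcore_update_ram() then fails with -ENOMEM.
 */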
static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
	ent->size = nr_pages << PAGE_SHIFT;

	/* Sanity check: can happen on 32-bit arches if __va() wraps around */
	if (ent->addr < (unsigned long) __va(0))
		goto free_out;

	/* Trim areas that are not mapped; taken from the ppc32 code. */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/* Trim when the vmalloc() area lies above the direct-map area. */
	if (VMALLOC_START > (unsigned long)__va(0)) {
		if (ent->addr > VMALLOC_START)
			goto free_out;
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_update_ram(void)
{
	int nid, ret;
	unsigned long end_pfn;
	LIST_HEAD(head);

	/* Not initialized yet: update now. */
	/* find out "max pfn" */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;
		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
	if (ret) {
		free_kclist_ents(&head);
		return -ENOMEM;
	}
	__kcore_update_ram(&head);
	return ret;
}
#endif /* CONFIG_HIGHMEM */

/*****************************************************************************/
/*
 * determine size of ELF note
 */
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup((strlen(en->name) + 1), 4);
	sz += roundup(en->datasz, 4);

	return sz;
} /* end notesize() */

/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
	struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	bufp = (char*) roundup((unsigned long)bufp,4);
	DUMP_WRITE(men->data, men->datasz);
	bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

	return bufp;
} /* end storenote() */

/*
 * store an ELF coredump header in the supplied buffer
 * nphdr is the number of elf_phdr to insert
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
	struct elf_phdr *nhdr, *phdr;
	struct elfhdr *elf;
	struct memelfnote notes[3];
	off_t offset = 0;
	struct kcore_list *m;

	/* setup ELF header */
	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	offset += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS]	= ELF_CLASS;
	elf->e_ident[EI_DATA]	= ELF_DATA;
	elf->e_ident[EI_VERSION]= EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type	= ET_CORE;
	elf->e_machine	= ELF_ARCH;
	elf->e_version	= EV_CURRENT;
	elf->e_entry	= 0;
	elf->e_phoff	= sizeof(struct elfhdr);
	elf->e_shoff	= 0;
	elf->e_flags	= ELF_CORE_EFLAGS;
	elf->e_ehsize	= sizeof(struct elfhdr);
	elf->e_phentsize= sizeof(struct elf_phdr);
	elf->e_phnum	= nphdr;
	elf->e_shentsize= 0;
	elf->e_shnum	= 0;
	elf->e_shstrndx	= 0;

	/* setup ELF PT_NOTE program header */
	nhdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	nhdr->p_type	= PT_NOTE;
	nhdr->p_offset	= 0;
	nhdr->p_vaddr	= 0;
	nhdr->p_paddr	= 0;
	nhdr->p_filesz	= 0;
	nhdr->p_memsz	= 0;
	nhdr->p_flags	= 0;
	nhdr->p_align	= 0;

	/* setup ELF PT_LOAD program header for every area */
	list_for_each_entry(m, &kclist_head, list) {
		phdr = (struct elf_phdr *) bufp;
		bufp += sizeof(struct elf_phdr);
		offset += sizeof(struct elf_phdr);

		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
		phdr->p_vaddr	= (size_t)m->addr;
		if (m->type == KCORE_RAM || m->type == KCORE_TEXT)
			phdr->p_paddr	= __pa(m->addr);
		else
			phdr->p_paddr	= (elf_addr_t)-1;
		phdr->p_filesz	= phdr->p_memsz	= m->size;
		phdr->p_align	= PAGE_SIZE;
	}

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */
	nhdr->p_offset	= offset;

	/* set up the process status */
	notes[0].name = CORE_STR;
	notes[0].type = NT_PRSTATUS;
	notes[0].datasz = sizeof(struct elf_prstatus);
	notes[0].data = &prstatus;

	memset(&prstatus, 0, sizeof(struct elf_prstatus));

	nhdr->p_filesz	= notesize(&notes[0]);
	bufp = storenote(&notes[0], bufp);

	/* set up the process info */
	notes[1].name	= CORE_STR;
	notes[1].type	= NT_PRPSINFO;
	notes[1].datasz	= sizeof(struct elf_prpsinfo);
	notes[1].data	= &prpsinfo;

	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
	prpsinfo.pr_state	= 0;
	prpsinfo.pr_sname	= 'R';
	prpsinfo.pr_zomb	= 0;

	strcpy(prpsinfo.pr_fname, "vmlinux");
	strlcpy(prpsinfo.pr_psargs, saved_command_line, sizeof(prpsinfo.pr_psargs));

	nhdr->p_filesz	+= notesize(&notes[1]);
	bufp = storenote(&notes[1], bufp);

	/* set up the task structure */
	notes[2].name	= CORE_STR;
	notes[2].type	= NT_TASKSTRUCT;
	notes[2].datasz	= arch_task_struct_size;
	notes[2].data	= current;

	nhdr->p_filesz	+= notesize(&notes[2]);
	bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */

/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	char *buf = file->private_data;
	ssize_t acc = 0;
	size_t size, tsz;
	size_t elf_buflen;
	int nphdr;
	unsigned long start;

	read_lock(&kclist_lock);
	size = get_kcore_size(&nphdr, &elf_buflen);

	if (buflen == 0 || *fpos >= size) {
		read_unlock(&kclist_lock);
		return 0;
	}

	/* trim buflen to not go beyond EOF */
	if (buflen > size - *fpos)
		buflen = size - *fpos;

	/* construct an ELF core header if we'll need some of it */
	if (*fpos < elf_buflen) {
		char * elf_buf;

		tsz = elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
		if (!elf_buf) {
			read_unlock(&kclist_lock);
			return -ENOMEM;
		}
		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
		read_unlock(&kclist_lock);
		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	} else
		read_unlock(&kclist_lock);

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - elf_buflen);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	while (buflen) {
		struct kcore_list *m;

		read_lock(&kclist_lock);
		list_for_each_entry(m, &kclist_head, list) {
			if (start >= m->addr && start < (m->addr+m->size))
				break;
		}
		read_unlock(&kclist_lock);

		if (&m->list == &kclist_head) {
			if (clear_user(buffer, tsz))
				return -EFAULT;
		} else if (m->type == KCORE_VMALLOC) {
			vread(buf, (char *)start, tsz);
			/* we have to zero-fill user buffer even if no read */
			if (copy_to_user(buffer, buf, tsz))
				return -EFAULT;
		} else {
			if (kern_addr_valid(start)) {
				unsigned long n;

				/*
				 * Using bounce buffer to bypass the
				 * hardened user copy kernel text checks.
				 */
				memcpy(buf, (char *) start, tsz);
				n = copy_to_user(buffer, buf, tsz);
				/*
				 * We cannot distinguish between fault on source
				 * and fault on destination. When this happens
				 * we clear too and hope it will trigger the
				 * EFAULT again.
				 */
				if (n) {
					if (clear_user(buffer + tsz - n, n))
						return -EFAULT;
				}
			} else {
				if (clear_user(buffer, tsz))
					return -EFAULT;
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

	return acc;
}

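/*
 * Opening /proc/kcore requires CAP_SYS_RAWIO.  A one-page bounce buffer is
 * allocated per open file and used by read_kcore() to copy kernel memory
 * out without tripping the hardened usercopy checks; the RAM list and the
 * inode size are refreshed when needed (on first open and after memory
 * hotplug).
 */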
static int open_kcore(struct inode *inode, struct file *filp)
{
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations proc_kcore_operations = {
	.read		= read_kcore,
	.open		= open_kcore,
	.release	= release_kcore,
	.llseek		= default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		write_lock(&kclist_lock);
		kcore_need_update = 1;
		write_unlock(&kclist_lock);
	}
	return NOTIFY_OK;
}

static struct notifier_block kcore_callback_nb __meminitdata = {
	.notifier_call = kcore_callback,
	.priority = 0,
};

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, the architecture maps kernel text through a dedicated segment
 * rather than the direct-map area, so register a special KCORE_TEXT entry
 * for it.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with the vmalloc area.
 */
struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
				      &proc_kcore_operations);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	register_hotmemory_notifier(&kcore_callback_nb);

	return 0;
}
fs_initcall(proc_kcore_init);