/*
 * kexec: kexec_file_load system call
 *
 * Copyright (C) 2014 Red Hat Inc.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/ima.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include "kexec_internal.h"

static int kexec_calculate_store_digests(struct kimage *image);

/*
 * Currently this is the only default function that is exported as some
 * architectures need it to do additional handling.
 * In the future, other default functions may be exported too if required.
 */
int kexec_image_probe_default(struct kimage *image, void *buf,
			      unsigned long buf_len)
{
	const struct kexec_file_ops * const *fops;
	int ret = -ENOEXEC;

	for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
		ret = (*fops)->probe(buf, buf_len);
		if (!ret) {
			image->fops = *fops;
			return ret;
		}
	}

	return ret;
}
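/*
 * kexec_file_loaders[] is a NULL-terminated, per-architecture table of
 * struct kexec_file_ops (x86, for instance, lists kexec_bzImage64_ops); the
 * first loader whose ->probe() accepts the supplied buffer is remembered in
 * image->fops and used for the subsequent load/cleanup/verify_sig calls.
 */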

/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return kexec_image_probe_default(image, buf, buf_len);
}

static void *kexec_image_load_default(struct kimage *image)
{
	if (!image->fops || !image->fops->load)
		return ERR_PTR(-ENOEXEC);

	return image->fops->load(image, image->kernel_buf,
				 image->kernel_buf_len, image->initrd_buf,
				 image->initrd_buf_len, image->cmdline_buf,
				 image->cmdline_buf_len);
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return kexec_image_load_default(image);
}

static int kexec_image_post_load_cleanup_default(struct kimage *image)
{
	if (!image->fops || !image->fops->cleanup)
		return 0;

	return image->fops->cleanup(image->image_loader_data);
}

int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	return kexec_image_post_load_cleanup_default(image);
}

#ifdef CONFIG_KEXEC_VERIFY_SIG
static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
					  unsigned long buf_len)
{
	if (!image->fops || !image->fops->verify_sig) {
		pr_debug("kernel loader does not support signature verification.\n");
		return -EKEYREJECTED;
	}

	return image->fops->verify_sig(buf, buf_len);
}

int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return kexec_image_verify_sig_default(image, buf, buf_len);
}
#endif

/* Apply relocations of type RELA */
int __weak
arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
				 unsigned int relsec)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

/* Apply relocations of type REL */
int __weak
arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			     unsigned int relsec)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}

/*
 * Free up memory used by the kernel, initrd, and command line. These are
 * temporary allocations that are no longer needed once the buffers have
 * been loaded into separate segments and copied elsewhere.
 */
void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	/* See if the architecture has anything to clean up post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * The above call should have asked the image loader to free any data
	 * stored in kimage->image_loader_data. It should be safe to free it
	 * now.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}

/*
 * In file mode, the list of segments is prepared by the kernel. Copy the
 * relevant data from user space, do error checking, and prepare the
 * segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret = 0;
	void *ldata;
	loff_t size;

	ret = kernel_read_file_from_fd(kernel_fd, &image->kernel_buf,
				       &size, INT_MAX, READING_KEXEC_IMAGE);
	if (ret)
		return ret;
	image->kernel_buf_len = size;

	/* IMA needs to pass the measurement list to the next kernel. */
	ima_add_kexec_buffer(image);

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);
	if (ret)
		goto out;

#ifdef CONFIG_KEXEC_VERIFY_SIG
	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	if (ret) {
		pr_debug("kernel signature verification failed.\n");
		goto out;
	}
	pr_debug("kernel signature verification successful.\n");
#endif
	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = kernel_read_file_from_fd(initrd_fd, &image->initrd_buf,
					       &size, INT_MAX,
					       READING_KEXEC_INITRAMFS);
		if (ret)
			goto out;
		image->initrd_buf_len = size;
	}

	if (cmdline_len) {
		image->cmdline_buf = memdup_user(cmdline_ptr, cmdline_len);
		if (IS_ERR(image->cmdline_buf)) {
			ret = PTR_ERR(image->cmdline_buf);
			image->cmdline_buf = NULL;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* The command line should be a string with the last byte NUL */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);

	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}

static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}

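/*
 * Sketch of a typical invocation from user space (there is no glibc wrapper,
 * so syscall(2) is used directly; the file paths are illustrative only, and
 * cmdline_len must count the trailing NUL, as checked in
 * kimage_file_prepare_segments()):
 *
 *	char cmdline[] = "root=/dev/sda1 console=ttyS0";
 *	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);
 *	int initrd_fd = open("/boot/initrd.img", O_RDONLY);
 *
 *	syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
 *		sizeof(cmdline), cmdline, 0UL);
 */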
SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	}

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In the case of crash, the new kernel gets loaded into the reserved
	 * region. It is the same memory where an old crash kernel might
	 * already be loaded. Free any current crash dump kernel before we
	 * corrupt it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(), so we must copy the vmcoreinfo data after
	 * that call.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after the image has been loaded.
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	mutex_unlock(&kexec_mutex);
	kimage_free(image);
	return ret;
}

static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
				    struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_end = min(end, kbuf->buf_max);
	temp_start = temp_end - kbuf->memsz;

	do {
		/* align down start */
		temp_start = temp_start & (~(kbuf->buf_align - 1));

		if (temp_start < start || temp_start < kbuf->buf_min)
			return 0;

		temp_end = temp_start + kbuf->memsz - 1;

		/*
		 * Make sure this does not conflict with any of the existing
		 * segments.
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start - PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
				     struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_start = max(start, kbuf->buf_min);

	do {
		temp_start = ALIGN(temp_start, kbuf->buf_align);
		temp_end = temp_start + kbuf->memsz - 1;

		if (temp_end > end || temp_end > kbuf->buf_max)
			return 0;
		/*
		 * Make sure this does not conflict with any of the existing
		 * segments.
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start + PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_callback(struct resource *res, void *arg)
{
	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
	u64 start = res->start, end = res->end;
	unsigned long sz = end - start + 1;

	/* Returning 0 will take us to the next memory range */
	if (sz < kbuf->memsz)
		return 0;

	if (end < kbuf->buf_min || start > kbuf->buf_max)
		return 0;

	/*
	 * Allocate memory top down within the RAM range if requested,
	 * otherwise allocate bottom up.
	 */
	if (kbuf->top_down)
		return locate_mem_hole_top_down(start, end, kbuf);
	return locate_mem_hole_bottom_up(start, end, kbuf);
}
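/*
 * Callback contract: walk_system_ram_res() and walk_iomem_res_desc() keep
 * iterating while the callback returns 0 and stop as soon as it returns a
 * non-zero value, which is then propagated. The helpers above therefore
 * return 1 once kbuf->mem has been set, and kexec_locate_mem_hole() below
 * maps that 1 to success.
 */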

/**
 * arch_kexec_walk_mem - call func(data) on free memory regions
 * @kbuf:	Context info for the search. Also passed to @func.
 * @func:	Function to call for each memory region.
 *
 * Return: The memory walk will stop when func returns a non-zero value
 * and that value will be returned. If all free regions are visited without
 * func returning non-zero, then zero will be returned.
 */
int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
			       int (*func)(struct resource *, void *))
{
	if (kbuf->image->type == KEXEC_TYPE_CRASH)
		return walk_iomem_res_desc(crashk_res.desc,
					   IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
					   crashk_res.start, crashk_res.end,
					   kbuf, func);
	else
		return walk_system_ram_res(0, ULONG_MAX, kbuf, func);
}

/**
 * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel
 * @kbuf:	Parameters for the memory search.
 *
 * On success, kbuf->mem will have the start address of the memory region found.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	int ret;

	ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback);

	return ret == 1 ? 0 : -EADDRNOTAVAIL;
}

/**
 * kexec_add_buffer - place a buffer in a kexec segment
 * @kbuf:	Buffer contents and memory parameters.
 *
 * This function assumes that kexec_mutex is held.
 * On successful return, @kbuf->mem will have the physical address of
 * the buffer in memory.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_add_buffer(struct kexec_buf *kbuf)
{
	struct kexec_segment *ksegment;
	int ret;

	/* Currently, adding a segment this way is allowed only in file mode */
	if (!kbuf->image->file_mode)
		return -EINVAL;

	if (kbuf->image->nr_segments >= KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/*
	 * Make sure we are not trying to add a buffer after allocating
	 * control pages. All segments need to be placed before any control
	 * pages are allocated, as the control page allocation logic goes
	 * through the list of segments to make sure there are no destination
	 * overlaps.
	 */
	if (!list_empty(&kbuf->image->control_pages)) {
		WARN_ON(1);
		return -EINVAL;
	}

	/* Ensure minimum alignment needed for segments. */
	kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
	kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);

	/* Walk the RAM ranges and allocate a suitable range for the buffer */
	ret = kexec_locate_mem_hole(kbuf);
	if (ret)
		return ret;

	/* Found a suitable memory range */
	ksegment = &kbuf->image->segment[kbuf->image->nr_segments];
	ksegment->kbuf = kbuf->buffer;
	ksegment->bufsz = kbuf->bufsz;
	ksegment->mem = kbuf->mem;
	ksegment->memsz = kbuf->memsz;
	kbuf->image->nr_segments++;
	return 0;
}
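/*
 * Typical use from an architecture's image loader (a sketch; the initrd
 * variables are illustrative only):
 *
 *	struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
 *				  .buf_min = 0, .buf_max = ULONG_MAX,
 *				  .top_down = true };
 *	int ret;
 *
 *	kbuf.buffer = initrd;
 *	kbuf.bufsz = kbuf.memsz = initrd_len;
 *	ret = kexec_add_buffer(&kbuf);
 *	if (!ret)
 *		pr_debug("initrd placed at 0x%lx\n", kbuf.mem);
 */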

/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	if (!IS_ENABLED(CONFIG_ARCH_HAS_KEXEC_PURGATORY))
		return 0;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions)
		goto out_free_desc;

	desc->tfm = tfm;
	desc->flags = 0;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip purgatory as it will be modified once we put digest
		 * info in purgatory.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume rest of the buffer is filled with zero and
		 * update digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (!ret) {
		ret = crypto_shash_final(desc, digest);
		if (ret)
			goto out_free_digest;
		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
						     sha_regions, sha_region_sz, 0);
		if (ret)
			goto out_free_digest;

		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
						     digest, SHA256_DIGEST_SIZE, 0);
		if (ret)
			goto out_free_digest;
	}

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	kfree(tfm);
out:
	return ret;
}
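/*
 * The two symbols patched above are consumed by the purgatory blob itself:
 * before jumping to the new kernel, it recomputes SHA-256 over every region
 * listed in purgatory_sha_regions and compares the result against
 * purgatory_sha256_digest (on x86 this is verify_sha256_digest() under
 * arch/x86/purgatory/), so a corrupted crash kernel is detected instead of
 * being executed.
 */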
695
b799a09f 696#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
a43cac0d
DY
697/* Actually load purgatory. Lot of code taken from kexec-tools */
698static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
699 unsigned long max, int top_down)
700{
701 struct purgatory_info *pi = &image->purgatory_info;
ec2b9bfa
TJB
702 unsigned long align, bss_align, bss_sz, bss_pad;
703 unsigned long entry, load_addr, curr_load_addr, bss_addr, offset;
a43cac0d
DY
704 unsigned char *buf_addr, *src;
705 int i, ret = 0, entry_sidx = -1;
706 const Elf_Shdr *sechdrs_c;
707 Elf_Shdr *sechdrs = NULL;
ec2b9bfa
TJB
708 struct kexec_buf kbuf = { .image = image, .bufsz = 0, .buf_align = 1,
709 .buf_min = min, .buf_max = max,
710 .top_down = top_down };
a43cac0d
DY
711
712 /*
713 * sechdrs_c points to section headers in purgatory and are read
714 * only. No modifications allowed.
715 */
716 sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff;
717
718 /*
719 * We can not modify sechdrs_c[] and its fields. It is read only.
720 * Copy it over to a local copy where one can store some temporary
721 * data and free it at the end. We need to modify ->sh_addr and
722 * ->sh_offset fields to keep track of permanent and temporary
723 * locations of sections.
724 */
725 sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
726 if (!sechdrs)
727 return -ENOMEM;
728
729 memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr));
730
731 /*
732 * We seem to have multiple copies of sections. First copy is which
733 * is embedded in kernel in read only section. Some of these sections
734 * will be copied to a temporary buffer and relocated. And these
735 * sections will finally be copied to their final destination at
736 * segment load time.
737 *
738 * Use ->sh_offset to reflect section address in memory. It will
739 * point to original read only copy if section is not allocatable.
740 * Otherwise it will point to temporary copy which will be relocated.
741 *
742 * Use ->sh_addr to contain final address of the section where it
743 * will go during execution time.
744 */
745 for (i = 0; i < pi->ehdr->e_shnum; i++) {
746 if (sechdrs[i].sh_type == SHT_NOBITS)
747 continue;
748
749 sechdrs[i].sh_offset = (unsigned long)pi->ehdr +
750 sechdrs[i].sh_offset;
751 }
752
753 /*
754 * Identify entry point section and make entry relative to section
755 * start.
756 */
757 entry = pi->ehdr->e_entry;
758 for (i = 0; i < pi->ehdr->e_shnum; i++) {
759 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
760 continue;
761
762 if (!(sechdrs[i].sh_flags & SHF_EXECINSTR))
763 continue;
764
765 /* Make entry section relative */
766 if (sechdrs[i].sh_addr <= pi->ehdr->e_entry &&
767 ((sechdrs[i].sh_addr + sechdrs[i].sh_size) >
768 pi->ehdr->e_entry)) {
769 entry_sidx = i;
770 entry -= sechdrs[i].sh_addr;
771 break;
772 }
773 }
774
775 /* Determine how much memory is needed to load relocatable object. */
a43cac0d 776 bss_align = 1;
a43cac0d
DY
777 bss_sz = 0;
778
779 for (i = 0; i < pi->ehdr->e_shnum; i++) {
780 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
781 continue;
782
783 align = sechdrs[i].sh_addralign;
784 if (sechdrs[i].sh_type != SHT_NOBITS) {
ec2b9bfa
TJB
785 if (kbuf.buf_align < align)
786 kbuf.buf_align = align;
787 kbuf.bufsz = ALIGN(kbuf.bufsz, align);
788 kbuf.bufsz += sechdrs[i].sh_size;
a43cac0d
DY
789 } else {
790 /* bss section */
791 if (bss_align < align)
792 bss_align = align;
793 bss_sz = ALIGN(bss_sz, align);
794 bss_sz += sechdrs[i].sh_size;
795 }
796 }
797
798 /* Determine the bss padding required to align bss properly */
799 bss_pad = 0;
ec2b9bfa
TJB
800 if (kbuf.bufsz & (bss_align - 1))
801 bss_pad = bss_align - (kbuf.bufsz & (bss_align - 1));
a43cac0d 802
ec2b9bfa 803 kbuf.memsz = kbuf.bufsz + bss_pad + bss_sz;
a43cac0d
DY
804
805 /* Allocate buffer for purgatory */
ec2b9bfa
TJB
806 kbuf.buffer = vzalloc(kbuf.bufsz);
807 if (!kbuf.buffer) {
a43cac0d
DY
808 ret = -ENOMEM;
809 goto out;
810 }
811
ec2b9bfa
TJB
812 if (kbuf.buf_align < bss_align)
813 kbuf.buf_align = bss_align;
a43cac0d
DY
814
815 /* Add buffer to segment list */
ec2b9bfa 816 ret = kexec_add_buffer(&kbuf);
a43cac0d
DY
817 if (ret)
818 goto out;
ec2b9bfa 819 pi->purgatory_load_addr = kbuf.mem;
a43cac0d
DY
820
821 /* Load SHF_ALLOC sections */
ec2b9bfa 822 buf_addr = kbuf.buffer;
a43cac0d 823 load_addr = curr_load_addr = pi->purgatory_load_addr;
ec2b9bfa 824 bss_addr = load_addr + kbuf.bufsz + bss_pad;
a43cac0d
DY
825
826 for (i = 0; i < pi->ehdr->e_shnum; i++) {
827 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
828 continue;
829
830 align = sechdrs[i].sh_addralign;
831 if (sechdrs[i].sh_type != SHT_NOBITS) {
832 curr_load_addr = ALIGN(curr_load_addr, align);
833 offset = curr_load_addr - load_addr;
834 /* We already modifed ->sh_offset to keep src addr */
835 src = (char *) sechdrs[i].sh_offset;
836 memcpy(buf_addr + offset, src, sechdrs[i].sh_size);
837
838 /* Store load address and source address of section */
839 sechdrs[i].sh_addr = curr_load_addr;
840
841 /*
842 * This section got copied to temporary buffer. Update
843 * ->sh_offset accordingly.
844 */
845 sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset);
846
847 /* Advance to the next address */
848 curr_load_addr += sechdrs[i].sh_size;
849 } else {
850 bss_addr = ALIGN(bss_addr, align);
851 sechdrs[i].sh_addr = bss_addr;
852 bss_addr += sechdrs[i].sh_size;
853 }
854 }
855
856 /* Update entry point based on load address of text section */
857 if (entry_sidx >= 0)
858 entry += sechdrs[entry_sidx].sh_addr;
859
860 /* Make kernel jump to purgatory after shutdown */
861 image->start = entry;
862
863 /* Used later to get/set symbol values */
864 pi->sechdrs = sechdrs;
865
866 /*
867 * Used later to identify which section is purgatory and skip it
868 * from checksumming.
869 */
ec2b9bfa 870 pi->purgatory_buf = kbuf.buffer;
a43cac0d
DY
871 return ret;
872out:
873 vfree(sechdrs);
ec2b9bfa 874 vfree(kbuf.buffer);
a43cac0d
DY
875 return ret;
876}
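/*
 * Overview of the purgatory path: kexec_load_purgatory() points pi->ehdr at
 * the built-in kexec_purgatory ELF object, __kexec_load_purgatory() above
 * stages its SHF_ALLOC sections into a kexec segment and records their final
 * addresses, kexec_apply_relocations() fixes up the staged copy through the
 * arch relocation hooks, and kexec_purgatory_get_set_symbol() later patches
 * variables such as the SHA-256 digest into it before the segment is copied
 * to its destination.
 */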

static int kexec_apply_relocations(struct kimage *image)
{
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Shdr *sechdrs = pi->sechdrs;

	/* Apply relocations */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		Elf_Shdr *section, *symtab;

		if (sechdrs[i].sh_type != SHT_RELA &&
		    sechdrs[i].sh_type != SHT_REL)
			continue;

		/*
		 * For a section of type SHT_RELA/SHT_REL, ->sh_link contains
		 * the section header index of the associated symbol table,
		 * and ->sh_info contains the section header index of the
		 * section to which the relocations apply.
		 */
		if (sechdrs[i].sh_info >= pi->ehdr->e_shnum ||
		    sechdrs[i].sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

		section = &sechdrs[sechdrs[i].sh_info];
		symtab = &sechdrs[sechdrs[i].sh_link];

		if (!(section->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * symtab->sh_link contains the section header index of the
		 * associated string table.
		 */
		if (symtab->sh_link >= pi->ehdr->e_shnum)
			/* Invalid section number? */
			continue;

		/*
		 * The respective architecture needs to provide support for
		 * applying relocations of type SHT_RELA/SHT_REL.
		 */
		if (sechdrs[i].sh_type == SHT_RELA)
			ret = arch_kexec_apply_relocations_add(pi->ehdr,
							       sechdrs, i);
		else if (sechdrs[i].sh_type == SHT_REL)
			ret = arch_kexec_apply_relocations(pi->ehdr,
							   sechdrs, i);
		if (ret)
			return ret;
	}

	return 0;
}

/* Load relocatable purgatory object and relocate it appropriately */
int kexec_load_purgatory(struct kimage *image, unsigned long min,
			 unsigned long max, int top_down,
			 unsigned long *load_addr)
{
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;

	if (kexec_purgatory_size <= 0)
		return -EINVAL;

	pi->ehdr = (Elf_Ehdr *)kexec_purgatory;

	ret = __kexec_load_purgatory(image, min, max, top_down);
	if (ret)
		return ret;

	ret = kexec_apply_relocations(image);
	if (ret)
		goto out;

	*load_addr = pi->purgatory_load_addr;
	return 0;
out:
	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}

static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
					    const char *name)
{
	Elf_Sym *syms;
	Elf_Shdr *sechdrs;
	Elf_Ehdr *ehdr;
	int i, k;
	const char *strtab;

	if (!pi->sechdrs || !pi->ehdr)
		return NULL;

	sechdrs = pi->sechdrs;
	ehdr = pi->ehdr;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;

		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			/* Invalid strtab section number */
			continue;
		strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (Elf_Sym *)sechdrs[i].sh_offset;

		/* Go through symbols for a match */
		for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
			if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
				continue;

			if (strcmp(strtab + syms[k].st_name, name) != 0)
				continue;

			if (syms[k].st_shndx == SHN_UNDEF ||
			    syms[k].st_shndx >= ehdr->e_shnum) {
				pr_debug("Symbol: %s has bad section index %d.\n",
					 name, syms[k].st_shndx);
				return NULL;
			}

			/* Found the symbol we are looking for */
			return &syms[k];
		}
	}

	return NULL;
}

void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return ERR_PTR(-EINVAL);

	sechdr = &pi->sechdrs[sym->st_shndx];

	/*
	 * Returns the address where the symbol will finally be loaded after
	 * kimage_load_segment().
	 */
	return (void *)(sechdr->sh_addr + sym->st_value);
}

/*
 * Get or set the value of a symbol. If "get_value" is true, the symbol value
 * is returned in buf; otherwise the symbol value is set based on the value
 * in buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size, bool get_value)
{
	Elf_Sym *sym;
	Elf_Shdr *sechdrs;
	struct purgatory_info *pi = &image->purgatory_info;
	char *sym_buf;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return -EINVAL;

	if (sym->st_size != size) {
		pr_err("symbol %s size mismatch: expected %lu actual %u\n",
		       name, (unsigned long)sym->st_size, size);
		return -EINVAL;
	}

	sechdrs = pi->sechdrs;

	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		pr_err("symbol %s is in a bss section. Cannot %s\n", name,
		       get_value ? "get" : "set");
		return -EINVAL;
	}

	sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset +
					sym->st_value;

	if (get_value)
		memcpy((void *)buf, sym_buf, size);
	else
		memcpy((void *)sym_buf, buf, size);

	return 0;
}
#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */

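/*
 * Carve the range [mstart, mend] out of the table in @mem. A worked example
 * (addresses illustrative): excluding [0x3000, 0x4fff] from a single range
 * [0x1000, 0x8fff] truncates it to [0x1000, 0x2fff] and inserts the new
 * range [0x5000, 0x8fff] after it, so nr_ranges grows by one; excluding a
 * prefix or suffix only shrinks the existing entry, and excluding an exact
 * match removes the entry and shifts the rest of the table left.
 */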
int crash_exclude_mem_range(struct crash_mem *mem,
			    unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split to array */
	if (!temp_range.end)
		return 0;

	/* Split happened */
	if (i == mem->max_nr_ranges - 1)
		return -ENOMEM;

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}

int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
				void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf;
	unsigned int cpu, i;
	unsigned long long notes_addr;
	unsigned long mstart, mend;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += mem->nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for the kernel text
	 * mapping area (for example, ffffffff80000000 - ffffffffa0000000 on
	 * x86_64). I think this is required by tools like gdb. So the same
	 * physical memory will be mapped in two ELF headers: one will
	 * contain the kernel text virtual addresses and the other will have
	 * __va(physical) addresses.
	 */

	nr_phdr++;
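	/*
	 * In total: one PT_NOTE per CPU (sized with num_possible_cpus() but
	 * emitted below only for present CPUs), one PT_NOTE for vmcoreinfo,
	 * one PT_LOAD per memory range, plus the kernel-text PT_LOAD counted
	 * just above (emitted only when @kernel_map is set).
	 */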
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	ehdr = (Elf64_Ehdr *)buf;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present CPU */
	for_each_present_cpu(cpu) {
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
		phdr++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;
	phdr++;

	/* Prepare PT_LOAD type program header for kernel text region */
	if (kernel_map) {
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_vaddr = (Elf64_Addr)_text;
		phdr->p_filesz = phdr->p_memsz = _end - _text;
		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
		ehdr->e_phnum++;
		phdr++;
	}

	/* Go through all the ranges in mem->ranges[] and prepare phdr */
	for (i = 0; i < mem->nr_ranges; i++) {
		mstart = mem->ranges[i].start;
		mend = mem->ranges[i].end;

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		phdr++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
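/*
 * The (buf, elf_sz) pair returned through @addr and @sz is typically handed
 * straight to kexec_add_buffer() by the architecture's crash loader (for
 * instance via a kexec_buf whose .buffer/.bufsz point at these headers), so
 * the resulting segment becomes the ELF core header that the dump-capture
 * kernel exposes as /proc/vmcore.
 */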