// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/objtool.h>
#include <linux/kmsg_dump.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include "kexec_internal.h"

DEFINE_MUTEX(kexec_mutex);

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};

int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in the do_exit() path, each of which
	 * corresponds to one of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

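/*
 * A worked example of PAGE_COUNT (illustrative, assuming 4 KiB pages):
 * PAGE_COUNT(0x1801) evaluates to (0x1801 + 0xfff) >> 12 == 2, i.e. a
 * segment of 6 KiB + 1 byte occupies two destination pages.
 */
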
int sanity_check_segment_list(struct kimage *image)
{
	unsigned long i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;
	unsigned long nr_pages = totalram_pages();

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed. If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > nr_pages / 2)
		return -EINVAL;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of ram.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}

	return 0;
}

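/*
 * Example of the overlap rule above (hypothetical addresses): segments
 * [0x10000, 0x12000) and [0x11000, 0x13000) are rejected, because
 * mend (0x12000) > pstart (0x11000) and mstart (0x10000) < pend (0x13000).
 */
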
struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	if (fatal_signal_pending(current))
		return NULL;
	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}

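/*
 * Note on the ordering above: __GFP_ZERO is masked off at allocation time
 * and the pages are cleared only after arch_kexec_post_alloc_pages(), so
 * that any architecture fixup (e.g. clearing an encryption attribute)
 * takes effect before the zeroing writes through the mapping.
 */
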
static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn   = page_to_boot_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end;
			break;
		}
	}

	/* Ensure that these pages are decrypted if SME is enabled. */
	if (pages)
		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

	return pages;
}

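/*
 * A worked example of the hole search above (illustrative, assuming
 * 4 KiB pages and order == 1): size = (1 << 1) << 12 = 8 KiB, so with
 * image->control_page == 0x3000 the first candidate hole is
 * hole_start == (0x3000 + 0x1fff) & ~0x1fff == 0x4000 and
 * hole_end == 0x5fff, the next naturally aligned 8 KiB window.
 */
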
struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
	struct page *vmcoreinfo_page;
	void *safecopy;

	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump, allocate one vmcoreinfo safe copy from the
	 * crash memory. As arch_kexec_protect_crashkres() is called
	 * after the kexec syscall, the copy is naturally protected from
	 * write (even read) access under the kernel direct mapping. But
	 * we still need to write it when a crash happens, to generate
	 * the vmcoreinfo note, hence the vmap mapping set up here.
	 */
	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
	if (!vmcoreinfo_page) {
		pr_warn("Could not allocate vmcoreinfo buffer\n");
		return -ENOMEM;
	}
	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
	if (!safecopy) {
		pr_warn("Could not vmap vmcoreinfo buffer\n");
		return -ENOMEM;
	}

	image->vmcoreinfo_data_copy = safecopy;
	crash_update_vmcoreinfo_safecopy(safecopy);

	return 0;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);

	return result;
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);

	return result;
}

static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);
}

int __weak machine_kexec_post_load(struct kimage *image)
{
	return 0;
}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

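/*
 * Sketch of the entry list this macro walks (illustrative, not taken
 * from a real image):
 *
 *	dst  | IND_DESTINATION	set the running destination address
 *	src0 | IND_SOURCE	copy src0 there, advance dst one page
 *	src1 | IND_SOURCE	copy src1, advance again
 *	next | IND_INDIRECTION	continue in the page at 'next'
 *	IND_DONE		end of the list
 */
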
static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated. This might hit if
	 * an error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed up.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

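/*
 * Example of the swap above (hypothetical addresses): a freshly
 * allocated page lands at 0x5000, which is some other source page's
 * destination. The contents of that source page are copied into the
 * page at 0x5000, the indirection entry is rewritten to point there,
 * and the displaced page is returned instead (or freed, if it is a
 * highmem page the caller cannot accept).
 */
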
static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

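/*
 * A worked example of the chunking above (illustrative, assuming 4 KiB
 * pages): with maddr == 0x100800, mchunk = min(mbytes, 0x1000 - 0x800),
 * so at most 2 KiB is copied on the first pass and no single copy ever
 * crosses a page boundary.
 */
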
static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		arch_kexec_pre_free_pages(page_address(page), 1);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

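/*
 * Note on the copy loop above: the crash reserved region may carry an
 * encryption attribute while idle, so each page is run through
 * arch_kexec_post_alloc_pages() before it is written and
 * arch_kexec_pre_free_pages() afterwards; both are no-ops on most
 * architectures.
 */
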
int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

/*
 * No panic_cpu check version of crash_kexec().  This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic().  Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other.  To exclude them, we use panic_cpu here too.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* This is the 1st CPU which comes here, so go ahead. */
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}

size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}

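/*
 * A worked example of the shrink above (hypothetical values, page-sized
 * alignment): with crashk_res spanning [0x10000000, 0x13ffffff] and
 * new_size = 32 MiB, end becomes 0x12000000; [0x12000000, 0x13ffffff]
 * is freed and re-inserted as "System RAM", and crashk_res.end is
 * trimmed to 0x11ffffff.
 */
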
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.common.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across 2 vmalloc pages when percpu
	 * is vmalloc based. vmalloc doesn't guarantee that 2 contiguous
	 * vmalloc pages are also on 2 contiguous physical pages, so the 2nd
	 * part of crash_notes in the 2nd page could be lost, since only the
	 * starting address and size of crash_notes are exported through sysfs.
	 * Round up the size of crash_notes to the nearest power of two and
	 * pass it to __alloc_percpu as the align value, which guarantees
	 * crash_notes is allocated inside one physical page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);

	/*
	 * Break compile if size is bigger than PAGE_SIZE, since crash_notes
	 * definitely will span 2 pages with that.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);

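/*
 * For instance (hypothetical size): if sizeof(note_buf_t) were 1104
 * bytes, align would be roundup_pow_of_two(1104) == 2048, and a
 * 2048-aligned 1104-byte allocation can never straddle a 4 KiB page
 * boundary.
 */
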
/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end(). We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = suspend_disable_secondary_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare("kexec reboot");
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_notice("Starting new kernel\n");
		machine_shutdown();
	}

	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		suspend_enable_secondary_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}

/*
 * Protection mechanism for crashkernel reserved memory after
 * the kdump kernel is loaded.
 *
 * Provide an empty default implementation here -- architecture
 * code may override this.
 */
void __weak arch_kexec_protect_crashkres(void)
{}

void __weak arch_kexec_unprotect_crashkres(void)
{}