/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#define pr_fmt(fmt)	"kexec: " fmt
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;
/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;
/*
 * Declare these symbols weak so that if architecture provides a purgatory,
 * these will be overridden.
 */
char __weak kexec_purgatory[0];
size_t __weak kexec_purgatory_size = 0;
static int kexec_calculate_store_digests(struct kimage *image);
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
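/*
 * An illustrative sketch of the descriptor list described above (not part
 * of the original source): each kimage_entry_t is a page-aligned physical
 * address with an IND_* flag from <linux/kexec.h> in its low bits.  A
 * two-page image destined for physical 0x100000 would roughly look like:
 *
 *	entry[0] = 0x100000 | IND_DESTINATION;	// set destination cursor
 *	entry[1] = src_page0 | IND_SOURCE;	// copy, cursor += PAGE_SIZE
 *	entry[2] = src_page1 | IND_SOURCE;
 *	entry[3] = IND_DONE;			// terminator
 *
 * An IND_INDIRECTION entry instead points at the next page of entries,
 * which is how kimage_add_entry() below chains entry pages together.
 */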
static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);
static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		ret = -EFAULT;

	return ret;
}
static int sanity_check_segment_list(struct kimage *image)
{
	int result, i;
	unsigned long nr_segments = image->nr_segments;

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return result;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return result;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return result;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return result;
	}

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		result = -EADDRNOTAVAIL;
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < crashk_res.start) ||
			    (mend > crashk_res.end))
				return result;
		}
	}

	return 0;
}
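/*
 * A worked example of the overlap test above (illustrative values):
 * segments are half-open ranges [mem, mem + memsz).  Two ranges
 * [mstart, mend) and [pstart, pend) intersect exactly when
 * (mend > pstart) && (mstart < pend).  So [0x1000, 0x3000) and
 * [0x2000, 0x4000) overlap and are rejected, while [0x1000, 0x2000)
 * and [0x2000, 0x3000) merely touch and pass the check.
 */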
static struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}
static void kimage_free_page_list(struct list_head *list);
static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < crashk_res.start) || (entry > crashk_res.end))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/* Enable the special crash kernel control page allocation policy. */
	if (kexec_on_panic) {
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}
static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len)
{
	struct fd f = fdget(fd);
	int ret;
	struct kstat stat;
	loff_t pos;
	ssize_t bytes = 0;

	if (!f.file)
		return -EBADF;

	ret = vfs_getattr(&f.file->f_path, &stat);
	if (ret)
		goto out;

	if (stat.size > INT_MAX) {
		ret = -EFBIG;
		goto out;
	}

	/* Don't hand 0 to vmalloc, it whines. */
	if (stat.size == 0) {
		ret = -EINVAL;
		goto out;
	}

	*buf = vmalloc(stat.size);
	if (!*buf) {
		ret = -ENOMEM;
		goto out;
	}

	pos = 0;
	while (pos < stat.size) {
		bytes = kernel_read(f.file, pos, (char *)(*buf) + pos,
				    stat.size - pos);
		if (bytes < 0) {
			vfree(*buf);
			ret = bytes;
			goto out;
		}

		if (bytes == 0)
			break;
		pos += bytes;
	}

	if (pos != stat.size) {
		ret = -EBADF;
		vfree(*buf);
		goto out;
	}

	*buf_len = pos;
out:
	fdput(f);
	return ret;
}
/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return -ENOEXEC;
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return ERR_PTR(-ENOEXEC);
}

void __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
}
/* Apply relocations of type RELA */
int __weak
arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
				 unsigned int relsec)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

/* Apply relocations of type REL */
int __weak
arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			     unsigned int relsec)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}
/*
 * Free up memory used by kernel, initrd, and command line.  This is
 * temporary memory allocation which is not needed any more after these
 * buffers have been loaded into separate segments and have been copied
 * elsewhere.
 */
static void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	/* See if architecture has anything to cleanup post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * Above call should have called into bootloader to free up
	 * any data stored in kimage->image_loader_data.  It should
	 * be ok now to free it up.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}
/*
 * In file mode the list of segments is prepared by the kernel.  Copy
 * relevant data from user space, do error checking, and prepare the
 * segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret = 0;
	void *ldata;

	ret = copy_file_from_fd(kernel_fd, &image->kernel_buf,
				&image->kernel_buf_len);
	if (ret)
		return ret;

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);
	if (ret)
		goto out;

	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = copy_file_from_fd(initrd_fd, &image->initrd_buf,
					&image->initrd_buf_len);
		if (ret)
			goto out;
	}

	if (cmdline_len) {
		image->cmdline_buf = kzalloc(cmdline_len, GFP_KERNEL);
		if (!image->cmdline_buf) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(image->cmdline_buf, cmdline_ptr,
				     cmdline_len);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* command line should be a string with last byte null */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);
	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}
static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}
static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start,
				       unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;
		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}
static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}
static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}
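/*
 * A worked example of the hole search above (illustrative values): with
 * order = 1, size = 2 * PAGE_SIZE = 0x2000.  If image->control_page is
 * 0x1000000 the first candidate hole is [0x1000000, 0x1001fff].  Should a
 * segment occupy [0x1000000, 0x1002fff], hole_start advances to the next
 * size-aligned boundary past that segment, 0x1004000, and the scan
 * repeats until a hole clears every segment or runs past crashk_res.end.
 */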
struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}
static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}
static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}
static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);
}
static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
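/*
 * Illustrative use of the iterator above (a sketch, not part of the
 * original file): counting the source pages of a loaded image.
 *
 *	kimage_entry_t *ptr, entry;
 *	unsigned long nr_source_pages = 0;
 *
 *	for_each_kimage_entry(image, ptr, entry)
 *		if (entry & IND_SOURCE)
 *			nr_source_pages++;
 *
 * The macro transparently follows IND_INDIRECTION links to the next
 * entry page and stops at the IND_DONE terminator.
 */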
static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}
static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated.  This might hit if
	 * error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}
static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simply to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed up.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		} else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}
static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
static int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going dmas, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

static DEFINE_MUTEX(kexec_mutex);
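/*
 * A minimal userspace invocation sketch (hypothetical values; this is
 * normally driven by kexec-tools, not written by hand): one segment
 * carrying a kernel image is described and handed to the syscall.
 *
 *	struct kexec_segment seg = {
 *		.buf   = kernel_image,		// user-space buffer
 *		.bufsz = kernel_image_size,
 *		.mem   = (void *)0x100000,	// page-aligned destination
 *		.memsz = kernel_image_memsz,	// page-rounded size
 *	};
 *	syscall(__NR_kexec_load, entry_addr, 1, &seg, KEXEC_ARCH_DEFAULT);
 *
 * sanity_check_segment_list() above rejects unaligned or overlapping
 * .mem ranges before anything is loaded.
 */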
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	struct kimage **dest_image, *image;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_alloc_init(&image, entry, nr_segments,
						   segments, flags);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_alloc_init(&image, entry, nr_segments,
						   segments, flags);
			crash_map_reserved_pages();
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
		if (flags & KEXEC_ON_CRASH)
			crash_unmap_reserved_pages();
	}
	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);

out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return result;
}
/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif
SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH)
		dest_image = &kexec_crash_image;

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In case of crash, new kernel gets loaded in reserved region.  It is
	 * same memory where old crash kernel might be loaded.  Free any
	 * current crash dump kernel before we corrupt it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after image has been loaded
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);
	return ret;
}
void crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}
void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}
int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_map_reserved_pages();
	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);
	crash_unmap_reserved_pages();

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}
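/*
 * The resulting in-memory layout, as a sketch: an ELF note is the
 * three-word header followed by the name and descriptor, each padded to
 * a 4-byte boundary -- hence the "(x + 3)/4" bumps above.  For the
 * NT_PRSTATUS note written by crash_save_cpu() below ("CORE", namesz 5):
 *
 *	+-----------------------------+
 *	| n_namesz = 5                |
 *	| n_descsz = sizeof(prstatus) |
 *	| n_type   = NT_PRSTATUS      |
 *	+-----------------------------+
 *	| "CORE\0" + 3 pad bytes      |
 *	+-----------------------------+
 *	| struct elf_prstatus         |
 *	+-----------------------------+
 */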
static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}
static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		pr_warn("Kexec: Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);
/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */


/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warn("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("crashkernel: Memory value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warn("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warn("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warn("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur && *cur != ' ' && *cur != '@')
			cur++;
		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("Memory value expected after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}
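/*
 * An example of the range syntax parsed above:
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when system RAM is in [512M, 2G), 128M at 2G or above
 * (an open-ended "2G-" range), and places the reservation at physical
 * offset 16M in either case.
 */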
/*
 * That function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);
	else if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}
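/*
 * For example, "crashkernel=128M@16M" sets *crash_size to 128M and
 * *crash_base to 16M, while a plain "crashkernel=128M" leaves
 * *crash_base untouched so the architecture picks the base itself.
 */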
#define SUFFIX_HIGH 0
#define SUFFIX_LOW  1
#define SUFFIX_NULL 2
static __initdata char *suffix_tbl[] = {
	[SUFFIX_HIGH] = ",high",
	[SUFFIX_LOW]  = ",low",
	[SUFFIX_NULL] = NULL,
};
/*
 * That function parses "suffix" crashkernel command lines like
 *
 *	crashkernel=size,[high|low]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_suffix(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base,
					   const char *suffix)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	/* check with suffix */
	if (strncmp(cur, suffix, strlen(suffix))) {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}
	cur += strlen(suffix);
	if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}
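/*
 * For example, with suffix = ",high", "crashkernel=256M,high" sets
 * *crash_size to 256M.  Note that no "@offset" is accepted here, which
 * is why crash_base is never written by this parser.
 */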
static __init char *get_last_crashkernel(char *cmdline,
					 const char *name,
					 const char *suffix)
{
	char *p = cmdline, *ck_cmdline = NULL;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, name);
	while (p) {
		char *end_p = strchr(p, ' ');
		char *q;

		if (!end_p)
			end_p = p + strlen(p);

		if (!suffix) {
			int i;

			/* skip the one with any known suffix */
			for (i = 0; suffix_tbl[i]; i++) {
				q = end_p - strlen(suffix_tbl[i]);
				if (!strncmp(q, suffix_tbl[i],
					     strlen(suffix_tbl[i])))
					goto next;
			}
			ck_cmdline = p;
		} else {
			q = end_p - strlen(suffix);
			if (!strncmp(q, suffix, strlen(suffix)))
				ck_cmdline = p;
		}
next:
		p = strstr(p+1, name);
	}

	if (!ck_cmdline)
		return NULL;

	return ck_cmdline;
}
static int __init __parse_crashkernel(char *cmdline,
				      unsigned long long system_ram,
				      unsigned long long *crash_size,
				      unsigned long long *crash_base,
				      const char *name,
				      const char *suffix)
{
	char	*first_colon, *first_space;
	char	*ck_cmdline;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += strlen(name);

	if (suffix)
		return parse_crashkernel_suffix(ck_cmdline, crash_size,
				crash_base, suffix);
	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
				crash_size, crash_base);

	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}
/*
 * That function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
					"crashkernel=", NULL);
}

int __init parse_crashkernel_high(char *cmdline,
				  unsigned long long system_ram,
				  unsigned long long *crash_size,
				  unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				"crashkernel=", suffix_tbl[SUFFIX_HIGH]);
}

int __init parse_crashkernel_low(char *cmdline,
				 unsigned long long system_ram,
				 unsigned long long *crash_size,
				 unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				"crashkernel=", suffix_tbl[SUFFIX_LOW]);
}
static void update_vmcoreinfo_note(void)
{
	u32 *buf = vmcoreinfo_note;

	if (!vmcoreinfo_size)
		return;
	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);
	final_note(buf);
}

void crash_save_vmcoreinfo(void)
{
	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
	update_vmcoreinfo_note();
}
void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	size_t r;

	va_start(args, fmt);
	r = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}
/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_crash_save_vmcoreinfo(void)
{}

unsigned long __weak paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}
static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
	VMCOREINFO_SYMBOL(swapper_pg_dir);
#endif
	VMCOREINFO_SYMBOL(_stext);
	VMCOREINFO_SYMBOL(vmap_area_list);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(page, _mapcount);
	VMCOREINFO_OFFSET(page, private);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_OFFSET(vmap_area, va_start);
	VMCOREINFO_OFFSET(vmap_area, list);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	log_buf_kexec_setup();
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);
	VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
	VMCOREINFO_NUMBER(PG_hwpoison);
#endif
	VMCOREINFO_NUMBER(PG_head_mask);
	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
#ifdef CONFIG_HUGETLBFS
	VMCOREINFO_SYMBOL(free_huge_page);
#endif

	arch_crash_save_vmcoreinfo();
	update_vmcoreinfo_note();

	return 0;
}

subsys_initcall(crash_save_vmcoreinfo_init);
static int __kexec_add_segment(struct kimage *image, char *buf,
			       unsigned long bufsz, unsigned long mem,
			       unsigned long memsz)
{
	struct kexec_segment *ksegment;

	ksegment = &image->segment[image->nr_segments];
	ksegment->kbuf = buf;
	ksegment->bufsz = bufsz;
	ksegment->mem = mem;
	ksegment->memsz = memsz;
	image->nr_segments++;

	return 0;
}
static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
				    struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_end = min(end, kbuf->buf_max);
	temp_start = temp_end - kbuf->memsz;

	do {
		/* align down start */
		temp_start = temp_start & (~(kbuf->buf_align - 1));

		if (temp_start < start || temp_start < kbuf->buf_min)
			return 0;

		temp_end = temp_start + kbuf->memsz - 1;

		/*
		 * Make sure this does not conflict with any of existing
		 * ranges
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start - PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	__kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start,
			    kbuf->memsz);

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}
static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
				     struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_start = max(start, kbuf->buf_min);

	do {
		temp_start = ALIGN(temp_start, kbuf->buf_align);
		temp_end = temp_start + kbuf->memsz - 1;

		if (temp_end > end || temp_end > kbuf->buf_max)
			return 0;
		/*
		 * Make sure this does not conflict with any of existing
		 * ranges
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start + PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	__kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start,
			    kbuf->memsz);

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}
static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
{
	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
	unsigned long sz = end - start + 1;

	/* Returning 0 will take to next memory range */
	if (sz < kbuf->memsz)
		return 0;

	if (end < kbuf->buf_min || start > kbuf->buf_max)
		return 0;

	/*
	 * Allocate memory top down within the ram range.  Otherwise
	 * allocate bottom up.
	 */
	if (kbuf->top_down)
		return locate_mem_hole_top_down(start, end, kbuf);
	return locate_mem_hole_bottom_up(start, end, kbuf);
}
/*
 * Helper function for placing a buffer in a kexec segment.  This assumes
 * that kexec_mutex is held.
 */
int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
		     unsigned long memsz, unsigned long buf_align,
		     unsigned long buf_min, unsigned long buf_max,
		     bool top_down, unsigned long *load_addr)
{

	struct kexec_segment *ksegment;
	struct kexec_buf buf, *kbuf;
	int ret;

	/* Currently adding segment this way is allowed only in file mode */
	if (!image->file_mode)
		return -EINVAL;

	if (image->nr_segments >= KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/*
	 * Make sure we are not trying to add buffer after allocating
	 * control pages.  All segments need to be placed first before
	 * any control pages are allocated.  As control page allocation
	 * logic goes through list of segments to make sure there are
	 * no destination overlaps.
	 */
	if (!list_empty(&image->control_pages)) {
		WARN_ON(1);
		return -EINVAL;
	}

	memset(&buf, 0, sizeof(struct kexec_buf));
	kbuf = &buf;
	kbuf->image = image;
	kbuf->buffer = buffer;
	kbuf->bufsz = bufsz;

	kbuf->memsz = ALIGN(memsz, PAGE_SIZE);
	kbuf->buf_align = max(buf_align, PAGE_SIZE);
	kbuf->buf_min = buf_min;
	kbuf->buf_max = buf_max;
	kbuf->top_down = top_down;

	/* Walk the RAM ranges and allocate a suitable range for the buffer */
	if (image->type == KEXEC_TYPE_CRASH)
		ret = walk_iomem_res("Crash kernel",
				     IORESOURCE_MEM | IORESOURCE_BUSY,
				     crashk_res.start, crashk_res.end, kbuf,
				     locate_mem_hole_callback);
	else
		ret = walk_system_ram_res(0, -1, kbuf,
					  locate_mem_hole_callback);
	if (ret != 1) {
		/* A suitable memory range could not be found for buffer */
		return -EADDRNOTAVAIL;
	}

	/* Found a suitable memory range */
	ksegment = &image->segment[image->nr_segments - 1];
	*load_addr = ksegment->mem;
	return 0;
}
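/*
 * A sketch of a typical call from an image loader (hypothetical values):
 * place a kernel buffer anywhere in RAM below 4G, top down, and learn
 * where it landed.
 *
 *	unsigned long load_addr;
 *	int ret;
 *
 *	ret = kexec_add_buffer(image, kernel_buf, kernel_bufsz,
 *			       kernel_memsz, PAGE_SIZE, 0,
 *			       (1UL << 32) - 1, true, &load_addr);
 *
 * On success the segment has been appended to image->segment[] and
 * load_addr holds the chosen physical address.
 */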
/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions)
		goto out_free_desc;

	desc->tfm   = tfm;
	desc->flags = 0;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip purgatory as it will be modified once we put digest
		 * info in purgatory.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume rest of the buffer is filled with zero and
		 * update digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (!ret) {
		ret = crypto_shash_final(desc, digest);
		if (ret)
			goto out_free_digest;
		ret = kexec_purgatory_get_set_symbol(image, "sha_regions",
						sha_regions, sha_region_sz, 0);
		if (ret)
			goto out_free_digest;

		ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
						digest, SHA256_DIGEST_SIZE, 0);
		if (ret)
			goto out_free_digest;
	}

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	kfree(tfm);
out:
	return ret;
}
/* Actually load purgatory.  Lot of code taken from kexec-tools */
static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
				  unsigned long max, int top_down)
{
	struct purgatory_info *pi = &image->purgatory_info;
	unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad;
	unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset;
	unsigned char *buf_addr, *src;
	int i, ret = 0, entry_sidx = -1;
	const Elf_Shdr *sechdrs_c;
	Elf_Shdr *sechdrs = NULL;
	void *purgatory_buf = NULL;

	/*
	 * sechdrs_c points to section headers in purgatory and are read
	 * only.  No modifications allowed.
	 */
	sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff;

	/*
	 * We can not modify sechdrs_c[] and its fields.  It is read only.
	 * Copy it over to a local copy where one can store some temporary
	 * data and free it at the end.  We need to modify ->sh_addr and
	 * ->sh_offset fields to keep track of permanent and temporary
	 * locations of sections.
	 */
	sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
	if (!sechdrs)
		return -ENOMEM;

	memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr));

	/*
	 * We seem to have multiple copies of sections.  First copy is which
	 * is embedded in kernel in read only section.  Some of these sections
	 * will be copied to a temporary buffer and relocated.  And these
	 * sections will finally be copied to their final destination at
	 * segment load time.
	 *
	 * Use ->sh_offset to reflect section address in memory.  It will
	 * point to original read only copy if section is not allocatable.
	 * Otherwise it will point to temporary copy which will be relocated.
	 *
	 * Use ->sh_addr to contain final address of the section where it
	 * will go during execution time.
	 */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_NOBITS)
			continue;

		sechdrs[i].sh_offset = (unsigned long)pi->ehdr +
						sechdrs[i].sh_offset;
	}

	/*
	 * Identify entry point section and make entry relative to section
	 * start.
	 */
	entry = pi->ehdr->e_entry;
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		if (!(sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		/* Make entry section relative */
		if (sechdrs[i].sh_addr <= pi->ehdr->e_entry &&
		    ((sechdrs[i].sh_addr + sechdrs[i].sh_size) >
		     pi->ehdr->e_entry)) {
			entry_sidx = i;
			entry -= sechdrs[i].sh_addr;
			break;
		}
	}

	/* Determine how much memory is needed to load relocatable object. */
	buf_align = 1;
	bss_align = 1;
	buf_sz = 0;
	bss_sz = 0;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			if (buf_align < align)
				buf_align = align;
			buf_sz = ALIGN(buf_sz, align);
			buf_sz += sechdrs[i].sh_size;
		} else {
			/* bss section */
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}

	/* Determine the bss padding required to align bss properly */
	bss_pad = 0;
	if (buf_sz & (bss_align - 1))
		bss_pad = bss_align - (buf_sz & (bss_align - 1));

	memsz = buf_sz + bss_pad + bss_sz;

	/* Allocate buffer for purgatory */
	purgatory_buf = vzalloc(buf_sz);
	if (!purgatory_buf) {
		ret = -ENOMEM;
		goto out;
	}

	if (buf_align < bss_align)
		buf_align = bss_align;

	/* Add buffer to segment list */
	ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz,
				buf_align, min, max, top_down,
				&pi->purgatory_load_addr);
	if (ret)
		goto out;

	/* Load SHF_ALLOC sections */
	buf_addr = purgatory_buf;
	load_addr = curr_load_addr = pi->purgatory_load_addr;
	bss_addr = load_addr + buf_sz + bss_pad;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			curr_load_addr = ALIGN(curr_load_addr, align);
			offset = curr_load_addr - load_addr;
			/* We already modified ->sh_offset to keep src addr */
			src = (char *) sechdrs[i].sh_offset;
			memcpy(buf_addr + offset, src, sechdrs[i].sh_size);

			/* Store load address and source address of section */
			sechdrs[i].sh_addr = curr_load_addr;

			/*
			 * This section got copied to temporary buffer.  Update
			 * ->sh_offset accordingly.
			 */
			sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset);

			/* Advance to the next address */
			curr_load_addr += sechdrs[i].sh_size;
		} else {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
		}
	}

	/* Update entry point based on load address of text section */
	if (entry_sidx >= 0)
		entry += sechdrs[entry_sidx].sh_addr;

	/* Make kernel jump to purgatory after shutdown */
	image->start = entry;

	/* Used later to get/set symbol values */
	pi->sechdrs = sechdrs;

	/*
	 * Used later to identify which section is purgatory and skip it
	 * from checksumming.
	 */
	pi->purgatory_buf = purgatory_buf;
	return ret;
out:
	vfree(sechdrs);
	vfree(purgatory_buf);
	return ret;
}
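/*
 * Summarizing the bookkeeping above with an example: for an allocatable
 * .text section the function ends up with
 *
 *	sh_offset = address of the temporary copy inside purgatory_buf
 *		    (what the relocation code reads and writes now), and
 *	sh_addr   = the final load address within the purgatory segment
 *		    (where the bytes will execute once
 *		    kimage_load_segment() copies them into place).
 *
 * The relocation pass below therefore patches the temporary copy using
 * final execution addresses.
 */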
static int kexec_apply_relocations(struct kimage *image)
{
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Shdr *sechdrs = pi->sechdrs;

	/* Apply relocations */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		Elf_Shdr *section, *symtab;

		if (sechdrs[i].sh_type != SHT_RELA &&
		    sechdrs[i].sh_type != SHT_REL)
			continue;

		/*
		 * For section of type SHT_RELA/SHT_REL,
		 * ->sh_link contains section header index of associated
		 * symbol table.  And ->sh_info contains section header
		 * index of section to which relocations apply.
		 */
		if (sechdrs[i].sh_info >= pi->ehdr->e_shnum ||
		    sechdrs[i].sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

		section = &sechdrs[sechdrs[i].sh_info];
		symtab = &sechdrs[sechdrs[i].sh_link];

		if (!(section->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * symtab->sh_link contains section header index of associated
		 * string table.
		 */
		if (symtab->sh_link >= pi->ehdr->e_shnum)
			/* Invalid section number? */
			continue;

		/*
		 * Respective architecture needs to provide support for
		 * applying relocations of type SHT_RELA/SHT_REL.
		 */
		if (sechdrs[i].sh_type == SHT_RELA)
			ret = arch_kexec_apply_relocations_add(pi->ehdr,
							       sechdrs, i);
		else if (sechdrs[i].sh_type == SHT_REL)
			ret = arch_kexec_apply_relocations(pi->ehdr,
							   sechdrs, i);
		if (ret)
			return ret;
	}

	return 0;
}
2521 int kexec_load_purgatory(struct kimage
*image
, unsigned long min
,
2522 unsigned long max
, int top_down
,
2523 unsigned long *load_addr
)
2525 struct purgatory_info
*pi
= &image
->purgatory_info
;
2528 if (kexec_purgatory_size
<= 0)
2531 if (kexec_purgatory_size
< sizeof(Elf_Ehdr
))
2534 pi
->ehdr
= (Elf_Ehdr
*)kexec_purgatory
;
2536 if (memcmp(pi
->ehdr
->e_ident
, ELFMAG
, SELFMAG
) != 0
2537 || pi
->ehdr
->e_type
!= ET_REL
2538 || !elf_check_arch(pi
->ehdr
)
2539 || pi
->ehdr
->e_shentsize
!= sizeof(Elf_Shdr
))
2542 if (pi
->ehdr
->e_shoff
>= kexec_purgatory_size
2543 || (pi
->ehdr
->e_shnum
* sizeof(Elf_Shdr
) >
2544 kexec_purgatory_size
- pi
->ehdr
->e_shoff
))
2547 ret
= __kexec_load_purgatory(image
, min
, max
, top_down
);
2551 ret
= kexec_apply_relocations(image
);
2555 *load_addr
= pi
->purgatory_load_addr
;
2559 vfree(pi
->purgatory_buf
);
2563 static Elf_Sym
*kexec_purgatory_find_symbol(struct purgatory_info
*pi
,
2572 if (!pi
->sechdrs
|| !pi
->ehdr
)
2575 sechdrs
= pi
->sechdrs
;
2578 for (i
= 0; i
< ehdr
->e_shnum
; i
++) {
2579 if (sechdrs
[i
].sh_type
!= SHT_SYMTAB
)
2582 if (sechdrs
[i
].sh_link
>= ehdr
->e_shnum
)
2583 /* Invalid strtab section number */
2585 strtab
= (char *)sechdrs
[sechdrs
[i
].sh_link
].sh_offset
;
2586 syms
= (Elf_Sym
*)sechdrs
[i
].sh_offset
;
2588 /* Go through symbols for a match */
2589 for (k
= 0; k
< sechdrs
[i
].sh_size
/sizeof(Elf_Sym
); k
++) {
2590 if (ELF_ST_BIND(syms
[k
].st_info
) != STB_GLOBAL
)
2593 if (strcmp(strtab
+ syms
[k
].st_name
, name
) != 0)
2596 if (syms
[k
].st_shndx
== SHN_UNDEF
||
2597 syms
[k
].st_shndx
>= ehdr
->e_shnum
) {
2598 pr_debug("Symbol: %s has bad section index %d.\n",
2599 name
, syms
[k
].st_shndx
);
2603 /* Found the symbol we are looking for */
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return ERR_PTR(-EINVAL);

	sechdr = &pi->sechdrs[sym->st_shndx];

	/*
	 * Returns the address where symbol will finally be loaded after
	 * kexec_load_segment()
	 */
	return (void *)(sechdr->sh_addr + sym->st_value);
}
/*
 * Get or set value of a symbol.  If "get_value" is true, symbol value is
 * returned in buf, otherwise symbol value is set based on value in buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size, bool get_value)
{
	Elf_Sym *sym;
	Elf_Shdr *sechdrs;
	struct purgatory_info *pi = &image->purgatory_info;
	char *sym_buf;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return -EINVAL;

	if (sym->st_size != size) {
		pr_err("symbol %s size mismatch: expected %lu actual %u\n",
		       name, (unsigned long)sym->st_size, size);
		return -EINVAL;
	}

	sechdrs = pi->sechdrs;

	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		pr_err("symbol %s is in a bss section. Cannot %s\n", name,
		       get_value ? "get" : "set");
		return -EINVAL;
	}

	sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset +
					sym->st_value;

	if (get_value)
		memcpy((void *)buf, sym_buf, size);
	else
		memcpy((void *)sym_buf, buf, size);

	return 0;
}
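/*
 * Illustrative use, a sketch of how kexec_calculate_store_digests()
 * above hands the computed SHA-256 digest to purgatory for verification
 * at boot:
 *
 *	ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
 *					     digest, SHA256_DIGEST_SIZE, 0);
 *
 * The write lands in the temporary copy of the section (via sh_offset),
 * so it is included when the segment is finally copied into place.
 */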
/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end().  We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case).  However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}