kernel/kexec_core.c (mirror_ubuntu-artful-kernel.git, at commit "userns: prevent speculative execution")
1/*
2 * kexec.c - kexec system call core code.
3 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
4 *
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
7 */
8
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11#include <linux/capability.h>
12#include <linux/mm.h>
13#include <linux/file.h>
14#include <linux/slab.h>
15#include <linux/fs.h>
16#include <linux/kexec.h>
17#include <linux/mutex.h>
18#include <linux/list.h>
19#include <linux/highmem.h>
20#include <linux/syscalls.h>
21#include <linux/reboot.h>
22#include <linux/ioport.h>
23#include <linux/hardirq.h>
24#include <linux/elf.h>
25#include <linux/elfcore.h>
26#include <linux/utsname.h>
27#include <linux/numa.h>
28#include <linux/suspend.h>
29#include <linux/device.h>
30#include <linux/freezer.h>
31#include <linux/pm.h>
32#include <linux/cpu.h>
33#include <linux/uaccess.h>
34#include <linux/io.h>
35#include <linux/console.h>
36#include <linux/vmalloc.h>
37#include <linux/swap.h>
38#include <linux/syscore_ops.h>
39#include <linux/compiler.h>
40#include <linux/hugetlb.h>
41#include <linux/frame.h>
42
43#include <asm/page.h>
44#include <asm/sections.h>
45
46#include <crypto/hash.h>
47#include <crypto/sha.h>
48#include "kexec_internal.h"
49
50DEFINE_MUTEX(kexec_mutex);
51
52/* Per cpu memory for storing cpu states in case of system crash. */
53note_buf_t __percpu *crash_notes;
54
55/* Flag to indicate we are going to kexec a new kernel */
56bool kexec_in_progress = false;
57
58
59/* Location of the reserved area for the crash kernel */
60struct resource crashk_res = {
61 .name = "Crash kernel",
62 .start = 0,
63 .end = 0,
64 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
65 .desc = IORES_DESC_CRASH_KERNEL
66};
67struct resource crashk_low_res = {
68 .name = "Crash kernel",
69 .start = 0,
70 .end = 0,
71 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
72 .desc = IORES_DESC_CRASH_KERNEL
73};
74
75int kexec_should_crash(struct task_struct *p)
76{
77 /*
78 * If crash_kexec_post_notifiers is enabled, don't run
79 * crash_kexec() here yet, which must be run after panic
80 * notifiers in panic().
81 */
82 if (crash_kexec_post_notifiers)
83 return 0;
84 /*
 85	 * There are 4 panic() calls in the do_exit() path, each of which
 86	 * corresponds to one of these 4 conditions.
87 */
88 if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
89 return 1;
90 return 0;
91}
92
93int kexec_crash_loaded(void)
94{
95 return !!kexec_crash_image;
96}
97EXPORT_SYMBOL_GPL(kexec_crash_loaded);
98
99/*
100 * When kexec transitions to the new kernel there is a one-to-one
101 * mapping between physical and virtual addresses. On processors
102 * where you can disable the MMU this is trivial, and easy. For
103 * others it is still a simple predictable page table to setup.
104 *
105 * In that environment kexec copies the new kernel to its final
106 * resting place. This means I can only support memory whose
107 * physical address can fit in an unsigned long. In particular
108 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
109 * If the assembly stub has more restrictive requirements
110 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
111 * defined more restrictively in <asm/kexec.h>.
112 *
 113 * The code for the transition from the current kernel to the
 114 * new kernel is placed in the control_code_buffer, whose size
115 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
116 * page of memory is necessary, but some architectures require more.
117 * Because this memory must be identity mapped in the transition from
118 * virtual to physical addresses it must live in the range
119 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
120 * modifiable.
121 *
122 * The assembly stub in the control code buffer is passed a linked list
123 * of descriptor pages detailing the source pages of the new kernel,
124 * and the destination addresses of those source pages. As this data
125 * structure is not used in the context of the current OS, it must
126 * be self-contained.
127 *
128 * The code has been made to work with highmem pages and will use a
129 * destination page in its final resting place (if it happens
130 * to allocate it). The end product of this is that most of the
131 * physical address space, and most of RAM can be used.
132 *
133 * Future directions include:
134 * - allocating a page table with the control code buffer identity
135 * mapped, to simplify machine_kexec and make kexec_on_panic more
136 * reliable.
137 */
138
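For illustration only: a small user-space sketch of how the relocation stub might consume the descriptor list described above. The IND_* flag values follow the definitions in <linux/kexec.h> (a flag carried in the low bits of a page-aligned address); the pages, addresses and names here are simulated with ordinary heap allocations, so this is a model of the walk rather than the kernel's implementation.

/* sim_kexec_list.c -- standalone sketch, not part of kexec_core.c */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SIM_PAGE_SIZE   4096UL
#define SIM_PAGE_MASK   (~(SIM_PAGE_SIZE - 1))
#define IND_DESTINATION 0x1UL   /* set the destination cursor            */
#define IND_INDIRECTION 0x2UL   /* continue walking in another list page */
#define IND_DONE        0x4UL   /* end of the list                       */
#define IND_SOURCE      0x8UL   /* copy one source page to the cursor    */

typedef unsigned long kimage_entry_t;

/* Walk the list the way the stub does: after each IND_SOURCE entry the
 * destination cursor advances by one page. */
static void sim_relocate(kimage_entry_t *entry)
{
	char *dest = NULL;

	for (;;) {
		kimage_entry_t e = *entry;

		if (e & IND_DONE)
			break;
		if (e & IND_INDIRECTION) {
			entry = (kimage_entry_t *)(e & SIM_PAGE_MASK);
			continue;
		}
		if (e & IND_DESTINATION)
			dest = (char *)(e & SIM_PAGE_MASK);
		else if (e & IND_SOURCE) {
			memcpy(dest, (void *)(e & SIM_PAGE_MASK), SIM_PAGE_SIZE);
			dest += SIM_PAGE_SIZE;
		}
		entry++;
	}
}

int main(void)
{
	/* One simulated source page, one destination page, three entries. */
	char *src = aligned_alloc(SIM_PAGE_SIZE, SIM_PAGE_SIZE);
	char *dst = aligned_alloc(SIM_PAGE_SIZE, SIM_PAGE_SIZE);
	kimage_entry_t list[3];

	strcpy(src, "pretend this is a page of the new kernel");
	list[0] = (kimage_entry_t)dst | IND_DESTINATION;
	list[1] = (kimage_entry_t)src | IND_SOURCE;
	list[2] = IND_DONE;

	sim_relocate(list);
	printf("%s\n", dst);
	free(src);
	free(dst);
	return 0;
}
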
139/*
140 * KIMAGE_NO_DEST is an impossible destination address..., for
141 * allocating pages whose destination address we do not care about.
142 */
143#define KIMAGE_NO_DEST (-1UL)
144#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
145
146static struct page *kimage_alloc_page(struct kimage *image,
147 gfp_t gfp_mask,
148 unsigned long dest);
149
150int sanity_check_segment_list(struct kimage *image)
151{
 152	int i;
 153	unsigned long nr_segments = image->nr_segments;
 154	unsigned long total_pages = 0;
155
156 /*
157 * Verify we have good destination addresses. The caller is
158 * responsible for making certain we don't attempt to load
159 * the new image into invalid or reserved areas of RAM. This
160 * just verifies it is an address we can use.
161 *
162 * Since the kernel does everything in page size chunks ensure
163 * the destination addresses are page aligned. Too many
 164	 * special cases crop up when we don't do this. The most
165 * insidious is getting overlapping destination addresses
166 * simply because addresses are changed to page size
167 * granularity.
168 */
169 for (i = 0; i < nr_segments; i++) {
170 unsigned long mstart, mend;
171
172 mstart = image->segment[i].mem;
173 mend = mstart + image->segment[i].memsz;
174 if (mstart > mend)
175 return -EADDRNOTAVAIL;
 176	if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
 177	return -EADDRNOTAVAIL;
 178	if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
 179	return -EADDRNOTAVAIL;
180 }
181
182 /* Verify our destination addresses do not overlap.
 183	 * If we allowed overlapping destination addresses
 184	 * through, very weird things can happen with no
185 * easy explanation as one segment stops on another.
186 */
187 for (i = 0; i < nr_segments; i++) {
188 unsigned long mstart, mend;
189 unsigned long j;
190
191 mstart = image->segment[i].mem;
192 mend = mstart + image->segment[i].memsz;
193 for (j = 0; j < i; j++) {
194 unsigned long pstart, pend;
195
196 pstart = image->segment[j].mem;
197 pend = pstart + image->segment[j].memsz;
198 /* Do the segments overlap ? */
199 if ((mend > pstart) && (mstart < pend))
 200	return -EINVAL;
201 }
202 }
203
 204	/* Ensure our buffer sizes do not exceed
 205	 * our memory sizes. This should always be the case,
206 * and it is easier to check up front than to be surprised
207 * later on.
208 */
209 for (i = 0; i < nr_segments; i++) {
210 if (image->segment[i].bufsz > image->segment[i].memsz)
 211	return -EINVAL;
212 }
213
 214	/*
215 * Verify that no more than half of memory will be consumed. If the
216 * request from userspace is too large, a large amount of time will be
217 * wasted allocating pages, which can cause a soft lockup.
218 */
219 for (i = 0; i < nr_segments; i++) {
220 if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
221 return -EINVAL;
222
223 total_pages += PAGE_COUNT(image->segment[i].memsz);
224 }
225
226 if (total_pages > totalram_pages / 2)
227 return -EINVAL;
228
229 /*
230 * Verify we have good destination addresses. Normally
231 * the caller is responsible for making certain we don't
232 * attempt to load the new image into invalid or reserved
233 * areas of RAM. But crash kernels are preloaded into a
 234	 * reserved area of RAM. We must ensure the addresses
235 * are in the reserved area otherwise preloading the
236 * kernel could corrupt things.
237 */
238
239 if (image->type == KEXEC_TYPE_CRASH) {
240 for (i = 0; i < nr_segments; i++) {
241 unsigned long mstart, mend;
242
243 mstart = image->segment[i].mem;
244 mend = mstart + image->segment[i].memsz - 1;
245 /* Ensure we are within the crash kernel limits */
246 if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
247 (mend > phys_to_boot_phys(crashk_res.end)))
 248	return -EADDRNOTAVAIL;
249 }
250 }
251
252 return 0;
253}
254
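As an aside, the overlap test above treats each segment as a half-open range [mem, mem + memsz), so segments that merely touch are accepted. A tiny self-contained illustration of that predicate; the struct name and the example addresses are made up for this sketch.

#include <stdbool.h>
#include <stdio.h>

struct seg { unsigned long mem, memsz; };

/* Same predicate as the loop above: two half-open ranges overlap iff each
 * one starts before the other ends. */
static bool segs_overlap(struct seg a, struct seg b)
{
	unsigned long aend = a.mem + a.memsz;
	unsigned long bend = b.mem + b.memsz;

	return aend > b.mem && a.mem < bend;
}

int main(void)
{
	struct seg s0 = { 0x100000, 0x2000 };	/* [0x100000, 0x102000) */
	struct seg s1 = { 0x102000, 0x1000 };	/* adjacent: no overlap */
	struct seg s2 = { 0x101000, 0x1000 };	/* overlaps s0          */

	printf("%d %d\n", segs_overlap(s0, s1), segs_overlap(s0, s2));	/* prints "0 1" */
	return 0;
}
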
255struct kimage *do_kimage_alloc_init(void)
256{
257 struct kimage *image;
258
259 /* Allocate a controlling structure */
260 image = kzalloc(sizeof(*image), GFP_KERNEL);
261 if (!image)
262 return NULL;
263
264 image->head = 0;
265 image->entry = &image->head;
266 image->last_entry = &image->head;
267 image->control_page = ~0; /* By default this does not apply */
268 image->type = KEXEC_TYPE_DEFAULT;
269
270 /* Initialize the list of control pages */
271 INIT_LIST_HEAD(&image->control_pages);
272
273 /* Initialize the list of destination pages */
274 INIT_LIST_HEAD(&image->dest_pages);
275
276 /* Initialize the list of unusable pages */
277 INIT_LIST_HEAD(&image->unusable_pages);
278
279 return image;
280}
281
282int kimage_is_destination_range(struct kimage *image,
283 unsigned long start,
284 unsigned long end)
285{
286 unsigned long i;
287
288 for (i = 0; i < image->nr_segments; i++) {
289 unsigned long mstart, mend;
290
291 mstart = image->segment[i].mem;
292 mend = mstart + image->segment[i].memsz;
293 if ((end > mstart) && (start < mend))
294 return 1;
295 }
296
297 return 0;
298}
299
300static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
301{
302 struct page *pages;
303
304 pages = alloc_pages(gfp_mask, order);
305 if (pages) {
306 unsigned int count, i;
307
308 pages->mapping = NULL;
309 set_page_private(pages, order);
310 count = 1 << order;
311 for (i = 0; i < count; i++)
312 SetPageReserved(pages + i);
313 }
314
315 return pages;
316}
317
318static void kimage_free_pages(struct page *page)
319{
320 unsigned int order, count, i;
321
322 order = page_private(page);
323 count = 1 << order;
324 for (i = 0; i < count; i++)
325 ClearPageReserved(page + i);
326 __free_pages(page, order);
327}
328
329void kimage_free_page_list(struct list_head *list)
330{
 331	struct page *page, *next;
 332
 333	list_for_each_entry_safe(page, next, list, lru) {
334 list_del(&page->lru);
335 kimage_free_pages(page);
336 }
337}
338
339static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
340 unsigned int order)
341{
342 /* Control pages are special, they are the intermediaries
343 * that are needed while we copy the rest of the pages
344 * to their final resting place. As such they must
345 * not conflict with either the destination addresses
346 * or memory the kernel is already using.
347 *
348 * The only case where we really need more than one of
 349	 * these is for architectures where we cannot disable
350 * the MMU and must instead generate an identity mapped
351 * page table for all of the memory.
352 *
353 * At worst this runs in O(N) of the image size.
354 */
355 struct list_head extra_pages;
356 struct page *pages;
357 unsigned int count;
358
359 count = 1 << order;
360 INIT_LIST_HEAD(&extra_pages);
361
362 /* Loop while I can allocate a page and the page allocated
363 * is a destination page.
364 */
365 do {
366 unsigned long pfn, epfn, addr, eaddr;
367
368 pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
369 if (!pages)
370 break;
 371	pfn   = page_to_boot_pfn(pages);
372 epfn = pfn + count;
373 addr = pfn << PAGE_SHIFT;
374 eaddr = epfn << PAGE_SHIFT;
375 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
376 kimage_is_destination_range(image, addr, eaddr)) {
377 list_add(&pages->lru, &extra_pages);
378 pages = NULL;
379 }
380 } while (!pages);
381
382 if (pages) {
383 /* Remember the allocated page... */
384 list_add(&pages->lru, &image->control_pages);
385
 386	/* Because the page is already in its destination
387 * location we will never allocate another page at
388 * that address. Therefore kimage_alloc_pages
389 * will not return it (again) and we don't need
390 * to give it an entry in image->segment[].
391 */
392 }
393 /* Deal with the destination pages I have inadvertently allocated.
394 *
395 * Ideally I would convert multi-page allocations into single
396 * page allocations, and add everything to image->dest_pages.
397 *
398 * For now it is simpler to just free the pages.
399 */
400 kimage_free_page_list(&extra_pages);
401
402 return pages;
403}
404
405static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
406 unsigned int order)
407{
408 /* Control pages are special, they are the intermediaries
409 * that are needed while we copy the rest of the pages
410 * to their final resting place. As such they must
411 * not conflict with either the destination addresses
412 * or memory the kernel is already using.
413 *
 414	 * Control pages are also the only pages we must allocate
415 * when loading a crash kernel. All of the other pages
416 * are specified by the segments and we just memcpy
417 * into them directly.
418 *
419 * The only case where we really need more than one of
 420	 * these is for architectures where we cannot disable
421 * the MMU and must instead generate an identity mapped
422 * page table for all of the memory.
423 *
424 * Given the low demand this implements a very simple
425 * allocator that finds the first hole of the appropriate
426 * size in the reserved memory region, and allocates all
427 * of the memory up to and including the hole.
428 */
429 unsigned long hole_start, hole_end, size;
430 struct page *pages;
431
432 pages = NULL;
433 size = (1 << order) << PAGE_SHIFT;
434 hole_start = (image->control_page + (size - 1)) & ~(size - 1);
435 hole_end = hole_start + size - 1;
436 while (hole_end <= crashk_res.end) {
437 unsigned long i;
438
 439	cond_resched();
 440
441 if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
442 break;
443 /* See if I overlap any of the segments */
444 for (i = 0; i < image->nr_segments; i++) {
445 unsigned long mstart, mend;
446
447 mstart = image->segment[i].mem;
448 mend = mstart + image->segment[i].memsz - 1;
449 if ((hole_end >= mstart) && (hole_start <= mend)) {
450 /* Advance the hole to the end of the segment */
451 hole_start = (mend + (size - 1)) & ~(size - 1);
452 hole_end = hole_start + size - 1;
453 break;
454 }
455 }
456 /* If I don't overlap any segments I have found my hole! */
457 if (i == image->nr_segments) {
458 pages = pfn_to_page(hole_start >> PAGE_SHIFT);
 459	image->control_page = hole_end;
460 break;
461 }
462 }
463
464 return pages;
465}
466
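The hole search above repeatedly rounds the candidate start address up to the allocation size, which is a power of two for an order-N page allocation. A stand-alone check of that rounding expression, with arbitrary example values:

#include <stdio.h>

/* Round addr up to the next multiple of size (a power of two), exactly as
 * the hole search does with (addr + (size - 1)) & ~(size - 1). */
static unsigned long round_up_pow2(unsigned long addr, unsigned long size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

int main(void)
{
	/* e.g. an order-2 allocation: size = 4 pages = 0x4000 */
	printf("%#lx\n", round_up_pow2(0x12345, 0x4000));	/* 0x14000 */
	printf("%#lx\n", round_up_pow2(0x14000, 0x4000));	/* already aligned: 0x14000 */
	return 0;
}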
467
468struct page *kimage_alloc_control_pages(struct kimage *image,
469 unsigned int order)
470{
471 struct page *pages = NULL;
472
473 switch (image->type) {
474 case KEXEC_TYPE_DEFAULT:
475 pages = kimage_alloc_normal_control_pages(image, order);
476 break;
477 case KEXEC_TYPE_CRASH:
478 pages = kimage_alloc_crash_control_pages(image, order);
479 break;
480 }
481
482 return pages;
483}
484
485int kimage_crash_copy_vmcoreinfo(struct kimage *image)
486{
487 struct page *vmcoreinfo_page;
488 void *safecopy;
489
490 if (image->type != KEXEC_TYPE_CRASH)
491 return 0;
492
493 /*
494 * For kdump, allocate one vmcoreinfo safe copy from the
 495	 * crash memory. Since arch_kexec_protect_crashkres() is called
 496	 * after the kexec syscall, the copy is naturally protected from
 497	 * write (even read) access under the kernel direct mapping. But
 498	 * we still need to write it when a crash happens, in order to
 499	 * generate the vmcoreinfo note, so we rely on a vmap alias for
 500	 * that purpose.
501 */
502 vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
503 if (!vmcoreinfo_page) {
504 pr_warn("Could not allocate vmcoreinfo buffer\n");
505 return -ENOMEM;
506 }
507 safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
508 if (!safecopy) {
509 pr_warn("Could not vmap vmcoreinfo buffer\n");
510 return -ENOMEM;
511 }
512
513 image->vmcoreinfo_data_copy = safecopy;
514 crash_update_vmcoreinfo_safecopy(safecopy);
515
516 return 0;
517}
518
519static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
520{
521 if (*image->entry != 0)
522 image->entry++;
523
524 if (image->entry == image->last_entry) {
525 kimage_entry_t *ind_page;
526 struct page *page;
527
528 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
529 if (!page)
530 return -ENOMEM;
531
532 ind_page = page_address(page);
 533	*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
534 image->entry = ind_page;
535 image->last_entry = ind_page +
536 ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
537 }
538 *image->entry = entry;
539 image->entry++;
540 *image->entry = 0;
541
542 return 0;
543}
544
545static int kimage_set_destination(struct kimage *image,
546 unsigned long destination)
547{
548 int result;
549
550 destination &= PAGE_MASK;
551 result = kimage_add_entry(image, destination | IND_DESTINATION);
552
553 return result;
554}
555
556
557static int kimage_add_page(struct kimage *image, unsigned long page)
558{
559 int result;
560
561 page &= PAGE_MASK;
562 result = kimage_add_entry(image, page | IND_SOURCE);
563
564 return result;
565}
566
567
568static void kimage_free_extra_pages(struct kimage *image)
569{
570 /* Walk through and free any extra destination pages I may have */
571 kimage_free_page_list(&image->dest_pages);
572
573 /* Walk through and free any unusable pages I have cached */
574 kimage_free_page_list(&image->unusable_pages);
575
576}
577void kimage_terminate(struct kimage *image)
578{
579 if (*image->entry != 0)
580 image->entry++;
581
582 *image->entry = IND_DONE;
583}
584
585#define for_each_kimage_entry(image, ptr, entry) \
586 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
587 ptr = (entry & IND_INDIRECTION) ? \
 588	boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
589
590static void kimage_free_entry(kimage_entry_t entry)
591{
592 struct page *page;
593
 594	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
595 kimage_free_pages(page);
596}
597
598void kimage_free(struct kimage *image)
599{
600 kimage_entry_t *ptr, entry;
601 kimage_entry_t ind = 0;
602
603 if (!image)
604 return;
605
606 if (image->vmcoreinfo_data_copy) {
607 crash_update_vmcoreinfo_safecopy(NULL);
608 vunmap(image->vmcoreinfo_data_copy);
609 }
610
611 kimage_free_extra_pages(image);
612 for_each_kimage_entry(image, ptr, entry) {
613 if (entry & IND_INDIRECTION) {
614 /* Free the previous indirection page */
615 if (ind & IND_INDIRECTION)
616 kimage_free_entry(ind);
617 /* Save this indirection page until we are
618 * done with it.
619 */
620 ind = entry;
621 } else if (entry & IND_SOURCE)
622 kimage_free_entry(entry);
623 }
624 /* Free the final indirection page */
625 if (ind & IND_INDIRECTION)
626 kimage_free_entry(ind);
627
628 /* Handle any machine specific cleanup */
629 machine_kexec_cleanup(image);
630
631 /* Free the kexec control pages... */
632 kimage_free_page_list(&image->control_pages);
633
634 /*
 635	 * Free up any temporary buffers allocated. This path is hit if
 636	 * an error occurred well after buffer allocation.
637 */
638 if (image->file_mode)
639 kimage_file_post_load_cleanup(image);
640
641 kfree(image);
642}
643
644static kimage_entry_t *kimage_dst_used(struct kimage *image,
645 unsigned long page)
646{
647 kimage_entry_t *ptr, entry;
648 unsigned long destination = 0;
649
650 for_each_kimage_entry(image, ptr, entry) {
651 if (entry & IND_DESTINATION)
652 destination = entry & PAGE_MASK;
653 else if (entry & IND_SOURCE) {
654 if (page == destination)
655 return ptr;
656 destination += PAGE_SIZE;
657 }
658 }
659
660 return NULL;
661}
662
663static struct page *kimage_alloc_page(struct kimage *image,
664 gfp_t gfp_mask,
665 unsigned long destination)
666{
667 /*
668 * Here we implement safeguards to ensure that a source page
669 * is not copied to its destination page before the data on
670 * the destination page is no longer useful.
671 *
672 * To do this we maintain the invariant that a source page is
673 * either its own destination page, or it is not a
674 * destination page at all.
675 *
676 * That is slightly stronger than required, but the proof
 677	 * that no problems will occur is trivial, and the
 678	 * implementation is simple to verify.
679 *
680 * When allocating all pages normally this algorithm will run
681 * in O(N) time, but in the worst case it will run in O(N^2)
682 * time. If the runtime is a problem the data structures can
683 * be fixed.
684 */
685 struct page *page;
686 unsigned long addr;
687
688 /*
689 * Walk through the list of destination pages, and see if I
690 * have a match.
691 */
692 list_for_each_entry(page, &image->dest_pages, lru) {
 693	addr = page_to_boot_pfn(page) << PAGE_SHIFT;
694 if (addr == destination) {
695 list_del(&page->lru);
696 return page;
697 }
698 }
699 page = NULL;
700 while (1) {
701 kimage_entry_t *old;
702
703 /* Allocate a page, if we run out of memory give up */
704 page = kimage_alloc_pages(gfp_mask, 0);
705 if (!page)
706 return NULL;
707 /* If the page cannot be used file it away */
 708	if (page_to_boot_pfn(page) >
709 (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
710 list_add(&page->lru, &image->unusable_pages);
711 continue;
712 }
 713	addr = page_to_boot_pfn(page) << PAGE_SHIFT;
714
 715	/* If it is the destination page we want, use it */
716 if (addr == destination)
717 break;
718
719 /* If the page is not a destination page use it */
720 if (!kimage_is_destination_range(image, addr,
721 addr + PAGE_SIZE))
722 break;
723
724 /*
 725	 * I know that the page is someone's destination page.
 726	 * See if there is already a source page for this
 727	 * destination page. If so, swap the source pages.
728 */
729 old = kimage_dst_used(image, addr);
730 if (old) {
731 /* If so move it */
732 unsigned long old_addr;
733 struct page *old_page;
734
735 old_addr = *old & PAGE_MASK;
 736	old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
737 copy_highpage(page, old_page);
738 *old = addr | (*old & ~PAGE_MASK);
739
740 /* The old page I have found cannot be a
 741	 * destination page, so return it if its
742 * gfp_flags honor the ones passed in.
743 */
744 if (!(gfp_mask & __GFP_HIGHMEM) &&
745 PageHighMem(old_page)) {
746 kimage_free_pages(old_page);
747 continue;
748 }
749 addr = old_addr;
750 page = old_page;
751 break;
752 }
753 /* Place the page on the destination list, to be used later */
754 list_add(&page->lru, &image->dest_pages);
755 }
756
757 return page;
758}
759
760static int kimage_load_normal_segment(struct kimage *image,
761 struct kexec_segment *segment)
762{
763 unsigned long maddr;
764 size_t ubytes, mbytes;
765 int result;
766 unsigned char __user *buf = NULL;
767 unsigned char *kbuf = NULL;
768
769 result = 0;
770 if (image->file_mode)
771 kbuf = segment->kbuf;
772 else
773 buf = segment->buf;
774 ubytes = segment->bufsz;
775 mbytes = segment->memsz;
776 maddr = segment->mem;
777
778 result = kimage_set_destination(image, maddr);
779 if (result < 0)
780 goto out;
781
782 while (mbytes) {
783 struct page *page;
784 char *ptr;
785 size_t uchunk, mchunk;
786
787 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
788 if (!page) {
789 result = -ENOMEM;
790 goto out;
791 }
 792	result = kimage_add_page(image, page_to_boot_pfn(page)
793 << PAGE_SHIFT);
794 if (result < 0)
795 goto out;
796
797 ptr = kmap(page);
798 /* Start with a clear page */
799 clear_page(ptr);
800 ptr += maddr & ~PAGE_MASK;
801 mchunk = min_t(size_t, mbytes,
802 PAGE_SIZE - (maddr & ~PAGE_MASK));
803 uchunk = min(ubytes, mchunk);
804
805 /* For file based kexec, source pages are in kernel memory */
806 if (image->file_mode)
807 memcpy(ptr, kbuf, uchunk);
808 else
809 result = copy_from_user(ptr, buf, uchunk);
810 kunmap(page);
811 if (result) {
812 result = -EFAULT;
813 goto out;
814 }
815 ubytes -= uchunk;
816 maddr += mchunk;
817 if (image->file_mode)
818 kbuf += mchunk;
819 else
820 buf += mchunk;
821 mbytes -= mchunk;
822 }
823out:
824 return result;
825}
826
827static int kimage_load_crash_segment(struct kimage *image,
828 struct kexec_segment *segment)
829{
 830	/* For crash dump kernels we simply copy the data from
 831	 * user space to its destination.
832 * We do things a page at a time for the sake of kmap.
833 */
834 unsigned long maddr;
835 size_t ubytes, mbytes;
836 int result;
837 unsigned char __user *buf = NULL;
838 unsigned char *kbuf = NULL;
839
840 result = 0;
841 if (image->file_mode)
842 kbuf = segment->kbuf;
843 else
844 buf = segment->buf;
845 ubytes = segment->bufsz;
846 mbytes = segment->memsz;
847 maddr = segment->mem;
848 while (mbytes) {
849 struct page *page;
850 char *ptr;
851 size_t uchunk, mchunk;
852
 853	page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
854 if (!page) {
855 result = -ENOMEM;
856 goto out;
857 }
858 ptr = kmap(page);
859 ptr += maddr & ~PAGE_MASK;
860 mchunk = min_t(size_t, mbytes,
861 PAGE_SIZE - (maddr & ~PAGE_MASK));
862 uchunk = min(ubytes, mchunk);
863 if (mchunk > uchunk) {
864 /* Zero the trailing part of the page */
865 memset(ptr + uchunk, 0, mchunk - uchunk);
866 }
867
868 /* For file based kexec, source pages are in kernel memory */
869 if (image->file_mode)
870 memcpy(ptr, kbuf, uchunk);
871 else
872 result = copy_from_user(ptr, buf, uchunk);
873 kexec_flush_icache_page(page);
874 kunmap(page);
875 if (result) {
876 result = -EFAULT;
877 goto out;
878 }
879 ubytes -= uchunk;
880 maddr += mchunk;
881 if (image->file_mode)
882 kbuf += mchunk;
883 else
884 buf += mchunk;
885 mbytes -= mchunk;
886 }
887out:
888 return result;
889}
890
891int kimage_load_segment(struct kimage *image,
892 struct kexec_segment *segment)
893{
894 int result = -ENOMEM;
895
896 switch (image->type) {
897 case KEXEC_TYPE_DEFAULT:
898 result = kimage_load_normal_segment(image, segment);
899 break;
900 case KEXEC_TYPE_CRASH:
901 result = kimage_load_crash_segment(image, segment);
902 break;
903 }
904
905 return result;
906}
907
908struct kimage *kexec_image;
909struct kimage *kexec_crash_image;
910int kexec_load_disabled;
911
912/*
913 * No panic_cpu check version of crash_kexec(). This function is called
914 * only when panic_cpu holds the current CPU number; this is the only CPU
915 * which processes crash_kexec routines.
916 */
 917void __noclone __crash_kexec(struct pt_regs *regs)
918{
919 /* Take the kexec_mutex here to prevent sys_kexec_load
920 * running on one cpu from replacing the crash kernel
921 * we are using after a panic on a different cpu.
922 *
923 * If the crash kernel was not located in a fixed area
924 * of memory the xchg(&kexec_crash_image) would be
925 * sufficient. But since I reuse the memory...
926 */
927 if (mutex_trylock(&kexec_mutex)) {
928 if (kexec_crash_image) {
929 struct pt_regs fixed_regs;
930
931 crash_setup_regs(&fixed_regs, regs);
932 crash_save_vmcoreinfo();
933 machine_crash_shutdown(&fixed_regs);
934 machine_kexec(kexec_crash_image);
935 }
936 mutex_unlock(&kexec_mutex);
937 }
938}
 939STACK_FRAME_NON_STANDARD(__crash_kexec);
 940
941void crash_kexec(struct pt_regs *regs)
942{
943 int old_cpu, this_cpu;
944
945 /*
946 * Only one CPU is allowed to execute the crash_kexec() code as with
947 * panic(). Otherwise parallel calls of panic() and crash_kexec()
948 * may stop each other. To exclude them, we use panic_cpu here too.
949 */
950 this_cpu = raw_smp_processor_id();
951 old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
952 if (old_cpu == PANIC_CPU_INVALID) {
953 /* This is the 1st CPU which comes here, so go ahead. */
 954	printk_safe_flush_on_panic();
955 __crash_kexec(regs);
956
957 /*
958 * Reset panic_cpu to allow another panic()/crash_kexec()
959 * call.
960 */
961 atomic_set(&panic_cpu, PANIC_CPU_INVALID);
962 }
963}
964
965size_t crash_get_memory_size(void)
966{
967 size_t size = 0;
968
969 mutex_lock(&kexec_mutex);
970 if (crashk_res.end != crashk_res.start)
971 size = resource_size(&crashk_res);
972 mutex_unlock(&kexec_mutex);
973 return size;
974}
975
976void __weak crash_free_reserved_phys_range(unsigned long begin,
977 unsigned long end)
978{
979 unsigned long addr;
980
981 for (addr = begin; addr < end; addr += PAGE_SIZE)
 982	free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
983}
984
985int crash_shrink_memory(unsigned long new_size)
986{
987 int ret = 0;
988 unsigned long start, end;
989 unsigned long old_size;
990 struct resource *ram_res;
991
992 mutex_lock(&kexec_mutex);
993
994 if (kexec_crash_image) {
995 ret = -ENOENT;
996 goto unlock;
997 }
998 start = crashk_res.start;
999 end = crashk_res.end;
1000 old_size = (end == 0) ? 0 : end - start + 1;
1001 if (new_size >= old_size) {
1002 ret = (new_size == old_size) ? 0 : -EINVAL;
1003 goto unlock;
1004 }
1005
1006 ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1007 if (!ram_res) {
1008 ret = -ENOMEM;
1009 goto unlock;
1010 }
1011
1012 start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1013 end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1014
1015 crash_free_reserved_phys_range(end, crashk_res.end);
1016
1017 if ((start == end) && (crashk_res.parent != NULL))
1018 release_resource(&crashk_res);
1019
1020 ram_res->start = end;
1021 ram_res->end = crashk_res.end;
 1022	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
1023 ram_res->name = "System RAM";
1024
1025 crashk_res.end = end - 1;
1026
1027 insert_resource(&iomem_resource, ram_res);
1028
1029unlock:
1030 mutex_unlock(&kexec_mutex);
1031 return ret;
1032}
1033
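For context, crash_shrink_memory() is normally reached from user space through the kexec_crash_size sysfs attribute (wired up in kernel/ksysfs.c). A minimal user-space sketch of shrinking the reservation, assuming that attribute exists on the running kernel and the caller has the required privileges:

#include <stdio.h>

int main(void)
{
	/* Shrink the crash kernel reservation to 64 MiB (size in bytes).
	 * This only succeeds while no crash kernel is loaded; otherwise
	 * crash_shrink_memory() returns -ENOENT. */
	FILE *f = fopen("/sys/kernel/kexec_crash_size", "w");

	if (!f) {
		perror("kexec_crash_size");
		return 1;
	}
	fprintf(f, "%lu\n", 64UL << 20);
	fclose(f);
	return 0;
}
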
1034void crash_save_cpu(struct pt_regs *regs, int cpu)
1035{
1036 struct elf_prstatus prstatus;
1037 u32 *buf;
1038
1039 if ((cpu < 0) || (cpu >= nr_cpu_ids))
1040 return;
1041
1042 /* Using ELF notes here is opportunistic.
1043 * I need a well defined structure format
1044 * for the data I pass, and I need tags
1045 * on the data to indicate what information I have
1046 * squirrelled away. ELF notes happen to provide
1047 * all of that, so there is no need to invent something new.
1048 */
1049 buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
1050 if (!buf)
1051 return;
1052 memset(&prstatus, 0, sizeof(prstatus));
1053 prstatus.pr_pid = current->pid;
1054 elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1055 buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1056 &prstatus, sizeof(prstatus));
1057 final_note(buf);
1058}
1059
1060static int __init crash_notes_memory_init(void)
1061{
1062 /* Allocate memory for saving cpu registers. */
1063 size_t size, align;
1064
1065 /*
 1066	 * crash_notes could be allocated across 2 vmalloc pages when percpu
 1067	 * is vmalloc based. vmalloc doesn't guarantee that 2 contiguous vmalloc
 1068	 * pages are also on 2 contiguous physical pages. In that case the
 1069	 * 2nd part of crash_notes in the 2nd page could be lost since only the
 1070	 * starting address and size of crash_notes are exported through sysfs.
 1071	 * Here round up the size of crash_notes to the nearest power of two
 1072	 * and pass it to __alloc_percpu as the align value. This makes sure
 1073	 * crash_notes is allocated inside one physical page.
1074 */
1075 size = sizeof(note_buf_t);
1076 align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
1077
1078 /*
 1079	 * Break the compile if size is bigger than PAGE_SIZE, since crash_notes
 1080	 * would then definitely span 2 pages.
1081 */
1082 BUILD_BUG_ON(size > PAGE_SIZE);
1083
1084 crash_notes = __alloc_percpu(size, align);
 1085	if (!crash_notes) {
 1086	pr_warn("Memory allocation for saving cpu register states failed\n");
1087 return -ENOMEM;
1088 }
1089 return 0;
1090}
1091subsys_initcall(crash_notes_memory_init);
1092
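The alignment reasoning above can be checked in isolation: if the allocation size is rounded up to a power of two (capped at PAGE_SIZE, mirroring the min() above) and that value is used as the alignment, an object of that size can never straddle a page boundary. A small stand-alone sketch using an arbitrary stand-in for sizeof(note_buf_t):

#include <stdio.h>

#define SIM_PAGE_SIZE 4096UL

static unsigned long sim_roundup_pow_of_two(unsigned long x)
{
	unsigned long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long size  = 1800;	/* stand-in for sizeof(note_buf_t) */
	unsigned long align = sim_roundup_pow_of_two(size);
	unsigned long off;

	if (align > SIM_PAGE_SIZE)
		align = SIM_PAGE_SIZE;

	/* Every allowed offset keeps the object inside a single page. */
	for (off = 0; off < SIM_PAGE_SIZE; off += align)
		printf("offset %4lu..%4lu -> page %lu..%lu\n",
		       off, off + size - 1,
		       off / SIM_PAGE_SIZE, (off + size - 1) / SIM_PAGE_SIZE);
	return 0;
}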
1093
1094/*
1095 * Move into place and start executing a preloaded standalone
1096 * executable. If nothing was preloaded return an error.
1097 */
1098int kernel_kexec(void)
1099{
1100 int error = 0;
1101
1102 if (!mutex_trylock(&kexec_mutex))
1103 return -EBUSY;
1104 if (!kexec_image) {
1105 error = -EINVAL;
1106 goto Unlock;
1107 }
1108
1109#ifdef CONFIG_KEXEC_JUMP
1110 if (kexec_image->preserve_context) {
1111 lock_system_sleep();
1112 pm_prepare_console();
1113 error = freeze_processes();
1114 if (error) {
1115 error = -EBUSY;
1116 goto Restore_console;
1117 }
1118 suspend_console();
1119 error = dpm_suspend_start(PMSG_FREEZE);
1120 if (error)
1121 goto Resume_console;
1122 /* At this point, dpm_suspend_start() has been called,
1123 * but *not* dpm_suspend_end(). We *must* call
1124 * dpm_suspend_end() now. Otherwise, drivers for
1125 * some devices (e.g. interrupt controllers) become
1126 * desynchronized with the actual state of the
1127 * hardware at resume time, and evil weirdness ensues.
1128 */
1129 error = dpm_suspend_end(PMSG_FREEZE);
1130 if (error)
1131 goto Resume_devices;
1132 error = disable_nonboot_cpus();
1133 if (error)
1134 goto Enable_cpus;
1135 local_irq_disable();
1136 error = syscore_suspend();
1137 if (error)
1138 goto Enable_irqs;
1139 } else
1140#endif
1141 {
1142 kexec_in_progress = true;
1143 kernel_restart_prepare(NULL);
1144 migrate_to_reboot_cpu();
1145
1146 /*
1147 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1148 * no further code needs to use CPU hotplug (which is true in
1149 * the reboot case). However, the kexec path depends on using
1150 * CPU hotplug again; so re-enable it here.
1151 */
1152 cpu_hotplug_enable();
1153 pr_emerg("Starting new kernel\n");
1154 machine_shutdown();
1155 }
1156
1157 machine_kexec(kexec_image);
1158
1159#ifdef CONFIG_KEXEC_JUMP
1160 if (kexec_image->preserve_context) {
1161 syscore_resume();
1162 Enable_irqs:
1163 local_irq_enable();
1164 Enable_cpus:
1165 enable_nonboot_cpus();
1166 dpm_resume_start(PMSG_RESTORE);
1167 Resume_devices:
1168 dpm_resume_end(PMSG_RESTORE);
1169 Resume_console:
1170 resume_console();
1171 thaw_processes();
1172 Restore_console:
1173 pm_restore_console();
1174 unlock_system_sleep();
1175 }
1176#endif
1177
1178 Unlock:
1179 mutex_unlock(&kexec_mutex);
1180 return error;
1181}
1182
1183/*
1184 * Protection mechanism for crashkernel reserved memory after
1185 * the kdump kernel is loaded.
1186 *
1187 * Provide an empty default implementation here -- architecture
1188 * code may override this
1189 */
1190void __weak arch_kexec_protect_crashkres(void)
1191{}
1192
1193void __weak arch_kexec_unprotect_crashkres(void)
1194{}