/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kexec.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

#include "cpu-reset.h"

/*
 * Symbols for the arm64_relocate_new_kernel routine, which is defined in
 * relocate_kernel.S and copied to the control code page at kexec time.
 */
extern const unsigned char arm64_relocate_new_kernel[];
extern const unsigned long arm64_relocate_new_kernel_size;

/**
 * kexec_image_info - For debugging output.
 */
#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
static void _kexec_image_info(const char *func, int line,
        const struct kimage *kimage)
{
        unsigned long i;

        pr_debug("%s:%d:\n", func, line);
        pr_debug("  kexec kimage info:\n");
        pr_debug("    type:        %d\n", kimage->type);
        pr_debug("    start:       %lx\n", kimage->start);
        pr_debug("    head:        %lx\n", kimage->head);
        pr_debug("    nr_segments: %lu\n", kimage->nr_segments);

        for (i = 0; i < kimage->nr_segments; i++) {
                pr_debug("      segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
                        i,
                        kimage->segment[i].mem,
                        kimage->segment[i].mem + kimage->segment[i].memsz,
                        kimage->segment[i].memsz,
                        kimage->segment[i].memsz / PAGE_SIZE);
        }
}

void machine_kexec_cleanup(struct kimage *kimage)
{
        /* Empty routine needed to avoid build errors. */
}

/**
 * machine_kexec_prepare - Prepare for a kexec reboot.
 *
 * Called from the core kexec code when a kernel image is loaded.
 * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
 * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
 */
int machine_kexec_prepare(struct kimage *kimage)
{
        kexec_image_info(kimage);

        if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
                pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
                return -EBUSY;
        }

        return 0;
}

/**
 * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
 */
static void kexec_list_flush(struct kimage *kimage)
{
        kimage_entry_t *entry;

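        /*
         * Each kimage_entry_t holds an IND_* flag in its low bits and a
         * physical address in the rest: IND_INDIRECTION entries point at
         * the next page of list entries, IND_SOURCE entries point at pages
         * to be copied during relocation, and IND_DONE ends the list.
         */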
        for (entry = &kimage->head; ; entry++) {
                unsigned int flag;
                void *addr;

                /* flush the list entries. */
                __flush_dcache_area(entry, sizeof(kimage_entry_t));

                flag = *entry & IND_FLAGS;
                if (flag == IND_DONE)
                        break;

                addr = phys_to_virt(*entry & PAGE_MASK);

                switch (flag) {
                case IND_INDIRECTION:
                        /*
                         * Point just before the new list page so that the
                         * loop's entry++ lands on its first entry.
                         */
                        entry = (kimage_entry_t *)addr - 1;
                        break;
                case IND_SOURCE:
                        /* flush the source pages. */
                        __flush_dcache_area(addr, PAGE_SIZE);
                        break;
                case IND_DESTINATION:
                        break;
                default:
                        BUG();
                }
        }
}

/**
 * kexec_segment_flush - Helper to flush the kimage segments to PoC.
 */
static void kexec_segment_flush(const struct kimage *kimage)
{
        unsigned long i;

        pr_debug("%s:\n", __func__);

        for (i = 0; i < kimage->nr_segments; i++) {
                pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
                        i,
                        kimage->segment[i].mem,
                        kimage->segment[i].mem + kimage->segment[i].memsz,
                        kimage->segment[i].memsz,
                        kimage->segment[i].memsz / PAGE_SIZE);

                __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
                        kimage->segment[i].memsz);
        }
}

/**
 * machine_kexec - Do the kexec reboot.
 *
 * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
 */
void machine_kexec(struct kimage *kimage)
{
        phys_addr_t reboot_code_buffer_phys;
        void *reboot_code_buffer;

        /*
         * New cpus may have become stuck_in_kernel after we loaded the image.
         */
        BUG_ON(cpus_are_stuck_in_kernel() || (num_online_cpus() > 1));

        reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
        reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);

        kexec_image_info(kimage);

        pr_debug("%s:%d: control_code_page:        %p\n", __func__, __LINE__,
                kimage->control_code_page);
        pr_debug("%s:%d: reboot_code_buffer_phys:  %pa\n", __func__, __LINE__,
                &reboot_code_buffer_phys);
        pr_debug("%s:%d: reboot_code_buffer:       %p\n", __func__, __LINE__,
                reboot_code_buffer);
        pr_debug("%s:%d: relocate_new_kernel:      %p\n", __func__, __LINE__,
                arm64_relocate_new_kernel);
        pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
                __func__, __LINE__, arm64_relocate_new_kernel_size,
                arm64_relocate_new_kernel_size);

        /*
         * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
         * after the kernel is shut down.
         */
        memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
                arm64_relocate_new_kernel_size);

        /* Flush the reboot_code_buffer in preparation for its execution. */
        __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
        flush_icache_range((uintptr_t)reboot_code_buffer,
                (uintptr_t)reboot_code_buffer +
                arm64_relocate_new_kernel_size);

        /* Flush the kimage list and its buffers. */
        kexec_list_flush(kimage);

        /*
         * Flush the new image if it is already in place: a kimage whose
         * head is IND_DONE has no pages to relocate, so its segments must
         * be cleaned to PoC here. The crash image was already flushed when
         * it was loaded and protected.
         */
        if ((kimage != kexec_crash_image) && (kimage->head & IND_DONE))
                kexec_segment_flush(kimage);

        pr_info("Bye!\n");

        /* Mask all DAIF exceptions: Debug, SError, IRQ and FIQ. */
        asm volatile ("msr daifset, #0xf" : : : "memory");

        /*
         * cpu_soft_restart will shutdown the MMU, disable data caches, then
         * transfer control to the reboot_code_buffer which contains a copy of
         * the arm64_relocate_new_kernel routine.  arm64_relocate_new_kernel
         * uses physical addressing to relocate the new image to its final
         * position and transfers control to the image entry point when the
         * relocation is complete.
         */

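        /*
         * The first argument requests a switch to EL2 where hyp mode is
         * available; the others are the physical address of the copied
         * relocation code and the two values handed on to
         * arm64_relocate_new_kernel: the kimage list head and the entry
         * point of the new image.
         */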
        cpu_soft_restart(1, reboot_code_buffer_phys, kimage->head,
                kimage->start, 0);

        BUG(); /* Should never get here. */
}

void machine_crash_shutdown(struct pt_regs *regs)
{
        /* Empty routine needed to avoid build errors. */
}

void arch_kexec_protect_crashkres(void)
{
        int i;

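        /*
         * Clean the crash image segments to PoC now, then mark their pages
         * invalid in the linear map so that stray writes to the crash
         * kernel region fault while it is protected.
         */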
        kexec_segment_flush(kexec_crash_image);

        for (i = 0; i < kexec_crash_image->nr_segments; i++)
                set_memory_valid(
                        __phys_to_virt(kexec_crash_image->segment[i].mem),
                        kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
}

void arch_kexec_unprotect_crashkres(void)
{
        int i;

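        /* Make the crash image pages accessible again in the linear map. */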
        for (i = 0; i < kexec_crash_image->nr_segments; i++)
                set_memory_valid(
                        __phys_to_virt(kexec_crash_image->segment[i].mem),
                        kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
}