// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/page-flags.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

#include "cpu-reset.h"

/* Global variables for the arm64_relocate_new_kernel routine. */
extern const unsigned char arm64_relocate_new_kernel[];
extern const unsigned long arm64_relocate_new_kernel_size;
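/*
 * Note: arm64_relocate_new_kernel is the position-independent relocation
 * routine (assembly) that is copied into the control code page and runs
 * with the MMU off to move the new image into place;
 * arm64_relocate_new_kernel_size is the number of bytes to copy.
 */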

/**
 * kexec_image_info - For debugging output.
 */
#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
static void _kexec_image_info(const char *func, int line,
	const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("%s:%d:\n", func, line);
	pr_debug("  kexec kimage info:\n");
	pr_debug("    type:        %d\n", kimage->type);
	pr_debug("    start:       %lx\n", kimage->start);
	pr_debug("    head:        %lx\n", kimage->head);
	pr_debug("    nr_segments: %lu\n", kimage->nr_segments);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("      segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			kimage->segment[i].memsz,
			kimage->segment[i].memsz / PAGE_SIZE);
	}
}

void machine_kexec_cleanup(struct kimage *kimage)
{
	/* Empty routine needed to avoid build errors. */
}

/**
 * machine_kexec_prepare - Prepare for a kexec reboot.
 *
 * Called from the core kexec code when a kernel image is loaded.
 * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
 * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
 */
int machine_kexec_prepare(struct kimage *kimage)
{
	kexec_image_info(kimage);

	if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
		pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
 */
static void kexec_list_flush(struct kimage *kimage)
{
	kimage_entry_t *entry;

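	/*
	 * Each entry is a physical address with an IND_* flag encoded in its
	 * low bits: IND_DESTINATION names the next destination page,
	 * IND_SOURCE a page holding image data, IND_INDIRECTION the next
	 * page of entries, and IND_DONE terminates the list.
	 */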
	for (entry = &kimage->head; ; entry++) {
		unsigned int flag;
		void *addr;

		/* flush the list entries. */
		__flush_dcache_area(entry, sizeof(kimage_entry_t));

		flag = *entry & IND_FLAGS;
		if (flag == IND_DONE)
			break;

		addr = phys_to_virt(*entry & PAGE_MASK);

		switch (flag) {
		case IND_INDIRECTION:
			/* Set entry point just before the new list page. */
			entry = (kimage_entry_t *)addr - 1;
			break;
		case IND_SOURCE:
			/* flush the source pages. */
			__flush_dcache_area(addr, PAGE_SIZE);
			break;
		case IND_DESTINATION:
			break;
		default:
			BUG();
		}
	}
}

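/*
 * Unlike kexec_list_flush(), which walks the list and cleans the scattered
 * source pages, the helper below cleans each segment at its final
 * destination address; it is used when the image (e.g. a crash kernel) has
 * already been loaded in place.
 */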
/**
 * kexec_segment_flush - Helper to flush the kimage segments to PoC.
 */
static void kexec_segment_flush(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("%s:\n", __func__);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			kimage->segment[i].memsz,
			kimage->segment[i].memsz / PAGE_SIZE);

		__flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
			kimage->segment[i].memsz);
	}
}

/**
 * machine_kexec - Do the kexec reboot.
 *
 * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
 */
void machine_kexec(struct kimage *kimage)
{
	phys_addr_t reboot_code_buffer_phys;
	void *reboot_code_buffer;
	bool in_kexec_crash = (kimage == kexec_crash_image);
	bool stuck_cpus = cpus_are_stuck_in_kernel();

	/*
	 * New cpus may have become stuck_in_kernel after we loaded the image.
	 */
	BUG_ON(!in_kexec_crash && (stuck_cpus || (num_online_cpus() > 1)));
	WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
		"Some CPUs may be stale, kdump will be unreliable.\n");

	reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);

	kexec_image_info(kimage);

	pr_debug("%s:%d: control_code_page:        %p\n", __func__, __LINE__,
		kimage->control_code_page);
	pr_debug("%s:%d: reboot_code_buffer_phys:  %pa\n", __func__, __LINE__,
		&reboot_code_buffer_phys);
	pr_debug("%s:%d: reboot_code_buffer:       %p\n", __func__, __LINE__,
		reboot_code_buffer);
	pr_debug("%s:%d: relocate_new_kernel:      %p\n", __func__, __LINE__,
		arm64_relocate_new_kernel);
	pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
		__func__, __LINE__, arm64_relocate_new_kernel_size,
		arm64_relocate_new_kernel_size);

	/*
	 * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
	 * after the kernel is shut down.
	 */
	memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
		arm64_relocate_new_kernel_size);

	/* Flush the reboot_code_buffer in preparation for its execution. */
	__flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);

	/*
	 * Although we've killed off the secondary CPUs, we don't update
	 * the online mask if we're handling a crash kernel and consequently
	 * need to avoid flush_icache_range(), which will attempt to IPI
	 * the offline CPUs. Therefore, we must use the __* variant here.
	 */
	__flush_icache_range((uintptr_t)reboot_code_buffer,
			     arm64_relocate_new_kernel_size);

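	/*
	 * Everything the relocation code will consume with the MMU and
	 * caches off - the list entries, the scattered source pages and any
	 * segments already sitting at their destination - must be clean to
	 * the PoC, hence the flushes below.
	 */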
	/* Flush the kimage list and its buffers. */
	kexec_list_flush(kimage);

	/* Flush the new image if already in place. */
	if ((kimage != kexec_crash_image) && (kimage->head & IND_DONE))
		kexec_segment_flush(kimage);

	pr_info("Bye!\n");

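	/* Mask all DAIF exceptions so nothing can interrupt the reset path. */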
	local_daif_mask();

	/*
	 * cpu_soft_restart will shutdown the MMU, disable data caches, then
	 * transfer control to the reboot_code_buffer which contains a copy of
	 * the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel
	 * uses physical addressing to relocate the new image to its final
	 * position and transfers control to the image entry point when the
	 * relocation is complete.
	 * In kexec case, kimage->start points to purgatory assuming that
	 * kernel entry and dtb address are embedded in purgatory by
	 * userspace (kexec-tools).
	 * In kexec_file case, the kernel starts directly without purgatory.
	 */
	cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start,
#ifdef CONFIG_KEXEC_FILE
			 kimage->arch.dtb_mem);
#else
			 0);
#endif

	BUG(); /* Should never get here. */
}

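/*
 * Mask and deactivate every interrupt at the irqchip level so the crash
 * kernel does not start with interrupts left active or pending from the
 * crashed kernel.
 */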
static void machine_kexec_mask_interrupts(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;
		int ret;

		chip = irq_desc_get_chip(desc);
		if (!chip)
			continue;

		/*
		 * First try to remove the active state. If this
		 * fails, try to EOI the interrupt.
		 */
		ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);

		if (ret && irqd_irq_inprogress(&desc->irq_data) &&
		    chip->irq_eoi)
			chip->irq_eoi(&desc->irq_data);

		if (chip->irq_mask)
			chip->irq_mask(&desc->irq_data);

		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
			chip->irq_disable(&desc->irq_data);
	}
}

/**
 * machine_crash_shutdown - shutdown non-crashing cpus and save registers
 */
void machine_crash_shutdown(struct pt_regs *regs)
{
	local_irq_disable();

	/* shutdown non-crashing cpus */
	crash_smp_send_stop();

	/* for crashing cpu */
	crash_save_cpu(regs, smp_processor_id());
	machine_kexec_mask_interrupts();

	pr_info("Starting crashdump kernel...\n");
}

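/*
 * The loaded crash kernel image is protected by unmapping it from the
 * linear map (clearing the valid bit on its pages) so the running kernel
 * cannot corrupt it; it is mapped back in only around operations that
 * legitimately need to touch it.
 */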
void arch_kexec_protect_crashkres(void)
{
	int i;

	kexec_segment_flush(kexec_crash_image);

	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		set_memory_valid(
			__phys_to_virt(kexec_crash_image->segment[i].mem),
			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
}

void arch_kexec_unprotect_crashkres(void)
{
	int i;

	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		set_memory_valid(
			__phys_to_virt(kexec_crash_image->segment[i].mem),
			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
}

#ifdef CONFIG_HIBERNATION
/*
 * To preserve the crash dump kernel image, the relevant memory segments
 * should be mapped again around the hibernation.
 */
void crash_prepare_suspend(void)
{
	if (kexec_crash_image)
		arch_kexec_unprotect_crashkres();
}

void crash_post_resume(void)
{
	if (kexec_crash_image)
		arch_kexec_protect_crashkres();
}

/*
 * crash_is_nosave
 *
 * Return true only if a page is part of reserved memory for crash dump kernel,
 * but does not hold any data of loaded kernel image.
 *
 * Note that all the pages in crash dump kernel memory have been initially
 * marked as Reserved as memory was allocated via memblock_reserve().
 *
 * In hibernation, the pages which are Reserved and yet "nosave" are excluded
 * from the hibernation image. crash_is_nosave() does this check for crash
 * dump kernel and will reduce the total size of hibernation image.
 */

bool crash_is_nosave(unsigned long pfn)
{
	int i;
	phys_addr_t addr;

	if (!crashk_res.end)
		return false;

	/* in reserved memory? */
	addr = __pfn_to_phys(pfn);
	if ((addr < crashk_res.start) || (crashk_res.end < addr))
		return false;

	if (!kexec_crash_image)
		return true;

	/* not part of loaded kernel image? */
	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		if (addr >= kexec_crash_image->segment[i].mem &&
				addr < (kexec_crash_image->segment[i].mem +
					kexec_crash_image->segment[i].memsz))
			return false;

	return true;
}

void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;
	struct page *page;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		page = phys_to_page(addr);
		free_reserved_page(page);
	}
}
#endif /* CONFIG_HIBERNATION */