// SPDX-License-Identifier: GPL-2.0
/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in the elilo bootloader,
 * based on an EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is set up appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
#include <linux/mem_encrypt.h>
#include <linux/sched/task.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820/api.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>

/*
 * We allocate runtime services regions top-down, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000, and limit the EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;

struct efi_scratch efi_scratch;

EXPORT_SYMBOL_GPL(efi_mm);

/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 *
 * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
 * allocation.
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd, *efi_pgd;
	p4d_t *p4d;
	pud_t *pud;
	gfp_t gfp_mask;

	if (efi_have_uv1_memmap())
		return 0;

	gfp_mask = GFP_KERNEL | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
	if (!efi_pgd)
		return -ENOMEM;

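	/*
	 * Pre-allocate the intermediate page-table levels (P4D/PUD) covering
	 * the EFI runtime region in the EFI page table, so the runtime
	 * mappings created later have table pages to hang off of.
	 */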
	pgd = efi_pgd + pgd_index(EFI_VA_END);
	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
	if (!p4d) {
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
	if (!pud) {
		if (pgtable_l5_enabled())
			free_page((unsigned long) pgd_page_vaddr(*pgd));
		free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
		return -ENOMEM;
	}

	efi_mm.pgd = efi_pgd;
	mm_init_cpumask(&efi_mm);
	init_new_context(NULL, &efi_mm);

	return 0;
}

/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	p4d_t *p4d_k, *p4d_efi;
	pud_t *pud_k, *pud_efi;
	pgd_t *efi_pgd = efi_mm.pgd;

	if (efi_have_uv1_memmap())
		return;

	/*
	 * We can share all PGD entries apart from the one entry that
	 * covers the EFI runtime mapping space.
	 *
	 * Make sure the EFI runtime region mappings are guaranteed to
	 * only span a single PGD entry and that the entry also maps
	 * other important kernel regions.
	 */
	MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
	MAYBE_BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
			(EFI_VA_END & PGDIR_MASK));

	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	/*
	 * As with PGDs, we share all P4D entries apart from the one entry
	 * that covers the EFI runtime mapping space.
	 */
	BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
	BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));

	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pgd_k = pgd_offset_k(EFI_VA_END);
	p4d_efi = p4d_offset(pgd_efi, 0);
	p4d_k = p4d_offset(pgd_k, 0);

	num_entries = p4d_index(EFI_VA_END);
	memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);

	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
	p4d_k = p4d_offset(pgd_k, EFI_VA_END);
	pud_efi = pud_offset(p4d_efi, 0);
	pud_k = pud_offset(p4d_k, 0);

	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

	pud_efi = pud_offset(p4d_efi, EFI_VA_START);
	pud_k = pud_offset(p4d_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}

/*
 * Wrapper for slow_virt_to_phys() that handles NULL addresses.
 */
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
	phys_addr_t pa;

	if (!va)
		return 0;

	if (virt_addr_valid(va))
		return virt_to_phys(va);

	pa = slow_virt_to_phys(va);

	/* check if the object crosses a page boundary */
	if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
		return 0;

	return pa;
}

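/*
 * Note: virt_to_phys_or_null() derives the size from the pointed-to type,
 * so it is only suitable for single objects, not for arbitrary buffers.
 */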
#define virt_to_phys_or_null(addr) \
	virt_to_phys_or_null_size((addr), sizeof(*(addr)))

int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	unsigned long pfn, text, pf;
	struct page *page;
	unsigned npages;
	pgd_t *pgd = efi_mm.pgd;

	if (efi_have_uv1_memmap())
		return 0;

	/*
	 * It can happen that the physical address of new_memmap lands in memory
	 * which is not mapped in the EFI page table. Therefore we need to go
	 * and ident-map those pages containing the map before calling
	 * phys_efi_set_virtual_address_map().
	 */
	pfn = pa_memmap >> PAGE_SHIFT;
	pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

	/*
	 * Certain firmware versions are way too sentimental and still believe
	 * they are exclusive and unquestionable owners of the first physical page,
	 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
	 * (but then write-access it later during SetVirtualAddressMap()).
	 *
	 * Create a 1:1 mapping for this page, to avoid triple faults during early
	 * boot with such firmware. We are free to hand this page to the BIOS,
	 * as trim_bios_range() will reserve the first page and isolate it away
	 * from memory allocators anyway.
	 */
	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
		pr_err("Failed to create 1:1 mapping for the first page!\n");
		return 1;
	}

	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!efi_is_mixed())
		return 0;

	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page) {
		pr_err("Unable to allocate EFI runtime stack < 4GB\n");
		return 1;
	}

	efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */

	npages = (__end_rodata_aligned - _text) >> PAGE_SHIFT;
	text = __pa(_text);
	pfn = text >> PAGE_SHIFT;

	pf = _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
		pr_err("Failed to map kernel text 1:1\n");
		return 1;
	}

	return 0;
}

static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	unsigned long flags = _PAGE_RW;
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;

	/*
	 * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF
	 * executable images in memory that consist of both R-X and
	 * RW- sections, so we cannot apply read-only or non-exec
	 * permissions just yet. However, modern EFI systems provide
	 * a memory attributes table that describes those sections
	 * with the appropriate restricted permissions, which are
	 * applied in efi_runtime_update_mappings() below. All other
	 * regions can be mapped non-executable at this point, with
	 * the exception of boot services code regions, but those will
	 * be unmapped again entirely in efi_free_boot_services().
	 */
	if (md->type != EFI_BOOT_SERVICES_CODE &&
	    md->type != EFI_RUNTIME_SERVICES_CODE)
		flags |= _PAGE_NX;

	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;

	if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
		flags |= _PAGE_ENC;

	pfn = md->phys_addr >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			   md->phys_addr, va);
}

void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	if (efi_have_uv1_memmap())
		return old_map_region(md);

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (efi_is_mixed()) {
		md->virt_addr = md->phys_addr;
		return;
	}

	efi_va -= size;

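	/*
	 * Keep the new VA congruent to the PA modulo 2M so that, where the
	 * alignment allows it, the region can be mapped with 2M pages.
	 */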
	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}

/*
 * The kexec kernel uses efi_map_region_fixed() to map EFI runtime memory
 * ranges. md->virt_addr is the original virtual address which had already
 * been mapped in the first kernel.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->phys_addr);
	__map_region(md, md->virt_addr);
}

void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
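	/* phys_addr points at the SETUP_EFI setup_data node; its payload follows the header. */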
	efi_setup = phys_addr + sizeof(struct setup_data);
}

static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
{
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;
	int err1, err2;

	/* Update the 1:1 mapping */
	pfn = md->phys_addr >> PAGE_SHIFT;
	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
	if (err1) {
		pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
			   md->phys_addr, md->virt_addr);
	}

	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
	if (err2) {
		pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
			   md->phys_addr, md->virt_addr);
	}

	return err1 || err2;
}

static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
{
	unsigned long pf = 0;

	if (md->attribute & EFI_MEMORY_XP)
		pf |= _PAGE_NX;

	if (!(md->attribute & EFI_MEMORY_RO))
		pf |= _PAGE_RW;

	if (sev_active())
		pf |= _PAGE_ENC;

	return efi_update_mappings(md, pf);
}

void __init efi_runtime_update_mappings(void)
{
	efi_memory_desc_t *md;

	if (efi_have_uv1_memmap()) {
		if (__supported_pte_mask & _PAGE_NX)
			runtime_code_page_mkexec();
		return;
	}

	/*
	 * Use the EFI Memory Attribute Table for mapping permissions if it
	 * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
	 */
	if (efi_enabled(EFI_MEM_ATTR)) {
		efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
		return;
	}

	/*
	 * EFI_MEMORY_ATTRIBUTES_TABLE is intended to replace
	 * EFI_PROPERTIES_TABLE. So, use EFI_PROPERTIES_TABLE to update
	 * permissions only if EFI_MEMORY_ATTRIBUTES_TABLE is not
	 * published by the firmware. Even if we find a buggy implementation of
	 * EFI_MEMORY_ATTRIBUTES_TABLE, don't fall back to
	 * EFI_PROPERTIES_TABLE, for the same reason.
	 */

	if (!efi_enabled(EFI_NX_PE_DATA))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long pf = 0;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;

		if ((md->attribute & EFI_MEMORY_XP) ||
		    (md->type == EFI_RUNTIME_SERVICES_DATA))
			pf |= _PAGE_NX;

		if (!(md->attribute & EFI_MEMORY_RO) &&
		    (md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;

		if (sev_active())
			pf |= _PAGE_ENC;

		efi_update_mappings(md, pf);
	}
}

void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	if (efi_have_uv1_memmap())
		ptdump_walk_pgd_level(NULL, &init_mm);
	else
		ptdump_walk_pgd_level(NULL, &efi_mm);
#endif
}

/*
 * Makes the calling thread switch to/from efi_mm context. Can be used
 * from both kernel-thread and user context. Preemption needs to remain
 * disabled while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used
 * because the mm cannot change under us.
 * Callers must ensure that there are no concurrent calls to this function.
 */
void efi_switch_mm(struct mm_struct *mm)
{
	efi_scratch.prev_mm = current->active_mm;
	current->active_mm = mm;
	switch_mm(efi_scratch.prev_mm, mm, NULL);
}

static DEFINE_SPINLOCK(efi_runtime_lock);

/*
 * DS and ES contain user values. We need to save them.
 * The 32-bit EFI code needs a valid DS, ES, and SS. There's no
 * need to save the old SS: __KERNEL_DS is always acceptable.
 */
#define __efi_thunk(func, ...)						\
({									\
	efi_runtime_services_32_t *__rt;				\
	unsigned short __ds, __es;					\
	efi_status_t ____s;						\
									\
	__rt = (void *)(unsigned long)efi.systab->mixed_mode.runtime;	\
									\
	savesegment(ds, __ds);						\
	savesegment(es, __es);						\
									\
	loadsegment(ss, __KERNEL_DS);					\
	loadsegment(ds, __KERNEL_DS);					\
	loadsegment(es, __KERNEL_DS);					\
									\
	____s = efi64_thunk(__rt->func, __VA_ARGS__);			\
									\
	loadsegment(ds, __ds);						\
	loadsegment(es, __es);						\
									\
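	/* Move the error bit from bit 31 (32-bit status) to bit 63 (64-bit status): */ \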
	____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32;	\
	____s;								\
})

/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables.
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(func...)						\
({									\
	efi_status_t __s;						\
									\
	arch_efi_call_virt_setup();					\
									\
	__s = __efi_thunk(func);					\
									\
	arch_efi_call_virt_teardown();					\
									\
	__s;								\
})

static efi_status_t __init __no_sanitize_address
efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
				  unsigned long descriptor_size,
				  u32 descriptor_version,
				  efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;

	efi_sync_low_kernel_mappings();
	local_irq_save(flags);

	efi_switch_mm(&efi_mm);

	status = __efi_thunk(set_virtual_address_map, memory_map_size,
			     descriptor_size, descriptor_version, virtual_map);

	efi_switch_mm(efi_scratch.prev_mm);
	local_irq_restore(flags);

	return status;
}

static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	return EFI_UNSUPPORTED;
}

static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}

static unsigned long efi_name_size(efi_char16_t *name)
{
	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}

static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

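	/*
	 * Bounce the vendor GUID through an aligned stack buffer so it cannot
	 * straddle a page boundary when it is passed to the firmware by
	 * physical address.
	 */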
	*vnd = *vendor;

	phys_data_size = virt_to_phys_or_null(data_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_attr = virt_to_phys_or_null(attr);
	phys_data = virt_to_phys_or_null_size(data, *data_size);

	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_variable, phys_name, phys_vendor,
				   phys_attr, phys_data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	if (!phys_name || !phys_data)
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
				   u32 attr, unsigned long data_size,
				   void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;

	*vnd = *vendor;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	if (!phys_name || !phys_data)
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_name_size = virt_to_phys_or_null(name_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, *name_size);

	if (!phys_name)
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_next_variable, phys_name_size,
				   phys_name, phys_vendor);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

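	/* GetNextVariableName() writes the GUID back, so copy it out of the bounce buffer. */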
	*vendor = *vnd;
	return status;
}

static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	return EFI_UNSUPPORTED;
}

static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	phys_data = virt_to_phys_or_null_size(data, data_size);

	efi_thunk(reset_system, reset_type, status, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);
}

static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
					  u64 *remaining_space,
					  u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}

void __init efi_thunk_runtime_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return;

	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}

efi_status_t __init __no_sanitize_address
efi_set_virtual_address_map(unsigned long memory_map_size,
			    unsigned long descriptor_size,
			    u32 descriptor_version,
			    efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;
	pgd_t *save_pgd = NULL;

	if (efi_is_mixed())
		return efi_thunk_set_virtual_address_map(memory_map_size,
							 descriptor_size,
							 descriptor_version,
							 virtual_map);

	if (efi_have_uv1_memmap()) {
		save_pgd = efi_uv1_memmap_phys_prolog();
		if (!save_pgd)
			return EFI_ABORTED;
	} else {
		efi_switch_mm(&efi_mm);
	}

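	/* The firmware may use FPU/SSE registers, so save the current FPU state around the call. */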
	kernel_fpu_begin();

	/* Disable interrupts around EFI calls: */
	local_irq_save(flags);
	status = efi_call(efi.systab->runtime->set_virtual_address_map,
			  memory_map_size, descriptor_size,
			  descriptor_version, virtual_map);
	local_irq_restore(flags);

	kernel_fpu_end();

	if (save_pgd)
		efi_uv1_memmap_phys_epilog(save_pgd);
	else
		efi_switch_mm(efi_scratch.prev_mm);

	return status;
}