// SPDX-License-Identifier: GPL-2.0
/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in the elilo
 * bootloader based on an EFI patch by Edgar Hucek. Based on the E820 map,
 * the page table is set up appropriately for EFI runtime code.
 */
#define pr_fmt(fmt) "efi: " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820/api.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
/*
 * We allocate runtime services regions top-down, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000, and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;

struct efi_scratch efi_scratch;
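/*
 * Note (illustrative, assuming the usual definitions in asm/efi.h where
 * EFI_VA_END == EFI_VA_START - 64G): the usable window is
 * (EFI_VA_END, EFI_VA_START]. efi_map_region() below walks efi_va
 * downwards for each runtime region; dropping below EFI_VA_END means the
 * 64G budget is exhausted and the mapping request is rejected.
 */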
static void __init early_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI service code area executable */
	for_each_efi_memory_desc(md) {
		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_CODE)
			efi_set_executable(md, executable);
	}
}
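/*
 * Note: this helper only matters for the efi=old_map path below.
 * efi_call_phys_prolog() marks the service code executable (1) and
 * efi_call_phys_epilog() clears it again (0); it is a no-op on CPUs
 * without NX support.
 */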
pgd_t * __init efi_call_phys_prolog(void)
{
	unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
	pgd_t *save_pgd, *pgd_k, *pgd_efi;
	p4d_t *p4d, *p4d_k, *p4d_efi;
	pud_t *pud;
	int pgd, n_pgds, i, j;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		save_pgd = (pgd_t *)__read_cr3();
		write_cr3((unsigned long)efi_scratch.efi_pgt);
		goto out;
	}

	early_code_mapping_set_exec(1);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
	if (!save_pgd)
		return NULL;

	/*
	 * Build 1:1 identity mapping for efi=old_map usage. Note that
	 * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
	 * it is PUD_SIZE aligned with KASLR enabled. So for a given
	 * physical address X, pud_index(X) may differ from
	 * pud_index(__va(X)); we can only copy the PUD entry of __va(X)
	 * into the PUD entry of X to build the 1:1 mapping. This means
	 * here we can only reuse the PMD tables of the direct mapping.
	 */
	for (pgd = 0; pgd < n_pgds; pgd++) {
		addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
		vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
		pgd_efi = pgd_offset_k(addr_pgd);
		save_pgd[pgd] = *pgd_efi;

		p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
		if (!p4d) {
			pr_err("Failed to allocate p4d table!\n");
			goto out;
		}

		for (i = 0; i < PTRS_PER_P4D; i++) {
			addr_p4d = addr_pgd + i * P4D_SIZE;
			p4d_efi = p4d + p4d_index(addr_p4d);

			pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
			if (!pud) {
				pr_err("Failed to allocate pud table!\n");
				goto out;
			}

			for (j = 0; j < PTRS_PER_PUD; j++) {
				addr_pud = addr_p4d + j * PUD_SIZE;

				if (addr_pud > (max_pfn << PAGE_SHIFT))
					break;

				vaddr = (unsigned long)__va(addr_pud);

				pgd_k = pgd_offset_k(vaddr);
				p4d_k = p4d_offset(pgd_k, vaddr);
				pud[j] = *pud_offset(p4d_k, vaddr);
			}
		}
	}
out:
	__flush_tlb_all();

	return save_pgd;
}
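/*
 * Typical call pattern (illustrative; the actual caller lives in
 * arch/x86/platform/efi/efi.c):
 *
 *	save_pgd = efi_call_phys_prolog();
 *	status = efi_call_phys(...);
 *	efi_call_phys_epilog(save_pgd);
 *
 * so every page-table change made above is paired with a restore.
 */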
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd_idx, i;
	int nr_pgds;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		write_cr3((unsigned long)save_pgd);
		__flush_tlb_all();
		return;
	}

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);

		if (!(pgd_val(*pgd) & _PAGE_PRESENT))
			continue;

		for (i = 0; i < PTRS_PER_P4D; i++) {
			p4d = p4d_offset(pgd,
					 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);

			if (!(p4d_val(*p4d) & _PAGE_PRESENT))
				continue;

			pud = (pud_t *)p4d_page_vaddr(*p4d);
			pud_free(&init_mm, pud);
		}

		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
		p4d_free(&init_mm, p4d);
	}

	kfree(save_pgd);

	__flush_tlb_all();
	early_code_mapping_set_exec(0);
}
static pgd_t *efi_pgd;

/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	gfp_t gfp_mask;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
	if (!efi_pgd)
		return -ENOMEM;

	pgd = efi_pgd + pgd_index(EFI_VA_END);
	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
	if (!p4d) {
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
	if (!pud) {
		if (CONFIG_PGTABLE_LEVELS > 4)
			free_page((unsigned long) pgd_page_vaddr(*pgd));
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	return 0;
}
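/*
 * Design note: a single page suffices for efi_pgd because only the top
 * level is private; the p4d/pud needed for the EFI region are allocated
 * above with p4d_alloc()/pud_alloc(), and all shared entries are copied
 * in later by efi_sync_low_kernel_mappings().
 */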
/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	p4d_t *p4d_k, *p4d_efi;
	pud_t *pud_k, *pud_efi;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	/*
	 * We can share all PGD entries apart from the one entry that
	 * covers the EFI runtime mapping space.
	 *
	 * Make sure the EFI runtime region mappings are guaranteed to
	 * only span a single PGD entry and that the entry also maps
	 * other important kernel regions.
	 */
	BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
	BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
			(EFI_VA_END & PGDIR_MASK));

	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	/*
	 * As with PGDs, we share all P4D entries apart from the one entry
	 * that covers the EFI runtime mapping space.
	 */
	BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
	BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));

	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pgd_k = pgd_offset_k(EFI_VA_END);
	p4d_efi = p4d_offset(pgd_efi, 0);
	p4d_k = p4d_offset(pgd_k, 0);

	num_entries = p4d_index(EFI_VA_END);
	memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);

	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
	p4d_k = p4d_offset(pgd_k, EFI_VA_END);
	pud_efi = pud_offset(p4d_efi, 0);
	pud_k = pud_offset(p4d_k, 0);

	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

	pud_efi = pud_offset(p4d_efi, EFI_VA_START);
	pud_k = pud_offset(p4d_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}
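/*
 * Example caller (from this file): efi_thunk_set_virtual_address_map()
 * invokes this before switching CR3 to efi_scratch.efi_pgt, so that
 * arguments living in the low kernel mappings stay addressable while the
 * EFI page tables are live.
 */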
/*
 * Wrapper for slow_virt_to_phys() that handles NULL addresses.
 */
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
	bool bad_size;

	if (!va)
		return 0;

	if (virt_addr_valid(va))
		return virt_to_phys(va);

	/*
	 * A fully aligned variable on the stack is guaranteed not to
	 * cross a page boundary. Try to catch strings on the stack by
	 * checking that 'size' is a power of two.
	 */
	bad_size = size > PAGE_SIZE || !is_power_of_2(size);

	WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);

	return slow_virt_to_phys(va);
}

#define virt_to_phys_or_null(addr) \
	virt_to_phys_or_null_size((addr), sizeof(*(addr)))
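/*
 * Usage sketch (illustrative): for a pointer to a fixed-size object the
 * macro derives the size automatically, e.g.
 *
 *	efi_time_t *tm;
 *	u32 phys_tm = virt_to_phys_or_null(tm);
 *
 * while variable-length buffers must spell the size out via
 * virt_to_phys_or_null_size(), as the thunk wrappers below do.
 */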
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	unsigned long pfn, text, pf;
	struct page *page;
	unsigned npages;
	pgd_t *pgd;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	/*
	 * Since the PGD is encrypted, set the encryption mask so that when
	 * this value is loaded into cr3 the PGD will be decrypted during
	 * the pagetable walk.
	 */
	efi_scratch.efi_pgt = (pgd_t *)__sme_pa(efi_pgd);
	pgd = efi_pgd;

	/*
	 * It can happen that the physical address of new_memmap lands in
	 * memory which is not mapped in the EFI page table. Therefore we
	 * need to go and ident-map those pages containing the map before
	 * calling phys_efi_set_virtual_address_map().
	 */
	pfn = pa_memmap >> PAGE_SHIFT;
	pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

	efi_scratch.use_pgd = true;

	/*
	 * Certain firmware versions are way too sentimental and still believe
	 * they are exclusive and unquestionable owners of the first physical
	 * page, even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
	 * (but then write-access it later during SetVirtualAddressMap()).
	 *
	 * Create a 1:1 mapping for this page, to avoid triple faults during
	 * early boot with such firmware. We are free to hand this page to the
	 * BIOS, as trim_bios_range() will reserve the first page and isolate
	 * it away from memory allocators anyway.
	 */
	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
		pr_err("Failed to create 1:1 mapping for the first page!\n");
		return 1;
	}

	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
		return 0;

	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page)
		panic("Unable to allocate EFI runtime stack < 4GB\n");

	efi_scratch.phys_stack = virt_to_phys(page_address(page));
	efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */

	npages = (_etext - _text) >> PAGE_SHIFT;
	text = __pa(_text);
	pfn = text >> PAGE_SHIFT;

	pf = _PAGE_RW | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
		pr_err("Failed to map kernel text 1:1\n");
		return 1;
	}

	return 0;
}
static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	unsigned long flags = _PAGE_RW;
	unsigned long pfn;
	pgd_t *pgd = efi_pgd;

	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;

	pfn = md->phys_addr >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}
void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return old_map_region(md);

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (!efi_is_native() && IS_ENABLED(CONFIG_EFI_MIXED)) {
		md->virt_addr = md->phys_addr;
		return;
	}

	efi_va -= size;

	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}
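/*
 * Worked example (illustrative): for a region with pa == 0x7f201000 and
 * size == 0x3000, efi_va first drops by 0x3000 and is then adjusted so
 * that (efi_va & ~PMD_MASK) == (pa & ~PMD_MASK) == 0x1000. Keeping the
 * same offset within a 2M page is what allows the mapping code to use
 * large pages where possible.
 */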
/*
 * kexec kernel will use efi_map_region_fixed to map efi runtime memory
 * ranges. md->virt_addr is the original virtual address which had been
 * mapped in the first kernel.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->phys_addr);
	__map_region(md, md->virt_addr);
}
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}
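/*
 * Note: if init_memory_mapping() covers only part of the range, the
 * function above recurses on the uncovered tail; the recursive call is
 * made purely for its mapping side effect, so its return value is ignored.
 */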
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}
static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
{
	unsigned long pfn;
	pgd_t *pgd = efi_pgd;
	int err1, err2;

	/* Update the 1:1 mapping */
	pfn = md->phys_addr >> PAGE_SHIFT;
	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
	if (err1) {
		pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
		       md->phys_addr, md->virt_addr);
	}

	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
	if (err2) {
		pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
		       md->phys_addr, md->virt_addr);
	}

	return err1 || err2;
}
static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
{
	unsigned long pf = 0;

	if (md->attribute & EFI_MEMORY_XP)
		pf |= _PAGE_NX;

	if (!(md->attribute & EFI_MEMORY_RO))
		pf |= _PAGE_RW;

	return efi_update_mappings(md, pf);
}
void __init efi_runtime_update_mappings(void)
{
	efi_memory_desc_t *md;

	if (efi_enabled(EFI_OLD_MEMMAP)) {
		if (__supported_pte_mask & _PAGE_NX)
			runtime_code_page_mkexec();
		return;
	}

	/*
	 * Use the EFI Memory Attribute Table for mapping permissions if it
	 * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
	 */
	if (efi_enabled(EFI_MEM_ATTR)) {
		efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
		return;
	}

	/*
	 * EFI_MEMORY_ATTRIBUTES_TABLE is intended to replace
	 * EFI_PROPERTIES_TABLE. So, use EFI_PROPERTIES_TABLE to update
	 * permissions only if EFI_MEMORY_ATTRIBUTES_TABLE is not
	 * published by the firmware. Even if we find a buggy implementation
	 * of EFI_MEMORY_ATTRIBUTES_TABLE, don't fall back to
	 * EFI_PROPERTIES_TABLE, for the same reason.
	 */
	if (!efi_enabled(EFI_NX_PE_DATA))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long pf = 0;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;

		if ((md->attribute & EFI_MEMORY_XP) ||
		    (md->type == EFI_RUNTIME_SERVICES_DATA))
			pf |= _PAGE_NX;

		if (!(md->attribute & EFI_MEMORY_RO) &&
		    (md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;

		efi_update_mappings(md, pf);
	}
}
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	if (efi_enabled(EFI_OLD_MEMMAP))
		ptdump_walk_pgd_level(NULL, swapper_pg_dir);
	else
		ptdump_walk_pgd_level(NULL, efi_pgd);
#endif
}
#ifdef CONFIG_EFI_MIXED
extern efi_status_t efi64_thunk(u32, ...);

#define runtime_service32(func) \
({ \
	u32 table = (u32)(unsigned long)efi.systab; \
	u32 *rt, *___f; \
 \
	rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime)); \
	___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
	*___f; \
})

/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables. arch_efi_call_virt_setup() must be called before
 * runtime_service32().
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(f, ...) \
({ \
	efi_status_t __s; \
	unsigned long __flags; \
	u32 __func; \
 \
	local_irq_save(__flags); \
	arch_efi_call_virt_setup(); \
 \
	__func = runtime_service32(f); \
	__s = efi64_thunk(__func, __VA_ARGS__); \
 \
	arch_efi_call_virt_teardown(); \
	local_irq_restore(__flags); \
 \
	__s; \
})
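/*
 * Usage sketch (illustrative): the wrappers below call 32-bit services by
 * name, passing 32-bit physical addresses only, e.g.
 *
 *	status = efi_thunk(get_time, phys_tm, phys_tc);
 *
 * efi_thunk() resolves 'get_time' in the 32-bit runtime services table
 * and dispatches through efi64_thunk().
 */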
efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;
	u32 func;

	efi_sync_low_kernel_mappings();
	local_irq_save(flags);

	efi_scratch.prev_cr3 = __read_cr3();
	write_cr3((unsigned long)efi_scratch.efi_pgt);
	__flush_tlb_all();

	func = (u32)(unsigned long)phys_set_virtual_address_map;
	status = efi64_thunk(func, memory_map_size, descriptor_size,
			     descriptor_version, virtual_map);

	write_cr3(efi_scratch.prev_cr3);
	__flush_tlb_all();
	local_irq_restore(flags);

	return status;
}
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	efi_status_t status;
	u32 phys_tm, phys_tc;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys_or_null(tm);
	phys_tc = virt_to_phys_or_null(tc);

	status = efi_thunk(get_time, phys_tm, phys_tc);

	spin_unlock(&rtc_lock);

	return status;
}
static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_tm;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys_or_null(tm);

	status = efi_thunk(set_time, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}
static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_enabled, phys_pending, phys_tm;

	spin_lock(&rtc_lock);

	phys_enabled = virt_to_phys_or_null(enabled);
	phys_pending = virt_to_phys_or_null(pending);
	phys_tm = virt_to_phys_or_null(tm);

	status = efi_thunk(get_wakeup_time, phys_enabled,
			   phys_pending, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}
static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_tm;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys_or_null(tm);

	status = efi_thunk(set_wakeup_time, enabled, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}
static unsigned long efi_name_size(efi_char16_t *name)
{
	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}
static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;

	phys_data_size = virt_to_phys_or_null(data_size);
	phys_vendor = virt_to_phys_or_null(vendor);
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_attr = virt_to_phys_or_null(attr);
	phys_data = virt_to_phys_or_null_size(data, *data_size);

	status = efi_thunk(get_variable, phys_name, phys_vendor,
			   phys_attr, phys_data_size, phys_data);

	return status;
}
static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
{
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vendor);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	/* If data_size is > sizeof(u32) we've got problems */
	status = efi_thunk(set_variable, phys_name, phys_vendor,
			   attr, data_size, phys_data);

	return status;
}
static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;

	phys_name_size = virt_to_phys_or_null(name_size);
	phys_vendor = virt_to_phys_or_null(vendor);
	phys_name = virt_to_phys_or_null_size(name, *name_size);

	status = efi_thunk(get_next_variable, phys_name_size,
			   phys_name, phys_vendor);

	return status;
}
static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	efi_status_t status;
	u32 phys_count;

	phys_count = virt_to_phys_or_null(count);
	status = efi_thunk(get_next_high_mono_count, phys_count);

	return status;
}
static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;

	phys_data = virt_to_phys_or_null_size(data, data_size);

	efi_thunk(reset_system, reset_type, status, data_size, phys_data);
}
static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	return status;
}
static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
void efi_thunk_runtime_setup(void)
{
	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
891 #endif /* CONFIG_EFI_MIXED */