/*
 * Common EFI (Extensible Firmware Interface) support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2002 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 * Copyright (C) 2013 SuSE Labs
 *	Borislav Petkov <bp@suse.de> - runtime services VA mapping
 *
 * Copied from efi_32.c to eliminate the duplicated code between EFI
 * 32/64 support code. --ying 2007-10-26
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>

#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/time.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/rtc.h>
#include <asm/uv/uv.h>

static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;

static efi_config_table_type_t arch_tables[] __initdata = {
#ifdef CONFIG_X86_UV
	{UV_SYSTEM_TABLE_GUID, "UVsystab", &efi.uv_systab},
#endif
	{NULL_GUID, NULL, NULL},
};

u64 efi_setup;		/* efi setup_data physical address */

static int add_efi_memmap __initdata;
static int __init setup_add_efi_memmap(char *arg)
{
	add_efi_memmap = 1;
	return 0;
}
early_param("add_efi_memmap", setup_add_efi_memmap);

static efi_status_t __init phys_efi_set_virtual_address_map(
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;
	pgd_t *save_pgd;

	save_pgd = efi_call_phys_prolog();

	/* Disable interrupts around EFI calls: */
	local_irq_save(flags);
	status = efi_call_phys(efi_phys.set_virtual_address_map,
			       memory_map_size, descriptor_size,
			       descriptor_version, virtual_map);
	local_irq_restore(flags);

	efi_call_phys_epilog(save_pgd);

	return status;
}

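/*
 * Scan the EFI memory map for ranges flagged EFI_MEMORY_MORE_RELIABLE and
 * mark them as mirrored memory in memblock, reporting the mirrored total.
 */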
void __init efi_find_mirror(void)
{
	efi_memory_desc_t *md;
	u64 mirror_size = 0, total_size = 0;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

		total_size += size;
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			memblock_mark_mirror(start, size);
			mirror_size += size;
		}
	}
	if (mirror_size)
		pr_info("Memory: %lldM/%lldM mirrored memory\n",
			mirror_size>>20, total_size>>20);
}

/*
 * Tell the kernel about the EFI memory map.  This might include
 * more than the max 128 entries that can fit in the e820 legacy
 * (zeropage) memory map.
 */

static void __init do_add_efi_memmap(void)
{
	efi_memory_desc_t *md;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
		int e820_type;

		switch (md->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
			if (md->attribute & EFI_MEMORY_WB)
				e820_type = E820_RAM;
			else
				e820_type = E820_RESERVED;
			break;
		case EFI_ACPI_RECLAIM_MEMORY:
			e820_type = E820_ACPI;
			break;
		case EFI_ACPI_MEMORY_NVS:
			e820_type = E820_NVS;
			break;
		case EFI_UNUSABLE_MEMORY:
			e820_type = E820_UNUSABLE;
			break;
		case EFI_PERSISTENT_MEMORY:
			e820_type = E820_PMEM;
			break;
		default:
			/*
			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
			 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
			 */
			e820_type = E820_RESERVED;
			break;
		}
		e820_add_region(start, size, e820_type);
	}
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}

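/*
 * Reserve the physical memory holding the EFI memory map passed in via
 * boot_params so it is not reused before efi_memmap_init() has mapped it.
 */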
int __init efi_memblock_x86_reserve_range(void)
{
	struct efi_info *e = &boot_params.efi_info;
	phys_addr_t pmap;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

#ifdef CONFIG_X86_32
	/* Can't handle data above 4GB at this time */
	if (e->efi_memmap_hi) {
		pr_err("Memory map is above 4GB, disabling EFI.\n");
		return -EINVAL;
	}
	pmap = e->efi_memmap;
#else
	pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif
	efi.memmap.phys_map = pmap;
	efi.memmap.nr_map = e->efi_memmap_size /
			    e->efi_memdesc_size;
	efi.memmap.desc_size = e->efi_memdesc_size;
	efi.memmap.desc_version = e->efi_memdesc_version;

	WARN(efi.memmap.desc_version != 1,
	     "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
	     efi.memmap.desc_version);

	memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);

	return 0;
}

void __init efi_print_memmap(void)
{
	efi_memory_desc_t *md;
	int i = 0;

	for_each_efi_memory_desc(md) {
		char buf[64];

		pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
			i++, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr,
			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
	}
}

void __init efi_unmap_memmap(void)
{
	unsigned long size;

	clear_bit(EFI_MEMMAP, &efi.flags);

	size = efi.memmap.nr_map * efi.memmap.desc_size;
	if (efi.memmap.map) {
		early_memunmap(efi.memmap.map, size);
		efi.memmap.map = NULL;
	}
}

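/*
 * Copy the firmware's system table (32-bit or 64-bit layout, or the copy
 * passed via kexec setup_data) into the kernel's efi_systab and sanity-check
 * its signature and revision.
 */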
static int __init efi_systab_init(void *phys)
{
	if (efi_enabled(EFI_64BIT)) {
		efi_system_table_64_t *systab64;
		struct efi_setup_data *data = NULL;
		u64 tmp = 0;

		if (efi_setup) {
			data = early_memremap(efi_setup, sizeof(*data));
			if (!data)
				return -ENOMEM;
		}
		systab64 = early_memremap((unsigned long)phys,
					  sizeof(*systab64));
		if (systab64 == NULL) {
			pr_err("Couldn't map the system table!\n");
			if (data)
				early_memunmap(data, sizeof(*data));
			return -ENOMEM;
		}

		efi_systab.hdr = systab64->hdr;
		efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor :
					      systab64->fw_vendor;
		tmp |= data ? data->fw_vendor : systab64->fw_vendor;
		efi_systab.fw_revision = systab64->fw_revision;
		efi_systab.con_in_handle = systab64->con_in_handle;
		tmp |= systab64->con_in_handle;
		efi_systab.con_in = systab64->con_in;
		tmp |= systab64->con_in;
		efi_systab.con_out_handle = systab64->con_out_handle;
		tmp |= systab64->con_out_handle;
		efi_systab.con_out = systab64->con_out;
		tmp |= systab64->con_out;
		efi_systab.stderr_handle = systab64->stderr_handle;
		tmp |= systab64->stderr_handle;
		efi_systab.stderr = systab64->stderr;
		tmp |= systab64->stderr;
		efi_systab.runtime = data ?
				     (void *)(unsigned long)data->runtime :
				     (void *)(unsigned long)systab64->runtime;
		tmp |= data ? data->runtime : systab64->runtime;
		efi_systab.boottime = (void *)(unsigned long)systab64->boottime;
		tmp |= systab64->boottime;
		efi_systab.nr_tables = systab64->nr_tables;
		efi_systab.tables = data ? (unsigned long)data->tables :
					   systab64->tables;
		tmp |= data ? data->tables : systab64->tables;

		early_memunmap(systab64, sizeof(*systab64));
		if (data)
			early_memunmap(data, sizeof(*data));
#ifdef CONFIG_X86_32
		if (tmp >> 32) {
			pr_err("EFI data located above 4GB, disabling EFI.\n");
			return -EINVAL;
		}
#endif
	} else {
		efi_system_table_32_t *systab32;

		systab32 = early_memremap((unsigned long)phys,
					  sizeof(*systab32));
		if (systab32 == NULL) {
			pr_err("Couldn't map the system table!\n");
			return -ENOMEM;
		}

		efi_systab.hdr = systab32->hdr;
		efi_systab.fw_vendor = systab32->fw_vendor;
		efi_systab.fw_revision = systab32->fw_revision;
		efi_systab.con_in_handle = systab32->con_in_handle;
		efi_systab.con_in = systab32->con_in;
		efi_systab.con_out_handle = systab32->con_out_handle;
		efi_systab.con_out = systab32->con_out;
		efi_systab.stderr_handle = systab32->stderr_handle;
		efi_systab.stderr = systab32->stderr;
		efi_systab.runtime = (void *)(unsigned long)systab32->runtime;
		efi_systab.boottime = (void *)(unsigned long)systab32->boottime;
		efi_systab.nr_tables = systab32->nr_tables;
		efi_systab.tables = systab32->tables;

		early_memunmap(systab32, sizeof(*systab32));
	}

	efi.systab = &efi_systab;

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}
	if ((efi.systab->hdr.revision >> 16) == 0)
		pr_err("Warning: System table version %d.%02d, expected 1.00 or greater!\n",
		       efi.systab->hdr.revision >> 16,
		       efi.systab->hdr.revision & 0xffff);

	return 0;
}

static int __init efi_runtime_init32(void)
{
	efi_runtime_services_32_t *runtime;

	runtime = early_memremap((unsigned long)efi.systab->runtime,
				 sizeof(efi_runtime_services_32_t));
	if (!runtime) {
		pr_err("Could not map the runtime service table!\n");
		return -ENOMEM;
	}

	/*
	 * We will only need *early* access to the SetVirtualAddressMap
	 * EFI runtime service. All other runtime services will be called
	 * via the virtual mapping.
	 */
	efi_phys.set_virtual_address_map =
			(efi_set_virtual_address_map_t *)
			(unsigned long)runtime->set_virtual_address_map;
	early_memunmap(runtime, sizeof(efi_runtime_services_32_t));

	return 0;
}

static int __init efi_runtime_init64(void)
{
	efi_runtime_services_64_t *runtime;

	runtime = early_memremap((unsigned long)efi.systab->runtime,
				 sizeof(efi_runtime_services_64_t));
	if (!runtime) {
		pr_err("Could not map the runtime service table!\n");
		return -ENOMEM;
	}

	/*
	 * We will only need *early* access to the SetVirtualAddressMap
	 * EFI runtime service. All other runtime services will be called
	 * via the virtual mapping.
	 */
	efi_phys.set_virtual_address_map =
			(efi_set_virtual_address_map_t *)
			(unsigned long)runtime->set_virtual_address_map;
	early_memunmap(runtime, sizeof(efi_runtime_services_64_t));

	return 0;
}

static int __init efi_runtime_init(void)
{
	int rv;

	/*
	 * Check out the runtime services table. We need to map
	 * the runtime services table so that we can grab the physical
	 * address of several of the EFI runtime functions, needed to
	 * set the firmware into virtual mode.
	 *
	 * When EFI_PARAVIRT is in force we cannot map the runtime
	 * services memory region because we do not have direct access to
	 * it. However, runtime services are still available through proxy
	 * functions (e.g. the Xen dom0 EFI implementation issues a special
	 * hypercall which executes the relevant EFI function), which is
	 * why they are always enabled here.
	 */

	if (!efi_enabled(EFI_PARAVIRT)) {
		if (efi_enabled(EFI_64BIT))
			rv = efi_runtime_init64();
		else
			rv = efi_runtime_init32();

		if (rv)
			return rv;
	}

	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	return 0;
}

static int __init efi_memmap_init(void)
{
	unsigned long addr, size;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

	/* Map the EFI memory map */
	size = efi.memmap.nr_map * efi.memmap.desc_size;
	addr = (unsigned long)efi.memmap.phys_map;

	efi.memmap.map = early_memremap(addr, size);
	if (efi.memmap.map == NULL) {
		pr_err("Could not map the memory map!\n");
		return -ENOMEM;
	}

	efi.memmap.map_end = efi.memmap.map + size;

	if (add_efi_memmap)
		do_add_efi_memmap();

	set_bit(EFI_MEMMAP, &efi.flags);

	return 0;
}

void __init efi_init(void)
{
	efi_char16_t *c16;
	char vendor[100] = "unknown";
	int i = 0;
	void *tmp;

#ifdef CONFIG_X86_32
	if (boot_params.efi_info.efi_systab_hi ||
	    boot_params.efi_info.efi_memmap_hi) {
		pr_info("Table located above 4GB, disabling EFI.\n");
		return;
	}
	efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
#else
	efi_phys.systab = (efi_system_table_t *)
			  (boot_params.efi_info.efi_systab |
			  ((__u64)boot_params.efi_info.efi_systab_hi<<32));
#endif

	if (efi_systab_init(efi_phys.systab))
		return;

	efi.config_table = (unsigned long)efi.systab->tables;
	efi.fw_vendor	 = (unsigned long)efi.systab->fw_vendor;
	efi.runtime	 = (unsigned long)efi.systab->runtime;

	/*
	 * Show what we know for posterity
	 */
	c16 = tmp = early_memremap(efi.systab->fw_vendor, 2);
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
			vendor[i] = *c16++;
		vendor[i] = '\0';
	} else
		pr_err("Could not map the firmware vendor!\n");
	early_memunmap(tmp, 2);

	pr_info("EFI v%u.%.02u by %s\n",
		efi.systab->hdr.revision >> 16,
		efi.systab->hdr.revision & 0xffff, vendor);

	if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables))
		return;

	if (efi_config_init(arch_tables))
		return;

	/*
	 * Note: We currently don't support runtime services on an EFI
	 * that doesn't match the kernel 32/64-bit mode.
	 */

	if (!efi_runtime_supported())
		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
	else {
		if (efi_runtime_disabled() || efi_runtime_init())
			return;
	}
	if (efi_memmap_init())
		return;

	if (efi_enabled(EFI_DBG))
		efi_print_memmap();

	efi_esrt_init();
}

void __init efi_late_init(void)
{
	efi_bgrt_init();
}

void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
{
	u64 addr, npages;

	addr = md->virt_addr;
	npages = md->num_pages;

	memrange_efi_to_native(&addr, &npages);

	if (executable)
		set_memory_x(addr, npages);
	else
		set_memory_nx(addr, npages);
}

void __init runtime_code_page_mkexec(void)
{
	efi_memory_desc_t *md;

	/* Make EFI runtime service code area executable */
	for_each_efi_memory_desc(md) {
		if (md->type != EFI_RUNTIME_SERVICES_CODE)
			continue;

		efi_set_executable(md, true);
	}
}

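/*
 * Switch the caching attributes of the given range to uncached (UC),
 * rounding the size up to whole EFI pages first.
 */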
void __init efi_memory_uc(u64 addr, unsigned long size)
{
	unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
	u64 npages;

	npages = round_up(size, page_shift) / page_shift;
	memrange_efi_to_native(&addr, &npages);
	set_memory_uc(addr, npages);
}

void __init old_map_region(efi_memory_desc_t *md)
{
	u64 start_pfn, end_pfn, end;
	unsigned long size;
	void *va;

	start_pfn = PFN_DOWN(md->phys_addr);
	size	  = md->num_pages << PAGE_SHIFT;
	end	  = md->phys_addr + size;
	end_pfn   = PFN_UP(end);

	if (pfn_range_is_mapped(start_pfn, end_pfn)) {
		va = __va(md->phys_addr);

		if (!(md->attribute & EFI_MEMORY_WB))
			efi_memory_uc((u64)(unsigned long)va, size);
	} else
		va = efi_ioremap(md->phys_addr, size,
				 md->type, md->attribute);

	md->virt_addr = (u64) (unsigned long) va;
	if (!va)
		pr_err("ioremap of 0x%llX failed!\n",
		       (unsigned long long)md->phys_addr);
}

/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
	efi_memory_desc_t *md, *prev_md = NULL;

	for_each_efi_memory_desc(md) {
		u64 prev_size;

		if (!prev_md) {
			prev_md = md;
			continue;
		}

		if (prev_md->type != md->type ||
		    prev_md->attribute != md->attribute) {
			prev_md = md;
			continue;
		}

		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;

		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
			prev_md->num_pages += md->num_pages;
			md->type = EFI_RESERVED_TYPE;
			md->attribute = 0;
			continue;
		}
		prev_md = md;
	}
}

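/*
 * If this descriptor covers the physical EFI system table, compute its new
 * virtual address and update efi.systab accordingly.
 */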
static void __init get_systab_virt_addr(efi_memory_desc_t *md)
{
	unsigned long size;
	u64 end, systab;

	size = md->num_pages << EFI_PAGE_SHIFT;
	end = md->phys_addr + size;
	systab = (u64)(unsigned long)efi_phys.systab;
	if (md->phys_addr <= systab && systab < end) {
		systab += md->virt_addr - md->phys_addr;
		efi.systab = (efi_system_table_t *)(unsigned long)systab;
	}
}

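/*
 * Save a copy of the runtime-relevant memory descriptors so that a kexec'd
 * kernel can reuse the same virtual mappings, see efi_runtime_map_setup().
 */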
static void __init save_runtime_map(void)
{
#ifdef CONFIG_KEXEC_CORE
	unsigned long desc_size;
	efi_memory_desc_t *md;
	void *tmp, *q = NULL;
	int count = 0;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	desc_size = efi.memmap.desc_size;

	for_each_efi_memory_desc(md) {
		if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
		    (md->type == EFI_BOOT_SERVICES_CODE) ||
		    (md->type == EFI_BOOT_SERVICES_DATA))
			continue;
		tmp = krealloc(q, (count + 1) * desc_size, GFP_KERNEL);
		if (!tmp)
			goto out;
		q = tmp;

		memcpy(q + count * desc_size, md, desc_size);
		count++;
	}

	efi_runtime_map_setup(q, count, desc_size);
	return;

out:
	kfree(q);
	pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
#endif
}

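/*
 * Grow the buffer holding the new memory map by one page order, copying any
 * existing contents and freeing the old allocation.
 */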
static void *realloc_pages(void *old_memmap, int old_shift)
{
	void *ret;

	ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
	if (!ret)
		goto out;

	/*
	 * A first-time allocation doesn't have anything to copy.
	 */
	if (!old_memmap)
		return ret;

	memcpy(ret, old_memmap, PAGE_SIZE << old_shift);

out:
	free_pages((unsigned long)old_memmap, old_shift);
	return ret;
}

/*
 * Iterate the EFI memory map in reverse order because the regions
 * will be mapped top-down. The end result is the same as if we had
 * mapped things forward, but doesn't require us to change the
 * existing implementation of efi_map_region().
 */
static inline void *efi_map_next_entry_reverse(void *entry)
{
	/* Initial call */
	if (!entry)
		return efi.memmap.map_end - efi.memmap.desc_size;

	entry -= efi.memmap.desc_size;
	if (entry < efi.memmap.map)
		return NULL;

	return entry;
}

/*
 * efi_map_next_entry - Return the next EFI memory map descriptor
 * @entry: Previous EFI memory map descriptor
 *
 * This is a helper function to iterate over the EFI memory map, which
 * we do in different orders depending on the current configuration.
 *
 * To begin traversing the memory map @entry must be %NULL.
 *
 * Returns %NULL when we reach the end of the memory map.
 */
static void *efi_map_next_entry(void *entry)
{
	if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
		/*
		 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
		 * config table feature requires us to map all entries
		 * in the same order as they appear in the EFI memory
		 * map. That is to say, entry N must have a lower
		 * virtual address than entry N+1. This is because the
		 * firmware toolchain leaves relative references in
		 * the code/data sections, which are split and become
		 * separate EFI memory regions. Mapping things
		 * out-of-order leads to the firmware accessing
		 * unmapped addresses.
		 *
		 * Since we need to map things this way whether or not
		 * the kernel actually makes use of
		 * EFI_PROPERTIES_TABLE, let's just switch to this
		 * scheme by default for 64-bit.
		 */
		return efi_map_next_entry_reverse(entry);
	}

	/* Initial call */
	if (!entry)
		return efi.memmap.map;

	entry += efi.memmap.desc_size;
	if (entry >= efi.memmap.map_end)
		return NULL;

	return entry;
}

/*
 * Map the efi memory ranges of the runtime services and update new_mmap with
 * virtual addresses.
 */
static void * __init efi_map_regions(int *count, int *pg_shift)
{
	void *p, *new_memmap = NULL;
	unsigned long left = 0;
	unsigned long desc_size;
	efi_memory_desc_t *md;

	desc_size = efi.memmap.desc_size;

	p = NULL;
	while ((p = efi_map_next_entry(p))) {
		md = p;
		if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
#ifdef CONFIG_X86_64
			if (md->type != EFI_BOOT_SERVICES_CODE &&
			    md->type != EFI_BOOT_SERVICES_DATA)
#endif
				continue;
		}

		efi_map_region(md);
		get_systab_virt_addr(md);

		if (left < desc_size) {
			new_memmap = realloc_pages(new_memmap, *pg_shift);
			if (!new_memmap)
				return NULL;

			left += PAGE_SIZE << *pg_shift;
			(*pg_shift)++;
		}

		memcpy(new_memmap + (*count * desc_size), md, desc_size);

		left -= desc_size;
		(*count)++;
	}

	return new_memmap;
}

static void __init kexec_enter_virtual_mode(void)
{
#ifdef CONFIG_KEXEC_CORE
	efi_memory_desc_t *md;
	unsigned int num_pages;

	efi.systab = NULL;

	/*
	 * We don't do virtual mode, since we don't do runtime services, on
	 * non-native EFI
	 */
	if (!efi_is_native()) {
		efi_unmap_memmap();
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	if (efi_alloc_page_tables()) {
		pr_err("Failed to allocate EFI page tables\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	/*
	 * Map the EFI regions which were passed via setup_data. The virt_addr
	 * is a fixed address that was used by the first kernel of a kexec
	 * boot.
	 */
	for_each_efi_memory_desc(md) {
		efi_map_region_fixed(md); /* FIXME: add error handling */
		get_systab_virt_addr(md);
	}

	save_runtime_map();

	BUG_ON(!efi.systab);

	num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
	num_pages >>= PAGE_SHIFT;

	if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	efi_sync_low_kernel_mappings();

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.runtime_version = efi_systab.hdr.revision;

	efi_native_runtime_setup();

	efi.set_virtual_address_map = NULL;

	if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
		runtime_code_page_mkexec();

	/* clean DUMMY object */
	efi_delete_dummy_variable();
#endif
}

/*
 * This function switches the EFI runtime services to virtual mode.
 * Essentially, we look through the EFI memmap and map every region that
 * has the runtime attribute bit set in its memory descriptor into the
 * efi_pgd page table.
 *
 * The old method, which updates each memory descriptor with the
 * virtual address obtained from ioremap(), is still supported when the
 * kernel is booted with efi=old_map on its command line. That method
 * also allowed the runtime services to be called without having to
 * thunk back into physical mode for every invocation.
 *
 * The new method does a pagetable switch in a preemption-safe manner
 * so that we're in a different address space when calling a runtime
 * function. To make function-argument passing work, we copy the PUDs of
 * the kernel page table into efi_pgd prior to each call.
 *
 * Specifically for a kexec boot, the EFI runtime maps of the previous
 * kernel should be passed in via setup_data. In that case the runtime
 * ranges will be mapped to the same virtual addresses as in the first
 * kernel, see kexec_enter_virtual_mode().
 */
static void __init __efi_enter_virtual_mode(void)
{
	int count = 0, pg_shift = 0;
	void *new_memmap = NULL;
	efi_status_t status;

	efi.systab = NULL;

	if (efi_alloc_page_tables()) {
		pr_err("Failed to allocate EFI page tables\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	efi_merge_regions();
	new_memmap = efi_map_regions(&count, &pg_shift);
	if (!new_memmap) {
		pr_err("Error reallocating memory, EFI runtime non-functional!\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	save_runtime_map();

	BUG_ON(!efi.systab);

	if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift)) {
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}

	efi_sync_low_kernel_mappings();

	if (efi_is_native()) {
		status = phys_efi_set_virtual_address_map(
				efi.memmap.desc_size * count,
				efi.memmap.desc_size,
				efi.memmap.desc_version,
				(efi_memory_desc_t *)__pa(new_memmap));
	} else {
		status = efi_thunk_set_virtual_address_map(
				efi_phys.set_virtual_address_map,
				efi.memmap.desc_size * count,
				efi.memmap.desc_size,
				efi.memmap.desc_version,
				(efi_memory_desc_t *)__pa(new_memmap));
	}

	if (status != EFI_SUCCESS) {
		pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
			 status);
		panic("EFI call to SetVirtualAddressMap() failed!");
	}

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.runtime_version = efi_systab.hdr.revision;

	if (efi_is_native())
		efi_native_runtime_setup();
	else
		efi_thunk_runtime_setup();

	efi.set_virtual_address_map = NULL;

	/*
	 * Apply more restrictive page table mapping attributes now that
	 * SVAM() has been called and the firmware has performed all
	 * necessary relocation fixups for the new virtual addresses.
	 */
	efi_runtime_update_mappings();
	efi_dump_pagetable();

	/*
	 * We mapped the descriptor array into the EFI pagetable above
	 * but we're not unmapping it here because if we're running in
	 * EFI mixed mode we need all of memory to be accessible when
	 * we pass parameters to the EFI runtime services in the
	 * thunking code.
	 */
	free_pages((unsigned long)new_memmap, pg_shift);

	/* clean DUMMY object */
	efi_delete_dummy_variable();
}

void __init efi_enter_virtual_mode(void)
{
	if (efi_enabled(EFI_PARAVIRT))
		return;

	if (efi_setup)
		kexec_enter_virtual_mode();
	else
		__efi_enter_virtual_mode();
}

/*
 * Convenience functions to obtain memory types and attributes
 */
u32 efi_mem_type(unsigned long phys_addr)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return 0;
}

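/*
 * Parse the arch-specific part of the "efi=" kernel parameter; currently only
 * "efi=old_map" is handled here, selecting the ioremap()-based mapping method
 * described above.
 */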
static int __init arch_parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "old_map"))
		set_bit(EFI_OLD_MEMMAP, &efi.flags);

	return 0;
}
early_param("efi", arch_parse_efi_cmdline);