// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

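/*
 * Editor's illustration (not part of the original file): when a RAM-backed
 * range is mapped with a non-WB cache type, the linear ("direct") mapping
 * of the same pages must be switched to the same attribute, e.g.
 *
 *	ioremap_change_attr((unsigned long)__va(phys_addr), size,
 *			    _PAGE_CACHE_MODE_WC);
 *
 * kernel_map_sync_memtype(), called from __ioremap_caller() below, performs
 * exactly this synchronization.
 */
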
/* Does the range (or a subset of it) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * there the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!sev_active())
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
		(IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

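/*
 * Editor's note (not part of the original file): the collector above
 * returns nonzero once both IORES_MAP_SYSTEM_RAM and IORES_MAP_ENCRYPTED
 * have been seen, and walk_mem_res() stops iterating as soon as its
 * callback returns nonzero, so the walk in __ioremap_check_mem() below
 * terminates as early as possible.
 */
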
/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using smaller pages (down to
 * 4KB) when a mapping range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
			       (unsigned long long)phys_addr,
			       (unsigned long long)(phys_addr + size),
			       pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_nocache);

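/*
 * Editor's illustration (not part of the original file): typical driver
 * usage, where bar_start, bar_len and CTRL_REG are hypothetical values
 * describing an MMIO BAR:
 *
 *	void __iomem *regs = ioremap_nocache(bar_start, bar_len);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	iounmap(regs);
 */
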
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);

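/*
 * Editor's illustration (not part of the original file): write-combining
 * is the usual choice for framebuffer-style apertures, e.g. with a
 * hypothetical fb_phys/fb_len:
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 *	if (fb)
 *		memset_io(fb, 0, fb_len);	... fast streaming writes
 */
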
/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

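/*
 * Editor's note (not part of the original file): these hooks are consulted
 * once by ioremap_huge_init() in lib/ioremap.c, which caches the answers;
 * ioremap_page_range() then installs 1GB (pud-level) or 2MB (pmd-level)
 * mappings only where the corresponding hook returned nonzero. p4d-sized
 * huge I/O mappings are unsupported on x86, hence the unconditional 0
 * above.
 */
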
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

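/*
 * Editor's sketch (not part of the original file): the /dev/mem read path
 * in drivers/char/mem.c uses this pair roughly as
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	remaining = copy_to_user(buf, ptr, sz);
 *	unxlate_dev_mem_ptr(p, ptr);
 */
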
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area, it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (sev_active())
			break;
		/* Fallthrough */

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);

		paddr_next = data->next;
		len = data->len;

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));

		paddr_next = data->next;
		len = data->len;

		early_memunmap(data, sizeof(*data));

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!mem_encrypt_active())
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (sme_active()) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}

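/*
 * Editor's illustration (not part of the original file): a caller that
 * knows its target was written unencrypted can force the decision with
 *
 *	void *va = memremap(phys, size, MEMREMAP_WB | MEMREMAP_DEC);
 *
 * whereas a plain MEMREMAP_WB mapping defers to the policy above.
 */
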
/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!mem_encrypt_active())
		return prot;

	encrypted_prot = true;

	if (sme_active()) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'flags' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one_kernel(addr);
}
0947b2f3 860}