arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
#include <asm/setup.h>

#include "physaddr.h"

struct ioremap_mem_flags {
	bool system_ram;
	bool desc_other;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

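/*
 * Return true if the resource contains at least one valid, non-reserved
 * page of system RAM, i.e. memory that ioremap() must refuse to map.
 */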
static bool __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return false;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return true;
	}

	return false;
}

static int __ioremap_check_desc_other(struct resource *res)
{
	return (res->desc != IORES_DESC_NONE);
}

static int __ioremap_res_check(struct resource *res, void *arg)
{
	struct ioremap_mem_flags *flags = arg;

	if (!flags->system_ram)
		flags->system_ram = __ioremap_check_ram(res);

	if (!flags->desc_other)
		flags->desc_other = __ioremap_check_desc_other(res);

	return flags->system_ram && flags->desc_other;
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY and looks for system RAM and/or a
 * resource whose descriptor is something other than IORES_DESC_NONE
 * (e.g. IORES_DESC_ACPI_TABLES).
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_mem_flags *flags)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(flags, 0, sizeof(*flags));

	walk_mem_res(start, end, flags, __ioremap_res_check);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using a smaller page toward 4KB
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_mem_flags mem_flags;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &mem_flags);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (mem_flags.system_ram) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 */
	prot = PAGE_KERNEL_IO;
	if (sev_active() && mem_flags.desc_other)
		prot = pgprot_encrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
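
/*
 * Error-unwind note: the memtype reservation is taken before the vm_area
 * is allocated, so err_free_area deliberately falls through into
 * err_free_memtype to release both in reverse order of acquisition.
 */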

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
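
/*
 * Illustrative sketch (not part of this file): a typical PCI driver maps
 * a BAR with ioremap_nocache() and accesses registers through the mmio
 * helpers. Here pdev is the driver's struct pci_dev and CTRL_REG is a
 * made-up register offset.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + CTRL_REG);
 *	iounmap(regs);
 */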

/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
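
/*
 * Write-combining mappings are typically used for framebuffer-style
 * apertures, where batched, posted writes are safe and considerably
 * faster than uncached access.
 */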

/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel. Reuse
	 * of the virtual address is prevented by leaving it in the
	 * global lists until we're done with it. cpa takes care of the
	 * direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
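
/*
 * Huge-page support for the I/O mappings created above: the generic
 * ioremap code asks these helpers whether 1GB (pud) and 2MB (pmd)
 * mappings are usable on this CPU.
 */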
int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}
472
8f716c9b
TL
473/*
474 * Examine the physical address to determine if it is an area of memory
475 * that should be mapped decrypted. If the memory is not part of the
476 * kernel usable area it was accessed and created decrypted, so these
1de32862
TL
477 * areas should be mapped decrypted. And since the encryption key can
478 * change across reboots, persistent memory should also be mapped
479 * decrypted.
072f58c6
TL
480 *
481 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
482 * only persistent memory should be mapped decrypted.
8f716c9b
TL
483 */
484static bool memremap_should_map_decrypted(resource_size_t phys_addr,
485 unsigned long size)
486{
1de32862
TL
487 int is_pmem;
488
489 /*
490 * Check if the address is part of a persistent memory region.
491 * This check covers areas added by E820, EFI and ACPI.
492 */
493 is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
494 IORES_DESC_PERSISTENT_MEMORY);
495 if (is_pmem != REGION_DISJOINT)
496 return true;
497
498 /*
499 * Check if the non-volatile attribute is set for an EFI
500 * reserved area.
501 */
502 if (efi_enabled(EFI_BOOT)) {
503 switch (efi_mem_type(phys_addr)) {
504 case EFI_RESERVED_TYPE:
505 if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
506 return true;
507 break;
508 default:
509 break;
510 }
511 }
512
8f716c9b
TL
513 /* Check if the address is outside kernel usable area */
514 switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
515 case E820_TYPE_RESERVED:
516 case E820_TYPE_ACPI:
517 case E820_TYPE_NVS:
518 case E820_TYPE_UNUSABLE:
072f58c6
TL
519 /* For SEV, these areas are encrypted */
520 if (sev_active())
521 break;
522 /* Fallthrough */
523
1de32862 524 case E820_TYPE_PRAM:
8f716c9b
TL
525 return true;
526 default:
527 break;
528 }
529
530 return false;
531}
532
/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);

		paddr_next = data->next;
		len = data->len;

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}
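
/*
 * The setup_data headers above are mapped with MEMREMAP_DEC: boot data
 * is written by the boot loader before memory encryption is set up, so
 * it must be read through a decrypted mapping.
 */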

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));

		paddr_next = data->next;
		len = data->len;

		early_memunmap(data, sizeof(*data));

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!mem_encrypt_active())
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (sme_active()) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}
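
/*
 * Policy summary: explicit MEMREMAP_ENC/MEMREMAP_DEC requests win;
 * otherwise, with SME active, boot data (setup_data and EFI data) is
 * mapped decrypted, as is anything memremap_should_map_decrypted()
 * flags (persistent memory, areas outside the kernel usable map).
 */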

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!mem_encrypt_active())
		return prot;

	encrypted_prot = true;

	if (sme_active()) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}
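
/* True when a default (flags == 0) remap of this range would be mapped encrypted. */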
bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_ARCH_USE_MEMREMAP_PROT */

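/*
 * bm_pte is the single page of page-table entries backing the boot-time
 * fixmap slots (FIX_BTMAP_*) used by early_ioremap(); it is hooked into
 * the page tables in early_ioremap_init() below.
 */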
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'flags' against any unsupported bits: */
	pgprot_val(flags) &= __default_kernel_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one_kernel(addr);
}