/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>
static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[] __initdata = "on";
static char sme_cmdline_off[] __initdata = "off";
/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL_GPL(sme_me_mask);

static bool sev_enabled __section(.data);
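
/*
 * For reference: consumers apply or strip the mask with the __sme_set() and
 * __sme_clr() helpers from <linux/mem_encrypt.h>, which are essentially:
 *
 *	#define __sme_set(x)	((x) | sme_me_mask)
 *	#define __sme_clr(x)	((x) & ~sme_me_mask)
 *
 * (Illustrative sketch; the exact definitions may carry u64 casts.)
 */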
/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	local_flush_tlb();
	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at most)
	 * one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		memcpy(sme_early_buffer, src, len);
		memcpy(dst, sme_early_buffer, len);

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}
void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}
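
/*
 * Illustrative use (not tied to a specific caller): early boot code that
 * finds data placed in RAM before the encryption mask was set in the
 * pagetables can pull that data into the encrypted view with, e.g.:
 *
 *	sme_early_encrypt(boot_data_paddr, boot_data_size);
 *
 * where boot_data_paddr/boot_data_size are hypothetical names for the
 * region's physical address and length.
 */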
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	__native_flush_tlb();
}
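
/*
 * The boot loader writes boot_params and the command line to memory before
 * SME is active, so their contents are not encrypted; mapping them with the
 * encryption mask cleared (above) is what lets the kernel read them
 * correctly.
 */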
void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}
void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}
void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);
}
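
/*
 * pgprot_encrypted() above ORs the encryption mask into a pgprot_t; a
 * minimal sketch of the idea (the real helper lives in the pgtable
 * headers) is:
 *
 *	#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
 */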
/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * sme_active() and sev_active() functions are used for this. When a
 * distinction isn't needed, the mem_encrypt_active() function can be used.
 *
 * The trampoline code is a good example for this requirement. Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted. So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */
bool sme_active(void)
{
	return sme_me_mask && !sev_enabled;
}
EXPORT_SYMBOL_GPL(sme_active);

bool sev_active(void)
{
	return sme_me_mask && sev_enabled;
}
EXPORT_SYMBOL_GPL(sev_active);
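
/*
 * Illustrative check in the spirit of the trampoline example above (a
 * sketch, not lifted verbatim from a particular caller):
 *
 *	if (sme_active())
 *		set_memory_decrypted((unsigned long)trampoline_va,
 *				     trampoline_size >> PAGE_SHIFT);
 *
 * trampoline_va/trampoline_size are hypothetical names; the point is that
 * SME needs the trampoline decrypted while SEV must leave it encrypted.
 */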
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	if (!sme_me_mask)
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	pr_info("AMD Secure Memory Encryption (SME) active\n");
}
void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
{
	WARN(PAGE_ALIGN(size) != size,
	     "size is not page-aligned (%#lx)\n", size);

	/* Make the SWIOTLB buffer area decrypted */
	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
}
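
/*
 * The SWIOTLB bounce buffers are made decrypted because devices DMA to and
 * from them; with the encryption bit set a device would only ever see
 * ciphertext.
 */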
static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
				 unsigned long end)
{
	unsigned long pgd_start, pgd_end, pgd_size;
	pgd_t *pgd_p;

	pgd_start = start & PGDIR_MASK;
	pgd_end = end & PGDIR_MASK;

	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1);
	pgd_size *= sizeof(pgd_t);

	pgd_p = pgd_base + pgd_index(start);

	memset(pgd_p, 0, pgd_size);
}
#define PGD_FLAGS	_KERNPG_TABLE_NOENC
#define P4D_FLAGS	_KERNPG_TABLE_NOENC
#define PUD_FLAGS	_KERNPG_TABLE_NOENC
#define PMD_FLAGS	(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
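
/*
 * Note on the flag choices (editorial summary, not from the original
 * comments): the _NOENC table flags leave the encryption mask out of the
 * table entries, so the page walker reads the new pagetables, which live in
 * the not-yet-encrypted workarea, as decrypted. PMD_FLAGS drops
 * _PAGE_GLOBAL so that the plain CR3 writes done in sme_encrypt_kernel()
 * are enough to flush every mapping created here.
 */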
static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
				     unsigned long vaddr, pmdval_t pmd_val)
{
	pgd_t *pgd_p;
	p4d_t *p4d_p;
	pud_t *pud_p;
	pmd_t *pmd_p;

	pgd_p = pgd_base + pgd_index(vaddr);
	if (native_pgd_val(*pgd_p)) {
		if (IS_ENABLED(CONFIG_X86_5LEVEL))
			p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
		else
			pud_p = (pud_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
	} else {
		pgd_t pgd;

		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
			p4d_p = pgtable_area;
			memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
			pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;

			pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
		} else {
			pud_p = pgtable_area;
			memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
			pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;

			pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
		}
		native_set_pgd(pgd_p, pgd);
	}

	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d_p += p4d_index(vaddr);
		if (native_p4d_val(*p4d_p)) {
			pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
		} else {
			p4d_t p4d;

			pud_p = pgtable_area;
			memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
			pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;

			p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
			native_set_p4d(p4d_p, p4d);
		}
	}

	pud_p += pud_index(vaddr);
	if (native_pud_val(*pud_p)) {
		if (native_pud_val(*pud_p) & _PAGE_PSE)
			goto out;

		pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
	} else {
		pud_t pud;

		pmd_p = pgtable_area;
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;

		pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
		native_set_pud(pud_p, pud);
	}

	pmd_p += pmd_index(vaddr);
	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
		native_set_pmd(pmd_p, native_make_pmd(pmd_val));

out:
	return pgtable_area;
}
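
/*
 * sme_populate_pgd() hands back the advanced allocation cursor, so callers
 * thread it through successive calls while installing one 2MB mapping per
 * call, e.g.:
 *
 *	pgtable_area = sme_populate_pgd(pgd, pgtable_area, vaddr,
 *					paddr + pmd_flags);
 */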
static unsigned long __init sme_pgtable_calc(unsigned long len)
{
	unsigned long p4d_size, pud_size, pmd_size;
	unsigned long total;

	/*
	 * Perform a relatively simplistic calculation of the pagetable
	 * entries that are needed. The mappings will be covered by 2MB
	 * PMD entries so we can conservatively calculate the required
	 * number of P4D, PUD and PMD structures needed to perform the
	 * mappings. Incrementing the count for each covers the case where
	 * the addresses cross entries.
	 */
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
		p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
		pud_size = (ALIGN(len, P4D_SIZE) / P4D_SIZE) + 1;
		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
	} else {
		p4d_size = 0;
		pud_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
	}
	pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;

	total = p4d_size + pud_size + pmd_size;

	/*
	 * Now calculate the added pagetable structures needed to populate
	 * the new pagetables.
	 */
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
		p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
		pud_size = ALIGN(total, P4D_SIZE) / P4D_SIZE;
		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
	} else {
		p4d_size = 0;
		pud_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
	}
	pmd_size = ALIGN(total, PUD_SIZE) / PUD_SIZE;
	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;

	total += p4d_size + pud_size + pmd_size;

	return total;
}
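
/*
 * Worked example (4-level paging, so PGDIR_SIZE = 512GB, PUD_SIZE = 1GB):
 * for len = 64MB the first pass gives pud_size = (1 + 1) * 512 * 8 = 8KB
 * and pmd_size = (1 + 1) * 512 * 8 = 8KB, i.e. total = 16KB, before the
 * second pass adds the tables needed to map those 16KB themselves.
 */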
void __init sme_encrypt_kernel(void)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;
	unsigned long kernel_start, kernel_end, kernel_len;
	unsigned long pgtable_area_len;
	unsigned long paddr, pmd_flags;
	unsigned long decrypted_base;
	void *pgtable_area;
	pgd_t *pgd;

	if (!sme_active())
		return;
	/*
	 * Prepare for encrypting the kernel by building new pagetables with
	 * the necessary attributes needed to encrypt the kernel in place.
	 *
	 *   One range of virtual addresses will map the memory occupied
	 *   by the kernel as encrypted.
	 *
	 *   Another range of virtual addresses will map the memory occupied
	 *   by the kernel as decrypted and write-protected.
	 *
	 *     The use of the write-protect attribute will prevent any of the
	 *     memory from being cached.
	 */
	/* Physical addresses give us the identity mapped virtual addresses */
	kernel_start = __pa_symbol(_text);
	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
	kernel_len = kernel_end - kernel_start;

	/* Set the encryption workarea to be immediately after the kernel */
	workarea_start = kernel_end;
	/*
	 * Calculate the required number of workarea bytes:
	 *   executable encryption area size:
	 *     stack page (PAGE_SIZE)
	 *     encryption routine page (PAGE_SIZE)
	 *     intermediate copy buffer (PMD_PAGE_SIZE)
	 *   pagetable structures for the encryption of the kernel
	 *   pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start;
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
	execute_len = execute_end - execute_start;
	/*
	 * One PGD for both encrypted and decrypted mappings and a set of
	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
	 */
	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;

	/* PUDs and PMDs needed in the current pagetables for the workarea */
	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

	/*
	 * The total workarea includes the executable encryption area and
	 * the pagetable area.
	 */
	workarea_len = execute_len + pgtable_area_len;
	workarea_end = workarea_start + workarea_len;

	/*
	 * Set the address to the start of where newly created pagetable
	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
	 * structures are created when the workarea is added to the current
	 * pagetables and when the new encrypted and decrypted kernel
	 * mappings are populated.
	 */
	pgtable_area = (void *)execute_end;
	/*
	 * Make sure the current pagetable structure has entries for
	 * addressing the workarea.
	 */
	pgd = (pgd_t *)native_read_cr3_pa();
	paddr = workarea_start;
	while (paddr < workarea_end) {
		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
						paddr,
						paddr + PMD_FLAGS);

		paddr += PMD_PAGE_SIZE;
	}

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
	/*
	 * A new pagetable structure is being built to allow for the kernel
	 * to be encrypted. It starts with an empty PGD that will then be
	 * populated with new PUDs and PMDs as the encrypted and decrypted
	 * kernel mappings are created.
	 */
	pgd = pgtable_area;
	memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD);
	pgtable_area += sizeof(*pgd) * PTRS_PER_PGD;
	/* Add encrypted kernel (identity) mappings */
	pmd_flags = PMD_FLAGS | _PAGE_ENC;
	paddr = kernel_start;
	while (paddr < kernel_end) {
		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
						paddr,
						paddr + pmd_flags);

		paddr += PMD_PAGE_SIZE;
	}
	/*
	 * A different PGD index/entry must be used to get different
	 * pagetable entries for the decrypted mapping. Choose the next
	 * PGD index and convert it to a virtual address to be used as
	 * the base of the mapping.
	 */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	decrypted_base <<= PGDIR_SHIFT;
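
	/*
	 * For example, with 4-level paging (PGDIR_SHIFT = 39) and a kernel
	 * plus workarea below 512GB, pgd_index(workarea_end) is 0 and the
	 * decrypted alias lands at 1UL << 39 = 512GB, a PGD slot the
	 * identity mappings do not use.
	 */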
	/* Add decrypted, write-protected kernel (non-identity) mappings */
	pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
	paddr = kernel_start;
	while (paddr < kernel_end) {
		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
						paddr + decrypted_base,
						paddr + pmd_flags);

		paddr += PMD_PAGE_SIZE;
	}
	/* Add decrypted workarea mappings to both kernel mappings */
	paddr = workarea_start;
	while (paddr < workarea_end) {
		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
						paddr,
						paddr + PMD_FLAGS);

		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
						paddr + decrypted_base,
						paddr + PMD_FLAGS);

		paddr += PMD_PAGE_SIZE;
	}
	/* Perform the encryption */
	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
			    kernel_len, workarea_start, (unsigned long)pgd);
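
	/*
	 * A sketch of sme_encrypt_execute()'s role, assuming the usual tree
	 * layout: it is an assembly routine (mem_encrypt_boot.S) that runs
	 * from the workarea's executable region and moves the kernel image
	 * between the decrypted and encrypted mappings through the
	 * intermediate copy buffer.
	 */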
	/*
	 * At this point we are running encrypted. Remove the mappings for
	 * the decrypted areas - all that is needed for this is to remove
	 * the PGD entry/entries.
	 */
	sme_clear_pgd(pgd, kernel_start + decrypted_base,
		      kernel_end + decrypted_base);

	sme_clear_pgd(pgd, workarea_start + decrypted_base,
		      workarea_end + decrypted_base);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
}
void __init __nostackprotector sme_enable(struct boot_params *bp)
{
	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
	unsigned int eax, ebx, ecx, edx;
	bool active_by_default;
	unsigned long me_mask;
	char buffer[16];
	u64 msr;
566 native_cpuid(&eax
, &ebx
, &ecx
, &edx
);
567 if (eax
< 0x8000001f)
	/*
	 * Check for the SME feature:
	 *   CPUID Fn8000_001F[EAX] - Bit 0
	 *     Secure Memory Encryption support
	 *   CPUID Fn8000_001F[EBX] - Bits 5:0
	 *     Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (!(eax & 1))
		return;

	me_mask = 1UL << (ebx & 0x3f);
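
	/*
	 * Example: on parts that report the encryption bit in pagetable bit
	 * position 47 (a common value), ebx & 0x3f == 47 and me_mask
	 * becomes 1UL << 47.
	 */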
	/* Check if SME is enabled */
	msr = __rdmsr(MSR_K8_SYSCFG);
	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
		return;
	/*
	 * Fixups have not been applied to phys_base yet and we're running
	 * identity mapped, so we must obtain the address to the SME command
	 * line argument data using rip-relative addressing.
	 */
	asm ("lea sme_cmdline_arg(%%rip), %0"
	     : "=r" (cmdline_arg)
	     : "p" (sme_cmdline_arg));
	asm ("lea sme_cmdline_on(%%rip), %0"
	     : "=r" (cmdline_on)
	     : "p" (sme_cmdline_on));
	asm ("lea sme_cmdline_off(%%rip), %0"
	     : "=r" (cmdline_off)
	     : "p" (sme_cmdline_off));
	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
		active_by_default = true;
	else
		active_by_default = false;
	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
				     ((u64)bp->ext_cmd_line_ptr << 32));

	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
		sme_me_mask = me_mask;
	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
		sme_me_mask = 0;
	else
		sme_me_mask = active_by_default ? me_mask : 0;
}