// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"SEV-ES: " fmt
#include <linux/sched/debug.h>	/* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/mem_encrypt.h>
#include <linux/lockdep.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev-es.h>
#include <asm/insn-eval.h>
#include <asm/fpu/internal.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
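/*
 * 0x400 is the architectural reset value of DR7: all debug-control fields
 * cleared, with only the reserved, always-one bit 10 set.
 */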
#define DR7_RESET_VALUE        0x400
/* For early boot hypervisor communication in SEV-ES enabled guests */
static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
/*
 * Needs to be in the .data section because we need it NULL before bss is
 * cleared.
 */
static struct ghcb __initdata *boot_ghcb;
/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
        struct ghcb ghcb_page;

        /* Physical storage for the per-CPU IST stack of the #VC handler */
        char ist_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);

        /*
         * Physical storage for the per-CPU fall-back stack of the #VC handler.
         * The fall-back stack is used when it is not safe to switch back to the
         * interrupted stack in the #VC entry code.
         */
        char fallback_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);

        /*
         * Reserve one page per CPU as backup storage for the unencrypted GHCB.
         * It is needed when an NMI happens while the #VC handler uses the real
         * GHCB, and the NMI handler itself is causing another #VC exception. In
         * that case the GHCB content of the first handler needs to be backed up
         * and restored.
         */
        struct ghcb backup_ghcb;

        /*
         * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
         * There is no need for it to be atomic, because nothing is written to
         * the GHCB between the read and the write of ghcb_active. So it is safe
         * to use it when a nested #VC exception happens before the write.
         *
         * This is necessary for example in the #VC->NMI->#VC case when the NMI
         * happens while the first #VC handler uses the GHCB. When the NMI code
         * raises a second #VC handler it might overwrite the contents of the
         * GHCB written by the first handler. To avoid this the content of the
         * GHCB is saved and restored when the GHCB is detected to be in use
         * already.
         */
        bool ghcb_active;
        bool backup_ghcb_active;

        /*
         * Cached DR7 value - write it on DR7 writes and return it on reads.
         * That value will never make it to the real hardware DR7 as debugging
         * is currently unsupported in SEV-ES guests.
         */
        unsigned long dr7;
};

struct ghcb_state {
        struct ghcb *ghcb;
};
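/*
 * Illustrative sketch of the nesting case the flags above handle (see
 * sev_es_get_ghcb()/sev_es_put_ghcb() below for the actual code):
 *
 *   #VC    sev_es_get_ghcb()  ghcb_active = true, GHCB handed out
 *    NMI raised while the GHCB is in use
 *     #VC  sev_es_get_ghcb()  GHCB busy: contents copied to backup_ghcb,
 *                             backup_ghcb_active = true
 *     #VC  sev_es_put_ghcb()  contents restored from backup_ghcb
 *    NMI returns
 *   #VC    sev_es_put_ghcb()  ghcb_active = false
 */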
static DEFINE_PER_CPU(struct sev_es_runtime_data *, runtime_data);
DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);
/* Needed in vc_early_forward_exception */
void do_early_exception(struct pt_regs *regs, int trapnr);
static void __init setup_vc_stacks(int cpu)
{
        struct sev_es_runtime_data *data;
        struct cpu_entry_area *cea;
        unsigned long vaddr;
        phys_addr_t pa;

        data = per_cpu(runtime_data, cpu);
        cea  = get_cpu_entry_area(cpu);

        /* Map #VC IST stack */
        vaddr = CEA_ESTACK_BOT(&cea->estacks, VC);
        pa    = __pa(data->ist_stack);
        cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);

        /* Map VC fall-back stack */
        vaddr = CEA_ESTACK_BOT(&cea->estacks, VC2);
        pa    = __pa(data->fallback_stack);
        cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
}
static __always_inline bool on_vc_stack(struct pt_regs *regs)
{
        unsigned long sp = regs->sp;

        /* User-mode RSP is not trusted */
        if (user_mode(regs))
                return false;

        /* SYSCALL gap still has user-mode RSP */
        if (ip_within_syscall_gap(regs))
                return false;

        return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
}
/*
 * This function handles the case when an NMI is raised in the #VC exception
 * handler entry code. In this case, the IST entry for #VC must be adjusted,
 * so that any subsequent #VC exception will not overwrite the stack contents
 * of the interrupted #VC handler.
 *
 * The IST entry is adjusted unconditionally so that it can also be
 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a nested
 * sev_es_ist_exit() call may adjust back the IST entry too early.
 */
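/*
 * Sketch of the adjustment done below (illustrative): 'ist' is the #VC IST
 * entry in the TSS.
 *
 *   before:  ist -> old_ist
 *   after:   ist -> new_ist, where
 *            new_ist = (on #VC stack ? ALIGN_DOWN(regs->sp, 8) : old_ist)
 *                      - sizeof(old_ist)
 *            *(unsigned long *)new_ist == old_ist
 *
 * __sev_es_ist_exit() undoes this by reading the saved value back.
 */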
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
        unsigned long old_ist, new_ist;

        /* Read old IST entry */
        old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

        /* Make room on the IST stack */
        if (on_vc_stack(regs))
                new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
        else
                new_ist = old_ist - sizeof(old_ist);

        /* Store old IST entry */
        *(unsigned long *)new_ist = old_ist;

        /* Set new IST entry */
        this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}
void noinstr __sev_es_ist_exit(void)
{
        unsigned long ist;

        /* Read IST entry */
        ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

        if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
                return;

        /* Read back old IST entry and write it to the TSS */
        this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}
static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
{
        struct sev_es_runtime_data *data;
        struct ghcb *ghcb;

        data = this_cpu_read(runtime_data);
        ghcb = &data->ghcb_page;

        if (unlikely(data->ghcb_active)) {
                /* GHCB is already in use - save its contents */

                if (unlikely(data->backup_ghcb_active))
                        return NULL;

                /* Mark backup_ghcb active before writing to it */
                data->backup_ghcb_active = true;

                state->ghcb = &data->backup_ghcb;

                /* Backup GHCB content */
                *state->ghcb = *ghcb;
        } else {
                state->ghcb = NULL;
                data->ghcb_active = true;
        }

        return ghcb;
}
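/*
 * A NULL return means both the GHCB and the backup GHCB are already in
 * use - callers must treat that as fatal; the runtime #VC handler below
 * panics in this case.
 */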
static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
{
        struct sev_es_runtime_data *data;
        struct ghcb *ghcb;

        data = this_cpu_read(runtime_data);
        ghcb = &data->ghcb_page;

        if (state->ghcb) {
                /* Restore GHCB from Backup */
                *ghcb = *state->ghcb;
                data->backup_ghcb_active = false;
                state->ghcb = NULL;
        } else {
                data->ghcb_active = false;
        }
}
static inline u64 sev_es_rd_ghcb_msr(void)
{
        return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}
static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
        u32 low, high;

        low  = (u32)(val);
        high = (u32)(val >> 32);

        native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
}
static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
                                unsigned char *buffer)
{
        return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
}
static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
        char buffer[MAX_INSN_SIZE];
        enum es_result ret;
        int res;

        if (user_mode(ctxt->regs)) {
                res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
                if (!res) {
                        ctxt->fi.vector     = X86_TRAP_PF;
                        ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
                        ctxt->fi.cr2        = ctxt->regs->ip;
                        return ES_EXCEPTION;
                }

                if (!insn_decode(&ctxt->insn, ctxt->regs, buffer, res))
                        return ES_DECODE_FAILED;
        } else {
                res = vc_fetch_insn_kernel(ctxt, buffer);
                if (res) {
                        ctxt->fi.vector     = X86_TRAP_PF;
                        ctxt->fi.error_code = X86_PF_INSTR;
                        ctxt->fi.cr2        = ctxt->regs->ip;
                        return ES_EXCEPTION;
                }

                insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE - res, 1);
                insn_get_length(&ctxt->insn);
        }

        ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED;

        return ret;
}
static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
                                   char *dst, char *buf, size_t size)
{
        unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
        char __user *target = (char __user *)dst;
        u64 d8;
        u32 d4;
        u16 d2;
        u8  d1;

        /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
        if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
                memcpy(dst, buf, size);
                return ES_OK;
        }

        /*
         * put_user() writes the access width of its pointer type, so the
         * target pointer must be cast to the matching width for each size.
         */
        switch (size) {
        case 1:
                memcpy(&d1, buf, 1);
                if (put_user(d1, target))
                        goto fault;
                break;
        case 2:
                memcpy(&d2, buf, 2);
                if (put_user(d2, (u16 __user *)target))
                        goto fault;
                break;
        case 4:
                memcpy(&d4, buf, 4);
                if (put_user(d4, (u32 __user *)target))
                        goto fault;
                break;
        case 8:
                memcpy(&d8, buf, 8);
                if (put_user(d8, (u64 __user *)target))
                        goto fault;
                break;
        default:
                WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
                return ES_UNSUPPORTED;
        }

        return ES_OK;

fault:
        if (user_mode(ctxt->regs))
                error_code |= X86_PF_USER;

        ctxt->fi.vector = X86_TRAP_PF;
        ctxt->fi.error_code = error_code;
        ctxt->fi.cr2 = (unsigned long)dst;

        return ES_EXCEPTION;
}
static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
                                  char *src, char *buf, size_t size)
{
        unsigned long error_code = X86_PF_PROT;
        char __user *s = (char __user *)src;
        u64 d8;
        u32 d4;
        u16 d2;
        u8  d1;

        /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
        if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
                memcpy(buf, src, size);
                return ES_OK;
        }

        /* Mirror image of vc_write_mem(): fetch with the matching width */
        switch (size) {
        case 1:
                if (get_user(d1, s))
                        goto fault;
                memcpy(buf, &d1, 1);
                break;
        case 2:
                if (get_user(d2, (u16 __user *)s))
                        goto fault;
                memcpy(buf, &d2, 2);
                break;
        case 4:
                if (get_user(d4, (u32 __user *)s))
                        goto fault;
                memcpy(buf, &d4, 4);
                break;
        case 8:
                if (get_user(d8, (u64 __user *)s))
                        goto fault;
                memcpy(buf, &d8, 8);
                break;
        default:
                WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
                return ES_UNSUPPORTED;
        }

        return ES_OK;

fault:
        if (user_mode(ctxt->regs))
                error_code |= X86_PF_USER;

        ctxt->fi.vector = X86_TRAP_PF;
        ctxt->fi.error_code = error_code;
        ctxt->fi.cr2 = (unsigned long)src;

        return ES_EXCEPTION;
}
static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
                                           unsigned long vaddr, phys_addr_t *paddr)
{
        unsigned long va = (unsigned long)vaddr;
        unsigned int level;
        phys_addr_t pa;
        pgd_t *pgd;
        pte_t *pte;

        pgd = __va(read_cr3_pa());
        pgd = &pgd[pgd_index(va)];
        pte = lookup_address_in_pgd(pgd, va, &level);
        if (!pte) {
                ctxt->fi.vector     = X86_TRAP_PF;
                ctxt->fi.cr2        = vaddr;
                ctxt->fi.error_code = 0;

                if (user_mode(ctxt->regs))
                        ctxt->fi.error_code |= X86_PF_USER;

                return ES_EXCEPTION;
        }

        if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
                /* Emulated MMIO to/from encrypted memory not supported */
                return ES_UNSUPPORTED;

        pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
        pa |= va & ~page_level_mask(level);

        *paddr = pa;

        return ES_OK;
}
/* Include code shared with pre-decompression boot stage */
#include "sev-es-shared.c"
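/*
 * The shared file provides, among others, vc_ghcb_invalidate(),
 * sev_es_ghcb_hv_call(), sev_es_negotiate_protocol(), sev_es_terminate()
 * and the vc_handle_cpuid()/vc_handle_ioio()/vc_handle_rdtsc() helpers
 * used below.
 */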
void noinstr __sev_es_nmi_complete(void)
{
        struct ghcb_state state;
        struct ghcb *ghcb;

        ghcb = sev_es_get_ghcb(&state);

        vc_ghcb_invalidate(ghcb);
        ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
        ghcb_set_sw_exit_info_1(ghcb, 0);
        ghcb_set_sw_exit_info_2(ghcb, 0);

        sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
        VMGEXIT();

        sev_es_put_ghcb(&state);
}
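/*
 * The sequence above is the canonical GHCB call pattern used throughout
 * this file: invalidate the GHCB valid-bitmap, set the exit code and exit
 * infos, point the GHCB MSR at the page and issue VMGEXIT so the
 * hypervisor can process the request.
 */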
static u64 get_jump_table_addr(void)
{
        struct ghcb_state state;
        unsigned long flags;
        struct ghcb *ghcb;
        u64 ret = 0;

        local_irq_save(flags);

        ghcb = sev_es_get_ghcb(&state);

        vc_ghcb_invalidate(ghcb);
        ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
        ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
        ghcb_set_sw_exit_info_2(ghcb, 0);

        sev_es_wr_ghcb_msr(__pa(ghcb));
        VMGEXIT();

        if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
            ghcb_sw_exit_info_2_is_valid(ghcb))
                ret = ghcb->save.sw_exit_info_2;

        sev_es_put_ghcb(&state);

        local_irq_restore(flags);

        return ret;
}
int sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
{
        u16 startup_cs, startup_ip;
        phys_addr_t jump_table_pa;
        u64 jump_table_addr;
        u16 __iomem *jump_table;

        jump_table_addr = get_jump_table_addr();

        /* On UP guests there is no jump table so this is not a failure */
        if (!jump_table_addr)
                return 0;

        /* Check if AP Jump Table is page-aligned */
        if (jump_table_addr & ~PAGE_MASK)
                return -EINVAL;

        jump_table_pa = jump_table_addr & PAGE_MASK;

        startup_cs = (u16)(rmh->trampoline_start >> 4);
        startup_ip = (u16)(rmh->sev_es_trampoline_start -
                           rmh->trampoline_start);

        jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
        if (!jump_table)
                return -EIO;

        writew(startup_ip, &jump_table[0]);
        writew(startup_cs, &jump_table[1]);

        iounmap(jump_table);

        return 0;
}
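/*
 * As the two writew() calls above show, the AP jump table starts with two
 * 16-bit values: the real-mode IP at offset 0 and the real-mode CS at
 * offset 2, which the woken AP uses as its restart vector.
 */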
/*
 * This is needed by the OVMF UEFI firmware which will use whatever it finds in
 * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
 * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
 */
int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
{
        struct sev_es_runtime_data *data;
        unsigned long address, pflags;
        int cpu;
        u64 pfn;

        if (!sev_es_active())
                return 0;

        pflags = _PAGE_NX | _PAGE_RW;

        for_each_possible_cpu(cpu) {
                data = per_cpu(runtime_data, cpu);

                address = __pa(&data->ghcb_page);
                pfn = address >> PAGE_SHIFT;

                if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
                        return 1;
        }

        return 0;
}
static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
        struct pt_regs *regs = ctxt->regs;
        enum es_result ret;
        u64 exit_info_1;

        /* Is it a WRMSR? */
        exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;

        ghcb_set_rcx(ghcb, regs->cx);
        if (exit_info_1) {
                ghcb_set_rax(ghcb, regs->ax);
                ghcb_set_rdx(ghcb, regs->dx);
        }

        ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);

        if ((ret == ES_OK) && (!exit_info_1)) {
                regs->ax = ghcb->save.rax;
                regs->dx = ghcb->save.rdx;
        }

        return ret;
}
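/*
 * Note: exit_info_1 distinguishes RDMSR (0) from WRMSR (1); second opcode
 * byte 0x30 is WRMSR. Register state is copied into the GHCB only for
 * writes and copied back from it only for reads.
 */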
/*
 * This function runs on the first #VC exception after the kernel
 * switched to virtual addresses.
 */
static bool __init sev_es_setup_ghcb(void)
{
        /* First make sure the hypervisor talks a supported protocol. */
        if (!sev_es_negotiate_protocol())
                return false;

        /*
         * Clear the boot_ghcb. The first exception comes in before the bss
         * section is cleared.
         */
        memset(&boot_ghcb_page, 0, PAGE_SIZE);

        /* Alright - Make the boot-ghcb public */
        boot_ghcb = &boot_ghcb_page;

        return true;
}
#ifdef CONFIG_HOTPLUG_CPU
static void sev_es_ap_hlt_loop(void)
{
        struct ghcb_state state;
        struct ghcb *ghcb;

        ghcb = sev_es_get_ghcb(&state);

        while (true) {
                vc_ghcb_invalidate(ghcb);
                ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
                ghcb_set_sw_exit_info_1(ghcb, 0);
                ghcb_set_sw_exit_info_2(ghcb, 0);

                sev_es_wr_ghcb_msr(__pa(ghcb));
                VMGEXIT();

                /* Wakeup signal? */
                if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
                    ghcb->save.sw_exit_info_2)
                        break;
        }

        sev_es_put_ghcb(&state);
}
/*
 * Play_dead handler when running under SEV-ES. This is needed because
 * the hypervisor can't deliver an SIPI request to restart the AP.
 * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
 * hypervisor wakes it up again.
 */
static void sev_es_play_dead(void)
{
        play_dead_common();

        /* IRQs now disabled */

        sev_es_ap_hlt_loop();

        /*
         * If we get here, the VCPU was woken up again. Jump to CPU
         * startup code to get it back online.
         */
        start_cpu0();
}
#else  /* CONFIG_HOTPLUG_CPU */
#define sev_es_play_dead        native_play_dead
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_HOTPLUG_CPU
static void __init sev_es_setup_play_dead(void)
{
        smp_ops.play_dead = sev_es_play_dead;
}
#else
static inline void sev_es_setup_play_dead(void) { }
#endif
static void __init alloc_runtime_data(int cpu)
{
        struct sev_es_runtime_data *data;

        data = memblock_alloc(sizeof(*data), PAGE_SIZE);
        if (!data)
                panic("Can't allocate SEV-ES runtime data");

        per_cpu(runtime_data, cpu) = data;
}
static void __init init_ghcb(int cpu)
{
        struct sev_es_runtime_data *data;
        int err;

        data = per_cpu(runtime_data, cpu);

        err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
                                         sizeof(data->ghcb_page));
        if (err)
                panic("Can't map GHCBs unencrypted");

        memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));

        data->ghcb_active = false;
        data->backup_ghcb_active = false;
}
void __init sev_es_init_vc_handling(void)
{
        int cpu;

        BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);

        if (!sev_es_active())
                return;

        if (!sev_es_check_cpu_features())
                panic("SEV-ES CPU Features missing");

        /* Enable SEV-ES special handling */
        static_branch_enable(&sev_es_enable_key);

        /* Initialize per-cpu GHCB pages */
        for_each_possible_cpu(cpu) {
                alloc_runtime_data(cpu);
                init_ghcb(cpu);
                setup_vc_stacks(cpu);
        }

        sev_es_setup_play_dead();

        /* Secondary CPUs use the runtime #VC handler */
        initial_vc_handler = (unsigned long)safe_stack_exc_vmm_communication;
}
static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
{
        int trapnr = ctxt->fi.vector;

        if (trapnr == X86_TRAP_PF)
                native_write_cr2(ctxt->fi.cr2);

        ctxt->regs->orig_ax = ctxt->fi.error_code;
        do_early_exception(ctxt->regs, trapnr);
}
static long *vc_insn_get_reg(struct es_em_ctxt *ctxt)
{
        long *reg_array;
        int offset;

        reg_array = (long *)ctxt->regs;
        offset    = insn_get_modrm_reg_off(&ctxt->insn, ctxt->regs);

        if (offset < 0)
                return NULL;

        offset /= sizeof(long);

        return reg_array + offset;
}
static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
{
        long *reg_array;
        int offset;

        reg_array = (long *)ctxt->regs;
        offset    = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);

        if (offset < 0)
                return NULL;

        offset /= sizeof(long);

        return reg_array + offset;
}
static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
                                 unsigned int bytes, bool read)
{
        u64 exit_code, exit_info_1, exit_info_2;
        unsigned long ghcb_pa = __pa(ghcb);
        enum es_result res;
        phys_addr_t paddr;
        void __user *ref;

        ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
        if (ref == (void __user *)-1L)
                return ES_UNSUPPORTED;

        exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;

        res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
        if (res != ES_OK) {
                if (res == ES_EXCEPTION && !read)
                        ctxt->fi.error_code |= X86_PF_WRITE;

                return res;
        }

        exit_info_1 = paddr;
        /* Can never be greater than 8 */
        exit_info_2 = bytes;

        ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));

        return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
}
static enum es_result vc_handle_mmio_twobyte_ops(struct ghcb *ghcb,
                                                 struct es_em_ctxt *ctxt)
{
        struct insn *insn = &ctxt->insn;
        unsigned int bytes = 0;
        enum es_result ret;
        int sign_byte;
        long *reg_data;

        switch (insn->opcode.bytes[1]) {
                /* MMIO Read w/ zero-extension */
        case 0xb6:
                bytes = 1;
                fallthrough;
        case 0xb7:
                if (!bytes)
                        bytes = 2;

                ret = vc_do_mmio(ghcb, ctxt, bytes, true);
                if (ret)
                        break;

                /* Zero extend based on operand size */
                reg_data = vc_insn_get_reg(ctxt);
                if (!reg_data)
                        return ES_DECODE_FAILED;

                memset(reg_data, 0, insn->opnd_bytes);

                memcpy(reg_data, ghcb->shared_buffer, bytes);
                break;

                /* MMIO Read w/ sign-extension */
        case 0xbe:
                bytes = 1;
                fallthrough;
        case 0xbf:
                if (!bytes)
                        bytes = 2;

                ret = vc_do_mmio(ghcb, ctxt, bytes, true);
                if (ret)
                        break;

                /* Sign extend based on operand size */
                reg_data = vc_insn_get_reg(ctxt);
                if (!reg_data)
                        return ES_DECODE_FAILED;

                if (bytes == 1) {
                        u8 *val = (u8 *)ghcb->shared_buffer;

                        sign_byte = (*val & 0x80) ? 0xff : 0x00;
                } else {
                        u16 *val = (u16 *)ghcb->shared_buffer;

                        sign_byte = (*val & 0x8000) ? 0xff : 0x00;
                }
                memset(reg_data, sign_byte, insn->opnd_bytes);

                memcpy(reg_data, ghcb->shared_buffer, bytes);
                break;

        default:
                ret = ES_UNSUPPORTED;
        }

        return ret;
}
/*
 * The MOVS instruction has two memory operands, which raises the
 * problem that it is not known whether the access to the source or the
 * destination caused the #VC exception (and hence whether an MMIO read
 * or write operation needs to be emulated).
 *
 * Instead of playing games with walking page-tables and trying to guess
 * whether the source or destination is an MMIO range, split the move
 * into two operations, a read and a write with only one memory operand.
 * This will cause a nested #VC exception on the MMIO address which can
 * then be handled.
 *
 * This implementation has the benefit that it also supports MOVS where
 * source _and_ destination are MMIO regions.
 *
 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
 * rare operation. If it turns out to be a performance problem the split
 * operations can be moved to memcpy_fromio() and memcpy_toio().
 */
static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
                                          unsigned int bytes)
{
        unsigned long ds_base, es_base;
        unsigned char *src, *dst;
        unsigned char buffer[8];
        enum es_result ret;
        bool rep;
        int off;

        ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
        es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

        if (ds_base == -1L || es_base == -1L) {
                ctxt->fi.vector = X86_TRAP_GP;
                ctxt->fi.error_code = 0;
                return ES_EXCEPTION;
        }

        src = ds_base + (unsigned char *)ctxt->regs->si;
        dst = es_base + (unsigned char *)ctxt->regs->di;

        ret = vc_read_mem(ctxt, src, buffer, bytes);
        if (ret != ES_OK)
                return ret;

        ret = vc_write_mem(ctxt, dst, buffer, bytes);
        if (ret != ES_OK)
                return ret;

        if (ctxt->regs->flags & X86_EFLAGS_DF)
                off = -bytes;
        else
                off =  bytes;

        ctxt->regs->si += off;
        ctxt->regs->di += off;

        rep = insn_has_rep_prefix(&ctxt->insn);
        if (rep)
                ctxt->regs->cx -= 1;

        if (!rep || ctxt->regs->cx == 0)
                return ES_OK;
        else
                return ES_RETRY;
}
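/*
 * Note the ES_RETRY return for REP MOVS with a non-zero count: the
 * instruction pointer is not advanced in that case, so the instruction is
 * executed again and the next iteration raises a new #VC exception.
 */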
static enum es_result vc_handle_mmio(struct ghcb *ghcb,
                                     struct es_em_ctxt *ctxt)
{
        struct insn *insn = &ctxt->insn;
        unsigned int bytes = 0;
        enum es_result ret;
        long *reg_data;

        switch (insn->opcode.bytes[0]) {
        /* MMIO Write */
        case 0x88:
                bytes = 1;
                fallthrough;
        case 0x89:
                if (!bytes)
                        bytes = insn->opnd_bytes;

                reg_data = vc_insn_get_reg(ctxt);
                if (!reg_data)
                        return ES_DECODE_FAILED;

                memcpy(ghcb->shared_buffer, reg_data, bytes);

                ret = vc_do_mmio(ghcb, ctxt, bytes, false);
                break;
        case 0xc6:
                bytes = 1;
                fallthrough;
        case 0xc7:
                if (!bytes)
                        bytes = insn->opnd_bytes;

                memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);

                ret = vc_do_mmio(ghcb, ctxt, bytes, false);
                break;

                /* MMIO Read */
        case 0x8a:
                bytes = 1;
                fallthrough;
        case 0x8b:
                if (!bytes)
                        bytes = insn->opnd_bytes;

                ret = vc_do_mmio(ghcb, ctxt, bytes, true);
                if (ret)
                        break;

                reg_data = vc_insn_get_reg(ctxt);
                if (!reg_data)
                        return ES_DECODE_FAILED;

                /* Zero-extend for 32-bit operation */
                if (bytes == 4)
                        *reg_data = 0;

                memcpy(reg_data, ghcb->shared_buffer, bytes);
                break;

                /* MOVS instruction */
        case 0xa4:
                bytes = 1;
                fallthrough;
        case 0xa5:
                if (!bytes)
                        bytes = insn->opnd_bytes;

                ret = vc_handle_mmio_movs(ctxt, bytes);
                break;

                /* Two-Byte Opcodes */
        case 0x0f:
                ret = vc_handle_mmio_twobyte_ops(ghcb, ctxt);
                break;
        default:
                ret = ES_UNSUPPORTED;
        }

        return ret;
}
static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
                                          struct es_em_ctxt *ctxt)
{
        struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
        long val, *reg = vc_insn_get_rm(ctxt);
        enum es_result ret;

        if (!reg)
                return ES_DECODE_FAILED;

        val = *reg;

        /* Upper 32 bits must be written as zeroes */
        if (val >> 32) {
                ctxt->fi.vector = X86_TRAP_GP;
                ctxt->fi.error_code = 0;
                return ES_EXCEPTION;
        }

        /* Clear out other reserved bits and set bit 10 */
        val = (val & 0xffff23ffL) | BIT(10);

        /* Early non-zero writes to DR7 are not supported */
        if (!data && (val & ~DR7_RESET_VALUE))
                return ES_UNSUPPORTED;

        /* Using a value of 0 for ExitInfo1 means RAX holds the value */
        ghcb_set_rax(ghcb, val);
        ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
        if (ret != ES_OK)
                return ret;

        if (data)
                data->dr7 = val;

        return ES_OK;
}
static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
                                         struct es_em_ctxt *ctxt)
{
        struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
        long *reg = vc_insn_get_rm(ctxt);

        if (!reg)
                return ES_DECODE_FAILED;

        if (data)
                *reg = data->dr7;
        else
                *reg = DR7_RESET_VALUE;

        return ES_OK;
}
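/*
 * DR7 accesses are thus fully virtualized: writes go to the hypervisor and
 * are cached in data->dr7, and reads return the cached value (or the reset
 * value before the per-CPU data exists). The value never reaches the
 * hardware DR7.
 */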
static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
                                       struct es_em_ctxt *ctxt)
{
        return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
}
static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
        enum es_result ret;

        ghcb_set_rcx(ghcb, ctxt->regs->cx);

        ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
        if (ret != ES_OK)
                return ret;

        if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
                return ES_VMM_ERROR;

        ctxt->regs->ax = ghcb->save.rax;
        ctxt->regs->dx = ghcb->save.rdx;

        return ES_OK;
}
static enum es_result vc_handle_monitor(struct ghcb *ghcb,
                                        struct es_em_ctxt *ctxt)
{
        /*
         * Treat it as a NOP and do not leak a physical address to the
         * hypervisor.
         */
        return ES_OK;
}
static enum es_result vc_handle_mwait(struct ghcb *ghcb,
                                      struct es_em_ctxt *ctxt)
{
        /* Treat the same as MONITOR/MONITORX */
        return ES_OK;
}
static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
                                        struct es_em_ctxt *ctxt)
{
        enum es_result ret;

        ghcb_set_rax(ghcb, ctxt->regs->ax);
        ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);

        if (x86_platform.hyper.sev_es_hcall_prepare)
                x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);

        ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
        if (ret != ES_OK)
                return ret;

        if (!ghcb_rax_is_valid(ghcb))
                return ES_VMM_ERROR;

        ctxt->regs->ax = ghcb->save.rax;

        /*
         * Call sev_es_hcall_finish() after regs->ax is already set.
         * This allows the hypervisor handler to overwrite it again if
         * necessary.
         */
        if (x86_platform.hyper.sev_es_hcall_finish &&
            !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
                return ES_VMM_ERROR;

        return ES_OK;
}
static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
                                        struct es_em_ctxt *ctxt)
{
        /*
         * Calling exc_alignment_check() directly does not work, because it
         * enables IRQs and the GHCB is active. Forward the exception and call
         * it later from vc_forward_exception().
         */
        ctxt->fi.vector = X86_TRAP_AC;
        ctxt->fi.error_code = 0;
        return ES_EXCEPTION;
}
static __always_inline void vc_handle_trap_db(struct pt_regs *regs)
{
        if (user_mode(regs))
                noist_exc_debug(regs);
        else
                exc_debug(regs);
}
static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
                                         struct ghcb *ghcb,
                                         unsigned long exit_code)
{
        enum es_result result;

        switch (exit_code) {
        case SVM_EXIT_READ_DR7:
                result = vc_handle_dr7_read(ghcb, ctxt);
                break;
        case SVM_EXIT_WRITE_DR7:
                result = vc_handle_dr7_write(ghcb, ctxt);
                break;
        case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
                result = vc_handle_trap_ac(ghcb, ctxt);
                break;
        case SVM_EXIT_RDTSC:
        case SVM_EXIT_RDTSCP:
                result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
                break;
        case SVM_EXIT_RDPMC:
                result = vc_handle_rdpmc(ghcb, ctxt);
                break;
        case SVM_EXIT_INVD:
                pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
                result = ES_UNSUPPORTED;
                break;
        case SVM_EXIT_CPUID:
                result = vc_handle_cpuid(ghcb, ctxt);
                break;
        case SVM_EXIT_IOIO:
                result = vc_handle_ioio(ghcb, ctxt);
                break;
        case SVM_EXIT_MSR:
                result = vc_handle_msr(ghcb, ctxt);
                break;
        case SVM_EXIT_VMMCALL:
                result = vc_handle_vmmcall(ghcb, ctxt);
                break;
        case SVM_EXIT_WBINVD:
                result = vc_handle_wbinvd(ghcb, ctxt);
                break;
        case SVM_EXIT_MONITOR:
                result = vc_handle_monitor(ghcb, ctxt);
                break;
        case SVM_EXIT_MWAIT:
                result = vc_handle_mwait(ghcb, ctxt);
                break;
        case SVM_EXIT_NPF:
                result = vc_handle_mmio(ghcb, ctxt);
                break;
        default:
                /*
                 * Unexpected #VC exception
                 */
                result = ES_UNSUPPORTED;
        }

        return result;
}
static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
{
        long error_code = ctxt->fi.error_code;
        int trapnr = ctxt->fi.vector;

        ctxt->regs->orig_ax = ctxt->fi.error_code;

        switch (trapnr) {
        case X86_TRAP_GP:
                exc_general_protection(ctxt->regs, error_code);
                break;
        case X86_TRAP_UD:
                exc_invalid_op(ctxt->regs);
                break;
        case X86_TRAP_AC:
                exc_alignment_check(ctxt->regs, error_code);
                break;
        default:
                pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
                BUG();
        }
}
static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
{
        unsigned long sp = (unsigned long)regs;

        return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
}
/*
 * Main #VC exception handler. It is called when the entry code was able to
 * switch off the IST to a safe kernel stack.
 *
 * With the current implementation it is always possible to switch to a safe
 * stack because #VC exceptions only happen at known places, like intercepted
 * instructions or accesses to MMIO areas/IO ports. They can also happen with
 * code instrumentation when the hypervisor intercepts #DB, but the critical
 * paths are forbidden to be instrumented, so #DB exceptions currently also
 * only happen in safe places.
 */
DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
{
        struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
        irqentry_state_t irq_state;
        struct ghcb_state state;
        struct es_em_ctxt ctxt;
        enum es_result result;
        struct ghcb *ghcb;

        /*
         * Handle #DB before calling into !noinstr code to avoid recursive #DB.
         */
        if (error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB) {
                vc_handle_trap_db(regs);
                return;
        }

        irq_state = irqentry_nmi_enter(regs);
        lockdep_assert_irqs_disabled();
        instrumentation_begin();

        /*
         * This is invoked through an interrupt gate, so IRQs are disabled. The
         * code below might walk page-tables for user or kernel addresses, so
         * keep the IRQs disabled to protect us against concurrent TLB flushes.
         */

        ghcb = sev_es_get_ghcb(&state);
        if (!ghcb) {
                /*
                 * Mark GHCBs inactive so that panic() is able to print the
                 * message.
                 */
                data->ghcb_active        = false;
                data->backup_ghcb_active = false;

                panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
        }

        vc_ghcb_invalidate(ghcb);
        result = vc_init_em_ctxt(&ctxt, regs, error_code);

        if (result == ES_OK)
                result = vc_handle_exitcode(&ctxt, ghcb, error_code);

        sev_es_put_ghcb(&state);

        /* Done - now check the result */
        switch (result) {
        case ES_OK:
                vc_finish_insn(&ctxt);
                break;
        case ES_UNSUPPORTED:
                pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
                                   error_code, regs->ip);
                goto fail;
        case ES_VMM_ERROR:
                pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
                                   error_code, regs->ip);
                goto fail;
        case ES_DECODE_FAILED:
                pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
                                   error_code, regs->ip);
                goto fail;
        case ES_EXCEPTION:
                vc_forward_exception(&ctxt);
                break;
        case ES_RETRY:
                /* Nothing to do */
                break;
        default:
                pr_emerg("Unknown result in %s():%d\n", __func__, result);
                /*
                 * Emulating the instruction which caused the #VC exception
                 * failed - can't continue so print debug information
                 */
                BUG();
        }

out:
        instrumentation_end();
        irqentry_nmi_exit(regs, irq_state);

        return;

fail:
        if (user_mode(regs)) {
                /*
                 * Do not kill the machine if user-space triggered the
                 * exception. Send SIGBUS instead and let user-space deal with
                 * it.
                 */
                force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
        } else {
                pr_emerg("PANIC: Unhandled #VC exception in kernel space (result=%d)\n",
                         result);

                /* Show some debug info */
                show_regs(regs);

                /* Ask hypervisor to sev_es_terminate */
                sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);

                /* If that fails and we get here - just panic */
                panic("Returned from Terminate-Request to Hypervisor\n");
        }

        goto out;
}
/* This handler runs on the #VC fall-back stack. It can cause further #VC exceptions */
DEFINE_IDTENTRY_VC_IST(exc_vmm_communication)
{
        instrumentation_begin();
        panic("Can't handle #VC exception from unsupported context\n");
        instrumentation_end();
}
DEFINE_IDTENTRY_VC(exc_vmm_communication)
{
        if (likely(!on_vc_fallback_stack(regs)))
                safe_stack_exc_vmm_communication(regs, error_code);
        else
                ist_exc_vmm_communication(regs, error_code);
}
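/*
 * Summary of the #VC entry points: the dispatcher above selects the
 * safe-stack variant when the fall-back (VC2) stack is not already in use,
 * and the IST variant - which can only panic - when a #VC exception was
 * raised from the fall-back stack itself.
 */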
bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
{
        unsigned long exit_code = regs->orig_ax;
        struct es_em_ctxt ctxt;
        enum es_result result;

        /* Do initial setup or terminate the guest */
        if (unlikely(boot_ghcb == NULL && !sev_es_setup_ghcb()))
                sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);

        vc_ghcb_invalidate(boot_ghcb);

        result = vc_init_em_ctxt(&ctxt, regs, exit_code);
        if (result == ES_OK)
                result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);

        /* Done - now check the result */
        switch (result) {
        case ES_OK:
                vc_finish_insn(&ctxt);
                break;
        case ES_UNSUPPORTED:
                early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
                             exit_code, regs->ip);
                goto fail;
        case ES_VMM_ERROR:
                early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
                             exit_code, regs->ip);
                goto fail;
        case ES_DECODE_FAILED:
                early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
                             exit_code, regs->ip);
                goto fail;
        case ES_EXCEPTION:
                vc_early_forward_exception(&ctxt);