/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

        .text
        .pushsection    .hyp.idmap.text, "ax"

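        /* VBAR_EL2 requires the vector table to be 2kB (2^11) aligned. */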
        .align  11

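        /*
         * The init vector table. The host reaches us through the
         * 64-bit EL1 synchronous entry (the HVC issued when installing
         * KVM); every other entry is invalid and simply hangs.
         */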
ENTRY(__kvm_hyp_init)
        ventry  __invalid               // Synchronous EL2t
        ventry  __invalid               // IRQ EL2t
        ventry  __invalid               // FIQ EL2t
        ventry  __invalid               // Error EL2t

        ventry  __invalid               // Synchronous EL2h
        ventry  __invalid               // IRQ EL2h
        ventry  __invalid               // FIQ EL2h
        ventry  __invalid               // Error EL2h

        ventry  __do_hyp_init           // Synchronous 64-bit EL1
        ventry  __invalid               // IRQ 64-bit EL1
        ventry  __invalid               // FIQ 64-bit EL1
        ventry  __invalid               // Error 64-bit EL1

        ventry  __invalid               // Synchronous 32-bit EL1
        ventry  __invalid               // IRQ 32-bit EL1
        ventry  __invalid               // FIQ 32-bit EL1
        ventry  __invalid               // Error 32-bit EL1

__invalid:
        b       .

        /*
         * x0: HYP pgd
         * x1: HYP stack
         * x2: HYP vectors
         * x3: per-CPU offset
         */
__do_hyp_init:
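        /*
         * Stub hypercall numbers are all below HVC_STUB_HCALL_NR, while a
         * real init call passes the page-aligned (and therefore much larger)
         * physical address of the HYP pgd in x0, so a single comparison is
         * enough to tell them apart.
         */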
        /* Check for a stub HVC call */
        cmp     x0, #HVC_STUB_HCALL_NR
        b.lo    __kvm_handle_stub_hvc

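        /*
         * Turn the physical address of the HYP pgd into a TTBR0_EL2 value
         * (phys_to_ttbr takes care of the 52-bit PA layout where it applies),
         * and advertise Common-Not-Private translations if the CPU supports
         * them.
         */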
        phys_to_ttbr x4, x0
alternative_if ARM64_HAS_CNP
        orr     x4, x4, #TTBR_CNP_BIT
alternative_else_nop_endif
        msr     ttbr0_el2, x4

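        /*
         * Derive TCR_EL2 from the host's TCR_EL1: keep only the fields that
         * are valid at EL2 (TCR_EL2_MASK) and set the RES1 bits.
         */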
        mrs     x4, tcr_el1
        ldr     x5, =TCR_EL2_MASK
        and     x4, x4, x5
        mov     x5, #TCR_EL2_RES1
        orr     x4, x4, x5

        /*
         * The ID map may be configured to use an extended virtual address
         * range. This is only the case if system RAM is out of range for the
         * currently configured page size and VA_BITS, in which case we will
         * also need the extended virtual range for the HYP ID map, or we won't
         * be able to enable the EL2 MMU.
         *
         * However, at EL2, there is only one TTBR register, and we can't switch
         * between translation tables *and* update TCR_EL2.T0SZ at the same
         * time. Bottom line: we need to use the extended range with *both* our
         * translation tables.
         *
         * So use the same T0SZ value we use for the ID map.
         */
        ldr_l   x5, idmap_t0sz
        bfi     x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

        /*
         * Set the PS bits in TCR_EL2.
         */
        tcr_compute_pa_size x4, #TCR_EL2_PS_SHIFT, x5, x6

        msr     tcr_el2, x4

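        /* Reuse the host's memory attribute encodings at EL2. */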
        mrs     x4, mair_el1
        msr     mair_el2, x4
        isb

        /* Invalidate any stale EL2 TLB entries left behind by the bootloader */
        tlbi    alle2
        dsb     sy

        /*
         * Preserve all the RES1 bits while setting the default flags,
         * as well as the EE bit on BE. Drop the A flag since the compiler
         * is allowed to generate unaligned accesses.
         */
        ldr     x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
CPU_BE( orr     x4, x4, #SCTLR_ELx_EE)
        msr     sctlr_el2, x4
        isb

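        /*
         * The EL2 MMU is now enabled, so the stack pointer we were given
         * (a kernel VA) must be converted to its HYP VA before use.
         */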
        /* Set the stack and new vectors */
        kern_hyp_va     x1
        mov     sp, x1
        msr     vbar_el2, x2

        /* Set tpidr_el2 for use by HYP */
        msr     tpidr_el2, x3

        /* Hello, World! */
        eret
ENDPROC(__kvm_hyp_init)

ENTRY(__kvm_handle_stub_hvc)
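        /*
         * Stub hypercall dispatch: x0 holds the hypercall number
         * (HVC_SOFT_RESTART or HVC_RESET_VECTORS), the remaining registers
         * carry call-specific arguments.
         */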
        cmp     x0, #HVC_SOFT_RESTART
        b.ne    1f

        /* This is where we're about to jump, staying at EL2 */
        msr     elr_el2, x1
        mov     x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
        msr     spsr_el2, x0

        /* Shuffle the arguments, and don't come back */
        mov     x0, x2
        mov     x1, x3
        mov     x2, x4
        b       reset

1:      cmp     x0, #HVC_RESET_VECTORS
        b.ne    1f
reset:
        /*
         * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
         * case we are coming via HVC_SOFT_RESTART.
         */
        mrs     x5, sctlr_el2
        ldr     x6, =SCTLR_ELx_FLAGS
        bic     x5, x5, x6              // Clear SCTLR_ELx_M and the other SCTLR_ELx_FLAGS bits
        pre_disable_mmu_workaround
        msr     sctlr_el2, x5
        isb

        /* Install stub vectors */
        adr_l   x5, __hyp_stub_vectors
        msr     vbar_el2, x5
        mov     x0, xzr
        eret

1:      /* Bad stub call */
        ldr     x0, =HVC_STUB_ERR
        eret

ENDPROC(__kvm_handle_stub_hvc)

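        /* Emit the literal pool used by the ldr x, =<sym> sequences above. */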
        .ltorg

        .popsection