arch/arm64/kvm/vgic-v3-switch.S
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>

	.text
	.pushsection	.hyp.text, "ax"

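// Everything in this file runs at EL2; it lives in .hyp.text so the KVM
// init code can map it into the hyp address space.
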
/*
 * We store LRs in reverse order to let the CPU deal with streaming
 * access. Use this macro to make it look saner...
 */
#define LR_OFFSET(n)	(VGIC_V3_CPU_LR + (15 - n) * 8)
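// For example, LR_OFFSET(15) is VGIC_V3_CPU_LR + 0 and LR_OFFSET(0) is
// VGIC_V3_CPU_LR + 120, so the descending register reads below store to
// ascending memory addresses.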

/*
 * Save the VGIC CPU state into memory
 * x0: Register pointing to VCPU struct
 * Do not corrupt x1!!!
 */
.macro	save_vgic_v3_state
	// Compute the address of struct vgic_cpu
	add	x3, x0, #VCPU_VGIC_CPU

	// Make sure stores to the GIC via the memory mapped interface
	// are now visible to the system register interface
	dsb	st

	// Save all interesting registers
	mrs_s	x5, ICH_VMCR_EL2
	mrs_s	x6, ICH_MISR_EL2
	mrs_s	x7, ICH_EISR_EL2
	mrs_s	x8, ICH_ELSR_EL2

	str	w5, [x3, #VGIC_V3_CPU_VMCR]
	str	w6, [x3, #VGIC_V3_CPU_MISR]
	str	w7, [x3, #VGIC_V3_CPU_EISR]
	str	w8, [x3, #VGIC_V3_CPU_ELRSR]

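	// Clearing ICH_HCR_EL2 disables the virtual CPU interface while
	// the guest's view of the GIC is saved away.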
	msr_s	ICH_HCR_EL2, xzr

	mrs_s	x21, ICH_VTR_EL2
	mvn	w22, w21
	ubfiz	w23, w22, 2, 4	// w23 = (15 - ListRegs) * 4

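	// ICH_VTR_EL2.ListRegs (bits [4:0]) holds the number of implemented
	// List Registers minus one. Each mrs_s/str below assembles to a
	// single 4-byte instruction, so branching (15 - ListRegs) * 4 bytes
	// past label 1 skips the registers this CPU doesn't implement.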
	adr	x24, 1f
	add	x24, x24, x23
	br	x24

1:
	mrs_s	x20, ICH_LR15_EL2
	mrs_s	x19, ICH_LR14_EL2
	mrs_s	x18, ICH_LR13_EL2
	mrs_s	x17, ICH_LR12_EL2
	mrs_s	x16, ICH_LR11_EL2
	mrs_s	x15, ICH_LR10_EL2
	mrs_s	x14, ICH_LR9_EL2
	mrs_s	x13, ICH_LR8_EL2
	mrs_s	x12, ICH_LR7_EL2
	mrs_s	x11, ICH_LR6_EL2
	mrs_s	x10, ICH_LR5_EL2
	mrs_s	x9, ICH_LR4_EL2
	mrs_s	x8, ICH_LR3_EL2
	mrs_s	x7, ICH_LR2_EL2
	mrs_s	x6, ICH_LR1_EL2
	mrs_s	x5, ICH_LR0_EL2

	adr	x24, 1f
	add	x24, x24, x23
	br	x24

1:
	str	x20, [x3, #LR_OFFSET(15)]
	str	x19, [x3, #LR_OFFSET(14)]
	str	x18, [x3, #LR_OFFSET(13)]
	str	x17, [x3, #LR_OFFSET(12)]
	str	x16, [x3, #LR_OFFSET(11)]
	str	x15, [x3, #LR_OFFSET(10)]
	str	x14, [x3, #LR_OFFSET(9)]
	str	x13, [x3, #LR_OFFSET(8)]
	str	x12, [x3, #LR_OFFSET(7)]
	str	x11, [x3, #LR_OFFSET(6)]
	str	x10, [x3, #LR_OFFSET(5)]
	str	x9, [x3, #LR_OFFSET(4)]
	str	x8, [x3, #LR_OFFSET(3)]
	str	x7, [x3, #LR_OFFSET(2)]
	str	x6, [x3, #LR_OFFSET(1)]
	str	x5, [x3, #LR_OFFSET(0)]

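	// ICH_VTR_EL2.PRIbits (bits [31:29]) encodes the number of priority
	// bits minus one: 5 bits of priority need a single active priority
	// register per group, 6 bits need two, and 7 bits need all four.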
	tbnz	w21, #29, 6f	// 6 bits
	tbz	w21, #30, 5f	// 5 bits
				// 7 bits
	mrs_s	x20, ICH_AP0R3_EL2
	str	w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
	mrs_s	x19, ICH_AP0R2_EL2
	str	w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
6:	mrs_s	x18, ICH_AP0R1_EL2
	str	w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
5:	mrs_s	x17, ICH_AP0R0_EL2
	str	w17, [x3, #VGIC_V3_CPU_AP0R]

	tbnz	w21, #29, 6f	// 6 bits
	tbz	w21, #30, 5f	// 5 bits
				// 7 bits
	mrs_s	x20, ICH_AP1R3_EL2
	str	w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
	mrs_s	x19, ICH_AP1R2_EL2
	str	w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
6:	mrs_s	x18, ICH_AP1R1_EL2
	str	w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
5:	mrs_s	x17, ICH_AP1R0_EL2
	str	w17, [x3, #VGIC_V3_CPU_AP1R]

	// Restore SRE_EL1 access and re-enable SRE at EL1.
	mrs_s	x5, ICC_SRE_EL2
	orr	x5, x5, #ICC_SRE_EL2_ENABLE
	msr_s	ICC_SRE_EL2, x5
	isb
	mov	x5, #1
	msr_s	ICC_SRE_EL1, x5
.endm

/*
 * Restore the VGIC CPU state from memory
 * x0: Register pointing to VCPU struct
 */
.macro	restore_vgic_v3_state
	// Compute the address of struct vgic_cpu
	add	x3, x0, #VCPU_VGIC_CPU

	// Restore all interesting registers
	ldr	w4, [x3, #VGIC_V3_CPU_HCR]
	ldr	w5, [x3, #VGIC_V3_CPU_VMCR]
	ldr	w25, [x3, #VGIC_V3_CPU_SRE]

	msr_s	ICC_SRE_EL1, x25

	// make sure SRE is valid before writing the other registers
	isb

	msr_s	ICH_HCR_EL2, x4
	msr_s	ICH_VMCR_EL2, x5

	mrs_s	x21, ICH_VTR_EL2

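	// Same PRIbits decode as on the save path, with the active priority
	// registers written back in the reverse order (AP1Rn before AP0Rn).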
	tbnz	w21, #29, 6f	// 6 bits
	tbz	w21, #30, 5f	// 5 bits
				// 7 bits
	ldr	w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
	msr_s	ICH_AP1R3_EL2, x20
	ldr	w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
	msr_s	ICH_AP1R2_EL2, x19
6:	ldr	w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
	msr_s	ICH_AP1R1_EL2, x18
5:	ldr	w17, [x3, #VGIC_V3_CPU_AP1R]
	msr_s	ICH_AP1R0_EL2, x17

	tbnz	w21, #29, 6f	// 6 bits
	tbz	w21, #30, 5f	// 5 bits
				// 7 bits
	ldr	w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
	msr_s	ICH_AP0R3_EL2, x20
	ldr	w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
	msr_s	ICH_AP0R2_EL2, x19
6:	ldr	w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
	msr_s	ICH_AP0R1_EL2, x18
5:	ldr	w17, [x3, #VGIC_V3_CPU_AP0R]
	msr_s	ICH_AP0R0_EL2, x17

	// Dead "and w22, w21, #0xf" dropped: mvn overwrites w22, and ubfiz
	// only consumes the low four bits anyway.
	mvn	w22, w21
	ubfiz	w23, w22, 2, 4	// w23 = (15 - ListRegs) * 4

	adr	x24, 1f
	add	x24, x24, x23
	br	x24

1:
	ldr	x20, [x3, #LR_OFFSET(15)]
	ldr	x19, [x3, #LR_OFFSET(14)]
	ldr	x18, [x3, #LR_OFFSET(13)]
	ldr	x17, [x3, #LR_OFFSET(12)]
	ldr	x16, [x3, #LR_OFFSET(11)]
	ldr	x15, [x3, #LR_OFFSET(10)]
	ldr	x14, [x3, #LR_OFFSET(9)]
	ldr	x13, [x3, #LR_OFFSET(8)]
	ldr	x12, [x3, #LR_OFFSET(7)]
	ldr	x11, [x3, #LR_OFFSET(6)]
	ldr	x10, [x3, #LR_OFFSET(5)]
	ldr	x9, [x3, #LR_OFFSET(4)]
	ldr	x8, [x3, #LR_OFFSET(3)]
	ldr	x7, [x3, #LR_OFFSET(2)]
	ldr	x6, [x3, #LR_OFFSET(1)]
	ldr	x5, [x3, #LR_OFFSET(0)]

	adr	x24, 1f
	add	x24, x24, x23
	br	x24

1:
	msr_s	ICH_LR15_EL2, x20
	msr_s	ICH_LR14_EL2, x19
	msr_s	ICH_LR13_EL2, x18
	msr_s	ICH_LR12_EL2, x17
	msr_s	ICH_LR11_EL2, x16
	msr_s	ICH_LR10_EL2, x15
	msr_s	ICH_LR9_EL2, x14
	msr_s	ICH_LR8_EL2, x13
	msr_s	ICH_LR7_EL2, x12
	msr_s	ICH_LR6_EL2, x11
	msr_s	ICH_LR5_EL2, x10
	msr_s	ICH_LR4_EL2, x9
	msr_s	ICH_LR3_EL2, x8
	msr_s	ICH_LR2_EL2, x7
	msr_s	ICH_LR1_EL2, x6
	msr_s	ICH_LR0_EL2, x5

	// Ensure that the above will have reached the
	// (re)distributors. This ensures the guest will read
	// the correct values from the memory-mapped interface.
	isb
	dsb	sy

	// Prevent the guest from touching the GIC system registers
	// if SRE isn't enabled for GICv3 emulation
	cbnz	x25, 1f
	mrs_s	x5, ICC_SRE_EL2
	and	x5, x5, #~ICC_SRE_EL2_ENABLE
	msr_s	ICC_SRE_EL2, x5
1:
.endm

ENTRY(__save_vgic_v3_state)
	save_vgic_v3_state
	ret
ENDPROC(__save_vgic_v3_state)

ENTRY(__restore_vgic_v3_state)
	restore_vgic_v3_state
	ret
ENDPROC(__restore_vgic_v3_state)

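// Let the host read ICH_VTR_EL2 (only accessible at EL2) so it can
// discover how many List Registers and priority bits are implemented.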
ENTRY(__vgic_v3_get_ich_vtr_el2)
	mrs_s	x0, ICH_VTR_EL2
	ret
ENDPROC(__vgic_v3_get_ich_vtr_el2)

	.popsection