/*
 * Contains CPU specific branch predictor invalidation sequences
 *
 * Copyright (C) 2018 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/arm-smccc.h>

#include <asm/alternative.h>
#include <asm/mmu.h>

/*
 * hyp_ventry: emit one 128-byte EL2 exception vector slot.
 *
 * Each slot is 32 instructions: 27 nops, then a 5-instruction
 * patchable tail.  Label 1 marks the start of the slot; label 0
 * (the start of the enclosing 2K vector page, defined in
 * generate_vectors) gives the offset of this slot within it.
 */
.macro hyp_ventry
	.align 7
1:	.rept 27
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	/* (1b - 0b) = byte offset of this slot within the vector page */
	b	__kvm_hyp_vector + (1b - 0b)
	nop
	nop
	nop
	nop
alternative_cb_end
.endm
/*
 * generate_vectors: emit one full 2K EL2 vector page -- 16 vector
 * slots of 128 bytes each (see hyp_ventry).  Label 0 anchors the
 * page start so each slot can compute its own offset.
 */
.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure: fail the build if we overflow 2K
.endm
4340ba80 MZ |
64 | |
65 | .text | |
66 | .pushsection .hyp.text, "ax" | |
67 | ||
0f15adbb WD |
68 | .align 11 |
69 | ENTRY(__bp_harden_hyp_vecs_start) | |
4205a89b | 70 | .rept BP_HARDEN_EL2_SLOTS |
f0445dfa | 71 | generate_vectors |
0f15adbb WD |
72 | .endr |
73 | ENTRY(__bp_harden_hyp_vecs_end) | |
ec82b567 | 74 | |
4340ba80 MZ |
75 | .popsection |
76 | ||
/*
 * Qualcomm Falkor link-stack sanitization sequence: 16 self-branching
 * bl instructions overwrite the return-address predictor stack.
 * x29/x30 are preserved around the sequence.
 */
ENTRY(__qcom_hyp_sanitize_link_stack_start)
	stp	x29, x30, [sp, #-16]!
	.rept	16
	bl	. + 4			// call the next instruction: pushes one link-stack entry
	.endr
	ldp	x29, x30, [sp], #16
ENTRY(__qcom_hyp_sanitize_link_stack_end)

/*
 * smccc_workaround_1: invoke the ARM_SMCCC_ARCH_WORKAROUND_1 firmware
 * call (branch-predictor hardening) via \inst (smc or hvc).
 * x0-x3 are caller-visible in this context and are preserved on the
 * stack across the call; sp stays 16-byte aligned (4 * 8 = 32 bytes).
 */
.macro smccc_workaround_1 inst
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	\inst	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
.endm
/*
 * Template sequences copied into the per-CPU hardening slots:
 * one SMC-conduit and one HVC-conduit instance of the workaround.
 */
ENTRY(__smccc_workaround_1_smc_start)
	smccc_workaround_1	smc
ENTRY(__smccc_workaround_1_smc_end)

ENTRY(__smccc_workaround_1_hvc_start)
	smccc_workaround_1	hvc
ENTRY(__smccc_workaround_1_hvc_end)