arch/arm64/kernel/bpi.S
/*
 * Contains CPU specific branch predictor invalidation sequences
 *
 * Copyright (C) 2018 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/arm-smccc.h>

#include <asm/alternative.h>
#include <asm/mmu.h>

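/*
 * Each hyp_ventry expands to one 128-byte vector entry: 27 NOPs of
 * scratch space, leaving room for a CPU-specific branch predictor
 * hardening sequence to be installed at runtime, followed by a branch
 * into the corresponding entry of the real KVM vectors.
 */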
.macro hyp_ventry
        .align 7
1:      .rept 27
        nop
        .endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp  x0, x1, [sp, #-16]!
 * movz x0, #(addr & 0xffff)
 * movk x0, #((addr >> 16) & 0xffff), lsl #16
 * movk x0, #((addr >> 32) & 0xffff), lsl #32
 * br   x0
 *
 * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb  kvm_patch_vector_branch
        b       __kvm_hyp_vector + (1b - 0b)
        nop
        nop
        nop
        nop
alternative_cb_end
.endm

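/*
 * Emit one complete EL2 vector table: 16 hyp_ventry entries of
 * 128 bytes each, i.e. exactly 2KB (enforced by the .org below).
 */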
.macro generate_vectors
0:
        .rept 16
        hyp_ventry
        .endr
        .org 0b + SZ_2K         // Safety measure
.endm


        .text
        .pushsection    .hyp.text, "ax"

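/*
 * Reserve BP_HARDEN_EL2_SLOTS hardened vector tables, 2KB-aligned as
 * required for a vector base address (VBAR_EL2).
 */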
        .align  11
ENTRY(__bp_harden_hyp_vecs_start)
        .rept BP_HARDEN_EL2_SLOTS
        generate_vectors
        .endr
ENTRY(__bp_harden_hyp_vecs_end)

        .popsection

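/*
 * Falkor link-stack sanitisation: execute 16 branch-and-link
 * instructions, each simply falling through to the next, so that the
 * return-address predictor is overwritten with harmless entries.
 * x29/x30 are saved and restored since each BL clobbers the link
 * register.
 */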
ENTRY(__qcom_hyp_sanitize_link_stack_start)
        stp     x29, x30, [sp, #-16]!
        .rept   16
        bl      . + 4
        .endr
        ldp     x29, x30, [sp], #16
ENTRY(__qcom_hyp_sanitize_link_stack_end)

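/*
 * Invoke the firmware/hypervisor branch predictor invalidation service:
 * preserve x0-x3 (the registers an SMCCC call may clobber), load the
 * ARM_SMCCC_ARCH_WORKAROUND_1 function ID into w0 and issue the conduit
 * instruction (\inst is either "smc" or "hvc").
 */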
.macro smccc_workaround_1 inst
        sub     sp, sp, #(8 * 4)
        stp     x2, x3, [sp, #(8 * 0)]
        stp     x0, x1, [sp, #(8 * 2)]
        mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_1
        \inst   #0
        ldp     x2, x3, [sp, #(8 * 0)]
        ldp     x0, x1, [sp, #(8 * 2)]
        add     sp, sp, #(8 * 4)
.endm

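/*
 * Two copies of the workaround, one per conduit. The start/end labels
 * bracket each sequence so the CPU errata code can measure it and copy
 * it into a hardening vector slot.
 */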
ENTRY(__smccc_workaround_1_smc_start)
        smccc_workaround_1      smc
ENTRY(__smccc_workaround_1_smc_end)

ENTRY(__smccc_workaround_1_hvc_start)
        smccc_workaround_1      hvc
ENTRY(__smccc_workaround_1_hvc_end)