/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

20 | #define pr_fmt(fmt) "alternatives: " fmt | |
21 | ||
22 | #include <linux/init.h> | |
23 | #include <linux/cpu.h> | |
24 | #include <asm/cacheflush.h> | |
25 | #include <asm/alternative.h> | |
26 | #include <asm/cpufeature.h> | |
7616fc8b | 27 | #include <asm/insn.h> |
ee78fdc7 | 28 | #include <asm/sections.h> |
e039ee4e AP |
29 | #include <linux/stop_machine.h> |
30 | ||
/*
 * An alt_instr entry stores its original and replacement sequences as
 * self-relative offsets: adding the offset stored in field @f to the
 * address of @f itself yields the absolute pointer.
 */
#define __ALT_PTR(a,f)		((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)

/* Set to 1 (by CPU 0) once all boot-time alternatives have been applied. */
int alternatives_applied;

/* A [begin, end) range of alt_instr entries to patch. */
struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};
41 | ||
7616fc8b MZ |
42 | /* |
43 | * Check if the target PC is within an alternative block. | |
44 | */ | |
45 | static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) | |
46 | { | |
47 | unsigned long replptr; | |
48 | ||
49 | if (kernel_text_address(pc)) | |
50 | return 1; | |
51 | ||
52 | replptr = (unsigned long)ALT_REPL_PTR(alt); | |
53 | if (pc >= replptr && pc <= (replptr + alt->alt_len)) | |
54 | return 0; | |
55 | ||
56 | /* | |
57 | * Branching into *another* alternate sequence is doomed, and | |
58 | * we're not even trying to fix it up. | |
59 | */ | |
60 | BUG(); | |
61 | } | |
62 | ||
/* Round @x down to a multiple of @a (@a must be a power of two). */
#define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))

/*
 * Produce the instruction to write at @insnptr (its final location in the
 * original text), given the replacement instruction at @altinsnptr.
 * PC-relative instructions (immediate branches, adrp) have their offsets
 * adjusted for the new PC; any other literal-using instruction is BUG().
 */
static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
	u32 insn;

	insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target;

		/* Absolute target as seen from the replacement sequence. */
		target = (unsigned long)altinsnptr + offset;

		/*
		 * If we're branching inside the alternate sequence,
		 * do not rewrite the instruction, as it is already
		 * correct. Otherwise, generate the new instruction.
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	} else if (aarch64_insn_is_adrp(insn)) {
		s32 orig_offset, new_offset;
		unsigned long target;

		/*
		 * If we're replacing an adrp instruction, which uses PC-relative
		 * immediate addressing, adjust the offset to reflect the new
		 * PC. adrp operates on 4K aligned addresses.
		 */
		orig_offset = aarch64_insn_adrp_get_offset(insn);
		target = align_down(altinsnptr, SZ_4K) + orig_offset;
		new_offset = target - align_down(insnptr, SZ_4K);
		insn = aarch64_insn_adrp_set_offset(insn, new_offset);
	} else if (aarch64_insn_uses_literal(insn)) {
		/*
		 * Disallow patching unhandled instructions using PC relative
		 * literal addresses
		 */
		BUG();
	}

	return insn;
}
109 | ||
/*
 * Patch every enabled alternative in @alt_region: for each entry whose
 * CPU capability is present, copy the replacement instructions (fixing
 * up PC-relative ones) over the originals, then flush the I-cache for
 * the patched range.
 *
 * @use_linear_alias: when true, write the updated instructions through
 * lm_alias(origptr) instead of origptr itself.
 */
static void __apply_alternatives(void *alt_region, bool use_linear_alias)
{
	struct alt_instr *alt;
	struct alt_region *region = alt_region;
	__le32 *origptr, *replptr, *updptr;

	for (alt = region->begin; alt < region->end; alt++) {
		u32 insn;
		int i, nr_inst;

		/* Skip entries whose CPU capability is not detected. */
		if (!cpus_have_cap(alt->cpufeature))
			continue;

		/* Original and replacement sequences must be the same length. */
		BUG_ON(alt->alt_len != alt->orig_len);

		pr_info_once("patching kernel code\n");

		origptr = ALT_ORIG_PTR(alt);
		replptr = ALT_REPL_PTR(alt);
		updptr = use_linear_alias ? lm_alias(origptr) : origptr;
		nr_inst = alt->alt_len / sizeof(insn);

		/* Patch one 32-bit instruction at a time. */
		for (i = 0; i < nr_inst; i++) {
			insn = get_alt_insn(alt, origptr + i, replptr + i);
			updptr[i] = cpu_to_le32(insn);
		}

		flush_icache_range((uintptr_t)origptr,
				   (uintptr_t)(origptr + nr_inst));
	}
}
141 | ||
/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 *
 * CPU 0 performs all the patching (through the linear alias) and then
 * publishes completion via alternatives_applied; every other CPU spins
 * on that flag and executes an isb() before resuming.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
	struct alt_region region = {
		.begin	= (struct alt_instr *)__alt_instructions,
		.end	= (struct alt_instr *)__alt_instructions_end,
	};

	/* We always have a CPU 0 at this point (__init) */
	if (smp_processor_id()) {
		/* Secondary CPUs: wait for CPU 0 to finish patching. */
		while (!READ_ONCE(alternatives_applied))
			cpu_relax();
		/* Synchronize the instruction stream before continuing. */
		isb();
	} else {
		/* This path must run at most once. */
		BUG_ON(alternatives_applied);
		__apply_alternatives(&region, true);
		/* Barriers provided by the cache flushing */
		WRITE_ONCE(alternatives_applied, 1);
	}

	return 0;
}
167 | ||
168 | void __init apply_alternatives_all(void) | |
169 | { | |
e039ee4e | 170 | /* better not try code patching on a live SMP system */ |
ef5e724b | 171 | stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask); |
932ded4b AP |
172 | } |
173 | ||
174 | void apply_alternatives(void *start, size_t length) | |
175 | { | |
176 | struct alt_region region = { | |
177 | .begin = start, | |
178 | .end = start + length, | |
179 | }; | |
180 | ||
5ea5306c | 181 | __apply_alternatives(®ion, false); |
e039ee4e | 182 | } |