#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
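/*
 * For example, DUMP_BYTES(insnbuf, 3, "%px: final_insn: ", instr) emits
 * a single line of the form "ffffffff81000000: final_insn: 8b 05 90"
 * (address value illustrative).
 */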

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
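/*
 * Concretely: intel_nops[4] below points at intelnops + 1 + 2 + 3, the
 * first byte of GENERIC_NOP4, so indexing by the desired nop length
 * yields the right byte sequence; slot ASM_NOP_MAX + 1 holds the
 * 5-byte atomic nop.
 */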
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_HYGON:
		ideal_nops = p6_nops;
		return;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
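/*
 * Example of the chunking below: a 13-byte request with ASM_NOP_MAX == 8
 * is filled with one 8-byte ideal nop followed by one 5-byte ideal nop.
 */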
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

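/*
 * recompute_jump() rebases a near JMP copied from the replacement
 * sequence to the original instruction site: the displacement is
 * recomputed relative to the new location and, when the target fits in
 * a signed byte, the 5-byte JMP rel32 (0xe9) is shrunk to a 2-byte
 * JMP rel8 (0xeb) padded with nops.
 */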
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
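/*
 * optimize_nops() rewrites the single-byte 0x90 nop padding of an
 * alternatives entry whose feature bit is not set with the ideal,
 * multi-byte nops for this CPU.
 */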
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	int i;

	for (i = 0; i < a->padlen; i++) {
		if (instr[i] != 0x90)
			return;
	}

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative CALL; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}
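		/*
		 * Note on the fixup above: the rel32 is taken relative to
		 * the end of the CALL instruction, so moving the instruction
		 * from 'replacement' to 'instr' changes the required
		 * displacement by exactly (replacement - instr).
		 */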

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}

#ifdef CONFIG_SMP
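/*
 * UP<->SMP transition: each LOCK prefix site is recorded in the
 * __smp_locks table as a relative offset to the prefix byte, so the two
 * one-byte encodings can be flipped in place at runtime: 0xf0 (LOCK)
 * for SMP operation, 0x3e (a harmless DS segment override) for the
 * uniprocessor case.
 */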
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}

struct smp_alt_module {
	/* the module owning these locks, NULL for the core kernel */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	const s32 *locks;
	const s32 *locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
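/*
 * (Callers such as the kprobes registration path can use this to avoid
 * placing probes on bytes that SMP-alternatives may still rewrite.)
 */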
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_ops.init.patch(p->instrtype, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the code being patched.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	local_irq_restore(flags);
	sync_core();
	/*
	 * Could also do a CLFLUSH here to speed up CPU recovery; but
	 * that causes hangs on some VIA CPUs.
	 */
	return addr;
}

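/*
 * __text_poke() writes through a temporary, writable kernel alias: the
 * page(s) containing 'addr' are mapped at the FIX_TEXT_POKE0/1 fixmap
 * slots, the bytes are copied via that alias, and the mapping is torn
 * down again, so the kernel text mapping itself never has to be made
 * writable.
 */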
static void *__text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	/*
	 * While the boot memory allocator is running we cannot use struct
	 * pages as they are not yet initialized.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/*
	 * Could also do a CLFLUSH here to speed up CPU recovery; but
	 * that causes hangs on some VIA CPUs.
	 */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(addr, opcode, len);
}

/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Context: should only be used by kgdb, which ensures no other core is
 * running, despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(addr, opcode, len);
}

static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_patching_in_progress.
	 *
	 *	in_progress = TRUE		INT3
	 *	WMB				RMB
	 *	write INT3			if (in_progress)
	 *
	 * Idem for bp_int3_handler.
	 */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}
NOKPROBE_SYMBOL(poke_int3_handler);

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using the int3 breakpoint.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of
 *	  replacing opcode
 *	- sync cores
 */
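/*
 * Illustrative (hypothetical) use: live-patching a 5-byte nop into a
 * 5-byte CALL. While the transient int3 is in place, a CPU hitting the
 * site is diverted to @handler, which would typically just resume
 * execution after the instruction being patched.
 */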
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;

	lockdep_assert_held(&text_mutex);

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * in_progress and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);
	/*
	 * sync_core() implies an smp_mb() and orders this store against
	 * the writing of the new instruction.
	 */
	bp_patching_in_progress = false;

	return addr;
}