#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
        smp_alt_once = 1;
        return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
        debug_alternative = 1;
        return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
        noreplace_smp = 1;
        return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
        noreplace_paravirt = 1;
        return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

/* Wrapped in do { } while (0) so the macro expands safely inside
   unbraced if/else statements. */
#define DPRINTK(fmt, args...)                                   \
do {                                                            \
        if (debug_alternative)                                  \
                printk(KERN_DEBUG fmt, args);                   \
} while (0)

#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
        GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
        GENERIC_NOP7 GENERIC_NOP8
    "\t.previous");
extern const unsigned char intelnops[];
/* intel_nops[n] points at the encoding of an n-byte nop; entry 0 is
   unused. The k8/k7/p6 tables below follow the same layout. */
static const unsigned char *const __initconst_or_module
intel_nops[ASM_NOP_MAX+1] = {
        NULL,
        intelnops,
        intelnops + 1,
        intelnops + 1 + 2,
        intelnops + 1 + 2 + 3,
        intelnops + 1 + 2 + 3 + 4,
        intelnops + 1 + 2 + 3 + 4 + 5,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
        K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
        K8_NOP7 K8_NOP8
    "\t.previous");
extern const unsigned char k8nops[];
static const unsigned char *const __initconst_or_module
k8_nops[ASM_NOP_MAX+1] = {
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
        K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
        K7_NOP7 K7_NOP8
    "\t.previous");
extern const unsigned char k7nops[];
static const unsigned char *const __initconst_or_module
k7_nops[ASM_NOP_MAX+1] = {
        NULL,
        k7nops,
        k7nops + 1,
        k7nops + 1 + 2,
        k7nops + 1 + 2 + 3,
        k7nops + 1 + 2 + 3 + 4,
        k7nops + 1 + 2 + 3 + 4 + 5,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef P6_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
        P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
        P6_NOP7 P6_NOP8
    "\t.previous");
extern const unsigned char p6nops[];
static const unsigned char *const __initconst_or_module
p6_nops[ASM_NOP_MAX+1] = {
        NULL,
        p6nops,
        p6nops + 1,
        p6nops + 1 + 2,
        p6nops + 1 + 2 + 3,
        p6nops + 1 + 2 + 3 + 4,
        p6nops + 1 + 2 + 3 + 4 + 5,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static const unsigned char *const *__init_or_module find_nop_table(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_has(X86_FEATURE_NOPL))
                return p6_nops;
        else
                return k8_nops;
}

#else /* CONFIG_X86_64 */

static const unsigned char *const *__init_or_module find_nop_table(void)
{
        if (boot_cpu_has(X86_FEATURE_K8))
                return k8_nops;
        else if (boot_cpu_has(X86_FEATURE_K7))
                return k7_nops;
        else if (boot_cpu_has(X86_FEATURE_NOPL))
                return p6_nops;
        else
                return intel_nops;
}

#endif /* CONFIG_X86_64 */

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
        const unsigned char *const *noptable = find_nop_table();

        while (len > 0) {
                unsigned int noplen = len;
                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;
                memcpy(insns, noptable[noplen], noplen);
                insns += noplen;
                len -= noplen;
        }
}

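/*
 * Illustrative sketch (not part of the original file): how add_nops() pads
 * a buffer. With ASM_NOP_MAX == 8, padding 13 bytes copies noptable[8]
 * (one 8-byte nop) and then noptable[5] (one 5-byte nop) from the table
 * picked by find_nop_table(). Guarded with #if 0 so it is never compiled.
 */
#if 0
static void __init add_nops_example(void)
{
        char buf[13];

        /* First iteration emits an 8-byte nop, the second a 5-byte nop. */
        add_nops(buf, sizeof(buf));
}
#endif
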
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
static void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void __init_or_module apply_alternatives(struct alt_instr *start,
                                         struct alt_instr *end)
{
        struct alt_instr *a;
        char insnbuf[MAX_PATCH_LEN];

        DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
        for (a = start; a < end; a++) {
                u8 *instr = a->instr;
                BUG_ON(a->replacementlen > a->instrlen);
                BUG_ON(a->instrlen > sizeof(insnbuf));
                if (!boot_cpu_has(a->cpuid))
                        continue;
#ifdef CONFIG_X86_64
                /* vsyscall code is not mapped yet. resolve it manually. */
                if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
                        instr = __va(instr - (u8 *)VSYSCALL_START +
                                     (u8 *)__pa_symbol(&__vsyscall_0));
                        DPRINTK("%s: vsyscall fixup: %p => %p\n",
                                __func__, a->instr, instr);
                }
#endif
                memcpy(insnbuf, a->replacement, a->replacementlen);
                add_nops(insnbuf + a->replacementlen,
                         a->instrlen - a->replacementlen);
                text_poke_early(instr, insnbuf, a->instrlen);
        }
}

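/*
 * Illustrative sketch (not part of the original file): where alt_instr
 * entries come from. The alternative() macro in <asm/alternative.h> emits
 * the original instruction inline, stores the replacement bytes in
 * .altinstr_replacement, and records a struct alt_instr in
 * .altinstructions. A classic user of this era is the 32-bit memory
 * barrier, patched to MFENCE on CPUs with X86_FEATURE_XMM2 (SSE2):
 */
#if 0
#define mb()    alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#endif
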
#ifdef CONFIG_SMP

static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
        u8 **ptr;

        mutex_lock(&text_mutex);
        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
                /* turn DS segment override prefix into lock prefix */
                text_poke(*ptr, ((unsigned char []){0xf0}), 1);
        }
        mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
        u8 **ptr;

        if (noreplace_smp)
                return;

        mutex_lock(&text_mutex);
        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
                /* turn lock prefix into DS segment override prefix */
                text_poke(*ptr, ((unsigned char []){0x3E}), 1);
        }
        mutex_unlock(&text_mutex);
}

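/*
 * Illustrative sketch (not part of the original file): where the
 * __smp_locks pointers come from. In this kernel era, LOCK_PREFIX in
 * <asm/alternative.h> records the address of every emitted lock prefix
 * in the .smp_locks section, roughly along these lines:
 */
#if 0
#define LOCK_PREFIX \
                ".section .smp_locks,\"a\"\n"   \
                _ASM_ALIGN "\n"                 \
                _ASM_PTR "661f\n" /* address */ \
                ".previous\n"                   \
                "661:\n\tlock; "
#endif
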
struct smp_alt_module {
        /* owning module; NULL for the core kernel */
        struct module *mod;
        char *name;

        /* ptrs to lock prefixes */
        u8 **locks;
        u8 **locks_end;

        /* .text segment, needed to avoid patching init code ;) */
        u8 *text;
        u8 *text_end;

        struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;        /* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
                                                  char *name,
                                                  void *locks, void *locks_end,
                                                  void *text,  void *text_end)
{
        struct smp_alt_module *smp;

        if (noreplace_smp)
                return;

        if (smp_alt_once) {
                if (boot_cpu_has(X86_FEATURE_UP))
                        alternatives_smp_unlock(locks, locks_end,
                                                text, text_end);
                return;
        }

        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
        if (NULL == smp)
                return; /* we'll run the (safe but slow) SMP code then ... */

        smp->mod = mod;
        smp->name = name;
        smp->locks = locks;
        smp->locks_end = locks_end;
        smp->text = text;
        smp->text_end = text_end;
        DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
                __func__, smp->locks, smp->locks_end,
                smp->text, smp->text_end, smp->name);

        mutex_lock(&smp_alt);
        list_add_tail(&smp->next, &smp_alt_modules);
        if (boot_cpu_has(X86_FEATURE_UP))
                alternatives_smp_unlock(smp->locks, smp->locks_end,
                                        smp->text, smp->text_end);
        mutex_unlock(&smp_alt);
}

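/*
 * Illustrative sketch (not part of the original file): the module loader
 * is the expected caller. On x86, module_finalize() locates a module's
 * .smp_locks and .text section headers (the lookup code is elided here)
 * and hands their bounds to alternatives_smp_module_add(), roughly:
 */
#if 0
        if (locks && text) {
                void *lseg = (void *)locks->sh_addr;
                void *tseg = (void *)text->sh_addr;
                alternatives_smp_module_add(me, me->name,
                                            lseg, lseg + locks->sh_size,
                                            tseg, tseg + text->sh_size);
        }
#endif
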
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
        struct smp_alt_module *item;

        if (smp_alt_once || noreplace_smp)
                return;

        mutex_lock(&smp_alt);
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
                list_del(&item->next);
                mutex_unlock(&smp_alt);
                DPRINTK("%s: %s\n", __func__, item->name);
                kfree(item);
                return;
        }
        mutex_unlock(&smp_alt);
}

void alternatives_smp_switch(int smp)
{
        struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
        /*
         * An older binutils section-handling bug prevented
         * alternatives-replacement from working reliably.
         *
         * If this still occurs then you should see a hang
         * or crash shortly after this line:
         */
        printk(KERN_INFO "lockdep: fixing up alternatives.\n");
#endif

        if (noreplace_smp || smp_alt_once)
                return;
        BUG_ON(!smp && (num_online_cpus() > 1));

        mutex_lock(&smp_alt);

        /*
         * Avoid unnecessary switches because it forces JIT based VMs to
         * throw away all cached translations, which can be quite costly.
         */
        if (smp == smp_mode) {
                /* nothing */
        } else if (smp) {
                printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_lock(mod->locks, mod->locks_end,
                                              mod->text, mod->text_end);
        } else {
                printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_unlock(mod->locks, mod->locks_end,
                                                mod->text, mod->text_end);
        }
        smp_mode = smp;
        mutex_unlock(&smp_alt);
}

#endif

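/*
 * Illustrative sketch (not part of the original file): in this era the
 * SMP bringup/teardown paths in arch/x86/kernel/smpboot.c drive the
 * switch. Roughly, bringing a secondary CPU online locks the kernel back
 * into SMP mode, and offlining down to one CPU unlocks it again:
 */
#if 0
        alternatives_smp_switch(1);             /* from native_cpu_up() */

        if (1 == num_online_cpus())
                alternatives_smp_switch(0);     /* from native_cpu_die() */
#endif
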
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
                                     struct paravirt_patch_site *end)
{
        struct paravirt_patch_site *p;
        char insnbuf[MAX_PATCH_LEN];

        if (noreplace_paravirt)
                return;

        for (p = start; p < end; p++) {
                unsigned int used;

                BUG_ON(p->len > MAX_PATCH_LEN);
                /* prep the buffer with the original instructions */
                memcpy(insnbuf, p->instr, p->len);
                used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
                                         (unsigned long)p->instr, p->len);

                BUG_ON(used > p->len);

                /* Pad the rest with nops */
                add_nops(insnbuf + used, p->len - used);
                text_poke_early(p->instr, insnbuf, p->len);
        }
}
extern struct paravirt_patch_site __start_parainstructions[],
        __stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */

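/*
 * Illustrative sketch (not part of the original file): the records walked
 * above. In this kernel era the entries emitted into .parainstructions
 * have roughly this layout (see <asm/paravirt.h> for the authoritative
 * definition):
 */
#if 0
struct paravirt_patch_site {
        u8 *instr;      /* original instructions */
        u8 instrtype;   /* type of this instruction */
        u8 len;         /* length of original instruction */
        u16 clobbers;   /* what registers you may clobber */
};
#endif
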
void __init alternative_instructions(void)
{
        /* The patching is not fully atomic, so try to avoid local
           interruptions that might execute the code being patched.
           Other CPUs are not running. */
        stop_nmi();

        /*
         * Don't stop machine check exceptions while patching.
         * MCEs only happen when something got corrupted and in this
         * case we must do something about the corruption.
         * Ignoring it is worse than an unlikely patching race.
         * Also machine checks tend to be broadcast and if one CPU
         * goes into machine check the others follow quickly, so we don't
         * expect a machine check to cause undue problems during
         * code patching.
         */

        apply_alternatives(__alt_instructions, __alt_instructions_end);

        /* switch to patch-once-at-boottime-only mode and free the
         * tables in case we know the number of CPUs will never ever
         * change */
#ifdef CONFIG_HOTPLUG_CPU
        if (num_possible_cpus() < 2)
                smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
        if (smp_alt_once) {
                if (1 == num_possible_cpus()) {
                        printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                        set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

                        alternatives_smp_unlock(__smp_locks, __smp_locks_end,
                                                _text, _etext);
                }
        } else {
                alternatives_smp_module_add(NULL, "core kernel",
                                            __smp_locks, __smp_locks_end,
                                            _text, _etext);

                /* Only switch to UP mode if we don't immediately boot others */
                if (num_present_cpus() == 1 || setup_max_cpus <= 1)
                        alternatives_smp_switch(0);
        }
#endif
        apply_paravirt(__parainstructions, __parainstructions_end);

        if (smp_alt_once)
                free_init_pages("SMP alternatives",
                                (unsigned long)__smp_locks,
                                (unsigned long)__smp_locks_end);

        restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
static void *__init_or_module text_poke_early(void *addr, const void *opcode,
                                              size_t len)
{
        unsigned long flags;

        local_irq_save(flags);
        memcpy(addr, opcode, len);
        sync_core();
        local_irq_restore(flags);
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
        unsigned long flags;
        char *vaddr;
        struct page *pages[2];
        int i;

        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
        } else {
                pages[0] = virt_to_page(addr);
                WARN_ON(!PageReserved(pages[0]));
                pages[1] = virt_to_page(addr + PAGE_SIZE);
        }
        BUG_ON(!pages[0]);
        local_irq_save(flags);
        set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
        if (pages[1])
                set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
        vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
        memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
        clear_fixmap(FIX_TEXT_POKE0);
        if (pages[1])
                clear_fixmap(FIX_TEXT_POKE1);
        local_flush_tlb();
        sync_core();
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        for (i = 0; i < len; i++)
                BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
        local_irq_restore(flags);
        return addr;
}
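
/*
 * Illustrative sketch (not part of the original file): a typical
 * text_poke() caller. Kprobes-style breakpoint insertion replaces the
 * first opcode byte with INT3 (0xcc) while holding text_mutex, as the
 * comment above requires:
 */
#if 0
static void example_insert_breakpoint(void *addr)
{
        unsigned char int3 = 0xcc;

        mutex_lock(&text_mutex);
        text_poke(addr, &int3, 1);
        mutex_unlock(&text_mutex);
}
#endif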