#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

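/*
 * Upper bound on the size of a single patched instruction sequence; it
 * sizes the on-stack buffers used by apply_alternatives() and
 * apply_paravirt() below.
 */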
#define MAX_PATCH_LEN (255-1)

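/*
 * With CONFIG_HOTPLUG_CPU the lock-prefix tables must be kept around so
 * code can be re-patched as CPUs come and go; booting with "smp-alt-boot"
 * forces the old patch-once-at-boot behaviour.  Without CPU hotplug the
 * CPU count can never change, so patching once is always sufficient.
 */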
#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
        smp_alt_once = 1;
        return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
        debug_alternative = 1;
        return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
        noreplace_smp = 1;
        return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
        noreplace_paravirt = 1;
        return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...)                                   \
do {                                                            \
        if (debug_alternative)                                  \
                printk(KERN_DEBUG fmt, args);                   \
} while (0)

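/*
 * Each *_nops[] table below is indexed by length: entry n points at an
 * n-byte NOP encoding for that CPU family, up to ASM_NOP_MAX bytes.
 * For example (illustrative only), copying a 3-byte NOP would be:
 *
 *      memcpy(buf, intel_nops[3], 3);
 */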
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
        GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
        GENERIC_NOP7 GENERIC_NOP8
    "\t.previous");
extern const unsigned char intelnops[];
static const unsigned char *const __initconst_or_module
intel_nops[ASM_NOP_MAX+1] = {
        NULL,
        intelnops,
        intelnops + 1,
        intelnops + 1 + 2,
        intelnops + 1 + 2 + 3,
        intelnops + 1 + 2 + 3 + 4,
        intelnops + 1 + 2 + 3 + 4 + 5,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
        K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
        K8_NOP7 K8_NOP8
    "\t.previous");
extern const unsigned char k8nops[];
static const unsigned char *const __initconst_or_module
k8_nops[ASM_NOP_MAX+1] = {
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
        K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
        K7_NOP7 K7_NOP8
    "\t.previous");
extern const unsigned char k7nops[];
static const unsigned char *const __initconst_or_module
k7_nops[ASM_NOP_MAX+1] = {
        NULL,
        k7nops,
        k7nops + 1,
        k7nops + 1 + 2,
        k7nops + 1 + 2 + 3,
        k7nops + 1 + 2 + 3 + 4,
        k7nops + 1 + 2 + 3 + 4 + 5,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef P6_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
        P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
        P6_NOP7 P6_NOP8
    "\t.previous");
extern const unsigned char p6nops[];
static const unsigned char *const __initconst_or_module
p6_nops[ASM_NOP_MAX+1] = {
        NULL,
        p6nops,
        p6nops + 1,
        p6nops + 1 + 2,
        p6nops + 1 + 2 + 3,
        p6nops + 1 + 2 + 3 + 4,
        p6nops + 1 + 2 + 3 + 4 + 5,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

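/*
 * Pick the NOP table for the boot CPU.  On 64-bit only the K8 and P6
 * (multi-byte NOPL, selected by X86_FEATURE_NOPL) encodings matter;
 * 32-bit also considers the K7 and generic tables.
 */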
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static const unsigned char *const *__init_or_module find_nop_table(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_has(X86_FEATURE_NOPL))
                return p6_nops;
        else
                return k8_nops;
}

#else /* CONFIG_X86_64 */

static const unsigned char *const *__init_or_module find_nop_table(void)
{
        if (boot_cpu_has(X86_FEATURE_K8))
                return k8_nops;
        else if (boot_cpu_has(X86_FEATURE_K7))
                return k7_nops;
        else if (boot_cpu_has(X86_FEATURE_NOPL))
                return p6_nops;
        else
                return intel_nops;
}

#endif /* CONFIG_X86_64 */

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
        const unsigned char *const *noptable = find_nop_table();

        while (len > 0) {
                unsigned int noplen = len;
                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;
                memcpy(insns, noptable[noplen], noplen);
                insns += noplen;
                len -= noplen;
        }
}
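
/*
 * add_nops() pads with the longest NOPs first: a 10-byte hole becomes
 * one ASM_NOP_MAX (8-byte) NOP plus a 2-byte NOP rather than ten
 * single-byte ones, keeping the number of instructions the CPU must
 * decode small.
 */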

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
static void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code.  This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough.  Make sure you disable such features by hand. */

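/*
 * Entries in this table come from the alternative() family of macros in
 * <asm/alternative.h>.  As a rough sketch (see that header for the exact
 * arguments), a call site looks like:
 *
 *      alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * which records the original instruction, the replacement, and the CPUID
 * feature bit that selects it in a struct alt_instr.
 */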
void __init_or_module apply_alternatives(struct alt_instr *start,
                                         struct alt_instr *end)
{
        struct alt_instr *a;
        u8 insnbuf[MAX_PATCH_LEN];

        DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
        for (a = start; a < end; a++) {
                u8 *instr = a->instr;
                BUG_ON(a->replacementlen > a->instrlen);
                BUG_ON(a->instrlen > sizeof(insnbuf));
                if (!boot_cpu_has(a->cpuid))
                        continue;
#ifdef CONFIG_X86_64
                /* vsyscall code is not mapped yet. Resolve it manually. */
                if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
                        instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
                        DPRINTK("%s: vsyscall fixup: %p => %p\n",
                                __func__, a->instr, instr);
                }
#endif
                memcpy(insnbuf, a->replacement, a->replacementlen);
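                /*
                 * 0xe8 is a near CALL with a rel32 displacement.  That
                 * displacement was computed relative to the replacement
                 * site, so re-base it for the address being patched.
                 */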
                if (*insnbuf == 0xe8 && a->replacementlen == 5)
                        *(s32 *)(insnbuf + 1) += a->replacement - a->instr;
                add_nops(insnbuf + a->replacementlen,
                         a->instrlen - a->replacementlen);
                text_poke_early(instr, insnbuf, a->instrlen);
        }
}

#ifdef CONFIG_SMP

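/*
 * On a uniprocessor kernel the LOCK prefixes emitted for SMP-safe atomics
 * are pure overhead.  The .smp_locks section records the address of every
 * such prefix; these helpers rewrite LOCK (0xf0) to a DS segment override
 * (0x3e), which has no effect on these instructions, and back again.
 */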
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
        u8 **ptr;

        mutex_lock(&text_mutex);
        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
                /* turn DS segment override prefix into lock prefix */
                text_poke(*ptr, ((unsigned char []){0xf0}), 1);
        }
        mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
        u8 **ptr;

        if (noreplace_smp)
                return;

        mutex_lock(&text_mutex);
        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
                /* turn lock prefix into DS segment override prefix */
                text_poke(*ptr, ((unsigned char []){0x3E}), 1);
        }
        mutex_unlock(&text_mutex);
}

struct smp_alt_module {
        /* the module that owns these lock prefixes (NULL for the core kernel) */
        struct module *mod;
        char *name;

        /* ptrs to lock prefixes */
        u8 **locks;
        u8 **locks_end;

        /* .text segment, needed to avoid patching init code ;) */
        u8 *text;
        u8 *text_end;

        struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1; /* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
                                                  char *name,
                                                  void *locks, void *locks_end,
                                                  void *text, void *text_end)
{
        struct smp_alt_module *smp;

        if (noreplace_smp)
                return;

        if (smp_alt_once) {
                if (boot_cpu_has(X86_FEATURE_UP))
                        alternatives_smp_unlock(locks, locks_end,
                                                text, text_end);
                return;
        }

        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
        if (!smp)
                return; /* we'll run the (safe but slow) SMP code then ... */

        smp->mod = mod;
        smp->name = name;
        smp->locks = locks;
        smp->locks_end = locks_end;
        smp->text = text;
        smp->text_end = text_end;
        DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
                __func__, smp->locks, smp->locks_end,
                smp->text, smp->text_end, smp->name);

        mutex_lock(&smp_alt);
        list_add_tail(&smp->next, &smp_alt_modules);
        if (boot_cpu_has(X86_FEATURE_UP))
                alternatives_smp_unlock(smp->locks, smp->locks_end,
                                        smp->text, smp->text_end);
        mutex_unlock(&smp_alt);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
        struct smp_alt_module *item;

        if (smp_alt_once || noreplace_smp)
                return;

        mutex_lock(&smp_alt);
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
                list_del(&item->next);
                mutex_unlock(&smp_alt);
                DPRINTK("%s: %s\n", __func__, item->name);
                kfree(item);
                return;
        }
        mutex_unlock(&smp_alt);
}

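/*
 * Flips every registered lock-prefix site between its UP (0x3e) and SMP
 * (0xf0) form; called when the system moves between one and several
 * online CPUs (e.g. from the CPU hotplug paths).
 */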
void alternatives_smp_switch(int smp)
{
        struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
        /*
         * Older binutils section handling bug prevented
         * alternatives-replacement from working reliably.
         *
         * If this still occurs then you should see a hang
         * or crash shortly after this line:
         */
        printk(KERN_INFO "lockdep: fixing up alternatives.\n");
#endif

        if (noreplace_smp || smp_alt_once)
                return;
        BUG_ON(!smp && (num_online_cpus() > 1));

        mutex_lock(&smp_alt);

        /*
         * Avoid unnecessary switches because it forces JIT based VMs to
         * throw away all cached translations, which can be quite costly.
         */
        if (smp == smp_mode) {
                /* nothing */
        } else if (smp) {
                printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_lock(mod->locks, mod->locks_end,
                                              mod->text, mod->text_end);
        } else {
                printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_unlock(mod->locks, mod->locks_end,
                                                mod->text, mod->text_end);
        }
        smp_mode = smp;
        mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
        struct smp_alt_module *mod;
        u8 **ptr;
        u8 *text_start = start;
        u8 *text_end = end;

        list_for_each_entry(mod, &smp_alt_modules, next) {
                if (mod->text > text_end || mod->text_end < text_start)
                        continue;
                for (ptr = mod->locks; ptr < mod->locks_end; ptr++)
                        if (text_start <= *ptr && text_end >= *ptr)
                                return 1;
        }

        return 0;
}
#endif

#ifdef CONFIG_PARAVIRT
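/*
 * Paravirt call sites start life as indirect calls through the pv_*_ops
 * structures.  pv_init_ops.patch() is handed a buffer holding the original
 * instructions and may rewrite it with a direct call or inlined code for
 * whatever hypervisor (or bare metal) the kernel is actually running on.
 */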
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
                                     struct paravirt_patch_site *end)
{
        struct paravirt_patch_site *p;
        char insnbuf[MAX_PATCH_LEN];

        if (noreplace_paravirt)
                return;

        for (p = start; p < end; p++) {
                unsigned int used;

                BUG_ON(p->len > MAX_PATCH_LEN);
                /* prep the buffer with the original instructions */
                memcpy(insnbuf, p->instr, p->len);
                used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
                                         (unsigned long)p->instr, p->len);

                BUG_ON(used > p->len);

                /* Pad the rest with nops */
                add_nops(insnbuf + used, p->len - used);
                text_poke_early(p->instr, insnbuf, p->len);
        }
}
extern struct paravirt_patch_site __start_parainstructions[],
        __stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */

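/*
 * Boot-time entry point: applies all of the patching mechanisms above
 * (alternatives, SMP lock prefixes, paravirt sites) to the core kernel
 * image in one pass, with NMIs suppressed.
 */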
void __init alternative_instructions(void)
{
        /* The patching is not fully atomic, so try to avoid local
           interruptions that might execute the code being patched.
           Other CPUs are not running. */
        stop_nmi();

        /*
         * Don't stop machine check exceptions while patching.
         * MCEs only happen when something got corrupted and in this
         * case we must do something about the corruption.
         * Ignoring it is worse than an unlikely patching race.
         * Also machine checks tend to be broadcast and if one CPU
         * goes into machine check the others follow quickly, so we don't
         * expect a machine check to cause undue problems during
         * code patching.
         */

        apply_alternatives(__alt_instructions, __alt_instructions_end);

        /* switch to patch-once-at-boot-time-only mode and free the
         * tables in case we know the number of CPUs will never
         * change */
#ifdef CONFIG_HOTPLUG_CPU
        if (num_possible_cpus() < 2)
                smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
        if (smp_alt_once) {
                if (num_possible_cpus() == 1) {
                        printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                        set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

                        alternatives_smp_unlock(__smp_locks, __smp_locks_end,
                                                _text, _etext);
                }
        } else {
                alternatives_smp_module_add(NULL, "core kernel",
                                            __smp_locks, __smp_locks_end,
                                            _text, _etext);

                /* Only switch to UP mode if we don't immediately boot others */
                if (num_present_cpus() == 1 || setup_max_cpus <= 1)
                        alternatives_smp_switch(0);
        }
#endif
        apply_paravirt(__parainstructions, __parainstructions_end);

        if (smp_alt_once)
                free_init_pages("SMP alternatives",
                                (unsigned long)__smp_locks,
                                (unsigned long)__smp_locks_end);

        restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions.  And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
static void *__init_or_module text_poke_early(void *addr, const void *opcode,
                                              size_t len)
{
        unsigned long flags;
        local_irq_save(flags);
        memcpy(addr, opcode, len);
        sync_core();
        local_irq_restore(flags);
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write.  It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
        unsigned long flags;
        char *vaddr;
        struct page *pages[2];
        int i;

        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
        } else {
                pages[0] = virt_to_page(addr);
                WARN_ON(!PageReserved(pages[0]));
                pages[1] = virt_to_page(addr + PAGE_SIZE);
        }
        BUG_ON(!pages[0]);
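        /*
         * Kernel text may be mapped read-only (e.g. with DEBUG_RODATA),
         * so rather than writing to it directly, remap the target page(s)
         * writable through the FIX_TEXT_POKE* fixmap slots; two slots
         * cover a write that straddles a page boundary.
         */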
        local_irq_save(flags);
        set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
        if (pages[1])
                set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
        vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
        memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
        clear_fixmap(FIX_TEXT_POKE0);
        if (pages[1])
                clear_fixmap(FIX_TEXT_POKE1);
        local_flush_tlb();
        sync_core();
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        for (i = 0; i < len; i++)
                BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
        local_irq_restore(flags);
        return addr;
}

/*
 * Cross-modifying kernel text with stop_machine().
 * This code was originally taken from the immediate values patching code.
 */
static atomic_t stop_machine_first;
static int wrote_text;

struct text_poke_params {
        void *addr;
        const void *opcode;
        size_t len;
};

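/*
 * Runs on every CPU under stop_machine(): exactly one CPU (the one that
 * wins the atomic_dec_and_test()) performs the poke while the rest spin
 * until wrote_text is set; every CPU then resyncs its instruction stream.
 */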
static int __kprobes stop_machine_text_poke(void *data)
{
        struct text_poke_params *tpp = data;

        if (atomic_dec_and_test(&stop_machine_first)) {
                text_poke(tpp->addr, tpp->opcode, tpp->len);
                smp_wmb();      /* Make sure other cpus see that this has run */
                wrote_text = 1;
        } else {
                while (!wrote_text)
                        cpu_relax();
                smp_mb();       /* Load wrote_text before following execution */
        }

        flush_icache_range((unsigned long)tpp->addr,
                           (unsigned long)tpp->addr + tpp->len);
        return 0;
}

/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify multi-byte instructions by using stop_machine() on SMP.  This
 * allows the caller to poke/set multi-byte text on SMP.  Only code that is
 * never executed from NMI or MCE handlers may be modified this way, since
 * stop_machine() does _not_ protect code against NMI and MCE.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
        struct text_poke_params tpp;

        tpp.addr = addr;
        tpp.opcode = opcode;
        tpp.len = len;
        atomic_set(&stop_machine_first, 1);
        wrote_text = 0;
        stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
        return addr;
}