arch/i386/kernel/alternative.c
[PATCH] x86: Remove noreplacement option
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

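/*
 * Boot options:
 *   smp-alt-boot        patch the SMP alternatives only once at boot and
 *                       free the tables, instead of keeping them around
 *                       for CPU hotplug switching.
 *   debug-alternative   enable the DPRINTK debug output below.
 */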
static int smp_alt_once = 0;
static int debug_alternative = 0;

static int __init bootonly(char *str)
{
        smp_alt_once = 1;
        return 1;
}
static int __init debug_alt(char *str)
{
        debug_alternative = 1;
        return 1;
}

__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);

#define DPRINTK(fmt, args...) if (debug_alternative) \
        printk(KERN_DEBUG fmt, args)

#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
        GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
        GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
        NULL,
        intelnops,
        intelnops + 1,
        intelnops + 1 + 2,
        intelnops + 1 + 2 + 3,
        intelnops + 1 + 2 + 3 + 4,
        intelnops + 1 + 2 + 3 + 4 + 5,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t.data\nk8nops: "
        K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
        K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K7_NOP1
asm("\t.data\nk7nops: "
        K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
        K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
        NULL,
        k7nops,
        k7nops + 1,
        k7nops + 1 + 2,
        k7nops + 1 + 2 + 3,
        k7nops + 1 + 2 + 3 + 4,
        k7nops + 1 + 2 + 3 + 4 + 5,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
        return k8_nops;
}

#else /* CONFIG_X86_64 */

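/*
 * On 32-bit, pick the NOP table that matches the boot CPU; fall back to
 * the generic Intel NOPs when no vendor-specific table applies.
 */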
static struct nop {
        int cpuid;
        unsigned char **noptable;
} noptypes[] = {
        { X86_FEATURE_K8, k8_nops },
        { X86_FEATURE_K7, k7_nops },
        { -1, NULL }
};

static unsigned char** find_nop_table(void)
{
        unsigned char **noptable = intel_nops;
        int i;

        for (i = 0; noptypes[i].cpuid >= 0; i++) {
                if (boot_cpu_has(noptypes[i].cpuid)) {
                        noptable = noptypes[i].noptable;
                        break;
                }
        }
        return noptable;
}

#endif /* CONFIG_X86_64 */

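/*
 * Fill a region of code with NOPs, using the longest NOP forms available
 * (at most ASM_NOP_MAX bytes each) from the table chosen for this CPU.
 */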
static void nop_out(void *insns, unsigned int len)
{
        unsigned char **noptable = find_nop_table();

        while (len > 0) {
                unsigned int noplen = len;
                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;
                memcpy(insns, noptable[noplen], noplen);
                insns += noplen;
                len -= noplen;
        }
}

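/*
 * Boundaries below are provided by the architecture linker script: the
 * alternative-instruction tables, the list of recorded lock-prefix
 * locations, and the SMP-alternatives save area.
 */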
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

extern u8 __smp_alt_begin[], __smp_alt_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code.  This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough.  Make sure you disable such features by hand. */

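/*
 * The entries walked here are emitted into the .altinstructions section
 * by the alternative*() macros in <asm/alternative.h>.  A rough,
 * illustrative example (exact call sites vary):
 *
 *      alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * records the original instruction, its replacement, and the CPU feature
 * bit that decides whether the replacement gets patched in.
 */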
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
        struct alt_instr *a;
        u8 *instr;
        int diff;

        DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
        for (a = start; a < end; a++) {
                BUG_ON(a->replacementlen > a->instrlen);
                if (!boot_cpu_has(a->cpuid))
                        continue;
                instr = a->instr;
#ifdef CONFIG_X86_64
                /* vsyscall code is not mapped yet. resolve it manually. */
                if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
                        instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
                        DPRINTK("%s: vsyscall fixup: %p => %p\n",
                                __FUNCTION__, a->instr, instr);
                }
#endif
                memcpy(instr, a->replacement, a->replacementlen);
                diff = a->instrlen - a->replacementlen;
                nop_out(instr + a->replacementlen, diff);
        }
}

#ifdef CONFIG_SMP

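/*
 * Save the current (SMP) instruction bytes into the spare space after each
 * replacement, so that alternatives_smp_apply() can restore them when
 * switching back from UP to SMP code.
 */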
static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
{
        struct alt_instr *a;

        DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
        for (a = start; a < end; a++) {
                memcpy(a->replacement + a->replacementlen,
                       a->instr,
                       a->instrlen);
        }
}

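/* Restore the previously saved SMP instruction bytes at each site. */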
static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
{
        struct alt_instr *a;

        for (a = start; a < end; a++) {
                memcpy(a->instr,
                       a->replacement + a->replacementlen,
                       a->instrlen);
        }
}

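/*
 * Re-insert the 0xf0 lock prefix at every recorded location that falls
 * inside [text, text_end).
 */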
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
        u8 **ptr;

        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
                **ptr = 0xf0; /* lock prefix */
        }
}

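/*
 * Overwrite each recorded lock prefix inside [text, text_end) with a
 * one-byte NOP, turning locked instructions into their UP forms.
 */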
static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
        u8 **ptr;

        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
                nop_out(*ptr, 1);
        }
}

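/*
 * Per-module bookkeeping of lock-prefix locations, kept on a list so the
 * prefixes can be switched between SMP and UP variants later.
 */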
struct smp_alt_module {
        /* owning module; NULL for the core kernel */
        struct module *mod;
        char *name;

        /* ptrs to lock prefixes */
        u8 **locks;
        u8 **locks_end;

        /* .text segment, needed to avoid patching init code ;) */
        u8 *text;
        u8 *text_end;

        struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);

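/*
 * Register a module's lock-prefix locations.  In patch-once mode the
 * locks are (at most) unlocked immediately and nothing is recorded.
 */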
void alternatives_smp_module_add(struct module *mod, char *name,
                                 void *locks, void *locks_end,
                                 void *text, void *text_end)
{
        struct smp_alt_module *smp;
        unsigned long flags;

        if (smp_alt_once) {
                if (boot_cpu_has(X86_FEATURE_UP))
                        alternatives_smp_unlock(locks, locks_end,
                                                text, text_end);
                return;
        }

        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
        if (NULL == smp)
                return; /* we'll run the (safe but slow) SMP code then ... */

        smp->mod = mod;
        smp->name = name;
        smp->locks = locks;
        smp->locks_end = locks_end;
        smp->text = text;
        smp->text_end = text_end;
        DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
                __FUNCTION__, smp->locks, smp->locks_end,
                smp->text, smp->text_end, smp->name);

        spin_lock_irqsave(&smp_alt, flags);
        list_add_tail(&smp->next, &smp_alt_modules);
        if (boot_cpu_has(X86_FEATURE_UP))
                alternatives_smp_unlock(smp->locks, smp->locks_end,
                                        smp->text, smp->text_end);
        spin_unlock_irqrestore(&smp_alt, flags);
}

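/* Drop a module's lock-prefix bookkeeping from the list. */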
void alternatives_smp_module_del(struct module *mod)
{
        struct smp_alt_module *item;
        unsigned long flags;

        if (smp_alt_once)
                return;

        spin_lock_irqsave(&smp_alt, flags);
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
                list_del(&item->next);
                spin_unlock_irqrestore(&smp_alt, flags);
                DPRINTK("%s: %s\n", __FUNCTION__, item->name);
                kfree(item);
                return;
        }
        spin_unlock_irqrestore(&smp_alt, flags);
}

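/*
 * Switch the core kernel and all registered modules between SMP code
 * (lock prefixes present) and UP code (lock prefixes NOPped out).
 */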
void alternatives_smp_switch(int smp)
{
        struct smp_alt_module *mod;
        unsigned long flags;

#ifdef CONFIG_LOCKDEP
        /*
         * A not yet fixed binutils section handling bug prevents
         * alternatives-replacement from working reliably, so turn
         * it off:
         */
        printk(KERN_INFO "lockdep: not fixing up alternatives.\n");
        return;
#endif

        if (smp_alt_once)
                return;
        BUG_ON(!smp && (num_online_cpus() > 1));

        spin_lock_irqsave(&smp_alt, flags);
        if (smp) {
                printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
                clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
                clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
                alternatives_smp_apply(__smp_alt_instructions,
                                       __smp_alt_instructions_end);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_lock(mod->locks, mod->locks_end,
                                              mod->text, mod->text_end);
        } else {
                printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
                set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
                apply_alternatives(__smp_alt_instructions,
                                   __smp_alt_instructions_end);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_unlock(mod->locks, mod->locks_end,
                                                mod->text, mod->text_end);
        }
        spin_unlock_irqrestore(&smp_alt, flags);
}

#endif

#ifdef CONFIG_PARAVIRT
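/*
 * Let paravirt_ops.patch() rewrite each recorded patch site and pad
 * whatever it did not use with NOPs.
 */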
void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
{
        struct paravirt_patch *p;

        for (p = start; p < end; p++) {
                unsigned int used;

                used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
                                          p->len);
#ifdef CONFIG_DEBUG_PARAVIRT
                {
                        int i;
                        /* Deliberately clobber regs using "not %reg" to find bugs. */
                        for (i = 0; i < 3; i++) {
                                if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
                                        memcpy(p->instr + used, "\xf7\xd0", 2);
                                        p->instr[used+1] |= i;
                                        used += 2;
                                }
                        }
                }
#endif
                /* Pad the rest with nops */
                nop_out(p->instr + used, p->len - used);
        }

        /* Sync to be conservative, in case we patched following instructions */
        sync_core();
}
extern struct paravirt_patch __start_parainstructions[],
        __stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */

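/*
 * Boot-time entry point: apply the CPU-specific alternatives and, on SMP
 * kernels, either patch to UP code once and free the tables or register
 * the core kernel for later SMP/UP switching.
 */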
void __init alternative_instructions(void)
{
        unsigned long flags;

        local_irq_save(flags);
        apply_alternatives(__alt_instructions, __alt_instructions_end);

        /* switch to patch-once-at-boottime-only mode and free the
         * tables in case we know the number of CPUs will never ever
         * change */
#ifdef CONFIG_HOTPLUG_CPU
        if (num_possible_cpus() < 2)
                smp_alt_once = 1;
#else
        smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
        if (smp_alt_once) {
                if (1 == num_possible_cpus()) {
                        printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                        set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
                        set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
                        apply_alternatives(__smp_alt_instructions,
                                           __smp_alt_instructions_end);
                        alternatives_smp_unlock(__smp_locks, __smp_locks_end,
                                                _text, _etext);
                }
                free_init_pages("SMP alternatives",
                                (unsigned long)__smp_alt_begin,
                                (unsigned long)__smp_alt_end);
        } else {
                alternatives_smp_save(__smp_alt_instructions,
                                      __smp_alt_instructions_end);
                alternatives_smp_module_add(NULL, "core kernel",
                                            __smp_locks, __smp_locks_end,
                                            _text, _etext);
                alternatives_smp_switch(0);
        }
#endif
        apply_paravirt(__start_parainstructions, __stop_parainstructions);
        local_irq_restore(flags);
}