#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

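/*
 * Note on GDT_ENTRY_INIT(flags, base, limit): the first argument packs
 * the descriptor's flag nibble and access byte.  0xc09b, for example,
 * is G=1/D=1 (4K granularity, 32-bit) plus present, DPL 0, code,
 * execute/read, accessed; 0xa09b swaps D for L (64-bit code).  With
 * G=1, the 0xfffff limit covers the full 4GB address space.
 */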
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too,
	 * since IRET will check the segment types -- kkeil 2000/10/28.
	 * Also sysret mandates a special GDT layout.
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits;
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int __init x86_xsave_setup(char *s)
{
	if (strlen(s))
		return 0;
	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	setup_clear_cpu_cap(X86_FEATURE_XSAVES);
	setup_clear_cpu_cap(X86_FEATURE_AVX);
	setup_clear_cpu_cap(X86_FEATURE_AVX2);
	return 1;
}
__setup("noxsave", x86_xsave_setup);

static int __init x86_xsaveopt_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	return 1;
}
__setup("noxsaveopt", x86_xsaveopt_setup);

static int __init x86_xsaves_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVES);
	return 1;
}
__setup("noxsaves", x86_xsaves_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
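/*
 * EFLAGS.ID (bit 21) is architecturally defined to be toggleable iff
 * the CPU supports CPUID, hence the flag_is_changeable_p() test.
 */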
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
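	/* Setting bit 21 disables the processor serial number (PSN, Pentium III) */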
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	printk(KERN_NOTICE "CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags;

	/* This should have been cleared long ago */
	raw_local_save_flags(eflags);
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		printk(KERN_WARNING
		       "CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
		       x86_cap_flag(df->feature), df->level);
	}
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS];
__u32 cpu_caps_set[NCAPINTS];

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	loadsegment(gs, 0);
	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
	load_stack_canary_segment();
}

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base */
	load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return;

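	/* Leaves 0x80000002..4 each return 16 bytes of the 48-byte brand string */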
	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/*
	 * Remove leading whitespace on Intel processors and trailing
	 * whitespace on AMD processors.
	 */
	memmove(c->x86_model_id, strim(c->x86_model_id), 48);
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	static bool printed;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);
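	/* CPUID.1:EBX[23:16] = maximum number of logical processors per package */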

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
		goto out;
	}

	if (smp_num_siblings <= 1)
		goto out;

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);

out:
	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
		printed = 1;
	}
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	printk_once(KERN_ERR
			"CPU: vendor_id '%s' unknown, using generic init.\n"
			"CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

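/*
 * Leaf 0 returns the vendor string and the maximum basic CPUID level;
 * leaf 1 EAX packs stepping (3:0), model (7:4) and family (11:8), with
 * the extended model/family fields (19:16 and 27:20) folded in below.
 */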
void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;

		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	u32 ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;

		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		u32 eax, ebx, ecx, edx;

		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

		c->x86_capability[9] = ebx;
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		u32 eax, ebx, ecx, edx;

		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[10] = eax;
	}

	/* Additional Intel-defined flags: level 0x0000000F */
	if (c->cpuid_level >= 0x0000000F) {
		u32 eax, ebx, ecx, edx;

		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[11] = edx;
		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
			/* will be overridden if occupancy monitoring exists */
			c->x86_cache_max_rmid = ebx;

			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[12] = edx;
			if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
				c->x86_cache_max_rmid = ecx;
				c->x86_cache_occ_scale = ebx;
			}
		} else {
			c->x86_cache_max_rmid = -1;
			c->x86_cache_occ_scale = -1;
		}
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;

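	/* Leaf 0x80000000 reports the highest extended leaf; sanity-check its range */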
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	init_scattered_cpuid_features(c);
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher.
	 * It's a 486 if we can modify the AC flag.
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* Cyrix could have CPUID enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);
	get_cpu_vendor(c);
	get_cpu_cap(c);
	fpu_detect(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	c->cpu_index = 0;
	filter_cpuid_features(c, false);

	if (this_cpu->c_bsp_init)
		this_cpu->c_bsp_init(c);

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	printk(KERN_INFO "KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect.  In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work.  Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	clear_cpu_cap(c, X86_FEATURE_NOPL);
#else
	set_cpu_cap(c, X86_FEATURE_NOPL);
#endif
}

static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* Cyrix could have CPUID enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	get_model_name(c); /* Default name */

	detect_nopl(c);
}

static void x86_init_cache_qos(struct cpuinfo_x86 *c)
{
	/*
	 * The heavy lifting of max_rmid and cache_occ_scale is handled
	 * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
	 * in case the CQM bits really aren't there in this CPU.
	 */
	if (c != &boot_cpu_data) {
		boot_cpu_data.x86_cache_max_rmid =
			min(boot_cpu_data.x86_cache_max_rmid,
			    c->x86_cache_max_rmid);
	}
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP */
	setup_smep(c);
	setup_smap(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	init_hypervisor(c);
	x86_init_rdrand(c);
	x86_init_cache_qos(c);

	/*
	 * Clear/Set all flags overridden by options; this needs to happen
	 * before the SMP all-CPUs capability AND below.
	 */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

		/* OR, i.e. replicate the bug flags */
		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	struct tss_struct *tss;
	int cpu;

	cpu = get_cpu();
	tss = &per_cpu(cpu_tss, cpu);

	if (!boot_cpu_has(X86_FEATURE_SEP))
		goto out;

	/*
	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
	 * see the big comment in struct x86_hw_tss's definition.
	 */

	tss->x86_tss.ss1 = __KERNEL_CS;
	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);

	wrmsr(MSR_IA32_SYSENTER_ESP,
	      (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
	      0);

	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target, 0);

out:
	put_cpu();
}
#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	init_amd_e400_c1e_mask();
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#endif
	cpu_detect_tlb(&boot_cpu_data);
}

void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
}

struct msr_range {
	unsigned	min;
	unsigned	max;
};

static const struct msr_range msr_range_array[] = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __print_cpu_msr(void)
{
	unsigned index_min, index_max;
	unsigned index;
	u64 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;

		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr;

static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
	return 1;
}
__setup("noclflush", setup_noclflush);

void print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		printk(KERN_CONT "%s ", vendor);

	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);
	else
		printk(KERN_CONT "%d86", c->x86);

	printk(KERN_CONT " (fam: %02x, model: %02x", c->x86, c->x86_model);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT ", stepping: %02x)\n", c->x86_mask);
	else
		printk(KERN_CONT ")\n");

	print_cpu_msr(c);
}

void print_cpu_msr(struct cpuinfo_x86 *c)
{
	if (c->cpu_index < show_msr)
		__print_cpu_msr();
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;

	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;

	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

DEFINE_PER_CPU(unsigned long, kernel_stack) =
	(unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
				    (unsigned long) debug_idt_table };

DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE) __visible;

/*
 * The following percpu variables are hot.  Align current_task to
 * cacheline size such that they fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;

DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry.  Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a bit strange symbiosis.
	 * They both write to the same internal register.  STAR allows
	 * setting CS/DS, but only a 32-bit target; LSTAR sets the
	 * 64-bit rip.
	 */
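	/* STAR[47:32] = kernel CS for SYSCALL, STAR[63:48] = selector base for SYSRET */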
	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);

#ifdef CONFIG_IA32_EMULATION
	wrmsrl(MSR_CSTAR, ia32_cstar_target);
	/*
	 * This only works on Intel CPUs.
	 * On AMD CPUs these MSRs are 32-bit, and the CPU truncates
	 * MSR_IA32_SYSENTER_EIP.  This does not cause SYSENTER to jump
	 * to the wrong location, because AMD doesn't allow SYSENTER in
	 * long mode (either 32- or 64-bit).
	 */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
#else
	wrmsrl(MSR_CSTAR, ignore_sysret);
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
DEFINE_PER_CPU(int, debug_stack_usage);

int is_debug_stack(unsigned long addr)
{
	return __this_cpu_read(debug_stack_usage) ||
		(addr <= __this_cpu_read(debug_stack_addr) &&
		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
}
NOKPROBE_SYMBOL(is_debug_stack);

DEFINE_PER_CPU(u32, debug_idt_ctr);

void debug_stack_set_zero(void)
{
	this_cpu_inc(debug_idt_ctr);
	load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_set_zero);

void debug_stack_reset(void)
{
	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
		return;
	if (this_cpu_dec_return(debug_idt_ctr) == 0)
		load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_reset);

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);
DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);

/*
 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
 * the top of the kernel stack.  Use an extra percpu variable to track the
 * top of the kernel stack directly.
 */
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
	(unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
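		/* (DR4/DR5 alias DR6/DR7 and fault when CR4.DE is set) */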
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */

static void wait_for_master_cpu(int cpu)
{
#ifdef CONFIG_SMP
	/*
	 * wait for ACK from master CPU before continuing
	 * with AP initialization
	 */
	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
		cpu_relax();
#endif
}

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless; this function acts as a
 * 'CPU state barrier', and nothing should get across.
 * A lot of state is already set up in PDA init for 64 bit.
 */
#ifdef CONFIG_X86_64

void cpu_init(void)
{
	struct orig_ist *oist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu = stack_smp_processor_id();
	int i;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	/*
	 * Load microcode on this cpu if a valid microcode is available.
	 * This is the early microcode loading procedure.
	 */
	load_ucode_ap();

	t = &per_cpu(cpu_tss, cpu);
	oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif

	me = current;

	pr_debug("Initializing CPU#%d\n", cpu);

	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_current_idt();

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
	x2apic_setup();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!oist->ist[0]) {
		char *estacks = per_cpu(exception_stacks, cpu);

		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
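			/* ist[] wants the top of each stack, so advance the pointer first */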
			estacks += exception_stack_sizes[v];
			oist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
			if (v == DEBUG_STACK-1)
				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu_init();

	if (is_uv_system())
		uv_cpu_init();
}

#else

void cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	show_ucode_info_early();

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_current_idt();
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu_init();
}
#endif

#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
void warn_pre_alternatives(void)
{
	WARN(1, "You're using static_cpu_has before alternatives have run!\n");
}
EXPORT_SYMBOL_GPL(warn_pre_alternatives);
#endif

inline bool __static_cpu_has_safe(u16 bit)
{
	return boot_cpu_has(bit);
}
EXPORT_SYMBOL_GPL(__static_cpu_has_safe);