#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu-internal.h>
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
        alloc_bootmem_cpumask_var(&cpu_callin_mask);
        alloc_bootmem_cpumask_var(&cpu_callout_mask);
        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        cpu_detect_cache_sizes(c);
#else
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        if (c->cpuid_level == -1) {
                /* No cpuid. It must be an ancient CPU */
                if (c->x86 == 4)
                        strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        strcpy(c->x86_model_id, "386");
        }
#endif
}

static const struct cpu_dev default_cpu = {
        .c_init         = default_init,
        .c_vendor       = "Unknown",
        .c_x86_vendor   = X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
        /*
         * We need valid kernel segments for data and code in long mode too
         * IRET will check the segment types  kkeil 2000/10/28
         * Also sysret mandates a special GDT layout
         *
         * TLS descriptors are currently at a different place compared to i386.
         * Hopefully nobody expects them at a fixed place (Wine?)
         */
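        /*
         * Decoding the GDT_ENTRY_INIT() flag words below: the low byte is
         * the access byte (e.g. 0x9b = present, DPL 0, code segment,
         * execute/read, accessed) and the top nibble holds G/D/L
         * (0xc = 4K granularity + 32-bit, 0xa = 4K granularity + 64-bit
         * long-mode code).
         */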
        [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
        [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
        /*
         * Segments used for calling PnP BIOS have byte granularity.
         * Their code and data segments have fixed 64k limits;
         * the transfer segment sizes are set at run time.
         */
        /* 32-bit code */
        [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
        /* 16-bit code */
        [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(0x0092, 0, 0),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(0x0092, 0, 0),
        /*
         * The APM segments have byte granularity and their bases
         * are set at run time.  All have 64k limits.
         */
        /* 32-bit code */
        [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
        /* 16-bit code */
        [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
        /* data */
        [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(0x4092, 0, 0xffff),

        [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int __init x86_xsave_setup(char *s)
{
        if (strlen(s))
                return 0;
        setup_clear_cpu_cap(X86_FEATURE_XSAVE);
        setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
        setup_clear_cpu_cap(X86_FEATURE_XSAVES);
        setup_clear_cpu_cap(X86_FEATURE_AVX);
        setup_clear_cpu_cap(X86_FEATURE_AVX2);
        return 1;
}
__setup("noxsave", x86_xsave_setup);
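/*
 * Note that "noxsave" must be passed bare (an argument makes the handler
 * above bail out), and that it also clears the AVX bits, since AVX state
 * can only be saved and restored through XSAVE.
 */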

static int __init x86_xsaveopt_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
        return 1;
}
__setup("noxsaveopt", x86_xsaveopt_setup);

static int __init x86_xsaves_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_XSAVES);
        return 1;
}
__setup("noxsaves", x86_xsaves_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
        get_option(&str, &cachesize_override);
        return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_FXSR);
        setup_clear_cpu_cap(X86_FEATURE_XMM);
        return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_SEP);
        return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
        u32 f1, f2;

        /*
         * Cyrix and IDT cpus allow disabling of CPUID
         * so the code below may return different results
         * when it is executed before and after enabling
         * the CPUID. Add "volatile" to not allow gcc to
         * optimize the subsequent calls to this function.
         */
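        /*
         * The sequence below saves EFLAGS, toggles the requested bit via
         * pushl/popfl, reads EFLAGS back to see whether the bit stuck, and
         * then restores the original EFLAGS.  For X86_EFLAGS_ID (bit 21)
         * this is the architectural test for CPUID support, which is what
         * have_cpuid_p() below relies on.
         */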
        asm volatile ("pushfl           \n\t"
                      "pushfl           \n\t"
                      "popl %0          \n\t"
                      "movl %0, %1      \n\t"
                      "xorl %2, %0      \n\t"
                      "pushl %0         \n\t"
                      "popfl            \n\t"
                      "pushfl           \n\t"
                      "popl %0          \n\t"
                      "popfl            \n\t"

                      : "=&r" (f1), "=&r" (f2)
                      : "ir" (flag));

        return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
        return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        unsigned long lo, hi;

        if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
                return;

        /* Disable processor serial number: */
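        /* (setting bit 21 of MSR_IA32_BBL_CR_CTL turns the PSN off) */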

        rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
        lo |= 0x200000;
        wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

        printk(KERN_NOTICE "CPU serial number disabled.\n");
        clear_cpu_cap(c, X86_FEATURE_PN);

        /* Disabling the serial number may affect the cpuid level */
        c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
        disable_x86_serial_nr = 0;
        return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
        return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_SMEP);
        return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_SMEP))
                cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_SMAP);
        return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
        unsigned long eflags;

        /* This should have been cleared long ago */
        raw_local_save_flags(eflags);
        BUG_ON(eflags & X86_EFLAGS_AC);

        if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
                cr4_set_bits(X86_CR4_SMAP);
#else
                cr4_clear_bits(X86_CR4_SMAP);
#endif
        }
}

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
        u32 feature;
        u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
        { X86_FEATURE_MWAIT,            0x00000005 },
        { X86_FEATURE_DCA,              0x00000009 },
        { X86_FEATURE_XSAVE,            0x0000000d },
        { 0, 0 }
};
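/*
 * Example: a hypervisor that caps the reported CPUID level at 4 can still
 * leave X86_FEATURE_MWAIT advertised even though its 0x00000005 leaf is
 * gone; the table above lets filter_cpuid_features() catch and clear it.
 */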

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
        const struct cpuid_dependent_feature *df;

        for (df = cpuid_dependent_features; df->feature; df++) {

                if (!cpu_has(c, df->feature))
                        continue;
                /*
                 * Note: cpuid_level is set to -1 if unavailable, but
                 * extended_cpuid_level is set to 0 if unavailable
                 * and the legitimate extended levels are all negative
                 * when signed; hence the weird messing around with
                 * signs here...
                 */
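                /*
                 * E.g. level 0x0000000d is positive as s32 and is checked
                 * against cpuid_level, while a level such as 0x8000000a is
                 * negative as s32 and is checked (unsigned) against
                 * extended_cpuid_level.
                 */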
                if (!((s32)df->level < 0 ?
                     (u32)df->level > (u32)c->extended_cpuid_level :
                     (s32)df->level > (s32)c->cpuid_level))
                        continue;

                clear_cpu_cap(c, df->feature);
                if (!warn)
                        continue;

                printk(KERN_WARNING
                       "CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
                       x86_cap_flag(df->feature), df->level);
        }
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the model
 * name; in particular, if CPUID levels 0x80000002..4 are supported, it
 * isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        const struct legacy_cpu_model_info *info;

        if (c->x86_model >= 16)
                return NULL;    /* Range check */

        if (!this_cpu)
                return NULL;

        info = this_cpu->legacy_models;

        while (info->family) {
                if (info->family == c->x86)
                        return info->model_names[c->x86_model];
                info++;
        }
#endif
        return NULL;            /* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS];
__u32 cpu_caps_set[NCAPINTS];

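/*
 * The per-cpu area is reached through a segment register: %fs on 32-bit,
 * and on 64-bit through MSR_GS_BASE, which is pointed at this CPU's
 * irq_stack_union.gs_base below.
 */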
void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        loadsegment(fs, __KERNEL_PERCPU);
#else
        loadsegment(gs, 0);
        wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
        load_stack_canary_segment();
}

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
        struct desc_ptr gdt_descr;

        gdt_descr.address = (long)get_cpu_gdt_table(cpu);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
        /* Reload the per-cpu base */

        load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (c->extended_cpuid_level < 0x80000004)
                return;

        v = (unsigned int *)c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /*
         * Intel chips right-justify this string for some dumb reason;
         * undo that brain damage:
         */
        p = q = &c->x86_model_id[0];
        while (*p == ' ')
                p++;
        if (p != q) {
                while (*p)
                        *q++ = *p++;
                while (q <= &c->x86_model_id[48])
                        *q++ = '\0';    /* Zero-pad the rest */
        }
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ebx, ecx, edx, l2size;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
#endif
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
        l2size = ecx >> 16;

#ifdef CONFIG_X86_64
        c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
        /* do processor-specific cache resizing */
        if (this_cpu->legacy_cache_size)
                l2size = this_cpu->legacy_cache_size(c, l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if (l2size == 0)
                return;         /* Again, no L2 cache is possible */
#endif

        c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
        if (this_cpu->c_detect_tlb)
                this_cpu->c_detect_tlb(c);

        pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
                tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
                tlb_lli_4m[ENTRIES]);

        pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
                tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
                tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;
        static bool printed;

        if (!cpu_has(c, X86_FEATURE_HT))
                return;

        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
                return;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
                goto out;
        }

        if (smp_num_siblings <= 1)
                goto out;

        index_msb = get_count_order(smp_num_siblings);
        c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

        smp_num_siblings = smp_num_siblings / c->x86_max_cores;

        index_msb = get_count_order(smp_num_siblings);

        core_bits = get_count_order(c->x86_max_cores);

        c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
                                       ((1 << core_bits) - 1);
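        /*
         * Worked example: 4 logical CPUs per package (CPUID.1 EBX[23:16])
         * and x86_max_cores == 2 leave smp_num_siblings == 2, so the core
         * ID is APIC-ID bit 1 and the package ID starts at bit 2.
         */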

out:
        if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
                printed = 1;
        }
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;
        int i;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (!cpu_devs[i])
                        break;

                if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                    (cpu_devs[i]->c_ident[1] &&
                     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

                        this_cpu = cpu_devs[i];
                        c->x86_vendor = this_cpu->c_x86_vendor;
                        return;
                }
        }

        printk_once(KERN_ERR
                        "CPU: vendor_id '%s' unknown, using generic init.\n" \
                        "CPU: Your system may be unstable.\n", v);

        c->x86_vendor = X86_VENDOR_UNKNOWN;
        this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        c->x86 = 4;
        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;

                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
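                /*
                 * Worked example: tfms == 0x000306c3 gives base family 6,
                 * base model 0xc, stepping 3; family >= 6, so the extended
                 * model nibble (0x3) extends the model to 0x3c.
                 */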
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;

                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xf) << 4;

                if (cap0 & (1<<19)) {
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
                        c->x86_cache_alignment = c->x86_clflush_size;
                }
        }
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;
        u32 ebx;

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 capability, excap;

                cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
                c->x86_capability[0] = capability;
                c->x86_capability[4] = excap;
        }

        /* Additional Intel-defined flags: level 0x00000007 */
        if (c->cpuid_level >= 0x00000007) {
                u32 eax, ebx, ecx, edx;

                cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

                c->x86_capability[9] = ebx;
        }
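        /*
         * (Each x86_capability[] word is one raw 32-bit CPUID register:
         * word 0 is leaf 1 EDX, word 4 is leaf 1 ECX, word 9 is leaf 7
         * EBX, and so on, matching the X86_FEATURE_* bit numbering.)
         */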

        /* Extended state features: level 0x0000000d */
        if (c->cpuid_level >= 0x0000000d) {
                u32 eax, ebx, ecx, edx;

                cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

                c->x86_capability[10] = eax;
        }

        /* Additional Intel-defined flags: level 0x0000000F */
        if (c->cpuid_level >= 0x0000000F) {
                u32 eax, ebx, ecx, edx;

                /* QoS sub-leaf, EAX=0Fh, ECX=0 */
                cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
                c->x86_capability[11] = edx;
                if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
                        /* will be overridden if occupancy monitoring exists */
                        c->x86_cache_max_rmid = ebx;

                        /* QoS sub-leaf, EAX=0Fh, ECX=1 */
                        cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
                        c->x86_capability[12] = edx;
                        if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
                                c->x86_cache_max_rmid = ecx;
                                c->x86_cache_occ_scale = ebx;
                        }
                } else {
                        c->x86_cache_max_rmid = -1;
                        c->x86_cache_occ_scale = -1;
                }
        }

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;

        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
        }

        if (c->extended_cpuid_level >= 0x80000008) {
                u32 eax = cpuid_eax(0x80000008);

                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
#ifdef CONFIG_X86_32
        else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
                c->x86_phys_bits = 36;
#endif

        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);

        init_scattered_cpuid_features(c);
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        int i;

        /*
         * First of all, decide if this is a 486 or higher
         * It's a 486 if we can modify the AC flag
         */
        if (flag_is_changeable_p(X86_EFLAGS_AC))
                c->x86 = 4;
        else
                c->x86 = 3;

        for (i = 0; i < X86_VENDOR_NUM; i++)
                if (cpu_devs[i] && cpu_devs[i]->c_identify) {
                        c->x86_vendor_id[0] = 0;
                        cpu_devs[i]->c_identify(c);
                        if (c->x86_vendor_id[0]) {
                                get_cpu_vendor(c);
                                break;
                        }
                }
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
        c->x86_phys_bits = 36;
        c->x86_virt_bits = 48;
#else
        c->x86_clflush_size = 32;
        c->x86_phys_bits = 32;
        c->x86_virt_bits = 32;
#endif
        c->x86_cache_alignment = c->x86_clflush_size;

        memset(&c->x86_capability, 0, sizeof c->x86_capability);
        c->extended_cpuid_level = 0;

        if (!have_cpuid_p())
                identify_cpu_without_cpuid(c);

        /* Cyrix could have CPUID enabled via c_identify() */
        if (!have_cpuid_p())
                return;

        cpu_detect(c);
        get_cpu_vendor(c);
        get_cpu_cap(c);
        fpu__detect(c);

        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);

        c->cpu_index = 0;
        filter_cpuid_features(c, false);

        if (this_cpu->c_bsp_init)
                this_cpu->c_bsp_init(c);

        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
}

void __init early_cpu_init(void)
{
        const struct cpu_dev *const *cdev;
        int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
        printk(KERN_INFO "KERNEL supported cpus:\n");
#endif

        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
                const struct cpu_dev *cpudev = *cdev;

                if (count >= X86_VENDOR_NUM)
                        break;
                cpu_devs[count] = cpudev;
                count++;

#ifdef CONFIG_PROCESSOR_SELECT
                {
                        unsigned int j;

                        for (j = 0; j < 2; j++) {
                                if (!cpudev->c_ident[j])
                                        continue;
                                printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
                                        cpudev->c_ident[j]);
                        }
                }
#endif
        }
        early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        clear_cpu_cap(c, X86_FEATURE_NOPL);
#else
        set_cpu_cap(c, X86_FEATURE_NOPL);
#endif
}

static void generic_identify(struct cpuinfo_x86 *c)
{
        c->extended_cpuid_level = 0;

        if (!have_cpuid_p())
                identify_cpu_without_cpuid(c);

        /* Cyrix could have CPUID enabled via c_identify() */
        if (!have_cpuid_p())
                return;

        cpu_detect(c);

        get_cpu_vendor(c);

        get_cpu_cap(c);

        if (c->cpuid_level >= 0x00000001) {
                c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
                c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
                c->apicid = c->initial_apicid;
# endif
#endif
                c->phys_proc_id = c->initial_apicid;
        }

        get_model_name(c); /* Default name */

        detect_nopl(c);
}

static void x86_init_cache_qos(struct cpuinfo_x86 *c)
{
        /*
         * The heavy lifting of max_rmid and cache_occ_scale are handled
         * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
         * in case CQM bits really aren't there in this CPU.
         */
        if (c != &boot_cpu_data) {
                boot_cpu_data.x86_cache_max_rmid =
                        min(boot_cpu_data.x86_cache_max_rmid,
                            c->x86_cache_max_rmid);
        }
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
        c->x86_phys_bits = 36;
        c->x86_virt_bits = 48;
#else
        c->cpuid_level = -1;    /* CPUID not detected */
        c->x86_clflush_size = 32;
        c->x86_phys_bits = 32;
        c->x86_virt_bits = 32;
#endif
        c->x86_cache_alignment = c->x86_clflush_size;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        generic_identify(c);

        if (this_cpu->c_identify)
                this_cpu->c_identify(c);

        /* Clear/Set all flags overridden by options, after probe */
        for (i = 0; i < NCAPINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }

#ifdef CONFIG_X86_64
        c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);

        /* Set up SMEP/SMAP */
        setup_smep(c);
        setup_smap(c);

        /*
         * The vendor-specific functions might have changed features.
         * Now we do "generic changes."
         */

        /* Filter out anything that depends on CPUID levels we don't have */
        filter_cpuid_features(c, true);

        /* If the model name is still unset, do table lookup. */
        if (!c->x86_model_id[0]) {
                const char *p;
                p = table_lookup_model(c);
                if (p)
                        strcpy(c->x86_model_id, p);
                else
                        /* Last resort... */
                        sprintf(c->x86_model_id, "%02x/%02x",
                                c->x86, c->x86_model);
        }

#ifdef CONFIG_X86_64
        detect_ht(c);
#endif

        init_hypervisor(c);
        x86_init_rdrand(c);
        x86_init_cache_qos(c);

        /*
         * Clear/Set all flags overridden by options; this must be done
         * before the SMP all-CPUs capability AND below.
         */
        for (i = 0; i < NCAPINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

                /* OR, i.e. replicate the bug flags */
                for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
                        c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
        }

        /* Init Machine Check Exception if available. */
        mcheck_cpu_init(c);

        select_idle_routine(c);

#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}

/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
        struct tss_struct *tss;
        int cpu;

        cpu = get_cpu();
        tss = &per_cpu(cpu_tss, cpu);

        if (!boot_cpu_has(X86_FEATURE_SEP))
                goto out;

        /*
         * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
         * see the big comment in struct x86_hw_tss's definition.
         */

        tss->x86_tss.ss1 = __KERNEL_CS;
        wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);

        wrmsr(MSR_IA32_SYSENTER_ESP,
              (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
              0);

        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target, 0);

out:
        put_cpu();
}
#endif

void __init identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
        init_amd_e400_c1e_mask();
#ifdef CONFIG_X86_32
        sysenter_setup();
        enable_sep_cpu();
#endif
        cpu_detect_tlb(&boot_cpu_data);
}

void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
#ifdef CONFIG_X86_32
        enable_sep_cpu();
#endif
        mtrr_ap_init();
}

struct msr_range {
        unsigned        min;
        unsigned        max;
};

static const struct msr_range msr_range_array[] = {
        { 0x00000000, 0x00000418},
        { 0xc0000000, 0xc000040b},
        { 0xc0010000, 0xc0010142},
        { 0xc0011000, 0xc001103b},
};

static void __print_cpu_msr(void)
{
        unsigned index_min, index_max;
        unsigned index;
        u64 val;
        int i;

        for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
                index_min = msr_range_array[i].min;
                index_max = msr_range_array[i].max;

                for (index = index_min; index < index_max; index++) {
                        if (rdmsrl_safe(index, &val))
                                continue;
                        printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
                }
        }
}

static int show_msr;

static __init int setup_show_msr(char *arg)
{
        int num;

        get_option(&arg, &num);

        if (num > 0)
                show_msr = num;
        return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
        setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
        return 1;
}
__setup("noclflush", setup_noclflush);

void print_cpu_info(struct cpuinfo_x86 *c)
{
        const char *vendor = NULL;

        if (c->x86_vendor < X86_VENDOR_NUM) {
                vendor = this_cpu->c_vendor;
        } else {
                if (c->cpuid_level >= 0)
                        vendor = c->x86_vendor_id;
        }

        if (vendor && !strstr(c->x86_model_id, vendor))
                printk(KERN_CONT "%s ", vendor);

        if (c->x86_model_id[0])
                printk(KERN_CONT "%s", strim(c->x86_model_id));
        else
                printk(KERN_CONT "%d86", c->x86);

        printk(KERN_CONT " (fam: %02x, model: %02x", c->x86, c->x86_model);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT ", stepping: %02x)\n", c->x86_mask);
        else
                printk(KERN_CONT ")\n");

        print_cpu_msr(c);
}

void print_cpu_msr(struct cpuinfo_x86 *c)
{
        if (c->cpu_index < show_msr)
                __print_cpu_msr();
}

static __init int setup_disablecpuid(char *arg)
{
        int bit;

        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        else
                return 0;

        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
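/*
 * Example: booting with "clearcpuid=154" clears raw feature bit 154
 * (word 4, bit 26 of x86_capability[], i.e. X86_FEATURE_XSAVE) before
 * any CPU is brought up; the argument is the raw bit index.
 */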

DEFINE_PER_CPU(unsigned long, kernel_stack) =
        (unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
                                    (unsigned long) debug_idt_table };

DEFINE_PER_CPU_FIRST(union irq_stack_union,
                     irq_stack_union) __aligned(PAGE_SIZE) __visible;

/*
 * The following percpu variables are hot.  Align current_task to
 * cacheline size such that they fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
        &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
        init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;

DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry. Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
          [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
          [DEBUG_STACK - 1]                     = DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
        /*
         * LSTAR and STAR live in a bit strange symbiosis.
         * They both write to the same internal register: STAR allows
         * setting the CS/SS selectors, but only for a 32-bit target,
         * while LSTAR sets the 64-bit rip.
         */
1208 | wrmsrl(MSR_LSTAR, system_call); | |
d56fe4bf IM |
1209 | |
1210 | #ifdef CONFIG_IA32_EMULATION | |
a76c7f46 DV |
1211 | wrmsrl(MSR_CSTAR, ia32_cstar_target); |
1212 | /* | |
487d1edb DV |
1213 | * This only works on Intel CPUs. |
1214 | * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. | |
1215 | * This does not cause SYSENTER to jump to the wrong location, because | |
1216 | * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). | |
a76c7f46 DV |
1217 | */ |
1218 | wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); | |
1219 | wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); | |
1220 | wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target); | |
d56fe4bf IM |
1221 | #else |
1222 | wrmsrl(MSR_CSTAR, ignore_sysret); | |
6b51311c | 1223 | wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); |
d56fe4bf IM |
1224 | wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); |
1225 | wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); | |
d5494d4f | 1226 | #endif |
03ae5768 | 1227 | |
d5494d4f YL |
1228 | /* Flags to clear on syscall */ |
1229 | wrmsrl(MSR_SYSCALL_MASK, | |
63bcff2a | 1230 | X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF| |
8c7aa698 | 1231 | X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT); |
1da177e4 | 1232 | } |
62111195 | 1233 | |
d5494d4f YL |
1234 | /* |
1235 | * Copies of the original ist values from the tss are only accessed during | |
1236 | * debugging, no special alignment required. | |
1237 | */ | |
1238 | DEFINE_PER_CPU(struct orig_ist, orig_ist); | |
1239 | ||
228bdaa9 | 1240 | static DEFINE_PER_CPU(unsigned long, debug_stack_addr); |
42181186 | 1241 | DEFINE_PER_CPU(int, debug_stack_usage); |
228bdaa9 SR |
1242 | |
1243 | int is_debug_stack(unsigned long addr) | |
1244 | { | |
89cbc767 CL |
1245 | return __this_cpu_read(debug_stack_usage) || |
1246 | (addr <= __this_cpu_read(debug_stack_addr) && | |
1247 | addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ)); | |
228bdaa9 | 1248 | } |
0f46efeb | 1249 | NOKPROBE_SYMBOL(is_debug_stack); |
228bdaa9 | 1250 | |
629f4f9d | 1251 | DEFINE_PER_CPU(u32, debug_idt_ctr); |
f8988175 | 1252 | |
228bdaa9 SR |
1253 | void debug_stack_set_zero(void) |
1254 | { | |
629f4f9d SA |
1255 | this_cpu_inc(debug_idt_ctr); |
1256 | load_current_idt(); | |
228bdaa9 | 1257 | } |
0f46efeb | 1258 | NOKPROBE_SYMBOL(debug_stack_set_zero); |
228bdaa9 SR |
1259 | |
1260 | void debug_stack_reset(void) | |
1261 | { | |
629f4f9d | 1262 | if (WARN_ON(!this_cpu_read(debug_idt_ctr))) |
f8988175 | 1263 | return; |
629f4f9d SA |
1264 | if (this_cpu_dec_return(debug_idt_ctr) == 0) |
1265 | load_current_idt(); | |
228bdaa9 | 1266 | } |
0f46efeb | 1267 | NOKPROBE_SYMBOL(debug_stack_reset); |
228bdaa9 | 1268 | |
0f3fa48a | 1269 | #else /* CONFIG_X86_64 */ |
d5494d4f | 1270 | |
bdf977b3 TH |
1271 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; |
1272 | EXPORT_PER_CPU_SYMBOL(current_task); | |
c2daa3be PZ |
1273 | DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; |
1274 | EXPORT_PER_CPU_SYMBOL(__preempt_count); | |
bdf977b3 | 1275 | |
a7fcf28d AL |
1276 | /* |
1277 | * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find | |
1278 | * the top of the kernel stack. Use an extra percpu variable to track the | |
1279 | * top of the kernel stack directly. | |
1280 | */ | |
1281 | DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = | |
1282 | (unsigned long)&init_thread_union + THREAD_SIZE; | |
1283 | EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); | |
1284 | ||
60a5317f | 1285 | #ifdef CONFIG_CC_STACKPROTECTOR |
53f82452 | 1286 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); |
60a5317f | 1287 | #endif |
d5494d4f | 1288 | |
0f3fa48a | 1289 | #endif /* CONFIG_X86_64 */ |
c5413fbe | 1290 | |
9766cdbc JSR |
1291 | /* |
1292 | * Clear all 6 debug registers: | |
1293 | */ | |
1294 | static void clear_all_debug_regs(void) | |
1295 | { | |
1296 | int i; | |
1297 | ||
1298 | for (i = 0; i < 8; i++) { | |
1299 | /* Ignore db4, db5 */ | |
1300 | if ((i == 4) || (i == 5)) | |
1301 | continue; | |
1302 | ||
1303 | set_debugreg(0, i); | |
1304 | } | |
1305 | } | |
c5413fbe | 1306 | |
0bb9fef9 JW |
1307 | #ifdef CONFIG_KGDB |
1308 | /* | |
1309 | * Restore debug regs if using kgdbwait and you have a kernel debugger | |
1310 | * connection established. | |
1311 | */ | |
1312 | static void dbg_restore_debug_regs(void) | |
1313 | { | |
1314 | if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) | |
1315 | arch_kgdb_ops.correct_hw_break(); | |
1316 | } | |
1317 | #else /* ! CONFIG_KGDB */ | |
1318 | #define dbg_restore_debug_regs() | |
1319 | #endif /* ! CONFIG_KGDB */ | |
1320 | ||
ce4b1b16 IM |
1321 | static void wait_for_master_cpu(int cpu) |
1322 | { | |
1323 | #ifdef CONFIG_SMP | |
1324 | /* | |
1325 | * wait for ACK from master CPU before continuing | |
1326 | * with AP initialization | |
1327 | */ | |
1328 | WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)); | |
1329 | while (!cpumask_test_cpu(cpu, cpu_callout_mask)) | |
1330 | cpu_relax(); | |
1331 | #endif | |
1332 | } | |
1333 | ||
d2cbcc49 RR |
1334 | /* |
1335 | * cpu_init() initializes state that is per-CPU. Some data is already | |
1336 | * initialized (naturally) in the bootstrap process, such as the GDT | |
1337 | * and IDT. We reload them nevertheless, this function acts as a | |
1338 | * 'CPU state barrier', nothing should get across. | |
1ba76586 | 1339 | * A lot of state is already set up in PDA init for 64 bit |
d2cbcc49 | 1340 | */ |
1ba76586 | 1341 | #ifdef CONFIG_X86_64 |
0f3fa48a | 1342 | |
148f9bb8 | 1343 | void cpu_init(void) |
1ba76586 | 1344 | { |
0fe1e009 | 1345 | struct orig_ist *oist; |
1ba76586 | 1346 | struct task_struct *me; |
0f3fa48a IM |
1347 | struct tss_struct *t; |
1348 | unsigned long v; | |
ce4b1b16 | 1349 | int cpu = stack_smp_processor_id(); |
1ba76586 YL |
1350 | int i; |
1351 | ||
ce4b1b16 IM |
1352 | wait_for_master_cpu(cpu); |
1353 | ||
1e02ce4c AL |
1354 | /* |
1355 | * Initialize the CR4 shadow before doing anything that could | |
1356 | * try to read it. | |
1357 | */ | |
1358 | cr4_init_shadow(); | |
1359 | ||
e6ebf5de FY |
1360 | /* |
1361 | * Load microcode on this cpu if a valid microcode is available. | |
1362 | * This is early microcode loading procedure. | |
1363 | */ | |
1364 | load_ucode_ap(); | |
1365 | ||
24933b82 | 1366 | t = &per_cpu(cpu_tss, cpu); |
0fe1e009 | 1367 | oist = &per_cpu(orig_ist, cpu); |
0f3fa48a | 1368 | |
e7a22c1e | 1369 | #ifdef CONFIG_NUMA |
27fd185f | 1370 | if (this_cpu_read(numa_node) == 0 && |
e534c7c5 LS |
1371 | early_cpu_to_node(cpu) != NUMA_NO_NODE) |
1372 | set_numa_node(early_cpu_to_node(cpu)); | |
e7a22c1e | 1373 | #endif |
1ba76586 YL |
1374 | |
1375 | me = current; | |
1376 | ||
2eaad1fd | 1377 | pr_debug("Initializing CPU#%d\n", cpu); |
1ba76586 | 1378 | |
375074cc | 1379 | cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); |
1ba76586 YL |
1380 | |
1381 | /* | |
1382 | * Initialize the per-CPU GDT with the boot GDT, | |
1383 | * and set up the GDT descriptor: | |
1384 | */ | |
1385 | ||
552be871 | 1386 | switch_to_new_gdt(cpu); |
2697fbd5 BG |
1387 | loadsegment(fs, 0); |
1388 | ||
cf910e83 | 1389 | load_current_idt(); |
1ba76586 YL |
1390 | |
1391 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); | |
1392 | syscall_init(); | |
1393 | ||
1394 | wrmsrl(MSR_FS_BASE, 0); | |
1395 | wrmsrl(MSR_KERNEL_GS_BASE, 0); | |
1396 | barrier(); | |
1397 | ||
4763ed4d | 1398 | x86_configure_nx(); |
659006bf | 1399 | x2apic_setup(); |
1ba76586 YL |
1400 | |
1401 | /* | |
1402 | * set up and load the per-CPU TSS | |
1403 | */ | |
0fe1e009 | 1404 | if (!oist->ist[0]) { |
92d65b23 | 1405 | char *estacks = per_cpu(exception_stacks, cpu); |
0f3fa48a | 1406 | |
1ba76586 | 1407 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { |
0f3fa48a | 1408 | estacks += exception_stack_sizes[v]; |
0fe1e009 | 1409 | oist->ist[v] = t->x86_tss.ist[v] = |
1ba76586 | 1410 | (unsigned long)estacks; |
228bdaa9 SR |
1411 | if (v == DEBUG_STACK-1) |
1412 | per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks; | |
1ba76586 YL |
1413 | } |
1414 | } | |
1415 | ||
1416 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | |
0f3fa48a | 1417 | |
1ba76586 YL |
1418 | /* |
1419 | * <= is required because the CPU will access up to | |
1420 | * 8 bits beyond the end of the IO permission bitmap. | |
1421 | */ | |
1422 | for (i = 0; i <= IO_BITMAP_LONGS; i++) | |
1423 | t->io_bitmap[i] = ~0UL; | |
1424 | ||
1425 | atomic_inc(&init_mm.mm_count); | |
1426 | me->active_mm = &init_mm; | |
8c5dfd25 | 1427 | BUG_ON(me->mm); |
1ba76586 YL |
1428 | enter_lazy_tlb(&init_mm, me); |
1429 | ||
1430 | load_sp0(t, ¤t->thread); | |
1431 | set_tss_desc(cpu, t); | |
1432 | load_TR_desc(); | |
1433 | load_LDT(&init_mm.context); | |
1434 | ||
0bb9fef9 JW |
1435 | clear_all_debug_regs(); |
1436 | dbg_restore_debug_regs(); | |
1ba76586 | 1437 | |
3a9c4b0d | 1438 | fpu__cpu_init(); |
1ba76586 | 1439 | |
1ba76586 YL |
1440 | if (is_uv_system()) |
1441 | uv_cpu_init(); | |
1442 | } | |
1443 | ||
1444 | #else | |
1445 | ||
148f9bb8 | 1446 | void cpu_init(void) |
9ee79a3d | 1447 | { |
d2cbcc49 RR |
1448 | int cpu = smp_processor_id(); |
1449 | struct task_struct *curr = current; | |
24933b82 | 1450 | struct tss_struct *t = &per_cpu(cpu_tss, cpu); |
9ee79a3d | 1451 | struct thread_struct *thread = &curr->thread; |
62111195 | 1452 | |
ce4b1b16 | 1453 | wait_for_master_cpu(cpu); |
e6ebf5de | 1454 | |
5b2bdbc8 SR |
1455 | /* |
1456 | * Initialize the CR4 shadow before doing anything that could | |
1457 | * try to read it. | |
1458 | */ | |
1459 | cr4_init_shadow(); | |
1460 | ||
ce4b1b16 | 1461 | show_ucode_info_early(); |
62111195 JF |
1462 | |
1463 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); | |
1464 | ||
9298b815 | 1465 | if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de) |
375074cc | 1466 | cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); |
62111195 | 1467 | |
cf910e83 | 1468 | load_current_idt(); |
552be871 | 1469 | switch_to_new_gdt(cpu); |
1da177e4 | 1470 | |
1da177e4 LT |
1471 | /* |
1472 | * Set up and load the per-CPU TSS and LDT | |
1473 | */ | |
1474 | atomic_inc(&init_mm.mm_count); | |
62111195 | 1475 | curr->active_mm = &init_mm; |
8c5dfd25 | 1476 | BUG_ON(curr->mm); |
62111195 | 1477 | enter_lazy_tlb(&init_mm, curr); |
1da177e4 | 1478 | |
faca6227 | 1479 | load_sp0(t, thread); |
34048c9e | 1480 | set_tss_desc(cpu, t); |
1da177e4 LT |
1481 | load_TR_desc(); |
1482 | load_LDT(&init_mm.context); | |
1483 | ||
f9a196b8 TG |
1484 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); |
1485 | ||
22c4e308 | 1486 | #ifdef CONFIG_DOUBLEFAULT |
1da177e4 LT |
1487 | /* Set up doublefault TSS pointer in the GDT */ |
1488 | __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); | |
22c4e308 | 1489 | #endif |
1da177e4 | 1490 | |
9766cdbc | 1491 | clear_all_debug_regs(); |
0bb9fef9 | 1492 | dbg_restore_debug_regs(); |
1da177e4 | 1493 | |
3a9c4b0d | 1494 | fpu__cpu_init(); |
1da177e4 | 1495 | } |
1ba76586 | 1496 | #endif |
5700f743 BP |
1497 | |
1498 | #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS | |
1499 | void warn_pre_alternatives(void) | |
1500 | { | |
1501 | WARN(1, "You're using static_cpu_has before alternatives have run!\n"); | |
1502 | } | |
1503 | EXPORT_SYMBOL_GPL(warn_pre_alternatives); | |
1504 | #endif | |
4a90a99c BP |
1505 | |
1506 | inline bool __static_cpu_has_safe(u16 bit) | |
1507 | { | |
1508 | return boot_cpu_has(bit); | |
1509 | } | |
1510 | EXPORT_SYMBOL_GPL(__static_cpu_has_safe); |