]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #include <linux/init.h> |
2 | #include <linux/kernel.h> | |
3 | ||
4 | #include <linux/string.h> | |
5 | #include <linux/bitops.h> | |
6 | #include <linux/smp.h> | |
7 | #include <linux/thread_info.h> | |
53e86b91 | 8 | #include <linux/module.h> |
1da177e4 LT |
9 | |
10 | #include <asm/processor.h> | |
d72b1b4f | 11 | #include <asm/pgtable.h> |
1da177e4 LT |
12 | #include <asm/msr.h> |
13 | #include <asm/uaccess.h> | |
eee3af4a | 14 | #include <asm/ds.h> |
73bdb73f | 15 | #include <asm/bugs.h> |
1f442d70 | 16 | #include <asm/cpu.h> |
1da177e4 | 17 | |
185f3b9d YL |
18 | #ifdef CONFIG_X86_64 |
19 | #include <asm/topology.h> | |
20 | #include <asm/numa_64.h> | |
21 | #endif | |
22 | ||
1da177e4 LT |
23 | #include "cpu.h" |
24 | ||
25 | #ifdef CONFIG_X86_LOCAL_APIC | |
26 | #include <asm/mpspec.h> | |
27 | #include <asm/apic.h> | |
1da177e4 LT |
28 | #endif |
29 | ||
/*
 * Early Intel CPU setup, run before the generic identification pass.
 * Unmasks CPUID, notes TSC behaviour flags and disables PAT where a
 * known erratum makes it unsafe.
 */
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		u64 misc_enable;

		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			/* Re-read the now-unmasked maximum CPUID level: */
			c->cpuid_level = cpuid_eax(0);
		}
	}

	/*
	 * P4 model >= 3 and P6 model >= 0x0e have a TSC that ticks at a
	 * constant rate regardless of P-state.
	 */
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

#ifdef CONFIG_X86_64
	/* All 64-bit Intel CPUs support SYSENTER from compat mode: */
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);
}
79 | ||
185f3b9d | 80 | #ifdef CONFIG_X86_32 |
1da177e4 LT |
81 | /* |
82 | * Early probe support logic for ppro memory erratum #50 | |
83 | * | |
84 | * This is called before we do cpu ident work | |
85 | */ | |
65eb6b43 | 86 | |
3bc9b76b | 87 | int __cpuinit ppro_with_ram_bug(void) |
1da177e4 LT |
88 | { |
89 | /* Uses data from early_cpu_detect now */ | |
90 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | |
91 | boot_cpu_data.x86 == 6 && | |
92 | boot_cpu_data.x86_model == 1 && | |
93 | boot_cpu_data.x86_mask < 8) { | |
94 | printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n"); | |
95 | return 1; | |
96 | } | |
97 | return 0; | |
98 | } | |
65eb6b43 | 99 | |
#ifdef CONFIG_X86_F00F_BUG
/*
 * Work around the F0 0F bug by aliasing the IDT through a read-only
 * fixmap mapping, so the buggy locked-cmpxchg8b decode faults instead
 * of hanging the CPU.
 */
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif
113 | ||
1f442d70 YL |
114 | static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) |
115 | { | |
116 | #ifdef CONFIG_SMP | |
117 | /* calling is from identify_secondary_cpu() ? */ | |
118 | if (c->cpu_index == boot_cpu_id) | |
119 | return; | |
120 | ||
121 | /* | |
122 | * Mask B, Pentium, but not Pentium MMX | |
123 | */ | |
124 | if (c->x86 == 5 && | |
125 | c->x86_mask >= 1 && c->x86_mask <= 4 && | |
126 | c->x86_model <= 3) { | |
127 | /* | |
128 | * Remember we have B step Pentia with bugs | |
129 | */ | |
130 | WARN_ONCE(1, "WARNING: SMP operation may be unreliable" | |
131 | "with B stepping processors.\n"); | |
132 | } | |
133 | #endif | |
134 | } | |
135 | ||
/*
 * Apply 32-bit-only Intel errata workarounds: F00F bug, bogus SEP
 * reporting, P4 Xeon prefetch erratum 037, the 11AP local-APIC
 * erratum, movsl alignment tuning and NUMAQ TSC disabling.
 */
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		/* Guard so the IDT remap happens only for the first CPU: */
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);


#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
#else
/* On 64-bit none of the above errata apply; keep an empty stub. */
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif
220 | ||
/*
 * Bind the current CPU to a NUMA node using the SRAT-derived
 * apicid_to_node[] table (64-bit NUMA only; no-op otherwise).
 */
static void __cpuinit srat_detect_node(void)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE || !node_online(node))
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}
238 | ||
3dd9d514 AK |
239 | /* |
240 | * find out the number of processor cores on the die | |
241 | */ | |
f69feff7 | 242 | static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) |
3dd9d514 | 243 | { |
f2ab4461 | 244 | unsigned int eax, ebx, ecx, edx; |
3dd9d514 AK |
245 | |
246 | if (c->cpuid_level < 4) | |
247 | return 1; | |
248 | ||
f2ab4461 ZA |
249 | /* Intel has a non-standard dependency on %ecx for this CPUID level. */ |
250 | cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); | |
3dd9d514 AK |
251 | if (eax & 0x1f) |
252 | return ((eax >> 26) + 1); | |
253 | else | |
254 | return 1; | |
255 | } | |
256 | ||
/*
 * Decode the VMX capability MSRs and expose the corresponding
 * virtualization features (TPR shadow, virtual NMI, flexpriority,
 * EPT, VPID) as CPU capability bits.
 */
static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	/* Start from a clean slate before probing the MSRs: */
	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	/*
	 * NOTE(review): the high half reports the allowed-1 settings;
	 * OR-ing both halves treats a control as present if either word
	 * has the bit set.
	 */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		/* Secondary controls exist; probe them too: */
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		/* Flexpriority needs both virt-APIC and TPR shadow: */
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}
294 | ||
3bc9b76b | 295 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) |
1da177e4 LT |
296 | { |
297 | unsigned int l2 = 0; | |
1da177e4 | 298 | |
2b16a235 AK |
299 | early_init_intel(c); |
300 | ||
4052704d | 301 | intel_workarounds(c); |
1da177e4 | 302 | |
345077cd SS |
303 | /* |
304 | * Detect the extended topology information if available. This | |
305 | * will reinitialise the initial_apicid which will be used | |
306 | * in init_intel_cacheinfo() | |
307 | */ | |
308 | detect_extended_topology(c); | |
309 | ||
1da177e4 | 310 | l2 = init_intel_cacheinfo(c); |
65eb6b43 | 311 | if (c->cpuid_level > 9) { |
0080e667 VP |
312 | unsigned eax = cpuid_eax(10); |
313 | /* Check for version and the number of counters */ | |
314 | if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) | |
d0e95ebd | 315 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); |
0080e667 | 316 | } |
1da177e4 | 317 | |
4052704d YL |
318 | if (cpu_has_xmm2) |
319 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | |
320 | if (cpu_has_ds) { | |
321 | unsigned int l1; | |
322 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); | |
323 | if (!(l1 & (1<<11))) | |
324 | set_cpu_cap(c, X86_FEATURE_BTS); | |
325 | if (!(l1 & (1<<12))) | |
326 | set_cpu_cap(c, X86_FEATURE_PEBS); | |
327 | ds_init_intel(c); | |
328 | } | |
1da177e4 | 329 | |
e736ad54 PV |
330 | if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) |
331 | set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); | |
332 | ||
4052704d YL |
333 | #ifdef CONFIG_X86_64 |
334 | if (c->x86 == 15) | |
335 | c->x86_cache_alignment = c->x86_clflush_size * 2; | |
336 | if (c->x86 == 6) | |
337 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | |
338 | #else | |
65eb6b43 PC |
339 | /* |
340 | * Names for the Pentium II/Celeron processors | |
341 | * detectable only by also checking the cache size. | |
342 | * Dixon is NOT a Celeron. | |
343 | */ | |
1da177e4 | 344 | if (c->x86 == 6) { |
4052704d YL |
345 | char *p = NULL; |
346 | ||
1da177e4 LT |
347 | switch (c->x86_model) { |
348 | case 5: | |
349 | if (c->x86_mask == 0) { | |
350 | if (l2 == 0) | |
351 | p = "Celeron (Covington)"; | |
352 | else if (l2 == 256) | |
353 | p = "Mobile Pentium II (Dixon)"; | |
354 | } | |
355 | break; | |
65eb6b43 | 356 | |
1da177e4 LT |
357 | case 6: |
358 | if (l2 == 128) | |
359 | p = "Celeron (Mendocino)"; | |
360 | else if (c->x86_mask == 0 || c->x86_mask == 5) | |
361 | p = "Celeron-A"; | |
362 | break; | |
65eb6b43 | 363 | |
1da177e4 LT |
364 | case 8: |
365 | if (l2 == 128) | |
366 | p = "Celeron (Coppermine)"; | |
367 | break; | |
368 | } | |
1da177e4 | 369 | |
4052704d YL |
370 | if (p) |
371 | strcpy(c->x86_model_id, p); | |
1da177e4 | 372 | } |
1da177e4 | 373 | |
185f3b9d YL |
374 | if (c->x86 == 15) |
375 | set_cpu_cap(c, X86_FEATURE_P4); | |
376 | if (c->x86 == 6) | |
377 | set_cpu_cap(c, X86_FEATURE_P3); | |
f4166c54 | 378 | #endif |
185f3b9d | 379 | |
185f3b9d YL |
380 | if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { |
381 | /* | |
382 | * let's use the legacy cpuid vector 0x1 and 0x4 for topology | |
383 | * detection. | |
384 | */ | |
385 | c->x86_max_cores = intel_num_cpu_cores(c); | |
386 | #ifdef CONFIG_X86_32 | |
387 | detect_ht(c); | |
388 | #endif | |
389 | } | |
390 | ||
391 | /* Work around errata */ | |
392 | srat_detect_node(); | |
e38e05a8 SY |
393 | |
394 | if (cpu_has(c, X86_FEATURE_VMX)) | |
395 | detect_vmx_virtcap(c); | |
42ed458a | 396 | } |
1da177e4 | 397 | |
#ifdef CONFIG_X86_32
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if (size == 0 && c->x86 == 6 && c->x86_model == 11)
		return 256;

	return size;
}
#endif
1da177e4 | 412 | |
/*
 * Intel vendor descriptor: model-name tables (32-bit only), the
 * cache-size quirk hook and the early/main init callbacks, registered
 * with the generic cpu_dev framework below.
 */
static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_size_cache	= intel_size_cache,
#endif
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);
1da177e4 | 474 |