#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			rfu;
	char			pad0;
	char			pad1;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* In KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* Number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Compute unit id */
	u8			compute_unit_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);
extern void fpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}
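
/*
 * Illustrative sketch, not part of the original header: one way to use
 * native_cpuid() is to read the 12-byte vendor string from leaf 0, which
 * comes back in the architectural EBX, EDX, ECX order ("GenuineIntel",
 * "AuthenticAMD", ...). The helper name is hypothetical.
 */
static inline void example_read_vendor_string(char vendor[13])
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
	unsigned int regs[3];
	int i;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	regs[0] = ebx;
	regs[1] = edx;
	regs[2] = ecx;
	for (i = 0; i < 12; i++)
		vendor[i] = ((char *)regs)[i];
	vendor[12] = '\0';
}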

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0.  We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS.  When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * Space for the temporary SYSENTER stack:
	 */
	unsigned long		SYSENTER_stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);

#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#endif

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#define MXCSR_DEFAULT		0x1f80

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word */
	u32			swd;	/* FPU Status Word */
	u32			twd;	/* FPU Tag Word */
	u32			fip;	/* FPU IP Offset */
	u32			fcs;	/* FPU IP Selector */
	u32			foo;	/* FPU Operand Pointer Offset */
	u32			fos;	/* FPU Operand Pointer Selector */

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]: */
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd; /* Control Word */
	u16			swd; /* Status Word */
	u16			twd; /* Tag Word */
	u16			fop; /* Last Instruction Opcode */
	union {
		struct {
			u64	rip; /* Instruction Pointer */
			u64	rdp; /* Data Pointer */
		};
		struct {
			u32	fip; /* FPU IP Offset */
			u32	fcs; /* FPU IP Selector */
			u32	foo; /* FPU Operand Offset */
			u32	fos; /* FPU Operand Selector */
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask */

	/* 8*16 bytes for each FP-reg = 128 bytes: */
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};

struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

/* We don't support LWP yet: */
struct lwp_struct {
	u8 reserved[128];
};

struct bndreg {
	u64 lower_bound;
	u64 upper_bound;
} __packed;

struct bndcsr {
	u64 bndcfgu;
	u64 bndstatus;
} __packed;

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 xcomp_bv;
	u64 reserved[6];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	struct lwp_struct lwp;
	struct bndreg bndreg[4];
	struct bndcsr bndcsr;
	/* New processor state extensions will go here. */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};

struct fpu {
	unsigned int			last_cpu;
	unsigned int			has_fpu;
	union thread_xstate		*state;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};
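
/*
 * Illustrative sketch, not part of the original header: the %gs:40 layout
 * contract described above can be verified at compile time; a check along
 * these lines exists in the kernel's CPU setup code. BUILD_BUG_ON() and
 * offsetof() are assumed to be available in the including file, and the
 * helper name is hypothetical.
 */
static inline void example_check_canary_offset(void)
{
	/* GCC's stack protector reads the canary from %gs:40. */
	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
}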

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure stack canary segment base is cache-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * Per-CPU IRQ handling stacks
 */
struct irq_stack {
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;
	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
	/* Floating point and extended processor state */
	struct fpu		fpu;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU is used. If this is over a threshold, the
	 * lazy FPU saving becomes unlazy to save the trap. This is an
	 * unsigned char so that after 256 times the counter wraps and the
	 * behavior turns lazy again; this is to deal with bursty apps that
	 * only use the FPU for a short time.
	 */
	unsigned char		fpu_counter;
};

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
#ifdef CONFIG_X86_64
	return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
#else
	/* sp0 on x86_32 is special in and around vm86 mode. */
	return this_cpu_read_stable(cpu_current_top_of_stack);
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask		native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

typedef struct {
	unsigned long		seg;
} mm_segment_t;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
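
/*
 * Illustrative sketch, not part of the original header: cpuid_count() is
 * meant for leaves that take a sub-leaf index in ECX, such as Intel's
 * deterministic cache parameters leaf 4. The helper name is hypothetical.
 */
static inline unsigned int example_cache_type(unsigned int index)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid_count(4, index, &eax, &ebx, &ecx, &edx);

	/* EAX[4:0] is the cache type; 0 means no more cache levels. */
	return eax & 0x1f;
}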

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
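
/*
 * Illustrative sketch, not part of the original header: the single-datum
 * helpers above suit one-off queries, e.g. the physical address width
 * reported in EAX[7:0] of extended leaf 0x80000008. Hypothetical helper.
 */
static inline unsigned int example_phys_addr_bits(void)
{
	/* Leaf 0x80000000 reports the highest supported extended leaf. */
	if (cpuid_eax(0x80000000) < 0x80000008)
		return 0;

	return cpuid_eax(0x80000008) & 0xff;
}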

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

#define cpu_relax_lowlatency()	cpu_relax()
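
/*
 * Illustrative sketch, not part of the original header: cpu_relax() is
 * meant to sit in the body of tight polling loops so the PAUSE hint can
 * ease pipeline and power pressure. The helper name is hypothetical.
 */
static inline void example_spin_until_set(volatile unsigned long *flag)
{
	while (!*flag)
		cpu_relax();
}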

/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#ifdef CONFIG_M486
	/*
	 * Do a CPUID if available, otherwise do a jump.  The jump
	 * can conveniently enough be the jump around CPUID.
	 */
	asm volatile("cmpl %2,%1\n\t"
		     "jl 1f\n\t"
		     "cpuid\n"
		     "1:"
		     : "=a" (tmp)
		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#else
	/*
	 * CPUID is a barrier to speculative execution.
	 * Prefetched instructions are automatically
	 * invalidated when modified.
	 */
	asm volatile("cpuid"
		     : "=a" (tmp)
		     : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#endif
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern bool			amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
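
/*
 * Illustrative sketch, not part of the original header: a read-modify-write
 * of DEBUGCTL through the helpers above, here setting BTF so single-stepping
 * traps on branches rather than on every instruction. DEBUGCTLMSR_BTF is
 * assumed from <asm/msr-index.h>; the helper name is hypothetical.
 */
static inline void example_enable_branch_single_step(void)
{
	unsigned long debugctl = get_debugctlmsr();

	debugctl |= DEBUGCTLMSR_BTF;
	update_debugctlmsr(debugctl);
}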

extern void set_task_blockstep(struct task_struct *task, bool on);

/*
 * From the system description table in the BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3DNow! prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3DNow! prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.vm86_info		= NULL,					  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

/*
 * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (the interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page.  The guard
 * page is necessary on Intel CPUs: if a SYSCALL instruction is at
 * the highest possible canonical userspace address, then that
 * syscall will enter the kernel with a non-canonical return
 * address, and SYSRET will explode dangerously.  We avoid this
 * particular problem by preventing anything from being mapped
 * at the maximum canonical address.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = TOP_OF_INIT_STACK \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT(tsk)	mpx_enable_management((tsk))
#define MPX_DISABLE_MANAGEMENT(tsk)	mpx_disable_management((tsk))

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(struct task_struct *tsk);
extern int mpx_disable_management(struct task_struct *tsk);
#else
static inline int mpx_enable_management(struct task_struct *tsk)
{
	return -EINVAL;
}
static inline int mpx_disable_management(struct task_struct *tsk)
{
	return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}
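
/*
 * Illustrative sketch, not part of the original header: probing for a
 * specific hypervisor with the helper above. KVM advertises the signature
 * "KVMKVMKVM\0\0\0" in the 0x40000000 leaf range; the helper name here is
 * hypothetical.
 */
static inline int example_running_on_kvm(void)
{
	return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0) != 0;
}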

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */