/*
 * include/asm-i386/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>

/* flag for disabling the tsc */
extern int tsc_disable;

struct desc_struct {
        unsigned long a, b;
};

#define desc_empty(desc) \
                (!((desc)->a | (desc)->b))

#define desc_equal(desc1, desc2) \
                (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

/*
 * Default implementation of the macro that returns the current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })

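/*
 * Illustrative usage sketch (an assumption, not code from this header):
 * the macro can tag a diagnostic with the address of the instruction
 * that emitted it, e.g.
 *
 *	printk(KERN_DEBUG "reached %p\n", current_text_addr());
 */
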
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
        __u8 x86;                /* CPU family */
        __u8 x86_vendor;         /* CPU vendor */
        __u8 x86_model;
        __u8 x86_mask;
        char wp_works_ok;        /* It doesn't on 386's */
        char hlt_works_ok;       /* Problems on some 486Dx4's and old 386's */
        char hard_math;
        char rfu;
        int cpuid_level;         /* Maximum supported CPUID level, -1=no CPUID */
        unsigned long x86_capability[NCAPINTS];
        char x86_vendor_id[16];
        char x86_model_id[64];
        int x86_cache_size;      /* in KB - valid for CPUs that support this call */
        int x86_cache_alignment; /* In bytes */
        char fdiv_bug;
        char f00f_bug;
        char coma_bug;
        char pad0;
        int x86_power;
        unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
        cpumask_t llc_shared_map;        /* cpus sharing the last level cache */
#endif
        unsigned char x86_max_cores;     /* cpuid returned max cores value */
        unsigned char apicid;
        unsigned short x86_clflush_size;
#ifdef CONFIG_SMP
        unsigned char booted_cores;      /* number of cores as seen by the OS */
        __u8 phys_proc_id;               /* Physical processor id */
        __u8 cpu_core_id;                /* Core id */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL     0
#define X86_VENDOR_CYRIX     1
#define X86_VENDOR_AMD       2
#define X86_VENDOR_UMC       3
#define X86_VENDOR_NEXGEN    4
#define X86_VENDOR_CENTAUR   5
#define X86_VENDOR_RISE      6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC       8
#define X86_VENDOR_NUM       9
#define X86_VENDOR_UNKNOWN   0xff

/*
 * capabilities of CPUs
 */

extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif

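/*
 * Illustrative usage sketch (an assumption, not code from this header):
 * feature tests against a given CPU's capability bits typically go
 * through current_cpu_data with cpu_has() from <asm/cpufeature.h>;
 * use_sse_path() is a hypothetical caller-supplied function:
 *
 *	if (cpu_has(&current_cpu_data, X86_FEATURE_XMM))
 *		use_sse_path();
 */
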
extern int cpu_llc_id[NR_CPUS];
extern char ignore_fpu_irq;

void __init cpu_detect(struct cpuinfo_x86 *c);

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF   0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF   0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF   0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF   0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF   0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF   0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF   0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF   0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF   0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT   0x00004000 /* Nested Task */
#define X86_EFLAGS_RF   0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM   0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC   0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF  0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP  0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID   0x00200000 /* CPUID detection flag */

static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
                                         unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        __asm__("cpuid"
                : "=a" (*eax),
                  "=b" (*ebx),
                  "=c" (*ecx),
                  "=d" (*edx)
                : "0" (*eax), "2" (*ecx));
}

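/*
 * Illustrative usage sketch (an assumption, not code from this header):
 * reading the vendor string with native_cpuid(). Leaf 0 returns the
 * highest supported leaf in EAX and the vendor string in EBX, EDX, ECX
 * order:
 *
 *	unsigned int eax = 0, ebx, ecx, edx;
 *	char vendor[13];
 *
 *	native_cpuid(&eax, &ebx, &ecx, &edx);
 *	memcpy(vendor,     &ebx, 4);
 *	memcpy(vendor + 4, &edx, 4);
 *	memcpy(vendor + 8, &ecx, 4);
 *	vendor[12] = 0;		(e.g. "GenuineIntel" on Intel parts)
 */
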
#define load_cr3(pgdir) write_cr3(__pa(pgdir))

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME        0x0001  /* enable vm86 extensions */
#define X86_CR4_PVI        0x0002  /* virtual interrupts flag enable */
#define X86_CR4_TSD        0x0004  /* disable time stamp at ipl 3 */
#define X86_CR4_DE         0x0008  /* enable debugging extensions */
#define X86_CR4_PSE        0x0010  /* enable page size extensions */
#define X86_CR4_PAE        0x0020  /* enable physical address extensions */
#define X86_CR4_MCE        0x0040  /* Machine check enable */
#define X86_CR4_PGE        0x0080  /* enable global pages */
#define X86_CR4_PCE        0x0100  /* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR     0x0200  /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT 0x0400  /* enable unmasked SSE exceptions */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
        unsigned cr4;
        mmu_cr4_features |= mask;
        cr4 = read_cr4();
        cr4 |= mask;
        write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
        unsigned cr4;
        mmu_cr4_features &= ~mask;
        cr4 = read_cr4();
        cr4 &= ~mask;
        write_cr4(cr4);
}

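/*
 * Illustrative usage sketch (an assumption, not code from this header):
 * boot code that enables global pages must both flip the bit in %cr4
 * and record it in mmu_cr4_features so later-booting CPUs inherit it:
 *
 *	if (cpu_has(&boot_cpu_data, X86_FEATURE_PGE))
 *		set_in_cr4(X86_CR4_PGE);
 */
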
/*
 * NSC/Cyrix CPU configuration register indexes
 */
#define CX86_PCR0 0x20
#define CX86_GCR  0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc

/*
 * NSC/Cyrix CPU indexed register access macros
 */
#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
        outb((reg), 0x22); \
        outb((data), 0x23); \
} while (0)

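/*
 * Illustrative usage sketch (an assumption; the register and bit value
 * are hypothetical): Cyrix setup code does a read-modify-write of a
 * configuration register through the 0x22/0x23 index/data pair:
 *
 *	unsigned char ccr2 = getCx86(CX86_CCR2);
 *	setCx86(CX86_CCR2, ccr2 | 0x08);
 */
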
/* Stop speculative execution */
static inline void sync_core(void)
{
        int tmp;
        asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
{
        /* "monitor %eax,%ecx,%edx;" */
        asm volatile(
                ".byte 0x0f,0x01,0xc8;"
                : : "a" (eax), "c" (ecx), "d" (edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax,%ecx;" */
        asm volatile(
                ".byte 0x0f,0x01,0xc9;"
                : : "a" (eax), "c" (ecx));
}

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

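/*
 * Illustrative idle-loop sketch (an assumption, simplified from how
 * mwait-based idle routines use these helpers): arm the monitor on a
 * flag, re-check it, then mwait until the line is written or an
 * interrupt arrives. 'need_resched_flag' and 'eax_hint' are
 * hypothetical names:
 *
 *	__monitor(&need_resched_flag, 0, 0);
 *	if (!need_resched_flag)
 *		__mwait(eax_hint, 0);
 */
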
/* From the system description table in the BIOS. Mostly for MCA use,
   but others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/* Boot loader type from the setup header */
extern int bootloader_type;

/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE (PAGE_OFFSET)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS  65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000

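/*
 * Worked example of the arithmetic above: on i386, sizeof(long) == 4,
 * so IO_BITMAP_BITS = 65536 (one bit per 16-bit I/O port) gives
 * IO_BITMAP_BYTES = 65536/8 = 8192 and IO_BITMAP_LONGS = 8192/4 = 2048.
 */
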
struct i387_fsave_struct {
        long cwd;
        long swd;
        long twd;
        long fip;
        long fcs;
        long foo;
        long fos;
        long st_space[20];      /* 8*10 bytes for each FP-reg = 80 bytes */
        long status;            /* software status information */
};

struct i387_fxsave_struct {
        unsigned short cwd;
        unsigned short swd;
        unsigned short twd;
        unsigned short fop;
        long fip;
        long fcs;
        long foo;
        long fos;
        long mxcsr;
        long mxcsr_mask;
        long st_space[32];      /* 8*16 bytes for each FP-reg = 128 bytes */
        long xmm_space[32];     /* 8*16 bytes for each XMM-reg = 128 bytes */
        long padding[56];
} __attribute__((aligned(16)));

struct i387_soft_struct {
        long cwd;
        long swd;
        long twd;
        long fip;
        long fcs;
        long foo;
        long fos;
        long st_space[20];      /* 8*10 bytes for each FP-reg = 80 bytes */
        unsigned char ftop, changed, lookahead, no_update, rm, alimit;
        struct info *info;
        unsigned long entry_eip;
};

union i387_union {
        struct i387_fsave_struct fsave;
        struct i387_fxsave_struct fxsave;
        struct i387_soft_struct soft;
};

typedef struct {
        unsigned long seg;
} mm_segment_t;

struct thread_struct;

struct tss_struct {
        unsigned short back_link, __blh;
        unsigned long esp0;
        unsigned short ss0, __ss0h;
        unsigned long esp1;
        unsigned short ss1, __ss1h;     /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
        unsigned long esp2;
        unsigned short ss2, __ss2h;
        unsigned long __cr3;
        unsigned long eip;
        unsigned long eflags;
        unsigned long eax, ecx, edx, ebx;
        unsigned long esp;
        unsigned long ebp;
        unsigned long esi;
        unsigned long edi;
        unsigned short es, __esh;
        unsigned short cs, __csh;
        unsigned short ss, __ssh;
        unsigned short ds, __dsh;
        unsigned short fs, __fsh;
        unsigned short gs, __gsh;
        unsigned short ldt, __ldth;
        unsigned short trace, io_bitmap_base;
        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
        /*
         * Cache the current maximum and the last task that used the bitmap:
         */
        unsigned long io_bitmap_max;
        struct thread_struct *io_bitmap_owner;
        /*
         * Pads the TSS to be cacheline-aligned (size is 0x100)
         */
        unsigned long __cacheline_filler[35];
        /*
         * .. and then another 0x100 bytes for the emergency kernel stack
         */
        unsigned long stack[64];
} __attribute__((packed));

#define ARCH_MIN_TASKALIGN 16

struct thread_struct {
        /* cached TLS descriptors. */
        struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
        unsigned long esp0;
        unsigned long sysenter_cs;
        unsigned long eip;
        unsigned long esp;
        unsigned long fs;
        unsigned long gs;
        /* Hardware debugging registers */
        unsigned long debugreg[8];      /* %%db0-7 debug registers */
        /* fault info */
        unsigned long cr2, trap_no, error_code;
        /* floating point info */
        union i387_union i387;
        /* virtual 86 mode info */
        struct vm86_struct __user *vm86_info;
        unsigned long screen_bitmap;
        unsigned long v86flags, v86mask, saved_esp0;
        unsigned int saved_fs, saved_gs;
        /* IO permissions */
        unsigned long *io_bitmap_ptr;
        unsigned long iopl;
        /* max allowed port in the bitmap, in bytes: */
        unsigned long io_bitmap_max;
};

#define INIT_THREAD { \
        .vm86_info = NULL, \
        .sysenter_cs = __KERNEL_CS, \
        .io_bitmap_ptr = NULL, \
        .gs = __KERNEL_PDA, \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS { \
        .esp0 = sizeof(init_stack) + (long)&init_stack, \
        .ss0 = __KERNEL_DS, \
        .ss1 = __KERNEL_CS, \
        .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
        .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
}

#define start_thread(regs, new_eip, new_esp) do { \
        __asm__("movl %0,%%fs": :"r" (0)); \
        regs->xgs = 0; \
        set_fs(USER_DS); \
        regs->xds = __USER_DS; \
        regs->xes = __USER_DS; \
        regs->xss = __USER_DS; \
        regs->xcs = __USER_CS; \
        regs->eip = new_eip; \
        regs->esp = new_esp; \
} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * Create a kernel thread without removing it from tasklists.
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);
void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);

unsigned long get_wchan(struct task_struct *p);

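/*
 * Illustrative usage sketch (an assumption, not code from this header):
 * spawning a kernel thread that shares the kernel's VM and open files;
 * my_worker() and do_work() are hypothetical:
 *
 *	static int my_worker(void *arg)
 *	{
 *		do_work(arg);
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, NULL, CLONE_VM | CLONE_FILES);
 */
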
#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info) \
({ \
        unsigned long *__ptr = (unsigned long *)(info); \
        (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})

/*
 * The -8 below reserves 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (an interrupt gate does not save these registers
 * when switching to the same privilege ring).
 * Therefore beware: accessing the xss/esp fields of the
 * "struct pt_regs" is possible, but they may contain
 * completely wrong values.
 */
#define task_pt_regs(task) \
({ \
        struct pt_regs *__regs__; \
        __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
        __regs__ - 1; \
})

#define KSTK_EIP(task) (task_pt_regs(task)->eip)
#define KSTK_ESP(task) (task_pt_regs(task)->esp)

struct microcode_header {
        unsigned int hdrver;
        unsigned int rev;
        unsigned int date;
        unsigned int sig;
        unsigned int cksum;
        unsigned int ldrver;
        unsigned int pf;
        unsigned int datasize;
        unsigned int totalsize;
        unsigned int reserved[3];
};

struct microcode {
        struct microcode_header hdr;
        unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* The microcode format was extended starting with Prescott processors. */
struct extended_signature {
        unsigned int sig;
        unsigned int pf;
        unsigned int cksum;
};

struct extended_sigtable {
        unsigned int count;
        unsigned int cksum;
        unsigned int reserved[3];
        struct extended_signature sigs[0];
};

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        __asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax() rep_nop()

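/*
 * Illustrative usage sketch (an assumption, not code from this header):
 * busy-wait loops should call cpu_relax() on each iteration; PAUSE
 * both hints the pipeline on hyper-threaded parts and reduces power.
 * MY_READY_BIT and 'flags' are hypothetical:
 *
 *	while (!test_bit(MY_READY_BIT, &flags))
 *		cpu_relax();
 */
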
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_enabled() 0
#define __cpuid native_cpuid

static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
        tss->esp0 = thread->esp0;
        /* This can only happen when SEP is enabled, no need to test "SEP"arately */
        if (unlikely(tss->ss1 != thread->sysenter_cs)) {
                tss->ss1 = thread->sysenter_cs;
                wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
        }
}

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register) \
        __asm__("movl %%db" #register ", %0" \
                : "=r" (var))
#define set_debugreg(value, register) \
        __asm__("movl %0,%%db" #register \
                : /* no output */ \
                : "r" (value))

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

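/*
 * Illustrative usage sketch (an assumption, not code from this header):
 * the token-pasted register number selects %db0-%db7, so installing a
 * breakpoint address and reading back the debug status register looks
 * like this ('addr' is hypothetical):
 *
 *	unsigned long status;
 *	set_debugreg(addr, 0);
 *	get_debugreg(status, 6);
 */
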
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static fastcall inline void native_set_iopl_mask(unsigned mask)
{
        unsigned int reg;
        __asm__ __volatile__ ("pushfl;"
                              "popl %0;"
                              "andl %1, %0;"
                              "orl %2, %0;"
                              "pushl %0;"
                              "popfl"
                                : "=&r" (reg)
                                : "i" (~X86_EFLAGS_IOPL), "r" (mask));
}

/*
 * Generic CPUID function.
 * Clear %ecx, since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = 0;
        __cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
                               int *edx)
{
        *eax = op;
        *ecx = count;
        __cpuid(eax, ebx, ecx, edx);
}

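/*
 * Illustrative usage sketch (an assumption, not code from this header):
 * CPUID leaf 4, "deterministic cache parameters", is the classic user
 * of cpuid_count(); the subleaf in ecx selects which cache to describe:
 *
 *	int eax, ebx, ecx, edx;
 *	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 *	cache_type = eax & 0x1f;	(0 means no more caches)
 */
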
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);
        return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);
        return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);
        return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);
        return edx;
}

/* Generic NOP versions, from gas */
#define GENERIC_NOP1 ".byte 0x90\n"
#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7

/* Opteron nops */
#define K8_NOP1 GENERIC_NOP1
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4

/* K7 nops */
/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1 GENERIC_NOP1
#define K7_NOP2 ".byte 0x8b,0xc0\n"
#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5 K7_NOP4 ASM_NOP1
#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8 K7_NOP7 ASM_NOP1

#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches on the K6, because
   they are microcoded there and very slow. However, we currently
   don't do prefetches for pre-XP Athlons either; that should be
   fixed. */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
{
        alternative_input(ASM_NOP4,
                          "prefetchnta (%1)",
                          X86_FEATURE_XMM,
                          "r" (x));
}

#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks, to avoid one state transition in the cache coherency
   protocol. */
static inline void prefetchw(const void *x)
{
        alternative_input(ASM_NOP4,
                          "prefetchw (%1)",
                          X86_FEATURE_3DNOW,
                          "r" (x));
}
#define spin_lock_prefetch(x) prefetchw(x)

extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern int init_gdt(int cpu, struct task_struct *idle);
extern void cpu_set_gdt(int);
extern void secondary_cpu_init(void);

#endif /* __ASM_I386_PROCESSOR_H */