#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:" : "=r" (pc));

	return pc;
}

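/*
 * The asm above materializes the address of the local label "1:" into a
 * register, so the returned pointer is effectively the caller's current
 * instruction pointer.  A minimal, purely illustrative use (not part of
 * the original header):
 *
 *	void *pc = current_text_addr();
 *	printk(KERN_DEBUG "executing near %p\n", pc);
 */
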
/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head_32.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_64
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	__u8			cu_id;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Logical processor id: */
	u16			logical_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
};

struct cpuid_regs {
	u32 eax, ebx, ecx, edx;
};

enum cpuid_regs_idx {
	CPUID_EAX = 0,
	CPUID_EBX,
	CPUID_ECX,
	CPUID_EDX,
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern u32 get_scattered_cpuid_leaf(unsigned int level,
				    unsigned int sub_leaf,
				    enum cpuid_regs_idx reg);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

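/*
 * Illustrative sketch (not part of the original header; the name
 * example_max_basic_leaf is made up): CPUID leaf 0 returns the highest
 * supported basic leaf in EAX and the 12-byte vendor string in
 * EBX/EDX/ECX ("GenuineIntel", "AuthenticAMD", ...).  Note that
 * native_cpuid() reads %ecx as an input too, so it must be initialized.
 */
static inline unsigned int example_max_basic_leaf(void)
{
	unsigned int eax = 0, ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return eax;	/* EBX/EDX/ECX now hold the vendor string */
}
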
#define native_cpuid_reg(reg)					\
static inline unsigned int native_cpuid_##reg(unsigned int op)	\
{								\
	unsigned int eax = op, ebx, ecx = 0, edx;		\
								\
	native_cpuid(&eax, &ebx, &ecx, &edx);			\
								\
	return reg;						\
}

/*
 * Native CPUID functions returning a single datum.
 */
native_cpuid_reg(eax)
native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)

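/*
 * After the expansions above, e.g. native_cpuid_eax(op) runs CPUID with
 * EAX = op (and ECX = 0) and returns only the resulting EAX.  A sketch
 * of typical use:
 *
 *	unsigned int max_leaf = native_cpuid_eax(0);
 *	unsigned int max_ext  = native_cpuid_eax(0x80000000);
 */
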
/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

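/*
 * A sketch of typical use (assuming the usual swapper_pg_dir symbol):
 * CPU setup code can switch to the initial kernel page tables with
 *
 *	load_cr3(swapper_pg_dir);
 *
 * and read_cr3_pa() then returns the physical address of the active
 * pgd, with the PCID/flag bits of %cr3 masked off.
 */
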
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0.  We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS.  When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap.  The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

#ifdef CONFIG_X86_32
	/*
	 * Space for the temporary SYSENTER stack.
	 */
	unsigned long		SYSENTER_stack_canary;
	unsigned long		SYSENTER_stack[64];
#endif

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);

/*
 * The extra sizeof(unsigned long) comes from the extra "long" at the
 * end of the io_bitmap.
 *
 * The -1 is because a segment's base+limit must point at the address
 * of the last valid byte, not one byte past it.
 */
#define __KERNEL_TSS_LIMIT	\
	(IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1)

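/*
 * Worked out (a sketch, for a 64-bit build): IO_BITMAP_BYTES is
 * 65536/8 = 8192 and IO_BITMAP_LONGS is 8192/8 = 1024, so the limit
 * covers the hardware TSS up to io_bitmap, the 8192-byte bitmap and
 * the extra all-ones long, minus one because a segment limit is the
 * offset of the last valid byte rather than the size.
 */
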
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#endif

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure stack canary segment base is cache-aligned:
 * "For Intel Atom processors, avoid non zero segment base address
 * that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * per-CPU IRQ handling stacks
 */
struct irq_stack {
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */

extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;

struct perf_event;

typedef struct {
	unsigned long		seg;
} mm_segment_t;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

	u32			status;		/* thread synchronous flags */

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short.  Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long		fs;
	unsigned long		gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;

	mm_segment_t		addr_limit;

	unsigned int		sig_on_uaccess_err:1;
	unsigned int		uaccess_err:1;	/* uaccess failed */

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
	 * the end.
	 */
};

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT) */

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
#ifdef CONFIG_X86_64
	return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
#else
	/* sp0 on x86_32 is special in and around vm86 mode. */
	return this_cpu_read_stable(cpu_current_top_of_stack);
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask		native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

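/*
 * Illustrative sketch (not part of the original header; the name
 * example_count_cache_levels is made up): cpuid_count() is for leaves
 * indexed by a sub-leaf in ECX.  Intel's leaf 4 describes one cache
 * per sub-leaf until the cache-type field (EAX[4:0]) reads back 0.
 */
static inline unsigned int example_count_cache_levels(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int subleaf;

	for (subleaf = 0; subleaf < 32; subleaf++) {
		cpuid_count(4, subleaf, &eax, &ebx, &ecx, &edx);
		if ((eax & 0x1f) == 0)	/* cache type 0: no more caches */
			break;
	}

	return subleaf;
}
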
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static __always_inline void cpu_relax(void)
{
	rep_nop();
}

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 * a) Text was modified using one virtual address and is about to be executed
 *    from the same physical page at a different virtual address.
 *
 * b) Text was modified on a different CPU, may subsequently be
 *    executed on this CPU, and you want to make sure the new version
 *    gets executed.  This generally means you're calling this in an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
	/*
	 * There are quite a few ways to do this.  IRET-to-self is nice
	 * because it works on every CPU, at any CPL (so it's compatible
	 * with paravirtualization), and it never exits to a hypervisor.
	 * The only down sides are that it's a bit slow (it seems to be
	 * a bit more than 2x slower than the fastest options) and that
	 * it unmasks NMIs.  The "push %cs" is needed because, in
	 * paravirtual environments, __KERNEL_CS may not be a valid CS
	 * value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump.  That sequence also works on all CPUs,
	 * but it will fault at CPL3 (i.e. Xen PV and lguest).
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 *
	 * Like all of Linux's memory ordering operations, this is a
	 * compiler barrier as well.
	 */
	register void *__sp asm(_ASM_SP);

#ifdef CONFIG_X86_32
	asm volatile (
		"pushfl\n\t"
		"pushl %%cs\n\t"
		"pushl $1f\n\t"
		"iret\n\t"
		"1:"
		: "+r" (__sp) : : "memory");
#else
	unsigned int tmp;

	asm volatile (
		"mov %%ss, %0\n\t"
		"pushq %q0\n\t"
		"pushq %%rsp\n\t"
		"addq $8, (%%rsp)\n\t"
		"pushfq\n\t"
		"mov %%cs, %0\n\t"
		"pushq %q0\n\t"
		"pushq $1f\n\t"
		"iretq\n\t"
		"1:"
		: "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
#endif
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long		boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

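/*
 * A sketch of typical use, assuming DEBUGCTLMSR_BTF from
 * <asm/msr-index.h>: setting the BTF bit makes a single-stepped task
 * trap once per branch instead of once per instruction:
 *
 *	update_debugctlmsr(get_debugctlmsr() | DEBUGCTLMSR_BTF);
 */
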
extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define IA32_PAGE_OFFSET	PAGE_OFFSET
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							\
	.sp0			= TOP_OF_INIT_STACK,			\
	.sysenter_cs		= __KERNEL_CS,				\
	.io_bitmap_ptr		= NULL,					\
	.addr_limit		= KERNEL_DS,				\
}

/*
 * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size.  47 bits minus one guard page.  The guard
 * page is necessary on Intel CPUs: if a SYSCALL instruction is at
 * the highest possible canonical userspace address, then that
 * syscall will enter the kernel with a non-canonical return
 * address, and SYSRET will explode dangerously.  We avoid this
 * particular problem by preventing anything from being mapped
 * at the maximum canonical address.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

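/*
 * With 4K pages this works out to 0x00007ffffffff000: the 47-bit
 * canonical user range ends at 1UL << 47 = 0x0000800000000000, and one
 * 4096-byte guard page is shaved off the top.
 */
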
/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  {						\
	.sp0			= TOP_OF_INIT_STACK,		\
	.addr_limit		= KERNEL_DS,			\
}

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern unsigned long thread_saved_pc(struct task_struct *tsk);

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE)

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT()		mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
#else
static inline int mpx_enable_management(void)
{
	return -EINVAL;
}
static inline int mpx_disable_management(void)
{
	return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}

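/*
 * Illustrative sketch (not part of the original header): KVM, for
 * example, advertises the signature "KVMKVMKVM\0\0\0" in the
 * 0x40000000 hypervisor leaf range, so a caller can probe for it with:
 *
 *	uint32_t base = hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
 *	if (base)
 *		;	// running as a KVM guest; leaves start at base
 */
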
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */