/* include/asm-x86/processor.h */

#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declarations, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/system.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;
	asm volatile("mov $1f,%0\n1:" : "=r" (pc));
	return pc;
}
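
/*
 * The "1:" above is a GAS local label; "mov $1f,%0" loads the address
 * of the instruction immediately following the mov, which is as close
 * to the caller's current instruction pointer as a plain mov can get.
 */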

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}
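
/*
 * The "0" and "2" matching constraints tie the *eax and *ecx inputs to
 * the same registers as outputs 0 (%eax) and 2 (%ecx), so the requested
 * leaf/subleaf go in and the results come back in place.
 */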

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}
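
/*
 * load_cr3() takes a kernel-virtual pgd pointer; __pa() converts it to
 * the physical address the hardware expects. Writing CR3 also flushes
 * all non-global TLB entries as a side effect.
 */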

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short back_link, __blh;
	unsigned long sp0;
	unsigned short ss0, __ss0h;
	unsigned long sp1;
	unsigned short ss1, __ss1h;	/* ss1 caches MSR_IA32_SYSENTER_CS */
	unsigned long sp2;
	unsigned short ss2, __ss2h;
	unsigned long __cr3;
	unsigned long ip;
	unsigned long flags;
	unsigned long ax, cx, dx, bx;
	unsigned long sp, bp, si, di;
	unsigned short es, __esh;
	unsigned short cs, __csh;
	unsigned short ss, __ssh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
	unsigned short ldt, __ldth;
	unsigned short trace, io_bitmap_base;
} __attribute__((packed));
#else
struct x86_hw_tss {
	u32 reserved1;
	u64 sp0;
	u64 sp1;
	u64 sp2;
	u64 reserved2;
	u64 ist[7];
	u32 reserved3;
	u32 reserved4;
	u16 reserved5;
	u16 io_bitmap_base;
} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY	0x9000
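
/*
 * 65536 bits covers the full 16-bit I/O port space: 65536/8 = 8192
 * bytes, i.e. 2048 longs on 32-bit or 1024 longs on 64-bit.
 */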

struct tss_struct {
	struct x86_hw_tss x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long __cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long stack[64];
} __attribute__((packed));

DECLARE_PER_CPU(struct tss_struct, init_tss);

#ifdef CONFIG_X86_32
# include "processor_32.h"
#else
# include "processor_64.h"
#endif

extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

struct thread_struct {
/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long sp0;
	unsigned long sp;
#ifdef CONFIG_X86_32
	unsigned long sysenter_cs;
#else
	unsigned long usersp;	/* Copy from PDA */
	unsigned short es, ds, fsindex, gsindex;
#endif
	unsigned long ip;
	unsigned long fs;
	unsigned long gs;
/* Hardware debugging registers */
	unsigned long debugreg0;
	unsigned long debugreg1;
	unsigned long debugreg2;
	unsigned long debugreg3;
	unsigned long debugreg6;
	unsigned long debugreg7;
/* fault info */
	unsigned long cr2, trap_no, error_code;
/* floating point info */
	union i387_union i387 __attribute__((aligned(16)));
#ifdef CONFIG_X86_32
/* virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long screen_bitmap;
	unsigned long v86flags, v86mask, saved_sp0;
	unsigned int saved_fs, saved_gs;
#endif
/* IO permissions */
	unsigned long *io_bitmap_ptr;
	unsigned long iopl;
/* max allowed port in the bitmap, in bytes: */
	unsigned io_bitmap_max;
/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
	unsigned long debugctlmsr;
/* Debug Store - if not 0 points to a DS Save Area configuration;
 * goes into MSR_IA32_DS_AREA */
	unsigned long ds_area_msr;
};

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" : "=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" : "=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" : "=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" : "=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" : "=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" : "=r" (val));
		break;
	default:
		BUG();
	}
	return val;
}
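
/*
 * Cases 4 and 5 are deliberately absent: DR4 and DR5 are reserved
 * debug registers that either alias DR6/DR7 or raise #UD, depending
 * on CR4.DE, so neither accessor touches them.
 */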

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0" : /* no output */ : "r" (value));
		break;
	case 1:
		asm("mov %0, %%db1" : /* no output */ : "r" (value));
		break;
	case 2:
		asm("mov %0, %%db2" : /* no output */ : "r" (value));
		break;
	case 3:
		asm("mov %0, %%db3" : /* no output */ : "r" (value));
		break;
	case 6:
		asm("mov %0, %%db6" : /* no output */ : "r" (value));
		break;
	case 7:
		asm("mov %0, %%db7" : /* no output */ : "r" (value));
		break;
	default:
		BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	__asm__ __volatile__ ("pushfl;"
			      "popl %0;"
			      "andl %1, %0;"
			      "orl %2, %0;"
			      "pushl %0;"
			      "popfl"
			      : "=&r" (reg)
			      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
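
/*
 * On 64-bit this compiles to an empty function: there IOPL is managed
 * through the saved EFLAGS image in pt_regs (see sys_iopl) rather than
 * by rewriting the live flags here.
 */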

static inline void native_load_sp0(struct tss_struct *tss,
				   struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid native_cpuid
#define paravirt_enabled() 0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)			\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)			\
	native_set_debugreg(register, value)

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
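
/*
 * Illustrative use: set_in_cr4(X86_CR4_PGE) turns on global pages
 * during CPU setup; CPUs that boot later pick the bit up from
 * mmu_cr4_features, per the comment above.
 */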

struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from Prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};
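
/*
 * bits[0] and sigs[0] are the old zero-length-array idiom for a
 * variable-length trailer: the update data and the signature entries
 * follow the fixed header in memory, with their sizes given by
 * datasize/totalsize and count above.
 */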

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return edx;
}
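
/*
 * Illustrative use: cpuid_eax(0x80000000) returns the highest extended
 * CPUID leaf, and leaf 0 returns the vendor string in the register
 * order EBX, EDX, ECX ("GenuineIntel" -> "Genu", "ineI", "ntel").
 */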

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep; nop" : : : "memory");
}
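
/*
 * "rep; nop" assembles to F3 90, the PAUSE instruction: a plain NOP on
 * older CPUs, and a hint on newer ones that saves power and helps the
 * other hyper-threaded sibling while spinning.
 */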

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;

	asm volatile("cpuid"
		     : "=a" (tmp)
		     : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}
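
/*
 * CPUID is an architecturally serializing instruction: every prior
 * instruction must retire before anything after it begins. Leaf 1 is
 * used only because it is cheap and valid on all CPUs.
 */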

#define cpu_relax()	rep_nop()

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     : : "a" (eax), "c" (ecx), "d" (edx));
}
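
/*
 * The raw .byte sequences here (0f 01 c8 = MONITOR, 0f 01 c9 = MWAIT)
 * let this header assemble with binutils versions that do not know the
 * monitor/mwait mnemonics.
 */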

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     : : "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     : : "a" (eax), "c" (ecx));
}

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern int force_mwait;

extern void select_idle_routine(const struct cpuinfo_x86 *c);

extern unsigned long boot_option_idle_override;

/* Boot loader type from the setup header */
extern int bootloader_type;
#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#define spin_lock_prefetch(x)	prefetchw(x)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap().
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
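
/*
 * Example: on 32-bit with the usual 3 GiB TASK_SIZE (0xC0000000),
 * TASK_UNMAPPED_BASE works out to 0x40000000, i.e. mmap searches
 * start 1 GiB into the address space.
 */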

#define KSTK_EIP(task)	(task_pt_regs(task)->ip)

#endif /* __ASM_X86_PROCESSOR_H */