#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/system.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;
	asm volatile("mov $1f,%0\n1:" : "=r" (pc));
	return pc;
}

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}

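/*
 * Illustrative sketch, not part of the original header: one way a
 * caller might use native_cpuid(). Leaf 0 returns the CPU vendor
 * string packed into ebx, edx, ecx (in that order); the helper name
 * example_cpuid_vendor() is hypothetical.
 */
static inline void example_cpuid_vendor(char vendor[13])
{
	unsigned int eax = 0, ebx, ecx = 0, edx;
	unsigned int *v = (unsigned int *)vendor;

	native_cpuid(&eax, &ebx, &ecx, &edx);
	v[0] = ebx;		/* e.g. "Genu" */
	v[1] = edx;		/* e.g. "ineI" */
	v[2] = ecx;		/* e.g. "ntel" */
	vendor[12] = '\0';
}
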
static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short back_link, __blh;
	unsigned long sp0;
	unsigned short ss0, __ss0h;
	unsigned long sp1;
	unsigned short ss1, __ss1h;	/* ss1 caches MSR_IA32_SYSENTER_CS */
	unsigned long sp2;
	unsigned short ss2, __ss2h;
	unsigned long __cr3;
	unsigned long ip;
	unsigned long flags;
	unsigned long ax, cx, dx, bx;
	unsigned long sp, bp, si, di;
	unsigned short es, __esh;
	unsigned short cs, __csh;
	unsigned short ss, __ssh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
	unsigned short ldt, __ldth;
	unsigned short trace, io_bitmap_base;
} __attribute__((packed));
#else
struct x86_hw_tss {
	u32 reserved1;
	u64 sp0;
	u64 sp1;
	u64 sp2;
	u64 reserved2;
	u64 ist[7];
	u32 reserved3;
	u32 reserved4;
	u16 reserved5;
	u16 io_bitmap_base;
} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS 65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000

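/*
 * Worked numbers for the constants above: 65536 ports at one bit per
 * port give IO_BITMAP_BYTES = 65536/8 = 8192, i.e. IO_BITMAP_LONGS =
 * 8192/4 = 2048 with 4-byte longs (32-bit) or 8192/8 = 1024 with
 * 8-byte longs (64-bit).
 */
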
struct tss_struct {
	struct x86_hw_tss x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * Pads the TSS to be cacheline-aligned (total size is 0x100 bytes):
	 */
	unsigned long __cacheline_filler[35];
	/*
	 * ... and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long stack[64];
} __attribute__((packed));

DECLARE_PER_CPU(struct tss_struct, init_tss);

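/*
 * Illustrative sketch, not part of the original header: init_tss is
 * per-CPU, so a caller would go through the per-cpu accessors pulled
 * in via <asm/percpu.h>; 'cpu' is assumed to be a valid CPU number:
 *
 *	struct tss_struct *t = &per_cpu(init_tss, cpu);
 *	unsigned long sp0 = t->x86_tss.sp0;
 */
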
#ifdef CONFIG_X86_32
# include "processor_32.h"
#else
# include "processor_64.h"
#endif

extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" : "=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" : "=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" : "=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" : "=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" : "=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" : "=r" (val));
		break;
	default:
		BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0" : /* no output */ : "r" (value));
		break;
	case 1:
		asm("mov %0, %%db1" : /* no output */ : "r" (value));
		break;
	case 2:
		asm("mov %0, %%db2" : /* no output */ : "r" (value));
		break;
	case 3:
		asm("mov %0, %%db3" : /* no output */ : "r" (value));
		break;
	case 6:
		asm("mov %0, %%db6" : /* no output */ : "r" (value));
		break;
	case 7:
		asm("mov %0, %%db7" : /* no output */ : "r" (value));
		break;
	default:
		BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from the given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;
	__asm__ __volatile__ ("pushfl;"
			      "popl %0;"
			      "andl %1, %0;"
			      "orl %2, %0;"
			      "pushl %0;"
			      "popfl"
			      : "=&r" (reg)
			      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

#ifndef CONFIG_PARAVIRT
#define __cpuid native_cpuid
#define paravirt_enabled() 0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)			\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)			\
	native_set_debugreg(register, value)

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

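/*
 * Illustrative sketch, not part of the original header: using the
 * macros above to program and read back hardware breakpoint register
 * %db0 ('addr' is a hypothetical linear address):
 *
 *	unsigned long dr0;
 *
 *	set_debugreg(addr, 0);	// value first, register number second
 *	get_debugreg(dr0, 0);	// dr0 now holds addr
 */
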
/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}

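/*
 * Illustrative sketch, not part of the original header: turning on a
 * CR4 feature bit so that CPUs booting later inherit it through
 * mmu_cr4_features. X86_CR4_PGE comes from <asm/processor-flags.h>;
 * the CPU feature test is assumed to have happened elsewhere:
 *
 *	set_in_cr4(X86_CR4_PGE);	// enable global pages
 */
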
struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* The microcode format is extended from Prescott processors onwards. */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};

/*
 * Create a kernel thread without removing it from tasklists.
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state. */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

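/*
 * Illustrative sketch, not part of the original header: CPUID leaf 4
 * (deterministic cache parameters on Intel) is the classic user of
 * the ecx subleaf, describing one cache level per 'count' value:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	int i = 0;
 *
 *	do {
 *		cpuid_count(4, i++, &eax, &ebx, &ecx, &edx);
 *	} while (eax & 0x1f);	// EAX[4:0] == 0: no more cache levels
 */
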
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return edx;
}

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep; nop" : : : "memory");
}

/* Stop speculative execution (CPUID is a serializing instruction). */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}

#define cpu_relax() rep_nop()

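/*
 * Illustrative sketch, not part of the original header: cpu_relax()
 * goes in the body of a busy-wait loop, where the PAUSE hint saves
 * power and helps the sibling hyperthread ('flag' is hypothetical):
 *
 *	while (!flag)
 *		cpu_relax();
 */
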
static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     : : "a" (eax), "c" (ecx), "d" (edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     : : "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     : : "a" (eax), "c" (ecx));
}

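/*
 * Illustrative sketch, not part of the original header: MONITOR arms
 * a write-watch on an address range and MWAIT then idles until that
 * range is written or an interrupt arrives, which is how mwait-based
 * idle loops of this era pair the two:
 *
 *	__monitor(&current_thread_info()->flags, 0, 0);
 *	if (!need_resched())
 *		__mwait(0, 0);	// eax = C1 hint, ecx = no extensions
 */
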
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern int force_mwait;

extern void select_idle_routine(const struct cpuinfo_x86 *c);

extern unsigned long boot_option_idle_override;

/* Boot loader type from the setup header */
extern int bootloader_type;

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#define spin_lock_prefetch(x) prefetchw(x)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task) (task_pt_regs(task)->ip)

#endif /* __ASM_X86_PROCESSOR_H */