/* include/asm-x86/processor.h */
#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/page.h>
#include <asm/system.h>

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
                                unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        __asm__("cpuid"
                : "=a" (*eax),
                  "=b" (*ebx),
                  "=c" (*ecx),
                  "=d" (*edx)
                : "0" (*eax), "2" (*ecx));
}

static inline void load_cr3(pgd_t *pgdir)
{
        write_cr3(__pa(pgdir));
}
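
/*
 * Usage sketch (illustrative, not part of the original header): switching
 * address spaces boils down to loading the next task's pgd through
 * load_cr3().  example_switch_pgd() is a hypothetical helper, not a
 * kernel API.
 */
static inline void example_switch_pgd(pgd_t *next_pgd)
{
        /* write_cr3(__pa(next_pgd)) also flushes non-global TLB entries */
        load_cr3(next_pgd);
}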

#ifdef CONFIG_X86_32
# include "processor_32.h"
#else
# include "processor_64.h"
#endif

extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

static inline unsigned long native_get_debugreg(int regno)
{
        unsigned long val = 0;  /* Damn you, gcc! */

        switch (regno) {
        case 0:
                asm("mov %%db0, %0" : "=r" (val)); break;
        case 1:
                asm("mov %%db1, %0" : "=r" (val)); break;
        case 2:
                asm("mov %%db2, %0" : "=r" (val)); break;
        case 3:
                asm("mov %%db3, %0" : "=r" (val)); break;
        case 6:
                asm("mov %%db6, %0" : "=r" (val)); break;
        case 7:
                asm("mov %%db7, %0" : "=r" (val)); break;
        default:
                BUG();
        }
        return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
        switch (regno) {
        case 0:
                asm("mov %0,%%db0" : /* no output */ : "r" (value));
                break;
        case 1:
                asm("mov %0,%%db1" : /* no output */ : "r" (value));
                break;
        case 2:
                asm("mov %0,%%db2" : /* no output */ : "r" (value));
                break;
        case 3:
                asm("mov %0,%%db3" : /* no output */ : "r" (value));
                break;
        case 6:
                asm("mov %0,%%db6" : /* no output */ : "r" (value));
                break;
        case 7:
                asm("mov %0,%%db7" : /* no output */ : "r" (value));
                break;
        default:
                BUG();
        }
}
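
/*
 * Usage sketch (illustrative, not part of the original header): reading the
 * debug status register and disabling all hardware breakpoints through the
 * native accessors above.  example_clear_hw_breakpoints() is a hypothetical
 * helper.
 */
static inline unsigned long example_clear_hw_breakpoints(void)
{
        unsigned long dr6 = native_get_debugreg(6);     /* which breakpoint fired */

        native_set_debugreg(7, 0);                      /* clear all enable bits */
        return dr6;
}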

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
        unsigned int reg;
        __asm__ __volatile__ ("pushfl;"
                              "popl %0;"
                              "andl %1, %0;"
                              "orl %2, %0;"
                              "pushl %0;"
                              "popfl"
                              : "=&r" (reg)
                              : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
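
/*
 * Usage sketch (illustrative, not part of the original header): the mask is
 * the desired privilege level already shifted into the EFLAGS IOPL field
 * (bits 12-13), i.e. "level << 12".  example_set_iopl() is a hypothetical
 * helper.
 */
static inline void example_set_iopl(unsigned level)
{
        native_set_iopl_mask((level & 3) << 12);        /* IOPL lives at EFLAGS bits 12-13 */
}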

#ifndef CONFIG_PARAVIRT
#define __cpuid native_cpuid
#define paravirt_enabled() 0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)                             \
        (var) = native_get_debugreg(register)
#define set_debugreg(value, register)                           \
        native_set_debugreg(register, value)

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
        unsigned cr4;

        mmu_cr4_features |= mask;
        cr4 = read_cr4();
        cr4 |= mask;
        write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
        unsigned cr4;

        mmu_cr4_features &= ~mask;
        cr4 = read_cr4();
        cr4 &= ~mask;
        write_cr4(cr4);
}
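
/*
 * Usage sketch (illustrative, not part of the original header): enabling a
 * CR4 feature this way also records it in mmu_cr4_features so secondary
 * CPUs pick it up when they boot.  X86_CR4_PGE comes from
 * <asm/processor-flags.h>; example_enable_global_pages() is a hypothetical
 * helper.
 */
static inline void example_enable_global_pages(void)
{
        set_in_cr4(X86_CR4_PGE);        /* PPro+ global pages survive ordinary TLB flushes */
}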

struct microcode_header {
        unsigned int hdrver;
        unsigned int rev;
        unsigned int date;
        unsigned int sig;
        unsigned int cksum;
        unsigned int ldrver;
        unsigned int pf;
        unsigned int datasize;
        unsigned int totalsize;
        unsigned int reserved[3];
};

struct microcode {
        struct microcode_header hdr;
        unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from Prescott processors */
struct extended_signature {
        unsigned int sig;
        unsigned int pf;
        unsigned int cksum;
};

struct extended_sigtable {
        unsigned int count;
        unsigned int cksum;
        unsigned int reserved[3];
        struct extended_signature sigs[0];
};
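
/*
 * Layout sketch (illustrative, not part of the original header): assuming
 * the datasize field is filled in, the update data follows the header for
 * datasize bytes, and an extended signature table follows that only when
 * totalsize says there is room for it.  example_ext_sigtable() is a
 * hypothetical helper and does not verify any checksums.
 */
static inline struct extended_sigtable *example_ext_sigtable(struct microcode *mc)
{
        unsigned long ext_offset = sizeof(struct microcode_header) + mc->hdr.datasize;

        if (mc->hdr.totalsize <= ext_offset)
                return NULL;            /* no extended signature table present */
        return (struct extended_sigtable *)((char *)mc + ext_offset);
}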

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
                         unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = 0;
        __cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
                               unsigned int *eax, unsigned int *ebx,
                               unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = count;
        __cpuid(eax, ebx, ecx, edx);
}
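
/*
 * Usage sketch (illustrative, not part of the original header): CPUID leaf 4
 * (Intel deterministic cache parameters) takes a subleaf index in %ecx and
 * reports cache type 0 in EAX[4:0] once the index runs past the last cache.
 * example_count_cache_leaves() is a hypothetical helper.
 */
static inline int example_count_cache_leaves(void)
{
        unsigned int eax, ebx, ecx, edx;
        int i = 0;

        do {
                cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
                i++;
        } while (eax & 0x1f);           /* cache type field, 0 = no more caches */

        return i - 1;
}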

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);
        return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);
        return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);
        return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);
        return edx;
}
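
/*
 * Usage sketch (illustrative, not part of the original header): the
 * single-datum helpers make raw feature tests terse, e.g. the SSE bit is
 * EDX bit 25 of CPUID leaf 1.  example_cpu_has_sse() is a hypothetical
 * helper; real kernel code would consult boot_cpu_data instead.
 */
static inline int example_cpu_has_sse(void)
{
        return (cpuid_edx(1) >> 25) & 1;
}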

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        __asm__ __volatile__("rep;nop" : : : "memory");
}

/* Stop speculative execution */
static inline void sync_core(void)
{
        int tmp;

        asm volatile("cpuid"
                     : "=a" (tmp)
                     : "0" (1)
                     : "ebx", "ecx", "edx", "memory");
}

#define cpu_relax()     rep_nop()
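
/*
 * Usage sketch (illustrative, not part of the original header): cpu_relax()
 * belongs inside tight polling loops so the pipeline backs off and, on
 * hyperthreaded parts, the sibling thread gets execution resources.
 * example_spin_until_set() is a hypothetical helper.
 */
static inline void example_spin_until_set(volatile int *flag)
{
        while (!*flag)
                cpu_relax();
}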

static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
{
        /* "monitor %eax,%ecx,%edx;" */
        asm volatile(".byte 0x0f,0x01,0xc8;"
                     : : "a" (eax), "c" (ecx), "d" (edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax,%ecx;" */
        asm volatile(".byte 0x0f,0x01,0xc9;"
                     : : "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax,%ecx;" */
        asm volatile("sti; .byte 0x0f,0x01,0xc9;"
                     : : "a" (eax), "c" (ecx));
}
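
/*
 * Usage sketch (illustrative, not part of the original header): the usual
 * MONITOR/MWAIT pattern arms the monitor on an address, re-checks the wakeup
 * condition to close the race, and only then executes MWAIT.
 * example_mwait_on() is a hypothetical helper; eax/ecx hint values of 0
 * request the default C-state with no extensions.
 */
static inline void example_mwait_on(volatile int *wake_flag)
{
        __monitor((const void *)wake_flag, 0, 0);       /* arm the address monitor */
        if (!*wake_flag)
                __mwait(0, 0);                          /* sleep until a write or interrupt */
}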

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern int force_mwait;

extern void select_idle_routine(const struct cpuinfo_x86 *c);

extern unsigned long boot_option_idle_override;

/* Boot loader type from the setup header */
extern int bootloader_type;

#define cache_line_size()       (boot_cpu_data.x86_cache_alignment)

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#define spin_lock_prefetch(x)   prefetchw(x)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)          (task_pt_regs(task)->ip)

#endif /* __ASM_X86_PROCESSOR_H */