#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

/* Save and restore flags so that a leaked NT (nested task) flag is cleared across the switch */
#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT					  \
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
		     "call __switch_to\n\t"				  \
		     ".globl thread_return\n"				  \
		     "thread_return:\n\t"				  \
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"		  \
		     "movq %P[thread_info](%%rsi),%%r8\n\t"		  \
		     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
		     "movq %%rax,%%rdi\n\t"				  \
		     "jc ret_from_fork\n\t"				  \
		     RESTORE_CONTEXT					  \
		     : "=a" (last)					  \
		     : [next] "S" (next), [prev] "D" (prev),		  \
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
		       [tif_fork] "i" (TIF_FORK),			  \
		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)	\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %1,%%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 8\n\t"			\
		".quad 1b,3b\n"			\
		".previous"			\
		: :"r" (value), "r" (0))

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline void write_cr3(unsigned long val)
{
	asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
}

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
}

#define stts() write_cr0(8 | read_cr0())
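
/*
 * Illustrative sketch (not part of the original header): clts()/stts()
 * typically bracket in-kernel FPU use under the lazy-FPU scheme, roughly what
 * kernel_fpu_begin()/kernel_fpu_end() do (those also save/restore state,
 * omitted here). The function below is a hypothetical, simplified example.
 */
#if 0
static inline void example_kernel_fpu_section(void)
{
	preempt_disable();
	clts();			/* clear CR0.TS so FPU insns do not fault */
	/* ... use FPU/SSE registers here ... */
	stts();			/* set CR0.TS again (8 == CR0.TS bit) */
	preempt_enable();
}
#endif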

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif


/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()	asm volatile("sfence" ::: "memory")
#else
#define wmb()	asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
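
/*
 * Illustrative sketch (not part of the original header): the classic
 * producer/consumer pairing of smp_wmb()/smp_rmb() for CPU-to-CPU ordering
 * (use wmb()/rmb() when a device is involved). Names below are hypothetical.
 */
#if 0
static int example_data;
static int example_ready;

static void example_producer(void)
{
	example_data = 42;
	smp_wmb();		/* publish the data before the flag */
	example_ready = 1;
}

static void example_consumer(void)
{
	if (example_ready) {
		smp_rmb();	/* order the flag read before the data read */
		/* example_data is guaranteed to be 42 here */
	}
}
#endif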

#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

#include <linux/irqflags.h>

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

#endif