/* thread_info.h: low-level thread information
 *
 * Copyright (C) 2002  David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */

#ifndef _ASM_X86_THREAD_INFO_H
#define _ASM_X86_THREAD_INFO_H

#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/types.h>

/*
 * low level task data that entry.S needs immediate access to
 * - this struct should fit entirely inside of one cache line
 * - this struct shares the supervisor stack pages
 */
#ifndef __ASSEMBLY__
struct task_struct;
struct exec_domain;
#include <asm/processor.h>

24 | struct thread_info { | |
25 | struct task_struct *task; /* main task structure */ | |
26 | struct exec_domain *exec_domain; /* execution domain */ | |
006c484b CL |
27 | __u32 flags; /* low level flags */ |
28 | __u32 status; /* thread synchronous flags */ | |
2052e8d4 | 29 | __u32 cpu; /* current CPU */ |
006c484b | 30 | int preempt_count; /* 0 => preemptable, |
2052e8d4 | 31 | <0 => BUG */ |
006c484b | 32 | mm_segment_t addr_limit; |
2052e8d4 | 33 | struct restart_block restart_block; |
006c484b CL |
34 | void __user *sysenter_return; |
35 | #ifdef CONFIG_X86_32 | |
2052e8d4 CL |
36 | unsigned long previous_esp; /* ESP of the previous stack in |
37 | case of nested (IRQ) stacks | |
38 | */ | |
39 | __u8 supervisor_stack[0]; | |
006c484b | 40 | #endif |
2052e8d4 | 41 | }; |

/*
 * Initial thread_info for the boot task.  preempt_count starts at 1
 * so nothing can preempt until the scheduler is fully functional.
 */
#define INIT_THREAD_INFO(tsk)				\
{							\
	.task		= &tsk,				\
	.exec_domain	= &default_exec_domain,		\
	.flags		= 0,				\
	.cpu		= 0,				\
	.preempt_count	= 1,				\
	.addr_limit	= KERNEL_DS,			\
	.restart_block	= {				\
		.fn = do_no_restart_syscall,		\
	},						\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

#else /* !__ASSEMBLY__ */

#include <asm/asm-offsets.h>

#endif

/*
 * thread information flags
 * - these are process state flags that various assembly files
 *   may need to access
 * - pending work-to-be-done flags are in LSW
 * - other flags in MSW
 * Warning: layout of LSW is hardcoded in entry.S
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
#define TIF_IRET		5	/* force IRET */
#ifdef CONFIG_X86_32
#define TIF_SYSCALL_EMU		6	/* syscall emulation active */
#endif
#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
#define TIF_SECCOMP		8	/* secure computing */
#define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal */
#define TIF_MCE_NOTIFY		10	/* notify userspace of an MCE */
#define TIF_HRTICK_RESCHED	11	/* reprogram hrtick timer */
#define TIF_NOTSC		16	/* TSC is not accessible in userland */
#define TIF_IA32		17	/* 32bit process */
#define TIF_FORK		18	/* ret_from_fork */
#define TIF_ABI_PENDING		19
#define TIF_MEMDIE		20
#define TIF_DEBUG		21	/* uses debug registers */
#define TIF_IO_BITMAP		22	/* uses I/O bitmap */
#define TIF_FREEZE		23	/* is freezing for suspend */
#define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
#define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
#define TIF_DS_AREA_MSR		26	/* uses thread_struct.ds_area_msr */
#define TIF_BTS_TRACE_TS	27	/* record scheduling event timestamps */

/* Mask forms of the bit numbers above, for flag-word tests. */
#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_IRET		(1 << TIF_IRET)
#ifdef CONFIG_X86_32
#define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
#else
#define _TIF_SYSCALL_EMU	0	/* no syscall emulation on 64 bit */
#endif
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
#define _TIF_MCE_NOTIFY		(1 << TIF_MCE_NOTIFY)
#define _TIF_HRTICK_RESCHED	(1 << TIF_HRTICK_RESCHED)
#define _TIF_NOTSC		(1 << TIF_NOTSC)
#define _TIF_IA32		(1 << TIF_IA32)
#define _TIF_FORK		(1 << TIF_FORK)
#define _TIF_ABI_PENDING	(1 << TIF_ABI_PENDING)
#define _TIF_DEBUG		(1 << TIF_DEBUG)
#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
#define _TIF_FREEZE		(1 << TIF_FREEZE)
#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
#define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
#define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
#define _TIF_BTS_TRACE_TS	(1 << TIF_BTS_TRACE_TS)

/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK							\
	(0x0000FFFF &							\
	 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|	\
	   _TIF_SECCOMP|_TIF_SYSCALL_EMU))

/* work to do on any return to user space */
#define _TIF_ALLWORK_MASK	(0x0000FFFF & ~_TIF_SECCOMP)

/* Only used for 64 bit */
#define _TIF_DO_NOTIFY_MASK						\
	(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED)

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW							\
	(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS| \
	 _TIF_NOTSC)

/* the outgoing task needs no debug-register reload, the incoming one may */
#define _TIF_WORK_CTXSW_PREV	_TIF_WORK_CTXSW
#define _TIF_WORK_CTXSW_NEXT	(_TIF_WORK_CTXSW|_TIF_DEBUG)

/* marker bit in preempt_count: task is mid-preemption, don't recurse */
#define PREEMPT_ACTIVE		0x10000000

/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
/* zeroed stacks let the debug code measure how much was actually used */
#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
#else
#define THREAD_FLAGS GFP_KERNEL
#endif

#define alloc_thread_info(tsk)						\
	((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))

#ifdef CONFIG_X86_32

/* warn when free stack space drops below 1/8 of the stack size */
#define STACK_WARN	(THREAD_SIZE/8)
2052e8d4 CL |
162 | /* |
163 | * macros/functions for gaining access to the thread information structure | |
164 | * | |
165 | * preempt_count needs to be 1 initially, until the scheduler is functional. | |
166 | */ | |
167 | #ifndef __ASSEMBLY__ | |
168 | ||
2052e8d4 CL |
169 | |
170 | /* how to get the current stack pointer from C */ | |
171 | register unsigned long current_stack_pointer asm("esp") __used; | |
172 | ||
173 | /* how to get the thread information struct from C */ | |
174 | static inline struct thread_info *current_thread_info(void) | |
175 | { | |
176 | return (struct thread_info *) | |
177 | (current_stack_pointer & ~(THREAD_SIZE - 1)); | |
178 | } | |
179 | ||
2052e8d4 CL |
180 | #else /* !__ASSEMBLY__ */ |
181 | ||
182 | /* how to get the thread information struct from ASM */ | |
183 | #define GET_THREAD_INFO(reg) \ | |
184 | movl $-THREAD_SIZE, reg; \ | |
185 | andl %esp, reg | |
186 | ||
187 | /* use this one if reg already contains %esp */ | |
188 | #define GET_THREAD_INFO_WITH_ESP(reg) \ | |
189 | andl $-THREAD_SIZE, reg | |
190 | ||
191 | #endif | |
192 | ||
2052e8d4 CL |
193 | #else /* X86_32 */ |
194 | ||
2052e8d4 CL |
195 | #include <asm/pda.h> |
196 | ||
2052e8d4 CL |
197 | /* |
198 | * macros/functions for gaining access to the thread information structure | |
199 | * preempt_count needs to be 1 initially, until the scheduler is functional. | |
200 | */ | |
201 | #ifndef __ASSEMBLY__ | |
2052e8d4 CL |
202 | static inline struct thread_info *current_thread_info(void) |
203 | { | |
204 | struct thread_info *ti; | |
205 | ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE); | |
206 | return ti; | |
207 | } | |
208 | ||
209 | /* do not use in interrupt context */ | |
210 | static inline struct thread_info *stack_thread_info(void) | |
211 | { | |
212 | struct thread_info *ti; | |
213 | asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1))); | |
214 | return ti; | |
215 | } | |
216 | ||
2052e8d4 CL |
217 | #else /* !__ASSEMBLY__ */ |
218 | ||
219 | /* how to get the thread information struct from ASM */ | |
220 | #define GET_THREAD_INFO(reg) \ | |
221 | movq %gs:pda_kernelstack,reg ; \ | |
222 | subq $(THREAD_SIZE-PDA_STACKOFFSET),reg | |
223 | ||
224 | #endif | |
225 | ||
#endif /* !X86_32 */

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_USEDFPU		0x0001	/* FPU was used by this task
					   this quantum (SMP) */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
#define TS_POLLING		0x0004	/* true if in idle loop
					   and not sleeping */

#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)

#ifndef __ASSEMBLY__
/* arch hooks for task_struct / thread_info lifetime management */
extern void arch_task_cache_init(void);
extern void free_thread_info(struct thread_info *ti);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
/* self-#define signals to generic code that the arch provides this hook */
#define arch_task_cache_init arch_task_cache_init
#endif
#endif /* _ASM_X86_THREAD_INFO_H */