/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>

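/* Defined in entry.S: the path a freshly forked child resumes through. */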
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.sp)[3];
}

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

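/*
 * disable_hlt()/enable_hlt() let code veto use of the HLT instruction
 * in the idle loop (historically needed by APM and by hardware whose
 * DMA misbehaves while the CPU is halted, e.g. some floppy
 * controllers).  Calls nest; HLT is used only while hlt_counter is zero.
 */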
void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
			void (*idle)(void);

			check_pgt_cache();
			rmb();
			idle = pm_idle;

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (!idle)
				idle = default_idle;

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

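/*
 * Dump the register state in *regs.  For a trap from user mode the CPU
 * pushed %sp and %ss into the frame; for a kernel-mode trap it did not,
 * so they are recovered from the frame's position and the live segment
 * registers instead.  With 'all' set, the control and debug registers
 * are printed too.
 */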
void __show_registers(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		savesegment(gs, gs);
	} else {
		sp = (unsigned long) (&regs->sp);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk("\n");
	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
			task_pid_nr(current), current->comm,
			print_tainted(), init_utsname()->release,
			(int)strcspn(init_utsname()->version, " "),
			init_utsname()->version);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			(u16)regs->cs, regs->ip, regs->flags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
			d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	__show_registers(regs, 1);
	show_trace(NULL, regs, &regs->sp, regs->bp);
}

/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.bx = (unsigned long) fn;
	regs.dx = (unsigned long) arg;

	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

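/*
 * Illustrative sketch only (not from this file): a caller wanting a
 * background kernel thread would do something like
 *
 *	static int worker(void *unused)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * though kthread_create()/kthread_run() are the preferred interface
 * for new code.
 */
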
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

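/*
 * Set up the kernel stack and thread state of a newly forked child:
 * the child gets a copy of the parent's user-space register frame with
 * %ax forced to 0 (so fork() returns 0 in the child), optionally a new
 * user stack pointer, and thread.ip pointing at ret_from_fork so the
 * first switch to it unwinds straight back to user space.
 */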
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	savesegment(gs, p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

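/*
 * Point an exec'ing task at its new program: load the flat user code
 * and data selectors, clear %gs and %fs so no stale selectors survive
 * from the old image, and set the new entry point and stack.
 */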
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	__asm__("movl %0, %%gs" :: "r"(0));
	regs->fs = 0;
	set_fs(USER_DS);
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

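/*
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctl()
 * interface: PR_TSC_SIGSEGV sets CR4.TSD, so a user-mode rdtsc faults
 * and the task gets SIGSEGV.  From user space (illustrative sketch only):
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
 *	// any rdtsc from now on raises SIGSEGV for this task
 */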
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

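/*
 * Slow path of the context switch: everything rare enough not to be
 * worth an unconditional test in __switch_to() is handled here --
 * hardware debug registers, the per-task TSC-disable bit, BTS tracing
 * timestamps and the I/O permission bitmap.
 */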
static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		 struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;
	if (next->ds_area_msr != prev->ds_area_msr) {
		/* we clear debugctl to make sure DS
		 * is not in use when we change it */
		debugctl = 0;
		update_debugctlmsr(0);
		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
	}

	if (next->debugctlmsr != debugctl)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg0, 0);
		set_debugreg(next->debugreg1, 1);
		set_debugreg(next->debugreg2, 2);
		set_debugreg(next->debugreg3, 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg6, 6);
		set_debugreg(next->debugreg7, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

#ifdef X86_BTS
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif

	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}

	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * Previous owner of the bitmap (hence the bitmap content)
		 * matches the next task, we don't have to do anything but
		 * to set a valid offset in the TSS:
		 */
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}
	/*
	 * Lazy TSS's I/O bitmap copy. We set an invalid offset here
	 * and we let the task get a GPF in case an I/O instruction
	 * is performed. The handler of the GPF will verify that the
	 * faulting task has a valid I/O bitmap and, if true, does the
	 * real copy and restarts the instruction. This will save us
	 * redundant copies when the currently switched task does not
	 * perform any I/O during its timeslice.
	 */
	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	x86_write_percpu(current_task, next_p);

	return prev_p;
}

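/*
 * The fork/clone/vfork system calls below take struct pt_regs by
 * value: the i386 entry code pushes the full register frame, so the
 * argument is the user-space register state itself and &regs can be
 * handed straight to do_fork().
 */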
asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.bx;
	newsp = regs.cx;
	parent_tidptr = (int __user *)regs.dx;
	child_tidptr = (int __user *)regs.di;
	if (!newsp)
		newsp = regs.sp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) regs.bx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.cx,
			(char __user * __user *) regs.dx,
			&regs);
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}

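/*
 * get_wchan() reports where a blocked task sleeps, by walking the
 * saved %ebp frame chain from the stack pointer that switch_to()
 * recorded until it steps outside the scheduler.  Every frame pointer
 * is range-checked against the task's stack page and the walk is
 * capped, since the task may wake and scribble on its stack meanwhile.
 */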
#define top_esp		(THREAD_SIZE - sizeof(unsigned long))
#define top_ebp		(THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}

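/*
 * Stack and heap randomization: unless disabled via the
 * ADDR_NO_RANDOMIZE personality, the initial user stack is shifted
 * down by up to 8kB (kept 16-byte aligned) and the brk base by up to
 * 32MB, to make fixed-address exploits less reliable.
 */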
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}