/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

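/*
 * Allocate a kernel stack of 2^order pages.  The atomic flag lets a
 * caller that cannot sleep ask for GFP_ATOMIC rather than GFP_KERNEL.
 */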
unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

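/*
 * do_fork() has no fn/arg parameters, so they are stashed in
 * current->thread.request here and picked back up by
 * new_thread_handler() when the child first runs.
 */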
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	return pid;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

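/*
 * The stack switch itself is done by switch_threads(), which swaps
 * jmp_buf-style context buffers.  If this task was switched to just so
 * it could show_regs() itself, thread.saved_task holds the task to
 * switch straight back to, hence the retry loop.
 */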
void *_switch_to(void *prev, void *next, void *last)
{
	struct task_struct *from = prev;
	struct task_struct *to = next;

	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current);

		if (current->thread.saved_task)
			show_regs(&(current->thread.regs));
		to = current->thread.saved_task;
		from = current;
	} while (current->thread.saved_task);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_tsk_thread_flag(current, TIF_SIGPENDING))
		do_signal();
}

void exit_thread(void)
{
}

void *get_current(void)
{
	return current;
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else
		do_exit(0);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed.  We may want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

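/*
 * Two cases here: a fork gives the child a copy of the parent's
 * registers and starts it in fork_handler(); a kernel thread gets a
 * known-safe register set and starts in new_thread_handler(), which
 * runs the function stashed by kernel_thread().
 */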
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct *p,
		struct pt_regs *regs)
{
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (current->thread.forking) {
		memcpy(&p->thread.regs.regs, &regs->regs,
		       sizeof(p->thread.regs.regs));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.gp, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	}
	else {
		get_safe_registers(p->thread.regs.regs.gp);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (current->thread.forking) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

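/*
 * Tickless idle: the periodic tick is stopped, the host timer is
 * disabled, and the UML process sleeps for the interval returned by
 * disable_timer() before the tick is restarted.
 */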
void default_idle(void)
{
	unsigned long long nsecs;

	while (1) {
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if (need_resched())
			schedule();

		tick_nohz_stop_sched_tick(1);
		nsecs = disable_timer();
		idle_sleep(nsecs);
		tick_nohz_restart_sched_tick();
	}
}

void cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	default_idle();
}

int __cant_sleep(void)
{
	/* Is in_interrupt() really needed? */
	return in_atomic() || irqs_disabled() || in_interrupt();
}

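/*
 * Kernel stacks are PAGE_SIZE << CONFIG_KERNEL_STACK_ORDER bytes and
 * aligned to their size, so masking sp down to that alignment yields
 * the stack base.  If that base is not current_thread_info(), sp must
 * be a userspace address.
 */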
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

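/*
 * UML-specific exitcalls are collected between these two linker-script
 * symbols; run them in reverse order of registration, as with normal
 * exitcalls.
 */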
extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;

	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

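/*
 * PTRACE_SYSEMU support on the host saves ptrace stops when
 * intercepting syscalls.  Writing 0-2 to /proc/sysemu selects how much
 * of it to use, capped by the level the host was probed to support
 * (sysemu_supported).
 */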
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);
	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

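/*
 * Return 0 if the task is not being singlestepped, 1 if a syscall is
 * being singlestepped (thread.singlestep_syscall), and 2 otherwise.
 */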
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h.  As this is included in UML from
 * asm-um/system-generic.h, we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

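/*
 * Walk the sleeping task's kernel stack word by word, treating each
 * word as a possible return address: skip everything up to the
 * scheduler functions, then report the first kernel text address
 * found above them.
 */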
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

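/*
 * Note that the task argument is unused: the FPU state is read from
 * the userspace process backing the current CPU.
 */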
int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}