arch/um/kernel/process.c
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>

/*
 * This is a per-cpu array. A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
        /* FIXME: Need to look up userspace_pid by cpu */
        return userspace_pid[0];
}

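/*
 * Map a host pid back to the index of the CPU whose cpu_tasks entry
 * it occupies, or -1 if no CPU is running that pid.
 */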
int pid_to_processor_id(int pid)
{
        int i;

        for (i = 0; i < ncpus; i++) {
                if (cpu_tasks[i].pid == pid)
                        return i;
        }
        return -1;
}

void free_stack(unsigned long stack, int order)
{
        free_pages(stack, order);
}

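/*
 * Allocate 2^order pages for a kernel stack, using GFP_ATOMIC when
 * the caller cannot sleep. Returns 0 on failure, as
 * __get_free_pages() does.
 */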
unsigned long alloc_stack(int order, int atomic)
{
        unsigned long page;
        gfp_t flags = GFP_KERNEL;

        if (atomic)
                flags = GFP_ATOMIC;
        page = __get_free_pages(flags, order);

        return page;
}

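/*
 * Record the incoming task, together with the host pid backing it,
 * in this CPU's cpu_tasks slot.
 */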
static inline void set_current(struct task_struct *task)
{
        cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
                { external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

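/*
 * The core of the UML context switch - switch_threads() jumps to the
 * buffer saved for the next task, so everything after it runs in the
 * new task's context. The return value is what the resumed task sees
 * as the task it switched away from.
 */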
void *__switch_to(struct task_struct *from, struct task_struct *to)
{
        to->thread.prev_sched = from;
        set_current(to);

        switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
        arch_switch_to(current);

        return current->thread.prev_sched;
}

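/*
 * Run on the way back out to userspace: reschedule if needed, then
 * handle any pending signal and TIF_NOTIFY_RESUME work.
 */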
void interrupt_end(void)
{
        if (need_resched())
                schedule();
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal();
        if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(&current->thread.regs);
}

void exit_thread(void)
{
}

int get_current_pid(void)
{
        return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
        int (*fn)(void *), n;
        void *arg;

        if (current->thread.prev_sched != NULL)
                schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;

        fn = current->thread.request.u.thread.proc;
        arg = current->thread.request.u.thread.arg;

        /*
         * callback returns only if the kernel thread execs a process
         */
        n = fn(arg);
        userspace(&current->thread.regs.regs);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
        force_flush_all();

        schedule_tail(current->thread.prev_sched);

        /*
         * XXX: if interrupt_end() calls schedule, this call to
         * arch_switch_to isn't needed. We could want to apply this to
         * improve performance. -bb
         */
        arch_switch_to(current);

        current->thread.prev_sched = NULL;

        userspace(&current->thread.regs.regs);
}

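/*
 * Set up the new task's thread state. Userspace children start from a
 * copy of the parent's registers and resume via fork_handler(); for
 * kernel threads, sp and arg carry the function pointer and argument
 * that new_thread_handler() will call.
 */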
int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p)
{
        void (*handler)(void);
        int kthread = current->flags & PF_KTHREAD;
        int ret = 0;

        p->thread = (struct thread_struct) INIT_THREAD;

        if (!kthread) {
                memcpy(&p->thread.regs.regs, current_pt_regs(),
                       sizeof(p->thread.regs.regs));
                PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
                if (sp != 0)
                        REGS_SP(p->thread.regs.regs.gp) = sp;

                handler = fork_handler;

                arch_copy_thread(&current->thread.arch, &p->thread.arch);
        } else {
                get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
                p->thread.request.u.thread.proc = (int (*)(void *))sp;
                p->thread.request.u.thread.arg = (void *)arg;
                handler = new_thread_handler;
        }

        new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

        if (!kthread) {
                clear_flushed_tls(p);

                /*
                 * Set a new TLS for the child thread?
                 */
                if (clone_flags & CLONE_SETTLS)
                        ret = arch_copy_tls(p);
        }

        return ret;
}

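/*
 * Run a callback on the initial (host) thread. kmalloc_ok is saved
 * and cleared for the duration of the call, presumably because code
 * running on the initial thread's stack cannot safely use the normal
 * allocators.
 */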
void initial_thread_cb(void (*proc)(void *), void *arg)
{
        int save_kmalloc_ok = kmalloc_ok;

        kmalloc_ok = 0;
        initial_thread_cb_skas(proc, arg);
        kmalloc_ok = save_kmalloc_ok;
}

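/*
 * Idle this (virtual) CPU: record the host pid in its cpu_tasks
 * entry, then sleep in the host until the next timer is due.
 */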
void arch_cpu_idle(void)
{
        unsigned long long nsecs;

        cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
        nsecs = disable_timer();
        idle_sleep(nsecs);
        local_irq_enable();
}

int __cant_sleep(void)
{
        return in_atomic() || irqs_disabled() || in_interrupt();
        /* Is in_interrupt() really needed? */
}

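/*
 * Nonzero if sp does not lie on the current task's kernel stack,
 * i.e. the given stack pointer belongs to a process (userspace)
 * context rather than to the kernel.
 */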
int user_context(unsigned long sp)
{
        unsigned long stack;

        stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
        return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

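/*
 * Run the UML-specific exitcalls in reverse registration order
 * (last registered, first called).
 */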
void do_uml_exitcalls(void)
{
        exitcall_t *call;

        call = &__uml_exitcall_end;
        while (--call >= &__uml_exitcall_begin)
                (*call)();
}

char *uml_strdup(const char *string)
{
        return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
        return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
        return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
        return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
        return strlen_user(str);
}

int cpu(void)
{
        return current_thread_info()->cpu;
}

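/*
 * /proc/sysemu support. The value written selects how syscalls of
 * traced processes are intercepted: 0 disables sysemu, while 1 and 2
 * appear to correspond to the host's PTRACE_SYSEMU and
 * PTRACE_SYSEMU_SINGLESTEP support levels, capped by
 * sysemu_supported as probed at boot.
 */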
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
        if (value > sysemu_supported)
                return;
        atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
        return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%d\n", get_using_sysemu());
        return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *pos)
{
        char tmp[2];

        if (copy_from_user(tmp, buf, 1))
                return -EFAULT;

        if (tmp[0] >= '0' && tmp[0] <= '2')
                set_using_sysemu(tmp[0] - '0');
        /* We use the first char, but pretend to write everything */
        return count;
}

static const struct file_operations sysemu_proc_fops = {
        .owner = THIS_MODULE,
        .open = sysemu_proc_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
        struct proc_dir_entry *ent;

        if (!sysemu_supported)
                return 0;

        ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

        if (ent == NULL) {
                printk(KERN_WARNING "Failed to register /proc/sysemu\n");
                return 0;
        }

        return 0;
}

late_initcall(make_proc_sysemu);

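/*
 * Report the single-stepping state of a task (current if t is NULL):
 * 0 - not single-stepping, 1 - stepping inside a syscall,
 * 2 - ordinary single-stepping.
 */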
int singlestepping(void *t)
{
        struct task_struct *task = t ? t : current;

        if (!(task->ptrace & PT_DTRACE))
                return 0;

        if (task->thread.singlestep_syscall)
                return 1;

        return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}
#endif

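/*
 * Walk the saved kernel stack of a sleeping task, skipping everything
 * up to and including the scheduler frames, and return the first
 * kernel text address found above them - the function the task is
 * waiting in.
 */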
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack_page, sp, ip;
        bool seen_sched = false;

        if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
                return 0;

        stack_page = (unsigned long) task_stack_page(p);
        /* Bail if the process has no kernel stack for some reason */
        if (stack_page == 0)
                return 0;

        sp = p->thread.switch_buf->JB_SP;
        /*
         * Bail if the stack pointer is below the bottom of the kernel
         * stack for some reason
         */
        if (sp < stack_page)
                return 0;

        while (sp < stack_page + THREAD_SIZE) {
                ip = *((unsigned long *) sp);
                if (in_sched_functions(ip))
                        /* Ignore everything until we're above the scheduler */
                        seen_sched = true;
                else if (kernel_text_address(ip) && seen_sched)
                        return ip;

                sp += sizeof(unsigned long);
        }

        return 0;
}

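/*
 * Copy the FPU state out for an ELF core dump. Note that the
 * registers are read from the host process backing the current CPU,
 * not from t.
 */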
int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
        int cpu = current_thread_info()->cpu;

        return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}