/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include "linux/config.h"
#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/interrupt.h"
#include "linux/mm.h"
#include "linux/slab.h"
#include "linux/utsname.h"
#include "linux/fs.h"
#include "linux/utime.h"
#include "linux/smp_lock.h"
#include "linux/module.h"
#include "linux/init.h"
#include "linux/capability.h"
#include "linux/vmalloc.h"
#include "linux/spinlock.h"
#include "linux/proc_fs.h"
#include "linux/ptrace.h"
#include "linux/random.h"
#include "asm/unistd.h"
#include "asm/mman.h"
#include "asm/segment.h"
#include "asm/stat.h"
#include "asm/pgtable.h"
#include "asm/processor.h"
#include "asm/tlbflush.h"
#include "asm/uaccess.h"
#include "asm/user.h"
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
#include "signal_kern.h"
#include "signal_user.h"
#include "init.h"
#include "irq_user.h"
#include "mem_user.h"
#include "time_user.h"
#include "tlb.h"
#include "frame_kern.h"
#include "sigcontext.h"
#include "2_5compat.h"
#include "os.h"
#include "mode.h"
#include "mode_kern.h"
#include "choose-mode.h"

/* This is a per-cpu array. A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

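/* Look up a task_struct by pid under tasklist_lock.  If 'require' is set,
 * a failed lookup is treated as a fatal internal error.
 */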
struct task_struct *get_task(int pid, int require)
{
	struct task_struct *ret;

	read_lock(&tasklist_lock);
	ret = find_task_by_pid(pid);
	read_unlock(&tasklist_lock);

	if(require && (ret == NULL))
		panic("get_task couldn't find a task\n");
	return(ret);
}

int external_pid(void *t)
{
	struct task_struct *task = t ? t : current;

	return(CHOOSE_MODE_PROC(external_pid_tt, external_pid_skas, task));
}

int pid_to_processor_id(int pid)
{
	int i;

	for(i = 0; i < ncpus; i++){
		if(cpu_tasks[i].pid == pid)
			return(i);
	}
	return(-1);
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	int flags = GFP_KERNEL;

	/* GFP_ATOMIC must replace GFP_KERNEL here, not be OR-ed into it -
	 * otherwise __GFP_WAIT stays set and the allocation may sleep.
	 */
	if(atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);
	if(page == 0)
		return(0);
	stack_protections(page);
	return(page);
}

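/* UML doesn't pass fn/arg to a new kernel thread through registers the way
 * most arches do; they are stashed in current->thread.request for the child
 * to pick up after do_fork().
 */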
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0, NULL, 0, NULL,
		      NULL);
	if(pid < 0)
		panic("do_fork failed in kernel_thread, errno = %d", pid);
	return(pid);
}

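/* Only cpu_vm_mask bookkeeping for the generic mm code happens here; the
 * mode-specific (tt/skas) switch code handles the actual address space
 * change.
 */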
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (prev != next)
		cpu_clear(cpu, prev->cpu_vm_mask);
	cpu_set(cpu, next->cpu_vm_mask);
}

void set_current(void *t)
{
	struct task_struct *task = t;

	cpu_tasks[task->thread_info->cpu] = ((struct cpu_task)
		{ external_pid(task), task });
}

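/* Dispatch the context switch to the tt- or skas-mode implementation. */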
void *_switch_to(void *prev, void *next, void *last)
{
	return(CHOOSE_MODE(switch_to_tt(prev, next),
			   switch_to_skas(prev, next)));
}

void interrupt_end(void)
{
	if(need_resched())
		schedule();
	if(test_tsk_thread_flag(current, TIF_SIGPENDING))
		do_signal();
}

void release_thread(struct task_struct *task)
{
	CHOOSE_MODE(release_thread_tt(task), release_thread_skas(task));
}

void exit_thread(void)
{
	CHOOSE_MODE(exit_thread_tt(), exit_thread_skas());
	unprotect_stack((unsigned long) current_thread);
}

void *get_current(void)
{
	return(current);
}

void prepare_to_copy(struct task_struct *tsk)
{
}

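/* Reset the child's thread_struct to a clean state, then let the
 * mode-specific code (tt or skas) set up the new context.
 */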
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct * p,
		struct pt_regs *regs)
{
	p->thread = (struct thread_struct) INIT_THREAD;
	return(CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
				clone_flags, sp, stack_top, p, regs));
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	CHOOSE_MODE_PROC(initial_thread_cb_tt, initial_thread_cb_skas, proc,
			 arg);
	kmalloc_ok = save_kmalloc_ok;
}

unsigned long stack_sp(unsigned long page)
{
	return(page + PAGE_SIZE - sizeof(void *));
}

int current_pid(void)
{
	return(current->pid);
}

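/* The idle thread borrows init_mm (taking a reference on it), then
 * alternates between rescheduling when needed and sleeping in the host.
 */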
void default_idle(void)
{
	uml_idle_timer();

	atomic_inc(&init_mm.mm_count);
	current->mm = &init_mm;
	current->active_mm = &init_mm;

	while(1){
		/* endless idle loop with no priority at all */
		SET_PRI(current);

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if(need_resched())
			schedule();

		idle_sleep(10);
	}
}

void cpu_idle(void)
{
	CHOOSE_MODE(init_idle_tt(), init_idle_skas());
}

int page_size(void)
{
	return(PAGE_SIZE);
}

unsigned long page_mask(void)
{
	return(PAGE_MASK);
}

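/* Walk the four-level page tables (pgd -> pud -> pmd -> pte) for 'addr' in
 * the given task's address space.  Returns the matching physical address,
 * or ERR_PTR(-EINVAL) if any level is missing; the pte is optionally copied
 * out through 'pte_out'.
 */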
void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
		      pte_t *pte_out)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if(task->mm == NULL)
		return(ERR_PTR(-EINVAL));
	pgd = pgd_offset(task->mm, addr);
	if(!pgd_present(*pgd))
		return(ERR_PTR(-EINVAL));

	pud = pud_offset(pgd, addr);
	if(!pud_present(*pud))
		return(ERR_PTR(-EINVAL));

	pmd = pmd_offset(pud, addr);
	if(!pmd_present(*pmd))
		return(ERR_PTR(-EINVAL));

	pte = pte_offset_kernel(pmd, addr);
	if(!pte_present(*pte))
		return(ERR_PTR(-EINVAL));

	if(pte_out != NULL)
		*pte_out = *pte;
	return((void *) (pte_val(*pte) & PAGE_MASK) + (addr & ~PAGE_MASK));
}

char *current_cmd(void)
{
#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
	return("(Unknown)");
#else
	void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
	return IS_ERR(addr) ? "(Unknown)" : __va((unsigned long) addr);
#endif
}

void force_sigbus(void)
{
	printk(KERN_ERR "Killing pid %d because of a lack of memory\n",
	       current->pid);
	lock_kernel();
	sigaddset(&current->pending.signal, SIGBUS);
	recalc_sigpending();
	current->flags |= PF_SIGNALED;
	do_exit(SIGBUS | 0x80);
}

void dump_thread(struct pt_regs *regs, struct user *u)
{
}

void enable_hlt(void)
{
	panic("enable_hlt");
}

EXPORT_SYMBOL(enable_hlt);

void disable_hlt(void)
{
	panic("disable_hlt");
}

EXPORT_SYMBOL(disable_hlt);

void *um_kmalloc(int size)
{
	return(kmalloc(size, GFP_KERNEL));
}

void *um_kmalloc_atomic(int size)
{
	return(kmalloc(size, GFP_ATOMIC));
}

void *um_vmalloc(int size)
{
	return(vmalloc(size));
}

unsigned long get_fault_addr(void)
{
	return((unsigned long) current->thread.fault_addr);
}

EXPORT_SYMBOL(get_fault_addr);

void not_implemented(void)
{
	printk(KERN_DEBUG "Something isn't implemented in here\n");
}

EXPORT_SYMBOL(not_implemented);

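/* An sp is in userspace context if it doesn't fall within the current
 * kernel stack, whose base is found by masking sp down to its
 * CONFIG_KERNEL_STACK_ORDER-page alignment.
 */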
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return(stack != (unsigned long) current_thread);
}

extern void remove_umid_dir(void);

__uml_exitcall(remove_umid_dir);

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

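/* Run the UML-specific exitcalls, most recently registered first. */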
void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

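/* strdup() equivalent for kernel code - the caller must kfree() the result. */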
char *uml_strdup(char *string)
{
	char *new;

	new = kmalloc(strlen(string) + 1, GFP_KERNEL);
	if(new == NULL)
		return(NULL);
	strcpy(new, string);
	return(new);
}

void *get_init_task(void)
{
	return(&init_thread_union.thread_info.task);
}

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return(copy_to_user(to, from, size));
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return(copy_from_user(to, from, size));
}

int clear_user_proc(void __user *buf, int size)
{
	return(clear_user(buf, size));
}

int strlen_user_proc(char __user *str)
{
	return(strlen_user(str));
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread->cpu;

	IPI_handler(cpu);
	if(cpu != 0)
		return(1);
#endif
	return(0);
}

int um_in_interrupt(void)
{
	return(in_interrupt());
}

int cpu(void)
{
	return(current_thread->cpu);
}

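/* SYSEMU is the host ptrace extension that lets UML intercept guest system
 * calls more cheaply.  /proc/sysemu exposes the level in use (0-2); it is
 * writable, but capped at what the host actually supports.
 */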
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,
			    int *eof, void *data)
{
	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) /* No overflow */
		*eof = 1;

	return strlen(buf);
}

static int proc_write_sysemu(struct file *file, const char __user *buf,
			     unsigned long count, void *data)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = create_proc_entry("sysemu", 0600, &proc_root);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return(0);
	}

	ent->read_proc = proc_read_sysemu;
	ent->write_proc = proc_write_sysemu;

	return 0;
}

late_initcall(make_proc_sysemu);

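/* Returns 0 if the task isn't being single-stepped, 1 if it is stepping
 * through a system call, and 2 for ordinary single-stepping.
 */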
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return(0);

	if (task->thread.singlestep_syscall)
		return(1);

	return 2;
}

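/* Randomize the top of the process stack by up to 8kB (when
 * randomize_va_space is enabled), keeping it 16-byte aligned.
 */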
unsigned long arch_align_stack(unsigned long sp)
{
	if (randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */