/*
 * Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <setjmp.h>
#include <sched.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/user.h>
#include <sys/time.h>
#include <asm/unistd.h>
#include <asm/types.h>
#include "user.h"
#include "ptrace_user.h"
#include "time_user.h"
#include "sysdep/ptrace.h"
#include "user_util.h"
#include "kern_util.h"
#include "skas.h"
#include "stub-data.h"
#include "mm_id.h"
#include "sysdep/sigcontext.h"
#include "sysdep/stub.h"
#include "os.h"
#include "proc_mm.h"
#include "skas_ptrace.h"
#include "chan_user.h"
#include "signal_user.h"
#include "registers.h"
#include "mem.h"
#include "uml-config.h"
#include "process.h"

int is_skas_winch(int pid, int fd, void *data)
{
        if(pid != os_getpgrp())
                return(0);

        register_winch_irq(-1, fd, -1, data);
        return(1);
}

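/* Optionally deliver a signal to the stub process, continue it, and wait
 * until it stops with SIGUSR1 or SIGTRAP, the only signals accepted here as
 * a successful stop.  SIGVTALRM and SIGWINCH stops are skipped because they
 * can arrive while the stub runs (e.g. UML inside a detached screen);
 * anything else is fatal.
 */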
void wait_stub_done(int pid, int sig, char * fname)
{
        int n, status, err;

        do {
                if(sig != -1){
                        err = ptrace(PTRACE_CONT, pid, 0, sig);
                        if(err)
                                panic("%s : continue failed, errno = %d\n",
                                      fname, errno);
                }
                sig = 0;

                CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
        } while((n >= 0) && WIFSTOPPED(status) &&
                ((WSTOPSIG(status) == SIGVTALRM) ||
                 /* running UML inside a detached screen can cause
                  * SIGWINCHes
                  */
                 (WSTOPSIG(status) == SIGWINCH)));

        if((n < 0) || !WIFSTOPPED(status) ||
           (WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status) != SIGTRAP)){
                panic("%s : failed to wait for SIGUSR1/SIGTRAP, "
                      "pid = %d, n = %d, errno = %d, status = 0x%x\n",
                      fname, pid, n, errno, status);
        }
}

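/* Fetch the fault information for the last SIGSEGV taken by the process.
 * With PTRACE_FAULTINFO (e.g. a host carrying the skas3 patch) the host
 * fills the structure in directly; the memset covers hosts whose
 * ptrace_faultinfo is smaller than UML's faultinfo.  Without it, the stub's
 * SIGSEGV handler has already written the faultinfo at the start of the
 * stub stack page, so the stub is allowed to finish and the data is copied
 * from there.
 */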
void get_skas_faultinfo(int pid, struct faultinfo * fi)
{
        int err;

        if(ptrace_faultinfo){
                err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
                if(err)
                        panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, "
                              "errno = %d\n", errno);

                /* Special handling for i386, which has different structs */
                if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
                        memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
                               sizeof(struct faultinfo) -
                               sizeof(struct ptrace_faultinfo));
        }
        else {
                wait_stub_done(pid, SIGSEGV, "get_skas_faultinfo");

                /* faultinfo is prepared by the stub-segv-handler at start of
                 * the stub stack page. We just have to copy it.
                 */
                memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
        }
}

static void handle_segv(int pid, union uml_pt_regs * regs)
{
        get_skas_faultinfo(pid, &regs->skas.faultinfo);
        segv(regs->skas.faultinfo, 0, 1, NULL);
}

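/* Called when the child stops with SIGTRAP | 0x80, i.e. at a syscall.
 * Without sysemu the syscall would actually run on the host, so its number
 * is rewritten to __NR_getpid (harmless) and the child is continued to the
 * syscall exit before UML handles the syscall itself.
 */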
/* To use the same value of using_sysemu as the caller, it is passed in
 * as local_using_sysemu.
 */
static void handle_trap(int pid, union uml_pt_regs *regs,
                        int local_using_sysemu)
{
        int err, status;

        /* Mark this as a syscall */
        UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->skas.regs);

        if (!local_using_sysemu)
        {
                err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
                             __NR_getpid);
                if(err < 0)
                        panic("handle_trap - nullifying syscall failed, "
                              "errno = %d\n", errno);

                err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
                if(err < 0)
                        panic("handle_trap - continuing to end of syscall failed, "
                              "errno = %d\n", errno);

                CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
                if((err < 0) || !WIFSTOPPED(status) ||
                   (WSTOPSIG(status) != SIGTRAP + 0x80))
                        panic("handle_trap - failed to wait at end of syscall, "
                              "errno = %d, status = %d\n", errno, status);
        }

        handle_syscall(regs);
}

extern int __syscall_stub_start;
int stub_code_fd = -1;
__u64 stub_code_offset;

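/* Child side of start_userspace(), run by clone() on the temporary stack.
 * The child asks to be traced and, when /proc/mm is not available, maps the
 * syscall stub code page and its stub data/stack page at the fixed addresses
 * UML_CONFIG_STUB_CODE and UML_CONFIG_STUB_DATA.  Without PTRACE_FAULTINFO
 * it also installs the stub SIGSEGV handler on an alternate stack in the
 * stub data page.  Finally it stops itself; start_userspace() below waits
 * for the resulting SIGSTOP before taking control via ptrace.
 */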
static int userspace_tramp(void *stack)
{
        void *addr;

        ptrace(PTRACE_TRACEME, 0, 0, 0);

        init_new_thread_signals(1);
        enable_timer();

        if(!proc_mm){
                /* This has a pte, but it can't be mapped in with the usual
                 * tlb_flush mechanism because this is part of that mechanism
                 */
                addr = mmap64((void *) UML_CONFIG_STUB_CODE, page_size(),
                              PROT_EXEC, MAP_FIXED | MAP_PRIVATE,
                              stub_code_fd, stub_code_offset);
                if(addr == MAP_FAILED){
                        printk("mapping stub code failed, errno = %d\n",
                               errno);
                        exit(1);
                }

                if(stack != NULL){
                        int fd;
                        __u64 offset;

                        fd = phys_mapping(to_phys(stack), &offset);
                        addr = mmap((void *) UML_CONFIG_STUB_DATA, page_size(),
                                    PROT_READ | PROT_WRITE,
                                    MAP_FIXED | MAP_SHARED, fd, offset);
                        if(addr == MAP_FAILED){
                                printk("mapping stub stack failed, "
                                       "errno = %d\n", errno);
                                exit(1);
                        }
                }
        }
        if(!ptrace_faultinfo){
                unsigned long v = UML_CONFIG_STUB_CODE +
                                  (unsigned long) stub_segv_handler -
                                  (unsigned long) &__syscall_stub_start;

                set_sigstack((void *) UML_CONFIG_STUB_DATA, page_size());
                set_handler(SIGSEGV, (void *) v, SA_ONSTACK,
                            SIGIO, SIGWINCH, SIGALRM, SIGVTALRM,
                            SIGUSR1, -1);
        }

        os_stop_process(os_getpid());
        return(0);
}

/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
int userspace_pid[NR_CPUS];

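/* Create the host process which will run userspace code.  A temporary stack
 * is mmapped for userspace_tramp(), the process is cloned (sharing the
 * address space only when /proc/mm is available), and we wait for it to stop
 * itself with SIGSTOP.  PTRACE_O_TRACESYSGOOD is then set so that syscall
 * stops are reported as SIGTRAP | 0x80 and can be told apart from real
 * SIGTRAPs.  Returns the new pid.
 */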
int start_userspace(unsigned long stub_stack)
{
        void *stack;
        unsigned long sp;
        int pid, status, n, flags;

        if(stub_code_fd == -1)
                stub_code_fd = phys_mapping(to_phys(&__syscall_stub_start),
                                            &stub_code_offset);

        stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if(stack == MAP_FAILED)
                panic("start_userspace : mmap failed, errno = %d", errno);
        sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);

        flags = CLONE_FILES | SIGCHLD;
        if(proc_mm)
                flags |= CLONE_VM;
        pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
        if(pid < 0)
                panic("start_userspace : clone failed, errno = %d", errno);

        do {
                CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
                if(n < 0)
                        panic("start_userspace : wait failed, errno = %d",
                              errno);
        } while(WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

        if(!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
                panic("start_userspace : expected SIGSTOP, got status = %d",
                      status);

        if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
                   (void *) PTRACE_O_TRACESYSGOOD) < 0)
                panic("start_userspace : PTRACE_SETOPTIONS failed, errno=%d\n",
                      errno);

        if(munmap(stack, PAGE_SIZE) < 0)
                panic("start_userspace : munmap failed, errno = %d\n", errno);

        return(pid);
}

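/* The main tracing loop - this is where UML process context actually runs.
 * Each iteration restores the UML registers into the host child, resumes it
 * with the ptrace operation chosen from the current sysemu and single-step
 * settings, waits for the next stop, saves the registers back, and
 * dispatches on the stop signal: page faults, syscall traps, and the other
 * signals are passed to the corresponding UML handlers.
 */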
void userspace(union uml_pt_regs *regs)
{
        int err, status, op, pid = userspace_pid[0];
        int local_using_sysemu; /* To prevent races if using_sysemu changes under us. */

        while(1){
                restore_registers(pid, regs);

                /* Now we set local_using_sysemu to be used for one loop */
                local_using_sysemu = get_using_sysemu();

                op = SELECT_PTRACE_OPERATION(local_using_sysemu,
                                             singlestepping(NULL));

                err = ptrace(op, pid, 0, 0);
                if(err)
                        panic("userspace - could not resume userspace process, "
                              "pid = %d, ptrace operation = %d, errno = %d\n",
                              pid, op, errno);

                CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
                if(err < 0)
                        panic("userspace - waitpid failed, errno = %d\n",
                              errno);

                regs->skas.is_user = 1;
                save_registers(pid, regs);
                UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

                if(WIFSTOPPED(status)){
                        switch(WSTOPSIG(status)){
                        case SIGSEGV:
                                if(PTRACE_FULL_FAULTINFO || !ptrace_faultinfo)
                                        user_signal(SIGSEGV, regs, pid);
                                else handle_segv(pid, regs);
                                break;
                        case SIGTRAP + 0x80:
                                handle_trap(pid, regs, local_using_sysemu);
                                break;
                        case SIGTRAP:
                                relay_signal(SIGTRAP, regs);
                                break;
                        case SIGIO:
                        case SIGVTALRM:
                        case SIGILL:
                        case SIGBUS:
                        case SIGFPE:
                        case SIGWINCH:
                                user_signal(WSTOPSIG(status), regs, pid);
                                break;
                        default:
                                printk("userspace - child stopped with signal "
                                       "%d\n", WSTOPSIG(status));
                        }
                        pid = userspace_pid[0];
                        interrupt_end();

                        /* Avoid -ERESTARTSYS handling in host */
                        PT_SYSCALL_NR(regs->skas.regs) = -1;
                }
        }
}

#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_REMOVE_SIGSTACK 1
#define INIT_JMP_CALLBACK 2
#define INIT_JMP_HALT 3
#define INIT_JMP_REBOOT 4

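/* Fork a new address space in skas0 mode (no /proc/mm).  The existing
 * process is pointed at the clone stub in its stub code page; the parent
 * stub clones a new process and reports the child's pid in its stub data
 * page, while the child stub maps its own stub data page (using the fd and
 * offset prepared below) and reports success there.  Both results are
 * checked here via wait_stub_done().
 */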
int copy_context_skas0(unsigned long new_stack, int pid)
{
        int err;
        unsigned long regs[MAX_REG_NR];
        unsigned long current_stack = current_stub_stack();
        struct stub_data *data = (struct stub_data *) current_stack;
        struct stub_data *child_data = (struct stub_data *) new_stack;
        __u64 new_offset;
        int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

        /* prepare offset and fd of child's stack as argument for parent's
         * and child's mmap2 calls
         */
        *data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset),
                                      .fd = new_fd,
                                      .timer = ((struct itimerval)
                                                { { 0, 1000000 / hz() },
                                                  { 0, 1000000 / hz() }})});
        get_safe_registers(regs);

        /* Set parent's instruction pointer to start of clone-stub */
        regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
                              (unsigned long) stub_clone_handler -
                              (unsigned long) &__syscall_stub_start;
        regs[REGS_SP_INDEX] = UML_CONFIG_STUB_DATA + PAGE_SIZE -
                              sizeof(void *);
        err = ptrace_setregs(pid, regs);
        if(err < 0)
                panic("copy_context_skas0 : PTRACE_SETREGS failed, "
                      "pid = %d, errno = %d\n", pid, errno);

        /* set a well known return code for detection of child write failure */
        child_data->err = 12345678;

        /* Wait until the parent has finished its work: read the child's pid
         * from the parent's stack and check for a bad result.
         */
        wait_stub_done(pid, 0, "copy_context_skas0");

        pid = data->err;
        if(pid < 0)
                panic("copy_context_skas0 - stub-parent reports error %d\n",
                      pid);

        /* Wait until the child has finished too: read the child's result
         * from the child's stack and check it.
         */
        wait_stub_done(pid, -1, "copy_context_skas0");
        if (child_data->err != UML_CONFIG_STUB_DATA)
                panic("copy_context_skas0 - stub-child reports error %d\n",
                      child_data->err);

        if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
                   (void *)PTRACE_O_TRACESYSGOOD) < 0)
                panic("copy_context_skas0 : PTRACE_SETOPTIONS failed, "
                      "errno = %d\n", errno);

        return pid;
}

/*
 * This is used only if proc_mm is available while PTRACE_FAULTINFO isn't.
 * Opening /proc/mm creates a new mm_context, which lacks the stub pages.
 * Thus, we map them using the /proc/mm fd.
 */
void map_stub_pages(int fd, unsigned long code,
                    unsigned long data, unsigned long stack)
{
        struct proc_mm_op mmop;
        int n;

        mmop = ((struct proc_mm_op) { .op = MM_MMAP,
                                      .u =
                                      { .mmap =
                                        { .addr = code,
                                          .len = PAGE_SIZE,
                                          .prot = PROT_EXEC,
                                          .flags = MAP_FIXED | MAP_PRIVATE,
                                          .fd = stub_code_fd,
                                          .offset = stub_code_offset
                                        } } });
        n = os_write_file(fd, &mmop, sizeof(mmop));
        if(n != sizeof(mmop))
                panic("map_stub_pages : /proc/mm map for code failed, "
                      "err = %d\n", -n);

        if(stack){
                __u64 map_offset;
                int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);

                mmop = ((struct proc_mm_op)
                        { .op = MM_MMAP,
                          .u =
                          { .mmap =
                            { .addr = data,
                              .len = PAGE_SIZE,
                              .prot = PROT_READ | PROT_WRITE,
                              .flags = MAP_FIXED | MAP_SHARED,
                              .fd = map_fd,
                              .offset = map_offset
                            } } });
                n = os_write_file(fd, &mmop, sizeof(mmop));
                if(n != sizeof(mmop))
                        panic("map_stub_pages : /proc/mm map for data failed, "
                              "err = %d\n", -n);
        }
}

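/* Kernel-side context switching is done with sigsetjmp/siglongjmp rather
 * than with one host process per UML thread.  new_thread() records a switch
 * buffer for the new thread and a fork buffer for the caller and hands the
 * new stack to new_thread_proc(); signals are blocked across the sigsetjmp
 * (see the comment below) so that no timer tick can hit the new thread
 * before it has been scheduled.
 */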
void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr,
                void (*handler)(int))
{
        unsigned long flags;
        sigjmp_buf switch_buf, fork_buf;

        *switch_buf_ptr = &switch_buf;
        *fork_buf_ptr = &fork_buf;

        /* Somewhat subtle - siglongjmp restores the signal mask before doing
         * the longjmp.  This means that when jumping from one stack to another
         * when the target stack has interrupts enabled, an interrupt may occur
         * on the source stack.  This is bad when starting up a process because
         * it's not supposed to get timer ticks until it has been scheduled.
         * So, we disable interrupts around the sigsetjmp to ensure that
         * they can't happen until we get back here where they are safe.
         */
        flags = get_signals();
        block_signals();
        if(sigsetjmp(fork_buf, 1) == 0)
                new_thread_proc(stack, handler);

        remove_sigstack();

        set_signals(flags);
}

void thread_wait(void *sw, void *fb)
{
        sigjmp_buf buf, **switch_buf = sw, *fork_buf;

        *switch_buf = &buf;
        fork_buf = fb;
        if(sigsetjmp(buf, 1) == 0)
                siglongjmp(*fork_buf, INIT_JMP_REMOVE_SIGSTACK);
}

void switch_threads(void *me, void *next)
{
        sigjmp_buf my_buf, **me_ptr = me, *next_buf = next;

        *me_ptr = &my_buf;
        if(sigsetjmp(my_buf, 1) == 0)
                siglongjmp(*next_buf, 1);
}

static sigjmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static sigjmp_buf *cb_back;

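/* initial_jmpbuf belongs to the idle thread and is the hub for the jumps
 * above: thread creation, signal stack removal, callbacks run through
 * initial_thread_cb_skas(), and halt/reboot all siglongjmp back to it with
 * one of the INIT_JMP_* codes, which the switch below dispatches on.
 */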
int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr)
{
        sigjmp_buf **switch_buf = switch_buf_ptr;
        int n;

        set_handler(SIGWINCH, (__sighandler_t) sig_handler,
                    SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGALRM,
                    SIGVTALRM, -1);

        *fork_buf_ptr = &initial_jmpbuf;
        n = sigsetjmp(initial_jmpbuf, 1);
        switch(n){
        case INIT_JMP_NEW_THREAD:
                new_thread_proc((void *) stack, new_thread_handler);
                break;
        case INIT_JMP_REMOVE_SIGSTACK:
                remove_sigstack();
                break;
        case INIT_JMP_CALLBACK:
                (*cb_proc)(cb_arg);
                siglongjmp(*cb_back, 1);
                break;
        case INIT_JMP_HALT:
                kmalloc_ok = 0;
                return(0);
        case INIT_JMP_REBOOT:
                kmalloc_ok = 0;
                return(1);
        default:
                panic("Bad sigsetjmp return in start_idle_thread - %d\n", n);
        }
        siglongjmp(**switch_buf, 1);
}

void remove_sigstack(void)
{
        stack_t stack = ((stack_t) { .ss_flags = SS_DISABLE,
                                     .ss_sp = NULL,
                                     .ss_size = 0 });

        if(sigaltstack(&stack, NULL) != 0)
                panic("disabling signal stack failed, errno = %d\n", errno);
}

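/* Run a callback on the initial (idle) thread's stack.  The callback and its
 * argument are stashed in the static cb_* variables above, then we jump to
 * initial_jmpbuf with INIT_JMP_CALLBACK; start_idle_thread() invokes the
 * callback and jumps back here through cb_back.
 */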
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
        sigjmp_buf here;

        cb_proc = proc;
        cb_arg = arg;
        cb_back = &here;

        block_signals();
        if(sigsetjmp(here, 1) == 0)
                siglongjmp(initial_jmpbuf, INIT_JMP_CALLBACK);
        unblock_signals();

        cb_proc = NULL;
        cb_arg = NULL;
        cb_back = NULL;
}

void halt_skas(void)
{
        block_signals();
        siglongjmp(initial_jmpbuf, INIT_JMP_HALT);
}

void reboot_skas(void)
{
        block_signals();
        siglongjmp(initial_jmpbuf, INIT_JMP_REBOOT);
}

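/* Switch the host context to a different UML address space.  With /proc/mm
 * a single host process is moved between address spaces with
 * PTRACE_SWITCH_MM; without it, each mm has its own host process, so
 * switching just means picking the right pid for subsequent ptrace
 * operations.
 */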
void switch_mm_skas(struct mm_id *mm_idp)
{
        int err;

#warning need cpu pid in switch_mm_skas
        if(proc_mm){
                err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
                             mm_idp->u.mm_fd);
                if(err)
                        panic("switch_mm_skas - PTRACE_SWITCH_MM failed, "
                              "errno = %d\n", errno);
        }
        else userspace_pid[0] = mm_idp->u.pid;
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */