git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
um: merge arch/um/sys-{i386,x86_64}
author: Al Viro <viro@ftp.linux.org.uk>
Thu, 18 Aug 2011 19:03:19 +0000 (20:03 +0100)
committer: Richard Weinberger <richard@nod.at>
Wed, 2 Nov 2011 13:14:51 +0000 (14:14 +0100)
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Richard Weinberger <richard@nod.at>
91 files changed:
arch/um/Makefile
arch/um/Makefile-i386
arch/um/Makefile-x86_64
arch/um/sys-i386/Makefile [deleted file]
arch/um/sys-i386/bug.c [deleted file]
arch/um/sys-i386/bugs.c [deleted file]
arch/um/sys-i386/checksum.S [deleted file]
arch/um/sys-i386/delay.c [deleted file]
arch/um/sys-i386/elfcore.c [deleted file]
arch/um/sys-i386/fault.c [deleted file]
arch/um/sys-i386/ksyms.c [deleted file]
arch/um/sys-i386/ldt.c [deleted file]
arch/um/sys-i386/mem.c [deleted file]
arch/um/sys-i386/ptrace.c [deleted file]
arch/um/sys-i386/ptrace_user.c [deleted file]
arch/um/sys-i386/setjmp.S [deleted file]
arch/um/sys-i386/signal.c [deleted file]
arch/um/sys-i386/stub.S [deleted file]
arch/um/sys-i386/stub_segv.c [deleted file]
arch/um/sys-i386/sys_call_table.S [deleted file]
arch/um/sys-i386/syscalls.c [deleted file]
arch/um/sys-i386/sysrq.c [deleted file]
arch/um/sys-i386/tls.c [deleted file]
arch/um/sys-i386/user-offsets.c [deleted file]
arch/um/sys-x86/Makefile [new file with mode: 0644]
arch/um/sys-x86/bug.c [new file with mode: 0644]
arch/um/sys-x86/bugs_32.c [new file with mode: 0644]
arch/um/sys-x86/bugs_64.c [new file with mode: 0644]
arch/um/sys-x86/checksum_32.S [new file with mode: 0644]
arch/um/sys-x86/delay_32.c [new file with mode: 0644]
arch/um/sys-x86/delay_64.c [new file with mode: 0644]
arch/um/sys-x86/elfcore.c [new file with mode: 0644]
arch/um/sys-x86/fault.c [new file with mode: 0644]
arch/um/sys-x86/ksyms.c [new file with mode: 0644]
arch/um/sys-x86/ldt.c [new file with mode: 0644]
arch/um/sys-x86/mem_32.c [new file with mode: 0644]
arch/um/sys-x86/mem_64.c [new file with mode: 0644]
arch/um/sys-x86/ptrace_32.c [new file with mode: 0644]
arch/um/sys-x86/ptrace_64.c [new file with mode: 0644]
arch/um/sys-x86/ptrace_user.c [new file with mode: 0644]
arch/um/sys-x86/setjmp_32.S [new file with mode: 0644]
arch/um/sys-x86/setjmp_64.S [new file with mode: 0644]
arch/um/sys-x86/signal_32.c [new file with mode: 0644]
arch/um/sys-x86/signal_64.c [new file with mode: 0644]
arch/um/sys-x86/stub_32.S [new file with mode: 0644]
arch/um/sys-x86/stub_64.S [new file with mode: 0644]
arch/um/sys-x86/stub_segv_32.c [new file with mode: 0644]
arch/um/sys-x86/stub_segv_64.c [new file with mode: 0644]
arch/um/sys-x86/sys_call_table_32.S [new file with mode: 0644]
arch/um/sys-x86/sys_call_table_64.c [new file with mode: 0644]
arch/um/sys-x86/syscalls_32.c [new file with mode: 0644]
arch/um/sys-x86/syscalls_64.c [new file with mode: 0644]
arch/um/sys-x86/sysrq_32.c [new file with mode: 0644]
arch/um/sys-x86/sysrq_64.c [new file with mode: 0644]
arch/um/sys-x86/tls_32.c [new file with mode: 0644]
arch/um/sys-x86/tls_64.c [new file with mode: 0644]
arch/um/sys-x86/user-offsets.c [new file with mode: 0644]
arch/um/sys-x86/vdso/Makefile [new file with mode: 0644]
arch/um/sys-x86/vdso/checkundef.sh [new file with mode: 0644]
arch/um/sys-x86/vdso/um_vdso.c [new file with mode: 0644]
arch/um/sys-x86/vdso/vdso-layout.lds.S [new file with mode: 0644]
arch/um/sys-x86/vdso/vdso-note.S [new file with mode: 0644]
arch/um/sys-x86/vdso/vdso.S [new file with mode: 0644]
arch/um/sys-x86/vdso/vdso.lds.S [new file with mode: 0644]
arch/um/sys-x86/vdso/vma.c [new file with mode: 0644]
arch/um/sys-x86_64/Makefile [deleted file]
arch/um/sys-x86_64/bug.c [deleted file]
arch/um/sys-x86_64/bugs.c [deleted file]
arch/um/sys-x86_64/delay.c [deleted file]
arch/um/sys-x86_64/fault.c [deleted file]
arch/um/sys-x86_64/ksyms.c [deleted file]
arch/um/sys-x86_64/mem.c [deleted file]
arch/um/sys-x86_64/ptrace.c [deleted file]
arch/um/sys-x86_64/ptrace_user.c [deleted file]
arch/um/sys-x86_64/setjmp.S [deleted file]
arch/um/sys-x86_64/signal.c [deleted file]
arch/um/sys-x86_64/stub.S [deleted file]
arch/um/sys-x86_64/stub_segv.c [deleted file]
arch/um/sys-x86_64/syscall_table.c [deleted file]
arch/um/sys-x86_64/syscalls.c [deleted file]
arch/um/sys-x86_64/sysrq.c [deleted file]
arch/um/sys-x86_64/tls.c [deleted file]
arch/um/sys-x86_64/user-offsets.c [deleted file]
arch/um/sys-x86_64/vdso/Makefile [deleted file]
arch/um/sys-x86_64/vdso/checkundef.sh [deleted file]
arch/um/sys-x86_64/vdso/um_vdso.c [deleted file]
arch/um/sys-x86_64/vdso/vdso-layout.lds.S [deleted file]
arch/um/sys-x86_64/vdso/vdso-note.S [deleted file]
arch/um/sys-x86_64/vdso/vdso.S [deleted file]
arch/um/sys-x86_64/vdso/vdso.lds.S [deleted file]
arch/um/sys-x86_64/vdso/vma.c [deleted file]

index 184494d890b6c88269146f71f65ed3cf74672417..bd0587e5c466ce0c9ab34fef3572dbf391a80029 100644 (file)
@@ -121,8 +121,8 @@ archclean:
 
 # Generated files
 
-$(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.s: FORCE
-       $(Q)$(MAKE) $(build)=$(ARCH_DIR)/sys-$(SUBARCH) $@
+$(ARCH_DIR)/sys-$(HEADER_ARCH)/user-offsets.s: FORCE
+       $(Q)$(MAKE) $(build)=$(ARCH_DIR)/sys-$(HEADER_ARCH) $@
 
 define filechk_gen-asm-offsets
         (set -e; \
@@ -137,7 +137,7 @@ define filechk_gen-asm-offsets
          echo ""; )
 endef
 
-include/generated/user_constants.h: $(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.s
+include/generated/user_constants.h: $(ARCH_DIR)/sys-$(HEADER_ARCH)/user-offsets.s
        $(call filechk,gen-asm-offsets)
 
 export SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING OS HEADER_ARCH DEV_NULL_PATH
index 302cbe504543c04ea709f030b74425fd6cb859e2..7e5f1baf7c9306cb69f7a9b911d74fcc0a02ceb7 100644 (file)
@@ -1,4 +1,4 @@
-core-y += arch/um/sys-i386/ arch/x86/crypto/
+core-y += arch/um/sys-x86/ arch/x86/crypto/
 
 TOP_ADDR := $(CONFIG_TOP_ADDR)
 
index a9cd7e77a7abcf01f1bdef701d2beda93d0aea93..92d8f8fb6b64072d968ba233b5ca6feca2e8fbf3 100644 (file)
@@ -1,7 +1,7 @@
 # Copyright 2003 - 2004 Pathscale, Inc
 # Released under the GPL
 
-core-y += arch/um/sys-x86_64/ arch/x86/crypto/
+core-y += arch/um/sys-x86/ arch/x86/crypto/
 START := 0x60000000
 
 _extra_flags_ = -fno-builtin -m64
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
deleted file mode 100644 (file)
index 231bb98..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
-#
-
-obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
-       ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \
-       sys_call_table.o tls.o mem.o
-
-obj-$(CONFIG_BINFMT_ELF) += elfcore.o
-
-subarch-obj-y = lib/string_32.o lib/atomic64_32.o lib/atomic64_cx8_32.o
-subarch-obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += lib/rwsem.o
-subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
-subarch-obj-$(CONFIG_MODULES) += kernel/module.o
-
-USER_OBJS := bugs.o ptrace_user.o fault.o
-
-extra-y += user-offsets.s
-$(obj)/user-offsets.s: c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS)
-
-UNPROFILE_OBJS := stub_segv.o
-CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING)
-
-include arch/um/scripts/Makefile.rules
diff --git a/arch/um/sys-i386/bug.c b/arch/um/sys-i386/bug.c
deleted file mode 100644 (file)
index 8d4f273..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL V2
- */
-
-#include <linux/uaccess.h>
-#include <asm/errno.h>
-
-/* Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because
- * that's not relevant in skas mode.
- */
-
-int is_valid_bugaddr(unsigned long eip)
-{
-       unsigned short ud2;
-
-       if (probe_kernel_address((unsigned short __user *)eip, ud2))
-               return 0;
-
-       return ud2 == 0x0b0f;
-}
diff --git a/arch/um/sys-i386/bugs.c b/arch/um/sys-i386/bugs.c
deleted file mode 100644 (file)
index 7058e1f..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <signal.h>
-#include "kern_util.h"
-#include "longjmp.h"
-#include "task.h"
-#include "sysdep/ptrace.h"
-
-/* Set during early boot */
-static int host_has_cmov = 1;
-static jmp_buf cmov_test_return;
-
-static void cmov_sigill_test_handler(int sig)
-{
-       host_has_cmov = 0;
-       longjmp(cmov_test_return, 1);
-}
-
-void arch_check_bugs(void)
-{
-       struct sigaction old, new;
-
-       printk(UM_KERN_INFO "Checking for host processor cmov support...");
-       new.sa_handler = cmov_sigill_test_handler;
-
-       /* Make sure that SIGILL is enabled after the handler longjmps back */
-       new.sa_flags = SA_NODEFER;
-       sigemptyset(&new.sa_mask);
-       sigaction(SIGILL, &new, &old);
-
-       if (setjmp(cmov_test_return) == 0) {
-               unsigned long foo = 0;
-               __asm__ __volatile__("cmovz %0, %1" : "=r" (foo) : "0" (foo));
-               printk(UM_KERN_CONT "Yes\n");
-       } else
-               printk(UM_KERN_CONT "No\n");
-
-       sigaction(SIGILL, &old, &new);
-}
-
-void arch_examine_signal(int sig, struct uml_pt_regs *regs)
-{
-       unsigned char tmp[2];
-
-       /*
-        * This is testing for a cmov (0x0f 0x4x) instruction causing a
-        * SIGILL in init.
-        */
-       if ((sig != SIGILL) || (TASK_PID(get_current()) != 1))
-               return;
-
-       if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) {
-               printk(UM_KERN_ERR "SIGILL in init, could not read "
-                      "instructions!\n");
-               return;
-       }
-
-       if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40))
-               return;
-
-       if (host_has_cmov == 0)
-               printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
-                      "processor doesn't implement.  Boot a filesystem "
-                      "compiled for older processors");
-       else if (host_has_cmov == 1)
-               printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
-                      "processor claims to implement");
-       else
-               printk(UM_KERN_ERR "Bad value for host_has_cmov (%d)",
-                       host_has_cmov);
-}
diff --git a/arch/um/sys-i386/checksum.S b/arch/um/sys-i386/checksum.S
deleted file mode 100644 (file)
index f058d2f..0000000
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- * INET                An implementation of the TCP/IP protocol suite for the LINUX
- *             operating system.  INET is implemented using the  BSD Socket
- *             interface as the means of communication with the user level.
- *
- *             IP/TCP/UDP checksumming routines
- *
- * Authors:    Jorge Cwik, <jorge@laser.satlink.net>
- *             Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- *             Tom May, <ftom@netcom.com>
- *              Pentium Pro/II routines:
- *              Alexander Kjeldaas <astor@guardian.no>
- *              Finn Arne Gangstad <finnag@guardian.no>
- *             Lots of code moved from tcp.c and ip.c; see those files
- *             for more names.
- *
- * Changes:     Ingo Molnar, converted csum_partial_copy() to 2.1 exception
- *                          handling.
- *             Andi Kleen,  add zeroing on error
- *                   converted to pure assembler
- *
- *             This program is free software; you can redistribute it and/or
- *             modify it under the terms of the GNU General Public License
- *             as published by the Free Software Foundation; either version
- *             2 of the License, or (at your option) any later version.
- */
-
-#include <asm/errno.h>
-                               
-/*
- * computes a partial checksum, e.g. for TCP/UDP fragments
- */
-
-/*     
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
- */
-               
-.text
-.align 4
-.globl csum_partial
-               
-#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
-
-         /*            
-          * Experiments with Ethernet and SLIP connections show that buff
-          * is aligned on either a 2-byte or 4-byte boundary.  We get at
-          * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
-          * Fortunately, it is easy to convert 2-byte alignment to 4-byte
-          * alignment for the unrolled loop.
-          */           
-csum_partial:
-       pushl %esi
-       pushl %ebx
-       movl 20(%esp),%eax      # Function arg: unsigned int sum
-       movl 16(%esp),%ecx      # Function arg: int len
-       movl 12(%esp),%esi      # Function arg: unsigned char *buff
-       testl $2, %esi          # Check alignment.
-       jz 2f                   # Jump if alignment is ok.
-       subl $2, %ecx           # Alignment uses up two bytes.
-       jae 1f                  # Jump if we had at least two bytes.
-       addl $2, %ecx           # ecx was < 2.  Deal with it.
-       jmp 4f
-1:     movw (%esi), %bx
-       addl $2, %esi
-       addw %bx, %ax
-       adcl $0, %eax
-2:
-       movl %ecx, %edx
-       shrl $5, %ecx
-       jz 2f
-       testl %esi, %esi
-1:     movl (%esi), %ebx
-       adcl %ebx, %eax
-       movl 4(%esi), %ebx
-       adcl %ebx, %eax
-       movl 8(%esi), %ebx
-       adcl %ebx, %eax
-       movl 12(%esi), %ebx
-       adcl %ebx, %eax
-       movl 16(%esi), %ebx
-       adcl %ebx, %eax
-       movl 20(%esi), %ebx
-       adcl %ebx, %eax
-       movl 24(%esi), %ebx
-       adcl %ebx, %eax
-       movl 28(%esi), %ebx
-       adcl %ebx, %eax
-       lea 32(%esi), %esi
-       dec %ecx
-       jne 1b
-       adcl $0, %eax
-2:     movl %edx, %ecx
-       andl $0x1c, %edx
-       je 4f
-       shrl $2, %edx           # This clears CF
-3:     adcl (%esi), %eax
-       lea 4(%esi), %esi
-       dec %edx
-       jne 3b
-       adcl $0, %eax
-4:     andl $3, %ecx
-       jz 7f
-       cmpl $2, %ecx
-       jb 5f
-       movw (%esi),%cx
-       leal 2(%esi),%esi
-       je 6f
-       shll $16,%ecx
-5:     movb (%esi),%cl
-6:     addl %ecx,%eax
-       adcl $0, %eax 
-7:     
-       popl %ebx
-       popl %esi
-       ret
-
-#else
-
-/* Version for PentiumII/PPro */
-
-csum_partial:
-       pushl %esi
-       pushl %ebx
-       movl 20(%esp),%eax      # Function arg: unsigned int sum
-       movl 16(%esp),%ecx      # Function arg: int len
-       movl 12(%esp),%esi      # Function arg: const unsigned char *buf
-
-       testl $2, %esi         
-       jnz 30f                 
-10:
-       movl %ecx, %edx
-       movl %ecx, %ebx
-       andl $0x7c, %ebx
-       shrl $7, %ecx
-       addl %ebx,%esi
-       shrl $2, %ebx  
-       negl %ebx
-       lea 45f(%ebx,%ebx,2), %ebx
-       testl %esi, %esi
-       jmp *%ebx
-
-       # Handle 2-byte-aligned regions
-20:    addw (%esi), %ax
-       lea 2(%esi), %esi
-       adcl $0, %eax
-       jmp 10b
-
-30:    subl $2, %ecx          
-       ja 20b                 
-       je 32f
-       movzbl (%esi),%ebx      # csumming 1 byte, 2-aligned
-       addl %ebx, %eax
-       adcl $0, %eax
-       jmp 80f
-32:
-       addw (%esi), %ax        # csumming 2 bytes, 2-aligned
-       adcl $0, %eax
-       jmp 80f
-
-40: 
-       addl -128(%esi), %eax
-       adcl -124(%esi), %eax
-       adcl -120(%esi), %eax
-       adcl -116(%esi), %eax   
-       adcl -112(%esi), %eax   
-       adcl -108(%esi), %eax
-       adcl -104(%esi), %eax
-       adcl -100(%esi), %eax
-       adcl -96(%esi), %eax
-       adcl -92(%esi), %eax
-       adcl -88(%esi), %eax
-       adcl -84(%esi), %eax
-       adcl -80(%esi), %eax
-       adcl -76(%esi), %eax
-       adcl -72(%esi), %eax
-       adcl -68(%esi), %eax
-       adcl -64(%esi), %eax     
-       adcl -60(%esi), %eax     
-       adcl -56(%esi), %eax     
-       adcl -52(%esi), %eax   
-       adcl -48(%esi), %eax   
-       adcl -44(%esi), %eax
-       adcl -40(%esi), %eax
-       adcl -36(%esi), %eax
-       adcl -32(%esi), %eax
-       adcl -28(%esi), %eax
-       adcl -24(%esi), %eax
-       adcl -20(%esi), %eax
-       adcl -16(%esi), %eax
-       adcl -12(%esi), %eax
-       adcl -8(%esi), %eax
-       adcl -4(%esi), %eax
-45:
-       lea 128(%esi), %esi
-       adcl $0, %eax
-       dec %ecx
-       jge 40b
-       movl %edx, %ecx
-50:    andl $3, %ecx
-       jz 80f
-
-       # Handle the last 1-3 bytes without jumping
-       notl %ecx               # 1->2, 2->1, 3->0, higher bits are masked
-       movl $0xffffff,%ebx     # by the shll and shrl instructions
-       shll $3,%ecx
-       shrl %cl,%ebx
-       andl -128(%esi),%ebx    # esi is 4-aligned so should be ok
-       addl %ebx,%eax
-       adcl $0,%eax
-80: 
-       popl %ebx
-       popl %esi
-       ret
-                               
-#endif
-
-/*
-unsigned int csum_partial_copy_generic (const char *src, char *dst,
-                                 int len, int sum, int *src_err_ptr, int *dst_err_ptr)
- */ 
-
-/*
- * Copy from ds while checksumming, otherwise like csum_partial
- *
- * The macros SRC and DST specify the type of access for the instruction.
- * thus we can call a custom exception handler for all access types.
- *
- * FIXME: could someone double-check whether I haven't mixed up some SRC and
- *       DST definitions? It's damn hard to trigger all cases.  I hope I got
- *       them all but there's no guarantee.
- */
-
-#define SRC(y...)                      \
-       9999: y;                        \
-       .section __ex_table, "a";       \
-       .long 9999b, 6001f      ;       \
-       .previous
-
-#define DST(y...)                      \
-       9999: y;                        \
-       .section __ex_table, "a";       \
-       .long 9999b, 6002f      ;       \
-       .previous
-
-.align 4
-
-#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
-
-#define ARGBASE 16             
-#define FP             12
-
-csum_partial_copy_generic_i386:
-       subl  $4,%esp   
-       pushl %edi
-       pushl %esi
-       pushl %ebx
-       movl ARGBASE+16(%esp),%eax      # sum
-       movl ARGBASE+12(%esp),%ecx      # len
-       movl ARGBASE+4(%esp),%esi       # src
-       movl ARGBASE+8(%esp),%edi       # dst
-
-       testl $2, %edi                  # Check alignment. 
-       jz 2f                           # Jump if alignment is ok.
-       subl $2, %ecx                   # Alignment uses up two bytes.
-       jae 1f                          # Jump if we had at least two bytes.
-       addl $2, %ecx                   # ecx was < 2.  Deal with it.
-       jmp 4f
-SRC(1: movw (%esi), %bx        )
-       addl $2, %esi
-DST(   movw %bx, (%edi)        )
-       addl $2, %edi
-       addw %bx, %ax   
-       adcl $0, %eax
-2:
-       movl %ecx, FP(%esp)
-       shrl $5, %ecx
-       jz 2f
-       testl %esi, %esi
-SRC(1: movl (%esi), %ebx       )
-SRC(   movl 4(%esi), %edx      )
-       adcl %ebx, %eax
-DST(   movl %ebx, (%edi)       )
-       adcl %edx, %eax
-DST(   movl %edx, 4(%edi)      )
-
-SRC(   movl 8(%esi), %ebx      )
-SRC(   movl 12(%esi), %edx     )
-       adcl %ebx, %eax
-DST(   movl %ebx, 8(%edi)      )
-       adcl %edx, %eax
-DST(   movl %edx, 12(%edi)     )
-
-SRC(   movl 16(%esi), %ebx     )
-SRC(   movl 20(%esi), %edx     )
-       adcl %ebx, %eax
-DST(   movl %ebx, 16(%edi)     )
-       adcl %edx, %eax
-DST(   movl %edx, 20(%edi)     )
-
-SRC(   movl 24(%esi), %ebx     )
-SRC(   movl 28(%esi), %edx     )
-       adcl %ebx, %eax
-DST(   movl %ebx, 24(%edi)     )
-       adcl %edx, %eax
-DST(   movl %edx, 28(%edi)     )
-
-       lea 32(%esi), %esi
-       lea 32(%edi), %edi
-       dec %ecx
-       jne 1b
-       adcl $0, %eax
-2:     movl FP(%esp), %edx
-       movl %edx, %ecx
-       andl $0x1c, %edx
-       je 4f
-       shrl $2, %edx                   # This clears CF
-SRC(3: movl (%esi), %ebx       )
-       adcl %ebx, %eax
-DST(   movl %ebx, (%edi)       )
-       lea 4(%esi), %esi
-       lea 4(%edi), %edi
-       dec %edx
-       jne 3b
-       adcl $0, %eax
-4:     andl $3, %ecx
-       jz 7f
-       cmpl $2, %ecx
-       jb 5f
-SRC(   movw (%esi), %cx        )
-       leal 2(%esi), %esi
-DST(   movw %cx, (%edi)        )
-       leal 2(%edi), %edi
-       je 6f
-       shll $16,%ecx
-SRC(5: movb (%esi), %cl        )
-DST(   movb %cl, (%edi)        )
-6:     addl %ecx, %eax
-       adcl $0, %eax
-7:
-5000:
-
-# Exception handler:
-.section .fixup, "ax"                                                  
-
-6001:
-       movl ARGBASE+20(%esp), %ebx     # src_err_ptr
-       movl $-EFAULT, (%ebx)
-
-       # zero the complete destination - computing the rest
-       # is too much work 
-       movl ARGBASE+8(%esp), %edi      # dst
-       movl ARGBASE+12(%esp), %ecx     # len
-       xorl %eax,%eax
-       rep ; stosb
-
-       jmp 5000b
-
-6002:
-       movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
-       movl $-EFAULT,(%ebx)
-       jmp 5000b
-
-.previous
-
-       popl %ebx
-       popl %esi
-       popl %edi
-       popl %ecx                       # equivalent to addl $4,%esp
-       ret     
-
-#else
-
-/* Version for PentiumII/PPro */
-
-#define ROUND1(x) \
-       SRC(movl x(%esi), %ebx  )       ;       \
-       addl %ebx, %eax                 ;       \
-       DST(movl %ebx, x(%edi)  )       ; 
-
-#define ROUND(x) \
-       SRC(movl x(%esi), %ebx  )       ;       \
-       adcl %ebx, %eax                 ;       \
-       DST(movl %ebx, x(%edi)  )       ;
-
-#define ARGBASE 12
-               
-csum_partial_copy_generic_i386:
-       pushl %ebx
-       pushl %edi
-       pushl %esi
-       movl ARGBASE+4(%esp),%esi       #src
-       movl ARGBASE+8(%esp),%edi       #dst    
-       movl ARGBASE+12(%esp),%ecx      #len
-       movl ARGBASE+16(%esp),%eax      #sum
-#      movl %ecx, %edx  
-       movl %ecx, %ebx  
-       movl %esi, %edx
-       shrl $6, %ecx     
-       andl $0x3c, %ebx  
-       negl %ebx
-       subl %ebx, %esi  
-       subl %ebx, %edi  
-       lea  -1(%esi),%edx
-       andl $-32,%edx
-       lea 3f(%ebx,%ebx), %ebx
-       testl %esi, %esi 
-       jmp *%ebx
-1:     addl $64,%esi
-       addl $64,%edi 
-       SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
-       ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)    
-       ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)    
-       ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)    
-       ROUND (-16) ROUND(-12) ROUND(-8)  ROUND(-4)     
-3:     adcl $0,%eax
-       addl $64, %edx
-       dec %ecx
-       jge 1b
-4:     movl ARGBASE+12(%esp),%edx      #len
-       andl $3, %edx
-       jz 7f
-       cmpl $2, %edx
-       jb 5f
-SRC(   movw (%esi), %dx         )
-       leal 2(%esi), %esi
-DST(   movw %dx, (%edi)         )
-       leal 2(%edi), %edi
-       je 6f
-       shll $16,%edx
-5:
-SRC(   movb (%esi), %dl         )
-DST(   movb %dl, (%edi)         )
-6:     addl %edx, %eax
-       adcl $0, %eax
-7:
-.section .fixup, "ax"
-6001:  movl    ARGBASE+20(%esp), %ebx  # src_err_ptr   
-       movl $-EFAULT, (%ebx)
-       # zero the complete destination (computing the rest is too much work)
-       movl ARGBASE+8(%esp),%edi       # dst
-       movl ARGBASE+12(%esp),%ecx      # len
-       xorl %eax,%eax
-       rep; stosb
-       jmp 7b
-6002:  movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
-       movl $-EFAULT, (%ebx)
-       jmp  7b                 
-.previous                              
-
-       popl %esi
-       popl %edi
-       popl %ebx
-       ret
-                               
-#undef ROUND
-#undef ROUND1          
-               
-#endif
diff --git a/arch/um/sys-i386/delay.c b/arch/um/sys-i386/delay.c
deleted file mode 100644 (file)
index f3fe1a6..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
- * Mostly copied from arch/x86/lib/delay.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <asm/param.h>
-
-void __delay(unsigned long loops)
-{
-       asm volatile(
-               "test %0,%0\n"
-               "jz 3f\n"
-               "jmp 1f\n"
-
-               ".align 16\n"
-               "1: jmp 2f\n"
-
-               ".align 16\n"
-               "2: dec %0\n"
-               " jnz 2b\n"
-               "3: dec %0\n"
-
-               : /* we don't need output */
-               : "a" (loops)
-       );
-}
-EXPORT_SYMBOL(__delay);
-
-inline void __const_udelay(unsigned long xloops)
-{
-       int d0;
-
-       xloops *= 4;
-       asm("mull %%edx"
-               : "=d" (xloops), "=&a" (d0)
-               : "1" (xloops), "0"
-               (loops_per_jiffy * (HZ/4)));
-
-       __delay(++xloops);
-}
-EXPORT_SYMBOL(__const_udelay);
-
-void __udelay(unsigned long usecs)
-{
-       __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
-}
-EXPORT_SYMBOL(__udelay);
-
-void __ndelay(unsigned long nsecs)
-{
-       __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
-}
-EXPORT_SYMBOL(__ndelay);
diff --git a/arch/um/sys-i386/elfcore.c b/arch/um/sys-i386/elfcore.c
deleted file mode 100644 (file)
index 6bb49b6..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-#include <linux/elf.h>
-#include <linux/coredump.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-
-#include <asm/elf.h>
-
-
-Elf32_Half elf_core_extra_phdrs(void)
-{
-       return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
-}
-
-int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
-                              unsigned long limit)
-{
-       if ( vsyscall_ehdr ) {
-               const struct elfhdr *const ehdrp =
-                       (struct elfhdr *) vsyscall_ehdr;
-               const struct elf_phdr *const phdrp =
-                       (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
-               int i;
-               Elf32_Off ofs = 0;
-
-               for (i = 0; i < ehdrp->e_phnum; ++i) {
-                       struct elf_phdr phdr = phdrp[i];
-
-                       if (phdr.p_type == PT_LOAD) {
-                               ofs = phdr.p_offset = offset;
-                               offset += phdr.p_filesz;
-                       } else {
-                               phdr.p_offset += ofs;
-                       }
-                       phdr.p_paddr = 0; /* match other core phdrs */
-                       *size += sizeof(phdr);
-                       if (*size > limit
-                           || !dump_write(file, &phdr, sizeof(phdr)))
-                               return 0;
-               }
-       }
-       return 1;
-}
-
-int elf_core_write_extra_data(struct file *file, size_t *size,
-                             unsigned long limit)
-{
-       if ( vsyscall_ehdr ) {
-               const struct elfhdr *const ehdrp =
-                       (struct elfhdr *) vsyscall_ehdr;
-               const struct elf_phdr *const phdrp =
-                       (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
-               int i;
-
-               for (i = 0; i < ehdrp->e_phnum; ++i) {
-                       if (phdrp[i].p_type == PT_LOAD) {
-                               void *addr = (void *) phdrp[i].p_vaddr;
-                               size_t filesz = phdrp[i].p_filesz;
-
-                               *size += filesz;
-                               if (*size > limit
-                                   || !dump_write(file, addr, filesz))
-                                       return 0;
-                       }
-               }
-       }
-       return 1;
-}
-
-size_t elf_core_extra_data_size(void)
-{
-       if ( vsyscall_ehdr ) {
-               const struct elfhdr *const ehdrp =
-                       (struct elfhdr *)vsyscall_ehdr;
-               const struct elf_phdr *const phdrp =
-                       (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
-               int i;
-
-               for (i = 0; i < ehdrp->e_phnum; ++i)
-                       if (phdrp[i].p_type == PT_LOAD)
-                               return (size_t) phdrp[i].p_filesz;
-       }
-       return 0;
-}
diff --git a/arch/um/sys-i386/fault.c b/arch/um/sys-i386/fault.c
deleted file mode 100644 (file)
index d670f68..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/* 
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include "sysdep/ptrace.h"
-
-/* These two are from asm-um/uaccess.h and linux/module.h, check them. */
-struct exception_table_entry
-{
-       unsigned long insn;
-       unsigned long fixup;
-};
-
-const struct exception_table_entry *search_exception_tables(unsigned long add);
-
-/* Compare this to arch/i386/mm/extable.c:fixup_exception() */
-int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
-{
-       const struct exception_table_entry *fixup;
-
-       fixup = search_exception_tables(address);
-       if (fixup != 0) {
-               UPT_IP(regs) = fixup->fixup;
-               return 1;
-       }
-       return 0;
-}
diff --git a/arch/um/sys-i386/ksyms.c b/arch/um/sys-i386/ksyms.c
deleted file mode 100644 (file)
index bfbefd3..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#include "linux/module.h"
-#include "asm/checksum.h"
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial);
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
deleted file mode 100644 (file)
index 3f2bf20..0000000
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <asm/unistd.h>
-#include "os.h"
-#include "proc_mm.h"
-#include "skas.h"
-#include "skas_ptrace.h"
-#include "sysdep/tls.h"
-
-extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
-
-static long write_ldt_entry(struct mm_id *mm_idp, int func,
-                    struct user_desc *desc, void **addr, int done)
-{
-       long res;
-
-       if (proc_mm) {
-               /*
-                * This is a special handling for the case, that the mm to
-                * modify isn't current->active_mm.
-                * If this is called directly by modify_ldt,
-                *     (current->active_mm->context.skas.u == mm_idp)
-                * will be true. So no call to __switch_mm(mm_idp) is done.
-                * If this is called in case of init_new_ldt or PTRACE_LDT,
-                * mm_idp won't belong to current->active_mm, but child->mm.
-                * So we need to switch child's mm into our userspace, then
-                * later switch back.
-                *
-                * Note: I'm unsure: should interrupts be disabled here?
-                */
-               if (!current->active_mm || current->active_mm == &init_mm ||
-                   mm_idp != &current->active_mm->context.id)
-                       __switch_mm(mm_idp);
-       }
-
-       if (ptrace_ldt) {
-               struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
-                       .func = func,
-                       .ptr = desc,
-                       .bytecount = sizeof(*desc)};
-               u32 cpu;
-               int pid;
-
-               if (!proc_mm)
-                       pid = mm_idp->u.pid;
-               else {
-                       cpu = get_cpu();
-                       pid = userspace_pid[cpu];
-               }
-
-               res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);
-
-               if (proc_mm)
-                       put_cpu();
-       }
-       else {
-               void *stub_addr;
-               res = syscall_stub_data(mm_idp, (unsigned long *)desc,
-                                       (sizeof(*desc) + sizeof(long) - 1) &
-                                           ~(sizeof(long) - 1),
-                                       addr, &stub_addr);
-               if (!res) {
-                       unsigned long args[] = { func,
-                                                (unsigned long)stub_addr,
-                                                sizeof(*desc),
-                                                0, 0, 0 };
-                       res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
-                                              0, addr, done);
-               }
-       }
-
-       if (proc_mm) {
-               /*
-                * This is the second part of special handling, that makes
-                * PTRACE_LDT possible to implement.
-                */
-               if (current->active_mm && current->active_mm != &init_mm &&
-                   mm_idp != &current->active_mm->context.id)
-                       __switch_mm(&current->active_mm->context.id);
-       }
-
-       return res;
-}
-
-static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
-{
-       int res, n;
-       struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
-                       .func = 0,
-                       .bytecount = bytecount,
-                       .ptr = kmalloc(bytecount, GFP_KERNEL)};
-       u32 cpu;
-
-       if (ptrace_ldt.ptr == NULL)
-               return -ENOMEM;
-
-       /*
-        * This is called from sys_modify_ldt only, so userspace_pid gives
-        * us the right number
-        */
-
-       cpu = get_cpu();
-       res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
-       put_cpu();
-       if (res < 0)
-               goto out;
-
-       n = copy_to_user(ptr, ptrace_ldt.ptr, res);
-       if (n != 0)
-               res = -EFAULT;
-
-  out:
-       kfree(ptrace_ldt.ptr);
-
-       return res;
-}
-
-/*
- * In skas mode, we hold our own ldt data in UML.
- * Thus, the code implementing sys_modify_ldt_skas
- * is very similar to (and mostly stolen from) sys_modify_ldt
- * for arch/i386/kernel/ldt.c
- * The routines copied and modified in part are:
- * - read_ldt
- * - read_default_ldt
- * - write_ldt
- * - sys_modify_ldt_skas
- */
-
-static int read_ldt(void __user * ptr, unsigned long bytecount)
-{
-       int i, err = 0;
-       unsigned long size;
-       uml_ldt_t * ldt = &current->mm->context.ldt;
-
-       if (!ldt->entry_count)
-               goto out;
-       if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
-               bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
-       err = bytecount;
-
-       if (ptrace_ldt)
-               return read_ldt_from_host(ptr, bytecount);
-
-       mutex_lock(&ldt->lock);
-       if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
-               size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
-               if (size > bytecount)
-                       size = bytecount;
-               if (copy_to_user(ptr, ldt->u.entries, size))
-                       err = -EFAULT;
-               bytecount -= size;
-               ptr += size;
-       }
-       else {
-               for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
-                    i++) {
-                       size = PAGE_SIZE;
-                       if (size > bytecount)
-                               size = bytecount;
-                       if (copy_to_user(ptr, ldt->u.pages[i], size)) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       bytecount -= size;
-                       ptr += size;
-               }
-       }
-       mutex_unlock(&ldt->lock);
-
-       if (bytecount == 0 || err == -EFAULT)
-               goto out;
-
-       if (clear_user(ptr, bytecount))
-               err = -EFAULT;
-
-out:
-       return err;
-}
-
-static int read_default_ldt(void __user * ptr, unsigned long bytecount)
-{
-       int err;
-
-       if (bytecount > 5*LDT_ENTRY_SIZE)
-               bytecount = 5*LDT_ENTRY_SIZE;
-
-       err = bytecount;
-       /*
-        * UML doesn't support lcall7 and lcall27.
-        * So, we don't really have a default ldt, but emulate
-        * an empty ldt of common host default ldt size.
-        */
-       if (clear_user(ptr, bytecount))
-               err = -EFAULT;
-
-       return err;
-}
-
-static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
-{
-       uml_ldt_t * ldt = &current->mm->context.ldt;
-       struct mm_id * mm_idp = &current->mm->context.id;
-       int i, err;
-       struct user_desc ldt_info;
-       struct ldt_entry entry0, *ldt_p;
-       void *addr = NULL;
-
-       err = -EINVAL;
-       if (bytecount != sizeof(ldt_info))
-               goto out;
-       err = -EFAULT;
-       if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
-               goto out;
-
-       err = -EINVAL;
-       if (ldt_info.entry_number >= LDT_ENTRIES)
-               goto out;
-       if (ldt_info.contents == 3) {
-               if (func == 1)
-                       goto out;
-               if (ldt_info.seg_not_present == 0)
-                       goto out;
-       }
-
-       if (!ptrace_ldt)
-               mutex_lock(&ldt->lock);
-
-       err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
-       if (err)
-               goto out_unlock;
-       else if (ptrace_ldt) {
-               /* With PTRACE_LDT available, this is used as a flag only */
-               ldt->entry_count = 1;
-               goto out;
-       }
-
-       if (ldt_info.entry_number >= ldt->entry_count &&
-           ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
-               for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
-                    i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
-                    i++) {
-                       if (i == 0)
-                               memcpy(&entry0, ldt->u.entries,
-                                      sizeof(entry0));
-                       ldt->u.pages[i] = (struct ldt_entry *)
-                               __get_free_page(GFP_KERNEL|__GFP_ZERO);
-                       if (!ldt->u.pages[i]) {
-                               err = -ENOMEM;
-                               /* Undo the change in host */
-                               memset(&ldt_info, 0, sizeof(ldt_info));
-                               write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
-                               goto out_unlock;
-                       }
-                       if (i == 0) {
-                               memcpy(ldt->u.pages[0], &entry0,
-                                      sizeof(entry0));
-                               memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
-                                      sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
-                       }
-                       ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
-               }
-       }
-       if (ldt->entry_count <= ldt_info.entry_number)
-               ldt->entry_count = ldt_info.entry_number + 1;
-
-       if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
-               ldt_p = ldt->u.entries + ldt_info.entry_number;
-       else
-               ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
-                       ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
-
-       if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
-          (func == 1 || LDT_empty(&ldt_info))) {
-               ldt_p->a = 0;
-               ldt_p->b = 0;
-       }
-       else{
-               if (func == 1)
-                       ldt_info.useable = 0;
-               ldt_p->a = LDT_entry_a(&ldt_info);
-               ldt_p->b = LDT_entry_b(&ldt_info);
-       }
-       err = 0;
-
-out_unlock:
-       mutex_unlock(&ldt->lock);
-out:
-       return err;
-}
-
-static long do_modify_ldt_skas(int func, void __user *ptr,
-                              unsigned long bytecount)
-{
-       int ret = -ENOSYS;
-
-       switch (func) {
-               case 0:
-                       ret = read_ldt(ptr, bytecount);
-                       break;
-               case 1:
-               case 0x11:
-                       ret = write_ldt(ptr, bytecount, func);
-                       break;
-               case 2:
-                       ret = read_default_ldt(ptr, bytecount);
-                       break;
-       }
-       return ret;
-}
-
-static DEFINE_SPINLOCK(host_ldt_lock);
-static short dummy_list[9] = {0, -1};
-static short * host_ldt_entries = NULL;
-
-static void ldt_get_host_info(void)
-{
-       long ret;
-       struct ldt_entry * ldt;
-       short *tmp;
-       int i, size, k, order;
-
-       spin_lock(&host_ldt_lock);
-
-       if (host_ldt_entries != NULL) {
-               spin_unlock(&host_ldt_lock);
-               return;
-       }
-       host_ldt_entries = dummy_list+1;
-
-       spin_unlock(&host_ldt_lock);
-
-       for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
-               ;
-
-       ldt = (struct ldt_entry *)
-             __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
-       if (ldt == NULL) {
-               printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
-                      "for host ldt\n");
-               return;
-       }
-
-       ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
-       if (ret < 0) {
-               printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
-               goto out_free;
-       }
-       if (ret == 0) {
-               /* default_ldt is active, simply write an empty entry 0 */
-               host_ldt_entries = dummy_list;
-               goto out_free;
-       }
-
-       for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
-               if (ldt[i].a != 0 || ldt[i].b != 0)
-                       size++;
-       }
-
-       if (size < ARRAY_SIZE(dummy_list))
-               host_ldt_entries = dummy_list;
-       else {
-               size = (size + 1) * sizeof(dummy_list[0]);
-               tmp = kmalloc(size, GFP_KERNEL);
-               if (tmp == NULL) {
-                       printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
-                              "host ldt list\n");
-                       goto out_free;
-               }
-               host_ldt_entries = tmp;
-       }
-
-       for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
-               if (ldt[i].a != 0 || ldt[i].b != 0)
-                       host_ldt_entries[k++] = i;
-       }
-       host_ldt_entries[k] = -1;
-
-out_free:
-       free_pages((unsigned long)ldt, order);
-}
-
-long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
-{
-       struct user_desc desc;
-       short * num_p;
-       int i;
-       long page, err=0;
-       void *addr = NULL;
-       struct proc_mm_op copy;
-
-
-       if (!ptrace_ldt)
-               mutex_init(&new_mm->ldt.lock);
-
-       if (!from_mm) {
-               memset(&desc, 0, sizeof(desc));
-               /*
-                * We have to initialize a clean ldt.
-                */
-               if (proc_mm) {
-                       /*
-                        * If the new mm was created using proc_mm, host's
-                        * default-ldt currently is assigned, which normally
-                        * contains the call-gates for lcall7 and lcall27.
-                        * To remove these gates, we simply write an empty
-                        * entry as number 0 to the host.
-                        */
-                       err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
-               }
-               else{
-                       /*
-                        * Now we try to retrieve info about the ldt, we
-                        * inherited from the host. All ldt-entries found
-                        * will be reset in the following loop
-                        */
-                       ldt_get_host_info();
-                       for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
-                               desc.entry_number = *num_p;
-                               err = write_ldt_entry(&new_mm->id, 1, &desc,
-                                                     &addr, *(num_p + 1) == -1);
-                               if (err)
-                                       break;
-                       }
-               }
-               new_mm->ldt.entry_count = 0;
-
-               goto out;
-       }
-
-       if (proc_mm) {
-               /*
-                * We have a valid from_mm, so we now have to copy the LDT of
-                * from_mm to new_mm, because using proc_mm an new mm with
-                * an empty/default LDT was created in new_mm()
-                */
-               copy = ((struct proc_mm_op) { .op       = MM_COPY_SEGMENTS,
-                                             .u        =
-                                             { .copy_segments =
-                                                       from_mm->id.u.mm_fd } } );
-               i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
-               if (i != sizeof(copy))
-                       printk(KERN_ERR "new_mm : /proc/mm copy_segments "
-                              "failed, err = %d\n", -i);
-       }
-
-       if (!ptrace_ldt) {
-               /*
-                * Our local LDT is used to supply the data for
-                * modify_ldt(READLDT), if PTRACE_LDT isn't available,
-                * i.e., we have to use the stub for modify_ldt, which
-                * can't handle the big read buffer of up to 64kB.
-                */
-               mutex_lock(&from_mm->ldt.lock);
-               if (from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES)
-                       memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
-                              sizeof(new_mm->ldt.u.entries));
-               else {
-                       i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
-                       while (i-->0) {
-                               page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
-                               if (!page) {
-                                       err = -ENOMEM;
-                                       break;
-                               }
-                               new_mm->ldt.u.pages[i] =
-                                       (struct ldt_entry *) page;
-                               memcpy(new_mm->ldt.u.pages[i],
-                                      from_mm->ldt.u.pages[i], PAGE_SIZE);
-                       }
-               }
-               new_mm->ldt.entry_count = from_mm->ldt.entry_count;
-               mutex_unlock(&from_mm->ldt.lock);
-       }
-
-    out:
-       return err;
-}
-
-
-void free_ldt(struct mm_context *mm)
-{
-       int i;
-
-       if (!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES) {
-               i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
-               while (i-- > 0)
-                       free_page((long) mm->ldt.u.pages[i]);
-       }
-       mm->ldt.entry_count = 0;
-}
-
-int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
-{
-       return do_modify_ldt_skas(func, ptr, bytecount);
-}
diff --git a/arch/um/sys-i386/mem.c b/arch/um/sys-i386/mem.c
deleted file mode 100644 (file)
index 639900a..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/mm.h>
-#include <asm/page.h>
-#include <asm/mman.h>
-
-static struct vm_area_struct gate_vma;
-
-static int __init gate_vma_init(void)
-{
-       if (!FIXADDR_USER_START)
-               return 0;
-
-       gate_vma.vm_mm = NULL;
-       gate_vma.vm_start = FIXADDR_USER_START;
-       gate_vma.vm_end = FIXADDR_USER_END;
-       gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-       gate_vma.vm_page_prot = __P101;
-
-       /*
-        * Make sure the vDSO gets into every core dump.
-        * Dumping its contents makes post-mortem fully interpretable later
-        * without matching up the same kernel and hardware config to see
-        * what PC values meant.
-        */
-       gate_vma.vm_flags |= VM_ALWAYSDUMP;
-
-       return 0;
-}
-__initcall(gate_vma_init);
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-       return FIXADDR_USER_START ? &gate_vma : NULL;
-}
-
-int in_gate_area_no_mm(unsigned long addr)
-{
-       if (!FIXADDR_USER_START)
-               return 0;
-
-       if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
-               return 1;
-
-       return 0;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-       struct vm_area_struct *vma = get_gate_vma(mm);
-
-       if (!vma)
-               return 0;
-
-       return (addr >= vma->vm_start) && (addr < vma->vm_end);
-}
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
deleted file mode 100644 (file)
index a174fde..0000000
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include "linux/mm.h"
-#include "linux/sched.h"
-#include "asm/uaccess.h"
-#include "skas.h"
-
-extern int arch_switch_tls(struct task_struct *to);
-
-void arch_switch_to(struct task_struct *to)
-{
-       int err = arch_switch_tls(to);
-       if (!err)
-               return;
-
-       if (err != -EINVAL)
-               printk(KERN_WARNING "arch_switch_tls failed, errno %d, "
-                      "not EINVAL\n", -err);
-       else
-               printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n");
-}
-
-int is_syscall(unsigned long addr)
-{
-       unsigned short instr;
-       int n;
-
-       n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
-       if (n) {
-               /* access_process_vm() grants access to vsyscall and stub,
-                * while copy_from_user doesn't. Maybe access_process_vm is
-                * slow, but that doesn't matter, since it will be called only
-                * in case of singlestepping, if copy_from_user failed.
-                */
-               n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
-               if (n != sizeof(instr)) {
-                       printk(KERN_ERR "is_syscall : failed to read "
-                              "instruction from 0x%lx\n", addr);
-                       return 1;
-               }
-       }
-       /* int 0x80 or sysenter */
-       return (instr == 0x80cd) || (instr == 0x340f);
-}
-
-/* determines which flags the user has access to. */
-/* 1 = access 0 = no access */
-#define FLAG_MASK 0x00044dd5
-
-static const int reg_offsets[] = {
-       [EBX] = HOST_EBX,
-       [ECX] = HOST_ECX,
-       [EDX] = HOST_EDX,
-       [ESI] = HOST_ESI,
-       [EDI] = HOST_EDI,
-       [EBP] = HOST_EBP,
-       [EAX] = HOST_EAX,
-       [DS] = HOST_DS,
-       [ES] = HOST_ES,
-       [FS] = HOST_FS,
-       [GS] = HOST_GS,
-       [EIP] = HOST_IP,
-       [CS] = HOST_CS,
-       [EFL] = HOST_EFLAGS,
-       [UESP] = HOST_SP,
-       [SS] = HOST_SS,
-};
-
-int putreg(struct task_struct *child, int regno, unsigned long value)
-{
-       regno >>= 2;
-       switch (regno) {
-       case EBX:
-       case ECX:
-       case EDX:
-       case ESI:
-       case EDI:
-       case EBP:
-       case EAX:
-       case EIP:
-       case UESP:
-               break;
-       case FS:
-               if (value && (value & 3) != 3)
-                       return -EIO;
-               break;
-       case GS:
-               if (value && (value & 3) != 3)
-                       return -EIO;
-               break;
-       case DS:
-       case ES:
-               if (value && (value & 3) != 3)
-                       return -EIO;
-               value &= 0xffff;
-               break;
-       case SS:
-       case CS:
-               if ((value & 3) != 3)
-                       return -EIO;
-               value &= 0xffff;
-               break;
-       case EFL:
-               value &= FLAG_MASK;
-               child->thread.regs.regs.gp[HOST_EFLAGS] |= value;
-               return 0;
-       case ORIG_EAX:
-               child->thread.regs.regs.syscall = value;
-               return 0;
-       default :
-               panic("Bad register in putreg() : %d\n", regno);
-       }
-       child->thread.regs.regs.gp[reg_offsets[regno]] = value;
-       return 0;
-}
-
-int poke_user(struct task_struct *child, long addr, long data)
-{
-       if ((addr & 3) || addr < 0)
-               return -EIO;
-
-       if (addr < MAX_REG_OFFSET)
-               return putreg(child, addr, data);
-       else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
-                (addr <= offsetof(struct user, u_debugreg[7]))) {
-               addr -= offsetof(struct user, u_debugreg[0]);
-               addr = addr >> 2;
-               if ((addr == 4) || (addr == 5))
-                       return -EIO;
-               child->thread.arch.debugregs[addr] = data;
-               return 0;
-       }
-       return -EIO;
-}
-
-unsigned long getreg(struct task_struct *child, int regno)
-{
-       unsigned long mask = ~0UL;
-
-       regno >>= 2;
-       switch (regno) {
-       case ORIG_EAX:
-               return child->thread.regs.regs.syscall;
-       case FS:
-       case GS:
-       case DS:
-       case ES:
-       case SS:
-       case CS:
-               mask = 0xffff;
-               break;
-       case EIP:
-       case UESP:
-       case EAX:
-       case EBX:
-       case ECX:
-       case EDX:
-       case ESI:
-       case EDI:
-       case EBP:
-       case EFL:
-               break;
-       default:
-               panic("Bad register in getreg() : %d\n", regno);
-       }
-       return mask & child->thread.regs.regs.gp[reg_offsets[regno]];
-}
-
-/* read the word at location addr in the USER area. */
-int peek_user(struct task_struct *child, long addr, long data)
-{
-       unsigned long tmp;
-
-       if ((addr & 3) || addr < 0)
-               return -EIO;
-
-       tmp = 0;  /* Default return condition */
-       if (addr < MAX_REG_OFFSET) {
-               tmp = getreg(child, addr);
-       }
-       else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
-                (addr <= offsetof(struct user, u_debugreg[7]))) {
-               addr -= offsetof(struct user, u_debugreg[0]);
-               addr = addr >> 2;
-               tmp = child->thread.arch.debugregs[addr];
-       }
-       return put_user(tmp, (unsigned long __user *) data);
-}
-
-static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
-{
-       int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
-       struct user_i387_struct fpregs;
-
-       err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
-       if (err)
-               return err;
-
-       n = copy_to_user(buf, &fpregs, sizeof(fpregs));
-       if(n > 0)
-               return -EFAULT;
-
-       return n;
-}
-
-static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
-{
-       int n, cpu = ((struct thread_info *) child->stack)->cpu;
-       struct user_i387_struct fpregs;
-
-       n = copy_from_user(&fpregs, buf, sizeof(fpregs));
-       if (n > 0)
-               return -EFAULT;
-
-       return restore_fp_registers(userspace_pid[cpu],
-                                   (unsigned long *) &fpregs);
-}
-
-static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
-{
-       int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
-       struct user_fxsr_struct fpregs;
-
-       err = save_fpx_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
-       if (err)
-               return err;
-
-       n = copy_to_user(buf, &fpregs, sizeof(fpregs));
-       if(n > 0)
-               return -EFAULT;
-
-       return n;
-}
-
-static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
-{
-       int n, cpu = ((struct thread_info *) child->stack)->cpu;
-       struct user_fxsr_struct fpregs;
-
-       n = copy_from_user(&fpregs, buf, sizeof(fpregs));
-       if (n > 0)
-               return -EFAULT;
-
-       return restore_fpx_registers(userspace_pid[cpu],
-                                    (unsigned long *) &fpregs);
-}
-
-long subarch_ptrace(struct task_struct *child, long request,
-                   unsigned long addr, unsigned long data)
-{
-       int ret = -EIO;
-       void __user *datap = (void __user *) data;
-       switch (request) {
-       case PTRACE_GETFPREGS: /* Get the child FPU state. */
-               ret = get_fpregs(datap, child);
-               break;
-       case PTRACE_SETFPREGS: /* Set the child FPU state. */
-               ret = set_fpregs(datap, child);
-               break;
-       case PTRACE_GETFPXREGS: /* Get the child FPU state. */
-               ret = get_fpxregs(datap, child);
-               break;
-       case PTRACE_SETFPXREGS: /* Set the child FPU state. */
-               ret = set_fpxregs(datap, child);
-               break;
-       default:
-               ret = -EIO;
-       }
-       return ret;
-}
diff --git a/arch/um/sys-i386/ptrace_user.c b/arch/um/sys-i386/ptrace_user.c
deleted file mode 100644 (file)
index 0b10c3e..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <errno.h>
-#include <sys/ptrace.h>
-
-int ptrace_getregs(long pid, unsigned long *regs_out)
-{
-       if (ptrace(PTRACE_GETREGS, pid, 0, regs_out) < 0)
-               return -errno;
-       return 0;
-}
-
-int ptrace_setregs(long pid, unsigned long *regs)
-{
-       if (ptrace(PTRACE_SETREGS, pid, 0, regs) < 0)
-               return -errno;
-       return 0;
-}
diff --git a/arch/um/sys-i386/setjmp.S b/arch/um/sys-i386/setjmp.S
deleted file mode 100644 (file)
index b766792..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-#
-# arch/i386/setjmp.S
-#
-# setjmp/longjmp for the i386 architecture
-#
-
-#
-# The jmp_buf is assumed to contain the following, in order:
-#      %ebx
-#      %esp
-#      %ebp
-#      %esi
-#      %edi
-#      <return address>
-#
-
-       .text
-       .align 4
-       .globl setjmp
-       .type setjmp, @function
-setjmp:
-#ifdef _REGPARM
-       movl %eax,%edx
-#else
-       movl 4(%esp),%edx
-#endif
-       popl %ecx                       # Return address, and adjust the stack
-       xorl %eax,%eax                  # Return value
-       movl %ebx,(%edx)
-       movl %esp,4(%edx)               # Post-return %esp!
-       pushl %ecx                      # Make the call/return stack happy
-       movl %ebp,8(%edx)
-       movl %esi,12(%edx)
-       movl %edi,16(%edx)
-       movl %ecx,20(%edx)              # Return address
-       ret
-
-       .size setjmp,.-setjmp
-
-       .text
-       .align 4
-       .globl longjmp
-       .type longjmp, @function
-longjmp:
-#ifdef _REGPARM
-       xchgl %eax,%edx
-#else
-       movl 4(%esp),%edx               # jmp_ptr address
-       movl 8(%esp),%eax               # Return value
-#endif
-       movl (%edx),%ebx
-       movl 4(%edx),%esp
-       movl 8(%edx),%ebp
-       movl 12(%edx),%esi
-       movl 16(%edx),%edi
-       jmp *20(%edx)
-
-       .size longjmp,.-longjmp
diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c
deleted file mode 100644 (file)
index bcbfb0d..0000000
+++ /dev/null
@@ -1,498 +0,0 @@
-/*
- * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <linux/ptrace.h>
-#include <asm/unistd.h>
-#include <asm/uaccess.h>
-#include <asm/ucontext.h>
-#include "frame_kern.h"
-#include "skas.h"
-
-/*
- * FPU tag word conversions.
- */
-
-static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
-{
-       unsigned int tmp; /* to avoid 16 bit prefixes in the code */
-
-       /* Transform each pair of bits into 01 (valid) or 00 (empty) */
-       tmp = ~twd;
-       tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
-       /* and move the valid bits to the lower byte. */
-       tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
-       tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
-       tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
-       return tmp;
-}
-
-static inline unsigned long twd_fxsr_to_i387(struct user_fxsr_struct *fxsave)
-{
-       struct _fpxreg *st = NULL;
-       unsigned long twd = (unsigned long) fxsave->twd;
-       unsigned long tag;
-       unsigned long ret = 0xffff0000;
-       int i;
-
-#define FPREG_ADDR(f, n)       ((char *)&(f)->st_space + (n) * 16)
-
-       for (i = 0; i < 8; i++) {
-               if (twd & 0x1) {
-                       st = (struct _fpxreg *) FPREG_ADDR(fxsave, i);
-
-                       switch (st->exponent & 0x7fff) {
-                       case 0x7fff:
-                               tag = 2;                /* Special */
-                               break;
-                       case 0x0000:
-                               if ( !st->significand[0] &&
-                                    !st->significand[1] &&
-                                    !st->significand[2] &&
-                                    !st->significand[3] ) {
-                                       tag = 1;        /* Zero */
-                               } else {
-                                       tag = 2;        /* Special */
-                               }
-                               break;
-                       default:
-                               if (st->significand[3] & 0x8000) {
-                                       tag = 0;        /* Valid */
-                               } else {
-                                       tag = 2;        /* Special */
-                               }
-                               break;
-                       }
-               } else {
-                       tag = 3;                        /* Empty */
-               }
-               ret |= (tag << (2 * i));
-               twd = twd >> 1;
-       }
-       return ret;
-}
-
-static int convert_fxsr_to_user(struct _fpstate __user *buf,
-                               struct user_fxsr_struct *fxsave)
-{
-       unsigned long env[7];
-       struct _fpreg __user *to;
-       struct _fpxreg *from;
-       int i;
-
-       env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
-       env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
-       env[2] = twd_fxsr_to_i387(fxsave);
-       env[3] = fxsave->fip;
-       env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
-       env[5] = fxsave->foo;
-       env[6] = fxsave->fos;
-
-       if (__copy_to_user(buf, env, 7 * sizeof(unsigned long)))
-               return 1;
-
-       to = &buf->_st[0];
-       from = (struct _fpxreg *) &fxsave->st_space[0];
-       for (i = 0; i < 8; i++, to++, from++) {
-               unsigned long __user *t = (unsigned long __user *)to;
-               unsigned long *f = (unsigned long *)from;
-
-               if (__put_user(*f, t) ||
-                               __put_user(*(f + 1), t + 1) ||
-                               __put_user(from->exponent, &to->exponent))
-                       return 1;
-       }
-       return 0;
-}
-
-static int convert_fxsr_from_user(struct user_fxsr_struct *fxsave,
-                                 struct _fpstate __user *buf)
-{
-       unsigned long env[7];
-       struct _fpxreg *to;
-       struct _fpreg __user *from;
-       int i;
-
-       if (copy_from_user( env, buf, 7 * sizeof(long)))
-               return 1;
-
-       fxsave->cwd = (unsigned short)(env[0] & 0xffff);
-       fxsave->swd = (unsigned short)(env[1] & 0xffff);
-       fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
-       fxsave->fip = env[3];
-       fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
-       fxsave->fcs = (env[4] & 0xffff);
-       fxsave->foo = env[5];
-       fxsave->fos = env[6];
-
-       to = (struct _fpxreg *) &fxsave->st_space[0];
-       from = &buf->_st[0];
-       for (i = 0; i < 8; i++, to++, from++) {
-               unsigned long *t = (unsigned long *)to;
-               unsigned long __user *f = (unsigned long __user *)from;
-
-               if (__get_user(*t, f) ||
-                   __get_user(*(t + 1), f + 1) ||
-                   __get_user(to->exponent, &from->exponent))
-                       return 1;
-       }
-       return 0;
-}
-
-extern int have_fpx_regs;
-
-static int copy_sc_from_user(struct pt_regs *regs,
-                            struct sigcontext __user *from)
-{
-       struct sigcontext sc;
-       int err, pid;
-
-       err = copy_from_user(&sc, from, sizeof(sc));
-       if (err)
-               return err;
-
-       pid = userspace_pid[current_thread_info()->cpu];
-
-#define GETREG(regno, regname) regs->regs.gp[HOST_##regno] = sc.regname
-
-       GETREG(GS, gs);
-       GETREG(FS, fs);
-       GETREG(ES, es);
-       GETREG(DS, ds);
-       GETREG(EDI, di);
-       GETREG(ESI, si);
-       GETREG(EBP, bp);
-       GETREG(SP, sp);
-       GETREG(EBX, bx);
-       GETREG(EDX, dx);
-       GETREG(ECX, cx);
-       GETREG(EAX, ax);
-       GETREG(IP, ip);
-       GETREG(CS, cs);
-       GETREG(EFLAGS, flags);
-       GETREG(SS, ss);
-
-#undef GETREG
-       if (have_fpx_regs) {
-               struct user_fxsr_struct fpx;
-
-               err = copy_from_user(&fpx,
-                       &((struct _fpstate __user *)sc.fpstate)->_fxsr_env[0],
-                                    sizeof(struct user_fxsr_struct));
-               if (err)
-                       return 1;
-
-               err = convert_fxsr_from_user(&fpx, sc.fpstate);
-               if (err)
-                       return 1;
-
-               err = restore_fpx_registers(pid, (unsigned long *) &fpx);
-               if (err < 0) {
-                       printk(KERN_ERR "copy_sc_from_user - "
-                              "restore_fpx_registers failed, errno = %d\n",
-                              -err);
-                       return 1;
-               }
-       } else {
-               struct user_i387_struct fp;
-
-               err = copy_from_user(&fp, sc.fpstate,
-                                    sizeof(struct user_i387_struct));
-               if (err)
-                       return 1;
-
-               err = restore_fp_registers(pid, (unsigned long *) &fp);
-               if (err < 0) {
-                       printk(KERN_ERR "copy_sc_from_user - "
-                              "restore_fp_registers failed, errno = %d\n",
-                              -err);
-                       return 1;
-               }
-       }
-
-       return 0;
-}
-
-static int copy_sc_to_user(struct sigcontext __user *to,
-                          struct _fpstate __user *to_fp, struct pt_regs *regs,
-                          unsigned long sp)
-{
-       struct sigcontext sc;
-       struct faultinfo * fi = &current->thread.arch.faultinfo;
-       int err, pid;
-       memset(&sc, 0, sizeof(struct sigcontext));
-
-       sc.gs = REGS_GS(regs->regs.gp);
-       sc.fs = REGS_FS(regs->regs.gp);
-       sc.es = REGS_ES(regs->regs.gp);
-       sc.ds = REGS_DS(regs->regs.gp);
-       sc.di = REGS_EDI(regs->regs.gp);
-       sc.si = REGS_ESI(regs->regs.gp);
-       sc.bp = REGS_EBP(regs->regs.gp);
-       sc.sp = sp;
-       sc.bx = REGS_EBX(regs->regs.gp);
-       sc.dx = REGS_EDX(regs->regs.gp);
-       sc.cx = REGS_ECX(regs->regs.gp);
-       sc.ax = REGS_EAX(regs->regs.gp);
-       sc.ip = REGS_IP(regs->regs.gp);
-       sc.cs = REGS_CS(regs->regs.gp);
-       sc.flags = REGS_EFLAGS(regs->regs.gp);
-       sc.sp_at_signal = regs->regs.gp[UESP];
-       sc.ss = regs->regs.gp[SS];
-       sc.cr2 = fi->cr2;
-       sc.err = fi->error_code;
-       sc.trapno = fi->trap_no;
-
-       to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1));
-       sc.fpstate = to_fp;
-
-       pid = userspace_pid[current_thread_info()->cpu];
-       if (have_fpx_regs) {
-               struct user_fxsr_struct fpx;
-
-               err = save_fpx_registers(pid, (unsigned long *) &fpx);
-               if (err < 0){
-                       printk(KERN_ERR "copy_sc_to_user - save_fpx_registers "
-                              "failed, errno = %d\n", err);
-                       return 1;
-               }
-
-               err = convert_fxsr_to_user(to_fp, &fpx);
-               if (err)
-                       return 1;
-
-               err |= __put_user(fpx.swd, &to_fp->status);
-               err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic);
-               if (err)
-                       return 1;
-
-               if (copy_to_user(&to_fp->_fxsr_env[0], &fpx,
-                                sizeof(struct user_fxsr_struct)))
-                       return 1;
-       }
-       else {
-               struct user_i387_struct fp;
-
-               err = save_fp_registers(pid, (unsigned long *) &fp);
-               if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
-                       return 1;
-       }
-
-       return copy_to_user(to, &sc, sizeof(sc));
-}
-
-static int copy_ucontext_to_user(struct ucontext __user *uc,
-                                struct _fpstate __user *fp, sigset_t *set,
-                                unsigned long sp)
-{
-       int err = 0;
-
-       err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp);
-       err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags);
-       err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size);
-       err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, sp);
-       err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set));
-       return err;
-}
-
-struct sigframe
-{
-       char __user *pretcode;
-       int sig;
-       struct sigcontext sc;
-       struct _fpstate fpstate;
-       unsigned long extramask[_NSIG_WORDS-1];
-       char retcode[8];
-};
-
-struct rt_sigframe
-{
-       char __user *pretcode;
-       int sig;
-       struct siginfo __user *pinfo;
-       void __user *puc;
-       struct siginfo info;
-       struct ucontext uc;
-       struct _fpstate fpstate;
-       char retcode[8];
-};
-
-int setup_signal_stack_sc(unsigned long stack_top, int sig,
-                         struct k_sigaction *ka, struct pt_regs *regs,
-                         sigset_t *mask)
-{
-       struct sigframe __user *frame;
-       void __user *restorer;
-       unsigned long save_sp = PT_REGS_SP(regs);
-       int err = 0;
-
-       /* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */
-       stack_top = ((stack_top + 4) & -16UL) - 4;
-       frame = (struct sigframe __user *) stack_top - 1;
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-               return 1;
-
-       restorer = frame->retcode;
-       if (ka->sa.sa_flags & SA_RESTORER)
-               restorer = ka->sa.sa_restorer;
-
-       /* Update SP now because the page fault handler refuses to extend
-        * the stack if the faulting address is too far below the current
-        * SP, which frame now certainly is.  If there's an error, the original
-        * value is restored on the way out.
-        * When writing the sigcontext to the stack, we have to write the
-        * original value, so that's passed to copy_sc_to_user, which does
-        * the right thing with it.
-        */
-       PT_REGS_SP(regs) = (unsigned long) frame;
-
-       err |= __put_user(restorer, &frame->pretcode);
-       err |= __put_user(sig, &frame->sig);
-       err |= copy_sc_to_user(&frame->sc, NULL, regs, save_sp);
-       err |= __put_user(mask->sig[0], &frame->sc.oldmask);
-       if (_NSIG_WORDS > 1)
-               err |= __copy_to_user(&frame->extramask, &mask->sig[1],
-                                     sizeof(frame->extramask));
-
-       /*
-        * This is popl %eax ; movl $,%eax ; int $0x80
-        *
-        * WE DO NOT USE IT ANY MORE! It's only left here for historical
-        * reasons and because gdb uses it as a signature to notice
-        * signal handler stack frames.
-        */
-       err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
-       err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
-       err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
-
-       if (err)
-               goto err;
-
-       PT_REGS_SP(regs) = (unsigned long) frame;
-       PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
-       PT_REGS_EAX(regs) = (unsigned long) sig;
-       PT_REGS_EDX(regs) = (unsigned long) 0;
-       PT_REGS_ECX(regs) = (unsigned long) 0;
-
-       if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
-               ptrace_notify(SIGTRAP);
-       return 0;
-
-err:
-       PT_REGS_SP(regs) = save_sp;
-       return err;
-}
-
-int setup_signal_stack_si(unsigned long stack_top, int sig,
-                         struct k_sigaction *ka, struct pt_regs *regs,
-                         siginfo_t *info, sigset_t *mask)
-{
-       struct rt_sigframe __user *frame;
-       void __user *restorer;
-       unsigned long save_sp = PT_REGS_SP(regs);
-       int err = 0;
-
-       stack_top &= -8UL;
-       frame = (struct rt_sigframe __user *) stack_top - 1;
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-               return 1;
-
-       restorer = frame->retcode;
-       if (ka->sa.sa_flags & SA_RESTORER)
-               restorer = ka->sa.sa_restorer;
-
-       /* See comment above about why this is here */
-       PT_REGS_SP(regs) = (unsigned long) frame;
-
-       err |= __put_user(restorer, &frame->pretcode);
-       err |= __put_user(sig, &frame->sig);
-       err |= __put_user(&frame->info, &frame->pinfo);
-       err |= __put_user(&frame->uc, &frame->puc);
-       err |= copy_siginfo_to_user(&frame->info, info);
-       err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask,
-                                    save_sp);
-
-       /*
-        * This is movl $,%eax ; int $0x80
-        *
-        * WE DO NOT USE IT ANY MORE! It's only left here for historical
-        * reasons and because gdb uses it as a signature to notice
-        * signal handler stack frames.
-        */
-       err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
-       err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
-       err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
-
-       if (err)
-               goto err;
-
-       PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
-       PT_REGS_EAX(regs) = (unsigned long) sig;
-       PT_REGS_EDX(regs) = (unsigned long) &frame->info;
-       PT_REGS_ECX(regs) = (unsigned long) &frame->uc;
-
-       if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
-               ptrace_notify(SIGTRAP);
-       return 0;
-
-err:
-       PT_REGS_SP(regs) = save_sp;
-       return err;
-}
-
-long sys_sigreturn(struct pt_regs regs)
-{
-       unsigned long sp = PT_REGS_SP(&current->thread.regs);
-       struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
-       sigset_t set;
-       struct sigcontext __user *sc = &frame->sc;
-       unsigned long __user *oldmask = &sc->oldmask;
-       unsigned long __user *extramask = frame->extramask;
-       int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
-
-       if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
-           copy_from_user(&set.sig[1], extramask, sig_size))
-               goto segfault;
-
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       set_current_blocked(&set);
-
-       if (copy_sc_from_user(&current->thread.regs, sc))
-               goto segfault;
-
-       /* Avoid ERESTART handling */
-       PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
-       return PT_REGS_SYSCALL_RET(&current->thread.regs);
-
- segfault:
-       force_sig(SIGSEGV, current);
-       return 0;
-}
-
-long sys_rt_sigreturn(struct pt_regs regs)
-{
-       unsigned long sp = PT_REGS_SP(&current->thread.regs);
-       struct rt_sigframe __user *frame =
-               (struct rt_sigframe __user *) (sp - 4);
-       sigset_t set;
-       struct ucontext __user *uc = &frame->uc;
-       int sig_size = _NSIG_WORDS * sizeof(unsigned long);
-
-       if (copy_from_user(&set, &uc->uc_sigmask, sig_size))
-               goto segfault;
-
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       set_current_blocked(&set);
-
-       if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
-               goto segfault;
-
-       /* Avoid ERESTART handling */
-       PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
-       return PT_REGS_SYSCALL_RET(&current->thread.regs);
-
- segfault:
-       force_sig(SIGSEGV, current);
-       return 0;
-}
diff --git a/arch/um/sys-i386/stub.S b/arch/um/sys-i386/stub.S
deleted file mode 100644 (file)
index 54a36ec..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-#include "as-layout.h"
-
-       .globl syscall_stub
-.section .__syscall_stub, "ax"
-
-       .globl batch_syscall_stub
-batch_syscall_stub:
-       /* load pointer to first operation */
-       mov     $(STUB_DATA+8), %esp
-
-again:
-       /* load length of additional data */
-       mov     0x0(%esp), %eax
-
-       /* if(length == 0) : end of list */
-       /* write possible 0 to header */
-       mov     %eax, STUB_DATA+4
-       cmpl    $0, %eax
-       jz      done
-
-       /* save current pointer */
-       mov     %esp, STUB_DATA+4
-
-       /* skip additional data */
-       add     %eax, %esp
-
-       /* load syscall-# */
-       pop     %eax
-
-       /* load syscall params */
-       pop     %ebx
-       pop     %ecx
-       pop     %edx
-       pop     %esi
-       pop     %edi
-       pop     %ebp
-
-       /* execute syscall */
-       int     $0x80
-
-       /* check return value */
-       pop     %ebx
-       cmp     %ebx, %eax
-       je      again
-
-done:
-       /* save return value */
-       mov     %eax, STUB_DATA
-
-       /* stop */
-       int3
diff --git a/arch/um/sys-i386/stub_segv.c b/arch/um/sys-i386/stub_segv.c
deleted file mode 100644 (file)
index 28ccf73..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include "sysdep/stub.h"
-#include "sysdep/sigcontext.h"
-
-void __attribute__ ((__section__ (".__syscall_stub")))
-stub_segv_handler(int sig)
-{
-       struct sigcontext *sc = (struct sigcontext *) (&sig + 1);
-
-       GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA), sc);
-
-       trap_myself();
-}
diff --git a/arch/um/sys-i386/sys_call_table.S b/arch/um/sys-i386/sys_call_table.S
deleted file mode 100644 (file)
index de27407..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#include <linux/linkage.h>
-/* Steal i386 syscall table for our purposes, but with some slight changes.*/
-
-#define sys_iopl sys_ni_syscall
-#define sys_ioperm sys_ni_syscall
-
-#define sys_vm86old sys_ni_syscall
-#define sys_vm86 sys_ni_syscall
-
-#define old_mmap sys_old_mmap
-
-#define ptregs_fork sys_fork
-#define ptregs_execve sys_execve
-#define ptregs_iopl sys_iopl
-#define ptregs_vm86old sys_vm86old
-#define ptregs_sigreturn sys_sigreturn
-#define ptregs_clone sys_clone
-#define ptregs_vm86 sys_vm86
-#define ptregs_rt_sigreturn sys_rt_sigreturn
-#define ptregs_sigaltstack sys_sigaltstack
-#define ptregs_vfork sys_vfork
-
-.section .rodata,"a"
-
-#include "../../x86/kernel/syscall_table_32.S"
-
-ENTRY(syscall_table_size)
-.long .-sys_call_table
diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
deleted file mode 100644 (file)
index 70ca357..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/* 
- * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL
- */
-
-#include "linux/sched.h"
-#include "linux/shm.h"
-#include "linux/ipc.h"
-#include "linux/syscalls.h"
-#include "asm/mman.h"
-#include "asm/uaccess.h"
-#include "asm/unistd.h"
-
-/*
- * The prototype on i386 is:
- *
- *     int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr)
- *
- * and the "newtls" arg. on i386 is read by copy_thread directly from the
- * register saved on the stack.
- */
-long sys_clone(unsigned long clone_flags, unsigned long newsp,
-              int __user *parent_tid, void *newtls, int __user *child_tid)
-{
-       long ret;
-
-       if (!newsp)
-               newsp = UPT_SP(&current->thread.regs.regs);
-
-       current->thread.forking = 1;
-       ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
-                     child_tid);
-       current->thread.forking = 0;
-       return ret;
-}
-
-long sys_sigaction(int sig, const struct old_sigaction __user *act,
-                        struct old_sigaction __user *oact)
-{
-       struct k_sigaction new_ka, old_ka;
-       int ret;
-
-       if (act) {
-               old_sigset_t mask;
-               if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
-                       return -EFAULT;
-               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-               __get_user(mask, &act->sa_mask);
-               siginitset(&new_ka.sa.sa_mask, mask);
-       }
-
-       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
-       if (!ret && oact) {
-               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
-                       return -EFAULT;
-               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
-       }
-
-       return ret;
-}
diff --git a/arch/um/sys-i386/sysrq.c b/arch/um/sys-i386/sysrq.c
deleted file mode 100644 (file)
index 171b3e9..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (C) 2001 - 2003 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL
- */
-
-#include "linux/kernel.h"
-#include "linux/smp.h"
-#include "linux/sched.h"
-#include "linux/kallsyms.h"
-#include "asm/ptrace.h"
-#include "sysrq.h"
-
-/* This is declared by <linux/sched.h> */
-void show_regs(struct pt_regs *regs)
-{
-        printk("\n");
-        printk("EIP: %04lx:[<%08lx>] CPU: %d %s", 
-              0xffff & PT_REGS_CS(regs), PT_REGS_IP(regs),
-              smp_processor_id(), print_tainted());
-        if (PT_REGS_CS(regs) & 3)
-                printk(" ESP: %04lx:%08lx", 0xffff & PT_REGS_SS(regs),
-                      PT_REGS_SP(regs));
-        printk(" EFLAGS: %08lx\n    %s\n", PT_REGS_EFLAGS(regs),
-              print_tainted());
-        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
-                PT_REGS_EAX(regs), PT_REGS_EBX(regs), 
-              PT_REGS_ECX(regs), 
-              PT_REGS_EDX(regs));
-        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
-              PT_REGS_ESI(regs), PT_REGS_EDI(regs), 
-              PT_REGS_EBP(regs));
-        printk(" DS: %04lx ES: %04lx\n",
-              0xffff & PT_REGS_DS(regs), 
-              0xffff & PT_REGS_ES(regs));
-
-        show_trace(NULL, (unsigned long *) &regs);
-}
-
-/* Copied from i386. */
-static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
-{
-       return  p > (void *)tinfo &&
-               p < (void *)tinfo + THREAD_SIZE - 3;
-}
-
-/* Adapted from i386 (we also print the address we read from). */
-static inline unsigned long print_context_stack(struct thread_info *tinfo,
-                               unsigned long *stack, unsigned long ebp)
-{
-       unsigned long addr;
-
-#ifdef CONFIG_FRAME_POINTER
-       while (valid_stack_ptr(tinfo, (void *)ebp)) {
-               addr = *(unsigned long *)(ebp + 4);
-               printk("%08lx:  [<%08lx>]", ebp + 4, addr);
-               print_symbol(" %s", addr);
-               printk("\n");
-               ebp = *(unsigned long *)ebp;
-       }
-#else
-       while (valid_stack_ptr(tinfo, stack)) {
-               addr = *stack;
-               if (__kernel_text_address(addr)) {
-                       printk("%08lx:  [<%08lx>]", (unsigned long) stack, addr);
-                       print_symbol(" %s", addr);
-                       printk("\n");
-               }
-               stack++;
-       }
-#endif
-       return ebp;
-}
-
-void show_trace(struct task_struct* task, unsigned long * stack)
-{
-       unsigned long ebp;
-       struct thread_info *context;
-
-       /* Turn this into BUG_ON if possible. */
-       if (!stack) {
-               stack = (unsigned long*) &stack;
-               printk("show_trace: got NULL stack, implicit assumption task == current");
-               WARN_ON(1);
-       }
-
-       if (!task)
-               task = current;
-
-       if (task != current) {
-               ebp = (unsigned long) KSTK_EBP(task);
-       } else {
-               asm ("movl %%ebp, %0" : "=r" (ebp) : );
-       }
-
-       context = (struct thread_info *)
-               ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-       print_context_stack(context, stack, ebp);
-
-       printk("\n");
-}
-
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c
deleted file mode 100644 (file)
index c6c7131..0000000
+++ /dev/null
@@ -1,396 +0,0 @@
-/*
- * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
- * Licensed under the GPL
- */
-
-#include "linux/percpu.h"
-#include "linux/sched.h"
-#include "asm/uaccess.h"
-#include "os.h"
-#include "skas.h"
-#include "sysdep/tls.h"
-
-/*
- * If needed we can detect when it's uninitialized.
- *
- * These are initialized in an initcall and unchanged thereafter.
- */
-static int host_supports_tls = -1;
-int host_gdt_entry_tls_min;
-
-int do_set_thread_area(struct user_desc *info)
-{
-       int ret;
-       u32 cpu;
-
-       cpu = get_cpu();
-       ret = os_set_thread_area(info, userspace_pid[cpu]);
-       put_cpu();
-
-       if (ret)
-               printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
-                      "index = %d\n", ret, info->entry_number);
-
-       return ret;
-}
-
-int do_get_thread_area(struct user_desc *info)
-{
-       int ret;
-       u32 cpu;
-
-       cpu = get_cpu();
-       ret = os_get_thread_area(info, userspace_pid[cpu]);
-       put_cpu();
-
-       if (ret)
-               printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, "
-                      "index = %d\n", ret, info->entry_number);
-
-       return ret;
-}
-
-/*
- * sys_get_thread_area: get a yet unused TLS descriptor index.
- * XXX: Consider leaving one free slot for glibc usage at first place. This must
- * be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
- *
- * Also, this must be tested when compiling in SKAS mode with dynamic linking
- * and running against NPTL.
- */
-static int get_free_idx(struct task_struct* task)
-{
-       struct thread_struct *t = &task->thread;
-       int idx;
-
-       if (!t->arch.tls_array)
-               return GDT_ENTRY_TLS_MIN;
-
-       for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
-               if (!t->arch.tls_array[idx].present)
-                       return idx + GDT_ENTRY_TLS_MIN;
-       return -ESRCH;
-}
-
-static inline void clear_user_desc(struct user_desc* info)
-{
-       /* Postcondition: LDT_empty(info) returns true. */
-       memset(info, 0, sizeof(*info));
-
-       /*
-        * Check the LDT_empty or the i386 sys_get_thread_area code - we obtain
-        * indeed an empty user_desc.
-        */
-       info->read_exec_only = 1;
-       info->seg_not_present = 1;
-}
-
-#define O_FORCE 1
-
-static int load_TLS(int flags, struct task_struct *to)
-{
-       int ret = 0;
-       int idx;
-
-       for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
-               struct uml_tls_struct* curr =
-                       &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
-
-               /*
-                * Actually, now if it wasn't flushed it gets cleared and
-                * flushed to the host, which will clear it.
-                */
-               if (!curr->present) {
-                       if (!curr->flushed) {
-                               clear_user_desc(&curr->tls);
-                               curr->tls.entry_number = idx;
-                       } else {
-                               WARN_ON(!LDT_empty(&curr->tls));
-                               continue;
-                       }
-               }
-
-               if (!(flags & O_FORCE) && curr->flushed)
-                       continue;
-
-               ret = do_set_thread_area(&curr->tls);
-               if (ret)
-                       goto out;
-
-               curr->flushed = 1;
-       }
-out:
-       return ret;
-}
-
-/*
- * Verify if we need to do a flush for the new process, i.e. if there are any
- * present desc's, only if they haven't been flushed.
- */
-static inline int needs_TLS_update(struct task_struct *task)
-{
-       int i;
-       int ret = 0;
-
-       for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
-               struct uml_tls_struct* curr =
-                       &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
-
-               /*
-                * Can't test curr->present, we may need to clear a descriptor
-                * which had a value.
-                */
-               if (curr->flushed)
-                       continue;
-               ret = 1;
-               break;
-       }
-       return ret;
-}
-
-/*
- * On a newly forked process, the TLS descriptors haven't yet been flushed. So
- * we mark them as such and the first switch_to will do the job.
- */
-void clear_flushed_tls(struct task_struct *task)
-{
-       int i;
-
-       for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
-               struct uml_tls_struct* curr =
-                       &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
-
-               /*
-                * Still correct to do this, if it wasn't present on the host it
-                * will remain as flushed as it was.
-                */
-               if (!curr->present)
-                       continue;
-
-               curr->flushed = 0;
-       }
-}
-
-/*
- * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
- * common host process. So this is needed in SKAS0 too.
- *
- * However, if each thread had a different host process (and this was discussed
- * for SMP support) this won't be needed.
- *
- * And this will not need be used when (and if) we'll add support to the host
- * SKAS patch.
- */
-
-int arch_switch_tls(struct task_struct *to)
-{
-       if (!host_supports_tls)
-               return 0;
-
-       /*
-        * We have no need whatsoever to switch TLS for kernel threads; beyond
-        * that, that would also result in us calling os_set_thread_area with
-        * userspace_pid[cpu] == 0, which gives an error.
-        */
-       if (likely(to->mm))
-               return load_TLS(O_FORCE, to);
-
-       return 0;
-}
-
-static int set_tls_entry(struct task_struct* task, struct user_desc *info,
-                        int idx, int flushed)
-{
-       struct thread_struct *t = &task->thread;
-
-       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-               return -EINVAL;
-
-       t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
-       t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
-       t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;
-
-       return 0;
-}
-
-int arch_copy_tls(struct task_struct *new)
-{
-       struct user_desc info;
-       int idx, ret = -EFAULT;
-
-       if (copy_from_user(&info,
-                          (void __user *) UPT_ESI(&new->thread.regs.regs),
-                          sizeof(info)))
-               goto out;
-
-       ret = -EINVAL;
-       if (LDT_empty(&info))
-               goto out;
-
-       idx = info.entry_number;
-
-       ret = set_tls_entry(new, &info, idx, 0);
-out:
-       return ret;
-}
-
-/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
-static int get_tls_entry(struct task_struct *task, struct user_desc *info,
-                        int idx)
-{
-       struct thread_struct *t = &task->thread;
-
-       if (!t->arch.tls_array)
-               goto clear;
-
-       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-               return -EINVAL;
-
-       if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
-               goto clear;
-
-       *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;
-
-out:
-       /*
-        * Temporary debugging check, to make sure that things have been
-        * flushed. This could be triggered if load_TLS() failed.
-        */
-       if (unlikely(task == current &&
-                    !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
-               printk(KERN_ERR "get_tls_entry: task with pid %d got here "
-                               "without flushed TLS.", current->pid);
-       }
-
-       return 0;
-clear:
-       /*
-        * When the TLS entry has not been set, the values read to user in the
-        * tls_array are 0 (because it's cleared at boot, see
-        * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
-        */
-       clear_user_desc(info);
-       info->entry_number = idx;
-       goto out;
-}
-
-int sys_set_thread_area(struct user_desc __user *user_desc)
-{
-       struct user_desc info;
-       int idx, ret;
-
-       if (!host_supports_tls)
-               return -ENOSYS;
-
-       if (copy_from_user(&info, user_desc, sizeof(info)))
-               return -EFAULT;
-
-       idx = info.entry_number;
-
-       if (idx == -1) {
-               idx = get_free_idx(current);
-               if (idx < 0)
-                       return idx;
-               info.entry_number = idx;
-               /* Tell the user which slot we chose for him.*/
-               if (put_user(idx, &user_desc->entry_number))
-                       return -EFAULT;
-       }
-
-       ret = do_set_thread_area(&info);
-       if (ret)
-               return ret;
-       return set_tls_entry(current, &info, idx, 1);
-}
-
-/*
- * Perform set_thread_area on behalf of the traced child.
- * Note: error handling is not done on the deferred load, and this differ from
- * i386. However the only possible error are caused by bugs.
- */
-int ptrace_set_thread_area(struct task_struct *child, int idx,
-                          struct user_desc __user *user_desc)
-{
-       struct user_desc info;
-
-       if (!host_supports_tls)
-               return -EIO;
-
-       if (copy_from_user(&info, user_desc, sizeof(info)))
-               return -EFAULT;
-
-       return set_tls_entry(child, &info, idx, 0);
-}
-
-int sys_get_thread_area(struct user_desc __user *user_desc)
-{
-       struct user_desc info;
-       int idx, ret;
-
-       if (!host_supports_tls)
-               return -ENOSYS;
-
-       if (get_user(idx, &user_desc->entry_number))
-               return -EFAULT;
-
-       ret = get_tls_entry(current, &info, idx);
-       if (ret < 0)
-               goto out;
-
-       if (copy_to_user(user_desc, &info, sizeof(info)))
-               ret = -EFAULT;
-
-out:
-       return ret;
-}
-
-/*
- * Perform get_thread_area on behalf of the traced child.
- */
-int ptrace_get_thread_area(struct task_struct *child, int idx,
-               struct user_desc __user *user_desc)
-{
-       struct user_desc info;
-       int ret;
-
-       if (!host_supports_tls)
-               return -EIO;
-
-       ret = get_tls_entry(child, &info, idx);
-       if (ret < 0)
-               goto out;
-
-       if (copy_to_user(user_desc, &info, sizeof(info)))
-               ret = -EFAULT;
-out:
-       return ret;
-}
-
-/*
- * This code is really i386-only, but it detects and logs x86_64 GDT indexes
- * if a 32-bit UML is running on a 64-bit host.
- */
-static int __init __setup_host_supports_tls(void)
-{
-       check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
-       if (host_supports_tls) {
-               printk(KERN_INFO "Host TLS support detected\n");
-               printk(KERN_INFO "Detected host type: ");
-               switch (host_gdt_entry_tls_min) {
-               case GDT_ENTRY_TLS_MIN_I386:
-                       printk(KERN_CONT "i386");
-                       break;
-               case GDT_ENTRY_TLS_MIN_X86_64:
-                       printk(KERN_CONT "x86_64");
-                       break;
-               }
-               printk(KERN_CONT " (GDT indexes %d to %d)\n",
-                      host_gdt_entry_tls_min,
-                      host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
-       } else
-               printk(KERN_ERR "  Host TLS support NOT detected! "
-                               "TLS support inside UML will not work\n");
-       return 0;
-}
-
-__initcall(__setup_host_supports_tls);
diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c
deleted file mode 100644 (file)
index 5f883bf..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-#include <stdio.h>
-#include <stddef.h>
-#include <signal.h>
-#include <sys/poll.h>
-#include <sys/user.h>
-#include <sys/mman.h>
-#include <asm/ptrace.h>
-
-#define DEFINE(sym, val) \
-       asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define DEFINE_LONGS(sym, val) \
-       asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
-
-#define OFFSET(sym, str, mem) \
-       DEFINE(sym, offsetof(struct str, mem));
-
-void foo(void)
-{
-       OFFSET(HOST_SC_TRAPNO, sigcontext, trapno);
-       OFFSET(HOST_SC_ERR, sigcontext, err);
-       OFFSET(HOST_SC_CR2, sigcontext, cr2);
-
-       DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_fpregs_struct));
-       DEFINE_LONGS(HOST_FPX_SIZE, sizeof(struct user_fpxregs_struct));
-
-       DEFINE(HOST_IP, EIP);
-       DEFINE(HOST_SP, UESP);
-       DEFINE(HOST_EFLAGS, EFL);
-       DEFINE(HOST_EAX, EAX);
-       DEFINE(HOST_EBX, EBX);
-       DEFINE(HOST_ECX, ECX);
-       DEFINE(HOST_EDX, EDX);
-       DEFINE(HOST_ESI, ESI);
-       DEFINE(HOST_EDI, EDI);
-       DEFINE(HOST_EBP, EBP);
-       DEFINE(HOST_CS, CS);
-       DEFINE(HOST_SS, SS);
-       DEFINE(HOST_DS, DS);
-       DEFINE(HOST_FS, FS);
-       DEFINE(HOST_ES, ES);
-       DEFINE(HOST_GS, GS);
-       DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
-
-       /* XXX Duplicated between i386 and x86_64 */
-       DEFINE(UM_POLLIN, POLLIN);
-       DEFINE(UM_POLLPRI, POLLPRI);
-       DEFINE(UM_POLLOUT, POLLOUT);
-
-       DEFINE(UM_PROT_READ, PROT_READ);
-       DEFINE(UM_PROT_WRITE, PROT_WRITE);
-       DEFINE(UM_PROT_EXEC, PROT_EXEC);
-}
diff --git a/arch/um/sys-x86/Makefile b/arch/um/sys-x86/Makefile
new file mode 100644 (file)
index 0000000..671de0b
--- /dev/null
@@ -0,0 +1,45 @@
+#
+# Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+#
+
+ifeq ($(CONFIG_X86_32),y)
+       BITS := 32
+else
+       BITS := 64
+endif
+
+obj-y = bug.o bugs_$(BITS).o delay_$(BITS).o fault.o ksyms.o ldt.o \
+       ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal_$(BITS).o \
+       stub_$(BITS).o stub_segv_$(BITS).o syscalls_$(BITS).o \
+       sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o mem_$(BITS).o
+
+ifeq ($(CONFIG_X86_32),y)
+
+obj-y += checksum_32.o
+obj-$(CONFIG_BINFMT_ELF) += elfcore.o
+
+subarch-obj-y = lib/string_32.o lib/atomic64_32.o lib/atomic64_cx8_32.o
+subarch-obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += lib/rwsem.o
+subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
+subarch-obj-$(CONFIG_MODULES) += kernel/module.o
+
+else
+
+obj-y += vdso/
+
+subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
+               lib/rwsem.o
+
+endif
+
+subarch-obj-$(CONFIG_MODULES) += kernel/module.o
+
+USER_OBJS := bugs_$(BITS).o ptrace_user.o fault.o
+
+extra-y += user-offsets.s
+$(obj)/user-offsets.s: c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS)
+
+UNPROFILE_OBJS := stub_segv.o
+CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING)
+
+include arch/um/scripts/Makefile.rules
diff --git a/arch/um/sys-x86/bug.c b/arch/um/sys-x86/bug.c
new file mode 100644 (file)
index 0000000..e8034e3
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
+ * Licensed under the GPL V2
+ */
+
+#include <linux/uaccess.h>
+
+/*
+ * Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because
+ * that's not relevant in skas mode.
+ */
+
+int is_valid_bugaddr(unsigned long eip)
+{
+       unsigned short ud2;
+
+       if (probe_kernel_address((unsigned short __user *)eip, ud2))
+               return 0;
+
+       return ud2 == 0x0b0f;
+}
diff --git a/arch/um/sys-x86/bugs_32.c b/arch/um/sys-x86/bugs_32.c
new file mode 100644 (file)
index 0000000..7058e1f
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#include <signal.h>
+#include "kern_util.h"
+#include "longjmp.h"
+#include "task.h"
+#include "sysdep/ptrace.h"
+
+/* Set during early boot */
+static int host_has_cmov = 1;
+static jmp_buf cmov_test_return;
+
+static void cmov_sigill_test_handler(int sig)
+{
+       host_has_cmov = 0;
+       longjmp(cmov_test_return, 1);
+}
+
+void arch_check_bugs(void)
+{
+       struct sigaction old, new;
+
+       printk(UM_KERN_INFO "Checking for host processor cmov support...");
+       new.sa_handler = cmov_sigill_test_handler;
+
+       /* Make sure that SIGILL is enabled after the handler longjmps back */
+       new.sa_flags = SA_NODEFER;
+       sigemptyset(&new.sa_mask);
+       sigaction(SIGILL, &new, &old);
+
+       if (setjmp(cmov_test_return) == 0) {
+               unsigned long foo = 0;
+               __asm__ __volatile__("cmovz %0, %1" : "=r" (foo) : "0" (foo));
+               printk(UM_KERN_CONT "Yes\n");
+       } else
+               printk(UM_KERN_CONT "No\n");
+
+       sigaction(SIGILL, &old, &new);
+}
+
+void arch_examine_signal(int sig, struct uml_pt_regs *regs)
+{
+       unsigned char tmp[2];
+
+       /*
+        * This is testing for a cmov (0x0f 0x4x) instruction causing a
+        * SIGILL in init.
+        */
+       if ((sig != SIGILL) || (TASK_PID(get_current()) != 1))
+               return;
+
+       if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) {
+               printk(UM_KERN_ERR "SIGILL in init, could not read "
+                      "instructions!\n");
+               return;
+       }
+
+       if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40))
+               return;
+
+       if (host_has_cmov == 0)
+               printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
+                      "processor doesn't implement.  Boot a filesystem "
+                      "compiled for older processors");
+       else if (host_has_cmov == 1)
+               printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
+                      "processor claims to implement");
+       else
+               printk(UM_KERN_ERR "Bad value for host_has_cmov (%d)",
+                       host_has_cmov);
+}
diff --git a/arch/um/sys-x86/bugs_64.c b/arch/um/sys-x86/bugs_64.c
new file mode 100644 (file)
index 0000000..44e02ba
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2003 PathScale, Inc.
+ *
+ * Licensed under the GPL
+ */
+
+#include "sysdep/ptrace.h"
+
+void arch_check_bugs(void)
+{
+}
+
+void arch_examine_signal(int sig, struct uml_pt_regs *regs)
+{
+}
diff --git a/arch/um/sys-x86/checksum_32.S b/arch/um/sys-x86/checksum_32.S
new file mode 100644 (file)
index 0000000..f058d2f
--- /dev/null
@@ -0,0 +1,458 @@
+/*
+ * INET                An implementation of the TCP/IP protocol suite for the LINUX
+ *             operating system.  INET is implemented using the  BSD Socket
+ *             interface as the means of communication with the user level.
+ *
+ *             IP/TCP/UDP checksumming routines
+ *
+ * Authors:    Jorge Cwik, <jorge@laser.satlink.net>
+ *             Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ *             Tom May, <ftom@netcom.com>
+ *              Pentium Pro/II routines:
+ *              Alexander Kjeldaas <astor@guardian.no>
+ *              Finn Arne Gangstad <finnag@guardian.no>
+ *             Lots of code moved from tcp.c and ip.c; see those files
+ *             for more names.
+ *
+ * Changes:     Ingo Molnar, converted csum_partial_copy() to 2.1 exception
+ *                          handling.
+ *             Andi Kleen,  add zeroing on error
+ *                   converted to pure assembler
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/errno.h>
+                               
+/*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+
+/*     
+unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+ */
+               
+.text
+.align 4
+.globl csum_partial
+               
+#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
+
+         /*            
+          * Experiments with Ethernet and SLIP connections show that buff
+          * is aligned on either a 2-byte or 4-byte boundary.  We get at
+          * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
+          * Fortunately, it is easy to convert 2-byte alignment to 4-byte
+          * alignment for the unrolled loop.
+          */           
+csum_partial:
+       pushl %esi
+       pushl %ebx
+       movl 20(%esp),%eax      # Function arg: unsigned int sum
+       movl 16(%esp),%ecx      # Function arg: int len
+       movl 12(%esp),%esi      # Function arg: unsigned char *buff
+       testl $2, %esi          # Check alignment.
+       jz 2f                   # Jump if alignment is ok.
+       subl $2, %ecx           # Alignment uses up two bytes.
+       jae 1f                  # Jump if we had at least two bytes.
+       addl $2, %ecx           # ecx was < 2.  Deal with it.
+       jmp 4f
+1:     movw (%esi), %bx
+       addl $2, %esi
+       addw %bx, %ax
+       adcl $0, %eax
+2:
+       movl %ecx, %edx
+       shrl $5, %ecx
+       jz 2f
+       testl %esi, %esi
+1:     movl (%esi), %ebx
+       adcl %ebx, %eax
+       movl 4(%esi), %ebx
+       adcl %ebx, %eax
+       movl 8(%esi), %ebx
+       adcl %ebx, %eax
+       movl 12(%esi), %ebx
+       adcl %ebx, %eax
+       movl 16(%esi), %ebx
+       adcl %ebx, %eax
+       movl 20(%esi), %ebx
+       adcl %ebx, %eax
+       movl 24(%esi), %ebx
+       adcl %ebx, %eax
+       movl 28(%esi), %ebx
+       adcl %ebx, %eax
+       lea 32(%esi), %esi
+       dec %ecx
+       jne 1b
+       adcl $0, %eax
+2:     movl %edx, %ecx
+       andl $0x1c, %edx
+       je 4f
+       shrl $2, %edx           # This clears CF
+3:     adcl (%esi), %eax
+       lea 4(%esi), %esi
+       dec %edx
+       jne 3b
+       adcl $0, %eax
+4:     andl $3, %ecx
+       jz 7f
+       cmpl $2, %ecx
+       jb 5f
+       movw (%esi),%cx
+       leal 2(%esi),%esi
+       je 6f
+       shll $16,%ecx
+5:     movb (%esi),%cl
+6:     addl %ecx,%eax
+       adcl $0, %eax 
+7:     
+       popl %ebx
+       popl %esi
+       ret
+
+#else
+
+/* Version for PentiumII/PPro */
+
+csum_partial:
+       pushl %esi
+       pushl %ebx
+       movl 20(%esp),%eax      # Function arg: unsigned int sum
+       movl 16(%esp),%ecx      # Function arg: int len
+       movl 12(%esp),%esi      # Function arg: const unsigned char *buf
+
+       testl $2, %esi         
+       jnz 30f                 
+10:
+       movl %ecx, %edx
+       movl %ecx, %ebx
+       andl $0x7c, %ebx
+       shrl $7, %ecx
+       addl %ebx,%esi
+       shrl $2, %ebx  
+       negl %ebx
+       lea 45f(%ebx,%ebx,2), %ebx
+       testl %esi, %esi
+       jmp *%ebx
+
+       # Handle 2-byte-aligned regions
+20:    addw (%esi), %ax
+       lea 2(%esi), %esi
+       adcl $0, %eax
+       jmp 10b
+
+30:    subl $2, %ecx          
+       ja 20b                 
+       je 32f
+       movzbl (%esi),%ebx      # csumming 1 byte, 2-aligned
+       addl %ebx, %eax
+       adcl $0, %eax
+       jmp 80f
+32:
+       addw (%esi), %ax        # csumming 2 bytes, 2-aligned
+       adcl $0, %eax
+       jmp 80f
+
+40: 
+       addl -128(%esi), %eax
+       adcl -124(%esi), %eax
+       adcl -120(%esi), %eax
+       adcl -116(%esi), %eax   
+       adcl -112(%esi), %eax   
+       adcl -108(%esi), %eax
+       adcl -104(%esi), %eax
+       adcl -100(%esi), %eax
+       adcl -96(%esi), %eax
+       adcl -92(%esi), %eax
+       adcl -88(%esi), %eax
+       adcl -84(%esi), %eax
+       adcl -80(%esi), %eax
+       adcl -76(%esi), %eax
+       adcl -72(%esi), %eax
+       adcl -68(%esi), %eax
+       adcl -64(%esi), %eax     
+       adcl -60(%esi), %eax     
+       adcl -56(%esi), %eax     
+       adcl -52(%esi), %eax   
+       adcl -48(%esi), %eax   
+       adcl -44(%esi), %eax
+       adcl -40(%esi), %eax
+       adcl -36(%esi), %eax
+       adcl -32(%esi), %eax
+       adcl -28(%esi), %eax
+       adcl -24(%esi), %eax
+       adcl -20(%esi), %eax
+       adcl -16(%esi), %eax
+       adcl -12(%esi), %eax
+       adcl -8(%esi), %eax
+       adcl -4(%esi), %eax
+45:
+       lea 128(%esi), %esi
+       adcl $0, %eax
+       dec %ecx
+       jge 40b
+       movl %edx, %ecx
+50:    andl $3, %ecx
+       jz 80f
+
+       # Handle the last 1-3 bytes without jumping
+       notl %ecx               # 1->2, 2->1, 3->0, higher bits are masked
+       movl $0xffffff,%ebx     # by the shll and shrl instructions
+       shll $3,%ecx
+       shrl %cl,%ebx
+       andl -128(%esi),%ebx    # esi is 4-aligned so should be ok
+       addl %ebx,%eax
+       adcl $0,%eax
+80: 
+       popl %ebx
+       popl %esi
+       ret
+                               
+#endif
+
+/*
+unsigned int csum_partial_copy_generic (const char *src, char *dst,
+                                 int len, int sum, int *src_err_ptr, int *dst_err_ptr)
+ */ 
+
+/*
+ * Copy from ds while checksumming, otherwise like csum_partial
+ *
+ * The macros SRC and DST specify the type of access for the instruction.
+ * thus we can call a custom exception handler for all access types.
+ *
+ * FIXME: could someone double-check whether I haven't mixed up some SRC and
+ *       DST definitions? It's damn hard to trigger all cases.  I hope I got
+ *       them all but there's no guarantee.
+ */
+
+#define SRC(y...)                      \
+       9999: y;                        \
+       .section __ex_table, "a";       \
+       .long 9999b, 6001f      ;       \
+       .previous
+
+#define DST(y...)                      \
+       9999: y;                        \
+       .section __ex_table, "a";       \
+       .long 9999b, 6002f      ;       \
+       .previous
+
+.align 4
+
+#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
+
+#define ARGBASE 16             
+#define FP             12
+
+csum_partial_copy_generic_i386:
+       subl  $4,%esp   
+       pushl %edi
+       pushl %esi
+       pushl %ebx
+       movl ARGBASE+16(%esp),%eax      # sum
+       movl ARGBASE+12(%esp),%ecx      # len
+       movl ARGBASE+4(%esp),%esi       # src
+       movl ARGBASE+8(%esp),%edi       # dst
+
+       testl $2, %edi                  # Check alignment. 
+       jz 2f                           # Jump if alignment is ok.
+       subl $2, %ecx                   # Alignment uses up two bytes.
+       jae 1f                          # Jump if we had at least two bytes.
+       addl $2, %ecx                   # ecx was < 2.  Deal with it.
+       jmp 4f
+SRC(1: movw (%esi), %bx        )
+       addl $2, %esi
+DST(   movw %bx, (%edi)        )
+       addl $2, %edi
+       addw %bx, %ax   
+       adcl $0, %eax
+2:
+       movl %ecx, FP(%esp)
+       shrl $5, %ecx
+       jz 2f
+       testl %esi, %esi
+SRC(1: movl (%esi), %ebx       )
+SRC(   movl 4(%esi), %edx      )
+       adcl %ebx, %eax
+DST(   movl %ebx, (%edi)       )
+       adcl %edx, %eax
+DST(   movl %edx, 4(%edi)      )
+
+SRC(   movl 8(%esi), %ebx      )
+SRC(   movl 12(%esi), %edx     )
+       adcl %ebx, %eax
+DST(   movl %ebx, 8(%edi)      )
+       adcl %edx, %eax
+DST(   movl %edx, 12(%edi)     )
+
+SRC(   movl 16(%esi), %ebx     )
+SRC(   movl 20(%esi), %edx     )
+       adcl %ebx, %eax
+DST(   movl %ebx, 16(%edi)     )
+       adcl %edx, %eax
+DST(   movl %edx, 20(%edi)     )
+
+SRC(   movl 24(%esi), %ebx     )
+SRC(   movl 28(%esi), %edx     )
+       adcl %ebx, %eax
+DST(   movl %ebx, 24(%edi)     )
+       adcl %edx, %eax
+DST(   movl %edx, 28(%edi)     )
+
+       lea 32(%esi), %esi
+       lea 32(%edi), %edi
+       dec %ecx
+       jne 1b
+       adcl $0, %eax
+2:     movl FP(%esp), %edx
+       movl %edx, %ecx
+       andl $0x1c, %edx
+       je 4f
+       shrl $2, %edx                   # This clears CF
+SRC(3: movl (%esi), %ebx       )
+       adcl %ebx, %eax
+DST(   movl %ebx, (%edi)       )
+       lea 4(%esi), %esi
+       lea 4(%edi), %edi
+       dec %edx
+       jne 3b
+       adcl $0, %eax
+4:     andl $3, %ecx
+       jz 7f
+       cmpl $2, %ecx
+       jb 5f
+SRC(   movw (%esi), %cx        )
+       leal 2(%esi), %esi
+DST(   movw %cx, (%edi)        )
+       leal 2(%edi), %edi
+       je 6f
+       shll $16,%ecx
+SRC(5: movb (%esi), %cl        )
+DST(   movb %cl, (%edi)        )
+6:     addl %ecx, %eax
+       adcl $0, %eax
+7:
+5000:
+
+# Exception handler:
+.section .fixup, "ax"                                                  
+
+6001:
+       movl ARGBASE+20(%esp), %ebx     # src_err_ptr
+       movl $-EFAULT, (%ebx)
+
+       # zero the complete destination - computing the rest
+       # is too much work 
+       movl ARGBASE+8(%esp), %edi      # dst
+       movl ARGBASE+12(%esp), %ecx     # len
+       xorl %eax,%eax
+       rep ; stosb
+
+       jmp 5000b
+
+6002:
+       movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
+       movl $-EFAULT,(%ebx)
+       jmp 5000b
+
+.previous
+
+       popl %ebx
+       popl %esi
+       popl %edi
+       popl %ecx                       # equivalent to addl $4,%esp
+       ret     
+
+#else
+
+/* Version for PentiumII/PPro */
+
+#define ROUND1(x) \
+       SRC(movl x(%esi), %ebx  )       ;       \
+       addl %ebx, %eax                 ;       \
+       DST(movl %ebx, x(%edi)  )       ; 
+
+#define ROUND(x) \
+       SRC(movl x(%esi), %ebx  )       ;       \
+       adcl %ebx, %eax                 ;       \
+       DST(movl %ebx, x(%edi)  )       ;
+
+#define ARGBASE 12
+               
+csum_partial_copy_generic_i386:
+       pushl %ebx
+       pushl %edi
+       pushl %esi
+       movl ARGBASE+4(%esp),%esi       #src
+       movl ARGBASE+8(%esp),%edi       #dst    
+       movl ARGBASE+12(%esp),%ecx      #len
+       movl ARGBASE+16(%esp),%eax      #sum
+#      movl %ecx, %edx  
+       movl %ecx, %ebx  
+       movl %esi, %edx
+       shrl $6, %ecx     
+       andl $0x3c, %ebx  
+       negl %ebx
+       subl %ebx, %esi  
+       subl %ebx, %edi  
+       lea  -1(%esi),%edx
+       andl $-32,%edx
+       lea 3f(%ebx,%ebx), %ebx
+       testl %esi, %esi 
+       jmp *%ebx
+1:     addl $64,%esi
+       addl $64,%edi 
+       SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
+       ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)    
+       ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)    
+       ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)    
+       ROUND (-16) ROUND(-12) ROUND(-8)  ROUND(-4)     
+3:     adcl $0,%eax
+       addl $64, %edx
+       dec %ecx
+       jge 1b
+4:     movl ARGBASE+12(%esp),%edx      #len
+       andl $3, %edx
+       jz 7f
+       cmpl $2, %edx
+       jb 5f
+SRC(   movw (%esi), %dx         )
+       leal 2(%esi), %esi
+DST(   movw %dx, (%edi)         )
+       leal 2(%edi), %edi
+       je 6f
+       shll $16,%edx
+5:
+SRC(   movb (%esi), %dl         )
+DST(   movb %dl, (%edi)         )
+6:     addl %edx, %eax
+       adcl $0, %eax
+7:
+.section .fixup, "ax"
+6001:  movl    ARGBASE+20(%esp), %ebx  # src_err_ptr   
+       movl $-EFAULT, (%ebx)
+       # zero the complete destination (computing the rest is too much work)
+       movl ARGBASE+8(%esp),%edi       # dst
+       movl ARGBASE+12(%esp),%ecx      # len
+       xorl %eax,%eax
+       rep; stosb
+       jmp 7b
+6002:  movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
+       movl $-EFAULT, (%ebx)
+       jmp  7b                 
+.previous                              
+
+       popl %esi
+       popl %edi
+       popl %ebx
+       ret
+                               
+#undef ROUND
+#undef ROUND1          
+               
+#endif
diff --git a/arch/um/sys-x86/delay_32.c b/arch/um/sys-x86/delay_32.c
new file mode 100644 (file)
index 0000000..f3fe1a6
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
+ * Mostly copied from arch/x86/lib/delay.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <asm/param.h>
+
+void __delay(unsigned long loops)
+{
+       asm volatile(
+               "test %0,%0\n"
+               "jz 3f\n"
+               "jmp 1f\n"
+
+               ".align 16\n"
+               "1: jmp 2f\n"
+
+               ".align 16\n"
+               "2: dec %0\n"
+               " jnz 2b\n"
+               "3: dec %0\n"
+
+               : /* we don't need output */
+               : "a" (loops)
+       );
+}
+EXPORT_SYMBOL(__delay);
+
+inline void __const_udelay(unsigned long xloops)
+{
+       int d0;
+
+       xloops *= 4;
+       asm("mull %%edx"
+               : "=d" (xloops), "=&a" (d0)
+               : "1" (xloops), "0"
+               (loops_per_jiffy * (HZ/4)));
+
+       __delay(++xloops);
+}
+EXPORT_SYMBOL(__const_udelay);
+
+void __udelay(unsigned long usecs)
+{
+       __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long nsecs)
+{
+       __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
+}
+EXPORT_SYMBOL(__ndelay);
diff --git a/arch/um/sys-x86/delay_64.c b/arch/um/sys-x86/delay_64.c
new file mode 100644 (file)
index 0000000..f3fe1a6
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
+ * Mostly copied from arch/x86/lib/delay.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <asm/param.h>
+
+void __delay(unsigned long loops)
+{
+       asm volatile(
+               "test %0,%0\n"
+               "jz 3f\n"
+               "jmp 1f\n"
+
+               ".align 16\n"
+               "1: jmp 2f\n"
+
+               ".align 16\n"
+               "2: dec %0\n"
+               " jnz 2b\n"
+               "3: dec %0\n"
+
+               : /* we don't need output */
+               : "a" (loops)
+       );
+}
+EXPORT_SYMBOL(__delay);
+
+inline void __const_udelay(unsigned long xloops)
+{
+       int d0;
+
+       xloops *= 4;
+       asm("mull %%edx"
+               : "=d" (xloops), "=&a" (d0)
+               : "1" (xloops), "0"
+               (loops_per_jiffy * (HZ/4)));
+
+       __delay(++xloops);
+}
+EXPORT_SYMBOL(__const_udelay);
+
+void __udelay(unsigned long usecs)
+{
+       __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long nsecs)
+{
+       __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
+}
+EXPORT_SYMBOL(__ndelay);
diff --git a/arch/um/sys-x86/elfcore.c b/arch/um/sys-x86/elfcore.c
new file mode 100644 (file)
index 0000000..6bb49b6
--- /dev/null
@@ -0,0 +1,83 @@
+#include <linux/elf.h>
+#include <linux/coredump.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+
+#include <asm/elf.h>
+
+
+Elf32_Half elf_core_extra_phdrs(void)
+{
+       return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
+}
+
+int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
+                              unsigned long limit)
+{
+       if ( vsyscall_ehdr ) {
+               const struct elfhdr *const ehdrp =
+                       (struct elfhdr *) vsyscall_ehdr;
+               const struct elf_phdr *const phdrp =
+                       (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
+               int i;
+               Elf32_Off ofs = 0;
+
+               for (i = 0; i < ehdrp->e_phnum; ++i) {
+                       struct elf_phdr phdr = phdrp[i];
+
+                       if (phdr.p_type == PT_LOAD) {
+                               ofs = phdr.p_offset = offset;
+                               offset += phdr.p_filesz;
+                       } else {
+                               phdr.p_offset += ofs;
+                       }
+                       phdr.p_paddr = 0; /* match other core phdrs */
+                       *size += sizeof(phdr);
+                       if (*size > limit
+                           || !dump_write(file, &phdr, sizeof(phdr)))
+                               return 0;
+               }
+       }
+       return 1;
+}
+
+int elf_core_write_extra_data(struct file *file, size_t *size,
+                             unsigned long limit)
+{
+       if ( vsyscall_ehdr ) {
+               const struct elfhdr *const ehdrp =
+                       (struct elfhdr *) vsyscall_ehdr;
+               const struct elf_phdr *const phdrp =
+                       (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
+               int i;
+
+               for (i = 0; i < ehdrp->e_phnum; ++i) {
+                       if (phdrp[i].p_type == PT_LOAD) {
+                               void *addr = (void *) phdrp[i].p_vaddr;
+                               size_t filesz = phdrp[i].p_filesz;
+
+                               *size += filesz;
+                               if (*size > limit
+                                   || !dump_write(file, addr, filesz))
+                                       return 0;
+                       }
+               }
+       }
+       return 1;
+}
+
+size_t elf_core_extra_data_size(void)
+{
+       if ( vsyscall_ehdr ) {
+               const struct elfhdr *const ehdrp =
+                       (struct elfhdr *)vsyscall_ehdr;
+               const struct elf_phdr *const phdrp =
+                       (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
+               int i;
+
+               for (i = 0; i < ehdrp->e_phnum; ++i)
+                       if (phdrp[i].p_type == PT_LOAD)
+                               return (size_t) phdrp[i].p_filesz;
+       }
+       return 0;
+}
diff --git a/arch/um/sys-x86/fault.c b/arch/um/sys-x86/fault.c
new file mode 100644 (file)
index 0000000..d670f68
--- /dev/null
@@ -0,0 +1,28 @@
+/* 
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#include "sysdep/ptrace.h"
+
+/* These two are from asm-um/uaccess.h and linux/module.h, check them. */
+struct exception_table_entry
+{
+       unsigned long insn;     /* address of the faulting instruction */
+       unsigned long fixup;    /* address to resume execution at */
+};
+
+/* NOTE(review): parameter name "add" is presumably a typo for "addr". */
+const struct exception_table_entry *search_exception_tables(unsigned long add);
+
+/* Compare this to arch/i386/mm/extable.c:fixup_exception() */
+/*
+ * Look up an exception-table fixup for a faulting address.  When one
+ * exists, redirect the saved IP to the fixup handler and return 1;
+ * return 0 so the caller treats the fault as fatal otherwise.
+ */
+int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
+{
+       const struct exception_table_entry *fixup;
+
+       fixup = search_exception_tables(address);
+       if (fixup != 0) {
+               UPT_IP(regs) = fixup->fixup;
+               return 1;
+       }
+       return 0;
+}
diff --git a/arch/um/sys-x86/ksyms.c b/arch/um/sys-x86/ksyms.c
new file mode 100644 (file)
index 0000000..2e8f43e
--- /dev/null
@@ -0,0 +1,13 @@
+#include <linux/module.h>
+#include <asm/string.h>
+#include <asm/checksum.h>
+
+#ifndef CONFIG_X86_32
+/*XXX: we need them because they would be exported by x86_64 */
+/* gcc >= 4.3 emits calls to memcpy itself; older gcc called __memcpy */
+#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
+EXPORT_SYMBOL(memcpy);
+#else
+EXPORT_SYMBOL(__memcpy);
+#endif
+#endif
+/* checksum helper implemented in checksum_*.S, used by networking */
+EXPORT_SYMBOL(csum_partial);
diff --git a/arch/um/sys-x86/ldt.c b/arch/um/sys-x86/ldt.c
new file mode 100644 (file)
index 0000000..3f2bf20
--- /dev/null
@@ -0,0 +1,502 @@
+/*
+ * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/unistd.h>
+#include "os.h"
+#include "proc_mm.h"
+#include "skas.h"
+#include "skas_ptrace.h"
+#include "sysdep/tls.h"
+
+extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
+
+/*
+ * Install one LDT entry into the address space identified by mm_idp,
+ * using whichever mechanism the host provides: PTRACE_LDT, or the
+ * modify_ldt syscall stub run inside the child process.  *addr is the
+ * stub data-page cursor (advanced by syscall_stub_data); "done" flags
+ * the last entry of a batch so queued stub syscalls get flushed.
+ * Returns 0 on success or a negative errno.
+ */
+static long write_ldt_entry(struct mm_id *mm_idp, int func,
+                    struct user_desc *desc, void **addr, int done)
+{
+       long res;
+
+       if (proc_mm) {
+               /*
+                * This is a special handling for the case, that the mm to
+                * modify isn't current->active_mm.
+                * If this is called directly by modify_ldt,
+                *     (current->active_mm->context.skas.u == mm_idp)
+                * will be true. So no call to __switch_mm(mm_idp) is done.
+                * If this is called in case of init_new_ldt or PTRACE_LDT,
+                * mm_idp won't belong to current->active_mm, but child->mm.
+                * So we need to switch child's mm into our userspace, then
+                * later switch back.
+                *
+                * Note: I'm unsure: should interrupts be disabled here?
+                */
+               if (!current->active_mm || current->active_mm == &init_mm ||
+                   mm_idp != &current->active_mm->context.id)
+                       __switch_mm(mm_idp);
+       }
+
+       if (ptrace_ldt) {
+               struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
+                       .func = func,
+                       .ptr = desc,
+                       .bytecount = sizeof(*desc)};
+               u32 cpu;
+               int pid;
+
+               /* Without proc_mm the mm has its own host pid to trace */
+               if (!proc_mm)
+                       pid = mm_idp->u.pid;
+               else {
+                       cpu = get_cpu();
+                       pid = userspace_pid[cpu];
+               }
+
+               res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);
+
+               if (proc_mm)
+                       put_cpu();
+       }
+       else {
+               /* Stage the descriptor in the stub's data page, rounded
+                * up to a whole number of longs, then run modify_ldt
+                * through the syscall stub. */
+               void *stub_addr;
+               res = syscall_stub_data(mm_idp, (unsigned long *)desc,
+                                       (sizeof(*desc) + sizeof(long) - 1) &
+                                           ~(sizeof(long) - 1),
+                                       addr, &stub_addr);
+               if (!res) {
+                       unsigned long args[] = { func,
+                                                (unsigned long)stub_addr,
+                                                sizeof(*desc),
+                                                0, 0, 0 };
+                       res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
+                                              0, addr, done);
+               }
+       }
+
+       if (proc_mm) {
+               /*
+                * This is the second part of special handling, that makes
+                * PTRACE_LDT possible to implement.
+                */
+               if (current->active_mm && current->active_mm != &init_mm &&
+                   mm_idp != &current->active_mm->context.id)
+                       __switch_mm(&current->active_mm->context.id);
+       }
+
+       return res;
+}
+
+/*
+ * Read the LDT of the host process backing the current task via
+ * PTRACE_LDT and copy it to userspace.  Returns the number of bytes
+ * read, or a negative errno.
+ */
+static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
+{
+       int res, n;
+       struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
+                       .func = 0,
+                       .bytecount = bytecount,
+                       .ptr = kmalloc(bytecount, GFP_KERNEL)};
+       u32 cpu;
+
+       if (ptrace_ldt.ptr == NULL)
+               return -ENOMEM;
+
+       /*
+        * This is called from sys_modify_ldt only, so userspace_pid gives
+        * us the right number
+        */
+
+       cpu = get_cpu();
+       res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
+       put_cpu();
+       if (res < 0)
+               goto out;
+
+       /* res is the byte count actually read from the host */
+       n = copy_to_user(ptr, ptrace_ldt.ptr, res);
+       if (n != 0)
+               res = -EFAULT;
+
+  out:
+       kfree(ptrace_ldt.ptr);
+
+       return res;
+}
+
+/*
+ * In skas mode, we hold our own ldt data in UML.
+ * Thus, the code implementing sys_modify_ldt_skas
+ * is very similar to (and mostly stolen from) sys_modify_ldt
+ * for arch/i386/kernel/ldt.c
+ * The routines copied and modified in part are:
+ * - read_ldt
+ * - read_default_ldt
+ * - write_ldt
+ * - sys_modify_ldt_skas
+ */
+
+/*
+ * Copy the current mm's LDT to userspace (modify_ldt func 0).
+ * Returns the number of bytes written to *ptr (the requested count,
+ * clamped to the maximum LDT size) or -EFAULT.  Entries beyond
+ * entry_count are reported as zeros via clear_user().
+ */
+static int read_ldt(void __user * ptr, unsigned long bytecount)
+{
+       int i, err = 0;
+       unsigned long size;
+       uml_ldt_t * ldt = &current->mm->context.ldt;
+
+       if (!ldt->entry_count)
+               goto out;
+       if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
+               bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
+       /* on success we return the (clamped) byte count */
+       err = bytecount;
+
+       if (ptrace_ldt)
+               return read_ldt_from_host(ptr, bytecount);
+
+       mutex_lock(&ldt->lock);
+       if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
+               /* small LDT: entries live in the inline direct array */
+               size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
+               if (size > bytecount)
+                       size = bytecount;
+               if (copy_to_user(ptr, ldt->u.entries, size))
+                       err = -EFAULT;
+               bytecount -= size;
+               ptr += size;
+       }
+       else {
+               /* large LDT: entries are spread over whole pages */
+               for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
+                    i++) {
+                       size = PAGE_SIZE;
+                       if (size > bytecount)
+                               size = bytecount;
+                       if (copy_to_user(ptr, ldt->u.pages[i], size)) {
+                               err = -EFAULT;
+                               break;
+                       }
+                       bytecount -= size;
+                       ptr += size;
+               }
+       }
+       mutex_unlock(&ldt->lock);
+
+       if (bytecount == 0 || err == -EFAULT)
+               goto out;
+
+       /* zero-fill the tail the caller asked for beyond our entries */
+       if (clear_user(ptr, bytecount))
+               err = -EFAULT;
+
+out:
+       return err;
+}
+
+/*
+ * modify_ldt func 2: report the "default" ldt.  UML doesn't support
+ * lcall7 and lcall27, so there is no real default ldt; emulate an
+ * empty one of the common host default size (5 entries).
+ */
+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
+{
+       int err;
+
+       if (bytecount > 5*LDT_ENTRY_SIZE)
+               bytecount = 5*LDT_ENTRY_SIZE;
+
+       err = clear_user(ptr, bytecount) ? -EFAULT : (int) bytecount;
+
+       return err;
+}
+
+/*
+ * modify_ldt func 1/0x11: validate one struct user_desc from userspace,
+ * install it in the host, then mirror it into our local LDT copy.
+ * Returns 0 on success or a negative errno.
+ */
+static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
+{
+       uml_ldt_t * ldt = &current->mm->context.ldt;
+       struct mm_id * mm_idp = &current->mm->context.id;
+       int i, err;
+       struct user_desc ldt_info;
+       struct ldt_entry entry0, *ldt_p;
+       void *addr = NULL;
+
+       /* the syscall hands us exactly one struct user_desc */
+       err = -EINVAL;
+       if (bytecount != sizeof(ldt_info))
+               goto out;
+       err = -EFAULT;
+       if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
+               goto out;
+
+       err = -EINVAL;
+       if (ldt_info.entry_number >= LDT_ENTRIES)
+               goto out;
+       if (ldt_info.contents == 3) {
+               if (func == 1)
+                       goto out;
+               if (ldt_info.seg_not_present == 0)
+                       goto out;
+       }
+
+       if (!ptrace_ldt)
+               mutex_lock(&ldt->lock);
+
+       err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
+       if (err) {
+               /*
+                * Bug fix: with ptrace_ldt the mutex above was never
+                * taken, so the old unconditional "goto out_unlock" on
+                * this error path unlocked a mutex that was not held.
+                */
+               if (ptrace_ldt)
+                       goto out;
+               goto out_unlock;
+       }
+       if (ptrace_ldt) {
+               /* With PTRACE_LDT available, this is used as a flag only */
+               ldt->entry_count = 1;
+               goto out;
+       }
+
+       if (ldt_info.entry_number >= ldt->entry_count &&
+           ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
+               /* grow from the inline direct array to page-backed storage */
+               for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
+                    i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
+                    i++) {
+                       if (i == 0)
+                               memcpy(&entry0, ldt->u.entries,
+                                      sizeof(entry0));
+                       ldt->u.pages[i] = (struct ldt_entry *)
+                               __get_free_page(GFP_KERNEL|__GFP_ZERO);
+                       if (!ldt->u.pages[i]) {
+                               err = -ENOMEM;
+                               /* Undo the change in host */
+                               memset(&ldt_info, 0, sizeof(ldt_info));
+                               write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
+                               goto out_unlock;
+                       }
+                       if (i == 0) {
+                               /* u.entries and u.pages overlap in a union;
+                                * entry 0 was saved above before page 0
+                                * clobbered it */
+                               memcpy(ldt->u.pages[0], &entry0,
+                                      sizeof(entry0));
+                               memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
+                                      sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
+                       }
+                       ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
+               }
+       }
+       if (ldt->entry_count <= ldt_info.entry_number)
+               ldt->entry_count = ldt_info.entry_number + 1;
+
+       if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
+               ldt_p = ldt->u.entries + ldt_info.entry_number;
+       else
+               ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
+                       ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
+
+       if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
+          (func == 1 || LDT_empty(&ldt_info))) {
+               /* oldstyle func 1 or an empty descriptor clears the slot */
+               ldt_p->a = 0;
+               ldt_p->b = 0;
+       }
+       else{
+               if (func == 1)
+                       ldt_info.useable = 0;
+               ldt_p->a = LDT_entry_a(&ldt_info);
+               ldt_p->b = LDT_entry_b(&ldt_info);
+       }
+       err = 0;
+
+out_unlock:
+       mutex_unlock(&ldt->lock);
+out:
+       return err;
+}
+
+/*
+ * Dispatch a modify_ldt() call on its function code: 0 = read,
+ * 1/0x11 = write (old/new style), 2 = read default ldt.
+ */
+static long do_modify_ldt_skas(int func, void __user *ptr,
+                              unsigned long bytecount)
+{
+       switch (func) {
+       case 0:
+               return read_ldt(ptr, bytecount);
+       case 1:
+       case 0x11:
+               return write_ldt(ptr, bytecount, func);
+       case 2:
+               return read_default_ldt(ptr, bytecount);
+       default:
+               return -ENOSYS;
+       }
+}
+
+static DEFINE_SPINLOCK(host_ldt_lock);
+/* preallocated -1-terminated slot list; [0]={0,-1} "slot 0 only",
+ * dummy_list+1 = empty list */
+static short dummy_list[9] = {0, -1};
+/* -1-terminated list of host ldt slots in use; NULL until probed */
+static short * host_ldt_entries = NULL;
+
+/*
+ * One-time probe of the host's LDT: record which slots the host has in
+ * use (as a -1-terminated list in host_ldt_entries) so init_new_ldt()
+ * can clear exactly those entries in new address spaces.
+ */
+static void ldt_get_host_info(void)
+{
+       long ret;
+       struct ldt_entry * ldt;
+       short *tmp;
+       int i, size, k, order;
+
+       spin_lock(&host_ldt_lock);
+
+       if (host_ldt_entries != NULL) {
+               spin_unlock(&host_ldt_lock);
+               return;
+       }
+       /* claim the probe: publish the empty list (dummy_list+1 starts
+        * with -1) so concurrent callers see "nothing to clear" while we
+        * run the probe outside the spinlock */
+       host_ldt_entries = dummy_list+1;
+
+       spin_unlock(&host_ldt_lock);
+
+       /* order = ceil(log2(LDT_PAGES_MAX)) for the allocation below */
+       for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
+               ;
+
+       ldt = (struct ldt_entry *)
+             __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
+       if (ldt == NULL) {
+               printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
+                      "for host ldt\n");
+               return;
+       }
+
+       ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
+       if (ret < 0) {
+               printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
+               goto out_free;
+       }
+       if (ret == 0) {
+               /* default_ldt is active, simply write an empty entry 0 */
+               host_ldt_entries = dummy_list;
+               goto out_free;
+       }
+
+       /* count occupied slots to size the list */
+       for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
+               if (ldt[i].a != 0 || ldt[i].b != 0)
+                       size++;
+       }
+
+       if (size < ARRAY_SIZE(dummy_list))
+               host_ldt_entries = dummy_list;
+       else {
+               /* +1 for the -1 terminator */
+               size = (size + 1) * sizeof(dummy_list[0]);
+               tmp = kmalloc(size, GFP_KERNEL);
+               if (tmp == NULL) {
+                       printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
+                              "host ldt list\n");
+                       goto out_free;
+               }
+               host_ldt_entries = tmp;
+       }
+
+       for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
+               if (ldt[i].a != 0 || ldt[i].b != 0)
+                       host_ldt_entries[k++] = i;
+       }
+       host_ldt_entries[k] = -1;
+
+out_free:
+       free_pages((unsigned long)ldt, order);
+}
+
+/*
+ * Set up the LDT for a freshly created mm.  With no from_mm, start from
+ * a clean ldt (clearing whatever the host handed us); otherwise copy
+ * from_mm's LDT into new_mm, both on the host side (proc_mm) and in our
+ * local shadow copy (!ptrace_ldt).  Returns 0 or a negative errno.
+ */
+long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
+{
+       struct user_desc desc;
+       short * num_p;
+       int i;
+       long page, err=0;
+       void *addr = NULL;
+       struct proc_mm_op copy;
+
+
+       if (!ptrace_ldt)
+               mutex_init(&new_mm->ldt.lock);
+
+       if (!from_mm) {
+               memset(&desc, 0, sizeof(desc));
+               /*
+                * We have to initialize a clean ldt.
+                */
+               if (proc_mm) {
+                       /*
+                        * If the new mm was created using proc_mm, host's
+                        * default-ldt currently is assigned, which normally
+                        * contains the call-gates for lcall7 and lcall27.
+                        * To remove these gates, we simply write an empty
+                        * entry as number 0 to the host.
+                        */
+                       err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
+               }
+               else{
+                       /*
+                        * Now we try to retrieve info about the ldt, we
+                        * inherited from the host. All ldt-entries found
+                        * will be reset in the following loop
+                        */
+                       ldt_get_host_info();
+                       for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
+                               desc.entry_number = *num_p;
+                               /* last entry flushes the queued stub calls */
+                               err = write_ldt_entry(&new_mm->id, 1, &desc,
+                                                     &addr, *(num_p + 1) == -1);
+                               if (err)
+                                       break;
+                       }
+               }
+               new_mm->ldt.entry_count = 0;
+
+               goto out;
+       }
+
+       if (proc_mm) {
+               /*
+                * We have a valid from_mm, so we now have to copy the LDT of
+                * from_mm to new_mm, because using proc_mm an new mm with
+                * an empty/default LDT was created in new_mm()
+                */
+               copy = ((struct proc_mm_op) { .op       = MM_COPY_SEGMENTS,
+                                             .u        =
+                                             { .copy_segments =
+                                                       from_mm->id.u.mm_fd } } );
+               i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
+               if (i != sizeof(copy))
+                       printk(KERN_ERR "new_mm : /proc/mm copy_segments "
+                              "failed, err = %d\n", -i);
+       }
+
+       if (!ptrace_ldt) {
+               /*
+                * Our local LDT is used to supply the data for
+                * modify_ldt(READLDT), if PTRACE_LDT isn't available,
+                * i.e., we have to use the stub for modify_ldt, which
+                * can't handle the big read buffer of up to 64kB.
+                */
+               mutex_lock(&from_mm->ldt.lock);
+               if (from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES)
+                       memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
+                              sizeof(new_mm->ldt.u.entries));
+               else {
+                       /* page-backed LDT: duplicate every entry page */
+                       i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
+                       while (i-->0) {
+                               page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+                               if (!page) {
+                                       err = -ENOMEM;
+                                       break;
+                               }
+                               new_mm->ldt.u.pages[i] =
+                                       (struct ldt_entry *) page;
+                               memcpy(new_mm->ldt.u.pages[i],
+                                      from_mm->ldt.u.pages[i], PAGE_SIZE);
+                       }
+               }
+               new_mm->ldt.entry_count = from_mm->ldt.entry_count;
+               mutex_unlock(&from_mm->ldt.lock);
+       }
+
+    out:
+       return err;
+}
+
+
+/* Tear down an mm's shadow LDT, releasing any page-backed entry storage. */
+void free_ldt(struct mm_context *mm)
+{
+       if (!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES) {
+               int n = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
+
+               while (n-- > 0)
+                       free_page((long) mm->ldt.u.pages[n]);
+       }
+       mm->ldt.entry_count = 0;
+}
+
+/* Syscall entry point for modify_ldt(); skas is the only mode left. */
+int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+{
+       return do_modify_ldt_skas(func, ptr, bytecount);
+}
diff --git a/arch/um/sys-x86/mem_32.c b/arch/um/sys-x86/mem_32.c
new file mode 100644 (file)
index 0000000..639900a
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2011 Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <asm/mman.h>
+
+static struct vm_area_struct gate_vma;
+
+/* Build the pseudo-vma describing the host's user fixmap (vsyscall page). */
+static int __init gate_vma_init(void)
+{
+       /* no user fixmap on this host -> no gate area at all */
+       if (!FIXADDR_USER_START)
+               return 0;
+
+       gate_vma.vm_mm = NULL;
+       gate_vma.vm_start = FIXADDR_USER_START;
+       gate_vma.vm_end = FIXADDR_USER_END;
+       gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+       /* r-x protection, matching the flags above */
+       gate_vma.vm_page_prot = __P101;
+
+       /*
+        * Make sure the vDSO gets into every core dump.
+        * Dumping its contents makes post-mortem fully interpretable later
+        * without matching up the same kernel and hardware config to see
+        * what PC values meant.
+        */
+       gate_vma.vm_flags |= VM_ALWAYSDUMP;
+
+       return 0;
+}
+__initcall(gate_vma_init);
+
+/* Return the gate vma, or NULL when the host provides no user fixmap. */
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+       if (!FIXADDR_USER_START)
+               return NULL;
+       return &gate_vma;
+}
+
+/* Nonzero iff addr falls inside the fixmap-backed gate area. */
+int in_gate_area_no_mm(unsigned long addr)
+{
+       return FIXADDR_USER_START &&
+              (addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END);
+}
+
+/* Nonzero iff addr lies inside this mm's gate vma (if one exists). */
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+       struct vm_area_struct *gate = get_gate_vma(mm);
+
+       return gate && (addr >= gate->vm_start) && (addr < gate->vm_end);
+}
diff --git a/arch/um/sys-x86/mem_64.c b/arch/um/sys-x86/mem_64.c
new file mode 100644 (file)
index 0000000..5465187
--- /dev/null
@@ -0,0 +1,26 @@
+#include "linux/mm.h"
+#include "asm/page.h"
+#include "asm/mman.h"
+
+/* Label the vdso mapping as "[vdso]" in /proc/<pid>/maps. */
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+       if (vma->vm_mm && vma->vm_start == um_vdso_addr)
+               return "[vdso]";
+
+       return NULL;
+}
+
+/*
+ * 64-bit UML has no fixmap-based gate area (the vdso is an ordinary
+ * vma, see arch_vma_name above), so all gate-area queries answer "no".
+ */
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+       return NULL;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+       return 0;
+}
+
+int in_gate_area_no_mm(unsigned long addr)
+{
+       return 0;
+}
diff --git a/arch/um/sys-x86/ptrace_32.c b/arch/um/sys-x86/ptrace_32.c
new file mode 100644 (file)
index 0000000..a174fde
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/mm.h"
+#include "linux/sched.h"
+#include "asm/uaccess.h"
+#include "skas.h"
+
+extern int arch_switch_tls(struct task_struct *to);
+
+/*
+ * Context-switch hook: reload the incoming task's TLS descriptors.
+ * -EINVAL (no TLS set up yet) is expected and only logged; any other
+ * failure is reported as anomalous.
+ */
+void arch_switch_to(struct task_struct *to)
+{
+       int err = arch_switch_tls(to);
+
+       if (err == 0)
+               return;
+
+       if (err == -EINVAL)
+               printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n");
+       else
+               printk(KERN_WARNING "arch_switch_tls failed, errno %d, "
+                      "not EINVAL\n", -err);
+}
+
+/*
+ * Return nonzero if the two bytes at addr in the traced process are a
+ * syscall instruction (int 0x80 or sysenter).  Unreadable addresses
+ * are reported as a syscall (1) after logging, erring on the safe side
+ * for the single-stepping caller.
+ */
+int is_syscall(unsigned long addr)
+{
+       unsigned short instr;
+       int n;
+
+       n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
+       if (n) {
+               /* access_process_vm() grants access to vsyscall and stub,
+                * while copy_from_user doesn't. Maybe access_process_vm is
+                * slow, but that doesn't matter, since it will be called only
+                * in case of singlestepping, if copy_from_user failed.
+                */
+               n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+               if (n != sizeof(instr)) {
+                       printk(KERN_ERR "is_syscall : failed to read "
+                              "instruction from 0x%lx\n", addr);
+                       return 1;
+               }
+       }
+       /* int 0x80 or sysenter */
+       return (instr == 0x80cd) || (instr == 0x340f);
+}
+
+/* determines which flags the user has access to. */
+/* 1 = access 0 = no access */
+#define FLAG_MASK 0x00044dd5
+
+/* map struct user register indices onto UML's saved host register array */
+static const int reg_offsets[] = {
+       [EBX] = HOST_EBX,
+       [ECX] = HOST_ECX,
+       [EDX] = HOST_EDX,
+       [ESI] = HOST_ESI,
+       [EDI] = HOST_EDI,
+       [EBP] = HOST_EBP,
+       [EAX] = HOST_EAX,
+       [DS] = HOST_DS,
+       [ES] = HOST_ES,
+       [FS] = HOST_FS,
+       [GS] = HOST_GS,
+       [EIP] = HOST_IP,
+       [CS] = HOST_CS,
+       [EFL] = HOST_EFLAGS,
+       [UESP] = HOST_SP,
+       [SS] = HOST_SS,
+};
+
+/*
+ * Store one register value for a traced child (PTRACE_POKEUSR).
+ * regno arrives as a byte offset into struct user and is converted to
+ * a register index.  Returns 0 or -EIO on invalid segment selectors.
+ */
+int putreg(struct task_struct *child, int regno, unsigned long value)
+{
+       regno >>= 2;
+       switch (regno) {
+       case EBX:
+       case ECX:
+       case EDX:
+       case ESI:
+       case EDI:
+       case EBP:
+       case EAX:
+       case EIP:
+       case UESP:
+               break;
+       case FS:
+               if (value && (value & 3) != 3)
+                       return -EIO;
+               break;
+       case GS:
+               if (value && (value & 3) != 3)
+                       return -EIO;
+               break;
+       case DS:
+       case ES:
+               if (value && (value & 3) != 3)
+                       return -EIO;
+               value &= 0xffff;
+               break;
+       case SS:
+       case CS:
+               /* CS/SS must be valid ring-3 selectors */
+               if ((value & 3) != 3)
+                       return -EIO;
+               value &= 0xffff;
+               break;
+       case EFL:
+               value &= FLAG_MASK;
+               /*
+                * Replace the user-writable flag bits instead of OR-ing
+                * them in: the old code could only ever set flags, so a
+                * debugger clearing a flag (e.g. TF) was silently ignored.
+                */
+               child->thread.regs.regs.gp[HOST_EFLAGS] &= ~FLAG_MASK;
+               child->thread.regs.regs.gp[HOST_EFLAGS] |= value;
+               return 0;
+       case ORIG_EAX:
+               child->thread.regs.regs.syscall = value;
+               return 0;
+       default :
+               panic("Bad register in putreg() : %d\n", regno);
+       }
+       child->thread.regs.regs.gp[reg_offsets[regno]] = value;
+       return 0;
+}
+
+/* Write one word into the USER area: a register or a debug register. */
+int poke_user(struct task_struct *child, long addr, long data)
+{
+       if ((addr & 3) || addr < 0)
+               return -EIO;
+
+       if (addr < MAX_REG_OFFSET)
+               return putreg(child, addr, data);
+       else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
+                (addr <= offsetof(struct user, u_debugreg[7]))) {
+               addr -= offsetof(struct user, u_debugreg[0]);
+               addr = addr >> 2;
+               /* dr4 and dr5 are reserved on x86 */
+               if ((addr == 4) || (addr == 5))
+                       return -EIO;
+               child->thread.arch.debugregs[addr] = data;
+               return 0;
+       }
+       return -EIO;
+}
+
+/*
+ * Read one register of a traced child.  regno is a byte offset into
+ * struct user; segment registers are masked down to their 16 bits.
+ */
+unsigned long getreg(struct task_struct *child, int regno)
+{
+       unsigned long mask = ~0UL;
+
+       regno >>= 2;
+       switch (regno) {
+       case ORIG_EAX:
+               return child->thread.regs.regs.syscall;
+       case FS:
+       case GS:
+       case DS:
+       case ES:
+       case SS:
+       case CS:
+               mask = 0xffff;
+               break;
+       case EIP:
+       case UESP:
+       case EAX:
+       case EBX:
+       case ECX:
+       case EDX:
+       case ESI:
+       case EDI:
+       case EBP:
+       case EFL:
+               break;
+       default:
+               panic("Bad register in getreg() : %d\n", regno);
+       }
+       return mask & child->thread.regs.regs.gp[reg_offsets[regno]];
+}
+
+/* read the word at location addr in the USER area. */
+/* Returns 0 after storing the word at *data in userspace, else -EIO/-EFAULT. */
+int peek_user(struct task_struct *child, long addr, long data)
+{
+       unsigned long tmp;
+
+       if ((addr & 3) || addr < 0)
+               return -EIO;
+
+       tmp = 0;  /* Default return condition */
+       if (addr < MAX_REG_OFFSET) {
+               tmp = getreg(child, addr);
+       }
+       else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
+                (addr <= offsetof(struct user, u_debugreg[7]))) {
+               addr -= offsetof(struct user, u_debugreg[0]);
+               addr = addr >> 2;
+               tmp = child->thread.arch.debugregs[addr];
+       }
+       return put_user(tmp, (unsigned long __user *) data);
+}
+
+/* PTRACE_GETFPREGS: copy the child's FP state from the host to *buf. */
+static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
+{
+       int cpu = ((struct thread_info *) child->stack)->cpu;
+       struct user_i387_struct fpregs;
+       int err;
+
+       err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
+       if (err)
+               return err;
+
+       if (copy_to_user(buf, &fpregs, sizeof(fpregs)) > 0)
+               return -EFAULT;
+
+       return 0;
+}
+
+/* PTRACE_SETFPREGS: push a new FP image from *buf into the host process. */
+static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
+{
+       int cpu = ((struct thread_info *) child->stack)->cpu;
+       struct user_i387_struct fpregs;
+
+       if (copy_from_user(&fpregs, buf, sizeof(fpregs)) > 0)
+               return -EFAULT;
+
+       return restore_fp_registers(userspace_pid[cpu],
+                                   (unsigned long *) &fpregs);
+}
+
+/* PTRACE_GETFPXREGS: copy the child's extended (FXSR) FP state to *buf. */
+static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
+{
+       int cpu = ((struct thread_info *) child->stack)->cpu;
+       struct user_fxsr_struct fpregs;
+       int err;
+
+       err = save_fpx_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
+       if (err)
+               return err;
+
+       if (copy_to_user(buf, &fpregs, sizeof(fpregs)) > 0)
+               return -EFAULT;
+
+       return 0;
+}
+
+/* PTRACE_SETFPXREGS: push a new FXSR image from *buf into the host. */
+static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
+{
+       int cpu = ((struct thread_info *) child->stack)->cpu;
+       struct user_fxsr_struct fpregs;
+
+       if (copy_from_user(&fpregs, buf, sizeof(fpregs)) > 0)
+               return -EFAULT;
+
+       return restore_fpx_registers(userspace_pid[cpu],
+                                    (unsigned long *) &fpregs);
+}
+
+/*
+ * Handle the i386-specific ptrace requests (FP / FXSR register get and
+ * set); anything else is not ours and yields -EIO.
+ */
+long subarch_ptrace(struct task_struct *child, long request,
+                   unsigned long addr, unsigned long data)
+{
+       void __user *datap = (void __user *) data;
+
+       switch (request) {
+       case PTRACE_GETFPREGS:
+               return get_fpregs(datap, child);
+       case PTRACE_SETFPREGS:
+               return set_fpregs(datap, child);
+       case PTRACE_GETFPXREGS:
+               return get_fpxregs(datap, child);
+       case PTRACE_SETFPXREGS:
+               return set_fpxregs(datap, child);
+       }
+       return -EIO;
+}
diff --git a/arch/um/sys-x86/ptrace_64.c b/arch/um/sys-x86/ptrace_64.c
new file mode 100644 (file)
index 0000000..44e68e0
--- /dev/null
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2003 PathScale, Inc.
+ * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ *
+ * Licensed under the GPL
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#define __FRAME_OFFSETS
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+/*
+ * determines which flags the user has access to.
+ * 1 = access 0 = no access
+ */
+#define FLAG_MASK 0x44dd5UL
+
+/*
+ * Map a ptrace register offset (a byte offset into struct
+ * user_regs_struct, hence the >> 3 to get an 8-byte slot index) to the
+ * corresponding index in the saved host register array (regs.gp[]).
+ */
+static const int reg_offsets[] =
+{
+       [R8 >> 3] = HOST_R8,
+       [R9 >> 3] = HOST_R9,
+       [R10 >> 3] = HOST_R10,
+       [R11 >> 3] = HOST_R11,
+       [R12 >> 3] = HOST_R12,
+       [R13 >> 3] = HOST_R13,
+       [R14 >> 3] = HOST_R14,
+       [R15 >> 3] = HOST_R15,
+       [RIP >> 3] = HOST_IP,
+       [RSP >> 3] = HOST_SP,
+       [RAX >> 3] = HOST_RAX,
+       [RBX >> 3] = HOST_RBX,
+       [RCX >> 3] = HOST_RCX,
+       [RDX >> 3] = HOST_RDX,
+       [RSI >> 3] = HOST_RSI,
+       [RDI >> 3] = HOST_RDI,
+       [RBP >> 3] = HOST_RBP,
+       [CS >> 3] = HOST_CS,
+       [SS >> 3] = HOST_SS,
+       [FS_BASE >> 3] = HOST_FS_BASE,
+       [GS_BASE >> 3] = HOST_GS_BASE,
+       [DS >> 3] = HOST_DS,
+       [ES >> 3] = HOST_ES,
+       [FS >> 3] = HOST_FS,
+       [GS >> 3] = HOST_GS,
+       [EFLAGS >> 3] = HOST_EFLAGS,
+       [ORIG_RAX >> 3] = HOST_ORIG_RAX,
+};
+
+/*
+ * Store one register value into the child's saved register file.
+ * @regno is a byte offset into struct user_regs_struct; values that the
+ * tracer must not set freely (segments, base registers, EFLAGS) are
+ * validated or masked first.  Returns 0 on success, -EIO on a rejected
+ * value; an out-of-range regno is a kernel bug and panics.
+ */
+int putreg(struct task_struct *child, int regno, unsigned long value)
+{
+#ifdef TIF_IA32
+       /*
+        * Some code in the 64bit emulation may not be 64bit clean.
+        * Don't take any chances.
+        */
+       if (test_tsk_thread_flag(child, TIF_IA32))
+               value &= 0xffffffff;
+#endif
+       switch (regno) {
+       case R8:
+       case R9:
+       case R10:
+       case R11:
+       case R12:
+       case R13:
+       case R14:
+       case R15:
+       case RIP:
+       case RSP:
+       case RAX:
+       case RBX:
+       case RCX:
+       case RDX:
+       case RSI:
+       case RDI:
+       case RBP:
+       case ORIG_RAX:
+               break;
+
+       case FS:
+       case GS:
+       case DS:
+       case ES:
+       case SS:
+       case CS:
+               /* selectors must be null or have user RPL (3) */
+               if (value && (value & 3) != 3)
+                       return -EIO;
+               value &= 0xffff;
+               break;
+
+       case FS_BASE:
+       case GS_BASE:
+               /* base must be a canonical address */
+               if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
+                       return -EIO;
+               break;
+
+       case EFLAGS:
+               value &= FLAG_MASK;
+               /*
+                * NOTE(review): this only ORs the allowed flag bits in and
+                * never clears bits that are already set in the saved
+                * EFLAGS -- confirm intended (mainline masks then merges).
+                */
+               child->thread.regs.regs.gp[HOST_EFLAGS] |= value;
+               return 0;
+
+       default:
+               panic("Bad register in putreg(): %d\n", regno);
+       }
+
+       child->thread.regs.regs.gp[reg_offsets[regno >> 3]] = value;
+       return 0;
+}
+
+/*
+ * Handle PTRACE_POKEUSR: write a word into the USER area at @addr.
+ * Register offsets go through putreg(); the debug register range is
+ * stored in the arch thread state (dr4/dr5 are rejected, matching x86).
+ * Anything else is -EIO.
+ */
+int poke_user(struct task_struct *child, long addr, long data)
+{
+       if ((addr & 3) || addr < 0)
+               return -EIO;
+
+       if (addr < MAX_REG_OFFSET)
+               return putreg(child, addr, data);
+       else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
+               (addr <= offsetof(struct user, u_debugreg[7]))) {
+               addr -= offsetof(struct user, u_debugreg[0]);
+               addr = addr >> 2;
+               if ((addr == 4) || (addr == 5))
+                       return -EIO;
+               child->thread.arch.debugregs[addr] = data;
+               return 0;
+       }
+       return -EIO;
+}
+
+/*
+ * Read one register from the child's saved register file.
+ * @regno is a byte offset into struct user_regs_struct.  Segment
+ * registers are masked to 16 bits; for IA32 tasks everything is masked
+ * to 32 bits.  An out-of-range regno is a kernel bug and panics.
+ */
+unsigned long getreg(struct task_struct *child, int regno)
+{
+       unsigned long mask = ~0UL;
+#ifdef TIF_IA32
+       if (test_tsk_thread_flag(child, TIF_IA32))
+               mask = 0xffffffff;
+#endif
+       switch (regno) {
+       case R8:
+       case R9:
+       case R10:
+       case R11:
+       case R12:
+       case R13:
+       case R14:
+       case R15:
+       case RIP:
+       case RSP:
+       case RAX:
+       case RBX:
+       case RCX:
+       case RDX:
+       case RSI:
+       case RDI:
+       case RBP:
+       case ORIG_RAX:
+       case EFLAGS:
+       case FS_BASE:
+       case GS_BASE:
+               break;
+       case FS:
+       case GS:
+       case DS:
+       case ES:
+       case SS:
+       case CS:
+               mask = 0xffff;
+               break;
+       default:
+               panic("Bad register in getreg: %d\n", regno);
+       }
+       return mask & child->thread.regs.regs.gp[reg_offsets[regno >> 3]];
+}
+
+/*
+ * Handle PTRACE_PEEKUSR: read a word from the USER area at @addr and
+ * store it at the tracer-supplied address @data.  Unknown offsets in
+ * the USER area read back as 0.
+ */
+int peek_user(struct task_struct *child, long addr, long data)
+{
+       /* read the word at location addr in the USER area. */
+       unsigned long tmp;
+
+       if ((addr & 3) || addr < 0)
+               return -EIO;
+
+       tmp = 0;  /* Default return condition */
+       if (addr < MAX_REG_OFFSET)
+               tmp = getreg(child, addr);
+       else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
+               (addr <= offsetof(struct user, u_debugreg[7]))) {
+               addr -= offsetof(struct user, u_debugreg[0]);
+               addr = addr >> 2;
+               tmp = child->thread.arch.debugregs[addr];
+       }
+       /* NOTE(review): cast lacks the __user annotation (sparse warning) */
+       return put_user(tmp, (unsigned long *) data);
+}
+
+/* XXX Mostly copied from sys-i386 */
+int is_syscall(unsigned long addr)
+{
+       unsigned short instr;
+       int n;
+
+       n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
+       if (n) {
+               /*
+                * access_process_vm() grants access to vsyscall and stub,
+                * while copy_from_user doesn't. Maybe access_process_vm is
+                * slow, but that doesn't matter, since it will be called only
+                * in case of singlestepping, if copy_from_user failed.
+                */
+               n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+               if (n != sizeof(instr)) {
+                       printk("is_syscall : failed to read instruction from "
+                              "0x%lx\n", addr);
+                       return 1;
+               }
+       }
+       /* sysenter */
+       return instr == 0x050f;
+}
+
+/*
+ * Read the child's i387 FPU state from the backing host process and
+ * copy it out to the tracer's buffer.  Returns 0 on success, a negative
+ * host error, or -EFAULT.
+ */
+static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
+{
+       int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
+       long fpregs[HOST_FP_SIZE];
+
+       /* the host save area must exactly match the exported layout */
+       BUG_ON(sizeof(*buf) != sizeof(fpregs));
+       err = save_fp_registers(userspace_pid[cpu], fpregs);
+       if (err)
+               return err;
+
+       n = copy_to_user(buf, fpregs, sizeof(fpregs));
+       if (n > 0)
+               return -EFAULT;
+
+       return n;
+}
+
+/*
+ * Write the tracer-supplied i387 FPU state into the backing host
+ * process.  Returns -EFAULT if the user buffer is unreadable, otherwise
+ * the result of restore_fp_registers().
+ */
+static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
+{
+       int n, cpu = ((struct thread_info *) child->stack)->cpu;
+       long fpregs[HOST_FP_SIZE];
+
+       /* the host save area must exactly match the exported layout */
+       BUG_ON(sizeof(*buf) != sizeof(fpregs));
+       n = copy_from_user(fpregs, buf, sizeof(fpregs));
+       if (n > 0)
+               return -EFAULT;
+
+       return restore_fp_registers(userspace_pid[cpu], fpregs);
+}
+
+/*
+ * 64-bit subarch-specific ptrace requests: i387 FP state and
+ * PTRACE_ARCH_PRCTL (FS/GS base manipulation).  Unknown requests keep
+ * the -EIO default.
+ */
+long subarch_ptrace(struct task_struct *child, long request,
+                   unsigned long addr, unsigned long data)
+{
+       int ret = -EIO;
+       void __user *datap = (void __user *) data;
+
+       switch (request) {
+       case PTRACE_GETFPREGS: /* Get the child FPU state. */
+               ret = get_fpregs(datap, child);
+               break;
+       case PTRACE_SETFPREGS: /* Set the child FPU state. */
+               ret = set_fpregs(datap, child);
+               break;
+       case PTRACE_ARCH_PRCTL:
+               /* XXX Calls ptrace on the host - needs some SMP thinking */
+               ret = arch_prctl(child, data, (void __user *) addr);
+               break;
+       }
+
+       return ret;
+}
diff --git a/arch/um/sys-x86/ptrace_user.c b/arch/um/sys-x86/ptrace_user.c
new file mode 100644 (file)
index 0000000..3960ca1
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#include <errno.h>
+#include "ptrace_user.h"
+
+/*
+ * Userspace-side helper: fetch the GP registers of host process @pid
+ * via PTRACE_GETREGS.  Returns 0 on success, -errno on failure.
+ */
+int ptrace_getregs(long pid, unsigned long *regs_out)
+{
+       if (ptrace(PTRACE_GETREGS, pid, 0, regs_out) < 0)
+               return -errno;
+       return 0;
+}
+
+/*
+ * Userspace-side helper: set the GP registers of host process @pid
+ * via PTRACE_SETREGS.  Returns 0 on success, -errno on failure.
+ */
+int ptrace_setregs(long pid, unsigned long *regs)
+{
+       if (ptrace(PTRACE_SETREGS, pid, 0, regs) < 0)
+               return -errno;
+       return 0;
+}
diff --git a/arch/um/sys-x86/setjmp_32.S b/arch/um/sys-x86/setjmp_32.S
new file mode 100644 (file)
index 0000000..b766792
--- /dev/null
@@ -0,0 +1,58 @@
+#
+# arch/i386/setjmp.S
+#
+# setjmp/longjmp for the i386 architecture
+#
+
+#
+# The jmp_buf is assumed to contain the following, in order:
+#      %ebx
+#      %esp
+#      %ebp
+#      %esi
+#      %edi
+#      <return address>
+#
+
+       .text
+       .align 4
+       .globl setjmp
+       .type setjmp, @function
+setjmp:
+# Save the callee-saved registers and the return address into the
+# jmp_buf pointed to by the first argument; returns 0.
+#ifdef _REGPARM
+       movl %eax,%edx
+#else
+       movl 4(%esp),%edx
+#endif
+       popl %ecx                       # Return address, and adjust the stack
+       xorl %eax,%eax                  # Return value
+       movl %ebx,(%edx)
+       movl %esp,4(%edx)               # Post-return %esp!
+       pushl %ecx                      # Make the call/return stack happy
+       movl %ebp,8(%edx)
+       movl %esi,12(%edx)
+       movl %edi,16(%edx)
+       movl %ecx,20(%edx)              # Return address
+       ret
+
+       .size setjmp,.-setjmp
+
+       .text
+       .align 4
+       .globl longjmp
+       .type longjmp, @function
+longjmp:
+# Restore the registers saved by setjmp and jump to the saved return
+# address, making setjmp appear to return the second argument.
+#ifdef _REGPARM
+       xchgl %eax,%edx
+#else
+       movl 4(%esp),%edx               # jmp_ptr address
+       movl 8(%esp),%eax               # Return value
+#endif
+       movl (%edx),%ebx
+       movl 4(%edx),%esp
+       movl 8(%edx),%ebp
+       movl 12(%edx),%esi
+       movl 16(%edx),%edi
+       jmp *20(%edx)
+
+       .size longjmp,.-longjmp
diff --git a/arch/um/sys-x86/setjmp_64.S b/arch/um/sys-x86/setjmp_64.S
new file mode 100644 (file)
index 0000000..45f547b
--- /dev/null
@@ -0,0 +1,54 @@
+#
+# arch/x86_64/setjmp.S
+#
+# setjmp/longjmp for the x86-64 architecture
+#
+
+#
+# The jmp_buf is assumed to contain the following, in order:
+#      %rbx
+#      %rsp (post-return)
+#      %rbp
+#      %r12
+#      %r13
+#      %r14
+#      %r15
+#      <return address>
+#
+
+       .text
+       .align 4
+       .globl setjmp
+       .type setjmp, @function
+setjmp:
+# jmp_buf in %rdi (SysV AMD64 ABI); save callee-saved registers and the
+# return address, return 0.
+       pop  %rsi                       # Return address, and adjust the stack
+       xorl %eax,%eax                  # Return value
+       movq %rbx,(%rdi)
+       movq %rsp,8(%rdi)               # Post-return %rsp!
+       push %rsi                       # Make the call/return stack happy
+       movq %rbp,16(%rdi)
+       movq %r12,24(%rdi)
+       movq %r13,32(%rdi)
+       movq %r14,40(%rdi)
+       movq %r15,48(%rdi)
+       movq %rsi,56(%rdi)              # Return address
+       ret
+
+       .size setjmp,.-setjmp
+
+       .text
+       .align 4
+       .globl longjmp
+       .type longjmp, @function
+longjmp:
+# jmp_buf in %rdi, return value in %esi; restore the saved registers
+# and jump to the saved return address.
+       movl %esi,%eax                  # Return value (int)
+       movq (%rdi),%rbx
+       movq 8(%rdi),%rsp
+       movq 16(%rdi),%rbp
+       movq 24(%rdi),%r12
+       movq 32(%rdi),%r13
+       movq 40(%rdi),%r14
+       movq 48(%rdi),%r15
+       jmp *56(%rdi)
+
+       .size longjmp,.-longjmp
diff --git a/arch/um/sys-x86/signal_32.c b/arch/um/sys-x86/signal_32.c
new file mode 100644 (file)
index 0000000..bcbfb0d
--- /dev/null
@@ -0,0 +1,498 @@
+/*
+ * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#include <linux/ptrace.h>
+#include <asm/unistd.h>
+#include <asm/uaccess.h>
+#include <asm/ucontext.h>
+#include "frame_kern.h"
+#include "skas.h"
+
+/*
+ * FPU tag word conversions.
+ */
+
+/*
+ * Convert a classic i387 tag word (2 bits per register) into the FXSR
+ * compressed form (1 bit per register: 1 = valid, 0 = empty).
+ */
+static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
+{
+       unsigned int tmp; /* to avoid 16 bit prefixes in the code */
+
+       /* Transform each pair of bits into 01 (valid) or 00 (empty) */
+       tmp = ~twd;
+       tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
+       /* and move the valid bits to the lower byte. */
+       tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
+       tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
+       tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
+       return tmp;
+}
+
+/*
+ * Expand the FXSR 1-bit-per-register tag word back into the classic
+ * i387 2-bit form by inspecting each saved ST register's contents to
+ * recover the tag (valid / zero / special / empty).
+ */
+static inline unsigned long twd_fxsr_to_i387(struct user_fxsr_struct *fxsave)
+{
+       struct _fpxreg *st = NULL;
+       unsigned long twd = (unsigned long) fxsave->twd;
+       unsigned long tag;
+       unsigned long ret = 0xffff0000;
+       int i;
+
+#define FPREG_ADDR(f, n)       ((char *)&(f)->st_space + (n) * 16)
+
+       for (i = 0; i < 8; i++) {
+               if (twd & 0x1) {
+                       st = (struct _fpxreg *) FPREG_ADDR(fxsave, i);
+
+                       switch (st->exponent & 0x7fff) {
+                       case 0x7fff:
+                               tag = 2;                /* Special */
+                               break;
+                       case 0x0000:
+                               if ( !st->significand[0] &&
+                                    !st->significand[1] &&
+                                    !st->significand[2] &&
+                                    !st->significand[3] ) {
+                                       tag = 1;        /* Zero */
+                               } else {
+                                       tag = 2;        /* Special */
+                               }
+                               break;
+                       default:
+                               if (st->significand[3] & 0x8000) {
+                                       tag = 0;        /* Valid */
+                               } else {
+                                       tag = 2;        /* Special */
+                               }
+                               break;
+                       }
+               } else {
+                       tag = 3;                        /* Empty */
+               }
+               ret |= (tag << (2 * i));
+               twd = twd >> 1;
+       }
+       return ret;
+}
+
+/*
+ * Translate kernel FXSR state into the classic i387 _fpstate layout
+ * expected on a 32-bit signal frame: rebuild the 7-word environment and
+ * copy the 8 ST registers one by one.  Returns 0 on success, 1 on a
+ * user-copy fault.
+ */
+static int convert_fxsr_to_user(struct _fpstate __user *buf,
+                               struct user_fxsr_struct *fxsave)
+{
+       unsigned long env[7];
+       struct _fpreg __user *to;
+       struct _fpxreg *from;
+       int i;
+
+       env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
+       env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
+       env[2] = twd_fxsr_to_i387(fxsave);
+       env[3] = fxsave->fip;
+       env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
+       env[5] = fxsave->foo;
+       env[6] = fxsave->fos;
+
+       if (__copy_to_user(buf, env, 7 * sizeof(unsigned long)))
+               return 1;
+
+       /* i387 registers are 10 bytes, FXSR slots are 16: copy per-reg */
+       to = &buf->_st[0];
+       from = (struct _fpxreg *) &fxsave->st_space[0];
+       for (i = 0; i < 8; i++, to++, from++) {
+               unsigned long __user *t = (unsigned long __user *)to;
+               unsigned long *f = (unsigned long *)from;
+
+               if (__put_user(*f, t) ||
+                               __put_user(*(f + 1), t + 1) ||
+                               __put_user(from->exponent, &to->exponent))
+                       return 1;
+       }
+       return 0;
+}
+
+/*
+ * Inverse of convert_fxsr_to_user(): rebuild kernel FXSR state from the
+ * classic i387 _fpstate image found on a 32-bit signal frame.  Returns
+ * 0 on success, 1 on a user-copy fault.
+ */
+static int convert_fxsr_from_user(struct user_fxsr_struct *fxsave,
+                                 struct _fpstate __user *buf)
+{
+       unsigned long env[7];
+       struct _fpxreg *to;
+       struct _fpreg __user *from;
+       int i;
+
+       if (copy_from_user( env, buf, 7 * sizeof(long)))
+               return 1;
+
+       fxsave->cwd = (unsigned short)(env[0] & 0xffff);
+       fxsave->swd = (unsigned short)(env[1] & 0xffff);
+       fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
+       fxsave->fip = env[3];
+       fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
+       fxsave->fcs = (env[4] & 0xffff);
+       fxsave->foo = env[5];
+       fxsave->fos = env[6];
+
+       /* i387 registers are 10 bytes, FXSR slots are 16: copy per-reg */
+       to = (struct _fpxreg *) &fxsave->st_space[0];
+       from = &buf->_st[0];
+       for (i = 0; i < 8; i++, to++, from++) {
+               unsigned long *t = (unsigned long *)to;
+               unsigned long __user *f = (unsigned long __user *)from;
+
+               if (__get_user(*t, f) ||
+                   __get_user(*(t + 1), f + 1) ||
+                   __get_user(to->exponent, &from->exponent))
+                       return 1;
+       }
+       return 0;
+}
+
+extern int have_fpx_regs;
+
+/*
+ * Restore the register and FPU state from a 32-bit signal frame's
+ * sigcontext into the task's saved registers and the backing host
+ * process (sigreturn path).  Returns 0 on success, nonzero on failure.
+ */
+static int copy_sc_from_user(struct pt_regs *regs,
+                            struct sigcontext __user *from)
+{
+       struct sigcontext sc;
+       int err, pid;
+
+       err = copy_from_user(&sc, from, sizeof(sc));
+       if (err)
+               return err;
+
+       pid = userspace_pid[current_thread_info()->cpu];
+
+#define GETREG(regno, regname) regs->regs.gp[HOST_##regno] = sc.regname
+
+       GETREG(GS, gs);
+       GETREG(FS, fs);
+       GETREG(ES, es);
+       GETREG(DS, ds);
+       GETREG(EDI, di);
+       GETREG(ESI, si);
+       GETREG(EBP, bp);
+       GETREG(SP, sp);
+       GETREG(EBX, bx);
+       GETREG(EDX, dx);
+       GETREG(ECX, cx);
+       GETREG(EAX, ax);
+       GETREG(IP, ip);
+       GETREG(CS, cs);
+       GETREG(EFLAGS, flags);
+       GETREG(SS, ss);
+
+#undef GETREG
+       /* push the frame's FP state back into the host child process */
+       if (have_fpx_regs) {
+               struct user_fxsr_struct fpx;
+
+               err = copy_from_user(&fpx,
+                       &((struct _fpstate __user *)sc.fpstate)->_fxsr_env[0],
+                                    sizeof(struct user_fxsr_struct));
+               if (err)
+                       return 1;
+
+               err = convert_fxsr_from_user(&fpx, sc.fpstate);
+               if (err)
+                       return 1;
+
+               err = restore_fpx_registers(pid, (unsigned long *) &fpx);
+               if (err < 0) {
+                       printk(KERN_ERR "copy_sc_from_user - "
+                              "restore_fpx_registers failed, errno = %d\n",
+                              -err);
+                       return 1;
+               }
+       } else {
+               struct user_i387_struct fp;
+
+               err = copy_from_user(&fp, sc.fpstate,
+                                    sizeof(struct user_i387_struct));
+               if (err)
+                       return 1;
+
+               err = restore_fp_registers(pid, (unsigned long *) &fp);
+               if (err < 0) {
+                       printk(KERN_ERR "copy_sc_from_user - "
+                              "restore_fp_registers failed, errno = %d\n",
+                              -err);
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Build a 32-bit sigcontext (plus FP state) on the user signal stack
+ * from the task's saved registers and the backing host process's FPU
+ * state.  @sp is the pre-signal stack pointer recorded in the context.
+ * Returns 0 on success, nonzero on failure.
+ */
+static int copy_sc_to_user(struct sigcontext __user *to,
+                          struct _fpstate __user *to_fp, struct pt_regs *regs,
+                          unsigned long sp)
+{
+       struct sigcontext sc;
+       struct faultinfo * fi = &current->thread.arch.faultinfo;
+       int err, pid;
+       memset(&sc, 0, sizeof(struct sigcontext));
+
+       sc.gs = REGS_GS(regs->regs.gp);
+       sc.fs = REGS_FS(regs->regs.gp);
+       sc.es = REGS_ES(regs->regs.gp);
+       sc.ds = REGS_DS(regs->regs.gp);
+       sc.di = REGS_EDI(regs->regs.gp);
+       sc.si = REGS_ESI(regs->regs.gp);
+       sc.bp = REGS_EBP(regs->regs.gp);
+       sc.sp = sp;
+       sc.bx = REGS_EBX(regs->regs.gp);
+       sc.dx = REGS_EDX(regs->regs.gp);
+       sc.cx = REGS_ECX(regs->regs.gp);
+       sc.ax = REGS_EAX(regs->regs.gp);
+       sc.ip = REGS_IP(regs->regs.gp);
+       sc.cs = REGS_CS(regs->regs.gp);
+       sc.flags = REGS_EFLAGS(regs->regs.gp);
+       sc.sp_at_signal = regs->regs.gp[UESP];
+       sc.ss = regs->regs.gp[SS];
+       /* fault details from the last segfault, as x86 delivers them */
+       sc.cr2 = fi->cr2;
+       sc.err = fi->error_code;
+       sc.trapno = fi->trap_no;
+
+       /* default FP save area: directly after the sigcontext */
+       to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1));
+       sc.fpstate = to_fp;
+
+       pid = userspace_pid[current_thread_info()->cpu];
+       if (have_fpx_regs) {
+               struct user_fxsr_struct fpx;
+
+               err = save_fpx_registers(pid, (unsigned long *) &fpx);
+               if (err < 0){
+                       printk(KERN_ERR "copy_sc_to_user - save_fpx_registers "
+                              "failed, errno = %d\n", err);
+                       return 1;
+               }
+
+               err = convert_fxsr_to_user(to_fp, &fpx);
+               if (err)
+                       return 1;
+
+               err |= __put_user(fpx.swd, &to_fp->status);
+               err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic);
+               if (err)
+                       return 1;
+
+               if (copy_to_user(&to_fp->_fxsr_env[0], &fpx,
+                                sizeof(struct user_fxsr_struct)))
+                       return 1;
+       }
+       else {
+               struct user_i387_struct fp;
+
+               /*
+                * NOTE(review): err from save_fp_registers() is never
+                * checked here, unlike the fpx branch above -- confirm
+                * whether a failed save should abort frame setup.
+                */
+               err = save_fp_registers(pid, (unsigned long *) &fp);
+               if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
+                       return 1;
+       }
+
+       return copy_to_user(to, &sc, sizeof(sc));
+}
+
+/*
+ * Fill in a ucontext on the user signal stack: altstack description,
+ * machine context (via copy_sc_to_user) and the saved signal mask.
+ * Returns 0 on success, nonzero on any user-copy failure.
+ */
+static int copy_ucontext_to_user(struct ucontext __user *uc,
+                                struct _fpstate __user *fp, sigset_t *set,
+                                unsigned long sp)
+{
+       int err = 0;
+
+       err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp);
+       err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags);
+       err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size);
+       err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, sp);
+       err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set));
+       return err;
+}
+
+/*
+ * Layout of the classic (non-RT) 32-bit signal frame pushed on the user
+ * stack; must match what glibc and the sigreturn path expect.
+ */
+struct sigframe
+{
+       char __user *pretcode;
+       int sig;
+       struct sigcontext sc;
+       struct _fpstate fpstate;
+       unsigned long extramask[_NSIG_WORDS-1];
+       char retcode[8];
+};
+
+/*
+ * Layout of the RT 32-bit signal frame (siginfo + ucontext variant).
+ */
+struct rt_sigframe
+{
+       char __user *pretcode;
+       int sig;
+       struct siginfo __user *pinfo;
+       void __user *puc;
+       struct siginfo info;
+       struct ucontext uc;
+       struct _fpstate fpstate;
+       char retcode[8];
+};
+
+/*
+ * Set up a classic (non-RT) signal frame on the user stack and redirect
+ * the task to the signal handler.  Returns 0 on success, nonzero on
+ * failure (in which case SP is restored to its pre-signal value).
+ */
+int setup_signal_stack_sc(unsigned long stack_top, int sig,
+                         struct k_sigaction *ka, struct pt_regs *regs,
+                         sigset_t *mask)
+{
+       struct sigframe __user *frame;
+       void __user *restorer;
+       unsigned long save_sp = PT_REGS_SP(regs);
+       int err = 0;
+
+       /* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */
+       stack_top = ((stack_top + 4) & -16UL) - 4;
+       frame = (struct sigframe __user *) stack_top - 1;
+       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+               return 1;
+
+       restorer = frame->retcode;
+       if (ka->sa.sa_flags & SA_RESTORER)
+               restorer = ka->sa.sa_restorer;
+
+       /* Update SP now because the page fault handler refuses to extend
+        * the stack if the faulting address is too far below the current
+        * SP, which frame now certainly is.  If there's an error, the original
+        * value is restored on the way out.
+        * When writing the sigcontext to the stack, we have to write the
+        * original value, so that's passed to copy_sc_to_user, which does
+        * the right thing with it.
+        */
+       PT_REGS_SP(regs) = (unsigned long) frame;
+
+       err |= __put_user(restorer, &frame->pretcode);
+       err |= __put_user(sig, &frame->sig);
+       err |= copy_sc_to_user(&frame->sc, NULL, regs, save_sp);
+       err |= __put_user(mask->sig[0], &frame->sc.oldmask);
+       if (_NSIG_WORDS > 1)
+               err |= __copy_to_user(&frame->extramask, &mask->sig[1],
+                                     sizeof(frame->extramask));
+
+       /*
+        * This is popl %eax ; movl $,%eax ; int $0x80
+        *
+        * WE DO NOT USE IT ANY MORE! It's only left here for historical
+        * reasons and because gdb uses it as a signature to notice
+        * signal handler stack frames.
+        */
+       err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
+       err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
+       err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
+
+       if (err)
+               goto err;
+
+       /* enter the handler: sig in EAX, scratch regs cleared */
+       PT_REGS_SP(regs) = (unsigned long) frame;
+       PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
+       PT_REGS_EAX(regs) = (unsigned long) sig;
+       PT_REGS_EDX(regs) = (unsigned long) 0;
+       PT_REGS_ECX(regs) = (unsigned long) 0;
+
+       if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
+               ptrace_notify(SIGTRAP);
+       return 0;
+
+err:
+       PT_REGS_SP(regs) = save_sp;
+       return err;
+}
+
+/*
+ * Set up an RT signal frame (siginfo + ucontext) on the user stack and
+ * redirect the task to the signal handler.  Returns 0 on success,
+ * nonzero on failure (SP restored to its pre-signal value).
+ */
+int setup_signal_stack_si(unsigned long stack_top, int sig,
+                         struct k_sigaction *ka, struct pt_regs *regs,
+                         siginfo_t *info, sigset_t *mask)
+{
+       struct rt_sigframe __user *frame;
+       void __user *restorer;
+       unsigned long save_sp = PT_REGS_SP(regs);
+       int err = 0;
+
+       stack_top &= -8UL;
+       frame = (struct rt_sigframe __user *) stack_top - 1;
+       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+               return 1;
+
+       restorer = frame->retcode;
+       if (ka->sa.sa_flags & SA_RESTORER)
+               restorer = ka->sa.sa_restorer;
+
+       /* See comment above about why this is here */
+       PT_REGS_SP(regs) = (unsigned long) frame;
+
+       err |= __put_user(restorer, &frame->pretcode);
+       err |= __put_user(sig, &frame->sig);
+       err |= __put_user(&frame->info, &frame->pinfo);
+       err |= __put_user(&frame->uc, &frame->puc);
+       err |= copy_siginfo_to_user(&frame->info, info);
+       err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask,
+                                    save_sp);
+
+       /*
+        * This is movl $,%eax ; int $0x80
+        *
+        * WE DO NOT USE IT ANY MORE! It's only left here for historical
+        * reasons and because gdb uses it as a signature to notice
+        * signal handler stack frames.
+        */
+       err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
+       err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
+       err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
+
+       if (err)
+               goto err;
+
+       /* enter the handler: sig in EAX, &info in EDX, &uc in ECX */
+       PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
+       PT_REGS_EAX(regs) = (unsigned long) sig;
+       PT_REGS_EDX(regs) = (unsigned long) &frame->info;
+       PT_REGS_ECX(regs) = (unsigned long) &frame->uc;
+
+       if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
+               ptrace_notify(SIGTRAP);
+       return 0;
+
+err:
+       PT_REGS_SP(regs) = save_sp;
+       return err;
+}
+
+/*
+ * Classic sigreturn: locate the sigframe relative to the current user
+ * SP, restore the blocked-signal mask and the saved register/FPU state,
+ * then resume the interrupted context.  A bad frame raises SIGSEGV.
+ */
+long sys_sigreturn(struct pt_regs regs)
+{
+       unsigned long sp = PT_REGS_SP(&current->thread.regs);
+       struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
+       sigset_t set;
+       struct sigcontext __user *sc = &frame->sc;
+       unsigned long __user *oldmask = &sc->oldmask;
+       unsigned long __user *extramask = frame->extramask;
+       int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
+
+       if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
+           copy_from_user(&set.sig[1], extramask, sig_size))
+               goto segfault;
+
+       sigdelsetmask(&set, ~_BLOCKABLE);
+       set_current_blocked(&set);
+
+       if (copy_sc_from_user(&current->thread.regs, sc))
+               goto segfault;
+
+       /* Avoid ERESTART handling */
+       PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
+       return PT_REGS_SYSCALL_RET(&current->thread.regs);
+
+ segfault:
+       force_sig(SIGSEGV, current);
+       return 0;
+}
+
+/*
+ * RT sigreturn: locate the rt_sigframe relative to the current user SP,
+ * restore the blocked-signal mask from the ucontext and the saved
+ * register/FPU state, then resume.  A bad frame raises SIGSEGV.
+ */
+long sys_rt_sigreturn(struct pt_regs regs)
+{
+       unsigned long sp = PT_REGS_SP(&current->thread.regs);
+       struct rt_sigframe __user *frame =
+               (struct rt_sigframe __user *) (sp - 4);
+       sigset_t set;
+       struct ucontext __user *uc = &frame->uc;
+       int sig_size = _NSIG_WORDS * sizeof(unsigned long);
+
+       if (copy_from_user(&set, &uc->uc_sigmask, sig_size))
+               goto segfault;
+
+       sigdelsetmask(&set, ~_BLOCKABLE);
+       set_current_blocked(&set);
+
+       if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
+               goto segfault;
+
+       /* Avoid ERESTART handling */
+       PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
+       return PT_REGS_SYSCALL_RET(&current->thread.regs);
+
+ segfault:
+       force_sig(SIGSEGV, current);
+       return 0;
+}
diff --git a/arch/um/sys-x86/signal_64.c b/arch/um/sys-x86/signal_64.c
new file mode 100644 (file)
index 0000000..255b2ca
--- /dev/null
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2003 PathScale, Inc.
+ * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#include <linux/personality.h>
+#include <linux/ptrace.h>
+#include <linux/kernel.h>
+#include <asm/unistd.h>
+#include <asm/uaccess.h>
+#include <asm/ucontext.h>
+#include "frame_kern.h"
+#include "skas.h"
+
+/*
+ * Copy a 64-bit sigcontext from user space into the kernel-side pt_regs,
+ * then push the saved FP state into the host child via ptrace.
+ * Returns 0 on success, non-zero on any user copy or host failure.
+ *
+ * NOTE(review): the first failure path returns the raw copy_from_user()
+ * residue while the later ones return 1 - inconsistent but both non-zero,
+ * so callers that only test for != 0 are unaffected.
+ */
+static int copy_sc_from_user(struct pt_regs *regs,
+                            struct sigcontext __user *from)
+{
+       struct sigcontext sc;
+       struct user_i387_struct fp;
+       void __user *buf;
+       int err;
+
+       err = copy_from_user(&sc, from, sizeof(sc));
+       if (err)
+               return err;
+
+/* Transfer one general register from the local sigcontext copy. */
+#define GETREG(regno, regname) regs->regs.gp[HOST_##regno] = sc.regname
+
+       GETREG(R8, r8);
+       GETREG(R9, r9);
+       GETREG(R10, r10);
+       GETREG(R11, r11);
+       GETREG(R12, r12);
+       GETREG(R13, r13);
+       GETREG(R14, r14);
+       GETREG(R15, r15);
+       GETREG(RDI, di);
+       GETREG(RSI, si);
+       GETREG(RBP, bp);
+       GETREG(RBX, bx);
+       GETREG(RDX, dx);
+       GETREG(RAX, ax);
+       GETREG(RCX, cx);
+       GETREG(SP, sp);
+       GETREG(IP, ip);
+       GETREG(EFLAGS, flags);
+       GETREG(CS, cs);
+#undef GETREG
+
+       /* FP state lives at a user pointer recorded in the sigcontext. */
+       buf = sc.fpstate;
+
+       err = copy_from_user(&fp, buf, sizeof(struct user_i387_struct));
+       if (err)
+               return 1;
+
+       /* Restore FP registers directly in the host process via ptrace. */
+       err = restore_fp_registers(userspace_pid[current_thread_info()->cpu],
+                                  (unsigned long *) &fp);
+       if (err < 0) {
+               printk(KERN_ERR "copy_sc_from_user - "
+                      "restore_fp_registers failed, errno = %d\n",
+                      -err);
+               return 1;
+       }
+
+       return 0;
+}
+
+static int copy_sc_to_user(struct sigcontext __user *to,
+                          struct _fpstate __user *to_fp, struct pt_regs *regs,
+                          unsigned long mask, unsigned long sp)
+{
+       struct faultinfo * fi = &current->thread.arch.faultinfo;
+       struct sigcontext sc;
+       struct user_i387_struct fp;
+       int err = 0;
+       memset(&sc, 0, sizeof(struct sigcontext));
+
+#define PUTREG(regno, regname) sc.regname = regs->regs.gp[HOST_##regno]
+
+       PUTREG(RDI, di);
+       PUTREG(RSI, si);
+       PUTREG(RBP, bp);
+       /*
+        * Must use original RSP, which is passed in, rather than what's in
+        * signal frame.
+        */
+       sc.sp = sp;
+       PUTREG(RBX, bx);
+       PUTREG(RDX, dx);
+       PUTREG(RCX, cx);
+       PUTREG(RAX, ax);
+       PUTREG(R8, r8);
+       PUTREG(R9, r9);
+       PUTREG(R10, r10);
+       PUTREG(R11, r11);
+       PUTREG(R12, r12);
+       PUTREG(R13, r13);
+       PUTREG(R14, r14);
+       PUTREG(R15, r15);
+       PUTREG(CS, cs); /* XXX x86_64 doesn't do this */
+
+       sc.cr2 = fi->cr2;
+       sc.err = fi->error_code;
+       sc.trapno = fi->trap_no;
+
+       PUTREG(IP, ip);
+       PUTREG(EFLAGS, flags);
+#undef PUTREG
+
+       sc.oldmask = mask;
+
+       err = copy_to_user(to, &sc, sizeof(struct sigcontext));
+       if (err)
+               return 1;
+
+       err = save_fp_registers(userspace_pid[current_thread_info()->cpu],
+                               (unsigned long *) &fp);
+       if (err < 0) {
+               printk(KERN_ERR "copy_sc_from_user - restore_fp_registers "
+                      "failed, errno = %d\n", -err);
+               return 1;
+       }
+
+       if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
+               return 1;
+
+       return err;
+}
+
+/* 64-bit signal frame layout as pushed on the user stack. */
+struct rt_sigframe
+{
+       char __user *pretcode;  /* return address: the sa_restorer stub */
+       struct ucontext uc;     /* saved context, incl. the sigcontext */
+       struct siginfo info;    /* siginfo handed to SA_SIGINFO handlers */
+       struct _fpstate fpstate;        /* FP state; uc.uc_mcontext.fpstate points here */
+};
+
+/*
+ * Build a 64-bit rt signal frame on the user stack and point the
+ * registers at the handler.  Returns 0 on success; on failure the
+ * original stack pointer is restored and a non-zero err is returned.
+ */
+int setup_signal_stack_si(unsigned long stack_top, int sig,
+                         struct k_sigaction *ka, struct pt_regs * regs,
+                         siginfo_t *info, sigset_t *set)
+{
+       struct rt_sigframe __user *frame;
+       unsigned long save_sp = PT_REGS_RSP(regs);
+       int err = 0;
+       struct task_struct *me = current;
+
+       frame = (struct rt_sigframe __user *)
+               round_down(stack_top - sizeof(struct rt_sigframe), 16);
+       /* Subtract 128 for a red zone and 8 for proper alignment */
+       frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8);
+
+       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+               goto out;
+
+       if (ka->sa.sa_flags & SA_SIGINFO) {
+               err |= copy_siginfo_to_user(&frame->info, info);
+               if (err)
+                       goto out;
+       }
+
+       /*
+        * Update SP now because the page fault handler refuses to extend
+        * the stack if the faulting address is too far below the current
+        * SP, which frame now certainly is.  If there's an error, the original
+        * value is restored on the way out.
+        * When writing the sigcontext to the stack, we have to write the
+        * original value, so that's passed to copy_sc_to_user, which does
+        * the right thing with it.
+        */
+       PT_REGS_RSP(regs) = (unsigned long) frame;
+
+       /* Create the ucontext.  */
+       err |= __put_user(0, &frame->uc.uc_flags);
+       err |= __put_user(0, &frame->uc.uc_link);
+       err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+       err |= __put_user(sas_ss_flags(save_sp),
+                         &frame->uc.uc_stack.ss_flags);
+       err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
+       err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
+                              set->sig[0], save_sp);
+       err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
+       /* NOTE(review): __put_user results ignored in this branch, unlike
+        * every other store in this function - confirm intentional. */
+       if (sizeof(*set) == 16) {
+               __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
+               __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
+       }
+       else
+               err |= __copy_to_user(&frame->uc.uc_sigmask, set,
+                                     sizeof(*set));
+
+       /*
+        * Set up to return from userspace.  If provided, use a stub
+        * already in userspace.
+        */
+       /* x86-64 should always use SA_RESTORER. */
+       if (ka->sa.sa_flags & SA_RESTORER)
+               err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
+       else
+               /* could use a vstub here */
+               goto restore_sp;
+
+       if (err)
+               goto restore_sp;
+
+       /* Set up registers for signal handler */
+       {
+               struct exec_domain *ed = current_thread_info()->exec_domain;
+               if (unlikely(ed && ed->signal_invmap && sig < 32))
+                       sig = ed->signal_invmap[sig];
+       }
+
+       /* Handler arguments per the SysV AMD64 calling convention. */
+       PT_REGS_RDI(regs) = sig;
+       /* In case the signal handler was declared without prototypes */
+       PT_REGS_RAX(regs) = 0;
+
+       /*
+        * This also works for non SA_SIGINFO handlers because they expect the
+        * next argument after the signal number on the stack.
+        */
+       PT_REGS_RSI(regs) = (unsigned long) &frame->info;
+       PT_REGS_RDX(regs) = (unsigned long) &frame->uc;
+       PT_REGS_RIP(regs) = (unsigned long) ka->sa.sa_handler;
+ out:
+       return err;
+
+restore_sp:
+       PT_REGS_RSP(regs) = save_sp;
+       return err;
+}
+
+/*
+ * Handle the 64-bit rt_sigreturn() syscall: undo setup_signal_stack_si(),
+ * restoring the signal mask and register/FP state from the rt_sigframe.
+ * A faulting frame access raises SIGSEGV.
+ */
+long sys_rt_sigreturn(struct pt_regs *regs)
+{
+       unsigned long sp = PT_REGS_SP(&current->thread.regs);
+       /*
+        * NOTE(review): frame assumed 8 bytes below SP (pretcode popped by
+        * the restorer's ret) - confirm against setup_signal_stack_si().
+        */
+       struct rt_sigframe __user *frame =
+               (struct rt_sigframe __user *)(sp - 8);
+       struct ucontext __user *uc = &frame->uc;
+       sigset_t set;
+
+       if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
+               goto segfault;
+
+       /* Never allow SIGKILL/SIGSTOP to end up blocked. */
+       sigdelsetmask(&set, ~_BLOCKABLE);
+       set_current_blocked(&set);
+
+       if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
+               goto segfault;
+
+       /* Avoid ERESTART handling */
+       PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
+       return PT_REGS_SYSCALL_RET(&current->thread.regs);
+
+ segfault:
+       force_sig(SIGSEGV, current);
+       return 0;
+}
diff --git a/arch/um/sys-x86/stub_32.S b/arch/um/sys-x86/stub_32.S
new file mode 100644 (file)
index 0000000..54a36ec
--- /dev/null
@@ -0,0 +1,51 @@
+#include "as-layout.h"
+
+/*
+ * 32-bit userspace syscall stub, mapped into the child at a fixed
+ * address.  batch_syscall_stub walks a list of operations laid out in
+ * the STUB_DATA page: each entry is [length][data...][syscall #][6 args]
+ * [expected return].  It executes each syscall and stops (int3) either
+ * at end of list or on the first unexpected return value, leaving the
+ * last result at STUB_DATA and the failing entry pointer at STUB_DATA+4.
+ *
+ * NOTE(review): syscall_stub is declared .globl but has no label in this
+ * file - presumably only the 64-bit variant defines it; confirm.
+ */
+       .globl syscall_stub
+.section .__syscall_stub, "ax"
+
+       .globl batch_syscall_stub
+batch_syscall_stub:
+       /* load pointer to first operation */
+       mov     $(STUB_DATA+8), %esp
+
+again:
+       /* load length of additional data */
+       mov     0x0(%esp), %eax
+
+       /* if(length == 0) : end of list */
+       /* write possible 0 to header */
+       mov     %eax, STUB_DATA+4
+       cmpl    $0, %eax
+       jz      done
+
+       /* save current pointer */
+       mov     %esp, STUB_DATA+4
+
+       /* skip additional data */
+       add     %eax, %esp
+
+       /* load syscall-# */
+       pop     %eax
+
+       /* load syscall params */
+       pop     %ebx
+       pop     %ecx
+       pop     %edx
+       pop     %esi
+       pop     %edi
+       pop     %ebp
+
+       /* execute syscall */
+       int     $0x80
+
+       /* check return value */
+       pop     %ebx
+       cmp     %ebx, %eax
+       je      again
+
+done:
+       /* save return value */
+       mov     %eax, STUB_DATA
+
+       /* stop */
+       int3
diff --git a/arch/um/sys-x86/stub_64.S b/arch/um/sys-x86/stub_64.S
new file mode 100644 (file)
index 0000000..20e4a96
--- /dev/null
@@ -0,0 +1,66 @@
+#include "as-layout.h"
+
+/*
+ * 64-bit userspace syscall stubs.  syscall_stub runs a single,
+ * pre-loaded syscall and stores its result at STUB_DATA before
+ * trapping back to UML with int3.  batch_syscall_stub walks a list of
+ * operations in the STUB_DATA page (same layout as the 32-bit stub,
+ * with 8-byte slots): it stops at end of list or on the first syscall
+ * whose return differs from the expected value stored after its args.
+ */
+       .globl syscall_stub
+.section .__syscall_stub, "ax"
+syscall_stub:
+       syscall
+       /* We don't have 64-bit constants, so this constructs the address
+        * we need.
+        */
+       movq    $(STUB_DATA >> 32), %rbx
+       salq    $32, %rbx
+       movq    $(STUB_DATA & 0xffffffff), %rcx
+       or      %rcx, %rbx
+       movq    %rax, (%rbx)
+       int3
+
+       .globl batch_syscall_stub
+batch_syscall_stub:
+       /* Build the 64-bit STUB_DATA address in %rbx, as above. */
+       mov     $(STUB_DATA >> 32), %rbx
+       sal     $32, %rbx
+       mov     $(STUB_DATA & 0xffffffff), %rax
+       or      %rax, %rbx
+       /* load pointer to first operation */
+       mov     %rbx, %rsp
+       add     $0x10, %rsp
+again:
+       /* load length of additional data */
+       mov     0x0(%rsp), %rax
+
+       /* if(length == 0) : end of list */
+       /* write possible 0 to header */
+       mov     %rax, 8(%rbx)
+       cmp     $0, %rax
+       jz      done
+
+       /* save current pointer */
+       mov     %rsp, 8(%rbx)
+
+       /* skip additional data */
+       add     %rax, %rsp
+
+       /* load syscall-# */
+       pop     %rax
+
+       /* load syscall params (SysV syscall argument registers, in order) */
+       pop     %rdi
+       pop     %rsi
+       pop     %rdx
+       pop     %r10
+       pop     %r8
+       pop     %r9
+
+       /* execute syscall */
+       syscall
+
+       /* check return value */
+       pop     %rcx
+       cmp     %rcx, %rax
+       je      again
+
+done:
+       /* save return value */
+       mov     %rax, (%rbx)
+
+       /* stop */
+       int3
diff --git a/arch/um/sys-x86/stub_segv_32.c b/arch/um/sys-x86/stub_segv_32.c
new file mode 100644 (file)
index 0000000..28ccf73
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#include "sysdep/stub.h"
+#include "sysdep/sigcontext.h"
+
+/*
+ * 32-bit SIGSEGV handler run inside the stub: copy the fault info from
+ * the signal's sigcontext into the STUB_DATA page, then trap back to
+ * UML.  Lives in the stub section so it is mapped into the child.
+ */
+void __attribute__ ((__section__ (".__syscall_stub")))
+stub_segv_handler(int sig)
+{
+       /* On i386 the sigcontext sits on the stack right after the
+        * signal-number argument. */
+       struct sigcontext *sc = (struct sigcontext *) (&sig + 1);
+
+       GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA), sc);
+
+       trap_myself();
+}
diff --git a/arch/um/sys-x86/stub_segv_64.c b/arch/um/sys-x86/stub_segv_64.c
new file mode 100644 (file)
index 0000000..ced051a
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#include <signal.h>
+#include "as-layout.h"
+#include "sysdep/stub.h"
+#include "sysdep/faultinfo.h"
+#include "sysdep/sigcontext.h"
+
+/*
+ * 64-bit SIGSEGV handler run inside the stub: copy the fault info from
+ * the signal ucontext into the STUB_DATA page, then trap back to UML.
+ */
+void __attribute__ ((__section__ (".__syscall_stub")))
+stub_segv_handler(int sig)
+{
+       struct ucontext *uc;
+
+       /* SA_SIGINFO handlers receive the ucontext pointer in %rdx
+        * (third argument); grab it before anything clobbers it. */
+       __asm__ __volatile__("movq %%rdx, %0" : "=g" (uc) :);
+       GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA),
+                             &uc->uc_mcontext);
+       trap_myself();
+}
+
diff --git a/arch/um/sys-x86/sys_call_table_32.S b/arch/um/sys-x86/sys_call_table_32.S
new file mode 100644 (file)
index 0000000..de27407
--- /dev/null
@@ -0,0 +1,28 @@
+#include <linux/linkage.h>
+/* Steal i386 syscall table for our purposes, but with some slight changes.*/
+
+/* Hardware-access syscalls are meaningless on UML. */
+#define sys_iopl sys_ni_syscall
+#define sys_ioperm sys_ni_syscall
+
+#define sys_vm86old sys_ni_syscall
+#define sys_vm86 sys_ni_syscall
+
+#define old_mmap sys_old_mmap
+
+/* UML has no ptregs_* entry stubs; route those slots to the plain
+ * sys_* implementations. */
+#define ptregs_fork sys_fork
+#define ptregs_execve sys_execve
+#define ptregs_iopl sys_iopl
+#define ptregs_vm86old sys_vm86old
+#define ptregs_sigreturn sys_sigreturn
+#define ptregs_clone sys_clone
+#define ptregs_vm86 sys_vm86
+#define ptregs_rt_sigreturn sys_rt_sigreturn
+#define ptregs_sigaltstack sys_sigaltstack
+#define ptregs_vfork sys_vfork
+
+.section .rodata,"a"
+
+#include "../../x86/kernel/syscall_table_32.S"
+
+ENTRY(syscall_table_size)
+.long .-sys_call_table
diff --git a/arch/um/sys-x86/sys_call_table_64.c b/arch/um/sys-x86/sys_call_table_64.c
new file mode 100644 (file)
index 0000000..f46de82
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * System call table for UML/x86-64, copied from arch/x86_64/kernel/syscall.c
+ * with some changes for UML.
+ */
+
+#include <linux/linkage.h>
+#include <linux/sys.h>
+#include <linux/cache.h>
+
+#define __NO_STUBS
+
+/*
+ * Below you can see, in terms of #define's, the differences between the x86-64
+ * and the UML syscall table.
+ */
+
+/* Not going to be implemented by UML, since we have no hardware. */
+#define stub_iopl sys_ni_syscall
+#define sys_ioperm sys_ni_syscall
+
+/*
+ * The UML TLS problem. Note that x86_64 does not implement this, so the below
+ * is needed only for the ia32 compatibility.
+ */
+
+/* On UML we call it this way ("old" means it's not mmap2) */
+#define sys_mmap old_mmap
+
+/* UML needs no ptregs entry stubs; use the plain sys_* functions. */
+#define stub_clone sys_clone
+#define stub_fork sys_fork
+#define stub_vfork sys_vfork
+#define stub_execve sys_execve
+#define stub_rt_sigsuspend sys_rt_sigsuspend
+#define stub_sigaltstack sys_sigaltstack
+#define stub_rt_sigreturn sys_rt_sigreturn
+
+/* First pass over unistd_64.h: emit an extern declaration per syscall. */
+#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
+#undef _ASM_X86_UNISTD_64_H
+#include "../../x86/include/asm/unistd_64.h"
+
+/* Second pass: emit a designated initializer per syscall slot. */
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) [ nr ] = sym,
+#undef _ASM_X86_UNISTD_64_H
+
+typedef void (*sys_call_ptr_t)(void);
+
+extern void sys_ni_syscall(void);
+
+/*
+ * We used to have a trick here which made sure that holes in the
+ * x86_64 table were filled in with sys_ni_syscall, but a comment in
+ * unistd_64.h says that holes aren't allowed, so the trick was
+ * removed.
+ * The trick looked like this
+ *     [0 ... UM_NR_syscall_max] = &sys_ni_syscall
+ * before including unistd_64.h - the later initializations overwrote
+ * the sys_ni_syscall filler.
+ */
+
+sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+#include "../../x86/include/asm/unistd_64.h"
+};
+
+int syscall_table_size = sizeof(sys_call_table);
diff --git a/arch/um/sys-x86/syscalls_32.c b/arch/um/sys-x86/syscalls_32.c
new file mode 100644 (file)
index 0000000..70ca357
--- /dev/null
@@ -0,0 +1,66 @@
+/* 
+ * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/sched.h"
+#include "linux/shm.h"
+#include "linux/ipc.h"
+#include "linux/syscalls.h"
+#include "asm/mman.h"
+#include "asm/uaccess.h"
+#include "asm/unistd.h"
+
+/*
+ * The prototype on i386 is:
+ *
+ *     int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr)
+ *
+ * and the "newtls" arg. on i386 is read by copy_thread directly from the
+ * register saved on the stack.
+ */
+long sys_clone(unsigned long clone_flags, unsigned long newsp,
+              int __user *parent_tid, void *newtls, int __user *child_tid)
+{
+       long ret;
+
+       /* A NULL child stack means "share the caller's stack pointer". */
+       if (!newsp)
+               newsp = UPT_SP(&current->thread.regs.regs);
+
+       /* Flag the fork in progress so copy_thread knows the context. */
+       current->thread.forking = 1;
+       ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
+                     child_tid);
+       current->thread.forking = 0;
+       return ret;
+}
+
+/*
+ * Legacy 32-bit sigaction(): convert the old_sigaction user layout to
+ * and from k_sigaction around do_sigaction().  Returns 0 or -errno;
+ * -EFAULT if either user pointer is bad.
+ */
+long sys_sigaction(int sig, const struct old_sigaction __user *act,
+                        struct old_sigaction __user *oact)
+{
+       struct k_sigaction new_ka, old_ka;
+       int ret;
+
+       if (act) {
+               old_sigset_t mask;
+               if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+                       return -EFAULT;
+               /* Range already validated by access_ok() above. */
+               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+               __get_user(mask, &act->sa_mask);
+               /* Old-style sigaction only carries one mask word. */
+               siginitset(&new_ka.sa.sa_mask, mask);
+       }
+
+       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+       if (!ret && oact) {
+               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+                       return -EFAULT;
+               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+       }
+
+       return ret;
+}
diff --git a/arch/um/sys-x86/syscalls_64.c b/arch/um/sys-x86/syscalls_64.c
new file mode 100644 (file)
index 0000000..f3d82bb
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Copyright 2003 PathScale, Inc.
+ *
+ * Licensed under the GPL
+ */
+
+#include "linux/linkage.h"
+#include "linux/personality.h"
+#include "linux/utsname.h"
+#include "asm/prctl.h" /* XXX This should get the constants from libc */
+#include "asm/uaccess.h"
+#include "os.h"
+
+/*
+ * UML implementation of arch_prctl(): forward FS/GS base manipulation
+ * to the host process via os_arch_prctl(), keeping UML's saved register
+ * state in sync.  Returns 0 or a negative error.
+ */
+long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
+{
+       unsigned long *ptr = addr, tmp;
+       long ret;
+       int pid = task->mm->context.id.u.pid;
+
+       /*
+        * With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to
+        * be safe), we need to call arch_prctl on the host because
+        * setting %fs may result in something else happening (like a
+        * GDT or thread.fs being set instead).  So, we let the host
+        * fiddle the registers and thread struct and restore the
+        * registers afterwards.
+        *
+        * So, the saved registers are stored to the process (this
+        * needed because a stub may have been the last thing to run),
+        * arch_prctl is run on the host, then the registers are read
+        * back.
+        */
+       switch (code) {
+       case ARCH_SET_FS:
+       case ARCH_SET_GS:
+               ret = restore_registers(pid, &current->thread.regs.regs);
+               if (ret)
+                       return ret;
+               break;
+       case ARCH_GET_FS:
+       case ARCH_GET_GS:
+               /*
+                * With these two, we read to a local pointer and
+                * put_user it to the userspace pointer that we were
+                * given.  If addr isn't valid (because it hasn't been
+                * faulted in or is just bogus), we want put_user to
+                * fault it in (or return -EFAULT) instead of having
+                * the host return -EFAULT.
+                */
+               ptr = &tmp;
+       }
+
+       ret = os_arch_prctl(pid, code, ptr);
+       if (ret)
+               return ret;
+
+       /* Post-processing: sync saved state / copy results out. */
+       switch (code) {
+       case ARCH_SET_FS:
+               current->thread.arch.fs = (unsigned long) ptr;
+               ret = save_registers(pid, &current->thread.regs.regs);
+               break;
+       case ARCH_SET_GS:
+               ret = save_registers(pid, &current->thread.regs.regs);
+               break;
+       case ARCH_GET_FS:
+               ret = put_user(tmp, addr);
+               break;
+       case ARCH_GET_GS:
+               ret = put_user(tmp, addr);
+               break;
+       }
+
+       return ret;
+}
+
+/* Syscall entry point: arch_prctl() on behalf of the current task. */
+long sys_arch_prctl(int code, unsigned long addr)
+{
+       return arch_prctl(current, code, (unsigned long __user *) addr);
+}
+
+/*
+ * 64-bit clone(): no TLS argument here, unlike the i386 variant.
+ * A NULL child stack means "share the caller's stack pointer".
+ */
+long sys_clone(unsigned long clone_flags, unsigned long newsp,
+              void __user *parent_tid, void __user *child_tid)
+{
+       long ret;
+
+       if (!newsp)
+               newsp = UPT_SP(&current->thread.regs.regs);
+       /* Flag the fork in progress so copy_thread knows the context. */
+       current->thread.forking = 1;
+       ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
+                     child_tid);
+       current->thread.forking = 0;
+       return ret;
+}
+
+/*
+ * Context-switch hook: re-install the incoming task's FS base on the
+ * host.  Skipped for kernel threads (no mm) or if FS was never set.
+ */
+void arch_switch_to(struct task_struct *to)
+{
+       if ((to->thread.arch.fs == 0) || (to->mm == NULL))
+               return;
+
+       arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs);
+}
diff --git a/arch/um/sys-x86/sysrq_32.c b/arch/um/sys-x86/sysrq_32.c
new file mode 100644 (file)
index 0000000..171b3e9
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2001 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/kernel.h"
+#include "linux/smp.h"
+#include "linux/sched.h"
+#include "linux/kallsyms.h"
+#include "asm/ptrace.h"
+#include "sysrq.h"
+
+/* This is declared by <linux/sched.h> */
+/*
+ * Dump the i386-style register state for sysrq / oops output, then the
+ * kernel stack trace.  The user-mode SS:ESP pair is only shown when the
+ * saved CS indicates a user-space context (CPL != 0).
+ */
+void show_regs(struct pt_regs *regs)
+{
+        printk("\n");
+        printk("EIP: %04lx:[<%08lx>] CPU: %d %s", 
+              0xffff & PT_REGS_CS(regs), PT_REGS_IP(regs),
+              smp_processor_id(), print_tainted());
+        if (PT_REGS_CS(regs) & 3)
+                printk(" ESP: %04lx:%08lx", 0xffff & PT_REGS_SS(regs),
+                      PT_REGS_SP(regs));
+        printk(" EFLAGS: %08lx\n    %s\n", PT_REGS_EFLAGS(regs),
+              print_tainted());
+        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+                PT_REGS_EAX(regs), PT_REGS_EBX(regs), 
+              PT_REGS_ECX(regs), 
+              PT_REGS_EDX(regs));
+        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
+              PT_REGS_ESI(regs), PT_REGS_EDI(regs), 
+              PT_REGS_EBP(regs));
+        printk(" DS: %04lx ES: %04lx\n",
+              0xffff & PT_REGS_DS(regs), 
+              0xffff & PT_REGS_ES(regs));
+
+        /* Walk the current stack starting from this frame. */
+        show_trace(NULL, (unsigned long *) &regs);
+}
+
+/* Copied from i386. */
+/* True iff p lies inside tinfo's kernel stack (with room to read past it). */
+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+{
+       return  p > (void *)tinfo &&
+               p < (void *)tinfo + THREAD_SIZE - 3;
+}
+
+/* Adapted from i386 (we also print the address we read from). */
+/*
+ * Print a backtrace for one kernel stack.  With frame pointers the EBP
+ * chain is followed; otherwise every stack word that looks like a
+ * kernel text address is printed (more noise, no frame info needed).
+ */
+static inline unsigned long print_context_stack(struct thread_info *tinfo,
+                               unsigned long *stack, unsigned long ebp)
+{
+       unsigned long addr;
+
+#ifdef CONFIG_FRAME_POINTER
+       while (valid_stack_ptr(tinfo, (void *)ebp)) {
+               /* Return address sits just above the saved EBP. */
+               addr = *(unsigned long *)(ebp + 4);
+               printk("%08lx:  [<%08lx>]", ebp + 4, addr);
+               print_symbol(" %s", addr);
+               printk("\n");
+               ebp = *(unsigned long *)ebp;
+       }
+#else
+       while (valid_stack_ptr(tinfo, stack)) {
+               addr = *stack;
+               if (__kernel_text_address(addr)) {
+                       printk("%08lx:  [<%08lx>]", (unsigned long) stack, addr);
+                       print_symbol(" %s", addr);
+                       printk("\n");
+               }
+               stack++;
+       }
+#endif
+       return ebp;
+}
+
+/*
+ * Print a kernel stack trace for @task (NULL/current meaning "here").
+ * For the current task the live EBP is read; for others the saved EBP
+ * from the thread struct is used.
+ */
+void show_trace(struct task_struct* task, unsigned long * stack)
+{
+       unsigned long ebp;
+       struct thread_info *context;
+
+       /* Turn this into BUG_ON if possible. */
+       if (!stack) {
+               stack = (unsigned long*) &stack;
+               printk("show_trace: got NULL stack, implicit assumption task == current");
+               WARN_ON(1);
+       }
+
+       if (!task)
+               task = current;
+
+       if (task != current) {
+               ebp = (unsigned long) KSTK_EBP(task);
+       } else {
+               asm ("movl %%ebp, %0" : "=r" (ebp) : );
+       }
+
+       /* thread_info lives at the base of the THREAD_SIZE-aligned stack. */
+       context = (struct thread_info *)
+               ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+       print_context_stack(context, stack, ebp);
+
+       printk("\n");
+}
diff --git a/arch/um/sys-x86/sysrq_64.c b/arch/um/sys-x86/sysrq_64.c
new file mode 100644 (file)
index 0000000..f4f82be
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2003 PathScale, Inc.
+ *
+ * Licensed under the GPL
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/utsname.h>
+#include <asm/current.h>
+#include <asm/ptrace.h>
+#include "sysrq.h"
+
+/* Dump the x86-64 register state (without a stack trace). */
+void __show_regs(struct pt_regs *regs)
+{
+       printk("\n");
+       print_modules();
+       printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current),
+               current->comm, print_tainted(), init_utsname()->release);
+       printk(KERN_INFO "RIP: %04lx:[<%016lx>]\n", PT_REGS_CS(regs) & 0xffff,
+              PT_REGS_RIP(regs));
+       printk(KERN_INFO "RSP: %016lx  EFLAGS: %08lx\n", PT_REGS_RSP(regs),
+              PT_REGS_EFLAGS(regs));
+       printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
+              PT_REGS_RAX(regs), PT_REGS_RBX(regs), PT_REGS_RCX(regs));
+       printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
+              PT_REGS_RDX(regs), PT_REGS_RSI(regs), PT_REGS_RDI(regs));
+       printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
+              PT_REGS_RBP(regs), PT_REGS_R8(regs), PT_REGS_R9(regs));
+       printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
+              PT_REGS_R10(regs), PT_REGS_R11(regs), PT_REGS_R12(regs));
+       printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
+              PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs));
+}
+
+/* Register dump plus a stack trace, for sysrq / oops output. */
+void show_regs(struct pt_regs *regs)
+{
+       __show_regs(regs);
+       show_trace(current, (unsigned long *) &regs);
+}
diff --git a/arch/um/sys-x86/tls_32.c b/arch/um/sys-x86/tls_32.c
new file mode 100644 (file)
index 0000000..c6c7131
--- /dev/null
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
+ * Licensed under the GPL
+ */
+
+#include "linux/percpu.h"
+#include "linux/sched.h"
+#include "asm/uaccess.h"
+#include "os.h"
+#include "skas.h"
+#include "sysdep/tls.h"
+
+/*
+ * If needed we can detect when it's uninitialized.
+ *
+ * These are initialized in an initcall and unchanged thereafter.
+ */
+static int host_supports_tls = -1;
+int host_gdt_entry_tls_min;
+
+int do_set_thread_area(struct user_desc *info)
+{
+       int ret;
+       u32 cpu;
+
+       cpu = get_cpu();
+       ret = os_set_thread_area(info, userspace_pid[cpu]);
+       put_cpu();
+
+       if (ret)
+               printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
+                      "index = %d\n", ret, info->entry_number);
+
+       return ret;
+}
+
+int do_get_thread_area(struct user_desc *info)
+{
+       int ret;
+       u32 cpu;
+
+       cpu = get_cpu();
+       ret = os_get_thread_area(info, userspace_pid[cpu]);
+       put_cpu();
+
+       if (ret)
+               printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, "
+                      "index = %d\n", ret, info->entry_number);
+
+       return ret;
+}
+
+/*
+ * sys_get_thread_area: get a yet unused TLS descriptor index.
+ * XXX: Consider leaving one free slot for glibc usage at first place. This must
+ * be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
+ *
+ * Also, this must be tested when compiling in SKAS mode with dynamic linking
+ * and running against NPTL.
+ */
+static int get_free_idx(struct task_struct* task)
+{
+       struct thread_struct *t = &task->thread;
+       int idx;
+
+       if (!t->arch.tls_array)
+               return GDT_ENTRY_TLS_MIN;
+
+       for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+               if (!t->arch.tls_array[idx].present)
+                       return idx + GDT_ENTRY_TLS_MIN;
+       return -ESRCH;
+}
+
+static inline void clear_user_desc(struct user_desc* info)
+{
+       /* Postcondition: LDT_empty(info) returns true. */
+       memset(info, 0, sizeof(*info));
+
+       /*
+        * Check the LDT_empty or the i386 sys_get_thread_area code - we obtain
+        * indeed an empty user_desc.
+        */
+       info->read_exec_only = 1;
+       info->seg_not_present = 1;
+}
+
+#define O_FORCE 1
+
+static int load_TLS(int flags, struct task_struct *to)
+{
+       int ret = 0;
+       int idx;
+
+       for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
+               struct uml_tls_struct* curr =
+                       &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
+
+               /*
+                * Actually, now if it wasn't flushed it gets cleared and
+                * flushed to the host, which will clear it.
+                */
+               if (!curr->present) {
+                       if (!curr->flushed) {
+                               clear_user_desc(&curr->tls);
+                               curr->tls.entry_number = idx;
+                       } else {
+                               WARN_ON(!LDT_empty(&curr->tls));
+                               continue;
+                       }
+               }
+
+               if (!(flags & O_FORCE) && curr->flushed)
+                       continue;
+
+               ret = do_set_thread_area(&curr->tls);
+               if (ret)
+                       goto out;
+
+               curr->flushed = 1;
+       }
+out:
+       return ret;
+}
+
+/*
+ * Verify if we need to do a flush for the new process, i.e. if there are any
+ * present desc's, only if they haven't been flushed.
+ */
+static inline int needs_TLS_update(struct task_struct *task)
+{
+       int i;
+       int ret = 0;
+
+       for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
+               struct uml_tls_struct* curr =
+                       &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
+
+               /*
+                * Can't test curr->present, we may need to clear a descriptor
+                * which had a value.
+                */
+               if (curr->flushed)
+                       continue;
+               ret = 1;
+               break;
+       }
+       return ret;
+}
+
+/*
+ * On a newly forked process, the TLS descriptors haven't yet been flushed. So
+ * we mark them as such and the first switch_to will do the job.
+ */
+void clear_flushed_tls(struct task_struct *task)
+{
+       int i;
+
+       for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
+               struct uml_tls_struct* curr =
+                       &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
+
+               /*
+                * Still correct to do this, if it wasn't present on the host it
+                * will remain as flushed as it was.
+                */
+               if (!curr->present)
+                       continue;
+
+               curr->flushed = 0;
+       }
+}
+
+/*
+ * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
+ * common host process. So this is needed in SKAS0 too.
+ *
+ * However, if each thread had a different host process (and this was discussed
+ * for SMP support) this won't be needed.
+ *
+ * And this will not need be used when (and if) we'll add support to the host
+ * SKAS patch.
+ */
+
+int arch_switch_tls(struct task_struct *to)
+{
+       if (!host_supports_tls)
+               return 0;
+
+       /*
+        * We have no need whatsoever to switch TLS for kernel threads; beyond
+        * that, that would also result in us calling os_set_thread_area with
+        * userspace_pid[cpu] == 0, which gives an error.
+        */
+       if (likely(to->mm))
+               return load_TLS(O_FORCE, to);
+
+       return 0;
+}
+
+static int set_tls_entry(struct task_struct* task, struct user_desc *info,
+                        int idx, int flushed)
+{
+       struct thread_struct *t = &task->thread;
+
+       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+               return -EINVAL;
+
+       t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
+       t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
+       t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;
+
+       return 0;
+}
+
+int arch_copy_tls(struct task_struct *new)
+{
+       struct user_desc info;
+       int idx, ret = -EFAULT;
+
+       if (copy_from_user(&info,
+                          (void __user *) UPT_ESI(&new->thread.regs.regs),
+                          sizeof(info)))
+               goto out;
+
+       ret = -EINVAL;
+       if (LDT_empty(&info))
+               goto out;
+
+       idx = info.entry_number;
+
+       ret = set_tls_entry(new, &info, idx, 0);
+out:
+       return ret;
+}
+
+/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
+static int get_tls_entry(struct task_struct *task, struct user_desc *info,
+                        int idx)
+{
+       struct thread_struct *t = &task->thread;
+
+       if (!t->arch.tls_array)
+               goto clear;
+
+       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+               return -EINVAL;
+
+       if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
+               goto clear;
+
+       *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;
+
+out:
+       /*
+        * Temporary debugging check, to make sure that things have been
+        * flushed. This could be triggered if load_TLS() failed.
+        */
+       if (unlikely(task == current &&
+                    !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
+               printk(KERN_ERR "get_tls_entry: task with pid %d got here "
+                               "without flushed TLS.", current->pid);
+       }
+
+       return 0;
+clear:
+       /*
+        * When the TLS entry has not been set, the values read to user in the
+        * tls_array are 0 (because it's cleared at boot, see
+        * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
+        */
+       clear_user_desc(info);
+       info->entry_number = idx;
+       goto out;
+}
+
+int sys_set_thread_area(struct user_desc __user *user_desc)
+{
+       struct user_desc info;
+       int idx, ret;
+
+       if (!host_supports_tls)
+               return -ENOSYS;
+
+       if (copy_from_user(&info, user_desc, sizeof(info)))
+               return -EFAULT;
+
+       idx = info.entry_number;
+
+       if (idx == -1) {
+               idx = get_free_idx(current);
+               if (idx < 0)
+                       return idx;
+               info.entry_number = idx;
+               /* Tell the user which slot we chose for him.*/
+               if (put_user(idx, &user_desc->entry_number))
+                       return -EFAULT;
+       }
+
+       ret = do_set_thread_area(&info);
+       if (ret)
+               return ret;
+       return set_tls_entry(current, &info, idx, 1);
+}
+
+/*
+ * Perform set_thread_area on behalf of the traced child.
+ * Note: error handling is not done on the deferred load, and this differ from
+ * i386. However the only possible error are caused by bugs.
+ */
+int ptrace_set_thread_area(struct task_struct *child, int idx,
+                          struct user_desc __user *user_desc)
+{
+       struct user_desc info;
+
+       if (!host_supports_tls)
+               return -EIO;
+
+       if (copy_from_user(&info, user_desc, sizeof(info)))
+               return -EFAULT;
+
+       return set_tls_entry(child, &info, idx, 0);
+}
+
+int sys_get_thread_area(struct user_desc __user *user_desc)
+{
+       struct user_desc info;
+       int idx, ret;
+
+       if (!host_supports_tls)
+               return -ENOSYS;
+
+       if (get_user(idx, &user_desc->entry_number))
+               return -EFAULT;
+
+       ret = get_tls_entry(current, &info, idx);
+       if (ret < 0)
+               goto out;
+
+       if (copy_to_user(user_desc, &info, sizeof(info)))
+               ret = -EFAULT;
+
+out:
+       return ret;
+}
+
+/*
+ * Perform get_thread_area on behalf of the traced child.
+ */
+int ptrace_get_thread_area(struct task_struct *child, int idx,
+               struct user_desc __user *user_desc)
+{
+       struct user_desc info;
+       int ret;
+
+       if (!host_supports_tls)
+               return -EIO;
+
+       ret = get_tls_entry(child, &info, idx);
+       if (ret < 0)
+               goto out;
+
+       if (copy_to_user(user_desc, &info, sizeof(info)))
+               ret = -EFAULT;
+out:
+       return ret;
+}
+
+/*
+ * This code is really i386-only, but it detects and logs x86_64 GDT indexes
+ * if a 32-bit UML is running on a 64-bit host.
+ */
+static int __init __setup_host_supports_tls(void)
+{
+       check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
+       if (host_supports_tls) {
+               printk(KERN_INFO "Host TLS support detected\n");
+               printk(KERN_INFO "Detected host type: ");
+               switch (host_gdt_entry_tls_min) {
+               case GDT_ENTRY_TLS_MIN_I386:
+                       printk(KERN_CONT "i386");
+                       break;
+               case GDT_ENTRY_TLS_MIN_X86_64:
+                       printk(KERN_CONT "x86_64");
+                       break;
+               }
+               printk(KERN_CONT " (GDT indexes %d to %d)\n",
+                      host_gdt_entry_tls_min,
+                      host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
+       } else
+               printk(KERN_ERR "  Host TLS support NOT detected! "
+                               "TLS support inside UML will not work\n");
+       return 0;
+}
+
+__initcall(__setup_host_supports_tls);
diff --git a/arch/um/sys-x86/tls_64.c b/arch/um/sys-x86/tls_64.c
new file mode 100644 (file)
index 0000000..f7ba462
--- /dev/null
@@ -0,0 +1,17 @@
+#include "linux/sched.h"
+
+void clear_flushed_tls(struct task_struct *task)
+{
+}
+
+int arch_copy_tls(struct task_struct *t)
+{
+       /*
+        * If CLONE_SETTLS is set, we need to save the thread id
+        * (which is argument 5, child_tid, of clone) so it can be set
+        * during context switches.
+        */
+       t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)];
+
+       return 0;
+}
diff --git a/arch/um/sys-x86/user-offsets.c b/arch/um/sys-x86/user-offsets.c
new file mode 100644 (file)
index 0000000..718f0c0
--- /dev/null
@@ -0,0 +1,86 @@
+#include <stdio.h>
+#include <stddef.h>
+#include <signal.h>
+#include <sys/poll.h>
+#include <sys/mman.h>
+#include <sys/user.h>
+#define __FRAME_OFFSETS
+#include <asm/ptrace.h>
+#include <asm/types.h>
+
+#define DEFINE(sym, val) \
+       asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define DEFINE_LONGS(sym, val) \
+       asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
+
+#define OFFSET(sym, str, mem) \
+       DEFINE(sym, offsetof(struct str, mem));
+
+void foo(void)
+{
+       OFFSET(HOST_SC_TRAPNO, sigcontext, trapno);
+       OFFSET(HOST_SC_ERR, sigcontext, err);
+       OFFSET(HOST_SC_CR2, sigcontext, cr2);
+
+#ifdef __i386__
+       DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_fpregs_struct));
+       DEFINE_LONGS(HOST_FPX_SIZE, sizeof(struct user_fpxregs_struct));
+
+       DEFINE(HOST_IP, EIP);
+       DEFINE(HOST_SP, UESP);
+       DEFINE(HOST_EFLAGS, EFL);
+       DEFINE(HOST_EAX, EAX);
+       DEFINE(HOST_EBX, EBX);
+       DEFINE(HOST_ECX, ECX);
+       DEFINE(HOST_EDX, EDX);
+       DEFINE(HOST_ESI, ESI);
+       DEFINE(HOST_EDI, EDI);
+       DEFINE(HOST_EBP, EBP);
+       DEFINE(HOST_CS, CS);
+       DEFINE(HOST_SS, SS);
+       DEFINE(HOST_DS, DS);
+       DEFINE(HOST_FS, FS);
+       DEFINE(HOST_ES, ES);
+       DEFINE(HOST_GS, GS);
+#else
+       DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
+       DEFINE_LONGS(HOST_RBX, RBX);
+       DEFINE_LONGS(HOST_RCX, RCX);
+       DEFINE_LONGS(HOST_RDI, RDI);
+       DEFINE_LONGS(HOST_RSI, RSI);
+       DEFINE_LONGS(HOST_RDX, RDX);
+       DEFINE_LONGS(HOST_RBP, RBP);
+       DEFINE_LONGS(HOST_RAX, RAX);
+       DEFINE_LONGS(HOST_R8, R8);
+       DEFINE_LONGS(HOST_R9, R9);
+       DEFINE_LONGS(HOST_R10, R10);
+       DEFINE_LONGS(HOST_R11, R11);
+       DEFINE_LONGS(HOST_R12, R12);
+       DEFINE_LONGS(HOST_R13, R13);
+       DEFINE_LONGS(HOST_R14, R14);
+       DEFINE_LONGS(HOST_R15, R15);
+       DEFINE_LONGS(HOST_ORIG_RAX, ORIG_RAX);
+       DEFINE_LONGS(HOST_CS, CS);
+       DEFINE_LONGS(HOST_SS, SS);
+       DEFINE_LONGS(HOST_EFLAGS, EFLAGS);
+#if 0
+       DEFINE_LONGS(HOST_FS, FS);
+       DEFINE_LONGS(HOST_GS, GS);
+       DEFINE_LONGS(HOST_DS, DS);
+       DEFINE_LONGS(HOST_ES, ES);
+#endif
+
+       DEFINE_LONGS(HOST_IP, RIP);
+       DEFINE_LONGS(HOST_SP, RSP);
+#endif
+
+       DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
+       DEFINE(UM_POLLIN, POLLIN);
+       DEFINE(UM_POLLPRI, POLLPRI);
+       DEFINE(UM_POLLOUT, POLLOUT);
+
+       DEFINE(UM_PROT_READ, PROT_READ);
+       DEFINE(UM_PROT_WRITE, PROT_WRITE);
+       DEFINE(UM_PROT_EXEC, PROT_EXEC);
+}
diff --git a/arch/um/sys-x86/vdso/Makefile b/arch/um/sys-x86/vdso/Makefile
new file mode 100644 (file)
index 0000000..5dffe6d
--- /dev/null
@@ -0,0 +1,90 @@
+#
+# Building vDSO images for x86.
+#
+
+VDSO64-y               := y
+
+vdso-install-$(VDSO64-y)       += vdso.so
+
+
+# files to link into the vdso
+vobjs-y := vdso-note.o um_vdso.o
+
+# files to link into kernel
+obj-$(VDSO64-y)                        += vdso.o vma.o
+
+vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
+
+$(obj)/vdso.o: $(obj)/vdso.so
+
+targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
+
+export CPPFLAGS_vdso.lds += -P -C
+
+VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+       -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
+
+$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
+
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+       $(call if_changed,vdso)
+
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+       $(call if_changed,objcopy)
+
+#
+# Don't omit frame pointers for ease of userspace debugging, but do
+# optimize sibling calls.
+#
+CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
+       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+       -fno-omit-frame-pointer -foptimize-sibling-calls
+
+$(vobjs): KBUILD_CFLAGS += $(CFL)
+
+#
+# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
+#
+CFLAGS_REMOVE_vdso-note.o = -pg
+CFLAGS_REMOVE_um_vdso.o = -pg
+
+targets += vdso-syms.lds
+obj-$(VDSO64-y)                        += vdso-syms.lds
+
+#
+# Match symbols in the DSO that look like VDSO*; produce a file of constants.
+#
+sed-vdsosym := -e 's/^00*/0/' \
+       -e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
+quiet_cmd_vdsosym = VDSOSYM $@
+define cmd_vdsosym
+       $(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
+endef
+
+$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
+       $(call if_changed,vdsosym)
+
+#
+# The DSO images are built using a special linker script.
+#
+quiet_cmd_vdso = VDSO    $@
+      cmd_vdso = $(CC) -nostdlib -o $@ \
+                      $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
+                      -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+                sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+
+VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+GCOV_PROFILE := n
+
+#
+# Install the unstripped copy of vdso*.so listed in $(vdso-install-y).
+#
+quiet_cmd_vdso_install = INSTALL $@
+      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+$(vdso-install-y): %.so: $(obj)/%.so.dbg FORCE
+       @mkdir -p $(MODLIB)/vdso
+       $(call cmd,vdso_install)
+
+PHONY += vdso_install $(vdso-install-y)
+vdso_install: $(vdso-install-y)
diff --git a/arch/um/sys-x86/vdso/checkundef.sh b/arch/um/sys-x86/vdso/checkundef.sh
new file mode 100644 (file)
index 0000000..7ee90a9
--- /dev/null
@@ -0,0 +1,10 @@
+#!/bin/sh
+nm="$1"
+file="$2"
+$nm "$file" | grep '^ *U' > /dev/null 2>&1
+if [ $? -eq 1 ]; then
+    exit 0
+else
+    echo "$file: undefined symbols found" >&2
+    exit 1
+fi
diff --git a/arch/um/sys-x86/vdso/um_vdso.c b/arch/um/sys-x86/vdso/um_vdso.c
new file mode 100644 (file)
index 0000000..7c441b5
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This vDSO turns all calls into a syscall so that UML can trap them.
+ */
+
+
+/* Disable profiling for userspace code */
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/time.h>
+#include <linux/getcpu.h>
+#include <asm/unistd.h>
+
+int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+{
+       long ret;
+
+       asm("syscall" : "=a" (ret) :
+               "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+
+       return ret;
+}
+int clock_gettime(clockid_t, struct timespec *)
+       __attribute__((weak, alias("__vdso_clock_gettime")));
+
+int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+       long ret;
+
+       asm("syscall" : "=a" (ret) :
+               "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+
+       return ret;
+}
+int gettimeofday(struct timeval *, struct timezone *)
+       __attribute__((weak, alias("__vdso_gettimeofday")));
+
+time_t __vdso_time(time_t *t)
+{
+       long secs;
+
+       asm volatile("syscall"
+               : "=a" (secs)
+               : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
+
+       return secs;
+}
+int time(time_t *t) __attribute__((weak, alias("__vdso_time")));
+
+long
+__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
+{
+       /*
+        * UML does not support SMP, we can cheat here. :)
+        */
+
+       if (cpu)
+               *cpu = 0;
+       if (node)
+               *node = 0;
+
+       return 0;
+}
+
+long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+       __attribute__((weak, alias("__vdso_getcpu")));
diff --git a/arch/um/sys-x86/vdso/vdso-layout.lds.S b/arch/um/sys-x86/vdso/vdso-layout.lds.S
new file mode 100644 (file)
index 0000000..634a2cf
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Linker script for vDSO.  This is an ELF shared object prelinked to
+ * its virtual address, and with only one read-only segment.
+ * This script controls its layout.
+ */
+
+SECTIONS
+{
+       . = VDSO_PRELINK + SIZEOF_HEADERS;
+
+       .hash           : { *(.hash) }                  :text
+       .gnu.hash       : { *(.gnu.hash) }
+       .dynsym         : { *(.dynsym) }
+       .dynstr         : { *(.dynstr) }
+       .gnu.version    : { *(.gnu.version) }
+       .gnu.version_d  : { *(.gnu.version_d) }
+       .gnu.version_r  : { *(.gnu.version_r) }
+
+       .note           : { *(.note.*) }                :text   :note
+
+       .eh_frame_hdr   : { *(.eh_frame_hdr) }          :text   :eh_frame_hdr
+       .eh_frame       : { KEEP (*(.eh_frame)) }       :text
+
+       .dynamic        : { *(.dynamic) }               :text   :dynamic
+
+       .rodata         : { *(.rodata*) }               :text
+       .data           : {
+             *(.data*)
+             *(.sdata*)
+             *(.got.plt) *(.got)
+             *(.gnu.linkonce.d.*)
+             *(.bss*)
+             *(.dynbss*)
+             *(.gnu.linkonce.b.*)
+       }
+
+       .altinstructions        : { *(.altinstructions) }
+       .altinstr_replacement   : { *(.altinstr_replacement) }
+
+       /*
+        * Align the actual code well away from the non-instruction data.
+        * This is the best thing for the I-cache.
+        */
+       . = ALIGN(0x100);
+
+       .text           : { *(.text*) }                 :text   =0x90909090
+}
+
+/*
+ * Very old versions of ld do not recognize this name token; use the constant.
+ */
+#define PT_GNU_EH_FRAME        0x6474e550
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+       text            PT_LOAD         FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+       dynamic         PT_DYNAMIC      FLAGS(4);               /* PF_R */
+       note            PT_NOTE         FLAGS(4);               /* PF_R */
+       eh_frame_hdr    PT_GNU_EH_FRAME;
+}
diff --git a/arch/um/sys-x86/vdso/vdso-note.S b/arch/um/sys-x86/vdso/vdso-note.S
new file mode 100644 (file)
index 0000000..79a071e
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+       .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/um/sys-x86/vdso/vdso.S b/arch/um/sys-x86/vdso/vdso.S
new file mode 100644 (file)
index 0000000..03b0532
--- /dev/null
@@ -0,0 +1,10 @@
+#include <linux/init.h>
+
+__INITDATA
+
+       .globl vdso_start, vdso_end
+vdso_start:
+       .incbin "arch/um/sys-x86/vdso/vdso.so"
+vdso_end:
+
+__FINIT
diff --git a/arch/um/sys-x86/vdso/vdso.lds.S b/arch/um/sys-x86/vdso/vdso.lds.S
new file mode 100644 (file)
index 0000000..b96b267
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Linker script for 64-bit vDSO.
+ * We #include the file to define the layout details.
+ * Here we only choose the prelinked virtual address.
+ *
+ * This file defines the version script giving the user-exported symbols in
+ * the DSO.  We can define local symbols here called VDSO* to make their
+ * values visible using the asm-x86/vdso.h macros from the kernel proper.
+ */
+
+#define VDSO_PRELINK 0xffffffffff700000
+#include "vdso-layout.lds.S"
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION {
+       LINUX_2.6 {
+       global:
+               clock_gettime;
+               __vdso_clock_gettime;
+               gettimeofday;
+               __vdso_gettimeofday;
+               getcpu;
+               __vdso_getcpu;
+               time;
+               __vdso_time;
+       local: *;
+       };
+}
+
+VDSO64_PRELINK = VDSO_PRELINK;
diff --git a/arch/um/sys-x86/vdso/vma.c b/arch/um/sys-x86/vdso/vma.c
new file mode 100644 (file)
index 0000000..9495c8d
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <linux/init.h>
+
+unsigned int __read_mostly vdso_enabled = 1;
+unsigned long um_vdso_addr;
+
+extern unsigned long task_size;
+extern char vdso_start[], vdso_end[];
+
+static struct page **vdsop;
+
+static int __init init_vdso(void)
+{
+       struct page *um_vdso;
+
+       BUG_ON(vdso_end - vdso_start > PAGE_SIZE);
+
+       um_vdso_addr = task_size - PAGE_SIZE;
+
+       vdsop = kmalloc(GFP_KERNEL, sizeof(struct page *));
+       if (!vdsop)
+               goto oom;
+
+       um_vdso = alloc_page(GFP_KERNEL);
+       if (!um_vdso) {
+               kfree(vdsop);
+
+               goto oom;
+       }
+
+       copy_page(page_address(um_vdso), vdso_start);
+       *vdsop = um_vdso;
+
+       return 0;
+
+oom:
+       printk(KERN_ERR "Cannot allocate vdso\n");
+       vdso_enabled = 0;
+
+       return -ENOMEM;
+}
+subsys_initcall(init_vdso);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+       int err;
+       struct mm_struct *mm = current->mm;
+
+       if (!vdso_enabled)
+               return 0;
+
+       down_write(&mm->mmap_sem);
+
+       err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
+               VM_READ|VM_EXEC|
+               VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+               VM_ALWAYSDUMP,
+               vdsop);
+
+       up_write(&mm->mmap_sem);
+
+       return err;
+}
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
deleted file mode 100644 (file)
index 0971846..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Copyright 2003 PathScale, Inc.
-#
-# Licensed under the GPL
-#
-
-obj-y = bug.o bugs.o delay.o fault.o ldt.o ptrace.o ptrace_user.o mem.o \
-       setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
-       sysrq.o ksyms.o tls.o
-
-obj-y += vdso/
-
-subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
-               lib/rwsem.o
-subarch-obj-$(CONFIG_MODULES) += kernel/module.o
-
-ldt-y = ../sys-i386/ldt.o
-
-USER_OBJS := ptrace_user.o
-
-extra-y += user-offsets.s
-$(obj)/user-offsets.s: c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS)
-
-UNPROFILE_OBJS := stub_segv.o
-CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING)
-
-include arch/um/scripts/Makefile.rules
diff --git a/arch/um/sys-x86_64/bug.c b/arch/um/sys-x86_64/bug.c
deleted file mode 100644 (file)
index e8034e3..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL V2
- */
-
-#include <linux/uaccess.h>
-
-/*
- * Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because
- * that's not relevant in skas mode.
- */
-
-int is_valid_bugaddr(unsigned long eip)
-{
-       unsigned short ud2;
-
-       if (probe_kernel_address((unsigned short __user *)eip, ud2))
-               return 0;
-
-       return ud2 == 0x0b0f;
-}
diff --git a/arch/um/sys-x86_64/bugs.c b/arch/um/sys-x86_64/bugs.c
deleted file mode 100644 (file)
index 44e02ba..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- *
- * Licensed under the GPL
- */
-
-#include "sysdep/ptrace.h"
-
-void arch_check_bugs(void)
-{
-}
-
-void arch_examine_signal(int sig, struct uml_pt_regs *regs)
-{
-}
diff --git a/arch/um/sys-x86_64/delay.c b/arch/um/sys-x86_64/delay.c
deleted file mode 100644 (file)
index f3fe1a6..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
- * Mostly copied from arch/x86/lib/delay.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <asm/param.h>
-
-void __delay(unsigned long loops)
-{
-       asm volatile(
-               "test %0,%0\n"
-               "jz 3f\n"
-               "jmp 1f\n"
-
-               ".align 16\n"
-               "1: jmp 2f\n"
-
-               ".align 16\n"
-               "2: dec %0\n"
-               " jnz 2b\n"
-               "3: dec %0\n"
-
-               : /* we don't need output */
-               : "a" (loops)
-       );
-}
-EXPORT_SYMBOL(__delay);
-
-inline void __const_udelay(unsigned long xloops)
-{
-       int d0;
-
-       xloops *= 4;
-       asm("mull %%edx"
-               : "=d" (xloops), "=&a" (d0)
-               : "1" (xloops), "0"
-               (loops_per_jiffy * (HZ/4)));
-
-       __delay(++xloops);
-}
-EXPORT_SYMBOL(__const_udelay);
-
-void __udelay(unsigned long usecs)
-{
-       __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
-}
-EXPORT_SYMBOL(__udelay);
-
-void __ndelay(unsigned long nsecs)
-{
-       __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
-}
-EXPORT_SYMBOL(__ndelay);
diff --git a/arch/um/sys-x86_64/fault.c b/arch/um/sys-x86_64/fault.c
deleted file mode 100644 (file)
index ce85117..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- *
- * Licensed under the GPL
- */
-
-#include "sysdep/ptrace.h"
-
-/* These two are from asm-um/uaccess.h and linux/module.h, check them. */
-struct exception_table_entry
-{
-       unsigned long insn;
-       unsigned long fixup;
-};
-
-const struct exception_table_entry *search_exception_tables(unsigned long add);
-
-int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
-{
-       const struct exception_table_entry *fixup;
-
-       fixup = search_exception_tables(address);
-       if (fixup != 0) {
-               UPT_IP(regs) = fixup->fixup;
-               return 1;
-       }
-       return 0;
-}
diff --git a/arch/um/sys-x86_64/ksyms.c b/arch/um/sys-x86_64/ksyms.c
deleted file mode 100644 (file)
index 1db2fce..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#include <linux/module.h>
-#include <asm/string.h>
-#include <asm/checksum.h>
-
-/*XXX: we need them because they would be exported by x86_64 */
-#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
-EXPORT_SYMBOL(memcpy);
-#else
-EXPORT_SYMBOL(__memcpy);
-#endif
-EXPORT_SYMBOL(csum_partial);
diff --git a/arch/um/sys-x86_64/mem.c b/arch/um/sys-x86_64/mem.c
deleted file mode 100644 (file)
index 5465187..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#include "linux/mm.h"
-#include "asm/page.h"
-#include "asm/mman.h"
-
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-       if (vma->vm_mm && vma->vm_start == um_vdso_addr)
-               return "[vdso]";
-
-       return NULL;
-}
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-       return NULL;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-       return 0;
-}
-
-int in_gate_area_no_mm(unsigned long addr)
-{
-       return 0;
-}
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c
deleted file mode 100644 (file)
index 44e68e0..0000000
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- *
- * Licensed under the GPL
- */
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#define __FRAME_OFFSETS
-#include <asm/ptrace.h>
-#include <asm/uaccess.h>
-
-/*
- * determines which flags the user has access to.
- * 1 = access 0 = no access
- */
-#define FLAG_MASK 0x44dd5UL
-
-static const int reg_offsets[] =
-{
-       [R8 >> 3] = HOST_R8,
-       [R9 >> 3] = HOST_R9,
-       [R10 >> 3] = HOST_R10,
-       [R11 >> 3] = HOST_R11,
-       [R12 >> 3] = HOST_R12,
-       [R13 >> 3] = HOST_R13,
-       [R14 >> 3] = HOST_R14,
-       [R15 >> 3] = HOST_R15,
-       [RIP >> 3] = HOST_IP,
-       [RSP >> 3] = HOST_SP,
-       [RAX >> 3] = HOST_RAX,
-       [RBX >> 3] = HOST_RBX,
-       [RCX >> 3] = HOST_RCX,
-       [RDX >> 3] = HOST_RDX,
-       [RSI >> 3] = HOST_RSI,
-       [RDI >> 3] = HOST_RDI,
-       [RBP >> 3] = HOST_RBP,
-       [CS >> 3] = HOST_CS,
-       [SS >> 3] = HOST_SS,
-       [FS_BASE >> 3] = HOST_FS_BASE,
-       [GS_BASE >> 3] = HOST_GS_BASE,
-       [DS >> 3] = HOST_DS,
-       [ES >> 3] = HOST_ES,
-       [FS >> 3] = HOST_FS,
-       [GS >> 3] = HOST_GS,
-       [EFLAGS >> 3] = HOST_EFLAGS,
-       [ORIG_RAX >> 3] = HOST_ORIG_RAX,
-};
-
-int putreg(struct task_struct *child, int regno, unsigned long value)
-{
-#ifdef TIF_IA32
-       /*
-        * Some code in the 64bit emulation may not be 64bit clean.
-        * Don't take any chances.
-        */
-       if (test_tsk_thread_flag(child, TIF_IA32))
-               value &= 0xffffffff;
-#endif
-       switch (regno) {
-       case R8:
-       case R9:
-       case R10:
-       case R11:
-       case R12:
-       case R13:
-       case R14:
-       case R15:
-       case RIP:
-       case RSP:
-       case RAX:
-       case RBX:
-       case RCX:
-       case RDX:
-       case RSI:
-       case RDI:
-       case RBP:
-       case ORIG_RAX:
-               break;
-
-       case FS:
-       case GS:
-       case DS:
-       case ES:
-       case SS:
-       case CS:
-               if (value && (value & 3) != 3)
-                       return -EIO;
-               value &= 0xffff;
-               break;
-
-       case FS_BASE:
-       case GS_BASE:
-               if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
-                       return -EIO;
-               break;
-
-       case EFLAGS:
-               value &= FLAG_MASK;
-               child->thread.regs.regs.gp[HOST_EFLAGS] |= value;
-               return 0;
-
-       default:
-               panic("Bad register in putreg(): %d\n", regno);
-       }
-
-       child->thread.regs.regs.gp[reg_offsets[regno >> 3]] = value;
-       return 0;
-}
-
-int poke_user(struct task_struct *child, long addr, long data)
-{
-       if ((addr & 3) || addr < 0)
-               return -EIO;
-
-       if (addr < MAX_REG_OFFSET)
-               return putreg(child, addr, data);
-       else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
-               (addr <= offsetof(struct user, u_debugreg[7]))) {
-               addr -= offsetof(struct user, u_debugreg[0]);
-               addr = addr >> 2;
-               if ((addr == 4) || (addr == 5))
-                       return -EIO;
-               child->thread.arch.debugregs[addr] = data;
-               return 0;
-       }
-       return -EIO;
-}
-
-unsigned long getreg(struct task_struct *child, int regno)
-{
-       unsigned long mask = ~0UL;
-#ifdef TIF_IA32
-       if (test_tsk_thread_flag(child, TIF_IA32))
-               mask = 0xffffffff;
-#endif
-       switch (regno) {
-       case R8:
-       case R9:
-       case R10:
-       case R11:
-       case R12:
-       case R13:
-       case R14:
-       case R15:
-       case RIP:
-       case RSP:
-       case RAX:
-       case RBX:
-       case RCX:
-       case RDX:
-       case RSI:
-       case RDI:
-       case RBP:
-       case ORIG_RAX:
-       case EFLAGS:
-       case FS_BASE:
-       case GS_BASE:
-               break;
-       case FS:
-       case GS:
-       case DS:
-       case ES:
-       case SS:
-       case CS:
-               mask = 0xffff;
-               break;
-       default:
-               panic("Bad register in getreg: %d\n", regno);
-       }
-       return mask & child->thread.regs.regs.gp[reg_offsets[regno >> 3]];
-}
-
-int peek_user(struct task_struct *child, long addr, long data)
-{
-       /* read the word at location addr in the USER area. */
-       unsigned long tmp;
-
-       if ((addr & 3) || addr < 0)
-               return -EIO;
-
-       tmp = 0;  /* Default return condition */
-       if (addr < MAX_REG_OFFSET)
-               tmp = getreg(child, addr);
-       else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
-               (addr <= offsetof(struct user, u_debugreg[7]))) {
-               addr -= offsetof(struct user, u_debugreg[0]);
-               addr = addr >> 2;
-               tmp = child->thread.arch.debugregs[addr];
-       }
-       return put_user(tmp, (unsigned long *) data);
-}
-
-/* XXX Mostly copied from sys-i386 */
-int is_syscall(unsigned long addr)
-{
-       unsigned short instr;
-       int n;
-
-       n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
-       if (n) {
-               /*
-                * access_process_vm() grants access to vsyscall and stub,
-                * while copy_from_user doesn't. Maybe access_process_vm is
-                * slow, but that doesn't matter, since it will be called only
-                * in case of singlestepping, if copy_from_user failed.
-                */
-               n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
-               if (n != sizeof(instr)) {
-                       printk("is_syscall : failed to read instruction from "
-                              "0x%lx\n", addr);
-                       return 1;
-               }
-       }
-       /* sysenter */
-       return instr == 0x050f;
-}
-
-static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
-{
-       int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
-       long fpregs[HOST_FP_SIZE];
-
-       BUG_ON(sizeof(*buf) != sizeof(fpregs));
-       err = save_fp_registers(userspace_pid[cpu], fpregs);
-       if (err)
-               return err;
-
-       n = copy_to_user(buf, fpregs, sizeof(fpregs));
-       if (n > 0)
-               return -EFAULT;
-
-       return n;
-}
-
-static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
-{
-       int n, cpu = ((struct thread_info *) child->stack)->cpu;
-       long fpregs[HOST_FP_SIZE];
-
-       BUG_ON(sizeof(*buf) != sizeof(fpregs));
-       n = copy_from_user(fpregs, buf, sizeof(fpregs));
-       if (n > 0)
-               return -EFAULT;
-
-       return restore_fp_registers(userspace_pid[cpu], fpregs);
-}
-
-long subarch_ptrace(struct task_struct *child, long request,
-                   unsigned long addr, unsigned long data)
-{
-       int ret = -EIO;
-       void __user *datap = (void __user *) data;
-
-       switch (request) {
-       case PTRACE_GETFPREGS: /* Get the child FPU state. */
-               ret = get_fpregs(datap, child);
-               break;
-       case PTRACE_SETFPREGS: /* Set the child FPU state. */
-               ret = set_fpregs(datap, child);
-               break;
-       case PTRACE_ARCH_PRCTL:
-               /* XXX Calls ptrace on the host - needs some SMP thinking */
-               ret = arch_prctl(child, data, (void __user *) addr);
-               break;
-       }
-
-       return ret;
-}
diff --git a/arch/um/sys-x86_64/ptrace_user.c b/arch/um/sys-x86_64/ptrace_user.c
deleted file mode 100644 (file)
index c57a496..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- *
- * Licensed under the GPL
- */
-
-#include <errno.h>
-#include "ptrace_user.h"
-
-int ptrace_getregs(long pid, unsigned long *regs_out)
-{
-       if (ptrace(PTRACE_GETREGS, pid, 0, regs_out) < 0)
-               return -errno;
-       return(0);
-}
-
-int ptrace_setregs(long pid, unsigned long *regs_out)
-{
-       if (ptrace(PTRACE_SETREGS, pid, 0, regs_out) < 0)
-               return -errno;
-       return(0);
-}
diff --git a/arch/um/sys-x86_64/setjmp.S b/arch/um/sys-x86_64/setjmp.S
deleted file mode 100644 (file)
index 45f547b..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-# arch/x86_64/setjmp.S
-#
-# setjmp/longjmp for the x86-64 architecture
-#
-
-#
-# The jmp_buf is assumed to contain the following, in order:
-#      %rbx
-#      %rsp (post-return)
-#      %rbp
-#      %r12
-#      %r13
-#      %r14
-#      %r15
-#      <return address>
-#
-
-       .text
-       .align 4
-       .globl setjmp
-       .type setjmp, @function
-setjmp:
-       pop  %rsi                       # Return address, and adjust the stack
-       xorl %eax,%eax                  # Return value
-       movq %rbx,(%rdi)
-       movq %rsp,8(%rdi)               # Post-return %rsp!
-       push %rsi                       # Make the call/return stack happy
-       movq %rbp,16(%rdi)
-       movq %r12,24(%rdi)
-       movq %r13,32(%rdi)
-       movq %r14,40(%rdi)
-       movq %r15,48(%rdi)
-       movq %rsi,56(%rdi)              # Return address
-       ret
-
-       .size setjmp,.-setjmp
-
-       .text
-       .align 4
-       .globl longjmp
-       .type longjmp, @function
-longjmp:
-       movl %esi,%eax                  # Return value (int)
-       movq (%rdi),%rbx
-       movq 8(%rdi),%rsp
-       movq 16(%rdi),%rbp
-       movq 24(%rdi),%r12
-       movq 32(%rdi),%r13
-       movq 40(%rdi),%r14
-       movq 48(%rdi),%r15
-       jmp *56(%rdi)
-
-       .size longjmp,.-longjmp
diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c
deleted file mode 100644 (file)
index 255b2ca..0000000
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Copyright (C) 2003 PathScale, Inc.
- * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <linux/personality.h>
-#include <linux/ptrace.h>
-#include <linux/kernel.h>
-#include <asm/unistd.h>
-#include <asm/uaccess.h>
-#include <asm/ucontext.h>
-#include "frame_kern.h"
-#include "skas.h"
-
-static int copy_sc_from_user(struct pt_regs *regs,
-                            struct sigcontext __user *from)
-{
-       struct sigcontext sc;
-       struct user_i387_struct fp;
-       void __user *buf;
-       int err;
-
-       err = copy_from_user(&sc, from, sizeof(sc));
-       if (err)
-               return err;
-
-#define GETREG(regno, regname) regs->regs.gp[HOST_##regno] = sc.regname
-
-       GETREG(R8, r8);
-       GETREG(R9, r9);
-       GETREG(R10, r10);
-       GETREG(R11, r11);
-       GETREG(R12, r12);
-       GETREG(R13, r13);
-       GETREG(R14, r14);
-       GETREG(R15, r15);
-       GETREG(RDI, di);
-       GETREG(RSI, si);
-       GETREG(RBP, bp);
-       GETREG(RBX, bx);
-       GETREG(RDX, dx);
-       GETREG(RAX, ax);
-       GETREG(RCX, cx);
-       GETREG(SP, sp);
-       GETREG(IP, ip);
-       GETREG(EFLAGS, flags);
-       GETREG(CS, cs);
-#undef GETREG
-
-       buf = sc.fpstate;
-
-       err = copy_from_user(&fp, buf, sizeof(struct user_i387_struct));
-       if (err)
-               return 1;
-
-       err = restore_fp_registers(userspace_pid[current_thread_info()->cpu],
-                                  (unsigned long *) &fp);
-       if (err < 0) {
-               printk(KERN_ERR "copy_sc_from_user - "
-                      "restore_fp_registers failed, errno = %d\n",
-                      -err);
-               return 1;
-       }
-
-       return 0;
-}
-
-static int copy_sc_to_user(struct sigcontext __user *to,
-                          struct _fpstate __user *to_fp, struct pt_regs *regs,
-                          unsigned long mask, unsigned long sp)
-{
-       struct faultinfo * fi = &current->thread.arch.faultinfo;
-       struct sigcontext sc;
-       struct user_i387_struct fp;
-       int err = 0;
-       memset(&sc, 0, sizeof(struct sigcontext));
-
-#define PUTREG(regno, regname) sc.regname = regs->regs.gp[HOST_##regno]
-
-       PUTREG(RDI, di);
-       PUTREG(RSI, si);
-       PUTREG(RBP, bp);
-       /*
-        * Must use original RSP, which is passed in, rather than what's in
-        * signal frame.
-        */
-       sc.sp = sp;
-       PUTREG(RBX, bx);
-       PUTREG(RDX, dx);
-       PUTREG(RCX, cx);
-       PUTREG(RAX, ax);
-       PUTREG(R8, r8);
-       PUTREG(R9, r9);
-       PUTREG(R10, r10);
-       PUTREG(R11, r11);
-       PUTREG(R12, r12);
-       PUTREG(R13, r13);
-       PUTREG(R14, r14);
-       PUTREG(R15, r15);
-       PUTREG(CS, cs); /* XXX x86_64 doesn't do this */
-
-       sc.cr2 = fi->cr2;
-       sc.err = fi->error_code;
-       sc.trapno = fi->trap_no;
-
-       PUTREG(IP, ip);
-       PUTREG(EFLAGS, flags);
-#undef PUTREG
-
-       sc.oldmask = mask;
-
-       err = copy_to_user(to, &sc, sizeof(struct sigcontext));
-       if (err)
-               return 1;
-
-       err = save_fp_registers(userspace_pid[current_thread_info()->cpu],
-                               (unsigned long *) &fp);
-       if (err < 0) {
-               printk(KERN_ERR "copy_sc_from_user - restore_fp_registers "
-                      "failed, errno = %d\n", -err);
-               return 1;
-       }
-
-       if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
-               return 1;
-
-       return err;
-}
-
-struct rt_sigframe
-{
-       char __user *pretcode;
-       struct ucontext uc;
-       struct siginfo info;
-       struct _fpstate fpstate;
-};
-
-int setup_signal_stack_si(unsigned long stack_top, int sig,
-                         struct k_sigaction *ka, struct pt_regs * regs,
-                         siginfo_t *info, sigset_t *set)
-{
-       struct rt_sigframe __user *frame;
-       unsigned long save_sp = PT_REGS_RSP(regs);
-       int err = 0;
-       struct task_struct *me = current;
-
-       frame = (struct rt_sigframe __user *)
-               round_down(stack_top - sizeof(struct rt_sigframe), 16);
-       /* Subtract 128 for a red zone and 8 for proper alignment */
-       frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8);
-
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-               goto out;
-
-       if (ka->sa.sa_flags & SA_SIGINFO) {
-               err |= copy_siginfo_to_user(&frame->info, info);
-               if (err)
-                       goto out;
-       }
-
-       /*
-        * Update SP now because the page fault handler refuses to extend
-        * the stack if the faulting address is too far below the current
-        * SP, which frame now certainly is.  If there's an error, the original
-        * value is restored on the way out.
-        * When writing the sigcontext to the stack, we have to write the
-        * original value, so that's passed to copy_sc_to_user, which does
-        * the right thing with it.
-        */
-       PT_REGS_RSP(regs) = (unsigned long) frame;
-
-       /* Create the ucontext.  */
-       err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-       err |= __put_user(sas_ss_flags(save_sp),
-                         &frame->uc.uc_stack.ss_flags);
-       err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
-       err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
-                              set->sig[0], save_sp);
-       err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
-       if (sizeof(*set) == 16) {
-               __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
-               __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
-       }
-       else
-               err |= __copy_to_user(&frame->uc.uc_sigmask, set,
-                                     sizeof(*set));
-
-       /*
-        * Set up to return from userspace.  If provided, use a stub
-        * already in userspace.
-        */
-       /* x86-64 should always use SA_RESTORER. */
-       if (ka->sa.sa_flags & SA_RESTORER)
-               err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
-       else
-               /* could use a vstub here */
-               goto restore_sp;
-
-       if (err)
-               goto restore_sp;
-
-       /* Set up registers for signal handler */
-       {
-               struct exec_domain *ed = current_thread_info()->exec_domain;
-               if (unlikely(ed && ed->signal_invmap && sig < 32))
-                       sig = ed->signal_invmap[sig];
-       }
-
-       PT_REGS_RDI(regs) = sig;
-       /* In case the signal handler was declared without prototypes */
-       PT_REGS_RAX(regs) = 0;
-
-       /*
-        * This also works for non SA_SIGINFO handlers because they expect the
-        * next argument after the signal number on the stack.
-        */
-       PT_REGS_RSI(regs) = (unsigned long) &frame->info;
-       PT_REGS_RDX(regs) = (unsigned long) &frame->uc;
-       PT_REGS_RIP(regs) = (unsigned long) ka->sa.sa_handler;
- out:
-       return err;
-
-restore_sp:
-       PT_REGS_RSP(regs) = save_sp;
-       return err;
-}
-
-long sys_rt_sigreturn(struct pt_regs *regs)
-{
-       unsigned long sp = PT_REGS_SP(&current->thread.regs);
-       struct rt_sigframe __user *frame =
-               (struct rt_sigframe __user *)(sp - 8);
-       struct ucontext __user *uc = &frame->uc;
-       sigset_t set;
-
-       if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
-               goto segfault;
-
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       set_current_blocked(&set);
-
-       if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
-               goto segfault;
-
-       /* Avoid ERESTART handling */
-       PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
-       return PT_REGS_SYSCALL_RET(&current->thread.regs);
-
- segfault:
-       force_sig(SIGSEGV, current);
-       return 0;
-}
diff --git a/arch/um/sys-x86_64/stub.S b/arch/um/sys-x86_64/stub.S
deleted file mode 100644 (file)
index 20e4a96..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-#include "as-layout.h"
-
-       .globl syscall_stub
-.section .__syscall_stub, "ax"
-syscall_stub:
-       syscall
-       /* We don't have 64-bit constants, so this constructs the address
-        * we need.
-        */
-       movq    $(STUB_DATA >> 32), %rbx
-       salq    $32, %rbx
-       movq    $(STUB_DATA & 0xffffffff), %rcx
-       or      %rcx, %rbx
-       movq    %rax, (%rbx)
-       int3
-
-       .globl batch_syscall_stub
-batch_syscall_stub:
-       mov     $(STUB_DATA >> 32), %rbx
-       sal     $32, %rbx
-       mov     $(STUB_DATA & 0xffffffff), %rax
-       or      %rax, %rbx
-       /* load pointer to first operation */
-       mov     %rbx, %rsp
-       add     $0x10, %rsp
-again:
-       /* load length of additional data */
-       mov     0x0(%rsp), %rax
-
-       /* if(length == 0) : end of list */
-       /* write possible 0 to header */
-       mov     %rax, 8(%rbx)
-       cmp     $0, %rax
-       jz      done
-
-       /* save current pointer */
-       mov     %rsp, 8(%rbx)
-
-       /* skip additional data */
-       add     %rax, %rsp
-
-       /* load syscall-# */
-       pop     %rax
-
-       /* load syscall params */
-       pop     %rdi
-       pop     %rsi
-       pop     %rdx
-       pop     %r10
-       pop     %r8
-       pop     %r9
-
-       /* execute syscall */
-       syscall
-
-       /* check return value */
-       pop     %rcx
-       cmp     %rcx, %rax
-       je      again
-
-done:
-       /* save return value */
-       mov     %rax, (%rbx)
-
-       /* stop */
-       int3
diff --git a/arch/um/sys-x86_64/stub_segv.c b/arch/um/sys-x86_64/stub_segv.c
deleted file mode 100644 (file)
index ced051a..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <signal.h>
-#include "as-layout.h"
-#include "sysdep/stub.h"
-#include "sysdep/faultinfo.h"
-#include "sysdep/sigcontext.h"
-
-void __attribute__ ((__section__ (".__syscall_stub")))
-stub_segv_handler(int sig)
-{
-       struct ucontext *uc;
-
-       __asm__ __volatile__("movq %%rdx, %0" : "=g" (uc) :);
-       GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA),
-                             &uc->uc_mcontext);
-       trap_myself();
-}
-
diff --git a/arch/um/sys-x86_64/syscall_table.c b/arch/um/sys-x86_64/syscall_table.c
deleted file mode 100644 (file)
index f46de82..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * System call table for UML/x86-64, copied from arch/x86_64/kernel/syscall.c
- * with some changes for UML.
- */
-
-#include <linux/linkage.h>
-#include <linux/sys.h>
-#include <linux/cache.h>
-
-#define __NO_STUBS
-
-/*
- * Below you can see, in terms of #define's, the differences between the x86-64
- * and the UML syscall table.
- */
-
-/* Not going to be implemented by UML, since we have no hardware. */
-#define stub_iopl sys_ni_syscall
-#define sys_ioperm sys_ni_syscall
-
-/*
- * The UML TLS problem. Note that x86_64 does not implement this, so the below
- * is needed only for the ia32 compatibility.
- */
-
-/* On UML we call it this way ("old" means it's not mmap2) */
-#define sys_mmap old_mmap
-
-#define stub_clone sys_clone
-#define stub_fork sys_fork
-#define stub_vfork sys_vfork
-#define stub_execve sys_execve
-#define stub_rt_sigsuspend sys_rt_sigsuspend
-#define stub_sigaltstack sys_sigaltstack
-#define stub_rt_sigreturn sys_rt_sigreturn
-
-#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
-#undef _ASM_X86_UNISTD_64_H
-#include "../../x86/include/asm/unistd_64.h"
-
-#undef __SYSCALL
-#define __SYSCALL(nr, sym) [ nr ] = sym,
-#undef _ASM_X86_UNISTD_64_H
-
-typedef void (*sys_call_ptr_t)(void);
-
-extern void sys_ni_syscall(void);
-
-/*
- * We used to have a trick here which made sure that holes in the
- * x86_64 table were filled in with sys_ni_syscall, but a comment in
- * unistd_64.h says that holes aren't allowed, so the trick was
- * removed.
- * The trick looked like this
- *     [0 ... UM_NR_syscall_max] = &sys_ni_syscall
- * before including unistd_64.h - the later initializations overwrote
- * the sys_ni_syscall filler.
- */
-
-sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
-#include "../../x86/include/asm/unistd_64.h"
-};
-
-int syscall_table_size = sizeof(sys_call_table);
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c
deleted file mode 100644 (file)
index f3d82bb..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Copyright 2003 PathScale, Inc.
- *
- * Licensed under the GPL
- */
-
-#include "linux/linkage.h"
-#include "linux/personality.h"
-#include "linux/utsname.h"
-#include "asm/prctl.h" /* XXX This should get the constants from libc */
-#include "asm/uaccess.h"
-#include "os.h"
-
-long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
-{
-       unsigned long *ptr = addr, tmp;
-       long ret;
-       int pid = task->mm->context.id.u.pid;
-
-       /*
-        * With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to
-        * be safe), we need to call arch_prctl on the host because
-        * setting %fs may result in something else happening (like a
-        * GDT or thread.fs being set instead).  So, we let the host
-        * fiddle the registers and thread struct and restore the
-        * registers afterwards.
-        *
-        * So, the saved registers are stored to the process (this
-        * needed because a stub may have been the last thing to run),
-        * arch_prctl is run on the host, then the registers are read
-        * back.
-        */
-       switch (code) {
-       case ARCH_SET_FS:
-       case ARCH_SET_GS:
-               ret = restore_registers(pid, &current->thread.regs.regs);
-               if (ret)
-                       return ret;
-               break;
-       case ARCH_GET_FS:
-       case ARCH_GET_GS:
-               /*
-                * With these two, we read to a local pointer and
-                * put_user it to the userspace pointer that we were
-                * given.  If addr isn't valid (because it hasn't been
-                * faulted in or is just bogus), we want put_user to
-                * fault it in (or return -EFAULT) instead of having
-                * the host return -EFAULT.
-                */
-               ptr = &tmp;
-       }
-
-       ret = os_arch_prctl(pid, code, ptr);
-       if (ret)
-               return ret;
-
-       switch (code) {
-       case ARCH_SET_FS:
-               current->thread.arch.fs = (unsigned long) ptr;
-               ret = save_registers(pid, &current->thread.regs.regs);
-               break;
-       case ARCH_SET_GS:
-               ret = save_registers(pid, &current->thread.regs.regs);
-               break;
-       case ARCH_GET_FS:
-               ret = put_user(tmp, addr);
-               break;
-       case ARCH_GET_GS:
-               ret = put_user(tmp, addr);
-               break;
-       }
-
-       return ret;
-}
-
-long sys_arch_prctl(int code, unsigned long addr)
-{
-       return arch_prctl(current, code, (unsigned long __user *) addr);
-}
-
-long sys_clone(unsigned long clone_flags, unsigned long newsp,
-              void __user *parent_tid, void __user *child_tid)
-{
-       long ret;
-
-       if (!newsp)
-               newsp = UPT_SP(&current->thread.regs.regs);
-       current->thread.forking = 1;
-       ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
-                     child_tid);
-       current->thread.forking = 0;
-       return ret;
-}
-
-void arch_switch_to(struct task_struct *to)
-{
-       if ((to->thread.arch.fs == 0) || (to->mm == NULL))
-               return;
-
-       arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs);
-}
diff --git a/arch/um/sys-x86_64/sysrq.c b/arch/um/sys-x86_64/sysrq.c
deleted file mode 100644 (file)
index f4f82be..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2003 PathScale, Inc.
- *
- * Licensed under the GPL
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/utsname.h>
-#include <asm/current.h>
-#include <asm/ptrace.h>
-#include "sysrq.h"
-
-void __show_regs(struct pt_regs *regs)
-{
-       printk("\n");
-       print_modules();
-       printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current),
-               current->comm, print_tainted(), init_utsname()->release);
-       printk(KERN_INFO "RIP: %04lx:[<%016lx>]\n", PT_REGS_CS(regs) & 0xffff,
-              PT_REGS_RIP(regs));
-       printk(KERN_INFO "RSP: %016lx  EFLAGS: %08lx\n", PT_REGS_RSP(regs),
-              PT_REGS_EFLAGS(regs));
-       printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
-              PT_REGS_RAX(regs), PT_REGS_RBX(regs), PT_REGS_RCX(regs));
-       printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
-              PT_REGS_RDX(regs), PT_REGS_RSI(regs), PT_REGS_RDI(regs));
-       printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
-              PT_REGS_RBP(regs), PT_REGS_R8(regs), PT_REGS_R9(regs));
-       printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
-              PT_REGS_R10(regs), PT_REGS_R11(regs), PT_REGS_R12(regs));
-       printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
-              PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs));
-}
-
-void show_regs(struct pt_regs *regs)
-{
-       __show_regs(regs);
-       show_trace(current, (unsigned long *) &regs);
-}
diff --git a/arch/um/sys-x86_64/tls.c b/arch/um/sys-x86_64/tls.c
deleted file mode 100644 (file)
index f7ba462..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#include "linux/sched.h"
-
-void clear_flushed_tls(struct task_struct *task)
-{
-}
-
-int arch_copy_tls(struct task_struct *t)
-{
-       /*
-        * If CLONE_SETTLS is set, we need to save the thread id
-        * (which is argument 5, child_tid, of clone) so it can be set
-        * during context switches.
-        */
-       t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)];
-
-       return 0;
-}
diff --git a/arch/um/sys-x86_64/user-offsets.c b/arch/um/sys-x86_64/user-offsets.c
deleted file mode 100644 (file)
index 9735854..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-#include <stdio.h>
-#include <stddef.h>
-#include <signal.h>
-#include <sys/poll.h>
-#include <sys/mman.h>
-#include <sys/user.h>
-#define __FRAME_OFFSETS
-#include <asm/ptrace.h>
-#include <asm/types.h>
-
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define DEFINE_LONGS(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
-
-#define OFFSET(sym, str, mem) \
-       DEFINE(sym, offsetof(struct str, mem));
-
-void foo(void)
-{
-       OFFSET(HOST_SC_CR2, sigcontext, cr2);
-       OFFSET(HOST_SC_ERR, sigcontext, err);
-       OFFSET(HOST_SC_TRAPNO, sigcontext, trapno);
-
-       DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
-       DEFINE_LONGS(HOST_RBX, RBX);
-       DEFINE_LONGS(HOST_RCX, RCX);
-       DEFINE_LONGS(HOST_RDI, RDI);
-       DEFINE_LONGS(HOST_RSI, RSI);
-       DEFINE_LONGS(HOST_RDX, RDX);
-       DEFINE_LONGS(HOST_RBP, RBP);
-       DEFINE_LONGS(HOST_RAX, RAX);
-       DEFINE_LONGS(HOST_R8, R8);
-       DEFINE_LONGS(HOST_R9, R9);
-       DEFINE_LONGS(HOST_R10, R10);
-       DEFINE_LONGS(HOST_R11, R11);
-       DEFINE_LONGS(HOST_R12, R12);
-       DEFINE_LONGS(HOST_R13, R13);
-       DEFINE_LONGS(HOST_R14, R14);
-       DEFINE_LONGS(HOST_R15, R15);
-       DEFINE_LONGS(HOST_ORIG_RAX, ORIG_RAX);
-       DEFINE_LONGS(HOST_CS, CS);
-       DEFINE_LONGS(HOST_SS, SS);
-       DEFINE_LONGS(HOST_EFLAGS, EFLAGS);
-#if 0
-       DEFINE_LONGS(HOST_FS, FS);
-       DEFINE_LONGS(HOST_GS, GS);
-       DEFINE_LONGS(HOST_DS, DS);
-       DEFINE_LONGS(HOST_ES, ES);
-#endif
-
-       DEFINE_LONGS(HOST_IP, RIP);
-       DEFINE_LONGS(HOST_SP, RSP);
-       DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
-
-       /* XXX Duplicated between i386 and x86_64 */
-       DEFINE(UM_POLLIN, POLLIN);
-       DEFINE(UM_POLLPRI, POLLPRI);
-       DEFINE(UM_POLLOUT, POLLOUT);
-
-       DEFINE(UM_PROT_READ, PROT_READ);
-       DEFINE(UM_PROT_WRITE, PROT_WRITE);
-       DEFINE(UM_PROT_EXEC, PROT_EXEC);
-}
diff --git a/arch/um/sys-x86_64/vdso/Makefile b/arch/um/sys-x86_64/vdso/Makefile
deleted file mode 100644 (file)
index 5dffe6d..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Building vDSO images for x86.
-#
-
-VDSO64-y               := y
-
-vdso-install-$(VDSO64-y)       += vdso.so
-
-
-# files to link into the vdso
-vobjs-y := vdso-note.o um_vdso.o
-
-# files to link into kernel
-obj-$(VDSO64-y)                        += vdso.o vma.o
-
-vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
-
-$(obj)/vdso.o: $(obj)/vdso.so
-
-targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
-
-export CPPFLAGS_vdso.lds += -P -C
-
-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
-       -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
-
-$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
-
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
-       $(call if_changed,vdso)
-
-$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg FORCE
-       $(call if_changed,objcopy)
-
-#
-# Don't omit frame pointers for ease of userspace debugging, but do
-# optimize sibling calls.
-#
-CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
-       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-       -fno-omit-frame-pointer -foptimize-sibling-calls
-
-$(vobjs): KBUILD_CFLAGS += $(CFL)
-
-#
-# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
-#
-CFLAGS_REMOVE_vdso-note.o = -pg
-CFLAGS_REMOVE_um_vdso.o = -pg
-
-targets += vdso-syms.lds
-obj-$(VDSO64-y)                        += vdso-syms.lds
-
-#
-# Match symbols in the DSO that look like VDSO*; produce a file of constants.
-#
-sed-vdsosym := -e 's/^00*/0/' \
-       -e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
-quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
-       $(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
-endef
-
-$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
-       $(call if_changed,vdsosym)
-
-#
-# The DSO images are built using a special linker script.
-#
-quiet_cmd_vdso = VDSO    $@
-      cmd_vdso = $(CC) -nostdlib -o $@ \
-                      $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
-                      -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
-                sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
-GCOV_PROFILE := n
-
-#
-# Install the unstripped copy of vdso*.so listed in $(vdso-install-y).
-#
-quiet_cmd_vdso_install = INSTALL $@
-      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-$(vdso-install-y): %.so: $(obj)/%.so.dbg FORCE
-       @mkdir -p $(MODLIB)/vdso
-       $(call cmd,vdso_install)
-
-PHONY += vdso_install $(vdso-install-y)
-vdso_install: $(vdso-install-y)
diff --git a/arch/um/sys-x86_64/vdso/checkundef.sh b/arch/um/sys-x86_64/vdso/checkundef.sh
deleted file mode 100644 (file)
index 7ee90a9..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-nm="$1"
-file="$2"
-$nm "$file" | grep '^ *U' > /dev/null 2>&1
-if [ $? -eq 1 ]; then
-    exit 0
-else
-    echo "$file: undefined symbols found" >&2
-    exit 1
-fi
diff --git a/arch/um/sys-x86_64/vdso/um_vdso.c b/arch/um/sys-x86_64/vdso/um_vdso.c
deleted file mode 100644 (file)
index 7c441b5..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This vDSO turns all calls into a syscall so that UML can trap them.
- */
-
-
-/* Disable profiling for userspace code */
-#define DISABLE_BRANCH_PROFILING
-
-#include <linux/time.h>
-#include <linux/getcpu.h>
-#include <asm/unistd.h>
-
-int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
-{
-       long ret;
-
-       asm("syscall" : "=a" (ret) :
-               "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
-
-       return ret;
-}
-int clock_gettime(clockid_t, struct timespec *)
-       __attribute__((weak, alias("__vdso_clock_gettime")));
-
-int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
-{
-       long ret;
-
-       asm("syscall" : "=a" (ret) :
-               "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
-
-       return ret;
-}
-int gettimeofday(struct timeval *, struct timezone *)
-       __attribute__((weak, alias("__vdso_gettimeofday")));
-
-time_t __vdso_time(time_t *t)
-{
-       long secs;
-
-       asm volatile("syscall"
-               : "=a" (secs)
-               : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
-
-       return secs;
-}
-int time(time_t *t) __attribute__((weak, alias("__vdso_time")));
-
-long
-__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
-{
-       /*
-        * UML does not support SMP, we can cheat here. :)
-        */
-
-       if (cpu)
-               *cpu = 0;
-       if (node)
-               *node = 0;
-
-       return 0;
-}
-
-long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
-       __attribute__((weak, alias("__vdso_getcpu")));
diff --git a/arch/um/sys-x86_64/vdso/vdso-layout.lds.S b/arch/um/sys-x86_64/vdso/vdso-layout.lds.S
deleted file mode 100644 (file)
index 634a2cf..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Linker script for vDSO.  This is an ELF shared object prelinked to
- * its virtual address, and with only one read-only segment.
- * This script controls its layout.
- */
-
-SECTIONS
-{
-       . = VDSO_PRELINK + SIZEOF_HEADERS;
-
-       .hash           : { *(.hash) }                  :text
-       .gnu.hash       : { *(.gnu.hash) }
-       .dynsym         : { *(.dynsym) }
-       .dynstr         : { *(.dynstr) }
-       .gnu.version    : { *(.gnu.version) }
-       .gnu.version_d  : { *(.gnu.version_d) }
-       .gnu.version_r  : { *(.gnu.version_r) }
-
-       .note           : { *(.note.*) }                :text   :note
-
-       .eh_frame_hdr   : { *(.eh_frame_hdr) }          :text   :eh_frame_hdr
-       .eh_frame       : { KEEP (*(.eh_frame)) }       :text
-
-       .dynamic        : { *(.dynamic) }               :text   :dynamic
-
-       .rodata         : { *(.rodata*) }               :text
-       .data           : {
-             *(.data*)
-             *(.sdata*)
-             *(.got.plt) *(.got)
-             *(.gnu.linkonce.d.*)
-             *(.bss*)
-             *(.dynbss*)
-             *(.gnu.linkonce.b.*)
-       }
-
-       .altinstructions        : { *(.altinstructions) }
-       .altinstr_replacement   : { *(.altinstr_replacement) }
-
-       /*
-        * Align the actual code well away from the non-instruction data.
-        * This is the best thing for the I-cache.
-        */
-       . = ALIGN(0x100);
-
-       .text           : { *(.text*) }                 :text   =0x90909090
-}
-
-/*
- * Very old versions of ld do not recognize this name token; use the constant.
- */
-#define PT_GNU_EH_FRAME        0x6474e550
-
-/*
- * We must supply the ELF program headers explicitly to get just one
- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
- */
-PHDRS
-{
-       text            PT_LOAD         FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
-       dynamic         PT_DYNAMIC      FLAGS(4);               /* PF_R */
-       note            PT_NOTE         FLAGS(4);               /* PF_R */
-       eh_frame_hdr    PT_GNU_EH_FRAME;
-}
diff --git a/arch/um/sys-x86_64/vdso/vdso-note.S b/arch/um/sys-x86_64/vdso/vdso-note.S
deleted file mode 100644 (file)
index 79a071e..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
- * Here we can supply some information useful to userland.
- */
-
-#include <linux/uts.h>
-#include <linux/version.h>
-#include <linux/elfnote.h>
-
-ELFNOTE_START(Linux, 0, "a")
-       .long LINUX_VERSION_CODE
-ELFNOTE_END
diff --git a/arch/um/sys-x86_64/vdso/vdso.S b/arch/um/sys-x86_64/vdso/vdso.S
deleted file mode 100644 (file)
index ec82c16..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#include <linux/init.h>
-
-__INITDATA
-
-       .globl vdso_start, vdso_end
-vdso_start:
-       .incbin "arch/um/sys-x86_64/vdso/vdso.so"
-vdso_end:
-
-__FINIT
diff --git a/arch/um/sys-x86_64/vdso/vdso.lds.S b/arch/um/sys-x86_64/vdso/vdso.lds.S
deleted file mode 100644 (file)
index b96b267..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Linker script for 64-bit vDSO.
- * We #include the file to define the layout details.
- * Here we only choose the prelinked virtual address.
- *
- * This file defines the version script giving the user-exported symbols in
- * the DSO.  We can define local symbols here called VDSO* to make their
- * values visible using the asm-x86/vdso.h macros from the kernel proper.
- */
-
-#define VDSO_PRELINK 0xffffffffff700000
-#include "vdso-layout.lds.S"
-
-/*
- * This controls what userland symbols we export from the vDSO.
- */
-VERSION {
-       LINUX_2.6 {
-       global:
-               clock_gettime;
-               __vdso_clock_gettime;
-               gettimeofday;
-               __vdso_gettimeofday;
-               getcpu;
-               __vdso_getcpu;
-               time;
-               __vdso_time;
-       local: *;
-       };
-}
-
-VDSO64_PRELINK = VDSO_PRELINK;
diff --git a/arch/um/sys-x86_64/vdso/vma.c b/arch/um/sys-x86_64/vdso/vma.c
deleted file mode 100644 (file)
index 9495c8d..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <asm/page.h>
-#include <linux/init.h>
-
-unsigned int __read_mostly vdso_enabled = 1;
-unsigned long um_vdso_addr;
-
-extern unsigned long task_size;
-extern char vdso_start[], vdso_end[];
-
-static struct page **vdsop;
-
-static int __init init_vdso(void)
-{
-       struct page *um_vdso;
-
-       BUG_ON(vdso_end - vdso_start > PAGE_SIZE);
-
-       um_vdso_addr = task_size - PAGE_SIZE;
-
-       vdsop = kmalloc(GFP_KERNEL, sizeof(struct page *));
-       if (!vdsop)
-               goto oom;
-
-       um_vdso = alloc_page(GFP_KERNEL);
-       if (!um_vdso) {
-               kfree(vdsop);
-
-               goto oom;
-       }
-
-       copy_page(page_address(um_vdso), vdso_start);
-       *vdsop = um_vdso;
-
-       return 0;
-
-oom:
-       printk(KERN_ERR "Cannot allocate vdso\n");
-       vdso_enabled = 0;
-
-       return -ENOMEM;
-}
-subsys_initcall(init_vdso);
-
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
-{
-       int err;
-       struct mm_struct *mm = current->mm;
-
-       if (!vdso_enabled)
-               return 0;
-
-       down_write(&mm->mmap_sem);
-
-       err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
-               VM_READ|VM_EXEC|
-               VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
-               VM_ALWAYSDUMP,
-               vdsop);
-
-       up_write(&mm->mmap_sem);
-
-       return err;
-}