/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <as-layout.h>
#include <os.h>
#include <skas.h>

/*
 * Map the single kernel page at virtual address 'kernel' into this mm
 * at the userspace address 'proc', allocating each page-table level on
 * the way down.  Used to place the syscall stub code/data pages into a
 * new process address space.
 *
 * Returns 0 on success, -ENOMEM if any page-table level could not be
 * allocated.  On failure, the levels allocated so far are freed again.
 */
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, NULL, pmd, proc);
	if (!pte)
		goto out_pte;

	/*
	 * Point the PTE at the kernel page backing 'kernel' and mark it
	 * present and readable (read-only from userspace's point of view).
	 */
	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

	/* Unwind in reverse order of allocation. */
 out_pte:
	pmd_free(mm, pmd);
 out_pmd:
	pud_free(mm, pud);
 out:
	return -ENOMEM;
}
48 | ||
/*
 * Set up the skas mm_context for a freshly created mm: allocate a
 * zeroed page to serve as the stub data/stack page, start (or fork,
 * via copy_context_skas0) the host process backing this address space,
 * and initialize the LDT.
 *
 * Returns 0 on success or a negative errno; on failure the stub page
 * is freed again.
 */
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_context *from_mm = NULL;
	struct mm_context *to_mm = &mm->context;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	stack = get_zeroed_page(GFP_KERNEL);
	if (stack == 0)
		goto out;

	to_mm->id.stack = stack;
	/*
	 * Only inherit from a real parent context; the initial kernel mm
	 * (init_mm) has no host process to copy from.
	 */
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context;

	if (from_mm)
		to_mm->id.u.pid = copy_context_skas0(stack,
						     from_mm->id.u.pid);
	else to_mm->id.u.pid = start_userspace(stack);

	/* Both helpers return a host pid on success, -errno on failure. */
	if (to_mm->id.u.pid < 0) {
		ret = to_mm->id.u.pid;
		goto out_free;
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk(KERN_ERR "init_new_context_skas - init_ldt"
		       " failed, errno = %d\n", ret);
		goto out_free;
	}

	return 0;

 out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
 out:
	return ret;
}
89 | ||
/*
 * Install the syscall stub pages (code page and per-mm data/stack page)
 * into 'mm' and register them as a special VMA spanning
 * [STUB_START, STUB_END).  Called during mm setup; on any failure the
 * current task is sent SIGSEGV, since it cannot run without the stubs.
 */
void uml_setup_stubs(struct mm_struct *mm)
{
	int err, ret;

	ret = init_stub_pte(mm, STUB_CODE,
			    (unsigned long) __syscall_stub_start);
	if (ret)
		goto out;

	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
	if (ret)
		goto out;

	mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
	mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);

	/* dup_mmap already holds mmap_sem */
	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
				      VM_READ | VM_MAYREAD | VM_EXEC |
				      VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
				      mm->context.stub_pages);
	if (err) {
		printk(KERN_ERR "install_special_mapping returned %d\n", err);
		goto out;
	}
	return;

 out:
	force_sigsegv(SIGSEGV, current);
}
120 | ||
121 | void arch_exit_mmap(struct mm_struct *mm) | |
122 | { | |
123 | pte_t *pte; | |
124 | ||
125 | pte = virt_to_pte(mm, STUB_CODE); | |
126 | if (pte != NULL) | |
127 | pte_clear(mm, STUB_CODE, pte); | |
128 | ||
129 | pte = virt_to_pte(mm, STUB_DATA); | |
130 | if (pte == NULL) | |
131 | return; | |
132 | ||
133 | pte_clear(mm, STUB_DATA, pte); | |
134 | } | |
135 | ||
77bf4400 | 136 | void destroy_context(struct mm_struct *mm) |
1da177e4 | 137 | { |
6c738ffa | 138 | struct mm_context *mmu = &mm->context; |
1da177e4 | 139 | |
d0b5e15f RW |
140 | /* |
141 | * If init_new_context wasn't called, this will be | |
142 | * zero, resulting in a kill(0), which will result in the | |
143 | * whole UML suddenly dying. Also, cover negative and | |
144 | * 1 cases, since they shouldn't happen either. | |
145 | */ | |
146 | if (mmu->id.u.pid < 2) { | |
147 | printk(KERN_ERR "corrupt mm_context - pid = %d\n", | |
148 | mmu->id.u.pid); | |
149 | return; | |
00a905e6 | 150 | } |
d0b5e15f | 151 | os_kill_ptraced_process(mmu->id.u.pid, 1); |
8b51304e | 152 | |
d0b5e15f | 153 | free_page(mmu->id.stack); |
28078e8f | 154 | free_ldt(mmu); |
d67b569f | 155 | } |