/*
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <as-layout.h>
#include <os.h>
#include <skas.h>

d67b569f JD |
18 | static int init_stub_pte(struct mm_struct *mm, unsigned long proc, |
19 | unsigned long kernel) | |
20 | { | |
21 | pgd_t *pgd; | |
22 | pud_t *pud; | |
23 | pmd_t *pmd; | |
24 | pte_t *pte; | |
25 | ||
d67b569f JD |
26 | pgd = pgd_offset(mm, proc); |
27 | pud = pud_alloc(mm, pgd, proc); | |
28 | if (!pud) | |
29 | goto out; | |
30 | ||
31 | pmd = pmd_alloc(mm, pud, proc); | |
32 | if (!pmd) | |
33 | goto out_pmd; | |
34 | ||
3ed3a4f0 | 35 | pte = pte_alloc_map(mm, pmd, proc); |
d67b569f JD |
36 | if (!pte) |
37 | goto out_pte; | |
38 | ||
d67b569f | 39 | *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT)); |
21c935e5 | 40 | *pte = pte_mkread(*pte); |
ba180fd4 | 41 | return 0; |
d67b569f | 42 | |
d67b569f | 43 | out_pte: |
5e541973 | 44 | pmd_free(mm, pmd); |
4d04c707 RK |
45 | out_pmd: |
46 | pud_free(mm, pud); | |
d67b569f | 47 | out: |
ba180fd4 | 48 | return -ENOMEM; |
d67b569f JD |
49 | } |
50 | ||
77bf4400 | 51 | int init_new_context(struct task_struct *task, struct mm_struct *mm) |
1da177e4 | 52 | { |
6c738ffa JD |
53 | struct mm_context *from_mm = NULL; |
54 | struct mm_context *to_mm = &mm->context; | |
8b51304e | 55 | unsigned long stack = 0; |
12919aa6 | 56 | int ret = -ENOMEM; |
1da177e4 | 57 | |
d0b5e15f RW |
58 | stack = get_zeroed_page(GFP_KERNEL); |
59 | if (stack == 0) | |
60 | goto out; | |
858259cf BS |
61 | |
62 | to_mm->id.stack = stack; | |
ba180fd4 | 63 | if (current->mm != NULL && current->mm != &init_mm) |
6c738ffa | 64 | from_mm = ¤t->mm->context; |
9786a8f3 | 65 | |
2eb5f31b | 66 | block_signals(); |
d0b5e15f RW |
67 | if (from_mm) |
68 | to_mm->id.u.pid = copy_context_skas0(stack, | |
69 | from_mm->id.u.pid); | |
70 | else to_mm->id.u.pid = start_userspace(stack); | |
2eb5f31b | 71 | unblock_signals(); |
d0b5e15f RW |
72 | |
73 | if (to_mm->id.u.pid < 0) { | |
74 | ret = to_mm->id.u.pid; | |
75 | goto out_free; | |
858259cf BS |
76 | } |
77 | ||
78 | ret = init_new_ldt(to_mm, from_mm); | |
ba180fd4 JD |
79 | if (ret < 0) { |
80 | printk(KERN_ERR "init_new_context_skas - init_ldt" | |
858259cf BS |
81 | " failed, errno = %d\n", ret); |
82 | goto out_free; | |
d67b569f JD |
83 | } |
84 | ||
85 | return 0; | |
86 | ||
87 | out_free: | |
ba180fd4 | 88 | if (to_mm->id.stack != 0) |
858259cf | 89 | free_page(to_mm->id.stack); |
d67b569f JD |
90 | out: |
91 | return ret; | |
1da177e4 LT |
92 | } |
93 | ||
ac2aca28 | 94 | void uml_setup_stubs(struct mm_struct *mm) |
3963333f | 95 | { |
3963333f JD |
96 | int err, ret; |
97 | ||
3963333f | 98 | ret = init_stub_pte(mm, STUB_CODE, |
05eacfd0 | 99 | (unsigned long) __syscall_stub_start); |
3963333f JD |
100 | if (ret) |
101 | goto out; | |
102 | ||
103 | ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack); | |
104 | if (ret) | |
105 | goto out; | |
106 | ||
05eacfd0 | 107 | mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start); |
ea6fb417 | 108 | mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack); |
3963333f JD |
109 | |
110 | /* dup_mmap already holds mmap_sem */ | |
111 | err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START, | |
112 | VM_READ | VM_MAYREAD | VM_EXEC | | |
dee20035 | 113 | VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP, |
ea6fb417 | 114 | mm->context.stub_pages); |
3963333f JD |
115 | if (err) { |
116 | printk(KERN_ERR "install_special_mapping returned %d\n", err); | |
ea6fb417 | 117 | goto out; |
3963333f JD |
118 | } |
119 | return; | |
120 | ||
3963333f JD |
121 | out: |
122 | force_sigsegv(SIGSEGV, current); | |
123 | } | |
124 | ||
125 | void arch_exit_mmap(struct mm_struct *mm) | |
126 | { | |
127 | pte_t *pte; | |
128 | ||
129 | pte = virt_to_pte(mm, STUB_CODE); | |
130 | if (pte != NULL) | |
131 | pte_clear(mm, STUB_CODE, pte); | |
132 | ||
133 | pte = virt_to_pte(mm, STUB_DATA); | |
134 | if (pte == NULL) | |
135 | return; | |
136 | ||
137 | pte_clear(mm, STUB_DATA, pte); | |
138 | } | |
139 | ||
77bf4400 | 140 | void destroy_context(struct mm_struct *mm) |
1da177e4 | 141 | { |
6c738ffa | 142 | struct mm_context *mmu = &mm->context; |
1da177e4 | 143 | |
d0b5e15f RW |
144 | /* |
145 | * If init_new_context wasn't called, this will be | |
146 | * zero, resulting in a kill(0), which will result in the | |
147 | * whole UML suddenly dying. Also, cover negative and | |
148 | * 1 cases, since they shouldn't happen either. | |
149 | */ | |
150 | if (mmu->id.u.pid < 2) { | |
151 | printk(KERN_ERR "corrupt mm_context - pid = %d\n", | |
152 | mmu->id.u.pid); | |
153 | return; | |
00a905e6 | 154 | } |
d0b5e15f | 155 | os_kill_ptraced_process(mmu->id.u.pid, 1); |
8b51304e | 156 | |
d0b5e15f | 157 | free_page(mmu->id.stack); |
28078e8f | 158 | free_ldt(mmu); |
d67b569f | 159 | } |