/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "linux/sched.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "as-layout.h"
#include "os.h"
#include "skas.h"

extern int __syscall_stub_start;

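/*
 * Map a single stub page: walk (and allocate) the page tables for the
 * address 'proc' in this mm and point its pte at the kernel page
 * 'kernel', present and read-only.  Used below for STUB_CODE and
 * STUB_DATA.
 */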
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	/*
	 * There's an interaction between the skas0 stub pages, stack
	 * randomization, and the BUG at the end of exit_mmap.  exit_mmap
	 * checks that the number of page tables freed is the same as had
	 * been allocated.  If the stack is on the last page table page,
	 * then the stack pte page will be freed, and if not, it won't.  To
	 * avoid having to know where the stack is, or if the process mapped
	 * something at the top of its address space for some other reason,
	 * we set TASK_SIZE to end at the start of the last page table.
	 * This keeps exit_mmap off the last page, but introduces a leak
	 * of that page.  So, we hang onto it here and free it in
	 * destroy_context.
	 */

	mm->context.last_page_table = pmd_page_vaddr(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
	mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

	/*
	 * Unwind in reverse allocation order: a pte_alloc_map failure
	 * frees the pmd and then falls through to free the pud.
	 */
 out_pte:
	pmd_free(mm, pmd);
 out_pmd:
	pud_free(mm, pud);
 out:
	return -ENOMEM;
}

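/*
 * init_new_context - set up a new host address space for an mm.
 *
 * If the stub pages are needed (skas0 mode), allocate the stub stack
 * page and map the stub code and data.  The host address space itself
 * is either a /proc/mm file descriptor (proc_mm mode) or a fresh
 * ptraced host process, cloned from the current one when possible.
 */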
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_context *from_mm = NULL;
	struct mm_context *to_mm = &mm->context;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	if (skas_needs_stub) {
		stack = get_zeroed_page(GFP_KERNEL);
		if (stack == 0)
			goto out;

		/*
		 * This zeros the entry that pgd_alloc didn't, needed since
		 * we are about to reinitialize it, and want mm.nr_ptes to
		 * be accurate.
		 */
		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);

		ret = init_stub_pte(mm, STUB_CODE,
				    (unsigned long) &__syscall_stub_start);
		if (ret)
			goto out_free;

		ret = init_stub_pte(mm, STUB_DATA, stack);
		if (ret)
			goto out_free;

		mm->nr_ptes--;
	}

	to_mm->id.stack = stack;
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context;

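	/*
	 * proc_mm: ask the host for a new address space via /proc/mm.
	 * Otherwise (skas0): start a new host process, cloned from the
	 * parent's stub when one exists.
	 */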
	if (proc_mm) {
		ret = new_mm(stack);
		if (ret < 0) {
			printk(KERN_ERR "init_new_context - "
			       "new_mm failed, errno = %d\n", ret);
			goto out_free;
		}
		to_mm->id.u.mm_fd = ret;
	} else {
		if (from_mm)
			to_mm->id.u.pid = copy_context_skas0(stack,
							     from_mm->id.u.pid);
		else
			to_mm->id.u.pid = start_userspace(stack);

		if (to_mm->id.u.pid < 0) {
			ret = to_mm->id.u.pid;
			goto out_free;
		}
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk(KERN_ERR "init_new_context - init_new_ldt"
		       " failed, errno = %d\n", ret);
		goto out_free;
	}

	return 0;

 out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
 out:
	return ret;
}

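/*
 * destroy_context - tear down the host address space for an mm.
 *
 * Close the /proc/mm descriptor or kill the ptraced host process,
 * then free the stub stack page and the deliberately leaked last
 * page table page(s) that init_stub_pte() held onto.
 */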
void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	if (proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else
		os_kill_ptraced_process(mmu->id.u.pid, 1);

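	/*
	 * Free the stub stack page and the page table pages that
	 * init_stub_pte() kept out of exit_mmap's reach.
	 */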
	if (!proc_mm || !ptrace_faultinfo) {
		free_page(mmu->id.stack);
		pte_lock_deinit(virt_to_page(mmu->last_page_table));
		pte_free_kernel(mm, (pte_t *) mmu->last_page_table);
		dec_zone_page_state(virt_to_page(mmu->last_page_table),
				    NR_PAGETABLE);
#ifdef CONFIG_3_LEVEL_PGTABLES
		pmd_free(mm, (pmd_t *) mmu->last_pmd);
#endif
	}

	free_ldt(mmu);
}