/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

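/*
 * Walk the ptes mapped by one pmd and apply the new protection to
 * every present entry in [addr, end).
 */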
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pte_t *pte;

	pte = pte_offset_map(pmd, addr);
	do {
		if (pte_present(*pte)) {
			pte_t ptent;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
			set_pte_at(mm, addr, pte, ptent);
			lazy_mmu_prot_update(ptent);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
}

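/*
 * Walk the pmds covered by one pud, skipping empty or corrupt entries,
 * and hand each populated range down to change_pte_range().
 */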
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot);
	} while (pmd++, addr = next, addr != end);
}

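/*
 * The same walk one level up: iterate the puds under one pgd entry and
 * descend into change_pmd_range() for each populated one.
 */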
static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot);
	} while (pud++, addr = next, addr != end);
}

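/*
 * Top of the page-table walk: flush caches for the range, take the
 * page_table_lock, rewrite every pte in [addr, end) with newprot, then
 * flush the TLB for the whole range.
 */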
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	spin_lock(&mm->page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
	spin_unlock(&mm->page_table_lock);
}

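/*
 * Apply newflags to [start, end) of one vma: charge memory when a
 * private mapping becomes writable, merge with or split from
 * neighbouring vmas as needed, then rewrite the page tables.
 */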
static int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgprot_t newprot;
	pgoff_t pgoff;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		if (oldflags & VM_RESERVED) {
			BUG_ON(oldflags & VM_WRITE);
			printk(KERN_WARNING "program %s is using MAP_PRIVATE, "
				"PROT_WRITE mprotect of VM_RESERVED memory, "
				"which is deprecated. Please report this to "
				"linux-kernel@vger.kernel.org\n", current->comm);
			return -EACCES;
		}
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED|VM_HUGETLB))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

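	/*
	 * The low four vm_flags bits (VM_READ, VM_WRITE, VM_EXEC,
	 * VM_SHARED) index protection_map to pick the matching pgprot.
	 */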
	newprot = protection_map[newflags & 0xf];

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = newprot;
	change_protection(vma, start, end, newprot);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

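/*
 * The mprotect(2) entry point: validate the arguments, resolve
 * PROT_GROWSDOWN/PROT_GROWSUP against the underlying vma, then apply
 * the new protection vma by vma via mprotect_fixup().
 *
 * Illustrative userspace call (through the libc wrapper):
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mprotect(p, 4096, PROT_READ);
 *
 * after which writes through p fault with SIGSEGV.
 */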
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP))	/* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if (unlikely((prot & PROT_READ) &&
			(current->personality & READ_IMPLIES_EXEC)))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

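	/*
	 * Walk the vmas covering [start, end), fixing up each in turn;
	 * prev tracks the last vma touched so the walk can continue
	 * correctly after mprotect_fixup() merges or splits vmas.
	 */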
	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		if (is_vm_hugetlb_page(vma)) {
			error = -EACCES;
			goto out;
		}

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/*
		 * newflags >> 4 shifts the VM_MAY% bits into the VM_%
		 * positions, so this rejects any requested permission
		 * that the vma's VM_MAY* bits do not allow.
		 */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}