/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

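/*
 * Architectures that do not supply their own pgprot_modify() fall back
 * to taking the new protection bits wholesale, discarding oldprot.
 */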
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

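/*
 * Walk the ptes mapped by one pmd over [addr, end) under the page table
 * lock, applying newprot (or, for prot_numa, marking present ptes with
 * pte_mknuma).  Write migration entries are downgraded to read.  Whether
 * every examined page was last touched by the same cpu/pid pair is
 * reported via *ret_all_same_cpupid; the return value is the number of
 * entries actually updated.
 */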
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa, bool *ret_all_same_cpupid)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	bool all_same_cpupid = true;
	int last_cpu = -1;
	int last_pid = -1;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			if (!prot_numa) {
				ptent = pte_modify(ptent, newprot);
				updated = true;
			} else {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (page) {
					int cpupid = page_cpupid_last(page);
					int this_cpu = cpupid_to_cpu(cpupid);
					int this_pid = cpupid_to_pid(cpupid);

					if (last_cpu == -1)
						last_cpu = this_cpu;
					if (last_pid == -1)
						last_pid = this_pid;
					if (last_cpu != this_cpu ||
					    last_pid != this_pid) {
						all_same_cpupid = false;
					}

					if (!pte_numa(oldpte)) {
						ptent = pte_mknuma(ptent);
						updated = true;
					}
				}
			}

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent)) {
				ptent = pte_mkwrite(ptent);
				updated = true;
			}

			if (updated)
				pages++;
			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	*ret_all_same_cpupid = all_same_cpupid;
	return pages;
}

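/*
 * Mark the pmd itself pmd_numa, under the page table lock, so a whole
 * PMD's worth of NUMA hinting faults can be taken as a single fault.
 * Only meaningful with CONFIG_NUMA_BALANCING; it must never be called
 * otherwise.
 */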
#ifdef CONFIG_NUMA_BALANCING
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmd)
{
	spin_lock(&mm->page_table_lock);
	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
	spin_unlock(&mm->page_table_lock);
}
#else
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmd)
{
	BUG();
}
#endif /* CONFIG_NUMA_BALANCING */

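/*
 * Walk the pmds covered by one pud, splitting or updating transparent
 * huge pages in place and handing everything else to change_pte_range().
 * Returns the number of entries updated.
 */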
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	bool all_same_cpupid;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR)
						pages++;

					continue;
				}
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa, &all_same_cpupid);
		pages += this_pages;

		/*
		 * If we are changing protections for NUMA hinting faults then
		 * set pmd_numa if the examined pages were all on the same
		 * node. This allows a regular PMD to be handled as one fault
		 * and effectively batches the taking of the PTL
		 */
		if (prot_numa && this_pages && all_same_cpupid)
			change_pmd_protnuma(vma->vm_mm, addr, pmd);
	} while (pmd++, addr = next, addr != end);

	return pages;
}

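/*
 * Walk the puds covered by one pgd, descending into change_pmd_range()
 * for each present entry and accumulating the update count.
 */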
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

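/*
 * Top level of the page table walk: flush caches up front, walk every
 * pgd over [addr, end), and flush the TLB afterwards only if any
 * entries were actually changed.
 */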
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);

	return pages;
}

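/*
 * Change the protection of [start, end) within vma to newprot,
 * dispatching to the hugetlb variant where appropriate and bracketing
 * the walk with mmu notifier invalidation.  Returns the number of
 * entries updated.
 */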
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pages;

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
				dirty_accountable, prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages;
}

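/*
 * Apply newflags to [start, end) within vma: charge commit for newly
 * writable private mappings, merge or split the vma so the range has
 * its own vm_area_struct, then rewrite the page protections.  The
 * caller must hold mmap_sem for writing.
 */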
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

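/*
 * mprotect(2): validate the arguments, translate prot into vm_flags,
 * then walk every vma in [start, start + len) under mmap_sem, applying
 * mprotect_fixup() to each piece.  Holes in the range, or flags a vma
 * may not take, abort the walk with an error.
 */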
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}