/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */
#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"
int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);
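/*
 * Illustrative userspace sketch (not part of this file): can_do_mlock()
 * passes when the caller either holds CAP_IPC_LOCK or has a nonzero
 * RLIMIT_MEMLOCK.  An unprivileged process therefore typically raises its
 * soft RLIMIT_MEMLOCK (up to the hard limit) before locking memory.  The
 * helper name below is hypothetical.
 *
 *	#include <sys/resource.h>
 *
 *	static int raise_memlock_limit(void)
 *	{
 *		struct rlimit rl;
 *
 *		if (getrlimit(RLIMIT_MEMLOCK, &rl))
 *			return -1;
 *		rl.rlim_cur = rl.rlim_max;	// use whatever the hard limit allows
 *		return setrlimit(RLIMIT_MEMLOCK, &rl);
 *	}
 */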
/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */
/*
 * LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	if (!TestClearPageMlocked(page))
		return;

	mod_zone_page_state(page_zone(page), NR_MLOCK,
			    -hpage_nr_pages(page));
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. the page already moved to evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}
/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}
/**
 * munlock_vma_page - munlock a vma page
 * @page - page to be unlocked
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    -hpage_nr_pages(page));
		if (!isolate_lru_page(page)) {
			int ret = SWAP_AGAIN;

			/*
			 * Optimization: if the page was mapped just once,
			 * that's our mapping and we don't need to check all the
			 * other vmas.
			 */
			if (page_mapcount(page) > 1)
				ret = try_to_munlock(page);
			/*
			 * did try_to_unlock() succeed or punt?
			 */
			if (ret != SWAP_MLOCK)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * Some other task has removed the page from the LRU.
			 * putback_lru_page() will take care of removing the
			 * page from the unevictable list, if necessary.
			 * vmscan [page_referenced()] will move the page back
			 * to the unevictable list if some other vma has it
			 * mlocked.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}
/**
 * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 *
 * This takes care of making the pages present too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
long __mlock_vma_pages_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	int nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}
/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;

	lru_add_drain();
	vma->vm_flags &= ~VM_LOCKED;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page;
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
		if (page && !IS_ERR(page)) {
			lock_page(page);
			munlock_vma_page(page);
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}
/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		goto out;	/* don't set VM_LOCKED, don't count */

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags & ~VM_LOCKED;
		if (on)
			newflags |= VM_LOCKED | VM_POPULATE;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) !=
				VM_POPULATE)
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. __mlock_vma_pages_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			ret = __mlock_posix_error_return(ret);
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}
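/*
 * Illustrative userspace sketch (not part of this file): besides mlock(),
 * __mm_populate() is reached when a mapping is created with MAP_LOCKED
 * and/or MAP_POPULATE, so its pages are faulted in up front.  The mapping
 * size below is an arbitrary example value.
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED | MAP_POPULATE,
 *			 -1, 0);
 *	if (buf == MAP_FAILED)
 *		perror("mmap");
 */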
SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	if (!error)
		error = __mm_populate(start, len, 0);
	return error;
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
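/*
 * Illustrative userspace sketch (not part of this file): the classic use of
 * the mlock()/munlock() pair defined above is pinning a buffer that holds
 * sensitive data so it cannot be written to swap.  The errors a caller sees
 * correspond to the checks above: EPERM when can_do_mlock() fails, ENOMEM or
 * EAGAIN from the limit checks and __mlock_posix_error_return().  The helper
 * name below is hypothetical.
 *
 *	#include <sys/mman.h>
 *	#include <string.h>
 *	#include <errno.h>
 *
 *	static int use_secret(char *secret, size_t len)
 *	{
 *		if (mlock(secret, len))
 *			return -errno;		// EPERM, ENOMEM or EAGAIN
 *
 *		// ... work with the secret while it stays off swap ...
 *
 *		memset(secret, 0, len);		// wipe before unlocking
 *		return munlock(secret, len) ? -errno : 0;
 *	}
 */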
static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;

	if (flags & MCL_FUTURE)
		current->mm->def_flags |= VM_LOCKED | VM_POPULATE;
	else
		current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE);
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & ~VM_LOCKED;
		if (flags & MCL_CURRENT)
			newflags |= VM_LOCKED | VM_POPULATE;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}
SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	if (flags & MCL_CURRENT)
		lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);
out:
	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
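/*
 * Illustrative userspace sketch (not part of this file): latency-sensitive
 * programs (e.g. real-time audio or control loops) typically lock both
 * current and future mappings around their critical phase, then release
 * everything with munlockall().
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE))
 *		perror("mlockall");	// EPERM or ENOMEM, see the checks above
 *
 *	// ... time-critical work: locked pages will not cause major faults ...
 *
 *	munlockall();
 */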
/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}
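/*
 * Illustrative userspace sketch (not part of this file): user_shm_lock() and
 * user_shm_unlock() back the shmctl(SHM_LOCK)/shmctl(SHM_UNLOCK) path, where
 * the locked size is charged to the segment owner's user_struct rather than
 * to a single process.  Segment size and permissions below are arbitrary
 * example values.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	if (id >= 0) {
 *		shmctl(id, SHM_LOCK, NULL);	// lock the whole segment in memory
 *		// ... attach with shmat() and use the segment ...
 *		shmctl(id, SHM_UNLOCK, NULL);
 *		shmctl(id, IPC_RMID, NULL);
 *	}
 */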