// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */
#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

#include "internal.h"
bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);
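/*
 * Illustrative note, not part of the original file: from userspace, the gate
 * above means mlock() and friends fail with EPERM only when the caller has
 * neither a non-zero RLIMIT_MEMLOCK budget nor CAP_IPC_LOCK. A minimal
 * sketch of inspecting that budget with the standard libc wrapper:
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit r;
 *
 *		if (getrlimit(RLIMIT_MEMLOCK, &r) == 0)
 *			printf("mlock budget: %llu bytes\n",
 *			       (unsigned long long)r.rlim_cur);
 *		return 0;
 *	}
 */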
/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_lock for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */
/*
 * LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	int nr_pages;

	if (!TestClearPageMlocked(page))
		return;

	nr_pages = thp_nr_pages(page);
	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
	/*
	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
	 * in __pagevec_lru_add_fn().
	 *
	 * See __pagevec_lru_add_fn for more explanation.
	 */
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. the page already moved to evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}
}
/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	/* Serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	if (!TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);

		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}
/*
 * Isolate a page from LRU with optional get_page() pin.
 * Assumes lru_lock already held and page already pinned.
 */
static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
{
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
		if (getpage)
			get_page(page);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		return true;
	}

	return false;
}
/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked. This is a wrapper for try_to_munlock()
 * and putback_lru_page() with munlock accounting.
 */
static void __munlock_isolated_page(struct page *page)
{
	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		try_to_munlock(page);

	/* Did try_to_munlock() succeed or punt? */
	if (!PageMlocked(page))
		count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));

	putback_lru_page(page);
}
/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
	int nr_pages = thp_nr_pages(page);

	if (PageUnevictable(page))
		__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	else
		__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
}
/*
 * munlock_vma_page - munlock a vma page
 * @page: page to be unlocked, either a normal page or THP page head
 *
 * returns the size of the page as a page mask (0 for normal page,
 *         HPAGE_PMD_NR - 1 for THP head page)
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
unsigned int munlock_vma_page(struct page *page)
{
	int nr_pages;
	pg_data_t *pgdat = page_pgdat(page);

	/* For try_to_munlock() and to serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);

	/*
	 * Serialize with any parallel __split_huge_page_refcount() which
	 * might otherwise copy PageMlocked to part of the tail pages before
	 * we clear it in the head page. It also stabilizes thp_nr_pages().
	 */
	spin_lock_irq(&pgdat->lru_lock);

	if (!TestClearPageMlocked(page)) {
		/* Potentially, PTE-mapped THP: do not skip the rest PTEs */
		nr_pages = 1;
		goto unlock_out;
	}

	nr_pages = thp_nr_pages(page);
	__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);

	if (__munlock_isolate_lru_page(page, true)) {
		spin_unlock_irq(&pgdat->lru_lock);
		__munlock_isolated_page(page);
		goto out;
	}
	__munlock_isolation_failed(page);

unlock_out:
	spin_unlock_irq(&pgdat->lru_lock);

out:
	return nr_pages - 1;
}
/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
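/*
 * Example of the mapping above (added comment, not in the original file):
 * the populate path reports an unmapped address as -EFAULT, but POSIX
 * requires mlock() to fail with ENOMEM when part of the range does not
 * correspond to mapped pages; likewise a kernel -ENOMEM (allocation
 * failure) surfaces to userspace as EAGAIN, "some or all of the memory
 * could not be locked".
 */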
/*
 * Prepare page for fast batched LRU putback via putback_lru_evictable_pagevec()
 *
 * The fast path is available only for evictable pages with single mapping.
 * Then we can bypass the per-cpu pvec and get better performance.
 * when mapcount > 1 we need try_to_munlock() which can fail.
 * when !page_evictable(), we need the full redo logic of putback_lru_page to
 * avoid leaving evictable page in unevictable list.
 *
 * In case of success, @page is added to @pvec and @pgrescued is incremented
 * in case that the page was previously unevictable. @page is also unlocked.
 */
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
		int *pgrescued)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}
/*
 * Putback multiple evictable pages to the LRU
 *
 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
 * the pages might have meanwhile become unevictable but that is OK.
 */
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
	/*
	 * __pagevec_lru_add() calls release_pages() so we don't call
	 * put_page() explicitly
	 */
	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}
/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split to two main phases. First phase clears the Mlocked flag
 * and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that the pagevec may be modified during the process.
 */
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked = -nr;
	struct pagevec pvec_putback;
	int pgrescued = 0;

	pagevec_init(&pvec_putback);

	/* Phase 1: page isolation */
	spin_lock_irq(&zone->zone_pgdat->lru_lock);
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			/*
			 * We already have pin from follow_page_mask()
			 * so we can spare the get_page() here.
			 */
			if (__munlock_isolate_lru_page(page, false))
				continue;
			else
				__munlock_isolation_failed(page);
		} else {
			delta_munlocked++;
		}

		/*
		 * We won't be munlocking this page in the next phase
		 * but we still need to release the follow_page_mask()
		 * pin. We cannot do it under lru_lock however. If it's
		 * the last pin, __page_cache_release() would deadlock.
		 */
		pagevec_add(&pvec_putback, pvec->pages[i]);
		pvec->pages[i] = NULL;
	}
	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	spin_unlock_irq(&zone->zone_pgdat->lru_lock);

	/* Now we can release pins of pages that we are not munlocking */
	pagevec_release(&pvec_putback);

	/* Phase 2: page munlock */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				/*
				 * Slow path. We don't want to lose the last
				 * pin before unlock_page()
				 */
				get_page(page); /* for putback_lru_page() */
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page); /* from follow_page_mask() */
			}
		}
	}

	/*
	 * Phase 3: page putback for pages that qualified for the fast path
	 * This will also call put_page() to return pin from follow_page_mask()
	 */
	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}
/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the pte's are present and vm_normal_page() succeeds. These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned. This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
			struct vm_area_struct *vma, struct zone *zone,
			unsigned long start, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * Initialize pte walk starting at the already pinned page where we
	 * are sure that there is a pte, as it was pinned under the same
	 * mmap_lock write op.
	 */
	pte = get_locked_pte(vma->vm_mm, start, &ptl);
	/* Make sure we do not cross the page table boundary */
	end = pgd_addr_end(start, end);
	end = p4d_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	/* The page next to the pinned page is the first we will try to get */
	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;

		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);
		/*
		 * Break if page could not be obtained or the page's node+zone
		 * does not match
		 */
		if (!page || page_zone(page) != zone)
			break;

		/*
		 * Do not use pagevec for PTE-mapped THP,
		 * munlock_vma_pages_range() will handle them.
		 */
		if (PageTransCompound(page))
			break;

		get_page(page);
		/*
		 * Increase the address that will be returned *before* the
		 * eventual break due to pvec becoming full by adding the page
		 */
		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}
/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

	while (start < end) {
		struct page *page;
		unsigned int page_mask = 0;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;

		pagevec_init(&pvec);
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);

		if (page && !IS_ERR(page)) {
			if (PageTransTail(page)) {
				VM_BUG_ON_PAGE(PageMlocked(page), page);
				put_page(page); /* follow_page_mask() */
			} else if (PageTransHuge(page)) {
				lock_page(page);
				/*
				 * Any THP page found by follow_page_mask() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to compute
				 * the page_mask here instead.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page); /* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec. The pin from follow_page_mask()
				 * prevents them from collapsing by THP.
				 */
				pagevec_add(&pvec, page);
				zone = page_zone(page);

				/*
				 * Try to fill the rest of pagevec using fast
				 * pte walk. This will also update start to
				 * the next page to process. Then munlock the
				 * pagevec.
				 */
				start = __munlock_pagevec_fill(&pvec, vma,
						zone, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}
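/*
 * Worked example (added comment, not in the original file): for a THP head
 * page munlock_vma_page() returns HPAGE_PMD_NR - 1 (511 with 4K base pages
 * and 2M huge pages), so page_increm becomes 512 and start advances by one
 * full huge page; for a normal page the mask is 0 and start advances by
 * PAGE_SIZE.
 */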
/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  For these vmas we return success without setting
 * VM_LOCKED or VM_LOCKONFAULT and without counting the pages.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);
	vm_flags_t old_flags = vma->vm_flags;

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	else if (old_flags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}
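/*
 * Accounting example (added comment, not in the original file): mlocking a
 * 16-page range that was previously unlocked adds 16 to mm->locked_vm;
 * munlocking it adds -16; re-mlocking a range that already had VM_LOCKED
 * contributes 0, so pages are never double-counted against RLIMIT_MEMLOCK.
 */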
static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
/*
 * Go through vma areas and sum size of mlocked
 * vma pages, as return value.
 * Note that the deferred memory locking case (mlock2(,,MLOCK_ONFAULT))
 * is also counted.
 * Return value: previously mlocked page counts
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;

	if (mm == NULL)
		mm = current->mm;

	vma = find_vma(mm, start);
	if (vma == NULL)
		vma = mm->mmap;

	for (; vma ; vma = vma->vm_next) {
		if (start >= vma->vm_end)
			continue;
		if (start + len <=  vma->vm_start)
			break;
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (start + len < vma->vm_end) {
				count += start + len - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}
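/*
 * Worked example (added comment, not in the original file): with one
 * VM_LOCKED vma covering [0x10000, 0x20000) and a query of start = 0x14000,
 * len = 0x8000, the loop computes count = -(0x14000 - 0x10000) + (0x14000 +
 * 0x8000 - 0x10000) = 0x8000, i.e. 8 pages with 4K pages: exactly the
 * overlap of the query window with the locked vma.
 */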
static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * It is possible that the regions requested intersect with
		 * previously mlocked areas, that part area in "mm->locked_vm"
		 * should not be counted to new mlock increment count. So check
		 * and adjust locked count if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}
SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}
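/*
 * Illustrative userspace sketch (not part of the kernel sources), assuming
 * the standard libc wrappers declared in <sys/mman.h>:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	static char key[4096];	// e.g. secret that must never hit swap
 *
 *	int main(void)
 *	{
 *		if (mlock(key, sizeof(key))) {	// EPERM/ENOMEM/EAGAIN
 *			perror("mlock");
 *			return 1;
 *		}
 *		memset(key, 0x5a, sizeof(key));	// use the locked memory
 *		munlock(key, sizeof(key));
 *		return 0;
 *	}
 */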
SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
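/*
 * Illustrative userspace sketch (not part of the kernel sources): lock on
 * fault, so only pages that are actually touched get mlocked. Assumes a
 * glibc recent enough (2.27+) to provide the mlock2() wrapper:
 *
 *	#define _GNU_SOURCE
 *	#include <stdlib.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t sz = 64 * 4096;
 *		char *buf = malloc(sz);
 *
 *		if (!buf || mlock2(buf, sz, MLOCK_ONFAULT))
 *			return 1;
 *		buf[0] = 1;	// only the faulted page becomes resident+locked
 *		free(buf);
 *		return 0;
 *	}
 */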
SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}
/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched();
	}
out:
	return 0;
}
SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}
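/*
 * Illustrative userspace sketch (not part of the kernel sources): a
 * latency-sensitive process typically locks everything it has mapped and
 * everything it will map, using the libc wrapper from <sys/mman.h>:
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
 *			perror("mlockall");	// EPERM, ENOMEM or EINVAL
 *			return 1;
 *		}
 *		// latency-critical work: no major faults on mapped memory
 *		munlockall();
 *		return 0;
 *	}
 */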
SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}
/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}
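/*
 * Illustrative userspace sketch (not part of the kernel sources): the
 * per-user accounting above is exercised through SysV shared memory, e.g.:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int main(void)
 *	{
 *		int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *
 *		if (id < 0)
 *			return 1;
 *		shmctl(id, SHM_LOCK, NULL);	// charges user->locked_shm
 *		shmctl(id, SHM_UNLOCK, NULL);	// uncharges it
 *		shmctl(id, IPC_RMID, NULL);
 *		return 0;
 *	}
 */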