1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/mm/swap.c
4 *
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 */
7
8 /*
9 * This file contains the default values for the operation of the
10 * Linux VM subsystem. Fine-tuning documentation can be found in
11 * Documentation/admin-guide/sysctl/vm.rst.
12 * Started 18.12.91
13 * Swap aging added 23.2.95, Stephen Tweedie.
14 * Buffermem limits added 12.3.98, Rik van Riel.
15 */
16
17 #include <linux/mm.h>
18 #include <linux/sched.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/swap.h>
21 #include <linux/mman.h>
22 #include <linux/pagemap.h>
23 #include <linux/pagevec.h>
24 #include <linux/init.h>
25 #include <linux/export.h>
26 #include <linux/mm_inline.h>
27 #include <linux/percpu_counter.h>
28 #include <linux/memremap.h>
29 #include <linux/percpu.h>
30 #include <linux/cpu.h>
31 #include <linux/notifier.h>
32 #include <linux/backing-dev.h>
33 #include <linux/memcontrol.h>
34 #include <linux/gfp.h>
35 #include <linux/uio.h>
36 #include <linux/hugetlb.h>
37 #include <linux/page_idle.h>
38 #include <linux/local_lock.h>
39
40 #include "internal.h"
41
42 #define CREATE_TRACE_POINTS
43 #include <trace/events/pagemap.h>
44
45 /* How many pages do we try to swap or page in/out together? */
46 int page_cluster;
47
48 /* Protects only lru_rotate.pvec, which requires disabling interrupts */
49 struct lru_rotate {
50 local_lock_t lock;
51 struct pagevec pvec;
52 };
53 static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
54 .lock = INIT_LOCAL_LOCK(lock),
55 };
56
57 /*
58  * The following pagevecs are grouped together because they are protected
59 * by disabling preemption (and interrupts remain enabled).
60 */
61 struct lru_pvecs {
62 local_lock_t lock;
63 struct pagevec lru_add;
64 struct pagevec lru_deactivate_file;
65 struct pagevec lru_deactivate;
66 struct pagevec lru_lazyfree;
67 #ifdef CONFIG_SMP
68 struct pagevec activate_page;
69 #endif
70 };
71 static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
72 .lock = INIT_LOCAL_LOCK(lock),
73 };
74
75 /*
76 * This path almost never happens for VM activity - pages are normally
77 * freed via pagevecs. But it gets used by networking.
78 */
79 static void __page_cache_release(struct page *page)
80 {
81 if (PageLRU(page)) {
82 pg_data_t *pgdat = page_pgdat(page);
83 struct lruvec *lruvec;
84 unsigned long flags;
85
86 spin_lock_irqsave(&pgdat->lru_lock, flags);
87 lruvec = mem_cgroup_page_lruvec(page, pgdat);
88 VM_BUG_ON_PAGE(!PageLRU(page), page);
89 __ClearPageLRU(page);
90 del_page_from_lru_list(page, lruvec, page_off_lru(page));
91 spin_unlock_irqrestore(&pgdat->lru_lock, flags);
92 }
93 __ClearPageWaiters(page);
94 }
95
96 static void __put_single_page(struct page *page)
97 {
98 __page_cache_release(page);
99 mem_cgroup_uncharge(page);
100 free_unref_page(page);
101 }
102
103 static void __put_compound_page(struct page *page)
104 {
105 /*
106  * __page_cache_release() is supposed to be called for a THP, not for
107  * hugetlb. This is because a hugetlb page never has PageLRU set (it is
108  * never placed on any LRU list) and no memcg routines should be called
109  * for hugetlb (it has a separate hugetlb_cgroup).
110 */
111 if (!PageHuge(page))
112 __page_cache_release(page);
113 destroy_compound_page(page);
114 }
115
116 void __put_page(struct page *page)
117 {
118 if (is_zone_device_page(page)) {
119 put_dev_pagemap(page->pgmap);
120
121 /*
122 * The page belongs to the device that created pgmap. Do
123  * not return it to the page allocator.
124 */
125 return;
126 }
127
128 if (unlikely(PageCompound(page)))
129 __put_compound_page(page);
130 else
131 __put_single_page(page);
132 }
133 EXPORT_SYMBOL(__put_page);
134
135 /**
136 * put_pages_list() - release a list of pages
137 * @pages: list of pages threaded on page->lru
138 *
139  * Release a list of pages which are strung together on page->lru. Currently
140 * used by read_cache_pages() and related error recovery code.
141 */
142 void put_pages_list(struct list_head *pages)
143 {
144 while (!list_empty(pages)) {
145 struct page *victim;
146
147 victim = lru_to_page(pages);
148 list_del(&victim->lru);
149 put_page(victim);
150 }
151 }
152 EXPORT_SYMBOL(put_pages_list);
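/*
 * Hedged usage sketch (not part of this file): a caller collects pages
 * on a private list through page->lru and releases them in one call.
 * The list name below is hypothetical.
 *
 *	LIST_HEAD(pages);
 *
 *	list_add(&page->lru, &pages);	// one reference held per page
 *	...
 *	put_pages_list(&pages);		// unlinks and drops each reference
 */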
153
154 /*
155 * get_kernel_pages() - pin kernel pages in memory
156 * @kiov: An array of struct kvec structures
157 * @nr_segs: number of segments to pin
158 * @write: pinning for read/write, currently ignored
159 * @pages: array that receives pointers to the pages pinned.
160 * Should be at least nr_segs long.
161 *
162 * Returns number of pages pinned. This may be fewer than the number
163  * requested (e.g. if a segment is not exactly PAGE_SIZE long). If
164  * nr_segs is 0 or negative, returns 0. Each page returned must be
165  * released with a put_page() call when it is finished with.
166 */
167 int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
168 struct page **pages)
169 {
170 int seg;
171
172 for (seg = 0; seg < nr_segs; seg++) {
173 if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
174 return seg;
175
176 pages[seg] = kmap_to_page(kiov[seg].iov_base);
177 get_page(pages[seg]);
178 }
179
180 return seg;
181 }
182 EXPORT_SYMBOL_GPL(get_kernel_pages);
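/*
 * Hedged usage sketch: pinning the pages backing two page-sized kernel
 * buffers. buf0/buf1 are hypothetical and must each be exactly
 * PAGE_SIZE long, or get_kernel_pages() stops at the offending segment.
 *
 *	struct kvec kiov[2] = {
 *		{ .iov_base = buf0, .iov_len = PAGE_SIZE },
 *		{ .iov_base = buf1, .iov_len = PAGE_SIZE },
 *	};
 *	struct page *pages[2];
 *	int pinned = get_kernel_pages(kiov, 2, 0, pages);
 *
 *	while (pinned--)
 *		put_page(pages[pinned]);
 */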
183
184 /*
185 * get_kernel_page() - pin a kernel page in memory
186 * @start: starting kernel address
187 * @write: pinning for read/write, currently ignored
188  * @pages: array that receives pointer to the page pinned.
189  * Must have room for one page.
190  *
191  * Returns 1 if the page is pinned, 0 otherwise. The page returned
192  * must be released with a put_page() call when it is
193  * finished with.
194 */
195 int get_kernel_page(unsigned long start, int write, struct page **pages)
196 {
197 const struct kvec kiov = {
198 .iov_base = (void *)start,
199 .iov_len = PAGE_SIZE
200 };
201
202 return get_kernel_pages(&kiov, 1, write, pages);
203 }
204 EXPORT_SYMBOL_GPL(get_kernel_page);
205
206 static void pagevec_lru_move_fn(struct pagevec *pvec,
207 void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
208 void *arg)
209 {
210 int i;
211 struct pglist_data *pgdat = NULL;
212 struct lruvec *lruvec;
213 unsigned long flags = 0;
214
215 for (i = 0; i < pagevec_count(pvec); i++) {
216 struct page *page = pvec->pages[i];
217 struct pglist_data *pagepgdat = page_pgdat(page);
218
219 if (pagepgdat != pgdat) {
220 if (pgdat)
221 spin_unlock_irqrestore(&pgdat->lru_lock, flags);
222 pgdat = pagepgdat;
223 spin_lock_irqsave(&pgdat->lru_lock, flags);
224 }
225
226 lruvec = mem_cgroup_page_lruvec(page, pgdat);
227 (*move_fn)(page, lruvec, arg);
228 }
229 if (pgdat)
230 spin_unlock_irqrestore(&pgdat->lru_lock, flags);
231 release_pages(pvec->pages, pvec->nr);
232 pagevec_reinit(pvec);
233 }
234
235 static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
236 void *arg)
237 {
238 int *pgmoved = arg;
239
240 if (PageLRU(page) && !PageUnevictable(page)) {
241 del_page_from_lru_list(page, lruvec, page_lru(page));
242 ClearPageActive(page);
243 add_page_to_lru_list_tail(page, lruvec, page_lru(page));
244 (*pgmoved) += hpage_nr_pages(page);
245 }
246 }
247
248 /*
249 * pagevec_move_tail() must be called with IRQ disabled.
250 * Otherwise this may cause nasty races.
251 */
252 static void pagevec_move_tail(struct pagevec *pvec)
253 {
254 int pgmoved = 0;
255
256 pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
257 __count_vm_events(PGROTATED, pgmoved);
258 }
259
260 /*
261 * Writeback is about to end against a page which has been marked for immediate
262 * reclaim. If it still appears to be reclaimable, move it to the tail of the
263 * inactive list.
264 */
265 void rotate_reclaimable_page(struct page *page)
266 {
267 if (!PageLocked(page) && !PageDirty(page) &&
268 !PageUnevictable(page) && PageLRU(page)) {
269 struct pagevec *pvec;
270 unsigned long flags;
271
272 get_page(page);
273 local_lock_irqsave(&lru_rotate.lock, flags);
274 pvec = this_cpu_ptr(&lru_rotate.pvec);
275 if (!pagevec_add(pvec, page) || PageCompound(page))
276 pagevec_move_tail(pvec);
277 local_unlock_irqrestore(&lru_rotate.lock, flags);
278 }
279 }
280
281 void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
282 {
283 do {
284 unsigned long lrusize;
285
286 /* Record cost event */
287 if (file)
288 lruvec->file_cost += nr_pages;
289 else
290 lruvec->anon_cost += nr_pages;
291
292 /*
293 * Decay previous events
294 *
295 * Because workloads change over time (and to avoid
296 * overflow) we keep these statistics as a floating
297 * average, which ends up weighing recent refaults
298 * more than old ones.
299 */
300 lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
301 lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
302 lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
303 lruvec_page_state(lruvec, NR_ACTIVE_FILE);
304
305 if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
306 lruvec->file_cost /= 2;
307 lruvec->anon_cost /= 2;
308 }
309 } while ((lruvec = parent_lruvec(lruvec)));
310 }
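/*
 * Worked example of the decay above (illustrative numbers only): with
 * lrusize = 4000 pages, the threshold is lrusize / 4 = 1000. If
 * file_cost = 800 and anon_cost = 250, recording a 16-page file
 * refault makes the sum 1066 > 1000, so both costs are halved to 408
 * and 125. Recent refaults therefore dominate the anon/file balance.
 */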
311
312 void lru_note_cost_page(struct page *page)
313 {
314 lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
315 page_is_file_lru(page), hpage_nr_pages(page));
316 }
317
318 static void __activate_page(struct page *page, struct lruvec *lruvec,
319 void *arg)
320 {
321 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
322 int lru = page_lru_base_type(page);
323 int nr_pages = hpage_nr_pages(page);
324
325 del_page_from_lru_list(page, lruvec, lru);
326 SetPageActive(page);
327 lru += LRU_ACTIVE;
328 add_page_to_lru_list(page, lruvec, lru);
329 trace_mm_lru_activate(page);
330
331 __count_vm_events(PGACTIVATE, nr_pages);
332 __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
333 nr_pages);
334 }
335 }
336
337 #ifdef CONFIG_SMP
338 static void activate_page_drain(int cpu)
339 {
340 struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);
341
342 if (pagevec_count(pvec))
343 pagevec_lru_move_fn(pvec, __activate_page, NULL);
344 }
345
346 static bool need_activate_page_drain(int cpu)
347 {
348 return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
349 }
350
351 void activate_page(struct page *page)
352 {
353 page = compound_head(page);
354 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
355 struct pagevec *pvec;
356
357 local_lock(&lru_pvecs.lock);
358 pvec = this_cpu_ptr(&lru_pvecs.activate_page);
359 get_page(page);
360 if (!pagevec_add(pvec, page) || PageCompound(page))
361 pagevec_lru_move_fn(pvec, __activate_page, NULL);
362 local_unlock(&lru_pvecs.lock);
363 }
364 }
365
366 #else
367 static inline void activate_page_drain(int cpu)
368 {
369 }
370
371 void activate_page(struct page *page)
372 {
373 pg_data_t *pgdat = page_pgdat(page);
374
375 page = compound_head(page);
376 spin_lock_irq(&pgdat->lru_lock);
377 __activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
378 spin_unlock_irq(&pgdat->lru_lock);
379 }
380 #endif
381
382 static void __lru_cache_activate_page(struct page *page)
383 {
384 struct pagevec *pvec;
385 int i;
386
387 local_lock(&lru_pvecs.lock);
388 pvec = this_cpu_ptr(&lru_pvecs.lru_add);
389
390 /*
391 * Search backwards on the optimistic assumption that the page being
392 * activated has just been added to this pagevec. Note that only
393 * the local pagevec is examined as a !PageLRU page could be in the
394 * process of being released, reclaimed, migrated or on a remote
395 * pagevec that is currently being drained. Furthermore, marking
396 * a remote pagevec's page PageActive potentially hits a race where
397 * a page is marked PageActive just after it is added to the inactive
398  * list, causing accounting errors and BUG_ON checks to trigger.
399 */
400 for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
401 struct page *pagevec_page = pvec->pages[i];
402
403 if (pagevec_page == page) {
404 SetPageActive(page);
405 break;
406 }
407 }
408
409 local_unlock(&lru_pvecs.lock);
410 }
411
412 /*
413 * Mark a page as having seen activity.
414 *
415 * inactive,unreferenced -> inactive,referenced
416 * inactive,referenced -> active,unreferenced
417 * active,unreferenced -> active,referenced
418 *
419 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
420 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
421 */
422 void mark_page_accessed(struct page *page)
423 {
424 page = compound_head(page);
425
426 if (!PageReferenced(page)) {
427 SetPageReferenced(page);
428 } else if (PageUnevictable(page)) {
429 /*
430 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
431 * this list is never rotated or maintained, so marking an
432  * unevictable page accessed has no effect.
433 */
434 } else if (!PageActive(page)) {
435 /*
436 * If the page is on the LRU, queue it for activation via
437 * lru_pvecs.activate_page. Otherwise, assume the page is on a
438 * pagevec, mark it active and it'll be moved to the active
439 * LRU on the next drain.
440 */
441 if (PageLRU(page))
442 activate_page(page);
443 else
444 __lru_cache_activate_page(page);
445 ClearPageReferenced(page);
446 workingset_activation(page);
447 }
448 if (page_is_idle(page))
449 clear_page_idle(page);
450 }
451 EXPORT_SYMBOL(mark_page_accessed);
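/*
 * Illustrative sketch of the transitions documented above, assuming
 * the page starts inactive, unreferenced, and evictable:
 *
 *	mark_page_accessed(page);  // inactive,unreferenced -> inactive,referenced
 *	mark_page_accessed(page);  // inactive,referenced -> active,unreferenced
 *	mark_page_accessed(page);  // active,unreferenced -> active,referenced
 */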
452
453 /**
454  * lru_cache_add - add a page to the LRU list
455  * @page: the page to be added to the LRU.
456  *
457  * Queue the page for addition to the LRU via pagevec. The decision on whether
458  * to add the page to the [in]active [file|anon] list is deferred until the
459  * pagevec is drained. This gives a chance for the caller of lru_cache_add()
460  * to have the page added to the active list using mark_page_accessed().
461 */
462 void lru_cache_add(struct page *page)
463 {
464 struct pagevec *pvec;
465
466 VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
467 VM_BUG_ON_PAGE(PageLRU(page), page);
468
469 get_page(page);
470 local_lock(&lru_pvecs.lock);
471 pvec = this_cpu_ptr(&lru_pvecs.lru_add);
472 if (!pagevec_add(pvec, page) || PageCompound(page))
473 __pagevec_lru_add(pvec);
474 local_unlock(&lru_pvecs.lock);
475 }
476 EXPORT_SYMBOL(lru_cache_add);
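/*
 * Note (hedged sketch): additions are batched. A page queued here only
 * appears on the LRU once the per-CPU pagevec fills up, the page is
 * compound, or the pagevec is drained explicitly:
 *
 *	lru_cache_add(page);	// queued on this CPU's lru_add pagevec
 *	lru_add_drain();	// force the pending batch onto the LRU
 */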
477
478 /**
479 * lru_cache_add_active_or_unevictable
480 * @page: the page to be added to LRU
481 * @vma: vma in which page is mapped for determining reclaimability
482 *
483 * Place @page on the active or unevictable LRU list, depending on its
484 * evictability. Note that if the page is not evictable, it goes
485  * directly back onto its zone's unevictable list; it does NOT use a
486  * per-CPU pagevec.
487 */
488 void lru_cache_add_active_or_unevictable(struct page *page,
489 struct vm_area_struct *vma)
490 {
491 VM_BUG_ON_PAGE(PageLRU(page), page);
492
493 if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
494 SetPageActive(page);
495 else if (!TestSetPageMlocked(page)) {
496 /*
497  * We use the irq-unsafe __mod_zone_page_state() because this
498  * counter is not modified from interrupt context, and the pte
499  * lock is held (a spinlock), which implies preemption is disabled.
500 */
501 __mod_zone_page_state(page_zone(page), NR_MLOCK,
502 hpage_nr_pages(page));
503 count_vm_event(UNEVICTABLE_PGMLOCKED);
504 }
505 lru_cache_add(page);
506 }
507
508 /*
509  * If the page cannot be invalidated, it is moved to the
510  * inactive list to speed up its reclaim. It is moved to the
511  * head of the list, rather than the tail, to give the flusher
512  * threads some time to write it out, as this is much more
513  * effective than the single-page writeout from reclaim.
514  *
515  * If the page isn't mapped and is dirty/under writeback, it can
516  * be reclaimed ASAP by setting PG_reclaim.
517  *
518  * 1. active, mapped page -> none
519  * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
520  * 3. inactive, mapped page -> none
521  * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
522  * 5. inactive, clean -> inactive, tail
523  * 6. Others -> none
524  *
525  * In case 4, the page is moved to the head of the inactive list because
526  * the VM expects it to be written out by flusher threads, as this is
527  * much more effective than the single-page writeout from reclaim.
528  */
529 static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
530 void *arg)
531 {
532 int lru;
533 bool active;
534 int nr_pages = hpage_nr_pages(page);
535
536 if (!PageLRU(page))
537 return;
538
539 if (PageUnevictable(page))
540 return;
541
542 /* Some processes are using the page */
543 if (page_mapped(page))
544 return;
545
546 active = PageActive(page);
547 lru = page_lru_base_type(page);
548
549 del_page_from_lru_list(page, lruvec, lru + active);
550 ClearPageActive(page);
551 ClearPageReferenced(page);
552
553 if (PageWriteback(page) || PageDirty(page)) {
554 /*
555  * Setting PG_reclaim can race with end_page_writeback(),
556  * which can confuse readahead. But the race window is
557  * _really_ small and it is a non-critical problem.
558 */
559 add_page_to_lru_list(page, lruvec, lru);
560 SetPageReclaim(page);
561 } else {
562 /*
563  * The page's writeback ended while it was in the pagevec,
564  * so move the page to the tail of the inactive list.
565 */
566 add_page_to_lru_list_tail(page, lruvec, lru);
567 __count_vm_events(PGROTATED, nr_pages);
568 }
569
570 if (active) {
571 __count_vm_events(PGDEACTIVATE, nr_pages);
572 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
573 nr_pages);
574 }
575 }
576
577 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
578 void *arg)
579 {
580 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
581 int lru = page_lru_base_type(page);
582 int nr_pages = hpage_nr_pages(page);
583
584 del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
585 ClearPageActive(page);
586 ClearPageReferenced(page);
587 add_page_to_lru_list(page, lruvec, lru);
588
589 __count_vm_events(PGDEACTIVATE, nr_pages);
590 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
591 nr_pages);
592 }
593 }
594
595 static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
596 void *arg)
597 {
598 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
599 !PageSwapCache(page) && !PageUnevictable(page)) {
600 bool active = PageActive(page);
601 int nr_pages = hpage_nr_pages(page);
602
603 del_page_from_lru_list(page, lruvec,
604 LRU_INACTIVE_ANON + active);
605 ClearPageActive(page);
606 ClearPageReferenced(page);
607 /*
608  * Lazyfree pages are clean anonymous pages. They have the
609  * PG_swapbacked flag cleared, to distinguish them from normal
610  * anonymous pages.
611 */
612 ClearPageSwapBacked(page);
613 add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
614
615 __count_vm_events(PGLAZYFREE, nr_pages);
616 __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
617 nr_pages);
618 }
619 }
620
621 /*
622 * Drain pages out of the cpu's pagevecs.
623 * Either "cpu" is the current CPU, and preemption has already been
624 * disabled; or "cpu" is being hot-unplugged, and is already dead.
625 */
626 void lru_add_drain_cpu(int cpu)
627 {
628 struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);
629
630 if (pagevec_count(pvec))
631 __pagevec_lru_add(pvec);
632
633 pvec = &per_cpu(lru_rotate.pvec, cpu);
634 if (pagevec_count(pvec)) {
635 unsigned long flags;
636
637 /* No harm done if a racing interrupt already did this */
638 local_lock_irqsave(&lru_rotate.lock, flags);
639 pagevec_move_tail(pvec);
640 local_unlock_irqrestore(&lru_rotate.lock, flags);
641 }
642
643 pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
644 if (pagevec_count(pvec))
645 pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
646
647 pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
648 if (pagevec_count(pvec))
649 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
650
651 pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
652 if (pagevec_count(pvec))
653 pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
654
655 activate_page_drain(cpu);
656 }
657
658 /**
659 * deactivate_file_page - forcefully deactivate a file page
660 * @page: page to deactivate
661 *
662 * This function hints the VM that @page is a good reclaim candidate,
663 * for example if its invalidation fails due to the page being dirty
664 * or under writeback.
665 */
666 void deactivate_file_page(struct page *page)
667 {
668 /*
669  * In a workload with many unevictable pages such as mprotect,
670  * deactivating unevictable pages to accelerate reclaim is pointless.
671 */
672 if (PageUnevictable(page))
673 return;
674
675 if (likely(get_page_unless_zero(page))) {
676 struct pagevec *pvec;
677
678 local_lock(&lru_pvecs.lock);
679 pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
680
681 if (!pagevec_add(pvec, page) || PageCompound(page))
682 pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
683 local_unlock(&lru_pvecs.lock);
684 }
685 }
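/*
 * Hedged sketch: invalidation code typically calls this as a fallback
 * when a page cannot be dropped immediately, e.g. because it is dirty
 * or under writeback. invalidate_complete_page() is named purely for
 * illustration:
 *
 *	if (!invalidate_complete_page(mapping, page))
 *		deactivate_file_page(page);
 */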
686
687 /*
688 * deactivate_page - deactivate a page
689 * @page: page to deactivate
690 *
691 * deactivate_page() moves @page to the inactive list if @page was on the active
692 * list and was not an unevictable page. This is done to accelerate the reclaim
693 * of @page.
694 */
695 void deactivate_page(struct page *page)
696 {
697 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
698 struct pagevec *pvec;
699
700 local_lock(&lru_pvecs.lock);
701 pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
702 get_page(page);
703 if (!pagevec_add(pvec, page) || PageCompound(page))
704 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
705 local_unlock(&lru_pvecs.lock);
706 }
707 }
708
709 /**
710 * mark_page_lazyfree - make an anon page lazyfree
711 * @page: page to deactivate
712 *
713 * mark_page_lazyfree() moves @page to the inactive file list.
714 * This is done to accelerate the reclaim of @page.
715 */
716 void mark_page_lazyfree(struct page *page)
717 {
718 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
719 !PageSwapCache(page) && !PageUnevictable(page)) {
720 struct pagevec *pvec;
721
722 local_lock(&lru_pvecs.lock);
723 pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
724 get_page(page);
725 if (!pagevec_add(pvec, page) || PageCompound(page))
726 pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
727 local_unlock(&lru_pvecs.lock);
728 }
729 }
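/*
 * Hedged sketch (assumption: the MADV_FREE path): madvise walks the
 * page tables and, once a mapped anonymous page has been cleaned,
 * hands it to mark_page_lazyfree() so reclaim can drop it without
 * swap I/O:
 *
 *	ClearPageDirty(page);
 *	mark_page_lazyfree(page);
 */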
730
731 void lru_add_drain(void)
732 {
733 local_lock(&lru_pvecs.lock);
734 lru_add_drain_cpu(smp_processor_id());
735 local_unlock(&lru_pvecs.lock);
736 }
737
738 void lru_add_drain_cpu_zone(struct zone *zone)
739 {
740 local_lock(&lru_pvecs.lock);
741 lru_add_drain_cpu(smp_processor_id());
742 drain_local_pages(zone);
743 local_unlock(&lru_pvecs.lock);
744 }
745
746 #ifdef CONFIG_SMP
747
748 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
749
750 static void lru_add_drain_per_cpu(struct work_struct *dummy)
751 {
752 lru_add_drain();
753 }
754
755 /*
756  * Doesn't need any cpu hotplug locking because we rely on per-cpu
757 * kworkers being shut down before our page_alloc_cpu_dead callback is
758 * executed on the offlined cpu.
759 * Calling this function with cpu hotplug locks held can actually lead
760 * to obscure indirect dependencies via WQ context.
761 */
762 void lru_add_drain_all(void)
763 {
764 static seqcount_t seqcount = SEQCNT_ZERO(seqcount);
765 static DEFINE_MUTEX(lock);
766 static struct cpumask has_work;
767 int cpu, seq;
768
769 /*
770 * Make sure nobody triggers this path before mm_percpu_wq is fully
771 * initialized.
772 */
773 if (WARN_ON(!mm_percpu_wq))
774 return;
775
776 seq = raw_read_seqcount_latch(&seqcount);
777
778 mutex_lock(&lock);
779
780 /*
781  * Piggyback on a drain that started and finished while we waited for
782  * the lock: all pages pending at the time we entered were drained.
783 */
784 if (__read_seqcount_retry(&seqcount, seq))
785 goto done;
786
787 raw_write_seqcount_latch(&seqcount);
788
789 cpumask_clear(&has_work);
790
791 for_each_online_cpu(cpu) {
792 struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
793
794 if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
795 pagevec_count(&per_cpu(lru_rotate.pvec, cpu)) ||
796 pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
797 pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
798 pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
799 need_activate_page_drain(cpu)) {
800 INIT_WORK(work, lru_add_drain_per_cpu);
801 queue_work_on(cpu, mm_percpu_wq, work);
802 cpumask_set_cpu(cpu, &has_work);
803 }
804 }
805
806 for_each_cpu(cpu, &has_work)
807 flush_work(&per_cpu(lru_add_drain_work, cpu));
808
809 done:
810 mutex_unlock(&lock);
811 }
812 #else
813 void lru_add_drain_all(void)
814 {
815 lru_add_drain();
816 }
817 #endif
818
819 /**
820 * release_pages - batched put_page()
821 * @pages: array of pages to release
822 * @nr: number of pages
823 *
824 * Decrement the reference count on all the pages in @pages. If it
825  * falls to zero, remove the page from the LRU and free it.
826 */
827 void release_pages(struct page **pages, int nr)
828 {
829 int i;
830 LIST_HEAD(pages_to_free);
831 struct pglist_data *locked_pgdat = NULL;
832 struct lruvec *lruvec;
833 unsigned long flags;
834 unsigned int lock_batch;
835
836 for (i = 0; i < nr; i++) {
837 struct page *page = pages[i];
838
839 /*
840 * Make sure the IRQ-safe lock-holding time does not get
841 * excessive with a continuous string of pages from the
842 * same pgdat. The lock is held only if pgdat != NULL.
843 */
844 if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
845 spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
846 locked_pgdat = NULL;
847 }
848
849 if (is_huge_zero_page(page))
850 continue;
851
852 if (is_zone_device_page(page)) {
853 if (locked_pgdat) {
854 spin_unlock_irqrestore(&locked_pgdat->lru_lock,
855 flags);
856 locked_pgdat = NULL;
857 }
858 /*
859 * ZONE_DEVICE pages that return 'false' from
860  * page_is_devmap_managed() do not require special
861 * processing, and instead, expect a call to
862 * put_page_testzero().
863 */
864 if (page_is_devmap_managed(page)) {
865 put_devmap_managed_page(page);
866 continue;
867 }
868 }
869
870 page = compound_head(page);
871 if (!put_page_testzero(page))
872 continue;
873
874 if (PageCompound(page)) {
875 if (locked_pgdat) {
876 spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
877 locked_pgdat = NULL;
878 }
879 __put_compound_page(page);
880 continue;
881 }
882
883 if (PageLRU(page)) {
884 struct pglist_data *pgdat = page_pgdat(page);
885
886 if (pgdat != locked_pgdat) {
887 if (locked_pgdat)
888 spin_unlock_irqrestore(&locked_pgdat->lru_lock,
889 flags);
890 lock_batch = 0;
891 locked_pgdat = pgdat;
892 spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
893 }
894
895 lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
896 VM_BUG_ON_PAGE(!PageLRU(page), page);
897 __ClearPageLRU(page);
898 del_page_from_lru_list(page, lruvec, page_off_lru(page));
899 }
900
901 /* Clear Active bit in case of parallel mark_page_accessed */
902 __ClearPageActive(page);
903 __ClearPageWaiters(page);
904
905 list_add(&page->lru, &pages_to_free);
906 }
907 if (locked_pgdat)
908 spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
909
910 mem_cgroup_uncharge_list(&pages_to_free);
911 free_unref_page_list(&pages_to_free);
912 }
913 EXPORT_SYMBOL(release_pages);
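/*
 * Hedged usage sketch: release_pages() is the batched counterpart of
 * put_page(). A caller holding nr page references drops them with one
 * call, re-acquiring the LRU lock at most every SWAP_CLUSTER_MAX pages:
 *
 *	release_pages(pages, nr);	// instead of nr put_page() calls
 */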
914
915 /*
916 * The pages which we're about to release may be in the deferred lru-addition
917 * queues. That would prevent them from really being freed right now. That's
918 * OK from a correctness point of view but is inefficient - those pages may be
919 * cache-warm and we want to give them back to the page allocator ASAP.
920 *
921 * So __pagevec_release() will drain those queues here. __pagevec_lru_add()
922 * and __pagevec_lru_add_active() call release_pages() directly to avoid
923 * mutual recursion.
924 */
925 void __pagevec_release(struct pagevec *pvec)
926 {
927 if (!pvec->percpu_pvec_drained) {
928 lru_add_drain();
929 pvec->percpu_pvec_drained = true;
930 }
931 release_pages(pvec->pages, pagevec_count(pvec));
932 pagevec_reinit(pvec);
933 }
934 EXPORT_SYMBOL(__pagevec_release);
935
936 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
937 /* used when splitting a transparent huge page */
938 void lru_add_page_tail(struct page *page, struct page *page_tail,
939 struct lruvec *lruvec, struct list_head *list)
940 {
941 VM_BUG_ON_PAGE(!PageHead(page), page);
942 VM_BUG_ON_PAGE(PageCompound(page_tail), page);
943 VM_BUG_ON_PAGE(PageLRU(page_tail), page);
944 lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
945
946 if (!list)
947 SetPageLRU(page_tail);
948
949 if (likely(PageLRU(page)))
950 list_add_tail(&page_tail->lru, &page->lru);
951 else if (list) {
952 /* page reclaim is reclaiming a huge page */
953 get_page(page_tail);
954 list_add_tail(&page_tail->lru, list);
955 } else {
956 /*
957 * Head page has not yet been counted, as an hpage,
958 * so we must account for each subpage individually.
959 *
960 * Put page_tail on the list at the correct position
961 * so they all end up in order.
962 */
963 add_page_to_lru_list_tail(page_tail, lruvec,
964 page_lru(page_tail));
965 }
966 }
967 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
968
969 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
970 void *arg)
971 {
972 enum lru_list lru;
973 int was_unevictable = TestClearPageUnevictable(page);
974 int nr_pages = hpage_nr_pages(page);
975
976 VM_BUG_ON_PAGE(PageLRU(page), page);
977
978 /*
979 * Page becomes evictable in two ways:
980 * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
981 * 2) Before acquiring LRU lock to put the page to correct LRU and then
982 * a) do PageLRU check with lock [check_move_unevictable_pages]
983 * b) do PageLRU check before lock [clear_page_mlock]
984 *
985 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
986 * following strict ordering:
987 *
988 * #0: __pagevec_lru_add_fn #1: clear_page_mlock
989 *
990 * SetPageLRU() TestClearPageMlocked()
991 * smp_mb() // explicit ordering // above provides strict
992 * // ordering
993 * PageMlocked() PageLRU()
994 *
995 *
996  * If '#1' does not observe the setting of PG_lru by '#0' and fails
997  * isolation, the explicit barrier makes sure that the page_evictable()
998  * check puts the page on the correct LRU. Without smp_mb(), SetPageLRU
999  * can be reordered after the PageMlocked check, causing '#1' to fail
1000  * the isolation of the page whose Mlocked bit was cleared (#0 is also
1001  * looking at the same page), and the evictable page would be stranded
1002  * on an unevictable LRU.
1003 */
1004 SetPageLRU(page);
1005 smp_mb__after_atomic();
1006
1007 if (page_evictable(page)) {
1008 lru = page_lru(page);
1009 if (was_unevictable)
1010 __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
1011 } else {
1012 lru = LRU_UNEVICTABLE;
1013 ClearPageActive(page);
1014 SetPageUnevictable(page);
1015 if (!was_unevictable)
1016 __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
1017 }
1018
1019 add_page_to_lru_list(page, lruvec, lru);
1020 trace_mm_lru_insertion(page, lru);
1021 }
1022
1023 /*
1024 * Add the passed pages to the LRU, then drop the caller's refcount
1025 * on them. Reinitialises the caller's pagevec.
1026 */
1027 void __pagevec_lru_add(struct pagevec *pvec)
1028 {
1029 pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
1030 }
1031
1032 /**
1033 * pagevec_lookup_entries - gang pagecache lookup
1034 * @pvec: Where the resulting entries are placed
1035 * @mapping: The address_space to search
1036 * @start: The starting entry index
1037 * @nr_entries: The maximum number of pages
1038 * @indices: The cache indices corresponding to the entries in @pvec
1039 *
1040 * pagevec_lookup_entries() will search for and return a group of up
1041  * to @nr_entries pages and shadow entries in the mapping. All
1042 * entries are placed in @pvec. pagevec_lookup_entries() takes a
1043 * reference against actual pages in @pvec.
1044 *
1045 * The search returns a group of mapping-contiguous entries with
1046 * ascending indexes. There may be holes in the indices due to
1047 * not-present entries.
1048 *
1049 * Only one subpage of a Transparent Huge Page is returned in one call:
1050 * allowing truncate_inode_pages_range() to evict the whole THP without
1051 * cycling through a pagevec of extra references.
1052 *
1053 * pagevec_lookup_entries() returns the number of entries which were
1054 * found.
1055 */
1056 unsigned pagevec_lookup_entries(struct pagevec *pvec,
1057 struct address_space *mapping,
1058 pgoff_t start, unsigned nr_entries,
1059 pgoff_t *indices)
1060 {
1061 pvec->nr = find_get_entries(mapping, start, nr_entries,
1062 pvec->pages, indices);
1063 return pagevec_count(pvec);
1064 }
1065
1066 /**
1067 * pagevec_remove_exceptionals - pagevec exceptionals pruning
1068 * @pvec: The pagevec to prune
1069 *
1070 * pagevec_lookup_entries() fills both pages and exceptional radix
1071 * tree entries into the pagevec. This function prunes all
1072 * exceptionals from @pvec without leaving holes, so that it can be
1073 * passed on to page-only pagevec operations.
1074 */
1075 void pagevec_remove_exceptionals(struct pagevec *pvec)
1076 {
1077 int i, j;
1078
1079 for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
1080 struct page *page = pvec->pages[i];
1081 if (!xa_is_value(page))
1082 pvec->pages[j++] = page;
1083 }
1084 pvec->nr = j;
1085 }
1086
1087 /**
1088 * pagevec_lookup_range - gang pagecache lookup
1089 * @pvec: Where the resulting pages are placed
1090 * @mapping: The address_space to search
1091 * @start: The starting page index
1092 * @end: The final page index
1093 *
1094  * pagevec_lookup_range() will search for and return a group of up to
1095  * PAGEVEC_SIZE pages in the mapping, starting from index @start and up
1096  * to index @end (inclusive). The pages are placed in @pvec.
1097  * pagevec_lookup_range() takes a reference against the pages in @pvec.
1098 *
1099 * The search returns a group of mapping-contiguous pages with ascending
1100 * indexes. There may be holes in the indices due to not-present pages. We
1101 * also update @start to index the next page for the traversal.
1102 *
1103 * pagevec_lookup_range() returns the number of pages which were found. If this
1104 * number is smaller than PAGEVEC_SIZE, the end of specified range has been
1105 * reached.
1106 */
1107 unsigned pagevec_lookup_range(struct pagevec *pvec,
1108 struct address_space *mapping, pgoff_t *start, pgoff_t end)
1109 {
1110 pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
1111 pvec->pages);
1112 return pagevec_count(pvec);
1113 }
1114 EXPORT_SYMBOL(pagevec_lookup_range);
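/*
 * Hedged sketch of the usual traversal pattern: the lookup advances
 * @start itself, so callers simply loop until the pagevec comes back
 * empty:
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup_range(&pvec, mapping, &index, end)) {
 *		unsigned i;
 *
 *		for (i = 0; i < pagevec_count(&pvec); i++)
 *			...operate on pvec.pages[i]...
 *		pagevec_release(&pvec);
 *	}
 */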
1115
1116 unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
1117 struct address_space *mapping, pgoff_t *index, pgoff_t end,
1118 xa_mark_t tag)
1119 {
1120 pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
1121 PAGEVEC_SIZE, pvec->pages);
1122 return pagevec_count(pvec);
1123 }
1124 EXPORT_SYMBOL(pagevec_lookup_range_tag);
1125
1126 unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
1127 struct address_space *mapping, pgoff_t *index, pgoff_t end,
1128 xa_mark_t tag, unsigned max_pages)
1129 {
1130 pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
1131 min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
1132 return pagevec_count(pvec);
1133 }
1134 EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);
1135 /*
1136 * Perform any setup for the swap system
1137 */
1138 void __init swap_setup(void)
1139 {
1140 unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
1141
1142 /* Use a smaller cluster for small-memory machines */
1143 if (megs < 16)
1144 page_cluster = 2;
1145 else
1146 page_cluster = 3;
1147 /*
1148  * Right now other parts of the system mean that we
1149  * _really_ don't want to cluster much more.
1150 */
1151 }
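/*
 * Worked example: with 4 KiB pages PAGE_SHIFT is 12, so the shift by
 * 20 - PAGE_SHIFT = 8 converts pages to MiB. A 512 MiB machine yields
 * megs = 512, hence page_cluster = 3: up to 1 << 3 = 8 pages are
 * swapped together.
 */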
1152
1153 #ifdef CONFIG_DEV_PAGEMAP_OPS
1154 void put_devmap_managed_page(struct page *page)
1155 {
1156 int count;
1157
1158 if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
1159 return;
1160
1161 count = page_ref_dec_return(page);
1162
1163 /*
1164 * devmap page refcounts are 1-based, rather than 0-based: if
1165 * refcount is 1, then the page is free and the refcount is
1166 * stable because nobody holds a reference on the page.
1167 */
1168 if (count == 1)
1169 free_devmap_managed_page(page);
1170 else if (!count)
1171 __put_page(page);
1172 }
1173 EXPORT_SYMBOL(put_devmap_managed_page);
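/*
 * Illustrative trace of the 1-based refcount described above: a devmap
 * page held by one user has refcount 2. put_devmap_managed_page()
 * decrements it to 1, the "free" value, so the page is returned to the
 * device via free_devmap_managed_page() rather than the page allocator.
 */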
1174 #endif