git.proxmox.com Git mirror (mirror_ubuntu-zesty-kernel.git): blame of mm/vmscan.c
Commit: [PATCH] vmscan: use unsigned longs
1da177e4
LT
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/kernel_stat.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/init.h>
21#include <linux/highmem.h>
22#include <linux/file.h>
23#include <linux/writeback.h>
24#include <linux/blkdev.h>
25#include <linux/buffer_head.h> /* for try_to_release_page(),
26 buffer_heads_over_limit */
27#include <linux/mm_inline.h>
28#include <linux/pagevec.h>
29#include <linux/backing-dev.h>
30#include <linux/rmap.h>
31#include <linux/topology.h>
32#include <linux/cpu.h>
33#include <linux/cpuset.h>
34#include <linux/notifier.h>
35#include <linux/rwsem.h>
36
37#include <asm/tlbflush.h>
38#include <asm/div64.h>
39
40#include <linux/swapops.h>
41
42/* possible outcome of pageout() */
43typedef enum {
44 /* failed to write page out, page is locked */
45 PAGE_KEEP,
46 /* move page to the active list, page is locked */
47 PAGE_ACTIVATE,
48 /* page has been sent to the disk successfully, page is unlocked */
49 PAGE_SUCCESS,
50 /* page is clean and locked */
51 PAGE_CLEAN,
52} pageout_t;
53
54struct scan_control {
1da177e4
LT
55 /* Incremented by the number of inactive pages that were scanned */
56 unsigned long nr_scanned;
57
58 /* Incremented by the number of pages reclaimed */
59 unsigned long nr_reclaimed;
60
61 unsigned long nr_mapped; /* From page_state */
62
1da177e4 63 /* This context's GFP mask */
6daa0e28 64 gfp_t gfp_mask;
1da177e4
LT
65
66 int may_writepage;
67
f1fd1067
CL
68 /* Can pages be swapped as part of reclaim? */
69 int may_swap;
70
1da177e4
LT
71 /* This context's SWAP_CLUSTER_MAX. If freeing memory for
72 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
73 * In this context, it doesn't matter that we scan the
74 * whole list at once. */
75 int swap_cluster_max;
76};
77
78/*
 79 * The list of shrinker callbacks used to apply pressure to
80 * ageable caches.
81 */
82struct shrinker {
83 shrinker_t shrinker;
84 struct list_head list;
85 int seeks; /* seeks to recreate an obj */
86 long nr; /* objs pending delete */
87};
88
89#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
90
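/*
 * Editor's note: taking ->prev means reclaim always works from the tail
 * of an LRU list, i.e. the least recently added pages are looked at
 * first (new pages are list_add()ed at the head).
 */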
91#ifdef ARCH_HAS_PREFETCH
92#define prefetch_prev_lru_page(_page, _base, _field) \
93 do { \
94 if ((_page)->lru.prev != _base) { \
95 struct page *prev; \
96 \
97 prev = lru_to_page(&(_page->lru)); \
98 prefetch(&prev->_field); \
99 } \
100 } while (0)
101#else
102#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
103#endif
104
105#ifdef ARCH_HAS_PREFETCHW
106#define prefetchw_prev_lru_page(_page, _base, _field) \
107 do { \
108 if ((_page)->lru.prev != _base) { \
109 struct page *prev; \
110 \
111 prev = lru_to_page(&(_page->lru)); \
112 prefetchw(&prev->_field); \
113 } \
114 } while (0)
115#else
116#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
117#endif
118
119/*
120 * From 0 .. 100. Higher means more swappy.
121 */
122int vm_swappiness = 60;
123static long total_memory;
124
125static LIST_HEAD(shrinker_list);
126static DECLARE_RWSEM(shrinker_rwsem);
127
128/*
129 * Add a shrinker callback to be called from the vm
130 */
131struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
132{
133 struct shrinker *shrinker;
134
135 shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
136 if (shrinker) {
137 shrinker->shrinker = theshrinker;
138 shrinker->seeks = seeks;
139 shrinker->nr = 0;
140 down_write(&shrinker_rwsem);
141 list_add_tail(&shrinker->list, &shrinker_list);
142 up_write(&shrinker_rwsem);
143 }
144 return shrinker;
145}
146EXPORT_SYMBOL(set_shrinker);
147
148/*
149 * Remove one
150 */
151void remove_shrinker(struct shrinker *shrinker)
152{
153 down_write(&shrinker_rwsem);
154 list_del(&shrinker->list);
155 up_write(&shrinker_rwsem);
156 kfree(shrinker);
157}
158EXPORT_SYMBOL(remove_shrinker);
159
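/*
 * Editor's note: a minimal sketch of a shrinker user, for illustration
 * only and not part of vmscan.c; my_cache_prune()/my_cache_count() are
 * hypothetical helpers and DEFAULT_SEEKS is assumed from <linux/mm.h>
 * of this era.  The callback contract is taken from shrink_slab()
 * below: called with nr_to_scan == 0 it only reports the object count,
 * otherwise it frees up to nr_to_scan objects and returns how many
 * remain, or -1 if it cannot shrink right now.
 */
#if 0
static struct shrinker *my_shrinker;

static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan)
		my_cache_prune(nr_to_scan);	/* hypothetical helper */
	return my_cache_count();		/* hypothetical helper */
}

static int __init my_cache_init(void)
{
	my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
	return my_shrinker ? 0 : -ENOMEM;
}

static void __exit my_cache_exit(void)
{
	remove_shrinker(my_shrinker);
}
#endif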
160#define SHRINK_BATCH 128
161/*
162 * Call the shrink functions to age shrinkable caches
163 *
164 * Here we assume it costs one seek to replace a lru page and that it also
165 * takes a seek to recreate a cache object. With this in mind we age equal
166 * percentages of the lru and ageable caches. This should balance the seeks
167 * generated by these structures.
168 *
 169 * If the vm encountered mapped pages on the LRU it increases the pressure on
170 * slab to avoid swapping.
171 *
172 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
173 *
174 * `lru_pages' represents the number of on-LRU pages in all the zones which
175 * are eligible for the caller's allocation attempt. It is used for balancing
176 * slab reclaim versus page reclaim.
b15e0905 177 *
178 * Returns the number of slab objects which we shrunk.
1da177e4 179 */
69e05944
AM
180unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
181 unsigned long lru_pages)
1da177e4
LT
182{
183 struct shrinker *shrinker;
69e05944 184 unsigned long ret = 0;
1da177e4
LT
185
186 if (scanned == 0)
187 scanned = SWAP_CLUSTER_MAX;
188
189 if (!down_read_trylock(&shrinker_rwsem))
b15e0905 190 return 1; /* Assume we'll be able to shrink next time */
1da177e4
LT
191
192 list_for_each_entry(shrinker, &shrinker_list, list) {
193 unsigned long long delta;
194 unsigned long total_scan;
ea164d73 195 unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
1da177e4
LT
196
197 delta = (4 * scanned) / shrinker->seeks;
ea164d73 198 delta *= max_pass;
1da177e4
LT
199 do_div(delta, lru_pages + 1);
200 shrinker->nr += delta;
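		/*
		 * Editor's note, worked example (numbers for illustration
		 * only): with scanned = 1024 LRU pages, seeks = 2,
		 * max_pass = 20000 freeable objects and lru_pages = 100000,
		 * delta = (4 * 1024 / 2) * 20000 / 100001 ~= 409, so this
		 * cache is asked to scan ~409 of its 20000 objects, i.e.
		 * (4 / seeks) times the fraction of the LRU just scanned
		 * (1024 / 100000).  The batching loop below then makes three
		 * SHRINK_BATCH (128) calls and carries the remaining ~25
		 * over in shrinker->nr.
		 */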
ea164d73
AA
201 if (shrinker->nr < 0) {
202 printk(KERN_ERR "%s: nr=%ld\n",
203 __FUNCTION__, shrinker->nr);
204 shrinker->nr = max_pass;
205 }
206
207 /*
208 * Avoid risking looping forever due to too large nr value:
 209 * never try to free more than twice the estimated number of
210 * freeable entries.
211 */
212 if (shrinker->nr > max_pass * 2)
213 shrinker->nr = max_pass * 2;
1da177e4
LT
214
215 total_scan = shrinker->nr;
216 shrinker->nr = 0;
217
218 while (total_scan >= SHRINK_BATCH) {
219 long this_scan = SHRINK_BATCH;
220 int shrink_ret;
b15e0905 221 int nr_before;
1da177e4 222
b15e0905 223 nr_before = (*shrinker->shrinker)(0, gfp_mask);
1da177e4
LT
224 shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
225 if (shrink_ret == -1)
226 break;
b15e0905 227 if (shrink_ret < nr_before)
228 ret += nr_before - shrink_ret;
1da177e4
LT
229 mod_page_state(slabs_scanned, this_scan);
230 total_scan -= this_scan;
231
232 cond_resched();
233 }
234
235 shrinker->nr += total_scan;
236 }
237 up_read(&shrinker_rwsem);
b15e0905 238 return ret;
1da177e4
LT
239}
240
241/* Called without lock on whether page is mapped, so answer is unstable */
242static inline int page_mapping_inuse(struct page *page)
243{
244 struct address_space *mapping;
245
246 /* Page is in somebody's page tables. */
247 if (page_mapped(page))
248 return 1;
249
250 /* Be more reluctant to reclaim swapcache than pagecache */
251 if (PageSwapCache(page))
252 return 1;
253
254 mapping = page_mapping(page);
255 if (!mapping)
256 return 0;
257
258 /* File is mmap'd by somebody? */
259 return mapping_mapped(mapping);
260}
261
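/*
 * Editor's note: a freeable pagecache page is expected to have exactly
 * two references here: the page cache's own reference plus the one held
 * by the caller who took the page off the LRU.  PagePrivate() means the
 * page also carries a buffer_head reference, which is subtracted out
 * before the comparison below.
 */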
262static inline int is_page_cache_freeable(struct page *page)
263{
264 return page_count(page) - !!PagePrivate(page) == 2;
265}
266
267static int may_write_to_queue(struct backing_dev_info *bdi)
268{
930d9152 269 if (current->flags & PF_SWAPWRITE)
1da177e4
LT
270 return 1;
271 if (!bdi_write_congested(bdi))
272 return 1;
273 if (bdi == current->backing_dev_info)
274 return 1;
275 return 0;
276}
277
278/*
279 * We detected a synchronous write error writing a page out. Probably
280 * -ENOSPC. We need to propagate that into the address_space for a subsequent
281 * fsync(), msync() or close().
282 *
283 * The tricky part is that after writepage we cannot touch the mapping: nothing
284 * prevents it from being freed up. But we have a ref on the page and once
285 * that page is locked, the mapping is pinned.
286 *
287 * We're allowed to run sleeping lock_page() here because we know the caller has
288 * __GFP_FS.
289 */
290static void handle_write_error(struct address_space *mapping,
291 struct page *page, int error)
292{
293 lock_page(page);
294 if (page_mapping(page) == mapping) {
295 if (error == -ENOSPC)
296 set_bit(AS_ENOSPC, &mapping->flags);
297 else
298 set_bit(AS_EIO, &mapping->flags);
299 }
300 unlock_page(page);
301}
302
303/*
304 * pageout is called by shrink_list() for each dirty page. Calls ->writepage().
305 */
306static pageout_t pageout(struct page *page, struct address_space *mapping)
307{
308 /*
309 * If the page is dirty, only perform writeback if that write
 310 * will be non-blocking, to prevent this allocation from being
311 * stalled by pagecache activity. But note that there may be
312 * stalls if we need to run get_block(). We could test
313 * PagePrivate for that.
314 *
315 * If this process is currently in generic_file_write() against
316 * this page's queue, we can perform writeback even if that
317 * will block.
318 *
319 * If the page is swapcache, write it back even if that would
320 * block, for some throttling. This happens by accident, because
321 * swap_backing_dev_info is bust: it doesn't reflect the
322 * congestion state of the swapdevs. Easy to fix, if needed.
323 * See swapfile.c:page_queue_congested().
324 */
325 if (!is_page_cache_freeable(page))
326 return PAGE_KEEP;
327 if (!mapping) {
328 /*
329 * Some data journaling orphaned pages can have
330 * page->mapping == NULL while being dirty with clean buffers.
331 */
323aca6c 332 if (PagePrivate(page)) {
1da177e4
LT
333 if (try_to_free_buffers(page)) {
334 ClearPageDirty(page);
335 printk("%s: orphaned page\n", __FUNCTION__);
336 return PAGE_CLEAN;
337 }
338 }
339 return PAGE_KEEP;
340 }
341 if (mapping->a_ops->writepage == NULL)
342 return PAGE_ACTIVATE;
343 if (!may_write_to_queue(mapping->backing_dev_info))
344 return PAGE_KEEP;
345
346 if (clear_page_dirty_for_io(page)) {
347 int res;
348 struct writeback_control wbc = {
349 .sync_mode = WB_SYNC_NONE,
350 .nr_to_write = SWAP_CLUSTER_MAX,
351 .nonblocking = 1,
352 .for_reclaim = 1,
353 };
354
355 SetPageReclaim(page);
356 res = mapping->a_ops->writepage(page, &wbc);
357 if (res < 0)
358 handle_write_error(mapping, page, res);
994fc28c 359 if (res == AOP_WRITEPAGE_ACTIVATE) {
1da177e4
LT
360 ClearPageReclaim(page);
361 return PAGE_ACTIVATE;
362 }
363 if (!PageWriteback(page)) {
364 /* synchronous write or broken a_ops? */
365 ClearPageReclaim(page);
366 }
367
368 return PAGE_SUCCESS;
369 }
370
371 return PAGE_CLEAN;
372}
373
49d2e9cc
CL
374static int remove_mapping(struct address_space *mapping, struct page *page)
375{
376 if (!mapping)
377 return 0; /* truncate got there first */
378
379 write_lock_irq(&mapping->tree_lock);
380
381 /*
382 * The non-racy check for busy page. It is critical to check
383 * PageDirty _after_ making sure that the page is freeable and
384 * not in use by anybody. (pagecache + us == 2)
385 */
386 if (unlikely(page_count(page) != 2))
387 goto cannot_free;
388 smp_rmb();
389 if (unlikely(PageDirty(page)))
390 goto cannot_free;
391
392 if (PageSwapCache(page)) {
393 swp_entry_t swap = { .val = page_private(page) };
394 __delete_from_swap_cache(page);
395 write_unlock_irq(&mapping->tree_lock);
396 swap_free(swap);
397 __put_page(page); /* The pagecache ref */
398 return 1;
399 }
400
401 __remove_from_page_cache(page);
402 write_unlock_irq(&mapping->tree_lock);
403 __put_page(page);
404 return 1;
405
406cannot_free:
407 write_unlock_irq(&mapping->tree_lock);
408 return 0;
409}
410
1da177e4
LT
411/*
412 * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
413 */
69e05944
AM
414static unsigned long shrink_list(struct list_head *page_list,
415 struct scan_control *sc)
1da177e4
LT
416{
417 LIST_HEAD(ret_pages);
418 struct pagevec freed_pvec;
419 int pgactivate = 0;
69e05944 420 unsigned long reclaimed = 0;
1da177e4
LT
421
422 cond_resched();
423
424 pagevec_init(&freed_pvec, 1);
425 while (!list_empty(page_list)) {
426 struct address_space *mapping;
427 struct page *page;
428 int may_enter_fs;
429 int referenced;
430
431 cond_resched();
432
433 page = lru_to_page(page_list);
434 list_del(&page->lru);
435
436 if (TestSetPageLocked(page))
437 goto keep;
438
439 BUG_ON(PageActive(page));
440
441 sc->nr_scanned++;
80e43426
CL
442
443 if (!sc->may_swap && page_mapped(page))
444 goto keep_locked;
445
1da177e4
LT
446 /* Double the slab pressure for mapped and swapcache pages */
447 if (page_mapped(page) || PageSwapCache(page))
448 sc->nr_scanned++;
449
450 if (PageWriteback(page))
451 goto keep_locked;
452
f7b7fd8f 453 referenced = page_referenced(page, 1);
1da177e4
LT
454 /* In active use or really unfreeable? Activate it. */
455 if (referenced && page_mapping_inuse(page))
456 goto activate_locked;
457
458#ifdef CONFIG_SWAP
459 /*
460 * Anonymous process memory has backing store?
461 * Try to allocate it some swap space here.
462 */
c340010e 463 if (PageAnon(page) && !PageSwapCache(page)) {
f1fd1067
CL
464 if (!sc->may_swap)
465 goto keep_locked;
1480a540 466 if (!add_to_swap(page, GFP_ATOMIC))
1da177e4
LT
467 goto activate_locked;
468 }
469#endif /* CONFIG_SWAP */
470
471 mapping = page_mapping(page);
472 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
473 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
474
475 /*
476 * The page is mapped into the page tables of one or more
477 * processes. Try to unmap it here.
478 */
479 if (page_mapped(page) && mapping) {
aa3f18b3
CL
480 /*
481 * No unmapping if we do not swap
482 */
483 if (!sc->may_swap)
484 goto keep_locked;
485
a48d07af 486 switch (try_to_unmap(page, 0)) {
1da177e4
LT
487 case SWAP_FAIL:
488 goto activate_locked;
489 case SWAP_AGAIN:
490 goto keep_locked;
491 case SWAP_SUCCESS:
492 ; /* try to free the page below */
493 }
494 }
495
496 if (PageDirty(page)) {
497 if (referenced)
498 goto keep_locked;
499 if (!may_enter_fs)
500 goto keep_locked;
52a8363e 501 if (!sc->may_writepage)
1da177e4
LT
502 goto keep_locked;
503
504 /* Page is dirty, try to write it out here */
505 switch(pageout(page, mapping)) {
506 case PAGE_KEEP:
507 goto keep_locked;
508 case PAGE_ACTIVATE:
509 goto activate_locked;
510 case PAGE_SUCCESS:
511 if (PageWriteback(page) || PageDirty(page))
512 goto keep;
513 /*
514 * A synchronous write - probably a ramdisk. Go
515 * ahead and try to reclaim the page.
516 */
517 if (TestSetPageLocked(page))
518 goto keep;
519 if (PageDirty(page) || PageWriteback(page))
520 goto keep_locked;
521 mapping = page_mapping(page);
522 case PAGE_CLEAN:
523 ; /* try to free the page below */
524 }
525 }
526
527 /*
528 * If the page has buffers, try to free the buffer mappings
529 * associated with this page. If we succeed we try to free
530 * the page as well.
531 *
532 * We do this even if the page is PageDirty().
533 * try_to_release_page() does not perform I/O, but it is
534 * possible for a page to have PageDirty set, but it is actually
535 * clean (all its buffers are clean). This happens if the
536 * buffers were written out directly, with submit_bh(). ext3
537 * will do this, as well as the blockdev mapping.
538 * try_to_release_page() will discover that cleanness and will
539 * drop the buffers and mark the page clean - it can be freed.
540 *
541 * Rarely, pages can have buffers and no ->mapping. These are
542 * the pages which were not successfully invalidated in
543 * truncate_complete_page(). We try to drop those buffers here
544 * and if that worked, and the page is no longer mapped into
545 * process address space (page_count == 1) it can be freed.
546 * Otherwise, leave the page on the LRU so it is swappable.
547 */
548 if (PagePrivate(page)) {
549 if (!try_to_release_page(page, sc->gfp_mask))
550 goto activate_locked;
551 if (!mapping && page_count(page) == 1)
552 goto free_it;
553 }
554
49d2e9cc
CL
555 if (!remove_mapping(mapping, page))
556 goto keep_locked;
1da177e4
LT
557
558free_it:
559 unlock_page(page);
560 reclaimed++;
561 if (!pagevec_add(&freed_pvec, page))
562 __pagevec_release_nonlru(&freed_pvec);
563 continue;
564
565activate_locked:
566 SetPageActive(page);
567 pgactivate++;
568keep_locked:
569 unlock_page(page);
570keep:
571 list_add(&page->lru, &ret_pages);
572 BUG_ON(PageLRU(page));
573 }
574 list_splice(&ret_pages, page_list);
575 if (pagevec_count(&freed_pvec))
576 __pagevec_release_nonlru(&freed_pvec);
577 mod_page_state(pgactivate, pgactivate);
578 sc->nr_reclaimed += reclaimed;
579 return reclaimed;
580}
581
7cbe34cf 582#ifdef CONFIG_MIGRATION
8419c318
CL
583static inline void move_to_lru(struct page *page)
584{
585 list_del(&page->lru);
586 if (PageActive(page)) {
587 /*
588 * lru_cache_add_active checks that
589 * the PG_active bit is off.
590 */
591 ClearPageActive(page);
592 lru_cache_add_active(page);
593 } else {
594 lru_cache_add(page);
595 }
596 put_page(page);
597}
598
599/*
053837fc 600 * Add isolated pages on the list back to the LRU.
8419c318
CL
601 *
602 * returns the number of pages put back.
603 */
69e05944 604unsigned long putback_lru_pages(struct list_head *l)
8419c318
CL
605{
606 struct page *page;
607 struct page *page2;
69e05944 608 unsigned long count = 0;
8419c318
CL
609
610 list_for_each_entry_safe(page, page2, l, lru) {
611 move_to_lru(page);
612 count++;
613 }
614 return count;
615}
616
e965f963
CL
617/*
 618 * Non-migratable page
619 */
620int fail_migrate_page(struct page *newpage, struct page *page)
621{
622 return -EIO;
623}
624EXPORT_SYMBOL(fail_migrate_page);
625
49d2e9cc
CL
626/*
627 * swapout a single page
628 * page is locked upon entry, unlocked on exit
49d2e9cc
CL
629 */
630static int swap_page(struct page *page)
631{
632 struct address_space *mapping = page_mapping(page);
633
634 if (page_mapped(page) && mapping)
418aade4 635 if (try_to_unmap(page, 1) != SWAP_SUCCESS)
49d2e9cc
CL
636 goto unlock_retry;
637
638 if (PageDirty(page)) {
639 /* Page is dirty, try to write it out here */
640 switch(pageout(page, mapping)) {
641 case PAGE_KEEP:
642 case PAGE_ACTIVATE:
643 goto unlock_retry;
644
645 case PAGE_SUCCESS:
646 goto retry;
647
648 case PAGE_CLEAN:
649 ; /* try to free the page below */
650 }
651 }
652
653 if (PagePrivate(page)) {
654 if (!try_to_release_page(page, GFP_KERNEL) ||
655 (!mapping && page_count(page) == 1))
656 goto unlock_retry;
657 }
658
659 if (remove_mapping(mapping, page)) {
660 /* Success */
661 unlock_page(page);
662 return 0;
663 }
664
665unlock_retry:
666 unlock_page(page);
667
668retry:
d0d96328 669 return -EAGAIN;
49d2e9cc 670}
e965f963 671EXPORT_SYMBOL(swap_page);
a48d07af
CL
672
673/*
674 * Page migration was first developed in the context of the memory hotplug
675 * project. The main authors of the migration code are:
676 *
677 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
678 * Hirokazu Takahashi <taka@valinux.co.jp>
679 * Dave Hansen <haveblue@us.ibm.com>
680 * Christoph Lameter <clameter@sgi.com>
681 */
682
683/*
684 * Remove references for a page and establish the new page with the correct
685 * basic settings to be able to stop accesses to the page.
686 */
e965f963 687int migrate_page_remove_references(struct page *newpage,
a48d07af
CL
688 struct page *page, int nr_refs)
689{
690 struct address_space *mapping = page_mapping(page);
691 struct page **radix_pointer;
692
693 /*
694 * Avoid doing any of the following work if the page count
695 * indicates that the page is in use or truncate has removed
696 * the page.
697 */
698 if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
4983da07 699 return -EAGAIN;
a48d07af
CL
700
701 /*
702 * Establish swap ptes for anonymous pages or destroy pte
703 * maps for files.
704 *
705 * In order to reestablish file backed mappings the fault handlers
706 * will take the radix tree_lock which may then be used to stop
 707 * processes from accessing this page until the new page is ready.
708 *
709 * A process accessing via a swap pte (an anonymous page) will take a
710 * page_lock on the old page which will block the process until the
711 * migration attempt is complete. At that time the PageSwapCache bit
712 * will be examined. If the page was migrated then the PageSwapCache
713 * bit will be clear and the operation to retrieve the page will be
714 * retried which will find the new page in the radix tree. Then a new
715 * direct mapping may be generated based on the radix tree contents.
716 *
717 * If the page was not migrated then the PageSwapCache bit
718 * is still set and the operation may continue.
719 */
4983da07
CL
720 if (try_to_unmap(page, 1) == SWAP_FAIL)
721 /* A vma has VM_LOCKED set -> Permanent failure */
722 return -EPERM;
a48d07af
CL
723
724 /*
725 * Give up if we were unable to remove all mappings.
726 */
727 if (page_mapcount(page))
4983da07 728 return -EAGAIN;
a48d07af
CL
729
730 write_lock_irq(&mapping->tree_lock);
731
732 radix_pointer = (struct page **)radix_tree_lookup_slot(
733 &mapping->page_tree,
734 page_index(page));
735
736 if (!page_mapping(page) || page_count(page) != nr_refs ||
737 *radix_pointer != page) {
738 write_unlock_irq(&mapping->tree_lock);
4983da07 739 return -EAGAIN;
a48d07af
CL
740 }
741
742 /*
743 * Now we know that no one else is looking at the page.
744 *
745 * Certain minimal information about a page must be available
746 * in order for other subsystems to properly handle the page if they
747 * find it through the radix tree update before we are finished
748 * copying the page.
749 */
750 get_page(newpage);
751 newpage->index = page->index;
752 newpage->mapping = page->mapping;
753 if (PageSwapCache(page)) {
754 SetPageSwapCache(newpage);
755 set_page_private(newpage, page_private(page));
756 }
757
758 *radix_pointer = newpage;
759 __put_page(page);
760 write_unlock_irq(&mapping->tree_lock);
761
762 return 0;
763}
e965f963 764EXPORT_SYMBOL(migrate_page_remove_references);
a48d07af
CL
765
766/*
767 * Copy the page to its new location
768 */
769void migrate_page_copy(struct page *newpage, struct page *page)
770{
771 copy_highpage(newpage, page);
772
773 if (PageError(page))
774 SetPageError(newpage);
775 if (PageReferenced(page))
776 SetPageReferenced(newpage);
777 if (PageUptodate(page))
778 SetPageUptodate(newpage);
779 if (PageActive(page))
780 SetPageActive(newpage);
781 if (PageChecked(page))
782 SetPageChecked(newpage);
783 if (PageMappedToDisk(page))
784 SetPageMappedToDisk(newpage);
785
786 if (PageDirty(page)) {
787 clear_page_dirty_for_io(page);
788 set_page_dirty(newpage);
789 }
790
791 ClearPageSwapCache(page);
792 ClearPageActive(page);
793 ClearPagePrivate(page);
794 set_page_private(page, 0);
795 page->mapping = NULL;
796
797 /*
798 * If any waiters have accumulated on the new page then
799 * wake them up.
800 */
801 if (PageWriteback(newpage))
802 end_page_writeback(newpage);
803}
e965f963 804EXPORT_SYMBOL(migrate_page_copy);
a48d07af
CL
805
806/*
807 * Common logic to directly migrate a single page suitable for
808 * pages that do not use PagePrivate.
809 *
810 * Pages are locked upon entry and exit.
811 */
812int migrate_page(struct page *newpage, struct page *page)
813{
4983da07
CL
814 int rc;
815
a48d07af
CL
816 BUG_ON(PageWriteback(page)); /* Writeback must be complete */
817
4983da07
CL
818 rc = migrate_page_remove_references(newpage, page, 2);
819
820 if (rc)
821 return rc;
a48d07af
CL
822
823 migrate_page_copy(newpage, page);
824
a3351e52
CL
825 /*
826 * Remove auxiliary swap entries and replace
827 * them with real ptes.
828 *
829 * Note that a real pte entry will allow processes that are not
830 * waiting on the page lock to use the new page via the page tables
831 * before the new page is unlocked.
832 */
833 remove_from_swap(newpage);
a48d07af
CL
834 return 0;
835}
e965f963 836EXPORT_SYMBOL(migrate_page);
a48d07af 837
49d2e9cc
CL
838/*
839 * migrate_pages
840 *
841 * Two lists are passed to this function. The first list
842 * contains the pages isolated from the LRU to be migrated.
843 * The second list contains new pages that the pages isolated
844 * can be moved to. If the second list is NULL then all
845 * pages are swapped out.
846 *
847 * The function returns after 10 attempts or if no pages
418aade4 848 * are movable anymore because the 'to' list has become empty
49d2e9cc
CL
849 * or no retryable pages exist anymore.
850 *
d0d96328 851 * Return: Number of pages not migrated when "to" ran empty.
49d2e9cc 852 */
69e05944 853unsigned long migrate_pages(struct list_head *from, struct list_head *to,
d4984711 854 struct list_head *moved, struct list_head *failed)
49d2e9cc 855{
69e05944
AM
856 unsigned long retry;
857 unsigned long nr_failed = 0;
49d2e9cc
CL
858 int pass = 0;
859 struct page *page;
860 struct page *page2;
861 int swapwrite = current->flags & PF_SWAPWRITE;
d0d96328 862 int rc;
49d2e9cc
CL
863
864 if (!swapwrite)
865 current->flags |= PF_SWAPWRITE;
866
867redo:
868 retry = 0;
869
d4984711 870 list_for_each_entry_safe(page, page2, from, lru) {
a48d07af
CL
871 struct page *newpage = NULL;
872 struct address_space *mapping;
873
49d2e9cc
CL
874 cond_resched();
875
d0d96328
CL
876 rc = 0;
877 if (page_count(page) == 1)
ee27497d 878 /* page was freed from under us. So we are done. */
d0d96328
CL
879 goto next;
880
a48d07af
CL
881 if (to && list_empty(to))
882 break;
883
49d2e9cc
CL
884 /*
885 * Skip locked pages during the first two passes to give the
7cbe34cf
CL
886 * functions holding the lock time to release the page. Later we
887 * use lock_page() to have a higher chance of acquiring the
888 * lock.
49d2e9cc 889 */
d0d96328 890 rc = -EAGAIN;
49d2e9cc
CL
891 if (pass > 2)
892 lock_page(page);
893 else
894 if (TestSetPageLocked(page))
d0d96328 895 goto next;
49d2e9cc
CL
896
897 /*
898 * Only wait on writeback if we have already done a pass where
 899 * we may have triggered writeouts for lots of pages.
900 */
7cbe34cf 901 if (pass > 0) {
49d2e9cc 902 wait_on_page_writeback(page);
7cbe34cf 903 } else {
d0d96328
CL
904 if (PageWriteback(page))
905 goto unlock_page;
7cbe34cf 906 }
49d2e9cc 907
d0d96328
CL
908 /*
909 * Anonymous pages must have swap cache references otherwise
910 * the information contained in the page maps cannot be
911 * preserved.
912 */
49d2e9cc 913 if (PageAnon(page) && !PageSwapCache(page)) {
1480a540 914 if (!add_to_swap(page, GFP_KERNEL)) {
d0d96328
CL
915 rc = -ENOMEM;
916 goto unlock_page;
49d2e9cc
CL
917 }
918 }
49d2e9cc 919
a48d07af
CL
920 if (!to) {
921 rc = swap_page(page);
922 goto next;
923 }
924
925 newpage = lru_to_page(to);
926 lock_page(newpage);
927
49d2e9cc 928 /*
a48d07af 929 * Pages are properly locked and writeback is complete.
49d2e9cc
CL
930 * Try to migrate the page.
931 */
a48d07af
CL
932 mapping = page_mapping(page);
933 if (!mapping)
934 goto unlock_both;
935
e965f963 936 if (mapping->a_ops->migratepage) {
418aade4
CL
937 /*
938 * Most pages have a mapping and most filesystems
939 * should provide a migration function. Anonymous
940 * pages are part of swap space which also has its
941 * own migration function. This is the most common
942 * path for page migration.
943 */
e965f963
CL
944 rc = mapping->a_ops->migratepage(newpage, page);
945 goto unlock_both;
946 }
947
a48d07af 948 /*
418aade4
CL
949 * Default handling if a filesystem does not provide
950 * a migration function. We can only migrate clean
951 * pages so try to write out any dirty pages first.
a48d07af
CL
952 */
953 if (PageDirty(page)) {
954 switch (pageout(page, mapping)) {
955 case PAGE_KEEP:
956 case PAGE_ACTIVATE:
957 goto unlock_both;
958
959 case PAGE_SUCCESS:
960 unlock_page(newpage);
961 goto next;
962
963 case PAGE_CLEAN:
964 ; /* try to migrate the page below */
965 }
966 }
418aade4 967
a48d07af 968 /*
418aade4
CL
969 * Buffers are managed in a filesystem specific way.
970 * We must have no buffers or drop them.
a48d07af
CL
971 */
972 if (!page_has_buffers(page) ||
973 try_to_release_page(page, GFP_KERNEL)) {
974 rc = migrate_page(newpage, page);
975 goto unlock_both;
976 }
977
978 /*
979 * On early passes with mapped pages simply
980 * retry. There may be a lock held for some
981 * buffers that may go away. Later
982 * swap them out.
983 */
984 if (pass > 4) {
418aade4
CL
985 /*
986 * Persistently unable to drop buffers..... As a
987 * measure of last resort we fall back to
988 * swap_page().
989 */
a48d07af
CL
990 unlock_page(newpage);
991 newpage = NULL;
992 rc = swap_page(page);
993 goto next;
994 }
995
996unlock_both:
997 unlock_page(newpage);
d0d96328
CL
998
999unlock_page:
1000 unlock_page(page);
1001
1002next:
1003 if (rc == -EAGAIN) {
1004 retry++;
1005 } else if (rc) {
1006 /* Permanent failure */
1007 list_move(&page->lru, failed);
1008 nr_failed++;
1009 } else {
a48d07af
CL
1010 if (newpage) {
1011 /* Successful migration. Return page to LRU */
1012 move_to_lru(newpage);
1013 }
d4984711 1014 list_move(&page->lru, moved);
d4984711 1015 }
49d2e9cc
CL
1016 }
1017 if (retry && pass++ < 10)
1018 goto redo;
1019
1020 if (!swapwrite)
1021 current->flags &= ~PF_SWAPWRITE;
1022
49d2e9cc
CL
1023 return nr_failed + retry;
1024}
8419c318 1025
8419c318
CL
1026/*
1027 * Isolate one page from the LRU lists and put it on the
053837fc 1028 * indicated list with elevated refcount.
8419c318
CL
1029 *
1030 * Result:
1031 * 0 = page not on LRU list
1032 * 1 = page removed from LRU list and added to the specified list.
8419c318
CL
1033 */
1034int isolate_lru_page(struct page *page)
1035{
053837fc 1036 int ret = 0;
8419c318 1037
053837fc
NP
1038 if (PageLRU(page)) {
1039 struct zone *zone = page_zone(page);
1040 spin_lock_irq(&zone->lru_lock);
8d438f96 1041 if (PageLRU(page)) {
053837fc
NP
1042 ret = 1;
1043 get_page(page);
8d438f96 1044 ClearPageLRU(page);
053837fc
NP
1045 if (PageActive(page))
1046 del_page_from_active_list(zone, page);
1047 else
1048 del_page_from_inactive_list(zone, page);
1049 }
1050 spin_unlock_irq(&zone->lru_lock);
8419c318 1051 }
053837fc
NP
1052
1053 return ret;
8419c318 1054}
7cbe34cf 1055#endif
49d2e9cc 1056
1da177e4
LT
1057/*
1058 * zone->lru_lock is heavily contended. Some of the functions that
1059 * shrink the lists perform better by taking out a batch of pages
1060 * and working on them outside the LRU lock.
1061 *
1062 * For pagecache intensive workloads, this function is the hottest
1063 * spot in the kernel (apart from copy_*_user functions).
1064 *
1065 * Appropriate locks must be held before calling this function.
1066 *
1067 * @nr_to_scan: The number of pages to look through on the list.
1068 * @src: The LRU list to pull pages off.
1069 * @dst: The temp list to put pages on to.
1070 * @scanned: The number of pages that were scanned.
1071 *
1072 * returns how many pages were moved onto *@dst.
1073 */
69e05944
AM
1074static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1075 struct list_head *src, struct list_head *dst,
1076 unsigned long *scanned)
1da177e4 1077{
69e05944 1078 unsigned long nr_taken = 0;
1da177e4 1079 struct page *page;
69e05944 1080 unsigned long scan = 0;
1da177e4
LT
1081
1082 while (scan++ < nr_to_scan && !list_empty(src)) {
7c8ee9a8 1083 struct list_head *target;
1da177e4
LT
1084 page = lru_to_page(src);
1085 prefetchw_prev_lru_page(page, src, flags);
1086
8d438f96
NP
1087 BUG_ON(!PageLRU(page));
1088
053837fc 1089 list_del(&page->lru);
7c8ee9a8
NP
1090 target = src;
1091 if (likely(get_page_unless_zero(page))) {
053837fc 1092 /*
7c8ee9a8
NP
1093 * Be careful not to clear PageLRU until after we're
1094 * sure the page is not being freed elsewhere -- the
1095 * page release code relies on it.
053837fc 1096 */
7c8ee9a8
NP
1097 ClearPageLRU(page);
1098 target = dst;
1099 nr_taken++;
1100 } /* else it is being freed elsewhere */
46453a6e 1101
7c8ee9a8 1102 list_add(&page->lru, target);
1da177e4
LT
1103 }
1104
1105 *scanned = scan;
1106 return nr_taken;
1107}
1108
1109/*
1110 * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
1111 */
69e05944
AM
1112static void shrink_cache(unsigned long max_scan, struct zone *zone,
1113 struct scan_control *sc)
1da177e4
LT
1114{
1115 LIST_HEAD(page_list);
1116 struct pagevec pvec;
69e05944 1117 unsigned long nr_scanned = 0;
1da177e4
LT
1118
1119 pagevec_init(&pvec, 1);
1120
1121 lru_add_drain();
1122 spin_lock_irq(&zone->lru_lock);
69e05944 1123 do {
1da177e4 1124 struct page *page;
69e05944
AM
1125 unsigned long nr_taken;
1126 unsigned long nr_scan;
1127 unsigned long nr_freed;
1da177e4
LT
1128
1129 nr_taken = isolate_lru_pages(sc->swap_cluster_max,
1130 &zone->inactive_list,
1131 &page_list, &nr_scan);
1132 zone->nr_inactive -= nr_taken;
1133 zone->pages_scanned += nr_scan;
1134 spin_unlock_irq(&zone->lru_lock);
1135
1136 if (nr_taken == 0)
1137 goto done;
1138
69e05944 1139 nr_scanned += nr_scan;
1da177e4 1140 nr_freed = shrink_list(&page_list, sc);
1da177e4 1141
a74609fa
NP
1142 local_irq_disable();
1143 if (current_is_kswapd()) {
1144 __mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
1145 __mod_page_state(kswapd_steal, nr_freed);
1146 } else
1147 __mod_page_state_zone(zone, pgscan_direct, nr_scan);
1148 __mod_page_state_zone(zone, pgsteal, nr_freed);
1149
1150 spin_lock(&zone->lru_lock);
1da177e4
LT
1151 /*
1152 * Put back any unfreeable pages.
1153 */
1154 while (!list_empty(&page_list)) {
1155 page = lru_to_page(&page_list);
8d438f96
NP
1156 BUG_ON(PageLRU(page));
1157 SetPageLRU(page);
1da177e4
LT
1158 list_del(&page->lru);
1159 if (PageActive(page))
1160 add_page_to_active_list(zone, page);
1161 else
1162 add_page_to_inactive_list(zone, page);
1163 if (!pagevec_add(&pvec, page)) {
1164 spin_unlock_irq(&zone->lru_lock);
1165 __pagevec_release(&pvec);
1166 spin_lock_irq(&zone->lru_lock);
1167 }
1168 }
69e05944 1169 } while (nr_scanned < max_scan);
1da177e4
LT
1170 spin_unlock_irq(&zone->lru_lock);
1171done:
1172 pagevec_release(&pvec);
1173}
1174
1175/*
1176 * This moves pages from the active list to the inactive list.
1177 *
1178 * We move them the other way if the page is referenced by one or more
1179 * processes, from rmap.
1180 *
1181 * If the pages are mostly unmapped, the processing is fast and it is
1182 * appropriate to hold zone->lru_lock across the whole operation. But if
1183 * the pages are mapped, the processing is slow (page_referenced()) so we
1184 * should drop zone->lru_lock around each page. It's impossible to balance
1185 * this, so instead we remove the pages from the LRU while processing them.
1186 * It is safe to rely on PG_active against the non-LRU pages in here because
1187 * nobody will play with that bit on a non-LRU page.
1188 *
1189 * The downside is that we have to touch page->_count against each page.
1190 * But we had to alter page->flags anyway.
1191 */
1192static void
69e05944
AM
1193refill_inactive_zone(unsigned long nr_pages, struct zone *zone,
1194 struct scan_control *sc)
1da177e4 1195{
69e05944 1196 unsigned long pgmoved;
1da177e4 1197 int pgdeactivate = 0;
69e05944 1198 unsigned long pgscanned;
1da177e4
LT
1199 LIST_HEAD(l_hold); /* The pages which were snipped off */
1200 LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */
1201 LIST_HEAD(l_active); /* Pages to go onto the active_list */
1202 struct page *page;
1203 struct pagevec pvec;
1204 int reclaim_mapped = 0;
2903fb16
CL
1205
1206 if (unlikely(sc->may_swap)) {
1207 long mapped_ratio;
1208 long distress;
1209 long swap_tendency;
1210
1211 /*
1212 * `distress' is a measure of how much trouble we're having
1213 * reclaiming pages. 0 -> no problems. 100 -> great trouble.
1214 */
1215 distress = 100 >> zone->prev_priority;
1216
1217 /*
1218 * The point of this algorithm is to decide when to start
1219 * reclaiming mapped memory instead of just pagecache. Work out
1220 * how much memory
1221 * is mapped.
1222 */
1223 mapped_ratio = (sc->nr_mapped * 100) / total_memory;
1224
1225 /*
1226 * Now decide how much we really want to unmap some pages. The
1227 * mapped ratio is downgraded - just because there's a lot of
1228 * mapped memory doesn't necessarily mean that page reclaim
1229 * isn't succeeding.
1230 *
1231 * The distress ratio is important - we don't want to start
1232 * going oom.
1233 *
1234 * A 100% value of vm_swappiness overrides this algorithm
1235 * altogether.
1236 */
1237 swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
1238
1239 /*
1240 * Now use this metric to decide whether to start moving mapped
1241 * memory onto the inactive list.
1242 */
1243 if (swap_tendency >= 100)
1244 reclaim_mapped = 1;
1245 }
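	/*
	 * Editor's note, worked example (illustration only): with the
	 * default vm_swappiness of 60 and little reclaim trouble
	 * (prev_priority == DEF_PRIORITY, 12 at this time, so distress ==
	 * 100 >> 12 == 0), half of memory being mapped gives
	 * swap_tendency = 50/2 + 0 + 60 = 85 < 100, so only pagecache is
	 * deactivated.  Once reclaim struggles and prev_priority drops to
	 * 2, distress becomes 100 >> 2 = 25 and the tendency reaches
	 * 25 + 25 + 60 = 110 >= 100, so mapped pages are deactivated too.
	 */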
1da177e4
LT
1246
1247 lru_add_drain();
1248 spin_lock_irq(&zone->lru_lock);
1249 pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
1250 &l_hold, &pgscanned);
1251 zone->pages_scanned += pgscanned;
1252 zone->nr_active -= pgmoved;
1253 spin_unlock_irq(&zone->lru_lock);
1254
1da177e4
LT
1255 while (!list_empty(&l_hold)) {
1256 cond_resched();
1257 page = lru_to_page(&l_hold);
1258 list_del(&page->lru);
1259 if (page_mapped(page)) {
1260 if (!reclaim_mapped ||
1261 (total_swap_pages == 0 && PageAnon(page)) ||
f7b7fd8f 1262 page_referenced(page, 0)) {
1da177e4
LT
1263 list_add(&page->lru, &l_active);
1264 continue;
1265 }
1266 }
1267 list_add(&page->lru, &l_inactive);
1268 }
1269
1270 pagevec_init(&pvec, 1);
1271 pgmoved = 0;
1272 spin_lock_irq(&zone->lru_lock);
1273 while (!list_empty(&l_inactive)) {
1274 page = lru_to_page(&l_inactive);
1275 prefetchw_prev_lru_page(page, &l_inactive, flags);
8d438f96
NP
1276 BUG_ON(PageLRU(page));
1277 SetPageLRU(page);
4c84cacf
NP
1278 BUG_ON(!PageActive(page));
1279 ClearPageActive(page);
1280
1da177e4
LT
1281 list_move(&page->lru, &zone->inactive_list);
1282 pgmoved++;
1283 if (!pagevec_add(&pvec, page)) {
1284 zone->nr_inactive += pgmoved;
1285 spin_unlock_irq(&zone->lru_lock);
1286 pgdeactivate += pgmoved;
1287 pgmoved = 0;
1288 if (buffer_heads_over_limit)
1289 pagevec_strip(&pvec);
1290 __pagevec_release(&pvec);
1291 spin_lock_irq(&zone->lru_lock);
1292 }
1293 }
1294 zone->nr_inactive += pgmoved;
1295 pgdeactivate += pgmoved;
1296 if (buffer_heads_over_limit) {
1297 spin_unlock_irq(&zone->lru_lock);
1298 pagevec_strip(&pvec);
1299 spin_lock_irq(&zone->lru_lock);
1300 }
1301
1302 pgmoved = 0;
1303 while (!list_empty(&l_active)) {
1304 page = lru_to_page(&l_active);
1305 prefetchw_prev_lru_page(page, &l_active, flags);
8d438f96
NP
1306 BUG_ON(PageLRU(page));
1307 SetPageLRU(page);
1da177e4
LT
1308 BUG_ON(!PageActive(page));
1309 list_move(&page->lru, &zone->active_list);
1310 pgmoved++;
1311 if (!pagevec_add(&pvec, page)) {
1312 zone->nr_active += pgmoved;
1313 pgmoved = 0;
1314 spin_unlock_irq(&zone->lru_lock);
1315 __pagevec_release(&pvec);
1316 spin_lock_irq(&zone->lru_lock);
1317 }
1318 }
1319 zone->nr_active += pgmoved;
a74609fa
NP
1320 spin_unlock(&zone->lru_lock);
1321
1322 __mod_page_state_zone(zone, pgrefill, pgscanned);
1323 __mod_page_state(pgdeactivate, pgdeactivate);
1324 local_irq_enable();
1da177e4 1325
a74609fa 1326 pagevec_release(&pvec);
1da177e4
LT
1327}
1328
1329/*
1330 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1331 */
69e05944
AM
1332static void shrink_zone(int priority, struct zone *zone,
1333 struct scan_control *sc)
1da177e4
LT
1334{
1335 unsigned long nr_active;
1336 unsigned long nr_inactive;
8695949a 1337 unsigned long nr_to_scan;
1da177e4 1338
53e9a615
MH
1339 atomic_inc(&zone->reclaim_in_progress);
1340
1da177e4
LT
1341 /*
1342 * Add one to `nr_to_scan' just to make sure that the kernel will
1343 * slowly sift through the active list.
1344 */
8695949a 1345 zone->nr_scan_active += (zone->nr_active >> priority) + 1;
1da177e4
LT
1346 nr_active = zone->nr_scan_active;
1347 if (nr_active >= sc->swap_cluster_max)
1348 zone->nr_scan_active = 0;
1349 else
1350 nr_active = 0;
1351
8695949a 1352 zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1;
1da177e4
LT
1353 nr_inactive = zone->nr_scan_inactive;
1354 if (nr_inactive >= sc->swap_cluster_max)
1355 zone->nr_scan_inactive = 0;
1356 else
1357 nr_inactive = 0;
1358
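	/*
	 * Editor's note, worked example (illustration only, assuming
	 * SWAP_CLUSTER_MAX is 32 as in kernels of this era): a zone with
	 * 131072 active pages scanned at DEF_PRIORITY (12) accumulates
	 * (131072 >> 12) + 1 = 33 >= 32, so 33 active pages are refilled
	 * on this call.  A small zone with 4096 active pages only adds
	 * (4096 >> 12) + 1 = 2 per call, so several calls (or a lower
	 * priority value) are needed before a batch is scanned.
	 */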
1da177e4
LT
1359 while (nr_active || nr_inactive) {
1360 if (nr_active) {
8695949a 1361 nr_to_scan = min(nr_active,
1da177e4 1362 (unsigned long)sc->swap_cluster_max);
8695949a
CL
1363 nr_active -= nr_to_scan;
1364 refill_inactive_zone(nr_to_scan, zone, sc);
1da177e4
LT
1365 }
1366
1367 if (nr_inactive) {
8695949a 1368 nr_to_scan = min(nr_inactive,
1da177e4 1369 (unsigned long)sc->swap_cluster_max);
8695949a
CL
1370 nr_inactive -= nr_to_scan;
1371 shrink_cache(nr_to_scan, zone, sc);
1da177e4
LT
1372 }
1373 }
1374
1375 throttle_vm_writeout();
53e9a615
MH
1376
1377 atomic_dec(&zone->reclaim_in_progress);
1da177e4
LT
1378}
1379
1380/*
1381 * This is the direct reclaim path, for page-allocating processes. We only
1382 * try to reclaim pages from zones which will satisfy the caller's allocation
1383 * request.
1384 *
1385 * We reclaim from a zone even if that zone is over pages_high. Because:
1386 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1387 * allocation or
1388 * b) The zones may be over pages_high but they must go *over* pages_high to
1389 * satisfy the `incremental min' zone defense algorithm.
1390 *
1391 * Returns the number of reclaimed pages.
1392 *
1393 * If a zone is deemed to be full of pinned pages then just give it a light
1394 * scan then give up on it.
1395 */
69e05944
AM
1396static void shrink_caches(int priority, struct zone **zones,
1397 struct scan_control *sc)
1da177e4
LT
1398{
1399 int i;
1400
1401 for (i = 0; zones[i] != NULL; i++) {
1402 struct zone *zone = zones[i];
1403
f3fe6512 1404 if (!populated_zone(zone))
1da177e4
LT
1405 continue;
1406
9bf2229f 1407 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
1da177e4
LT
1408 continue;
1409
8695949a
CL
1410 zone->temp_priority = priority;
1411 if (zone->prev_priority > priority)
1412 zone->prev_priority = priority;
1da177e4 1413
8695949a 1414 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1da177e4
LT
1415 continue; /* Let kswapd poll it */
1416
8695949a 1417 shrink_zone(priority, zone, sc);
1da177e4
LT
1418 }
1419}
1420
1421/*
1422 * This is the main entry point to direct page reclaim.
1423 *
1424 * If a full scan of the inactive list fails to free enough memory then we
1425 * are "out of memory" and something needs to be killed.
1426 *
1427 * If the caller is !__GFP_FS then the probability of a failure is reasonably
1428 * high - the zone may be full of dirty or under-writeback pages, which this
1429 * caller can't do much about. We kick pdflush and take explicit naps in the
1430 * hope that some of these pages can be written. But if the allocating task
1431 * holds filesystem locks which prevent writeout this might not work, and the
1432 * allocation attempt will fail.
1433 */
69e05944 1434unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
1da177e4
LT
1435{
1436 int priority;
1437 int ret = 0;
69e05944
AM
1438 unsigned long total_scanned = 0;
1439 unsigned long total_reclaimed = 0;
1da177e4 1440 struct reclaim_state *reclaim_state = current->reclaim_state;
1da177e4
LT
1441 unsigned long lru_pages = 0;
1442 int i;
179e9639
AM
1443 struct scan_control sc = {
1444 .gfp_mask = gfp_mask,
1445 .may_writepage = !laptop_mode,
1446 .swap_cluster_max = SWAP_CLUSTER_MAX,
1447 .may_swap = 1,
1448 };
1da177e4
LT
1449
1450 inc_page_state(allocstall);
1451
1452 for (i = 0; zones[i] != NULL; i++) {
1453 struct zone *zone = zones[i];
1454
9bf2229f 1455 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
1da177e4
LT
1456 continue;
1457
1458 zone->temp_priority = DEF_PRIORITY;
1459 lru_pages += zone->nr_active + zone->nr_inactive;
1460 }
1461
1462 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1463 sc.nr_mapped = read_page_state(nr_mapped);
1464 sc.nr_scanned = 0;
1465 sc.nr_reclaimed = 0;
f7b7fd8f
RR
1466 if (!priority)
1467 disable_swap_token();
8695949a 1468 shrink_caches(priority, zones, &sc);
1da177e4
LT
1469 shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
1470 if (reclaim_state) {
1471 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
1472 reclaim_state->reclaimed_slab = 0;
1473 }
1474 total_scanned += sc.nr_scanned;
1475 total_reclaimed += sc.nr_reclaimed;
1476 if (total_reclaimed >= sc.swap_cluster_max) {
1477 ret = 1;
1478 goto out;
1479 }
1480
1481 /*
1482 * Try to write back as many pages as we just scanned. This
1483 * tends to cause slow streaming writers to write data to the
1484 * disk smoothly, at the dirtying rate, which is nice. But
1485 * that's undesirable in laptop mode, where we *want* lumpy
1486 * writeout. So in laptop mode, write out the whole world.
1487 */
179e9639
AM
1488 if (total_scanned > sc.swap_cluster_max +
1489 sc.swap_cluster_max / 2) {
687a21ce 1490 wakeup_pdflush(laptop_mode ? 0 : total_scanned);
1da177e4
LT
1491 sc.may_writepage = 1;
1492 }
1493
1494 /* Take a nap, wait for some writeback to complete */
1495 if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
1496 blk_congestion_wait(WRITE, HZ/10);
1497 }
1498out:
1499 for (i = 0; zones[i] != 0; i++) {
1500 struct zone *zone = zones[i];
1501
9bf2229f 1502 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
1da177e4
LT
1503 continue;
1504
1505 zone->prev_priority = zone->temp_priority;
1506 }
1507 return ret;
1508}
1509
1510/*
1511 * For kswapd, balance_pgdat() will work across all this node's zones until
1512 * they are all at pages_high.
1513 *
1514 * If `nr_pages' is non-zero then it is the number of pages which are to be
1515 * reclaimed, regardless of the zone occupancies. This is a software suspend
1516 * special.
1517 *
1518 * Returns the number of pages which were actually freed.
1519 *
1520 * There is special handling here for zones which are full of pinned pages.
1521 * This can happen if the pages are all mlocked, or if they are all used by
1522 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
1523 * What we do is to detect the case where all pages in the zone have been
1524 * scanned twice and there has been zero successful reclaim. Mark the zone as
1525 * dead and from now on, only perform a short scan. Basically we're polling
1526 * the zone for when the problem goes away.
1527 *
1528 * kswapd scans the zones in the highmem->normal->dma direction. It skips
1529 * zones which have free_pages > pages_high, but once a zone is found to have
1530 * free_pages <= pages_high, we scan that zone and the lower zones regardless
1531 * of the number of free pages in the lower zones. This interoperates with
1532 * the page allocator fallback scheme to ensure that aging of pages is balanced
1533 * across the zones.
1534 */
69e05944
AM
1535static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
1536 int order)
1da177e4 1537{
69e05944 1538 unsigned long to_free = nr_pages;
1da177e4
LT
1539 int all_zones_ok;
1540 int priority;
1541 int i;
69e05944
AM
1542 unsigned long total_scanned;
1543 unsigned long total_reclaimed;
1da177e4 1544 struct reclaim_state *reclaim_state = current->reclaim_state;
179e9639
AM
1545 struct scan_control sc = {
1546 .gfp_mask = GFP_KERNEL,
1547 .may_swap = 1,
1548 .swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX,
1549 };
1da177e4
LT
1550
1551loop_again:
1552 total_scanned = 0;
1553 total_reclaimed = 0;
179e9639 1554 sc.may_writepage = !laptop_mode;
1da177e4
LT
1555 sc.nr_mapped = read_page_state(nr_mapped);
1556
1557 inc_page_state(pageoutrun);
1558
1559 for (i = 0; i < pgdat->nr_zones; i++) {
1560 struct zone *zone = pgdat->node_zones + i;
1561
1562 zone->temp_priority = DEF_PRIORITY;
1563 }
1564
1565 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1566 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
1567 unsigned long lru_pages = 0;
1568
f7b7fd8f
RR
1569 /* The swap token gets in the way of swapout... */
1570 if (!priority)
1571 disable_swap_token();
1572
1da177e4
LT
1573 all_zones_ok = 1;
1574
1575 if (nr_pages == 0) {
1576 /*
1577 * Scan in the highmem->dma direction for the highest
1578 * zone which needs scanning
1579 */
1580 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
1581 struct zone *zone = pgdat->node_zones + i;
1582
f3fe6512 1583 if (!populated_zone(zone))
1da177e4
LT
1584 continue;
1585
1586 if (zone->all_unreclaimable &&
1587 priority != DEF_PRIORITY)
1588 continue;
1589
1590 if (!zone_watermark_ok(zone, order,
7fb1d9fc 1591 zone->pages_high, 0, 0)) {
1da177e4
LT
1592 end_zone = i;
1593 goto scan;
1594 }
1595 }
1596 goto out;
1597 } else {
1598 end_zone = pgdat->nr_zones - 1;
1599 }
1600scan:
1601 for (i = 0; i <= end_zone; i++) {
1602 struct zone *zone = pgdat->node_zones + i;
1603
1604 lru_pages += zone->nr_active + zone->nr_inactive;
1605 }
1606
1607 /*
1608 * Now scan the zone in the dma->highmem direction, stopping
1609 * at the last zone which needs scanning.
1610 *
1611 * We do this because the page allocator works in the opposite
1612 * direction. This prevents the page allocator from allocating
1613 * pages behind kswapd's direction of progress, which would
1614 * cause too much scanning of the lower zones.
1615 */
1616 for (i = 0; i <= end_zone; i++) {
1617 struct zone *zone = pgdat->node_zones + i;
b15e0905 1618 int nr_slab;
1da177e4 1619
f3fe6512 1620 if (!populated_zone(zone))
1da177e4
LT
1621 continue;
1622
1623 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1624 continue;
1625
1626 if (nr_pages == 0) { /* Not software suspend */
1627 if (!zone_watermark_ok(zone, order,
7fb1d9fc 1628 zone->pages_high, end_zone, 0))
1da177e4
LT
1629 all_zones_ok = 0;
1630 }
1631 zone->temp_priority = priority;
1632 if (zone->prev_priority > priority)
1633 zone->prev_priority = priority;
1634 sc.nr_scanned = 0;
1635 sc.nr_reclaimed = 0;
8695949a 1636 shrink_zone(priority, zone, &sc);
1da177e4 1637 reclaim_state->reclaimed_slab = 0;
b15e0905 1638 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
1639 lru_pages);
1da177e4
LT
1640 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
1641 total_reclaimed += sc.nr_reclaimed;
1642 total_scanned += sc.nr_scanned;
1643 if (zone->all_unreclaimable)
1644 continue;
b15e0905 1645 if (nr_slab == 0 && zone->pages_scanned >=
1646 (zone->nr_active + zone->nr_inactive) * 4)
1da177e4
LT
1647 zone->all_unreclaimable = 1;
1648 /*
1649 * If we've done a decent amount of scanning and
1650 * the reclaim ratio is low, start doing writepage
1651 * even in laptop mode
1652 */
1653 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
1654 total_scanned > total_reclaimed+total_reclaimed/2)
1655 sc.may_writepage = 1;
1656 }
1657 if (nr_pages && to_free > total_reclaimed)
1658 continue; /* swsusp: need to do more work */
1659 if (all_zones_ok)
1660 break; /* kswapd: all done */
1661 /*
1662 * OK, kswapd is getting into trouble. Take a nap, then take
1663 * another pass across the zones.
1664 */
1665 if (total_scanned && priority < DEF_PRIORITY - 2)
1666 blk_congestion_wait(WRITE, HZ/10);
1667
1668 /*
1669 * We do this so kswapd doesn't build up large priorities for
1670 * example when it is freeing in parallel with allocators. It
1671 * matches the direct reclaim path behaviour in terms of impact
1672 * on zone->*_priority.
1673 */
1674 if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages))
1675 break;
1676 }
1677out:
1678 for (i = 0; i < pgdat->nr_zones; i++) {
1679 struct zone *zone = pgdat->node_zones + i;
1680
1681 zone->prev_priority = zone->temp_priority;
1682 }
1683 if (!all_zones_ok) {
1684 cond_resched();
1685 goto loop_again;
1686 }
1687
1688 return total_reclaimed;
1689}
1690
1691/*
1692 * The background pageout daemon, started as a kernel thread
1693 * from the init process.
1694 *
1695 * This basically trickles out pages so that we have _some_
1696 * free memory available even if there is no other activity
1697 * that frees anything up. This is needed for things like routing
1698 * etc, where we otherwise might have all activity going on in
1699 * asynchronous contexts that cannot page things out.
1700 *
1701 * If there are applications that are active memory-allocators
1702 * (most normal use), this basically shouldn't matter.
1703 */
1704static int kswapd(void *p)
1705{
1706 unsigned long order;
1707 pg_data_t *pgdat = (pg_data_t*)p;
1708 struct task_struct *tsk = current;
1709 DEFINE_WAIT(wait);
1710 struct reclaim_state reclaim_state = {
1711 .reclaimed_slab = 0,
1712 };
1713 cpumask_t cpumask;
1714
1715 daemonize("kswapd%d", pgdat->node_id);
1716 cpumask = node_to_cpumask(pgdat->node_id);
1717 if (!cpus_empty(cpumask))
1718 set_cpus_allowed(tsk, cpumask);
1719 current->reclaim_state = &reclaim_state;
1720
1721 /*
1722 * Tell the memory management that we're a "memory allocator",
1723 * and that if we need more memory we should get access to it
1724 * regardless (see "__alloc_pages()"). "kswapd" should
1725 * never get caught in the normal page freeing logic.
1726 *
1727 * (Kswapd normally doesn't need memory anyway, but sometimes
1728 * you need a small amount of memory in order to be able to
1729 * page out something else, and this flag essentially protects
1730 * us from recursively trying to free more memory as we're
1731 * trying to free the first piece of memory in the first place).
1732 */
930d9152 1733 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
1da177e4
LT
1734
1735 order = 0;
1736 for ( ; ; ) {
1737 unsigned long new_order;
3e1d1d28
CL
1738
1739 try_to_freeze();
1da177e4
LT
1740
1741 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
1742 new_order = pgdat->kswapd_max_order;
1743 pgdat->kswapd_max_order = 0;
1744 if (order < new_order) {
1745 /*
1746 * Don't sleep if someone wants a larger 'order'
1747 * allocation
1748 */
1749 order = new_order;
1750 } else {
1751 schedule();
1752 order = pgdat->kswapd_max_order;
1753 }
1754 finish_wait(&pgdat->kswapd_wait, &wait);
1755
1756 balance_pgdat(pgdat, 0, order);
1757 }
1758 return 0;
1759}
1760
1761/*
1762 * A zone is low on free memory, so wake its kswapd task to service it.
1763 */
1764void wakeup_kswapd(struct zone *zone, int order)
1765{
1766 pg_data_t *pgdat;
1767
f3fe6512 1768 if (!populated_zone(zone))
1da177e4
LT
1769 return;
1770
1771 pgdat = zone->zone_pgdat;
7fb1d9fc 1772 if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
1da177e4
LT
1773 return;
1774 if (pgdat->kswapd_max_order < order)
1775 pgdat->kswapd_max_order = order;
9bf2229f 1776 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
1da177e4 1777 return;
8d0986e2 1778 if (!waitqueue_active(&pgdat->kswapd_wait))
1da177e4 1779 return;
8d0986e2 1780 wake_up_interruptible(&pgdat->kswapd_wait);
1da177e4
LT
1781}
1782
1783#ifdef CONFIG_PM
1784/*
1785 * Try to free `nr_pages' of memory, system-wide. Returns the number of freed
1786 * pages.
1787 */
69e05944 1788unsigned long shrink_all_memory(unsigned long nr_pages)
1da177e4
LT
1789{
1790 pg_data_t *pgdat;
69e05944
AM
1791 unsigned long nr_to_free = nr_pages;
1792 unsigned long ret = 0;
1da177e4
LT
1793 struct reclaim_state reclaim_state = {
1794 .reclaimed_slab = 0,
1795 };
1796
1797 current->reclaim_state = &reclaim_state;
1798 for_each_pgdat(pgdat) {
69e05944
AM
1799 unsigned long freed;
1800
1da177e4
LT
1801 freed = balance_pgdat(pgdat, nr_to_free, 0);
1802 ret += freed;
1803 nr_to_free -= freed;
69e05944 1804 if ((long)nr_to_free <= 0)
1da177e4
LT
1805 break;
1806 }
1807 current->reclaim_state = NULL;
1808 return ret;
1809}
1810#endif
1811
1812#ifdef CONFIG_HOTPLUG_CPU
1813/* It's optimal to keep kswapds on the same CPUs as their memory, but
1814 not required for correctness. So if the last cpu in a node goes
1815 away, we get changed to run anywhere: as the first one comes back,
1816 restore their cpu bindings. */
1817static int __devinit cpu_callback(struct notifier_block *nfb,
69e05944 1818 unsigned long action, void *hcpu)
1da177e4
LT
1819{
1820 pg_data_t *pgdat;
1821 cpumask_t mask;
1822
1823 if (action == CPU_ONLINE) {
1824 for_each_pgdat(pgdat) {
1825 mask = node_to_cpumask(pgdat->node_id);
1826 if (any_online_cpu(mask) != NR_CPUS)
1827 /* One of our CPUs online: restore mask */
1828 set_cpus_allowed(pgdat->kswapd, mask);
1829 }
1830 }
1831 return NOTIFY_OK;
1832}
1833#endif /* CONFIG_HOTPLUG_CPU */
1834
1835static int __init kswapd_init(void)
1836{
1837 pg_data_t *pgdat;
69e05944 1838
1da177e4 1839 swap_setup();
69e05944
AM
1840 for_each_pgdat(pgdat) {
1841 pid_t pid;
1842
1843 pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
1844 BUG_ON(pid < 0);
1845 pgdat->kswapd = find_task_by_pid(pid);
1846 }
1da177e4
LT
1847 total_memory = nr_free_pagecache_pages();
1848 hotcpu_notifier(cpu_callback, 0);
1849 return 0;
1850}
1851
1852module_init(kswapd_init)
9eeff239
CL
1853
1854#ifdef CONFIG_NUMA
1855/*
1856 * Zone reclaim mode
1857 *
1858 * If non-zero call zone_reclaim when the number of free pages falls below
1859 * the watermarks.
1860 *
1861 * In the future we may add flags to the mode. However, the page allocator
1862 * should only have to check that zone_reclaim_mode != 0 before calling
1863 * zone_reclaim().
1864 */
1865int zone_reclaim_mode __read_mostly;
1866
1b2ffb78
CL
1867#define RECLAIM_OFF 0
1868#define RECLAIM_ZONE (1<<0) /* Run shrink_cache on the zone */
1869#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
1870#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
2a16e3f4 1871#define RECLAIM_SLAB (1<<3) /* Do a global slab shrink if the zone is out of memory */
1b2ffb78 1872
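/*
 * Editor's note (illustration only): zone_reclaim_mode is a bitmask, so
 * e.g. a value of 1 enables plain local reclaim (RECLAIM_ZONE), while
 * 1 | 2 = 3 also allows dirty pages to be written out during that
 * reclaim (RECLAIM_ZONE | RECLAIM_WRITE).  It is normally set through
 * the vm.zone_reclaim_mode sysctl (/proc/sys/vm/zone_reclaim_mode).
 */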
9eeff239
CL
1873/*
 1874 * Minimum time between zone reclaim scans
1875 */
2a11ff06 1876int zone_reclaim_interval __read_mostly = 30*HZ;
a92f7126
CL
1877
1878/*
1879 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 1880 * of a node considered for each zone_reclaim. A value of 4 scans 1/16th of
1881 * a zone.
1882 */
1883#define ZONE_RECLAIM_PRIORITY 4
1884
9eeff239
CL
1885/*
1886 * Try to free up some pages from this zone through reclaim.
1887 */
179e9639 1888static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
9eeff239 1889{
69e05944 1890 const unsigned long nr_pages = 1 << order;
9eeff239
CL
1891 struct task_struct *p = current;
1892 struct reclaim_state reclaim_state;
8695949a 1893 int priority;
179e9639
AM
1894 struct scan_control sc = {
1895 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
1896 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
1897 .nr_mapped = read_page_state(nr_mapped),
69e05944
AM
1898 .swap_cluster_max = max_t(unsigned long, nr_pages,
1899 SWAP_CLUSTER_MAX),
179e9639
AM
1900 .gfp_mask = gfp_mask,
1901 };
9eeff239
CL
1902
1903 disable_swap_token();
9eeff239 1904 cond_resched();
d4f7796e
CL
1905 /*
1906 * We need to be able to allocate from the reserves for RECLAIM_SWAP
1907 * and we also need to be able to write out pages for RECLAIM_WRITE
1908 * and RECLAIM_SWAP.
1909 */
1910 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
9eeff239
CL
1911 reclaim_state.reclaimed_slab = 0;
1912 p->reclaim_state = &reclaim_state;
c84db23c 1913
a92f7126
CL
1914 /*
1915 * Free memory by calling shrink zone with increasing priorities
1916 * until we have enough memory freed.
1917 */
8695949a 1918 priority = ZONE_RECLAIM_PRIORITY;
a92f7126 1919 do {
8695949a
CL
1920 shrink_zone(priority, zone, &sc);
1921 priority--;
1922 } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
c84db23c 1923
2a16e3f4
CL
1924 if (sc.nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
1925 /*
1926 * shrink_slab does not currently allow us to determine
1927 * how many pages were freed in the zone. So we just
1928 * shake the slab and then go offnode for a single allocation.
1929 *
1930 * shrink_slab will free memory on all zones and may take
1931 * a long time.
1932 */
1933 shrink_slab(sc.nr_scanned, gfp_mask, order);
2a16e3f4
CL
1934 }
1935
9eeff239 1936 p->reclaim_state = NULL;
d4f7796e 1937 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
9eeff239
CL
1938
1939 if (sc.nr_reclaimed == 0)
1940 zone->last_unsuccessful_zone_reclaim = jiffies;
1941
c84db23c 1942 return sc.nr_reclaimed >= nr_pages;
9eeff239 1943}
179e9639
AM
1944
1945int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1946{
1947 cpumask_t mask;
1948 int node_id;
1949
1950 /*
1951 * Do not reclaim if there was a recent unsuccessful attempt at zone
1952 * reclaim. In that case we let allocations go off node for the
1953 * zone_reclaim_interval. Otherwise we would scan for each off-node
1954 * page allocation.
1955 */
1956 if (time_before(jiffies,
1957 zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
1958 return 0;
1959
1960 /*
 1961 * Avoid concurrent zone reclaims; do not reclaim in a zone that has
 1962 * no reclaimable pages; and do not scan at all if we should not
 1963 * delay the allocation.
1964 */
1965 if (!(gfp_mask & __GFP_WAIT) ||
1966 zone->all_unreclaimable ||
1967 atomic_read(&zone->reclaim_in_progress) > 0 ||
1968 (current->flags & PF_MEMALLOC))
1969 return 0;
1970
1971 /*
1972 * Only run zone reclaim on the local zone or on zones that do not
1973 * have associated processors. This will favor the local processor
1974 * over remote processors and spread off node memory allocations
1975 * as wide as possible.
1976 */
1977 node_id = zone->zone_pgdat->node_id;
1978 mask = node_to_cpumask(node_id);
1979 if (!cpus_empty(mask) && node_id != numa_node_id())
1980 return 0;
1981 return __zone_reclaim(zone, gfp_mask, order);
1982}
9eeff239 1983#endif