git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blobdiff - mm/filemap.c
mm: thrash detection-based file cache sizing
[mirror_ubuntu-zesty-kernel.git] / mm / filemap.c
index 05c44aa44188e8dbb5fe62c342bd8c63cdd2f4b5..a603c4d7d3c9c7ff2659bbdd273d20fbb5e90f6e 100644 (file)
@@ -469,7 +469,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
 static int page_cache_tree_insert(struct address_space *mapping,
-                                 struct page *page)
+                                 struct page *page, void **shadowp)
 {
        void **slot;
        int error;
@@ -484,6 +484,8 @@ static int page_cache_tree_insert(struct address_space *mapping,
                radix_tree_replace_slot(slot, page);
                mapping->nrshadows--;
                mapping->nrpages++;
+               if (shadowp)
+                       *shadowp = p;
                return 0;
        }
        error = radix_tree_insert(&mapping->page_tree, page->index, page);
@@ -492,18 +494,10 @@ static int page_cache_tree_insert(struct address_space *mapping,
        return error;
 }
 
-/**
- * add_to_page_cache_locked - add a locked page to the pagecache
- * @page:      page to add
- * @mapping:   the page's address_space
- * @offset:    page index
- * @gfp_mask:  page allocation mode
- *
- * This function is used to add a page to the pagecache. It must be locked.
- * This function does not add the page to the LRU.  The caller must do that.
- */
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-               pgoff_t offset, gfp_t gfp_mask)
+static int __add_to_page_cache_locked(struct page *page,
+                                     struct address_space *mapping,
+                                     pgoff_t offset, gfp_t gfp_mask,
+                                     void **shadowp)
 {
        int error;
 
@@ -526,7 +520,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
        page->index = offset;
 
        spin_lock_irq(&mapping->tree_lock);
-       error = page_cache_tree_insert(mapping, page);
+       error = page_cache_tree_insert(mapping, page, shadowp);
        radix_tree_preload_end();
        if (unlikely(error))
                goto err_insert;
@@ -542,16 +536,49 @@ err_insert:
        page_cache_release(page);
        return error;
 }
+
+/**
+ * add_to_page_cache_locked - add a locked page to the pagecache
+ * @page:      page to add
+ * @mapping:   the page's address_space
+ * @offset:    page index
+ * @gfp_mask:  page allocation mode
+ *
+ * This function is used to add a page to the pagecache. It must be locked.
+ * This function does not add the page to the LRU.  The caller must do that.
+ */
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+               pgoff_t offset, gfp_t gfp_mask)
+{
+       return __add_to_page_cache_locked(page, mapping, offset,
+                                         gfp_mask, NULL);
+}
 EXPORT_SYMBOL(add_to_page_cache_locked);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t offset, gfp_t gfp_mask)
 {
+       void *shadow = NULL;
        int ret;
 
-       ret = add_to_page_cache(page, mapping, offset, gfp_mask);
-       if (ret == 0)
-               lru_cache_add_file(page);
+       __set_page_locked(page);
+       ret = __add_to_page_cache_locked(page, mapping, offset,
+                                        gfp_mask, &shadow);
+       if (unlikely(ret))
+               __clear_page_locked(page);
+       else {
+               /*
+                * The page might have been evicted from cache only
+                * recently, in which case it should be activated like
+                * any other repeatedly accessed page.
+                */
+               if (shadow && workingset_refault(shadow)) {
+                       SetPageActive(page);
+                       workingset_activation(page);
+               } else
+                       ClearPageActive(page);
+               lru_cache_add(page);
+       }
        return ret;
 }
 EXPORT_SYMBOL_GPL(add_to_page_cache_lru);