git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blobdiff - mm/filemap.c
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md
[mirror_ubuntu-artful-kernel.git] / mm / filemap.c
index 50b52fe51937ca70e62a33ab1553aef9b77ad1a0..69568388c699493ac694a960ca7c24b90b13e080 100644 (file)
@@ -132,44 +132,28 @@ static int page_cache_tree_insert(struct address_space *mapping,
                if (!dax_mapping(mapping)) {
                        if (shadowp)
                                *shadowp = p;
-                       if (node)
-                               workingset_node_shadows_dec(node);
                } else {
                        /* DAX can replace empty locked entry with a hole */
                        WARN_ON_ONCE(p !=
-                               (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
-                                        RADIX_DAX_ENTRY_LOCK));
-                       /* DAX accounts exceptional entries as normal pages */
-                       if (node)
-                               workingset_node_pages_dec(node);
+                               dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
                        /* Wakeup waiters for exceptional entry lock */
-                       dax_wake_mapping_entry_waiter(mapping, page->index,
+                       dax_wake_mapping_entry_waiter(mapping, page->index, p,
                                                      false);
                }
        }
-       radix_tree_replace_slot(slot, page);
+       __radix_tree_replace(&mapping->page_tree, node, slot, page,
+                            workingset_update_node, mapping);
        mapping->nrpages++;
-       if (node) {
-               workingset_node_pages_inc(node);
-               /*
-                * Don't track node that contains actual pages.
-                *
-                * Avoid acquiring the list_lru lock if already
-                * untracked.  The list_empty() test is safe as
-                * node->private_list is protected by
-                * mapping->tree_lock.
-                */
-               if (!list_empty(&node->private_list))
-                       list_lru_del(&workingset_shadow_nodes,
-                                    &node->private_list);
-       }
        return 0;
 }
 
 static void page_cache_tree_delete(struct address_space *mapping,
                                   struct page *page, void *shadow)
 {
-       int i, nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
+       int i, nr;
+
+       /* hugetlb pages are represented by one entry in the radix tree */
+       nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageTail(page), page);
@@ -182,44 +166,11 @@ static void page_cache_tree_delete(struct address_space *mapping,
                __radix_tree_lookup(&mapping->page_tree, page->index + i,
                                    &node, &slot);
 
-               radix_tree_clear_tags(&mapping->page_tree, node, slot);
-
-               if (!node) {
-                       VM_BUG_ON_PAGE(nr != 1, page);
-                       /*
-                        * We need a node to properly account shadow
-                        * entries. Don't plant any without. XXX
-                        */
-                       shadow = NULL;
-               }
-
-               radix_tree_replace_slot(slot, shadow);
+               VM_BUG_ON_PAGE(!node && nr != 1, page);
 
-               if (!node)
-                       break;
-
-               workingset_node_pages_dec(node);
-               if (shadow)
-                       workingset_node_shadows_inc(node);
-               else
-                       if (__radix_tree_delete_node(&mapping->page_tree, node))
-                               continue;
-
-               /*
-                * Track node that only contains shadow entries. DAX mappings
-                * contain no shadow entries and may contain other exceptional
-                * entries so skip those.
-                *
-                * Avoid acquiring the list_lru lock if already tracked.
-                * The list_empty() test is safe as node->private_list is
-                * protected by mapping->tree_lock.
-                */
-               if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
-                               list_empty(&node->private_list)) {
-                       node->private_data = mapping;
-                       list_lru_add(&workingset_shadow_nodes,
-                                       &node->private_list);
-               }
+               radix_tree_clear_tags(&mapping->page_tree, node, slot);
+               __radix_tree_replace(&mapping->page_tree, node, slot, shadow,
+                                    workingset_update_node, mapping);
        }
 
        if (shadow) {