diff --git a/mm/filemap.c b/mm/filemap.c
index a49702445ce05beeb8d80b46f0ee57c116986be2..62aafdb0c8b8a319e8839086b6ab8b9e00ad9550 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -130,17 +130,8 @@ static int page_cache_tree_insert(struct address_space *mapping,
                        return -EEXIST;
 
                mapping->nrexceptional--;
-               if (!dax_mapping(mapping)) {
-                       if (shadowp)
-                               *shadowp = p;
-               } else {
-                       /* DAX can replace empty locked entry with a hole */
-                       WARN_ON_ONCE(p !=
-                               dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
-                       /* Wakeup waiters for exceptional entry lock */
-                       dax_wake_mapping_entry_waiter(mapping, page->index, p,
-                                                     true);
-               }
+               if (shadowp)
+                       *shadowp = p;
        }
        __radix_tree_replace(&mapping->page_tree, node, slot, page,
                             workingset_update_node, mapping);
@@ -885,6 +876,7 @@ void __init pagecache_init(void)
        page_writeback_init();
 }
 
+/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
 struct wait_page_key {
        struct page *page;
        int bit_nr;
@@ -909,8 +901,10 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
 
        if (wait_page->bit_nr != key->bit_nr)
                return 0;
+
+       /* Stop walking if it's locked */
        if (test_bit(key->bit_nr, &key->page->flags))
-               return 0;
+               return -1;
 
        return autoremove_wake_function(wait, mode, sync, key);
 }
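
For context, the new -1 return matters because the generic waker in kernel/sched/wait.c stops walking the wait queue on a negative return from the wake function, and a positive return from an exclusive waiter consumes one of the requested exclusive wakeups. A simplified sketch of that walk (bookmark handling and queue locking omitted, not verbatim kernel code):

	/* Simplified sketch of __wake_up_common() in kernel/sched/wait.c */
	static void __wake_up_common(wait_queue_head_t *wq_head, unsigned int mode,
				     int nr_exclusive, int wake_flags, void *key)
	{
		wait_queue_entry_t *curr, *next;

		list_for_each_entry_safe(curr, next, &wq_head->head, entry) {
			unsigned flags = curr->flags;
			int ret = curr->func(curr, mode, wake_flags, key);

			/* wake_page_function() returns -1: page still locked, stop here */
			if (ret < 0)
				break;
			/* woke an exclusive (lock) waiter; stop once enough are woken */
			if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
				break;
		}
	}

So when the page is still locked, the walk aborts instead of pointlessly waking every waiter queued behind the current lock holder.
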
@@ -964,6 +958,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
        int ret = 0;
 
        init_wait(wait);
+       wait->flags = lock ? WQ_FLAG_EXCLUSIVE : 0;
        wait->func = wake_page_function;
        wait_page.page = page;
        wait_page.bit_nr = bit_nr;
@@ -972,10 +967,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
                spin_lock_irq(&q->lock);
 
                if (likely(list_empty(&wait->entry))) {
-                       if (lock)
-                               __add_wait_queue_entry_tail_exclusive(q, wait);
-                       else
-                               __add_wait_queue(q, wait);
+                       __add_wait_queue_entry_tail(q, wait);
                        SetPageWaiters(page);
                }
 
@@ -985,10 +977,6 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 
                if (likely(test_bit(bit_nr, &page->flags))) {
                        io_schedule();
-                       if (unlikely(signal_pending_state(state, current))) {
-                               ret = -EINTR;
-                               break;
-                       }
                }
 
                if (lock) {
@@ -998,6 +986,11 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
                        if (!test_bit(bit_nr, &page->flags))
                                break;
                }
+
+
+               if (unlikely(signal_pending_state(state, current))) {
+                       ret = -EINTR;
+                       break;
+               }
        }
 
        finish_wait(q, wait);
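
For reference, the callers in this file pass the lock/state combinations that make the changes above visible; roughly (a paraphrased sketch of the surrounding mm/filemap.c helpers around this series, not part of the hunks):

	void wait_on_page_bit(struct page *page, int bit_nr)
	{
		wait_queue_head_t *q = page_waitqueue(page);
		/* non-exclusive waiter, cannot be interrupted by signals */
		wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false);
	}

	void __lock_page(struct page *__page)
	{
		struct page *page = compound_head(__page);
		wait_queue_head_t *q = page_waitqueue(page);
		/* lock=true: queued with WQ_FLAG_EXCLUSIVE at the tail */
		wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true);
	}

	int __lock_page_killable(struct page *__page)
	{
		struct page *page = compound_head(__page);
		wait_queue_head_t *q = page_waitqueue(page);
		/* only killable/interruptible states can hit the relocated -EINTR check */
		return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true);
	}
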
@@ -1039,7 +1032,7 @@ void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
        unsigned long flags;
 
        spin_lock_irqsave(&q->lock, flags);
-       __add_wait_queue(q, waiter);
+       __add_wait_queue_entry_tail(q, waiter);
        SetPageWaiters(page);
        spin_unlock_irqrestore(&q->lock, flags);
 }
@@ -2541,7 +2534,7 @@ int filemap_page_mkwrite(struct vm_fault *vmf)
        int ret = VM_FAULT_LOCKED;
 
        sb_start_pagefault(inode->i_sb);
-       file_update_time(vmf->vma->vm_file);
+       vma_file_update_time(vmf->vma);
        lock_page(page);
        if (page->mapping != inode->i_mapping) {
                unlock_page(page);