shmem_file_write is redundant
diff --git a/mm/shmem.c b/mm/shmem.c
index 404e53bb212764f8a6ab7f5bf688c54d4d4c9d06..5dfe79048f6d5e66f3379a2bfa66ebaf12e0074c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
 
 /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
 enum sgp_type {
-       SGP_QUICK,      /* don't try more than file page cache lookup */
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
-       SGP_FAULT,      /* same as SGP_CACHE, return with page locked */
 };
 
 static int shmem_getpage(struct inode *inode, unsigned long idx,
@@ -731,6 +729,8 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
                                (void) shmem_getpage(inode,
                                        attr->ia_size>>PAGE_CACHE_SHIFT,
                                                &page, SGP_READ, NULL);
+                               if (page)
+                                       unlock_page(page);
                        }
                        /*
                         * Reset SHMEM_PAGEIN flag so that shmem_truncate can
@@ -915,6 +915,21 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        struct inode *inode;
 
        BUG_ON(!PageLocked(page));
+       /*
+        * shmem_backing_dev_info's capabilities prevent regular writeback or
+        * sync from ever calling shmem_writepage; but a stacking filesystem
+        * may use the ->writepage of its underlying filesystem, in which case
+        * we want to do nothing when that underlying filesystem is tmpfs
+        * (writing out to swap is useful as a response to memory pressure, but
+        * of no use to stabilize the data) - just redirty the page, unlock it
+        * and claim success in this case.  AOP_WRITEPAGE_ACTIVATE, and the
+        * page_mapped check below, must be avoided unless we're in reclaim.
+        */
+       if (!wbc->for_reclaim) {
+               set_page_dirty(page);
+               unlock_page(page);
+               return 0;
+       }
        BUG_ON(page_mapped(page));
 
        mapping = page->mapping;
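
The guard added above changes shmem_writepage's behaviour depending on who calls it: page reclaim sets wbc->for_reclaim, and only then is pushing the page to swap worthwhile; writeback or sync can still reach ->writepage through a stacking filesystem that calls into its lower filesystem. A minimal sketch of that second path, where stackfs_writepage() and get_lower_page() are invented names for illustration only:

	static int stackfs_writepage(struct page *page,
				     struct writeback_control *wbc)
	{
		/* get_lower_page() is hypothetical: find the lower
		 * filesystem's page backing this upper page. */
		struct page *lower_page = get_lower_page(page);

		/*
		 * ->writepage expects a locked page and unlocks it itself.
		 * If the lower mapping is tmpfs, its ->writepage now just
		 * redirties the page, unlocks it and returns 0, instead of
		 * writing to swap for no stability benefit.
		 */
		lock_page(lower_page);
		return lower_page->mapping->a_ops->writepage(lower_page, wbc);
	}
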
@@ -1010,54 +1025,34 @@ out:
        return err;
 }
 
-static struct page *shmem_swapin_async(struct shared_policy *p,
-                                      swp_entry_t entry, unsigned long idx)
+static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+                       struct shmem_inode_info *info, unsigned long idx)
 {
-       struct page *page;
        struct vm_area_struct pvma;
+       struct page *page;
 
        /* Create a pseudo vma that just contains the policy */
-       memset(&pvma, 0, sizeof(struct vm_area_struct));
-       pvma.vm_end = PAGE_SIZE;
+       pvma.vm_start = 0;
        pvma.vm_pgoff = idx;
-       pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
-       page = read_swap_cache_async(entry, &pvma, 0);
+       pvma.vm_ops = NULL;
+       pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+       page = swapin_readahead(entry, gfp, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
 }
 
-static struct page *shmem_swapin(struct shmem_inode_info *info,
-                                swp_entry_t entry, unsigned long idx)
-{
-       struct shared_policy *p = &info->policy;
-       int i, num;
-       struct page *page;
-       unsigned long offset;
-
-       num = valid_swaphandles(entry, &offset);
-       for (i = 0; i < num; offset++, i++) {
-               page = shmem_swapin_async(p,
-                               swp_entry(swp_type(entry), offset), idx);
-               if (!page)
-                       break;
-               page_cache_release(page);
-       }
-       lru_add_drain();        /* Push any new pages onto the LRU now */
-       return shmem_swapin_async(p, entry, idx);
-}
-
-static struct page *
-shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
-                unsigned long idx)
+static struct page *shmem_alloc_page(gfp_t gfp,
+                       struct shmem_inode_info *info, unsigned long idx)
 {
        struct vm_area_struct pvma;
        struct page *page;
 
-       memset(&pvma, 0, sizeof(struct vm_area_struct));
-       pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+       /* Create a pseudo vma that just contains the policy */
+       pvma.vm_start = 0;
        pvma.vm_pgoff = idx;
-       pvma.vm_end = PAGE_SIZE;
-       page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
+       pvma.vm_ops = NULL;
+       pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+       page = alloc_page_vma(gfp, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
 }
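
Both helpers above rely on the same trick: a throwaway vm_area_struct on the stack that exists only to carry a NUMA policy into the allocator. alloc_page_vma() (and swapin_readahead(), on its way into the swap cache) consults just vm_start, vm_pgoff, vm_ops and vm_policy, which is why the old memset() of the whole structure could be replaced by four explicit assignments. The pattern distilled into a sketch (alloc_page_under_policy() is an invented name, not a kernel function):

	static struct page *alloc_page_under_policy(gfp_t gfp,
				struct shared_policy *sp, unsigned long idx)
	{
		struct vm_area_struct pvma;
		struct page *page;

		pvma.vm_start = 0;	/* so addr 0 maps to pgoff idx */
		pvma.vm_pgoff = idx;	/* drives MPOL_INTERLEAVE node choice */
		pvma.vm_ops = NULL;	/* no ->get_policy callback */
		pvma.vm_policy = mpol_shared_policy_lookup(sp, idx);
		page = alloc_page_vma(gfp, &pvma, 0);
		mpol_free(pvma.vm_policy);	/* lookup took a reference */
		return page;
	}
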
@@ -1068,17 +1063,16 @@ static inline int shmem_parse_mpol(char *value, int *policy,
        return 1;
 }
 
-static inline struct page *
-shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
+static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+                       struct shmem_inode_info *info, unsigned long idx)
 {
-       swapin_readahead(entry, 0, NULL);
-       return read_swap_cache_async(entry, NULL, 0);
+       return swapin_readahead(entry, gfp, NULL, 0);
 }
 
-static inline struct page *
-shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
+static inline struct page *shmem_alloc_page(gfp_t gfp,
+                       struct shmem_inode_info *info, unsigned long idx)
 {
-       return alloc_page(gfp | __GFP_ZERO);
+       return alloc_page(gfp);
 }
 #endif
 
@@ -1099,6 +1093,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
        struct page *swappage;
        swp_entry_t *entry;
        swp_entry_t swap;
+       gfp_t gfp;
        int error;
 
        if (idx >= SHMEM_MAX_INDEX)
@@ -1111,7 +1106,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
         * Normally, filepage is NULL on entry, and either found
         * uptodate immediately, or allocated and zeroed, or read
         * in under swappage, which is then assigned to filepage.
-        * But shmem_readpage and shmem_write_begin pass in a locked
+        * But shmem_readpage (required for splice) passes in a locked
         * filepage, which may be found not uptodate by other callers
         * too, and may need to be copied from the swappage read in.
         */
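
The comment change reflects a wider shift visible throughout this patch: shmem_getpage() now returns its page locked in every case, not just for the old SGP_FAULT, so each caller gains an unlock_page(). The resulting calling convention, sketched for a read-only caller:

	struct page *page = NULL;
	int error = shmem_getpage(inode, index, &page, SGP_READ, NULL);

	if (error)
		return error;
	if (page) {			/* SGP_READ may return no page */
		unlock_page(page);	/* getpage hands it back locked */
		/* ... use the page contents ... */
		page_cache_release(page);	/* drop getpage's reference */
	}
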
@@ -1121,8 +1116,7 @@ repeat:
        if (filepage && PageUptodate(filepage))
                goto done;
        error = 0;
-       if (sgp == SGP_QUICK)
-               goto failed;
+       gfp = mapping_gfp_mask(mapping);
 
        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
@@ -1145,7 +1139,7 @@ repeat:
                                *type |= VM_FAULT_MAJOR;
                        }
                        spin_unlock(&info->lock);
-                       swappage = shmem_swapin(info, swap, idx);
+                       swappage = shmem_swapin(swap, gfp, info, idx);
                        if (!swappage) {
                                spin_lock(&info->lock);
                                entry = shmem_swp_alloc(info, idx, sgp);
@@ -1257,9 +1251,7 @@ repeat:
 
                if (!filepage) {
                        spin_unlock(&info->lock);
-                       filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
-                                                   info,
-                                                   idx);
+                       filepage = shmem_alloc_page(gfp, info, idx);
                        if (!filepage) {
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
@@ -1291,16 +1283,12 @@ repeat:
 
                info->alloced++;
                spin_unlock(&info->lock);
+               clear_highpage(filepage);
                flush_dcache_page(filepage);
                SetPageUptodate(filepage);
        }
 done:
-       if (*pagep != filepage) {
-               *pagep = filepage;
-               if (sgp != SGP_FAULT)
-                       unlock_page(filepage);
-
-       }
+       *pagep = filepage;
        return 0;
 
 failed:
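
With __GFP_ZERO gone from shmem_alloc_page(), zeroing of a freshly allocated page now happens explicitly, via the clear_highpage() added just before SetPageUptodate() above. For reference, the 2.6.24-era clear_highpage() amounts to (a simplified sketch of the include/linux/highmem.h helper):

	static inline void clear_highpage(struct page *page)
	{
		void *kaddr = kmap_atomic(page, KM_USER0);

		clear_page(kaddr);		/* zero one page */
		kunmap_atomic(kaddr, KM_USER0);
		/* the flush_dcache_page() done by the caller then makes
		 * the zeroes visible on virtually-aliased caches */
	}
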
@@ -1320,7 +1308,7 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return VM_FAULT_SIGBUS;
 
-       error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_FAULT, &ret);
+       error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
 
@@ -1478,118 +1466,16 @@ shmem_write_end(struct file *file, struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
 
+       if (pos + copied > inode->i_size)
+               i_size_write(inode, pos + copied);
+
+       unlock_page(page);
        set_page_dirty(page);
        page_cache_release(page);
 
-       if (pos+copied > inode->i_size)
-               i_size_write(inode, pos+copied);
-
        return copied;
 }
 
-static ssize_t
-shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
-       struct inode    *inode = file->f_path.dentry->d_inode;
-       loff_t          pos;
-       unsigned long   written;
-       ssize_t         err;
-
-       if ((ssize_t) count < 0)
-               return -EINVAL;
-
-       if (!access_ok(VERIFY_READ, buf, count))
-               return -EFAULT;
-
-       mutex_lock(&inode->i_mutex);
-
-       pos = *ppos;
-       written = 0;
-
-       err = generic_write_checks(file, &pos, &count, 0);
-       if (err || !count)
-               goto out;
-
-       err = remove_suid(file->f_path.dentry);
-       if (err)
-               goto out;
-
-       inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-
-       do {
-               struct page *page = NULL;
-               unsigned long bytes, index, offset;
-               char *kaddr;
-               int left;
-
-               offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
-               index = pos >> PAGE_CACHE_SHIFT;
-               bytes = PAGE_CACHE_SIZE - offset;
-               if (bytes > count)
-                       bytes = count;
-
-               /*
-                * We don't hold page lock across copy from user -
-                * what would it guard against? - so no deadlock here.
-                * But it still may be a good idea to prefault below.
-                */
-
-               err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
-               if (err)
-                       break;
-
-               left = bytes;
-               if (PageHighMem(page)) {
-                       volatile unsigned char dummy;
-                       __get_user(dummy, buf);
-                       __get_user(dummy, buf + bytes - 1);
-
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       left = __copy_from_user_inatomic(kaddr + offset,
-                                                       buf, bytes);
-                       kunmap_atomic(kaddr, KM_USER0);
-               }
-               if (left) {
-                       kaddr = kmap(page);
-                       left = __copy_from_user(kaddr + offset, buf, bytes);
-                       kunmap(page);
-               }
-
-               written += bytes;
-               count -= bytes;
-               pos += bytes;
-               buf += bytes;
-               if (pos > inode->i_size)
-                       i_size_write(inode, pos);
-
-               flush_dcache_page(page);
-               set_page_dirty(page);
-               mark_page_accessed(page);
-               page_cache_release(page);
-
-               if (left) {
-                       pos -= left;
-                       written -= left;
-                       err = -EFAULT;
-                       break;
-               }
-
-               /*
-                * Our dirty pages are not counted in nr_dirty,
-                * and we do not attempt to balance dirty pages.
-                */
-
-               cond_resched();
-       } while (count);
-
-       *ppos = pos;
-       if (written)
-               err = written;
-out:
-       mutex_unlock(&inode->i_mutex);
-       return err;
-}
-
 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
 {
        struct inode *inode = filp->f_path.dentry->d_inode;
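
With shmem_file_write() removed, writes flow through generic_file_aio_write() and the ->write_begin/->write_end pair. Note the reordering in shmem_write_end() above: i_size is updated while the page is still locked, and the page is unlocked before being dirtied, matching the locked-page convention shmem_getpage() now follows. The counterpart shmem_write_begin() is untouched by this patch and therefore not shown; at this point in history it is believed to be essentially:

	static int shmem_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
	{
		struct inode *inode = mapping->host;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;

		*pagep = NULL;
		/* SGP_WRITE: may exceed i_size, may allocate page */
		return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
	}
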
@@ -1619,6 +1505,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                                desc->error = 0;
                        break;
                }
+               if (page)
+                       unlock_page(page);
 
                /*
                 * We must evaluate after, since reads (unlike writes)
@@ -1908,6 +1796,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                        iput(inode);
                        return error;
                }
+               unlock_page(page);
                inode->i_op = &shmem_symlink_inode_operations;
                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(kaddr, symname, len);
@@ -1935,6 +1824,8 @@ static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
        struct page *page = NULL;
        int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
        nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
+       if (page)
+               unlock_page(page);
        return page;
 }
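
shmem_follow_link() now unlocks the page but deliberately leaves it kmap'd and referenced: nd_set_link() points the path walk at the mapped symlink body, and the page itself is returned as the cookie for ->put_link. The matching teardown is outside this diff; shmem_put_link() is expected to look roughly like:

	static void shmem_put_link(struct dentry *dentry, struct nameidata *nd,
				   void *cookie)
	{
		if (!IS_ERR(nd_get_link(nd))) {
			struct page *page = cookie;

			kunmap(page);		/* undo follow_link's kmap() */
			mark_page_accessed(page);
			page_cache_release(page); /* drop getpage's reference */
		}
	}
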
 
@@ -2359,7 +2250,8 @@ static const struct file_operations shmem_file_operations = {
 #ifdef CONFIG_TMPFS
        .llseek         = generic_file_llseek,
        .read           = shmem_file_read,
-       .write          = shmem_file_write,
+       .write          = do_sync_write,
+       .aio_write      = generic_file_aio_write,
        .fsync          = simple_sync_file,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
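
The final hunk retires the special-cased .write method in favour of the generic path: do_sync_write() is the standard shim that wraps a plain write(2) buffer in an iovec and drives the file's .aio_write synchronously. Simplified from fs/read_write.c of this era (retry handling trimmed):

	ssize_t do_sync_write(struct file *filp, const char __user *buf,
			      size_t len, loff_t *ppos)
	{
		struct iovec iov = { .iov_base = (void __user *)buf,
				     .iov_len = len };
		struct kiocb kiocb;
		ssize_t ret;

		init_sync_kiocb(&kiocb, filp);	/* mark the iocb synchronous */
		kiocb.ki_pos = *ppos;
		kiocb.ki_left = len;

		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret == -EIOCBQUEUED)
			ret = wait_on_sync_kiocb(&kiocb);
		*ppos = kiocb.ki_pos;
		return ret;
	}

generic_file_aio_write() then takes i_mutex, checks the write, strips suid, updates timestamps and performs the copy via ->write_begin/->write_end as sketched earlier, so everything shmem_file_write() did by hand is covered by the shared code.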