buffer: have alloc_page_buffers() use __GFP_NOFAIL
author    Jens Axboe <axboe@kernel.dk>
          Wed, 27 Sep 2017 11:40:16 +0000 (05:40 -0600)
committer Jens Axboe <axboe@kernel.dk>
          Tue, 3 Oct 2017 14:38:17 +0000 (08:38 -0600)
Instead of adding weird retry logic in alloc_page_buffers(), utilize
__GFP_NOFAIL to ensure that the VM takes care of handling any
potential retries appropriately. This means we don't have to
call free_more_memory() from here.
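
With __GFP_NOFAIL set, the page allocator loops internally until the
allocation succeeds, so alloc_buffer_head() can no longer return NULL
for callers that asked to retry. A minimal sketch of the resulting
pattern (illustrative only, not the patch itself):

	gfp_t gfp = GFP_NOFS;

	/* Map the caller-visible "must succeed" flag onto __GFP_NOFAIL
	 * so the allocator, rather than the caller, handles retries. */
	if (retry)
		gfp |= __GFP_NOFAIL;

	bh = alloc_buffer_head(gfp);
	if (!bh)	/* only reachable when retry == false */
		goto no_grow;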

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/md/bitmap.c
fs/buffer.c
fs/ntfs/aops.c
fs/ntfs/mft.c
include/linux/buffer_head.h

diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index d2121637b4abc222c40524fee7a68e588adc2077..4d8ed74efadf8d42ef9d799d3d05a765a52fca46 100644
@@ -368,7 +368,7 @@ static int read_page(struct file *file, unsigned long index,
        pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
                 (unsigned long long)index << PAGE_SHIFT);
 
-       bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
+       bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
        if (!bh) {
                ret = -ENOMEM;
                goto out;
diff --git a/fs/buffer.c b/fs/buffer.c
index 170df856bdb99715b126b2d129ff19ea95d46ad8..1234ae343aef0780fe3fbe3c2a5d37d2068cbfaa 100644
@@ -861,16 +861,19 @@ int remove_inode_buffers(struct inode *inode)
  * which may not fail from ordinary buffer allocations.
  */
 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
-               int retry)
+               bool retry)
 {
        struct buffer_head *bh, *head;
+       gfp_t gfp = GFP_NOFS;
        long offset;
 
-try_again:
+       if (retry)
+               gfp |= __GFP_NOFAIL;
+
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
-               bh = alloc_buffer_head(GFP_NOFS);
+               bh = alloc_buffer_head(gfp);
                if (!bh)
                        goto no_grow;
 
@@ -896,23 +899,7 @@ no_grow:
                } while (head);
        }
 
-       /*
-        * Return failure for non-async IO requests.  Async IO requests
-        * are not allowed to fail, so we have to wait until buffer heads
-        * become available.  But we don't want tasks sleeping with 
-        * partially complete buffers, so all were released above.
-        */
-       if (!retry)
-               return NULL;
-
-       /* We're _really_ low on memory. Now we just
-        * wait for old buffer heads to become free due to
-        * finishing IO.  Since this is an async request and
-        * the reserve list is empty, we're sure there are 
-        * async buffer heads in use.
-        */
-       free_more_memory();
-       goto try_again;
+       return NULL;
 }
 EXPORT_SYMBOL_GPL(alloc_page_buffers);
 
@@ -1021,7 +1008,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
        /*
         * Allocate some buffers for this page
         */
-       bh = alloc_page_buffers(page, size, 0);
+       bh = alloc_page_buffers(page, size, false);
        if (!bh)
                goto failed;
 
@@ -1575,7 +1562,7 @@ void create_empty_buffers(struct page *page,
 {
        struct buffer_head *bh, *head, *tail;
 
-       head = alloc_page_buffers(page, blocksize, 1);
+       head = alloc_page_buffers(page, blocksize, true);
        bh = head;
        do {
                bh->b_state |= b_state;
@@ -2638,7 +2625,7 @@ int nobh_write_begin(struct address_space *mapping,
         * Be careful: the buffer linked list is a NULL terminated one, rather
         * than the circular one we're used to.
         */
-       head = alloc_page_buffers(page, blocksize, 0);
+       head = alloc_page_buffers(page, blocksize, false);
        if (!head) {
                ret = -ENOMEM;
                goto out_release;
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index cc91856b5e2d926a6ac27166bfdb2156e29f7d9e..3a2e509c77c5711580c1319540cbb8790811da22 100644
@@ -1739,7 +1739,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
        spin_lock(&mapping->private_lock);
        if (unlikely(!page_has_buffers(page))) {
                spin_unlock(&mapping->private_lock);
-               bh = head = alloc_page_buffers(page, bh_size, 1);
+               bh = head = alloc_page_buffers(page, bh_size, true);
                spin_lock(&mapping->private_lock);
                if (likely(!page_has_buffers(page))) {
                        struct buffer_head *tail;
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index b6f402194f02c233db4034dffff98ce3de8b751f..ee8392aee9f6524ba8097c96a80959b6b1084dcd 100644
@@ -507,7 +507,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
        if (unlikely(!page_has_buffers(page))) {
                struct buffer_head *tail;
 
-               bh = head = alloc_page_buffers(page, blocksize, 1);
+               bh = head = alloc_page_buffers(page, blocksize, true);
                do {
                        set_buffer_uptodate(bh);
                        tail = bh;
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c8dae555eccf9f30e50213eef55d2e6bee648d06..ae2d25f01b98e66b4c35e29a8c5a1365f6c807ec 100644
@@ -156,7 +156,7 @@ void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset);
 int try_to_free_buffers(struct page *);
 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
-               int retry);
+               bool retry);
 void create_empty_buffers(struct page *, unsigned long,
                        unsigned long b_state);
 void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
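
For reference, a sketch of the caller-side convention after this change
(assumed usage, not lifted from the patch): with retry == true the
allocation is effectively infallible, while retry == false callers must
still handle NULL, as the grow_dev_page() and nobh_write_begin() hunks
above do:

	struct buffer_head *head;

	/* retry == true: __GFP_NOFAIL guarantees success, no NULL check */
	head = alloc_page_buffers(page, blocksize, true);

	/* retry == false: allocation may fail and must be checked */
	head = alloc_page_buffers(page, blocksize, false);
	if (!head)
		return -ENOMEM;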