diff --git a/fs/buffer.c b/fs/buffer.c
index 32ce01f0f95f3edfd65ee64cd1a90c421553e56f..e744e18ec0685ce1f95474418d72f94ad2c8098d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -208,6 +208,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;
+       static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
 
        index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
@@ -235,15 +236,15 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
-       if (all_mapped) {
-               printk("__find_get_block_slow() failed. "
-                       "block=%llu, b_blocknr=%llu\n",
-                       (unsigned long long)block,
-                       (unsigned long long)bh->b_blocknr);
-               printk("b_state=0x%08lx, b_size=%zu\n",
-                       bh->b_state, bh->b_size);
-               printk("device %pg blocksize: %d\n", bdev,
-                       1 << bd_inode->i_blkbits);
+       ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
+       if (all_mapped && __ratelimit(&last_warned)) {
+               printk("__find_get_block_slow() failed. block=%llu, "
+                      "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
+                      "device %pg blocksize: %d\n",
+                      (unsigned long long)block,
+                      (unsigned long long)bh->b_blocknr,
+                      bh->b_state, bh->b_size, bdev,
+                      1 << bd_inode->i_blkbits);
        }
 out_unlock:
        spin_unlock(&bd_mapping->private_lock);
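
The failure message is now emitted at most once per second (interval HZ, burst 1), and RATELIMIT_MSG_ON_RELEASE defers the usual "N callbacks suppressed" line until the ratelimit window is released instead of printing it on every suppressed call. A minimal sketch of the same pattern, with illustrative names and limits:

	#include <linux/types.h>
	#include <linux/printk.h>
	#include <linux/ratelimit.h>

	/* Allow at most 3 messages per 5-second window (illustrative values). */
	static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 3);

	static void report_bad_block(sector_t block)
	{
		/* Print the suppressed-count summary only when the window closes. */
		ratelimit_set_flags(&my_rs, RATELIMIT_MSG_ON_RELEASE);

		if (__ratelimit(&my_rs))
			pr_warn("lookup failed for block %llu\n",
				(unsigned long long)block);
	}
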
@@ -252,27 +253,6 @@ out:
        return ret;
 }
 
-/*
- * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
- */
-static void free_more_memory(void)
-{
-       struct zoneref *z;
-       int nid;
-
-       wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
-       yield();
-
-       for_each_online_node(nid) {
-
-               z = first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
-                                               gfp_zone(GFP_NOFS), NULL);
-               if (z->zone)
-                       try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
-                                               GFP_NOFS, NULL);
-       }
-}
-
 /*
  * I/O completion handler for block_read_full_page() - pages
  * which come unlocked at the end of I/O.
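
free_more_memory() could be deleted because its callers now push the retry burden into the page allocator: with __GFP_NOFAIL, the allocator itself loops and reclaims until the allocation succeeds, so the buffer layer no longer has to wake the flusher threads and walk every node's zonelist by hand. The replacement idiom, as a hedged sketch:

	#include <linux/gfp.h>
	#include <linux/slab.h>

	/*
	 * Illustrative only: a __GFP_NOFAIL allocation never returns NULL;
	 * retries and reclaim happen inside the allocator, so the caller
	 * needs neither a retry loop nor a NULL check.
	 */
	static void *alloc_cannot_fail(size_t size)
	{
		return kmalloc(size, GFP_NOFS | __GFP_NOFAIL);
	}
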
@@ -861,16 +841,19 @@ int remove_inode_buffers(struct inode *inode)
  * which may not fail from ordinary buffer allocations.
  */
 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
-               int retry)
+               bool retry)
 {
        struct buffer_head *bh, *head;
+       gfp_t gfp = GFP_NOFS;
        long offset;
 
-try_again:
+       if (retry)
+               gfp |= __GFP_NOFAIL;
+
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
-               bh = alloc_buffer_head(GFP_NOFS);
+               bh = alloc_buffer_head(gfp);
                if (!bh)
                        goto no_grow;
 
@@ -896,23 +879,7 @@ no_grow:
                } while (head);
        }
 
-       /*
-        * Return failure for non-async IO requests.  Async IO requests
-        * are not allowed to fail, so we have to wait until buffer heads
-        * become available.  But we don't want tasks sleeping with 
-        * partially complete buffers, so all were released above.
-        */
-       if (!retry)
-               return NULL;
-
-       /* We're _really_ low on memory. Now we just
-        * wait for old buffer heads to become free due to
-        * finishing IO.  Since this is an async request and
-        * the reserve list is empty, we're sure there are 
-        * async buffer heads in use.
-        */
-       free_more_memory();
-       goto try_again;
+       return NULL;
 }
 EXPORT_SYMBOL_GPL(alloc_page_buffers);
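
After the int-to-bool conversion the retry flag is purely a gfp selector: true adds __GFP_NOFAIL and the call cannot return NULL, while false keeps plain GFP_NOFS and the caller must handle failure. The two caller contracts, sketched:

	/* retry == true: may block, cannot fail -- no NULL check needed. */
	head = alloc_page_buffers(page, blocksize, true);

	/* retry == false: best effort -- failure must be handled. */
	head = alloc_page_buffers(page, blocksize, false);
	if (!head)
		return -ENOMEM;
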
 
@@ -1001,8 +968,6 @@ grow_dev_page(struct block_device *bdev, sector_t block,
        gfp_mask |= __GFP_NOFAIL;
 
        page = find_or_create_page(inode->i_mapping, index, gfp_mask);
-       if (!page)
-               return ret;
 
        BUG_ON(!PageLocked(page));
 
@@ -1021,9 +986,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
        /*
         * Allocate some buffers for this page
         */
-       bh = alloc_page_buffers(page, size, 0);
-       if (!bh)
-               goto failed;
+       bh = alloc_page_buffers(page, size, true);
 
        /*
         * Link the page to the buffers and initialise them.  Take the
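
Both removals in grow_dev_page() rest on the same NOFAIL guarantee: gfp_mask already had __GFP_NOFAIL ORed in (visible as context above), so the find_or_create_page() NULL check was already dead code, and passing retry == true makes alloc_page_buffers() unfailable as well. Condensed:

	gfp_mask |= __GFP_NOFAIL;

	/* With __GFP_NOFAIL set, neither call below can return NULL. */
	page = find_or_create_page(inode->i_mapping, index, gfp_mask);
	bh = alloc_page_buffers(page, size, true);
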
@@ -1103,8 +1066,6 @@ __getblk_slow(struct block_device *bdev, sector_t block,
                ret = grow_buffers(bdev, block, size, gfp);
                if (ret < 0)
                        return NULL;
-               if (ret == 0)
-                       free_more_memory();
        }
 }
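
With the free_more_memory() fallback gone, the enclosing loop in __getblk_slow() reduces to "look up, grow, repeat": grow_buffers() either makes the buffers appear (its allocations are NOFAIL) or fails hard for an invalid request. Roughly, the shape of the loop (the blocksize sanity check that precedes it is omitted):

	for (;;) {
		struct buffer_head *bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size, gfp);
		if (ret < 0)
			return NULL;
	}
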
 
@@ -1575,7 +1536,7 @@ void create_empty_buffers(struct page *page,
 {
        struct buffer_head *bh, *head, *tail;
 
-       head = alloc_page_buffers(page, blocksize, 1);
+       head = alloc_page_buffers(page, blocksize, true);
        bh = head;
        do {
                bh->b_state |= b_state;
@@ -1632,7 +1593,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
        struct buffer_head *head;
 
        end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
-       pagevec_init(&pvec, 0);
+       pagevec_init(&pvec);
        while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {
                count = pagevec_count(&pvec);
                for (i = 0; i < count; i++) {
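
pagevec_init() lost its second argument when the cold-page hint was dropped from the pagevec API, so initialization is now just pagevec_init(&pvec). A sketch of the resulting iteration idiom (mapping, start, and end are assumed to be in scope):

	#include <linux/pagevec.h>

	struct pagevec pvec;
	pgoff_t index = start;

	pagevec_init(&pvec);			/* no "cold" argument anymore */
	while (pagevec_lookup_range(&pvec, mapping, &index, end)) {
		unsigned i;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			/* ... inspect or lock the page ... */
		}
		pagevec_release(&pvec);		/* drop the page references */
	}
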
@@ -1979,8 +1940,8 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
        case IOMAP_MAPPED:
                if (offset >= i_size_read(inode))
                        set_buffer_new(bh);
-               bh->b_blocknr = (iomap->blkno >> (inode->i_blkbits - 9)) +
-                               ((offset - iomap->offset) >> inode->i_blkbits);
+               bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
+                               inode->i_blkbits;
                set_buffer_mapped(bh);
                break;
        }
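
The switch from iomap->blkno (a 512-byte sector number) to iomap->addr (a byte offset on disk) turns the mapping into a single shift. A worked example with hypothetical values, assuming 4 KiB filesystem blocks (i_blkbits == 12):

	/*
	 * iomap->addr   = 1048576   extent starts 1 MiB into the device
	 * iomap->offset = 0         extent covers the file from offset 0
	 * offset        = 8192      mapping the third 4 KiB block of the file
	 *
	 *   b_blocknr = (1048576 + 8192 - 0) >> 12 = 258
	 *
	 * The old formula had to convert sectors to blocks first:
	 * iomap->blkno >> (i_blkbits - 9) = 2048 >> 3 = 256, plus the
	 * in-extent offset 8192 >> 12 = 2, giving the same 258.
	 */
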
@@ -2639,7 +2600,7 @@ int nobh_write_begin(struct address_space *mapping,
         * Be careful: the buffer linked list is a NULL terminated one, rather
         * than the circular one we're used to.
         */
-       head = alloc_page_buffers(page, blocksize, 0);
+       head = alloc_page_buffers(page, blocksize, false);
        if (!head) {
                ret = -ENOMEM;
                goto out_release;
@@ -3056,8 +3017,16 @@ void guard_bio_eod(int op, struct bio *bio)
        sector_t maxsector;
        struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
        unsigned truncated_bytes;
+       struct hd_struct *part;
+
+       rcu_read_lock();
+       part = __disk_get_part(bio->bi_disk, bio->bi_partno);
+       if (part)
+               maxsector = part_nr_sects_read(part);
+       else
+               maxsector = get_capacity(bio->bi_disk);
+       rcu_read_unlock();
 
-       maxsector = get_capacity(bio->bi_disk);
        if (!maxsector)
                return;
 
@@ -3076,6 +3045,13 @@ void guard_bio_eod(int op, struct bio *bio)
        /* Uhhuh. We've got a bio that straddles the device size! */
        truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
 
+       /*
+        * The bio overruns EOD by more than its final segment, so we cannot
+        * truncate it here; return and let the I/O layer turn it into an EIO.
+        */
+       if (truncated_bytes > bvec->bv_len)
+               return;
+
        /* Truncate the bio.. */
        bio->bi_iter.bi_size -= truncated_bytes;
        bvec->bv_len -= truncated_bytes;
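
To put numbers on the new bail-out condition:

	/*
	 * Worked example (hypothetical numbers). Earlier in guard_bio_eod(),
	 * outside this hunk, maxsector is clamped to the sectors remaining
	 * past bi_sector; suppose 4 sectors remain before EOD:
	 *
	 *   bio->bi_iter.bi_size = 4096                  (8 sectors wanted)
	 *   truncated_bytes      = 4096 - (4 << 9) = 2048
	 *
	 * If the final bvec holds at least 2048 bytes, the bio is trimmed in
	 * place (bi_size and bv_len both drop by 2048). If the overrun is
	 * larger than that last segment, trimming would have to drop whole
	 * segments, so the check above bails out and lets the block layer
	 * complete the bio with -EIO instead.
	 */
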
@@ -3546,7 +3522,7 @@ page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
        if (length <= 0)
                return -ENOENT;
 
-       pagevec_init(&pvec, 0);
+       pagevec_init(&pvec);
 
        do {
                unsigned nr_pages, i;