diff --git a/fs/buffer.c b/fs/buffer.c
index 6dc1475dcb2da30278130892277b4ef9c3b34d53..6c48f20eddd4b60256c0e825e53dbbf724d115cb 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1022,7 +1022,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        end_block = init_page_buffers(page, bdev,
-                                               index << sizebits, size);
+                                               (sector_t)index << sizebits,
+                                               size);
                        goto done;
                }
                if (!try_to_free_buffers(page))
@@ -1043,7 +1044,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
-       end_block = init_page_buffers(page, bdev, index << sizebits, size);
+       end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
+                       size);
        spin_unlock(&inode->i_mapping->private_lock);
 done:
        ret = (block < end_block) ? 1 : -ENXIO;
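
The cast added above matters on 32-bit kernels: the page index (a pgoff_t, i.e. an unsigned long) is shifted left before being passed as a sector_t, so without the cast the shift is performed in 32 bits and can wrap for blocks far into a large device. A minimal user-space sketch of the wrap, using stand-in 32/64-bit types rather than the real pgoff_t/sector_t:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pgoff32_t;   /* stand-in for pgoff_t on a 32-bit kernel */
typedef uint64_t sector64_t;  /* stand-in for a 64-bit sector_t */

int main(void)
{
	pgoff32_t index = 0x40000001;	/* page index roughly 4 TiB into the device */
	unsigned sizebits = 3;		/* 4 KiB pages, 512-byte blocks */

	sector64_t wrong = index << sizebits;		   /* shift wraps in 32 bits */
	sector64_t right = (sector64_t)index << sizebits; /* shift done in 64 bits */

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}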
@@ -1253,7 +1255,7 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh)
  * a local interrupt disable for that.
  */
 
-#define BH_LRU_SIZE    8
+#define BH_LRU_SIZE    16
 
 struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
@@ -1331,8 +1333,8 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
 
-               if (bh && bh->b_bdev == bdev &&
-                               bh->b_blocknr == block && bh->b_size == size) {
+               if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
+                   bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        __this_cpu_write(bh_lrus.bhs[i],
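
The comparison is reordered so that b_blocknr is tested before b_bdev, presumably because the block number is the field most likely to differ between entries, while the device is usually the same for every lookup, so mismatches are rejected earlier. On a hit the per-CPU LRU promotes the entry to the front; a minimal user-space sketch of that move-to-front step (not the kernel code itself, which operates on per-CPU buffer_head pointers):

#include <stdio.h>

#define LRU_SIZE 16	/* mirrors the new BH_LRU_SIZE */

/* Slide slots 0..i-1 down one place and move the hit entry to slot 0. */
static void lru_promote(int *slots, int i)
{
	int hit = slots[i];

	while (i) {
		slots[i] = slots[i - 1];
		i--;
	}
	slots[0] = hit;
}

int main(void)
{
	int slots[LRU_SIZE] = { 10, 11, 12, 13, 14 };
	int i;

	lru_promote(slots, 3);			/* a lookup hits slot 3 (block 13) */
	for (i = 0; i < 5; i++)
		printf("%d ", slots[i]);	/* prints: 13 10 11 12 14 */
	printf("\n");
	return 0;
}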
@@ -2326,6 +2328,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
                err = 0;
 
                balance_dirty_pages_ratelimited(mapping);
+
+               if (unlikely(fatal_signal_pending(current))) {
+                       err = -EINTR;
+                       goto out;
+               }
        }
 
        /* page covers the boundary, find the boundary offset */
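
The added check lets a task that has received a fatal signal abandon a potentially very long zero-filling loop instead of having to run it to completion. A rough user-space analogue of the pattern (the kernel checks fatal_signal_pending(current); the signal handler and flag here are only stand-ins):

#include <errno.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t got_signal;

static void on_signal(int sig)
{
	(void)sig;
	got_signal = 1;
}

/* Zero 'len' bytes in page-sized chunks, checking for a signal each chunk. */
static int zero_range(char *buf, size_t len)
{
	const size_t chunk = 4096;
	size_t off;

	for (off = 0; off < len; off += chunk) {
		size_t n = len - off < chunk ? len - off : chunk;

		memset(buf + off, 0, n);
		if (got_signal)
			return -EINTR;	/* mirrors the new fatal-signal bailout */
	}
	return 0;
}

int main(void)
{
	static char buf[1 << 20];

	signal(SIGINT, on_signal);
	printf("zero_range returned %d\n", zero_range(buf, sizeof(buf)));
	return 0;
}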
@@ -2964,7 +2971,7 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
 
 /*
  * This allows us to do IO even on the odd last sectors
- * of a device, even if the bh block size is some multiple
+ * of a device, even if the block size is some multiple
  * of the physical sector size.
  *
  * We'll just truncate the bio to the size of the device,
@@ -2974,10 +2981,11 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
  * errors, this only handles the "we need to be able to
  * do IO at the final sector" case.
  */
-static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
+void guard_bio_eod(int rw, struct bio *bio)
 {
        sector_t maxsector;
-       unsigned bytes;
+       struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
+       unsigned truncated_bytes;
 
        maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
        if (!maxsector)
@@ -2992,23 +3000,20 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
                return;
 
        maxsector -= bio->bi_iter.bi_sector;
-       bytes = bio->bi_iter.bi_size;
-       if (likely((bytes >> 9) <= maxsector))
+       if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
                return;
 
-       /* Uhhuh. We've got a bh that straddles the device size! */
-       bytes = maxsector << 9;
+       /* Uhhuh. We've got a bio that straddles the device size! */
+       truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
 
        /* Truncate the bio.. */
-       bio->bi_iter.bi_size = bytes;
-       bio->bi_io_vec[0].bv_len = bytes;
+       bio->bi_iter.bi_size -= truncated_bytes;
+       bvec->bv_len -= truncated_bytes;
 
        /* ..and clear the end of the buffer for reads */
        if ((rw & RW_MASK) == READ) {
-               void *kaddr = kmap_atomic(bh->b_page);
-               memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
-               kunmap_atomic(kaddr);
-               flush_dcache_page(bh->b_page);
+               zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
+                               truncated_bytes);
        }
 }
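
A worked example of the new truncation arithmetic, with made-up device and bio sizes: if only 5 sectors remain on the device but the bio covers 8, truncated_bytes is the excess, and both bi_size and the final bvec shrink by that amount. A small user-space sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dev_bytes = 10240 + 512;	/* device ends after 21 sectors */
	uint64_t maxsector = dev_bytes >> 9;	/* device size in 512-byte sectors */
	uint64_t bi_sector = 16;		/* bio starts at sector 16 */
	uint32_t bi_size = 4096;		/* bio wants 8 sectors */

	maxsector -= bi_sector;			/* sectors left on the device: 5 */
	if ((bi_size >> 9) > maxsector) {
		uint32_t truncated = bi_size - (maxsector << 9);

		bi_size -= truncated;
		printf("truncated %u bytes, bio now covers %u bytes (%llu sectors)\n",
		       (unsigned)truncated, (unsigned)bi_size,
		       (unsigned long long)(bi_size >> 9));
	}
	return 0;
}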
 
@@ -3049,7 +3054,7 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
        bio->bi_flags |= bio_flags;
 
        /* Take care of bh's that straddle the end of the device */
-       guard_bh_eod(rw, bio, bh);
+       guard_bio_eod(rw, bio);
 
        if (buffer_meta(bh))
                rw |= REQ_META;