git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux...
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 6 May 2012 17:20:07 +0000 (10:20 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 6 May 2012 17:20:07 +0000 (10:20 -0700)
Pull btrfs fixes from Chris Mason:
 "The big ones here are a memory leak we introduced in rc1, and a
  scheduling while atomic if the transid on disk doesn't match the
  transid we expected.  This happens for corrupt blocks, or out of date
  disks.

  It also fixes up the ioctl definition for our ioctl to resolve logical
  inode numbers.  The __u32 was a merging error and doesn't match what
  we ship in the progs."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: avoid sleeping in verify_parent_transid while atomic
  Btrfs: fix crash in scrub repair code when device is missing
  btrfs: Fix mismatching struct members in ioctl.h
  Btrfs: fix page leak when allocing extent buffers
  Btrfs: Add properly locking around add_root_to_dirty_list

1  2 
fs/btrfs/extent_io.c
fs/btrfs/scrub.c

diff --combined fs/btrfs/extent_io.c
index 198c2ba2fa405ee13ee9ce65ba45a6c4be3773ec,2fb52c26c677d22867cbe96e46e230813fe03053..c9018a05036e943a52ad91d81019bb4b934b6b9a
@@@ -2612,10 -2612,10 +2612,10 @@@ static int __extent_read_full_page(stru
  
                if (zero_offset) {
                        iosize = PAGE_CACHE_SIZE - zero_offset;
 -                      userpage = kmap_atomic(page, KM_USER0);
 +                      userpage = kmap_atomic(page);
                        memset(userpage + zero_offset, 0, iosize);
                        flush_dcache_page(page);
 -                      kunmap_atomic(userpage, KM_USER0);
 +                      kunmap_atomic(userpage);
                }
        }
        while (cur <= end) {
                        struct extent_state *cached = NULL;
  
                        iosize = PAGE_CACHE_SIZE - pg_offset;
 -                      userpage = kmap_atomic(page, KM_USER0);
 +                      userpage = kmap_atomic(page);
                        memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
 -                      kunmap_atomic(userpage, KM_USER0);
 +                      kunmap_atomic(userpage);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
                        unlock_extent_cached(tree, cur, cur + iosize - 1,
                        char *userpage;
                        struct extent_state *cached = NULL;
  
 -                      userpage = kmap_atomic(page, KM_USER0);
 +                      userpage = kmap_atomic(page);
                        memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
 -                      kunmap_atomic(userpage, KM_USER0);
 +                      kunmap_atomic(userpage);
  
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
@@@ -2823,10 -2823,10 +2823,10 @@@ static int __extent_writepage(struct pa
        if (page->index == end_index) {
                char *userpage;
  
 -              userpage = kmap_atomic(page, KM_USER0);
 +              userpage = kmap_atomic(page);
                memset(userpage + pg_offset, 0,
                       PAGE_CACHE_SIZE - pg_offset);
 -              kunmap_atomic(userpage, KM_USER0);
 +              kunmap_atomic(userpage);
                flush_dcache_page(page);
        }
        pg_offset = 0;
@@@ -4120,6 -4120,7 +4120,7 @@@ struct extent_buffer *alloc_extent_buff
                        if (atomic_inc_not_zero(&exists->refs)) {
                                spin_unlock(&mapping->private_lock);
                                unlock_page(p);
+                               page_cache_release(p);
                                mark_extent_buffer_accessed(exists);
                                goto free_eb;
                        }
@@@ -4199,8 -4200,7 +4200,7 @@@ free_eb
                        unlock_page(eb->pages[i]);
        }
  
-       if (!atomic_dec_and_test(&eb->refs))
-               return exists;
+       WARN_ON(!atomic_dec_and_test(&eb->refs));
        btrfs_release_extent_buffer(eb);
        return exists;
  }
diff --combined fs/btrfs/scrub.c
index 4f76fc3f8e896ed70b01bcb2f45bdbc68a583ba7,7e487be0094e504d28eae282c7a2be52566874a3..2f3d6f917fb3373c02335b6912fcba1006f5fabe
@@@ -998,6 -998,7 +998,7 @@@ static int scrub_setup_recheck_block(st
                        page = sblock->pagev + page_index;
                        page->logical = logical;
                        page->physical = bbio->stripes[mirror_index].physical;
+                       /* for missing devices, bdev is NULL */
                        page->bdev = bbio->stripes[mirror_index].dev->bdev;
                        page->mirror_num = mirror_index + 1;
                        page->page = alloc_page(GFP_NOFS);
@@@ -1042,6 -1043,12 +1043,12 @@@ static int scrub_recheck_block(struct b
                struct scrub_page *page = sblock->pagev + page_num;
                DECLARE_COMPLETION_ONSTACK(complete);
  
+               if (page->bdev == NULL) {
+                       page->io_error = 1;
+                       sblock->no_io_error_seen = 0;
+                       continue;
+               }
                BUG_ON(!page->page);
                bio = bio_alloc(GFP_NOFS, 1);
                if (!bio)
@@@ -1091,7 -1098,7 +1098,7 @@@ static void scrub_recheck_block_checksu
        if (is_metadata) {
                struct btrfs_header *h;
  
 -              mapped_buffer = kmap_atomic(sblock->pagev[0].page, KM_USER0);
 +              mapped_buffer = kmap_atomic(sblock->pagev[0].page);
                h = (struct btrfs_header *)mapped_buffer;
  
                if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
                if (!have_csum)
                        return;
  
 -              mapped_buffer = kmap_atomic(sblock->pagev[0].page, KM_USER0);
 +              mapped_buffer = kmap_atomic(sblock->pagev[0].page);
        }
  
        for (page_num = 0;;) {
                        crc = btrfs_csum_data(root, mapped_buffer, crc,
                                              PAGE_SIZE);
  
 -              kunmap_atomic(mapped_buffer, KM_USER0);
 +              kunmap_atomic(mapped_buffer);
                page_num++;
                if (page_num >= sblock->page_count)
                        break;
                BUG_ON(!sblock->pagev[page_num].page);
  
 -              mapped_buffer = kmap_atomic(sblock->pagev[page_num].page,
 -                                          KM_USER0);
 +              mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
        }
  
        btrfs_csum_final(crc, calculated_csum);
@@@ -1234,7 -1242,7 +1241,7 @@@ static int scrub_checksum_data(struct s
  
        on_disk_csum = sblock->pagev[0].csum;
        page = sblock->pagev[0].page;
 -      buffer = kmap_atomic(page, KM_USER0);
 +      buffer = kmap_atomic(page);
  
        len = sdev->sectorsize;
        index = 0;
                u64 l = min_t(u64, len, PAGE_SIZE);
  
                crc = btrfs_csum_data(root, buffer, crc, l);
 -              kunmap_atomic(buffer, KM_USER0);
 +              kunmap_atomic(buffer);
                len -= l;
                if (len == 0)
                        break;
                BUG_ON(index >= sblock->page_count);
                BUG_ON(!sblock->pagev[index].page);
                page = sblock->pagev[index].page;
 -              buffer = kmap_atomic(page, KM_USER0);
 +              buffer = kmap_atomic(page);
        }
  
        btrfs_csum_final(crc, csum);
@@@ -1280,7 -1288,7 +1287,7 @@@ static int scrub_checksum_tree_block(st
  
        BUG_ON(sblock->page_count < 1);
        page = sblock->pagev[0].page;
 -      mapped_buffer = kmap_atomic(page, KM_USER0);
 +      mapped_buffer = kmap_atomic(page);
        h = (struct btrfs_header *)mapped_buffer;
        memcpy(on_disk_csum, h->csum, sdev->csum_size);
  
                u64 l = min_t(u64, len, mapped_size);
  
                crc = btrfs_csum_data(root, p, crc, l);
 -              kunmap_atomic(mapped_buffer, KM_USER0);
 +              kunmap_atomic(mapped_buffer);
                len -= l;
                if (len == 0)
                        break;
                BUG_ON(index >= sblock->page_count);
                BUG_ON(!sblock->pagev[index].page);
                page = sblock->pagev[index].page;
 -              mapped_buffer = kmap_atomic(page, KM_USER0);
 +              mapped_buffer = kmap_atomic(page);
                mapped_size = PAGE_SIZE;
                p = mapped_buffer;
        }
@@@ -1351,7 -1359,7 +1358,7 @@@ static int scrub_checksum_super(struct 
  
        BUG_ON(sblock->page_count < 1);
        page = sblock->pagev[0].page;
 -      mapped_buffer = kmap_atomic(page, KM_USER0);
 +      mapped_buffer = kmap_atomic(page);
        s = (struct btrfs_super_block *)mapped_buffer;
        memcpy(on_disk_csum, s->csum, sdev->csum_size);
  
                u64 l = min_t(u64, len, mapped_size);
  
                crc = btrfs_csum_data(root, p, crc, l);
 -              kunmap_atomic(mapped_buffer, KM_USER0);
 +              kunmap_atomic(mapped_buffer);
                len -= l;
                if (len == 0)
                        break;
                BUG_ON(index >= sblock->page_count);
                BUG_ON(!sblock->pagev[index].page);
                page = sblock->pagev[index].page;
 -              mapped_buffer = kmap_atomic(page, KM_USER0);
 +              mapped_buffer = kmap_atomic(page);
                mapped_size = PAGE_SIZE;
                p = mapped_buffer;
        }