if (zero_offset) {
iosize = PAGE_CACHE_SIZE - zero_offset;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + zero_offset, 0, iosize);
flush_dcache_page(page);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
}
}
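The hunks above and below are the mechanical part of the kmap_atomic() API change: the per-CPU slot argument (KM_USER0) is gone, kmap_atomic() takes only the page, and kunmap_atomic() takes the mapped address. A minimal sketch of the same zero-fill idiom with the new API; the helper name zero_page_tail() is invented here and does not appear in the patch:

#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Zero everything from 'offset' to the end of the page cache page. */
static void zero_page_tail(struct page *page, size_t offset)
{
	char *kaddr = kmap_atomic(page);	/* no KM_* slot argument */

	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);			/* unmap by address */
}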
while (cur <= end) {
struct extent_state *cached = NULL;
iosize = PAGE_CACHE_SIZE - pg_offset;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + pg_offset, 0, iosize);
flush_dcache_page(page);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
unlock_extent_cached(tree, cur, cur + iosize - 1,
char *userpage;
struct extent_state *cached = NULL;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + pg_offset, 0, iosize);
flush_dcache_page(page);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
if (page->index == end_index) {
char *userpage;
- userpage = kmap_atomic(page, KM_USER0);
+ userpage = kmap_atomic(page);
memset(userpage + pg_offset, 0,
PAGE_CACHE_SIZE - pg_offset);
- kunmap_atomic(userpage, KM_USER0);
+ kunmap_atomic(userpage);
flush_dcache_page(page);
}
pg_offset = 0;
if (atomic_inc_not_zero(&exists->refs)) {
spin_unlock(&mapping->private_lock);
unlock_page(p);
+ page_cache_release(p);
mark_extent_buffer_accessed(exists);
goto free_eb;
}
unlock_page(eb->pages[i]);
}
- if (!atomic_dec_and_test(&eb->refs))
- return exists;
+ WARN_ON(!atomic_dec_and_test(&eb->refs));
btrfs_release_extent_buffer(eb);
return exists;
}
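The alloc_extent_buffer() hunk above handles losing the race to another allocation of the same buffer: atomic_inc_not_zero() takes a reference on 'exists' only if it is still live, page_cache_release() drops the extra page reference (presumably taken when the page was looked up, so it is not leaked), and on the free_eb path the WARN_ON() asserts that the caller's reference on the never-inserted eb was the last one before btrfs_release_extent_buffer() frees it. A hedged sketch of that losing path, with the wrapper name drop_loser_eb() invented for illustration:

/* Free an extent buffer that lost the race to an existing one. */
static struct extent_buffer *drop_loser_eb(struct extent_buffer *eb,
					   struct extent_buffer *exists)
{
	/* nobody else should hold a ref on a buffer that was never inserted */
	WARN_ON(!atomic_dec_and_test(&eb->refs));
	btrfs_release_extent_buffer(eb);
	return exists;		/* caller continues with the winner */
}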
page = sblock->pagev + page_index;
page->logical = logical;
page->physical = bbio->stripes[mirror_index].physical;
+ /* for missing devices, bdev is NULL */
page->bdev = bbio->stripes[mirror_index].dev->bdev;
page->mirror_num = mirror_index + 1;
page->page = alloc_page(GFP_NOFS);
struct scrub_page *page = sblock->pagev + page_num;
DECLARE_COMPLETION_ONSTACK(complete);
+ if (page->bdev == NULL) {
+ page->io_error = 1;
+ sblock->no_io_error_seen = 0;
+ continue;
+ }
+
BUG_ON(!page->page);
bio = bio_alloc(GFP_NOFS, 1);
if (!bio)
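The two scrub hunks above make the recheck path tolerant of missing devices: the page is still set up, but with a NULL bdev, and scrub_recheck_block() then marks it as an I/O error and skips it instead of submitting a bio to a NULL block device. A small sketch of that check; the helper name scrub_page_is_readable() is an invention of this write-up, not part of the patch:

/* Returns false (and records the error) if the page's device is missing. */
static bool scrub_page_is_readable(struct scrub_page *page,
				   struct scrub_block *sblock)
{
	if (page->bdev == NULL) {
		page->io_error = 1;
		sblock->no_io_error_seen = 0;
		return false;	/* caller skips the read for this page */
	}
	return true;
}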
if (is_metadata) {
struct btrfs_header *h;
- mapped_buffer = kmap_atomic(sblock->pagev[0].page, KM_USER0);
+ mapped_buffer = kmap_atomic(sblock->pagev[0].page);
h = (struct btrfs_header *)mapped_buffer;
if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
if (!have_csum)
return;
- mapped_buffer = kmap_atomic(sblock->pagev[0].page, KM_USER0);
+ mapped_buffer = kmap_atomic(sblock->pagev[0].page);
}
for (page_num = 0;;) {
crc = btrfs_csum_data(root, mapped_buffer, crc,
PAGE_SIZE);
- kunmap_atomic(mapped_buffer, KM_USER0);
+ kunmap_atomic(mapped_buffer);
page_num++;
if (page_num >= sblock->page_count)
break;
BUG_ON(!sblock->pagev[page_num].page);
- mapped_buffer = kmap_atomic(sblock->pagev[page_num].page,
- KM_USER0);
+ mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
}
btrfs_csum_final(crc, calculated_csum);
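The checksum loop above and the scrub_checksum_*() hunks below all follow the same shape: map one page at a time with kmap_atomic(), feed it to btrfs_csum_data(), unmap it, map the next page, and finish with btrfs_csum_final(). A condensed sketch of that walk, assuming the whole block is simply checksummed page by page (the real loops also handle sector sizes and the metadata header); the helper name csum_sblock_pages() is not in the patch:

/* Accumulate the CRC of every page in a scrub_block, one mapping at a time. */
static u32 csum_sblock_pages(struct btrfs_root *root,
			     struct scrub_block *sblock, u32 crc)
{
	int i;

	for (i = 0; i < sblock->page_count; i++) {
		char *kaddr = kmap_atomic(sblock->pagev[i].page);

		crc = btrfs_csum_data(root, kaddr, crc, PAGE_SIZE);
		kunmap_atomic(kaddr);
	}
	return crc;
}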
on_disk_csum = sblock->pagev[0].csum;
page = sblock->pagev[0].page;
- buffer = kmap_atomic(page, KM_USER0);
+ buffer = kmap_atomic(page);
len = sdev->sectorsize;
index = 0;
u64 l = min_t(u64, len, PAGE_SIZE);
crc = btrfs_csum_data(root, buffer, crc, l);
- kunmap_atomic(buffer, KM_USER0);
+ kunmap_atomic(buffer);
len -= l;
if (len == 0)
break;
BUG_ON(index >= sblock->page_count);
BUG_ON(!sblock->pagev[index].page);
page = sblock->pagev[index].page;
- buffer = kmap_atomic(page, KM_USER0);
+ buffer = kmap_atomic(page);
}
btrfs_csum_final(crc, csum);
BUG_ON(sblock->page_count < 1);
page = sblock->pagev[0].page;
- mapped_buffer = kmap_atomic(page, KM_USER0);
+ mapped_buffer = kmap_atomic(page);
h = (struct btrfs_header *)mapped_buffer;
memcpy(on_disk_csum, h->csum, sdev->csum_size);
u64 l = min_t(u64, len, mapped_size);
crc = btrfs_csum_data(root, p, crc, l);
- kunmap_atomic(mapped_buffer, KM_USER0);
+ kunmap_atomic(mapped_buffer);
len -= l;
if (len == 0)
break;
BUG_ON(index >= sblock->page_count);
BUG_ON(!sblock->pagev[index].page);
page = sblock->pagev[index].page;
- mapped_buffer = kmap_atomic(page, KM_USER0);
+ mapped_buffer = kmap_atomic(page);
mapped_size = PAGE_SIZE;
p = mapped_buffer;
}
BUG_ON(sblock->page_count < 1);
page = sblock->pagev[0].page;
- mapped_buffer = kmap_atomic(page, KM_USER0);
+ mapped_buffer = kmap_atomic(page);
s = (struct btrfs_super_block *)mapped_buffer;
memcpy(on_disk_csum, s->csum, sdev->csum_size);
u64 l = min_t(u64, len, mapped_size);
crc = btrfs_csum_data(root, p, crc, l);
- kunmap_atomic(mapped_buffer, KM_USER0);
+ kunmap_atomic(mapped_buffer);
len -= l;
if (len == 0)
break;
BUG_ON(index >= sblock->page_count);
BUG_ON(!sblock->pagev[index].page);
page = sblock->pagev[index].page;
- mapped_buffer = kmap_atomic(page, KM_USER0);
+ mapped_buffer = kmap_atomic(page);
mapped_size = PAGE_SIZE;
p = mapped_buffer;
}