*/
if (dio->page_errors == 0)
dio->page_errors = ret;
- page_cache_get(page);
+ get_page(page);
dio->pages[0] = page;
sdio->head = 0;
sdio->tail = 1;
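
The page_cache_get()/page_cache_release() conversions throughout this patch are mechanical. Before the rename, include/linux/pagemap.h defined the page-cache helpers as thin aliases over the raw page refcount primitives; a sketch from memory, not a verbatim quote:

    #define page_cache_get(page)        get_page(page)
    #define page_cache_release(page)    put_page(page)

The page-cache unit never grew apart from PAGE_SIZE (PAGE_CACHE_SIZE was a similar alias, which is why context lines below already read PAGE_SIZE), so the raw helpers can be used directly.
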
* filesystems can use it to hold additional state between get_block calls and
* dio_complete.
*/
-static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
- bool is_async)
+static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
{
+ loff_t offset = dio->iocb->ki_pos;
ssize_t transferred = 0;
/*
if (dio->end_io) {
int err;
+ // XXX: ki_pos??
err = dio->end_io(dio->iocb, offset, ret, dio->private);
if (err)
ret = err;
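
The XXX above flags an open question rather than a bug: ->end_io still takes an explicit offset, which dio_complete() now reads from iocb->ki_pos on entry instead of receiving as a parameter. For orientation, the callback type at this point in the tree is roughly the following (hedged sketch; fs.h is authoritative):

    typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
                               ssize_t bytes, void *private);
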
inode_dio_end(dio->inode);
if (is_async) {
- if (dio->rw & WRITE) {
- int err;
-
- err = generic_write_sync(dio->iocb->ki_filp, offset,
- transferred);
- if (err < 0 && ret > 0)
- ret = err;
- }
+ /*
+ * generic_write_sync expects ki_pos to have been updated
+ * already, but the submission path only does this for
+ * synchronous I/O.
+ */
+ dio->iocb->ki_pos += transferred;
+ if (dio->rw & WRITE)
+ ret = generic_write_sync(dio->iocb, transferred);
dio->iocb->ki_complete(dio->iocb, ret, 0);
}
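
The ki_pos update must happen before the sync call because the reworked generic_write_sync() derives the range to flush from the iocb alone. A simplified sketch of the helper's contract, not a verbatim copy of fs.h:

    static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
    {
            if (iocb->ki_flags & IOCB_DSYNC) {
                    /* Flush [ki_pos - count, ki_pos - 1]: ki_pos must
                     * already point past the bytes just written. */
                    int ret = vfs_fsync_range(iocb->ki_filp,
                                    iocb->ki_pos - count, iocb->ki_pos - 1,
                                    (iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
                    if (ret)
                            return ret;
            }
            return count;
    }

Synchronous submitters advance ki_pos themselves on the way out, which is why the adjustment is confined to the is_async branch.
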
static void dio_aio_complete_work(struct work_struct *work)
{
struct dio *dio = container_of(work, struct dio, complete_work);
- dio_complete(dio, dio->iocb->ki_pos, 0, true);
+ dio_complete(dio, 0, true);
}
static int dio_bio_complete(struct dio *dio, struct bio *bio);
queue_work(dio->inode->i_sb->s_dio_done_wq,
&dio->complete_work);
} else {
- dio_complete(dio, dio->iocb->ki_pos, 0, true);
+ dio_complete(dio, 0, true);
}
}
}
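
Both call sites here are the AIO completion paths: when the filesystem needs ->end_io to run in process context it sets dio->defer_completion and completion bounces through the per-superblock workqueue; otherwise dio_complete() runs straight from bio end-io context. An abbreviated sketch of the surrounding logic in dio_bio_end_aio():

    if (remaining == 0) {
            if (dio->result && dio->defer_completion) {
                    INIT_WORK(&dio->complete_work, dio_aio_complete_work);
                    queue_work(dio->inode->i_sb->s_dio_done_wq,
                               &dio->complete_work);
            } else {
                    dio_complete(dio, 0, true);
            }
    }

Either way the offset no longer has to be threaded through: dio_complete() fetches it from dio->iocb itself.
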
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
while (sdio->head < sdio->tail)
- page_cache_release(dio->pages[sdio->head++]);
+ put_page(dio->pages[sdio->head++]);
}
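
dio_cleanup() drops whatever references are still queued in the dio->pages[] window between head and tail. Its consuming counterpart hands out one referenced page at a time; an abbreviated sketch from the same file:

    static inline struct page *dio_get_page(struct dio *dio,
                                            struct dio_submit *sdio)
    {
            if (dio_pages_present(sdio) == 0) {
                    int ret = dio_refill_pages(dio, sdio);

                    if (ret)
                            return ERR_PTR(ret);
                    BUG_ON(dio_pages_present(sdio) == 0);
            }
            return dio->pages[sdio->head];
    }
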
/*
if (dio->rw == READ && !PageCompound(page) &&
dio->should_dirty)
set_page_dirty_lock(page);
- page_cache_release(page);
+ put_page(page);
}
err = bio->bi_error;
bio_put(bio);
*/
if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
sdio->pages_in_io--;
- page_cache_get(sdio->cur_page);
+ get_page(sdio->cur_page);
sdio->final_block_in_bio = sdio->cur_page_block +
(sdio->cur_page_len >> sdio->blkbits);
ret = 0;
*/
if (sdio->cur_page) {
ret = dio_send_cur_page(dio, sdio, map_bh);
- page_cache_release(sdio->cur_page);
+ put_page(sdio->cur_page);
sdio->cur_page = NULL;
if (ret)
return ret;
}
- page_cache_get(page); /* It is in dio */
+ get_page(page); /* It is in dio */
sdio->cur_page = page;
sdio->cur_page_offset = offset;
sdio->cur_page_len = len;
if (sdio->boundary) {
ret = dio_send_cur_page(dio, sdio, map_bh);
dio_bio_submit(dio, sdio);
- page_cache_release(sdio->cur_page);
+ put_page(sdio->cur_page);
sdio->cur_page = NULL;
}
return ret;
ret = get_more_blocks(dio, sdio, map_bh);
if (ret) {
- page_cache_release(page);
+ put_page(page);
goto out;
}
if (!buffer_mapped(map_bh))
/* AKPM: eargh, -ENOTBLK is a hack */
if (dio->rw & WRITE) {
- page_cache_release(page);
+ put_page(page);
return -ENOTBLK;
}
if (sdio->block_in_file >=
i_size_aligned >> blkbits) {
/* We hit eof */
- page_cache_release(page);
+ put_page(page);
goto out;
}
zero_user(page, from, 1 << blkbits);
sdio->next_block_for_io,
map_bh);
if (ret) {
- page_cache_release(page);
+ put_page(page);
goto out;
}
sdio->next_block_for_io += this_chunk_blocks;
}
/* Drop the ref which was taken in get_user_pages() */
- page_cache_release(page);
+ put_page(page);
}
out:
return ret;
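
Spelled out, the reference choreography across these hunks: do_direct_IO() owns the get_user_pages() reference on each page it pulls from the queue, submit_page_section() takes its own get_page() reference for as long as the page sits in sdio->cur_page, and the caller then drops its reference unconditionally, on success and error paths alike. In outline (condensed from the hunks above, not verbatim):

    page = dio_get_page(dio, sdio);   /* carries the get_user_pages() ref */
    ret = submit_page_section(dio, sdio, page, from, this_chunk_bytes,
                              sdio->next_block_for_io, map_bh);
                                      /* takes its own get_page() ref */
    put_page(page);                   /* drop the get_user_pages() ref */
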
static inline ssize_t
do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
- loff_t offset, get_block_t get_block, dio_iodone_t end_io,
+ get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags)
{
unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
unsigned blkbits = i_blkbits;
unsigned blocksize_mask = (1 << blkbits) - 1;
ssize_t retval = -EINVAL;
size_t count = iov_iter_count(iter);
+ loff_t offset = iocb->ki_pos;
loff_t end = offset + count;
struct dio *dio;
struct dio_submit sdio = { 0, };
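
With the parameter gone, the function reads the file position exactly once on entry; everything downstream keys off it, including the end-of-range computation above, the block-alignment check a few lines further down (not shown in these hunks), and ultimately dio_complete(). Schematically, the alignment check now validates iocb->ki_pos (this is the shape of the check, not its exact in-tree form):

    if ((offset | end) & blocksize_mask)
            goto out;               /* retval is still -EINVAL */
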
ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
if (retval == 0)
retval = ret2;
- page_cache_release(sdio.cur_page);
+ put_page(sdio.cur_page);
sdio.cur_page = NULL;
}
if (sdio.bio)
dio_await_completion(dio);
if (drop_refcount(dio) == 0) {
- retval = dio_complete(dio, offset, retval, false);
+ retval = dio_complete(dio, retval, false);
} else
BUG_ON(retval != -EIOCBQUEUED);
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
- loff_t offset, get_block_t get_block,
+ get_block_t get_block,
dio_iodone_t end_io, dio_submit_t submit_io,
int flags)
{
prefetch(bdev->bd_queue);
prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
- return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block,
+ return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block,
end_io, submit_io, flags);
}
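
On the caller side, ->direct_IO implementations built on this helper lose one argument and rely on the position already stored in the iocb. A hypothetical filesystem (the myfs_* names are illustrative, not part of this patch) would now look like:

    static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
    {
            struct inode *inode = file_inode(iocb->ki_filp);

            /* The starting offset now travels in iocb->ki_pos. */
            return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
                                        iter, myfs_get_block, NULL, NULL,
                                        DIO_LOCKING | DIO_SKIP_HOLES);
    }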