Merge branch 'for-4.8/core' of git://git.kernel.dk/linux-block
author     Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 26 Jul 2016 22:03:07 +0000 (15:03 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 26 Jul 2016 22:03:07 +0000 (15:03 -0700)
Pull core block updates from Jens Axboe:

   - the big change is the cleanup from Mike Christie, cleaning up our
     uses of command types and modifier flags (a short sketch of the new
     interface follows the shortlog below).  This is what will throw
     some merge conflicts

   - regression fix for the above for btrfs, from Vincent

   - following up on the above, better packing of struct request from
     Christoph

   - a 2038 fix for blktrace from Arnd

   - a few trivial/spelling fixes from Bart Van Assche

   - a front merge check fix from Damien, for a bug that could cause
     issues on SMR drives

   - Atari partition fix from Gabriel

   - convert cfq to highres timers, since jiffies isn't granular enough
     for some devices these days.  From Jan and Jeff

   - a CFQ priority boost fix for idle classes, from me

   - cleanup series from Ming, improving our bio/bvec iteration

   - a direct issue fix for blk-mq from Omar

   - a fix so that plug merging consults the IO scheduler, like we do
     for other types of merges.  From Tahsin

   - expose DAX type internally and through sysfs.  From Toshi and Yigal

* 'for-4.8/core' of git://git.kernel.dk/linux-block: (76 commits)
  block: Fix front merge check
  block: do not merge requests without consulting with io scheduler
  block: Fix spelling in a source code comment
  block: expose QUEUE_FLAG_DAX in sysfs
  block: add QUEUE_FLAG_DAX for devices to advertise their DAX support
  Btrfs: fix comparison in __btrfs_map_block()
  block: atari: Return early for unsupported sector size
  Doc: block: Fix a typo in queue-sysfs.txt
  cfq-iosched: Charge at least 1 jiffie instead of 1 ns
  cfq-iosched: Fix regression in bonnie++ rewrite performance
  cfq-iosched: Convert slice_resid from u64 to s64
  block: Convert fifo_time from ulong to u64
  blktrace: avoid using timespec
  block/blk-cgroup.c: Declare local symbols static
  block/bio-integrity.c: Add #include "blk.h"
  block/partition-generic.c: Remove a set-but-not-used variable
  block: bio: kill BIO_MAX_SIZE
  cfq-iosched: temporarily boost queue priority for idle classes
  block: drbd: avoid to use BIO_MAX_SIZE
  block: bio: remove BIO_MAX_SECTORS
  ...

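In practice, the op/flags split means callers stop passing an rw argument
around and instead stamp the operation onto the bio or request itself.  A
minimal sketch of the before/after, assuming only the helpers introduced
by this series (bio_set_op_attrs, bio_op, op_is_write); the function name
is illustrative, not taken from the commit:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void example_submit_sync_write(struct bio *bio)
    {
            /* before this series: submit_bio(WRITE | REQ_SYNC, bio); */

            /* after: the operation and its modifier flags live on the bio */
            bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
            submit_bio(bio);
    }

    /*
     * Consumers switch from flag tests to op accessors, e.g.
     *   bio_op(bio) == REQ_OP_DISCARD   instead of   bio->bi_rw & REQ_DISCARD
     *   op_is_write(bio_op(bio))        instead of   bio->bi_rw & REQ_WRITE
     */
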
199 files changed:
Documentation/block/queue-sysfs.txt
Documentation/block/writeback_cache_control.txt
Documentation/device-mapper/log-writes.txt
arch/um/drivers/ubd_kern.c
block/bio-integrity.c
block/bio.c
block/blk-cgroup.c
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk-lib.c
block/blk-map.c
block/blk-merge.c
block/blk-mq.c
block/blk-sysfs.c
block/cfq-iosched.c
block/deadline-iosched.c
block/elevator.c
block/partition-generic.c
block/partitions/atari.c
drivers/ata/libata-scsi.c
drivers/block/brd.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_protocol.h
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_worker.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/nbd.c
drivers/block/osdblk.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/block/rbd.c
drivers/block/rsxx/dma.c
drivers/block/skd_main.c
drivers/block/umem.c
drivers/block/virtio_blk.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/block/zram/zram_drv.c
drivers/ide/ide-cd_ioctl.c
drivers/ide/ide-disk.c
drivers/ide/ide-floppy.c
drivers/lightnvm/rrpc.c
drivers/md/bcache/btree.c
drivers/md/bcache/debug.c
drivers/md/bcache/io.c
drivers/md/bcache/journal.c
drivers/md/bcache/movinggc.c
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/md/bcache/writeback.c
drivers/md/bitmap.c
drivers/md/dm-bufio.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-era-target.c
drivers/md/dm-flakey.c
drivers/md/dm-io.c
drivers/md/dm-kcopyd.c
drivers/md/dm-log-writes.c
drivers/md/dm-log.c
drivers/md/dm-raid.c
drivers/md/dm-raid1.c
drivers/md/dm-region-hash.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-snap.c
drivers/md/dm-stats.c
drivers/md/dm-stripe.c
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/md.h
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5.c
drivers/mmc/card/block.c
drivers/mmc/card/queue.c
drivers/mmc/card/queue.h
drivers/mtd/mtd_blkdevs.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/s390/block/dcssblk.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/sd.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pscsi.c
fs/block_dev.c
fs/btrfs/check-integrity.c
fs/btrfs/check-integrity.h
fs/btrfs/compression.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/inode.c
fs/btrfs/raid56.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/buffer.c
fs/crypto/crypto.c
fs/direct-io.c
fs/exofs/ore.c
fs/ext4/balloc.c
fs/ext4/crypto.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/mmp.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/readpage.c
fs/ext4/super.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/gc.c
fs/f2fs/inline.c
fs/f2fs/node.c
fs/f2fs/segment.c
fs/f2fs/trace.c
fs/fat/misc.c
fs/gfs2/bmap.c
fs/gfs2/dir.c
fs/gfs2/log.c
fs/gfs2/lops.c
fs/gfs2/lops.h
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/gfs2/quota.c
fs/hfsplus/hfsplus_fs.h
fs/hfsplus/part_tbl.c
fs/hfsplus/super.c
fs/hfsplus/wrapper.c
fs/isofs/compress.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/recovery.c
fs/jfs/jfs_logmgr.c
fs/jfs/jfs_metapage.c
fs/logfs/dev_bdev.c
fs/mpage.c
fs/nfs/blocklayout/blocklayout.c
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/btree.c
fs/nilfs2/gcinode.c
fs/nilfs2/mdt.c
fs/nilfs2/segbuf.c
fs/ntfs/aops.c
fs/ntfs/compress.c
fs/ntfs/file.c
fs/ntfs/logfile.c
fs/ntfs/mft.c
fs/ocfs2/aops.c
fs/ocfs2/buffer_head_io.c
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/super.c
fs/reiserfs/inode.c
fs/reiserfs/journal.c
fs/reiserfs/stree.c
fs/reiserfs/super.c
fs/squashfs/block.c
fs/udf/dir.c
fs/udf/directory.c
fs/udf/inode.c
fs/ufs/balloc.c
fs/ufs/util.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_buf.c
include/linux/bio.h
include/linux/blk-cgroup.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/blktrace_api.h
include/linux/buffer_head.h
include/linux/bvec.h [new file with mode: 0644]
include/linux/dm-io.h
include/linux/elevator.h
include/linux/fs.h
include/trace/events/bcache.h
include/trace/events/block.h
include/trace/events/f2fs.h
kernel/power/swap.c
kernel/trace/blktrace.c
lib/iov_iter.c
mm/page_io.c

diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index dce25d848d92c67074ebd26ffaa526217f8f3957..d515d58962b9df66b39bdcf0fc6f981090ed4630 100644
@@ -53,7 +53,7 @@ disk.
 
 logical_block_size (RO)
 -----------------------
-This is the logcal block size of the device, in bytes.
+This is the logical block size of the device, in bytes.
 
 max_hw_sectors_kb (RO)
 ----------------------
diff --git a/Documentation/block/writeback_cache_control.txt b/Documentation/block/writeback_cache_control.txt
index 59e0516cbf6b68618b692979ed1c14b30da3fb2f..8a6bdada5f6b3710032cdf5df5baa3b3f4c90adb 100644
@@ -20,11 +20,11 @@ a forced cache flush, and the Force Unit Access (FUA) flag for requests.
 Explicit cache flushes
 ----------------------
 
-The REQ_FLUSH flag can be OR ed into the r/w flags of a bio submitted from
+The REQ_PREFLUSH flag can be OR ed into the r/w flags of a bio submitted from
 the filesystem and will make sure the volatile cache of the storage device
 has been flushed before the actual I/O operation is started.  This explicitly
 guarantees that previously completed write requests are on non-volatile
-storage before the flagged bio starts. In addition the REQ_FLUSH flag can be
+storage before the flagged bio starts. In addition the REQ_PREFLUSH flag can be
 set on an otherwise empty bio structure, which causes only an explicit cache
 flush without any dependent I/O.  It is recommend to use
 the blkdev_issue_flush() helper for a pure cache flush.
@@ -41,21 +41,21 @@ signaled after the data has been committed to non-volatile storage.
 Implementation details for filesystems
 --------------------------------------
 
-Filesystems can simply set the REQ_FLUSH and REQ_FUA bits and do not have to
+Filesystems can simply set the REQ_PREFLUSH and REQ_FUA bits and do not have to
 worry if the underlying devices need any explicit cache flushing and how
-the Forced Unit Access is implemented.  The REQ_FLUSH and REQ_FUA flags
+the Forced Unit Access is implemented.  The REQ_PREFLUSH and REQ_FUA flags
 may both be set on a single bio.
 
 
 Implementation details for make_request_fn based block drivers
 --------------------------------------------------------------
 
-These drivers will always see the REQ_FLUSH and REQ_FUA bits as they sit
+These drivers will always see the REQ_PREFLUSH and REQ_FUA bits as they sit
 directly below the submit_bio interface.  For remapping drivers the REQ_FUA
 bits need to be propagated to underlying devices, and a global flush needs
-to be implemented for bios with the REQ_FLUSH bit set.  For real device
-drivers that do not have a volatile cache the REQ_FLUSH and REQ_FUA bits
-on non-empty bios can simply be ignored, and REQ_FLUSH requests without
+to be implemented for bios with the REQ_PREFLUSH bit set.  For real device
+drivers that do not have a volatile cache the REQ_PREFLUSH and REQ_FUA bits
+on non-empty bios can simply be ignored, and REQ_PREFLUSH requests without
 data can be completed successfully without doing any work.  Drivers for
 devices with volatile caches need to implement the support for these
 flags themselves without any help from the block layer.
@@ -65,17 +65,17 @@ Implementation details for request_fn based block drivers
 --------------------------------------------------------------
 
 For devices that do not support volatile write caches there is no driver
-support required, the block layer completes empty REQ_FLUSH requests before
-entering the driver and strips off the REQ_FLUSH and REQ_FUA bits from
+support required, the block layer completes empty REQ_PREFLUSH requests before
+entering the driver and strips off the REQ_PREFLUSH and REQ_FUA bits from
 requests that have a payload.  For devices with volatile write caches the
 driver needs to tell the block layer that it supports flushing caches by
 doing:
 
        blk_queue_write_cache(sdkp->disk->queue, true, false);
 
-and handle empty REQ_FLUSH requests in its prep_fn/request_fn.  Note that
-REQ_FLUSH requests with a payload are automatically turned into a sequence
-of an empty REQ_FLUSH request followed by the actual write by the block
+and handle empty REQ_OP_FLUSH requests in its prep_fn/request_fn.  Note that
+REQ_PREFLUSH requests with a payload are automatically turned into a sequence
+of an empty REQ_OP_FLUSH request followed by the actual write by the block
 layer.  For devices that also support the FUA bit the block layer needs
 to be told to pass through the REQ_FUA bit using:
 
@@ -83,4 +83,4 @@ to be told to pass through the REQ_FUA bit using:
 
 and the driver must handle write requests that have the REQ_FUA bit set
 in prep_fn/request_fn.  If the FUA bit is not natively supported the block
-layer turns it into an empty REQ_FLUSH request after the actual write.
+layer turns it into an empty REQ_OP_FLUSH request after the actual write.
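
The request_fn guidance above maps onto only a few lines of driver code.
A hedged sketch using only helpers shown in this series (req_op,
REQ_OP_FLUSH, blk_queue_write_cache); the exampledrv_* names are
illustrative, not from the commit:

    #include <linux/blkdev.h>

    /* at probe time, advertise a volatile write cache (no native FUA):
     *         blk_queue_write_cache(q, true, false);
     */
    static void exampledrv_request_fn(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = blk_fetch_request(q)) != NULL) {
                    if (req_op(rq) == REQ_OP_FLUSH) {
                            /* empty flush: drain the device cache ... */
                            /* ... then complete the request */
                            __blk_end_request_all(rq, 0);
                            continue;
                    }
                    /* ... hand regular reads/writes to the hardware ... */
            }
    }
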
diff --git a/Documentation/device-mapper/log-writes.txt b/Documentation/device-mapper/log-writes.txt
index c10f30c9b534ef7eb1c8a5c838c914148a6cd105..f4ebcbaf50f308313770e79a37d180ac5acd6542 100644
@@ -14,14 +14,14 @@ Log Ordering
 
 We log things in order of completion once we are sure the write is no longer in
 cache.  This means that normal WRITE requests are not actually logged until the
-next REQ_FLUSH request.  This is to make it easier for userspace to replay the
-log in a way that correlates to what is on disk and not what is in cache, to
-make it easier to detect improper waiting/flushing.
+next REQ_PREFLUSH request.  This is to make it easier for userspace to replay
+the log in a way that correlates to what is on disk and not what is in cache,
+to make it easier to detect improper waiting/flushing.
 
 This works by attaching all WRITE requests to a list once the write completes.
-Once we see a REQ_FLUSH request we splice this list onto the request and once
+Once we see a REQ_PREFLUSH request we splice this list onto the request and once
 the FLUSH request completes we log all of the WRITEs and then the FLUSH.  Only
-completed WRITEs, at the time the REQ_FLUSH is issued, are added in order to
+completed WRITEs, at the time the REQ_PREFLUSH is issued, are added in order to
 simulate the worst case scenario with regard to power failures.  Consider the
 following example (W means write, C means complete):
 
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 17e96dc29596ccefd8120a01a9f6c20ff6f17886..ef6b4d960badeba6f477eca9b745f4c0f104cd4a 100644
@@ -1286,7 +1286,7 @@ static void do_ubd_request(struct request_queue *q)
 
                req = dev->request;
 
-               if (req->cmd_flags & REQ_FLUSH) {
+               if (req_op(req) == REQ_OP_FLUSH) {
                        io_req = kmalloc(sizeof(struct io_thread_req),
                                         GFP_ATOMIC);
                        if (io_req == NULL) {
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 711e4d8de6fa06020432da0f37a9edcdd67a6151..15d37b1cd500b39750f2959c5001011e519e99af 100644
@@ -26,6 +26,7 @@
 #include <linux/bio.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
+#include "blk.h"
 
 #define BIP_INLINE_VECS        4
 
diff --git a/block/bio.c b/block/bio.c
index 0e4aa42bc30dc7919a58b5c93b4470d43cf01f85..848cd351513bad7fb44b176c0205a614fa314a52 100644
@@ -656,16 +656,15 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
        bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
        if (!bio)
                return NULL;
-
        bio->bi_bdev            = bio_src->bi_bdev;
        bio->bi_rw              = bio_src->bi_rw;
        bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
 
-       if (bio->bi_rw & REQ_DISCARD)
+       if (bio_op(bio) == REQ_OP_DISCARD)
                goto integrity_clone;
 
-       if (bio->bi_rw & REQ_WRITE_SAME) {
+       if (bio_op(bio) == REQ_OP_WRITE_SAME) {
                bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
                goto integrity_clone;
        }
@@ -854,21 +853,20 @@ static void submit_bio_wait_endio(struct bio *bio)
 
 /**
  * submit_bio_wait - submit a bio, and wait until it completes
- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
  * @bio: The &struct bio which describes the I/O
  *
  * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
  * bio_endio() on failure.
  */
-int submit_bio_wait(int rw, struct bio *bio)
+int submit_bio_wait(struct bio *bio)
 {
        struct submit_bio_ret ret;
 
-       rw |= REQ_SYNC;
        init_completion(&ret.event);
        bio->bi_private = &ret;
        bio->bi_end_io = submit_bio_wait_endio;
-       submit_bio(rw, bio);
+       bio->bi_rw |= REQ_SYNC;
+       submit_bio(bio);
        wait_for_completion_io(&ret.event);
 
        return ret.error;
@@ -1167,7 +1165,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
                goto out_bmd;
 
        if (iter->type & WRITE)
-               bio->bi_rw |= REQ_WRITE;
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
        ret = 0;
 
@@ -1337,7 +1335,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
         * set data direction, and check if mapped pages need bouncing
         */
        if (iter->type & WRITE)
-               bio->bi_rw |= REQ_WRITE;
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
        bio_set_flag(bio, BIO_USER_MAPPED);
 
@@ -1530,7 +1528,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
                bio->bi_private = data;
        } else {
                bio->bi_end_io = bio_copy_kern_endio;
-               bio->bi_rw |= REQ_WRITE;
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        }
 
        return bio;
@@ -1785,7 +1783,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
         * Discards need a mutable bio_vec to accommodate the payload
         * required by the DSM TRIM and UNMAP commands.
         */
-       if (bio->bi_rw & REQ_DISCARD)
+       if (bio_op(bio) == REQ_OP_DISCARD)
                split = bio_clone_bioset(bio, gfp, bs);
        else
                split = bio_clone_fast(bio, gfp, bs);
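
With the rw argument gone, the synchronous helper is driven entirely by
the flags on the bio; a minimal sketch mirroring the blkdev_issue_flush()
change later in this diff (the function name is illustrative):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int example_sync_flush(struct block_device *bdev)
    {
            struct bio *bio;
            int ret;

            bio = bio_alloc(GFP_KERNEL, 0);
            if (!bio)
                    return -ENOMEM;
            bio->bi_bdev = bdev;
            bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);

            ret = submit_bio_wait(bio);     /* no rw argument any more */
            bio_put(bio);
            return ret;
    }
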
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 66e6f1aae02eeb14de30ada1e6696622082729c1..dd38e5ced4a3fa7510f00c2ee7ff9dc636b2a7fc 100644
@@ -905,7 +905,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
        return 0;
 }
 
-struct cftype blkcg_files[] = {
+static struct cftype blkcg_files[] = {
        {
                .name = "stat",
                .flags = CFTYPE_NOT_ON_ROOT,
@@ -914,7 +914,7 @@ struct cftype blkcg_files[] = {
        { }     /* terminate */
 };
 
-struct cftype blkcg_legacy_files[] = {
+static struct cftype blkcg_legacy_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkcg_reset_stats,
diff --git a/block/blk-core.c b/block/blk-core.c
index 2475b1c72773c53c0c60ebfb466929c82b2a0d2c..3cfd67d006fb4355fa4dc845f158088237347cc2 100644
@@ -959,10 +959,10 @@ static void __freed_request(struct request_list *rl, int sync)
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.   Called under q->queue_lock.
  */
-static void freed_request(struct request_list *rl, unsigned int flags)
+static void freed_request(struct request_list *rl, int op, unsigned int flags)
 {
        struct request_queue *q = rl->q;
-       int sync = rw_is_sync(flags);
+       int sync = rw_is_sync(op, flags);
 
        q->nr_rqs[sync]--;
        rl->count[sync]--;
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
         * Flush requests do not use the elevator so skip initialization.
         * This allows a request to share the flush and elevator data.
         */
-       if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
+       if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
                return false;
 
        return true;
@@ -1054,7 +1054,8 @@ static struct io_context *rq_ioc(struct bio *bio)
 /**
  * __get_request - get a free request
  * @rl: request list to allocate from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1065,21 +1066,22 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *__get_request(struct request_list *rl, int rw_flags,
-                                    struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, int op,
+                                    int op_flags, struct bio *bio,
+                                    gfp_t gfp_mask)
 {
        struct request_queue *q = rl->q;
        struct request *rq;
        struct elevator_type *et = q->elevator->type;
        struct io_context *ioc = rq_ioc(bio);
        struct io_cq *icq = NULL;
-       const bool is_sync = rw_is_sync(rw_flags) != 0;
+       const bool is_sync = rw_is_sync(op, op_flags) != 0;
        int may_queue;
 
        if (unlikely(blk_queue_dying(q)))
                return ERR_PTR(-ENODEV);
 
-       may_queue = elv_may_queue(q, rw_flags);
+       may_queue = elv_may_queue(q, op, op_flags);
        if (may_queue == ELV_MQUEUE_NO)
                goto rq_starved;
 
@@ -1123,7 +1125,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 
        /*
         * Decide whether the new request will be managed by elevator.  If
-        * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
+        * so, mark @op_flags and increment elvpriv.  Non-zero elvpriv will
         * prevent the current elevator from being destroyed until the new
         * request is freed.  This guarantees icq's won't be destroyed and
         * makes creating new ones safe.
@@ -1132,14 +1134,14 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
         * it will be created after releasing queue_lock.
         */
        if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
-               rw_flags |= REQ_ELVPRIV;
+               op_flags |= REQ_ELVPRIV;
                q->nr_rqs_elvpriv++;
                if (et->icq_cache && ioc)
                        icq = ioc_lookup_icq(ioc, q);
        }
 
        if (blk_queue_io_stat(q))
-               rw_flags |= REQ_IO_STAT;
+               op_flags |= REQ_IO_STAT;
        spin_unlock_irq(q->queue_lock);
 
        /* allocate and init request */
@@ -1149,10 +1151,10 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 
        blk_rq_init(q, rq);
        blk_rq_set_rl(rq, rl);
-       rq->cmd_flags = rw_flags | REQ_ALLOCED;
+       req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);
 
        /* init elvpriv */
-       if (rw_flags & REQ_ELVPRIV) {
+       if (op_flags & REQ_ELVPRIV) {
                if (unlikely(et->icq_cache && !icq)) {
                        if (ioc)
                                icq = ioc_create_icq(ioc, q, gfp_mask);
@@ -1178,7 +1180,7 @@ out:
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
 
-       trace_block_getrq(q, bio, rw_flags & 1);
+       trace_block_getrq(q, bio, op);
        return rq;
 
 fail_elvpriv:
@@ -1208,7 +1210,7 @@ fail_alloc:
         * queue, but this is pretty rare.
         */
        spin_lock_irq(q->queue_lock);
-       freed_request(rl, rw_flags);
+       freed_request(rl, op, op_flags);
 
        /*
         * in the very unlikely event that allocation failed and no
@@ -1226,7 +1228,8 @@ rq_starved:
 /**
  * get_request - get a free request
  * @q: request_queue to allocate request from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1237,17 +1240,18 @@ rq_starved:
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request(struct request_queue *q, int rw_flags,
-                                  struct bio *bio, gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, int op,
+                                  int op_flags, struct bio *bio,
+                                  gfp_t gfp_mask)
 {
-       const bool is_sync = rw_is_sync(rw_flags) != 0;
+       const bool is_sync = rw_is_sync(op, op_flags) != 0;
        DEFINE_WAIT(wait);
        struct request_list *rl;
        struct request *rq;
 
        rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
 retry:
-       rq = __get_request(rl, rw_flags, bio, gfp_mask);
+       rq = __get_request(rl, op, op_flags, bio, gfp_mask);
        if (!IS_ERR(rq))
                return rq;
 
@@ -1260,7 +1264,7 @@ retry:
        prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
                                  TASK_UNINTERRUPTIBLE);
 
-       trace_block_sleeprq(q, bio, rw_flags & 1);
+       trace_block_sleeprq(q, bio, op);
 
        spin_unlock_irq(q->queue_lock);
        io_schedule();
@@ -1289,7 +1293,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
        create_io_context(gfp_mask, q->node);
 
        spin_lock_irq(q->queue_lock);
-       rq = get_request(q, rw, NULL, gfp_mask);
+       rq = get_request(q, rw, 0, NULL, gfp_mask);
        if (IS_ERR(rq))
                spin_unlock_irq(q->queue_lock);
        /* q->queue_lock is unlocked at this point */
@@ -1491,13 +1495,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
         */
        if (req->cmd_flags & REQ_ALLOCED) {
                unsigned int flags = req->cmd_flags;
+               int op = req_op(req);
                struct request_list *rl = blk_rq_rl(req);
 
                BUG_ON(!list_empty(&req->queuelist));
                BUG_ON(ELV_ON_HASH(req));
 
                blk_free_request(rl, req);
-               freed_request(rl, flags);
+               freed_request(rl, op, flags);
                blk_put_rl(rl);
        }
 }
@@ -1712,7 +1717,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
        const bool sync = !!(bio->bi_rw & REQ_SYNC);
        struct blk_plug *plug;
-       int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
+       int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
        struct request *req;
        unsigned int request_count = 0;
 
@@ -1731,7 +1736,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
                return BLK_QC_T_NONE;
        }
 
-       if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+       if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
                spin_lock_irq(q->queue_lock);
                where = ELEVATOR_INSERT_FLUSH;
                goto get_rq;
@@ -1772,15 +1777,19 @@ get_rq:
         * but we need to set it earlier to expose the sync flag to the
         * rq allocator and io schedulers.
         */
-       rw_flags = bio_data_dir(bio);
        if (sync)
                rw_flags |= REQ_SYNC;
 
+       /*
+        * Add in META/PRIO flags, if set, before we get to the IO scheduler
+        */
+       rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
+
        /*
         * Grab a free request. This is might sleep but can not fail.
         * Returns with the queue unlocked.
         */
-       req = get_request(q, rw_flags, bio, GFP_NOIO);
+       req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
        if (IS_ERR(req)) {
                bio->bi_error = PTR_ERR(req);
                bio_endio(bio);
@@ -1849,7 +1858,7 @@ static void handle_bad_sector(struct bio *bio)
        char b[BDEVNAME_SIZE];
 
        printk(KERN_INFO "attempt to access beyond end of device\n");
-       printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
+       printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
                        bdevname(bio->bi_bdev, b),
                        bio->bi_rw,
                        (unsigned long long)bio_end_sector(bio),
@@ -1964,23 +1973,23 @@ generic_make_request_checks(struct bio *bio)
         * drivers without flush support don't have to worry
         * about them.
         */
-       if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+       if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
            !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-               bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+               bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
                if (!nr_sectors) {
                        err = 0;
                        goto end_io;
                }
        }
 
-       if ((bio->bi_rw & REQ_DISCARD) &&
+       if ((bio_op(bio) == REQ_OP_DISCARD) &&
            (!blk_queue_discard(q) ||
             ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
                err = -EOPNOTSUPP;
                goto end_io;
        }
 
-       if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
+       if (bio_op(bio) == REQ_OP_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
                err = -EOPNOTSUPP;
                goto end_io;
        }
@@ -2094,7 +2103,6 @@ EXPORT_SYMBOL(generic_make_request);
 
 /**
  * submit_bio - submit a bio to the block device layer for I/O
- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
  * @bio: The &struct bio which describes the I/O
  *
  * submit_bio() is very similar in purpose to generic_make_request(), and
@@ -2102,10 +2110,8 @@ EXPORT_SYMBOL(generic_make_request);
  * interfaces; @bio must be presetup and ready for I/O.
  *
  */
-blk_qc_t submit_bio(int rw, struct bio *bio)
+blk_qc_t submit_bio(struct bio *bio)
 {
-       bio->bi_rw |= rw;
-
        /*
         * If it's a regular read/write or a barrier with data attached,
         * go through the normal accounting stuff before submission.
@@ -2113,12 +2119,12 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
        if (bio_has_data(bio)) {
                unsigned int count;
 
-               if (unlikely(rw & REQ_WRITE_SAME))
+               if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
                        count = bdev_logical_block_size(bio->bi_bdev) >> 9;
                else
                        count = bio_sectors(bio);
 
-               if (rw & WRITE) {
+               if (op_is_write(bio_op(bio))) {
                        count_vm_events(PGPGOUT, count);
                } else {
                        task_io_account_read(bio->bi_iter.bi_size);
@@ -2129,7 +2135,7 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
                        char b[BDEVNAME_SIZE];
                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
                        current->comm, task_pid_nr(current),
-                               (rw & WRITE) ? "WRITE" : "READ",
+                               op_is_write(bio_op(bio)) ? "WRITE" : "READ",
                                (unsigned long long)bio->bi_iter.bi_sector,
                                bdevname(bio->bi_bdev, b),
                                count);
@@ -2160,7 +2166,7 @@ EXPORT_SYMBOL(submit_bio);
 static int blk_cloned_rq_check_limits(struct request_queue *q,
                                      struct request *rq)
 {
-       if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
+       if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
                printk(KERN_ERR "%s: over max size limit.\n", __func__);
                return -EIO;
        }
@@ -2216,7 +2222,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
         */
        BUG_ON(blk_queued_rq(rq));
 
-       if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
+       if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
                where = ELEVATOR_INSERT_FLUSH;
 
        add_acct_request(q, rq, where);
@@ -2979,8 +2985,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request_err);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                     struct bio *bio)
 {
-       /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
-       rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
+       req_set_op(rq, bio_op(bio));
 
        if (bio_has_data(bio))
                rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -3065,7 +3070,8 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
        dst->cpu = src->cpu;
-       dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
+       req_set_op_attrs(dst, req_op(src),
+                        (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE);
        dst->cmd_type = src->cmd_type;
        dst->__sector = blk_rq_pos(src);
        dst->__data_len = blk_rq_bytes(src);
@@ -3310,7 +3316,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                /*
                 * rq is already accounted, so use raw insert
                 */
-               if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
+               if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
                        __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
                else
                        __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 3fec8a29d0fae16f3c0398ff292a75dbce38cf77..7ea04325d02f56c8f830292005d0096b033d8fe1 100644
@@ -62,7 +62,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
        /*
         * don't check dying flag for MQ because the request won't
-        * be resued after dying flag is set
+        * be reused after dying flag is set
         */
        if (q->mq_ops) {
                blk_mq_insert_request(rq, at_head, true, false);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index b1c91d229e5ed905fd0cdbb7354457eed07889df..d308def812db9b3794fc10e07da9a303dd31eb70 100644
@@ -10,8 +10,8 @@
  * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
  * properties and hardware capability.
  *
- * If a request doesn't have data, only REQ_FLUSH makes sense, which
- * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
+ * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
+ * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
  * that the device cache should be flushed before the data is executed, and
  * REQ_FUA means that the data must be on non-volatile media on request
  * completion.
  * difference.  The requests are either completed immediately if there's no
  * data or executed as normal requests otherwise.
  *
- * If the device has writeback cache and supports FUA, REQ_FLUSH is
+ * If the device has writeback cache and supports FUA, REQ_PREFLUSH is
  * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
  *
- * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is
- * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
+ * If the device has writeback cache and doesn't support FUA, REQ_PREFLUSH
+ * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
  *
  * The actual execution of flush is double buffered.  Whenever a request
  * needs to execute PRE or POSTFLUSH, it queues at
  * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
- * flush is issued and the pending_idx is toggled.  When the flush
+ * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
  * completes, all the requests which were pending are proceeded to the next
  * step.  This allows arbitrary merging of different types of FLUSH/FUA
  * requests.
@@ -103,7 +103,7 @@ static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
                policy |= REQ_FSEQ_DATA;
 
        if (fflags & (1UL << QUEUE_FLAG_WC)) {
-               if (rq->cmd_flags & REQ_FLUSH)
+               if (rq->cmd_flags & REQ_PREFLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
                    (rq->cmd_flags & REQ_FUA))
@@ -330,7 +330,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
        }
 
        flush_rq->cmd_type = REQ_TYPE_FS;
-       flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+       req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH | REQ_FLUSH_SEQ);
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;
 
@@ -391,9 +391,9 @@ void blk_insert_flush(struct request *rq)
 
        /*
         * @policy now records what operations need to be done.  Adjust
-        * REQ_FLUSH and FUA for the driver.
+        * REQ_PREFLUSH and FUA for the driver.
         */
-       rq->cmd_flags &= ~REQ_FLUSH;
+       rq->cmd_flags &= ~REQ_PREFLUSH;
        if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
                rq->cmd_flags &= ~REQ_FUA;
 
@@ -485,8 +485,9 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 
        bio = bio_alloc(gfp_mask, 0);
        bio->bi_bdev = bdev;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
 
-       ret = submit_bio_wait(WRITE_FLUSH, bio);
+       ret = submit_bio_wait(bio);
 
        /*
         * The driver must store the error location in ->bi_sector, if
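
From the filesystem side nothing changes structurally: a journal commit
block can still carry both cache-control flags, and the flush machinery
above decomposes them into PREFLUSH/DATA/POSTFLUSH steps as the device
requires.  A hedged one-function sketch (illustrative name, not from the
commit):

    static void example_submit_commit_block(struct bio *bio)
    {
            bio_set_op_attrs(bio, REQ_OP_WRITE,
                             REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
            submit_bio(bio);
    }
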
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9e29dc35169560a223ae7eea9dffc80da15fc7f9..9031d2af0b47c4210fe1db182545e1a806def18b 100644
@@ -9,21 +9,22 @@
 
 #include "blk.h"
 
-static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
                gfp_t gfp)
 {
        struct bio *new = bio_alloc(gfp, nr_pages);
 
        if (bio) {
                bio_chain(bio, new);
-               submit_bio(rw, bio);
+               submit_bio(bio);
        }
 
        return new;
 }
 
 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-               sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
+               sector_t nr_sects, gfp_t gfp_mask, int op_flags,
+               struct bio **biop)
 {
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
@@ -34,7 +35,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                return -ENXIO;
        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
-       if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+       if ((op_flags & REQ_SECURE) && !blk_queue_secdiscard(q))
                return -EOPNOTSUPP;
 
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
@@ -62,9 +63,10 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                        req_sects = end_sect - sector;
                }
 
-               bio = next_bio(bio, type, 1, gfp_mask);
+               bio = next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
+               bio_set_op_attrs(bio, REQ_OP_DISCARD, op_flags);
 
                bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
@@ -98,19 +100,19 @@ EXPORT_SYMBOL(__blkdev_issue_discard);
 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
-       int type = REQ_WRITE | REQ_DISCARD;
+       int op_flags = 0;
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;
 
        if (flags & BLKDEV_DISCARD_SECURE)
-               type |= REQ_SECURE;
+               op_flags |= REQ_SECURE;
 
        blk_start_plug(&plug);
-       ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+       ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, op_flags,
                        &bio);
        if (!ret && bio) {
-               ret = submit_bio_wait(type, bio);
+               ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
                bio_put(bio);
@@ -148,13 +150,14 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
        max_write_same_sectors = UINT_MAX >> 9;
 
        while (nr_sects) {
-               bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
+               bio = next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
+               bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);
 
                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
@@ -167,7 +170,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
        }
 
        if (bio) {
-               ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+               ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        return ret != -EOPNOTSUPP ? ret : 0;
@@ -193,11 +196,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        unsigned int sz;
 
        while (nr_sects != 0) {
-               bio = next_bio(bio, WRITE,
-                               min(nr_sects, (sector_t)BIO_MAX_PAGES),
+               bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
                                gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev   = bdev;
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -210,7 +213,7 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        }
 
        if (bio) {
-               ret = submit_bio_wait(WRITE, bio);
+               ret = submit_bio_wait(bio);
                bio_put(bio);
                return ret;
        }
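
Callers of the exported discard helper are unaffected by the internal
reshuffle; an illustrative call (range and flag chosen for the example):

    static int example_secure_discard(struct block_device *bdev,
                                      sector_t start, sector_t nr_sects)
    {
            return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL,
                                        BLKDEV_DISCARD_SECURE);
    }
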
diff --git a/block/blk-map.c b/block/blk-map.c
index b9f88b7751fbd87742b1d1439a1d89c97818f9ce..61733a660c3afcbf5ae5362bbe3b3df04f0a24de 100644
@@ -224,7 +224,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                return PTR_ERR(bio);
 
        if (!reading)
-               bio->bi_rw |= REQ_WRITE;
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 261353166dcf33a042ea4466ecae044388dddc98..5e4d93edeaf7cbb1c1e232ba8ea2ca5e6f7182c2 100644
@@ -172,9 +172,9 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
        struct bio *split, *res;
        unsigned nsegs;
 
-       if ((*bio)->bi_rw & REQ_DISCARD)
+       if (bio_op(*bio) == REQ_OP_DISCARD)
                split = blk_bio_discard_split(q, *bio, bs, &nsegs);
-       else if ((*bio)->bi_rw & REQ_WRITE_SAME)
+       else if (bio_op(*bio) == REQ_OP_WRITE_SAME)
                split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
        else
                split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
@@ -213,10 +213,10 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
         * This should probably be returning 0, but blk_add_request_payload()
         * (Christoph!!!!)
         */
-       if (bio->bi_rw & REQ_DISCARD)
+       if (bio_op(bio) == REQ_OP_DISCARD)
                return 1;
 
-       if (bio->bi_rw & REQ_WRITE_SAME)
+       if (bio_op(bio) == REQ_OP_WRITE_SAME)
                return 1;
 
        fbio = bio;
@@ -385,7 +385,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
        nsegs = 0;
        cluster = blk_queue_cluster(q);
 
-       if (bio->bi_rw & REQ_DISCARD) {
+       if (bio_op(bio) == REQ_OP_DISCARD) {
                /*
                 * This is a hack - drivers should be neither modifying the
                 * biovec, nor relying on bi_vcnt - but because of
@@ -400,7 +400,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                return 0;
        }
 
-       if (bio->bi_rw & REQ_WRITE_SAME) {
+       if (bio_op(bio) == REQ_OP_WRITE_SAME) {
 single_segment:
                *sg = sglist;
                bvec = bio_iovec(bio);
@@ -439,7 +439,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
        }
 
        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-               if (rq->cmd_flags & REQ_WRITE)
+               if (op_is_write(req_op(rq)))
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
                sg_unmark_end(sg);
@@ -500,7 +500,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
-           blk_rq_get_max_sectors(req)) {
+           blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -524,7 +524,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
-           blk_rq_get_max_sectors(req)) {
+           blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -570,7 +570,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
-           blk_rq_get_max_sectors(req))
+           blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                return 0;
 
        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -649,7 +649,8 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;
 
-       if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
+       if (!blk_check_merge_flags(req->cmd_flags, req_op(req), next->cmd_flags,
+                                  req_op(next)))
                return 0;
 
        /*
@@ -663,7 +664,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
            || req_no_special_merge(next))
                return 0;
 
-       if (req->cmd_flags & REQ_WRITE_SAME &&
+       if (req_op(req) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return 0;
 
@@ -743,6 +744,12 @@ int attempt_front_merge(struct request_queue *q, struct request *rq)
 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
 {
+       struct elevator_queue *e = q->elevator;
+
+       if (e->type->ops.elevator_allow_rq_merge_fn)
+               if (!e->type->ops.elevator_allow_rq_merge_fn(q, rq, next))
+                       return 0;
+
        return attempt_merge(q, rq, next);
 }
 
@@ -751,7 +758,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;
 
-       if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
+       if (!blk_check_merge_flags(rq->cmd_flags, req_op(rq), bio->bi_rw,
+                                  bio_op(bio)))
                return false;
 
        /* different data direction or already started, don't merge */
@@ -767,7 +775,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
                return false;
 
        /* must be using the same buffer */
-       if (rq->cmd_flags & REQ_WRITE_SAME &&
+       if (req_op(rq) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f9b9049b1284cc8adf65c1eed611e4c09d2d6584..2a1920c6d6e5ea4e6305cfb924ddf3a955f216ab 100644
@@ -159,16 +159,17 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 EXPORT_SYMBOL(blk_mq_can_queue);
 
 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-                              struct request *rq, unsigned int rw_flags)
+                              struct request *rq, int op,
+                              unsigned int op_flags)
 {
        if (blk_queue_io_stat(q))
-               rw_flags |= REQ_IO_STAT;
+               op_flags |= REQ_IO_STAT;
 
        INIT_LIST_HEAD(&rq->queuelist);
        /* csd/requeue_work/fifo_time is initialized before use */
        rq->q = q;
        rq->mq_ctx = ctx;
-       rq->cmd_flags |= rw_flags;
+       req_set_op_attrs(rq, op, op_flags);
        /* do not touch atomic flags, it needs atomic ops against the timer */
        rq->cpu = -1;
        INIT_HLIST_NODE(&rq->hash);
@@ -203,11 +204,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
        rq->end_io_data = NULL;
        rq->next_rq = NULL;
 
-       ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
+       ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
 }
 
 static struct request *
-__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
 {
        struct request *rq;
        unsigned int tag;
@@ -222,7 +223,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
                }
 
                rq->tag = tag;
-               blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
+               blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
                return rq;
        }
 
@@ -246,7 +247,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
        blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 
-       rq = __blk_mq_alloc_request(&alloc_data, rw);
+       rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
        if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) {
                __blk_mq_run_hw_queue(hctx);
                blk_mq_put_ctx(ctx);
@@ -254,7 +255,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
                ctx = blk_mq_get_ctx(q);
                hctx = q->mq_ops->map_queue(q, ctx->cpu);
                blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-               rq =  __blk_mq_alloc_request(&alloc_data, rw);
+               rq =  __blk_mq_alloc_request(&alloc_data, rw, 0);
                ctx = alloc_data.ctx;
        }
        blk_mq_put_ctx(ctx);
@@ -784,7 +785,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
                switch (ret) {
                case BLK_MQ_RQ_QUEUE_OK:
                        queued++;
-                       continue;
+                       break;
                case BLK_MQ_RQ_QUEUE_BUSY:
                        list_add(&rq->queuelist, &rq_list);
                        __blk_mq_requeue_request(rq);
@@ -1169,28 +1170,29 @@ static struct request *blk_mq_map_request(struct request_queue *q,
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        struct request *rq;
-       int rw = bio_data_dir(bio);
+       int op = bio_data_dir(bio);
+       int op_flags = 0;
        struct blk_mq_alloc_data alloc_data;
 
        blk_queue_enter_live(q);
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-       if (rw_is_sync(bio->bi_rw))
-               rw |= REQ_SYNC;
+       if (rw_is_sync(bio_op(bio), bio->bi_rw))
+               op_flags |= REQ_SYNC;
 
-       trace_block_getrq(q, bio, rw);
+       trace_block_getrq(q, bio, op);
        blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
-       rq = __blk_mq_alloc_request(&alloc_data, rw);
+       rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
        if (unlikely(!rq)) {
                __blk_mq_run_hw_queue(hctx);
                blk_mq_put_ctx(ctx);
-               trace_block_sleeprq(q, bio, rw);
+               trace_block_sleeprq(q, bio, op);
 
                ctx = blk_mq_get_ctx(q);
                hctx = q->mq_ops->map_queue(q, ctx->cpu);
                blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
-               rq = __blk_mq_alloc_request(&alloc_data, rw);
+               rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
                ctx = alloc_data.ctx;
                hctx = alloc_data.hctx;
        }
@@ -1244,8 +1246,8 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
  */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
-       const int is_sync = rw_is_sync(bio->bi_rw);
-       const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+       const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
+       const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
        struct blk_map_ctx data;
        struct request *rq;
        unsigned int request_count = 0;
@@ -1338,8 +1340,8 @@ done:
  */
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
-       const int is_sync = rw_is_sync(bio->bi_rw);
-       const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+       const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
+       const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
        struct blk_plug *plug;
        unsigned int request_count = 0;
        struct blk_map_ctx data;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 99205965f5596c2c935045c049f5cf9683c7bf42..f87a7e747d36003b2c78badc784483d6a9a83081 100644
@@ -379,6 +379,11 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page,
        return count;
 }
 
+static ssize_t queue_dax_show(struct request_queue *q, char *page)
+{
+       return queue_var_show(blk_queue_dax(q), page);
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
@@ -516,6 +521,11 @@ static struct queue_sysfs_entry queue_wc_entry = {
        .store = queue_wc_store,
 };
 
+static struct queue_sysfs_entry queue_dax_entry = {
+       .attr = {.name = "dax", .mode = S_IRUGO },
+       .show = queue_dax_show,
+};
+
 static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
@@ -542,6 +552,7 @@ static struct attribute *default_attrs[] = {
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
+       &queue_dax_entry.attr,
        NULL,
 };
 
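The new attribute reads back QUEUE_FLAG_DAX, which a driver sets when it
can service DAX mappings.  A hypothetical driver-side counterpart (the
corresponding driver hunks, e.g. drivers/nvdimm/pmem.c, are not shown on
this page; the exact call here is an assumption):

    static void example_advertise_dax(struct request_queue *q)
    {
            /* makes /sys/block/<dev>/queue/dax report 1 */
            queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
    }
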
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4a349787bc6280b30d224c87d6800bef0ddd9a60..acabba198de936cd9fc4b2e679947fb526c9c0f1 100644
@@ -10,7 +10,7 @@
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
-#include <linux/jiffies.h>
+#include <linux/ktime.h>
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
  */
 /* max queue in one round of service */
 static const int cfq_quantum = 8;
-static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
+static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
 /* maximum backwards seek, in KiB */
 static const int cfq_back_max = 16 * 1024;
 /* penalty of a backwards seek */
 static const int cfq_back_penalty = 2;
-static const int cfq_slice_sync = HZ / 10;
-static int cfq_slice_async = HZ / 25;
+static const u64 cfq_slice_sync = NSEC_PER_SEC / 10;
+static u64 cfq_slice_async = NSEC_PER_SEC / 25;
 static const int cfq_slice_async_rq = 2;
-static int cfq_slice_idle = HZ / 125;
-static int cfq_group_idle = HZ / 125;
-static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
+static u64 cfq_slice_idle = NSEC_PER_SEC / 125;
+static u64 cfq_group_idle = NSEC_PER_SEC / 125;
+static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
 /*
  * offset from end of service tree
  */
-#define CFQ_IDLE_DELAY         (HZ / 5)
+#define CFQ_IDLE_DELAY         (NSEC_PER_SEC / 5)
 
 /*
  * below this threshold, we consider thinktime immediate
  */
-#define CFQ_MIN_TT             (2)
+#define CFQ_MIN_TT             (2 * NSEC_PER_SEC / HZ)
 
 #define CFQ_SLICE_SCALE                (5)
 #define CFQ_HW_QUEUE_MIN       (5)
@@ -73,11 +73,11 @@ static struct kmem_cache *cfq_pool;
 #define CFQ_WEIGHT_LEGACY_MAX  1000
 
 struct cfq_ttime {
-       unsigned long last_end_request;
+       u64 last_end_request;
 
-       unsigned long ttime_total;
+       u64 ttime_total;
+       u64 ttime_mean;
        unsigned long ttime_samples;
-       unsigned long ttime_mean;
 };
 
 /*
@@ -94,7 +94,7 @@ struct cfq_rb_root {
        struct cfq_ttime ttime;
 };
 #define CFQ_RB_ROOT    (struct cfq_rb_root) { .rb = RB_ROOT, \
-                       .ttime = {.last_end_request = jiffies,},}
+                       .ttime = {.last_end_request = ktime_get_ns(),},}
 
 /*
  * Per process-grouping structure
@@ -109,7 +109,7 @@ struct cfq_queue {
        /* service_tree member */
        struct rb_node rb_node;
        /* service_tree key */
-       unsigned long rb_key;
+       u64 rb_key;
        /* prio tree member */
        struct rb_node p_node;
        /* prio tree root we belong to, if any */
@@ -126,13 +126,13 @@ struct cfq_queue {
        struct list_head fifo;
 
        /* time when queue got scheduled in to dispatch first request. */
-       unsigned long dispatch_start;
-       unsigned int allocated_slice;
-       unsigned int slice_dispatch;
+       u64 dispatch_start;
+       u64 allocated_slice;
+       u64 slice_dispatch;
        /* time when first request from queue completed and slice started. */
-       unsigned long slice_start;
-       unsigned long slice_end;
-       long slice_resid;
+       u64 slice_start;
+       u64 slice_end;
+       s64 slice_resid;
 
        /* pending priority requests */
        int prio_pending;
@@ -141,7 +141,7 @@ struct cfq_queue {
 
        /* io prio of this group */
        unsigned short ioprio, org_ioprio;
-       unsigned short ioprio_class;
+       unsigned short ioprio_class, org_ioprio_class;
 
        pid_t pid;
 
@@ -290,7 +290,7 @@ struct cfq_group {
        struct cfq_rb_root service_trees[2][3];
        struct cfq_rb_root service_tree_idle;
 
-       unsigned long saved_wl_slice;
+       u64 saved_wl_slice;
        enum wl_type_t saved_wl_type;
        enum wl_class_t saved_wl_class;
 
@@ -329,7 +329,7 @@ struct cfq_data {
         */
        enum wl_class_t serving_wl_class;
        enum wl_type_t serving_wl_type;
-       unsigned long workload_expires;
+       u64 workload_expires;
        struct cfq_group *serving_group;
 
        /*
@@ -362,7 +362,7 @@ struct cfq_data {
        /*
         * idle window management
         */
-       struct timer_list idle_slice_timer;
+       struct hrtimer idle_slice_timer;
        struct work_struct unplug_work;
 
        struct cfq_queue *active_queue;
@@ -374,22 +374,22 @@ struct cfq_data {
         * tunables, see top of file
         */
        unsigned int cfq_quantum;
-       unsigned int cfq_fifo_expire[2];
        unsigned int cfq_back_penalty;
        unsigned int cfq_back_max;
-       unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
-       unsigned int cfq_slice_idle;
-       unsigned int cfq_group_idle;
        unsigned int cfq_latency;
-       unsigned int cfq_target_latency;
+       u64 cfq_fifo_expire[2];
+       u64 cfq_slice[2];
+       u64 cfq_slice_idle;
+       u64 cfq_group_idle;
+       u64 cfq_target_latency;
 
        /*
         * Fallback dummy cfqq for extreme OOM conditions
         */
        struct cfq_queue oom_cfqq;
 
-       unsigned long last_delayed_sync;
+       u64 last_delayed_sync;
 };
 
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
@@ -667,15 +667,16 @@ static inline void cfqg_put(struct cfq_group *cfqg)
 } while (0)
 
 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
-                                           struct cfq_group *curr_cfqg, int rw)
+                                           struct cfq_group *curr_cfqg, int op,
+                                           int op_flags)
 {
-       blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
+       blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, 1);
        cfqg_stats_end_empty_time(&cfqg->stats);
        cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
 }
 
 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
-                       unsigned long time, unsigned long unaccounted_time)
+                       uint64_t time, unsigned long unaccounted_time)
 {
        blkg_stat_add(&cfqg->stats.time, time);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
@@ -683,26 +684,30 @@ static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 #endif
 }
 
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
+                                              int op_flags)
 {
-       blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
+       blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, -1);
 }
 
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
+                                              int op_flags)
 {
-       blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
+       blkg_rwstat_add(&cfqg->stats.merged, op, op_flags, 1);
 }
 
 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
-                       uint64_t start_time, uint64_t io_start_time, int rw)
+                       uint64_t start_time, uint64_t io_start_time, int op,
+                       int op_flags)
 {
        struct cfqg_stats *stats = &cfqg->stats;
        unsigned long long now = sched_clock();
 
        if (time_after64(now, io_start_time))
-               blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
+               blkg_rwstat_add(&stats->service_time, op, op_flags,
+                               now - io_start_time);
        if (time_after64(io_start_time, start_time))
-               blkg_rwstat_add(&stats->wait_time, rw,
+               blkg_rwstat_add(&stats->wait_time, op, op_flags,
                                io_start_time - start_time);
 }
 
@@ -781,13 +786,16 @@ static inline void cfqg_put(struct cfq_group *cfqg) { }
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)         do {} while (0)
 
 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
-                       struct cfq_group *curr_cfqg, int rw) { }
+                       struct cfq_group *curr_cfqg, int op, int op_flags) { }
 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
-                       unsigned long time, unsigned long unaccounted_time) { }
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
+                       uint64_t time, unsigned long unaccounted_time) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
+                       int op_flags) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
+                       int op_flags) { }
 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
-                       uint64_t start_time, uint64_t io_start_time, int rw) { }
+                       uint64_t start_time, uint64_t io_start_time, int op,
+                       int op_flags) { }
 
 #endif /* CONFIG_CFQ_GROUP_IOSCHED */
 
@@ -807,7 +815,7 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
        struct cfq_ttime *ttime, bool group_idle)
 {
-       unsigned long slice;
+       u64 slice;
        if (!sample_valid(ttime->ttime_samples))
                return false;
        if (group_idle)
@@ -930,17 +938,18 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * io only, should not get full sync slice length.
  */
-static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
+static inline u64 cfq_prio_slice(struct cfq_data *cfqd, bool sync,
                                 unsigned short prio)
 {
-       const int base_slice = cfqd->cfq_slice[sync];
+       u64 base_slice = cfqd->cfq_slice[sync];
+       u64 slice = div_u64(base_slice, CFQ_SLICE_SCALE);
 
        WARN_ON(prio >= IOPRIO_BE_NR);
 
-       return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
+       return base_slice + (slice * (4 - prio));
 }
 
-static inline int
+static inline u64
 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
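
cfq_prio_slice() keeps the old scaling, base + base/5 * (4 - prio), only expressed in nanoseconds with div_u64() doing the 64-bit divide. A compilable sketch of the per-priority slices this produces, assuming the default 100 ms sync base slice (the divide helper is a local stand-in, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC    1000000000ULL
#define CFQ_SLICE_SCALE 5

/* stand-in for the kernel's div_u64(); plain division is fine in userspace */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
        return dividend / divisor;
}

static uint64_t prio_slice(uint64_t base_slice, unsigned short prio)
{
        uint64_t step = div_u64(base_slice, CFQ_SLICE_SCALE);

        /* (4 - prio) can be negative; like the kernel code, this relies on
         * well-defined unsigned wraparound to subtract from base_slice */
        return base_slice + step * (4 - prio);
}

int main(void)
{
        uint64_t base = NSEC_PER_SEC / 10;       /* 100 ms sync slice */

        for (unsigned short prio = 0; prio < 8; prio++)
                printf("prio %d -> %llu ms\n", prio,
                       (unsigned long long)(prio_slice(base, prio) / 1000000));
        return 0;
}
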
@@ -958,15 +967,14 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  *
  * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
  */
-static inline u64 cfqg_scale_charge(unsigned long charge,
+static inline u64 cfqg_scale_charge(u64 charge,
                                    unsigned int vfraction)
 {
        u64 c = charge << CFQ_SERVICE_SHIFT;    /* make it fixed point */
 
        /* charge / vfraction */
        c <<= CFQ_SERVICE_SHIFT;
-       do_div(c, vfraction);
-       return c;
+       return div_u64(c, vfraction);
 }
 
 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
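
The only behavioural note in cfqg_scale_charge() is that do_div() divides its first argument in place and evaluates to the remainder, so the old two-line sequence and the new div_u64() return compute the same quotient. A rough userspace sketch of the fixed-point scaling (the shift value and the divide helper are local stand-ins, not the kernel definitions):

#include <stdio.h>
#include <stdint.h>

#define SERVICE_SHIFT 12   /* stand-in for CFQ_SERVICE_SHIFT; value assumed */

static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
        return dividend / divisor;
}

/* charge is in ns, vfraction is a fixed-point fraction with SERVICE_SHIFT bits */
static uint64_t scale_charge(uint64_t charge, unsigned int vfraction)
{
        uint64_t c = charge << SERVICE_SHIFT;   /* make the charge fixed point  */
        c <<= SERVICE_SHIFT;                    /* pre-shift so dividing by the */
        return div_u64(c, vfraction);           /* fraction stays fixed point   */
}

int main(void)
{
        uint64_t charge = 8ULL * 1000 * 1000;        /* an 8 ms slice, in ns */
        unsigned int whole = 1u << SERVICE_SHIFT;    /* vfraction == 1.0     */
        unsigned int half  = whole / 2;              /* vfraction == 0.5     */

        /* a group entitled to half the device is charged twice the vtime */
        printf("full share: %llu\n", (unsigned long long)scale_charge(charge, whole));
        printf("half share: %llu\n", (unsigned long long)scale_charge(charge, half));
        return 0;
}
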
@@ -1019,16 +1027,16 @@ static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
        return cfqg->busy_queues_avg[rt];
 }
 
-static inline unsigned
+static inline u64
 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
        return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
 }
 
-static inline unsigned
+static inline u64
 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-       unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
+       u64 slice = cfq_prio_to_slice(cfqd, cfqq);
        if (cfqd->cfq_latency) {
                /*
                 * interested queues (we consider only the ones with the same
@@ -1036,20 +1044,22 @@ cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                 */
                unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
                                                cfq_class_rt(cfqq));
-               unsigned sync_slice = cfqd->cfq_slice[1];
-               unsigned expect_latency = sync_slice * iq;
-               unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
+               u64 sync_slice = cfqd->cfq_slice[1];
+               u64 expect_latency = sync_slice * iq;
+               u64 group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
 
                if (expect_latency > group_slice) {
-                       unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
+                       u64 base_low_slice = 2 * cfqd->cfq_slice_idle;
+                       u64 low_slice;
+
                        /* scale low_slice according to IO priority
                         * and sync vs async */
-                       unsigned low_slice =
-                               min(slice, base_low_slice * slice / sync_slice);
+                       low_slice = div64_u64(base_low_slice*slice, sync_slice);
+                       low_slice = min(slice, low_slice);
                        /* the adapted slice value is scaled to fit all iqs
                         * into the target latency */
-                       slice = max(slice * group_slice / expect_latency,
-                                   low_slice);
+                       slice = div64_u64(slice*group_slice, expect_latency);
+                       slice = max(slice, low_slice);
                }
        }
        return slice;
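
The low-latency scaling in cfq_scaled_cfqq_slice() keeps its meaning and only swaps the arithmetic helpers: when the expected latency of all interesting queues exceeds the group's share of the target latency, each slice is shrunk proportionally, clamped to a minimum derived from slice_idle. A self-contained sketch of that calculation (names and divide helpers are local stand-ins):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
        return dividend / divisor;   /* userspace stand-in for the kernel helper */
}

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

/* shrink a queue's slice so 'iq' queues of its class still fit into the
 * group's share of the target latency, but never below a scaled minimum */
static uint64_t scaled_slice(uint64_t slice, uint64_t sync_slice,
                             uint64_t slice_idle, uint64_t group_slice,
                             unsigned int iq)
{
        uint64_t expect_latency = sync_slice * iq;

        if (expect_latency > group_slice) {
                uint64_t base_low_slice = 2 * slice_idle;
                uint64_t low_slice = min_u64(slice,
                                div64_u64(base_low_slice * slice, sync_slice));

                slice = max_u64(div64_u64(slice * group_slice, expect_latency),
                                low_slice);
        }
        return slice;
}

int main(void)
{
        uint64_t sync_slice  = NSEC_PER_SEC / 10;      /* 100 ms */
        uint64_t slice_idle  = NSEC_PER_SEC / 125;     /*   8 ms */
        uint64_t group_slice = NSEC_PER_SEC * 3 / 10;  /* 300 ms, single group */

        /* with 8 busy sync queues, 8 * 100 ms > 300 ms, so each slice shrinks */
        printf("%llu ms\n", (unsigned long long)
               (scaled_slice(sync_slice, sync_slice, slice_idle,
                             group_slice, 8) / 1000000));
        return 0;
}
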
@@ -1058,12 +1068,13 @@ cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-       unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
+       u64 slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
+       u64 now = ktime_get_ns();
 
-       cfqq->slice_start = jiffies;
-       cfqq->slice_end = jiffies + slice;
+       cfqq->slice_start = now;
+       cfqq->slice_end = now + slice;
        cfqq->allocated_slice = slice;
-       cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
+       cfq_log_cfqq(cfqd, cfqq, "set_slice=%llu", cfqq->slice_end - now);
 }
 
 /*
@@ -1075,7 +1086,7 @@ static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
        if (cfq_cfqq_slice_new(cfqq))
                return false;
-       if (time_before(jiffies, cfqq->slice_end))
+       if (ktime_get_ns() < cfqq->slice_end)
                return false;
 
        return true;
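
One point worth spelling out for the comparisons being converted here and below: jiffies counters wrap, so the old code had to go through time_before(); a 64-bit nanosecond value from ktime_get_ns() will not wrap in practice, so a plain < is safe. A userspace illustration of the difference (the wrap-safe macro is re-implemented locally, it is not the kernel header):

#include <stdio.h>
#include <stdint.h>

/* wrap-safe comparison, same idea as the kernel's time_before() for jiffies */
#define time_before(a, b)  ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)

int main(void)
{
        /* jiffies just before and just after a 32-bit wrap */
        uint32_t before_wrap = 0xfffffff0u;
        uint32_t after_wrap  = 0x00000010u;

        /* the naive comparison gets the order wrong, the wrap-safe macro does not */
        printf("naive:       %d\n", before_wrap < after_wrap);
        printf("time_before: %d\n", time_before(before_wrap, after_wrap));

        /* with 64-bit nanoseconds a direct compare is fine: the counter would
         * need roughly 584 years of uptime to wrap */
        uint64_t t1 = 1000000000ULL, t2 = 2000000000ULL;
        printf("ns compare:  %d\n", t1 < t2);
        return 0;
}
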
@@ -1241,8 +1252,8 @@ cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
 }
 
-static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
-                                     struct cfq_queue *cfqq)
+static u64 cfq_slice_offset(struct cfq_data *cfqd,
+                           struct cfq_queue *cfqq)
 {
        /*
         * just an approximation, should be ok.
@@ -1435,31 +1446,32 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
        cfqg_stats_update_dequeue(cfqg);
 }
 
-static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
-                                               unsigned int *unaccounted_time)
+static inline u64 cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
+                                      u64 *unaccounted_time)
 {
-       unsigned int slice_used;
+       u64 slice_used;
+       u64 now = ktime_get_ns();
 
        /*
         * Queue got expired before even a single request completed or
         * got expired immediately after first request completion.
         */
-       if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
+       if (!cfqq->slice_start || cfqq->slice_start == now) {
                /*
                 * Also charge the seek time incurred to the group, otherwise
                 * if there are multiple queues in the group, each can dispatch
                 * a single request on seeky media and cause lots of seek time
                 * and group will never know it.
                 */
-               slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
-                                       1);
+               slice_used = max_t(u64, (now - cfqq->dispatch_start),
+                                       jiffies_to_nsecs(1));
        } else {
-               slice_used = jiffies - cfqq->slice_start;
+               slice_used = now - cfqq->slice_start;
                if (slice_used > cfqq->allocated_slice) {
                        *unaccounted_time = slice_used - cfqq->allocated_slice;
                        slice_used = cfqq->allocated_slice;
                }
-               if (time_after(cfqq->slice_start, cfqq->dispatch_start))
+               if (cfqq->slice_start > cfqq->dispatch_start)
                        *unaccounted_time += cfqq->slice_start -
                                        cfqq->dispatch_start;
        }
@@ -1471,10 +1483,11 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
                                struct cfq_queue *cfqq)
 {
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
-       unsigned int used_sl, charge, unaccounted_sl = 0;
+       u64 used_sl, charge, unaccounted_sl = 0;
        int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
                        - cfqg->service_tree_idle.count;
        unsigned int vfr;
+       u64 now = ktime_get_ns();
 
        BUG_ON(nr_sync < 0);
        used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
@@ -1496,9 +1509,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
        cfq_group_service_tree_add(st, cfqg);
 
        /* This group is being expired. Save the context */
-       if (time_after(cfqd->workload_expires, jiffies)) {
-               cfqg->saved_wl_slice = cfqd->workload_expires
-                                               - jiffies;
+       if (cfqd->workload_expires > now) {
+               cfqg->saved_wl_slice = cfqd->workload_expires - now;
                cfqg->saved_wl_type = cfqd->serving_wl_type;
                cfqg->saved_wl_class = cfqd->serving_wl_class;
        } else
@@ -1507,7 +1519,7 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                                        st->min_vdisktime);
        cfq_log_cfqq(cfqq->cfqd, cfqq,
-                    "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
+                    "sl_used=%llu disp=%llu charge=%llu iops=%u sect=%lu",
                     used_sl, cfqq->slice_dispatch, charge,
                     iops_mode(cfqd), cfqq->nr_sectors);
        cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
@@ -1530,7 +1542,7 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg)
                *st = CFQ_RB_ROOT;
        RB_CLEAR_NODE(&cfqg->rb_node);
 
-       cfqg->ttime.last_end_request = jiffies;
+       cfqg->ttime.last_end_request = ktime_get_ns();
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
@@ -2213,10 +2225,11 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
        struct rb_node **p, *parent;
        struct cfq_queue *__cfqq;
-       unsigned long rb_key;
+       u64 rb_key;
        struct cfq_rb_root *st;
        int left;
        int new_cfqq = 1;
+       u64 now = ktime_get_ns();
 
        st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
        if (cfq_class_idle(cfqq)) {
@@ -2226,7 +2239,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
                        rb_key += __cfqq->rb_key;
                } else
-                       rb_key += jiffies;
+                       rb_key += now;
        } else if (!add_front) {
                /*
                 * Get our rb key offset. Subtract any residual slice
@@ -2234,13 +2247,13 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * count indicates slice overrun, and this should position
                 * the next service time further away in the tree.
                 */
-               rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
+               rb_key = cfq_slice_offset(cfqd, cfqq) + now;
                rb_key -= cfqq->slice_resid;
                cfqq->slice_resid = 0;
        } else {
-               rb_key = -HZ;
+               rb_key = -NSEC_PER_SEC;
                __cfqq = cfq_rb_first(st);
-               rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+               rb_key += __cfqq ? __cfqq->rb_key : now;
        }
 
        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
@@ -2266,7 +2279,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                /*
                 * sort by key, that represents service time.
                 */
-               if (time_before(rb_key, __cfqq->rb_key))
+               if (rb_key < __cfqq->rb_key)
                        p = &parent->rb_left;
                else {
                        p = &parent->rb_right;
@@ -2461,10 +2474,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
        elv_rb_del(&cfqq->sort_list, rq);
        cfqq->queued[rq_is_sync(rq)]--;
-       cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
+       cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
        cfq_add_rq_rb(rq);
        cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
-                                rq->cmd_flags);
+                                req_op(rq), rq->cmd_flags);
 }
 
 static struct request *
@@ -2517,7 +2530,7 @@ static void cfq_remove_request(struct request *rq)
        cfq_del_rq_rb(rq);
 
        cfqq->cfqd->rq_queued--;
-       cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
+       cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
        if (rq->cmd_flags & REQ_PRIO) {
                WARN_ON(!cfqq->prio_pending);
                cfqq->prio_pending--;
@@ -2531,7 +2544,7 @@ static int cfq_merge(struct request_queue *q, struct request **req,
        struct request *__rq;
 
        __rq = cfq_find_rq_fmerge(cfqd, bio);
-       if (__rq && elv_rq_merge_ok(__rq, bio)) {
+       if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_FRONT_MERGE;
        }
@@ -2552,7 +2565,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
                                struct bio *bio)
 {
-       cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
+       cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw);
 }
 
 static void
@@ -2566,7 +2579,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
         * reposition in fifo if next is older than rq
         */
        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-           time_before(next->fifo_time, rq->fifo_time) &&
+           next->fifo_time < rq->fifo_time &&
            cfqq == RQ_CFQQ(next)) {
                list_move(&rq->queuelist, &next->queuelist);
                rq->fifo_time = next->fifo_time;
@@ -2575,7 +2588,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
        if (cfqq->next_rq == next)
                cfqq->next_rq = rq;
        cfq_remove_request(next);
-       cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
+       cfqg_stats_update_io_merged(RQ_CFQG(rq), req_op(next), next->cmd_flags);
 
        cfqq = RQ_CFQQ(next);
        /*
@@ -2588,8 +2601,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
                cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
-static int cfq_allow_merge(struct request_queue *q, struct request *rq,
-                          struct bio *bio)
+static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+                              struct bio *bio)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_io_cq *cic;
@@ -2613,9 +2626,15 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
        return cfqq == RQ_CFQQ(rq);
 }
 
+static int cfq_allow_rq_merge(struct request_queue *q, struct request *rq,
+                             struct request *next)
+{
+       return RQ_CFQQ(rq) == RQ_CFQQ(next);
+}
+
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-       del_timer(&cfqd->idle_slice_timer);
+       hrtimer_try_to_cancel(&cfqd->idle_slice_timer);
        cfqg_stats_update_idle_time(cfqq->cfqg);
 }
 
@@ -2627,7 +2646,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
                                cfqd->serving_wl_class, cfqd->serving_wl_type);
                cfqg_stats_update_avg_queue_size(cfqq->cfqg);
                cfqq->slice_start = 0;
-               cfqq->dispatch_start = jiffies;
+               cfqq->dispatch_start = ktime_get_ns();
                cfqq->allocated_slice = 0;
                cfqq->slice_end = 0;
                cfqq->slice_dispatch = 0;
@@ -2676,8 +2695,8 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                if (cfq_cfqq_slice_new(cfqq))
                        cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
                else
-                       cfqq->slice_resid = cfqq->slice_end - jiffies;
-               cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
+                       cfqq->slice_resid = cfqq->slice_end - ktime_get_ns();
+               cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid);
        }
 
        cfq_group_served(cfqd, cfqq->cfqg, cfqq);
@@ -2911,7 +2930,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        struct cfq_queue *cfqq = cfqd->active_queue;
        struct cfq_rb_root *st = cfqq->service_tree;
        struct cfq_io_cq *cic;
-       unsigned long sl, group_idle = 0;
+       u64 sl, group_idle = 0;
+       u64 now = ktime_get_ns();
 
        /*
         * SSD device without seek penalty, disable idling. But only do so
@@ -2954,8 +2974,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
         * time slice.
         */
        if (sample_valid(cic->ttime.ttime_samples) &&
-           (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
-               cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
+           (cfqq->slice_end - now < cic->ttime.ttime_mean)) {
+               cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%llu",
                             cic->ttime.ttime_mean);
                return;
        }
@@ -2976,9 +2996,10 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        else
                sl = cfqd->cfq_slice_idle;
 
-       mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
+       hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl),
+                     HRTIMER_MODE_REL);
        cfqg_stats_set_start_idle_time(cfqq->cfqg);
-       cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+       cfq_log_cfqq(cfqd, cfqq, "arm_idle: %llu group_idle: %d", sl,
                        group_idle ? 1 : 0);
 }
 
@@ -3018,7 +3039,7 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
                return NULL;
 
        rq = rq_entry_fifo(cfqq->fifo.next);
-       if (time_before(jiffies, rq->fifo_time))
+       if (ktime_get_ns() < rq->fifo_time)
                rq = NULL;
 
        cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
@@ -3096,14 +3117,14 @@ static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
        struct cfq_queue *queue;
        int i;
        bool key_valid = false;
-       unsigned long lowest_key = 0;
+       u64 lowest_key = 0;
        enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
 
        for (i = 0; i <= SYNC_WORKLOAD; ++i) {
                /* select the one with lowest rb_key */
                queue = cfq_rb_first(st_for(cfqg, wl_class, i));
                if (queue &&
-                   (!key_valid || time_before(queue->rb_key, lowest_key))) {
+                   (!key_valid || queue->rb_key < lowest_key)) {
                        lowest_key = queue->rb_key;
                        cur_best = i;
                        key_valid = true;
@@ -3116,11 +3137,12 @@ static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
 static void
 choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
-       unsigned slice;
+       u64 slice;
        unsigned count;
        struct cfq_rb_root *st;
-       unsigned group_slice;
+       u64 group_slice;
        enum wl_class_t original_class = cfqd->serving_wl_class;
+       u64 now = ktime_get_ns();
 
        /* Choose next priority. RT > BE > IDLE */
        if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
@@ -3129,7 +3151,7 @@ choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
                cfqd->serving_wl_class = BE_WORKLOAD;
        else {
                cfqd->serving_wl_class = IDLE_WORKLOAD;
-               cfqd->workload_expires = jiffies + 1;
+               cfqd->workload_expires = now + jiffies_to_nsecs(1);
                return;
        }
 
@@ -3147,7 +3169,7 @@ choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
        /*
         * check workload expiration, and that we still have other queues ready
         */
-       if (count && !time_after(jiffies, cfqd->workload_expires))
+       if (count && !(now > cfqd->workload_expires))
                return;
 
 new_workload:
@@ -3164,13 +3186,13 @@ new_workload:
         */
        group_slice = cfq_group_slice(cfqd, cfqg);
 
-       slice = group_slice * count /
+       slice = div_u64(group_slice * count,
                max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
                      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
-                                       cfqg));
+                                       cfqg)));
 
        if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
-               unsigned int tmp;
+               u64 tmp;
 
                /*
                 * Async queues are currently system wide. Just taking
@@ -3181,19 +3203,19 @@ new_workload:
                 */
                tmp = cfqd->cfq_target_latency *
                        cfqg_busy_async_queues(cfqd, cfqg);
-               tmp = tmp/cfqd->busy_queues;
-               slice = min_t(unsigned, slice, tmp);
+               tmp = div_u64(tmp, cfqd->busy_queues);
+               slice = min_t(u64, slice, tmp);
 
                /* async workload slice is scaled down according to
                 * the sync/async slice ratio. */
-               slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
+               slice = div64_u64(slice*cfqd->cfq_slice[0], cfqd->cfq_slice[1]);
        } else
                /* sync workload slice is at least 2 * cfq_slice_idle */
                slice = max(slice, 2 * cfqd->cfq_slice_idle);
 
-       slice = max_t(unsigned, slice, CFQ_MIN_TT);
-       cfq_log(cfqd, "workload slice:%d", slice);
-       cfqd->workload_expires = jiffies + slice;
+       slice = max_t(u64, slice, CFQ_MIN_TT);
+       cfq_log(cfqd, "workload slice:%llu", slice);
+       cfqd->workload_expires = now + slice;
 }
 
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
@@ -3211,16 +3233,17 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
 static void cfq_choose_cfqg(struct cfq_data *cfqd)
 {
        struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
+       u64 now = ktime_get_ns();
 
        cfqd->serving_group = cfqg;
 
        /* Restore the workload type data */
        if (cfqg->saved_wl_slice) {
-               cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
+               cfqd->workload_expires = now + cfqg->saved_wl_slice;
                cfqd->serving_wl_type = cfqg->saved_wl_type;
                cfqd->serving_wl_class = cfqg->saved_wl_class;
        } else
-               cfqd->workload_expires = jiffies - 1;
+               cfqd->workload_expires = now - 1;
 
        choose_wl_class_and_type(cfqd, cfqg);
 }
@@ -3232,6 +3255,7 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 {
        struct cfq_queue *cfqq, *new_cfqq = NULL;
+       u64 now = ktime_get_ns();
 
        cfqq = cfqd->active_queue;
        if (!cfqq)
@@ -3292,7 +3316,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
         * flight or is idling for a new request, allow either of these
         * conditions to happen (or time out) before selecting a new queue.
         */
-       if (timer_pending(&cfqd->idle_slice_timer)) {
+       if (hrtimer_active(&cfqd->idle_slice_timer)) {
                cfqq = NULL;
                goto keep_queue;
        }
@@ -3303,7 +3327,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
         **/
        if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
            (cfq_cfqq_slice_new(cfqq) ||
-           (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
+           (cfqq->slice_end - now > now - cfqq->slice_start))) {
                cfq_clear_cfqq_deep(cfqq);
                cfq_clear_cfqq_idle_window(cfqq);
        }
@@ -3381,11 +3405,12 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
        struct cfq_queue *cfqq)
 {
+       u64 now = ktime_get_ns();
+
        /* the queue hasn't finished any request, can't estimate */
        if (cfq_cfqq_slice_new(cfqq))
                return true;
-       if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
-               cfqq->slice_end))
+       if (now + cfqd->cfq_slice_idle * cfqq->dispatched > cfqq->slice_end)
                return true;
 
        return false;
@@ -3460,10 +3485,10 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         * based on the last sync IO we serviced
         */
        if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
-               unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
+               u64 last_sync = ktime_get_ns() - cfqd->last_delayed_sync;
                unsigned int depth;
 
-               depth = last_sync / cfqd->cfq_slice[1];
+               depth = div64_u64(last_sync, cfqd->cfq_slice[1]);
                if (!depth && !cfqq->dispatched)
                        depth = 1;
                if (depth < max_dispatch)
@@ -3546,7 +3571,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
        if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
            cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
            cfq_class_idle(cfqq))) {
-               cfqq->slice_end = jiffies + 1;
+               cfqq->slice_end = ktime_get_ns() + 1;
                cfq_slice_expired(cfqd, 0);
        }
 
@@ -3624,7 +3649,7 @@ static void cfq_init_icq(struct io_cq *icq)
 {
        struct cfq_io_cq *cic = icq_to_cic(icq);
 
-       cic->ttime.last_end_request = jiffies;
+       cic->ttime.last_end_request = ktime_get_ns();
 }
 
 static void cfq_exit_icq(struct io_cq *icq)
@@ -3682,6 +3707,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
         * elevate the priority of this queue
         */
        cfqq->org_ioprio = cfqq->ioprio;
+       cfqq->org_ioprio_class = cfqq->ioprio_class;
        cfq_clear_cfqq_prio_changed(cfqq);
 }
 
@@ -3845,14 +3871,15 @@ out:
 }
 
 static void
-__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
+__cfq_update_io_thinktime(struct cfq_ttime *ttime, u64 slice_idle)
 {
-       unsigned long elapsed = jiffies - ttime->last_end_request;
+       u64 elapsed = ktime_get_ns() - ttime->last_end_request;
        elapsed = min(elapsed, 2UL * slice_idle);
 
        ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
-       ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
-       ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
+       ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed,  8);
+       ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
+                                    ttime->ttime_samples);
 }
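
The think-time tracking keeps the same exponentially decayed average, only in nanoseconds and with explicit 64-bit division helpers. A self-contained sketch of the bookkeeping (field names mirror the converted struct cfq_ttime above; updating last_end_request is folded in here purely for the demo):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct ttime {
        uint64_t last_end_request;   /* ns timestamp of the last completion */
        uint64_t ttime_total;        /* decayed, weighted sum of samples    */
        uint64_t ttime_mean;         /* ttime_total / ttime_samples         */
        unsigned long ttime_samples; /* decayed, weighted sample count      */
};

/* 7/8 exponential decay; each sample enters with weight 256 so the mean
 * keeps some resolution even for very small think times */
static void update_thinktime(struct ttime *t, uint64_t now, uint64_t slice_idle)
{
        uint64_t elapsed = now - t->last_end_request;

        if (elapsed > 2 * slice_idle)          /* clamp outliers, as cfq does */
                elapsed = 2 * slice_idle;

        t->ttime_samples = (7 * t->ttime_samples + 256) / 8;
        t->ttime_total   = (7 * t->ttime_total + 256 * elapsed) / 8;
        t->ttime_mean    = (t->ttime_total + 128) / t->ttime_samples;
        t->last_end_request = now;
}

int main(void)
{
        struct ttime t = { .last_end_request = 0 };
        uint64_t slice_idle = NSEC_PER_SEC / 125;      /* 8 ms */
        uint64_t now = 0;

        for (int i = 0; i < 10; i++) {
                now += 2 * 1000 * 1000;                /* a request every 2 ms */
                update_thinktime(&t, now, slice_idle);
                printf("sample %2d: mean = %llu us\n", i,
                       (unsigned long long)(t.ttime_mean / 1000));
        }
        return 0;
}
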
 
 static void
@@ -4105,10 +4132,10 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
        cfq_log_cfqq(cfqd, cfqq, "insert_request");
        cfq_init_prio_data(cfqq, RQ_CIC(rq));
 
-       rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
+       rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
        list_add_tail(&rq->queuelist, &cfqq->fifo);
        cfq_add_rq_rb(rq);
-       cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
+       cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, req_op(rq),
                                 rq->cmd_flags);
        cfq_rq_enqueued(cfqd, cfqq, rq);
 }
@@ -4153,6 +4180,7 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        struct cfq_io_cq *cic = cfqd->active_cic;
+       u64 now = ktime_get_ns();
 
        /* If the queue already has requests, don't wait */
        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
@@ -4171,7 +4199,7 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
        /* if slice left is less than think time, wait busy */
        if (cic && sample_valid(cic->ttime.ttime_samples)
-           && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
+           && (cfqq->slice_end - now < cic->ttime.ttime_mean))
                return true;
 
        /*
@@ -4181,7 +4209,7 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         * case where think time is less than a jiffy, mark the queue wait
         * busy if only 1 jiffy is left in the slice.
         */
-       if (cfqq->slice_end - jiffies == 1)
+       if (cfqq->slice_end - now <= jiffies_to_nsecs(1))
                return true;
 
        return false;
@@ -4192,9 +4220,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        struct cfq_data *cfqd = cfqq->cfqd;
        const int sync = rq_is_sync(rq);
-       unsigned long now;
+       u64 now = ktime_get_ns();
 
-       now = jiffies;
        cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
                     !!(rq->cmd_flags & REQ_NOIDLE));
 
@@ -4206,7 +4233,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        cfqq->dispatched--;
        (RQ_CFQG(rq))->dispatched--;
        cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
-                                    rq_io_start_time_ns(rq), rq->cmd_flags);
+                                    rq_io_start_time_ns(rq), req_op(rq),
+                                    rq->cmd_flags);
 
        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -4222,7 +4250,16 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                                        cfqq_type(cfqq));
 
                st->ttime.last_end_request = now;
-               if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
+               /*
+                * We have to do this check in jiffies since start_time is in
+                * jiffies and it is not trivial to convert to ns. If
+                * cfq_fifo_expire[1] ever comes close to 1 jiffie, this test
+                * will become problematic but so far we are fine (the default
+                * is 128 ms).
+                */
+               if (!time_after(rq->start_time +
+                                 nsecs_to_jiffies(cfqd->cfq_fifo_expire[1]),
+                               jiffies))
                        cfqd->last_delayed_sync = now;
        }
 
@@ -4247,10 +4284,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                 * the queue.
                 */
                if (cfq_should_wait_busy(cfqd, cfqq)) {
-                       unsigned long extend_sl = cfqd->cfq_slice_idle;
+                       u64 extend_sl = cfqd->cfq_slice_idle;
                        if (!cfqd->cfq_slice_idle)
                                extend_sl = cfqd->cfq_group_idle;
-                       cfqq->slice_end = jiffies + extend_sl;
+                       cfqq->slice_end = now + extend_sl;
                        cfq_mark_cfqq_wait_busy(cfqq);
                        cfq_log_cfqq(cfqd, cfqq, "will busy wait");
                }
@@ -4275,6 +4312,24 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                cfq_schedule_dispatch(cfqd);
 }
 
+static void cfqq_boost_on_prio(struct cfq_queue *cfqq, int op_flags)
+{
+       /*
+        * If REQ_PRIO is set, boost class and prio level, if it's below
+        * BE/NORM. If prio is not set, restore the potentially boosted
+        * class/prio level.
+        */
+       if (!(op_flags & REQ_PRIO)) {
+               cfqq->ioprio_class = cfqq->org_ioprio_class;
+               cfqq->ioprio = cfqq->org_ioprio;
+       } else {
+               if (cfq_class_idle(cfqq))
+                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
+               if (cfqq->ioprio > IOPRIO_NORM)
+                       cfqq->ioprio = IOPRIO_NORM;
+       }
+}
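
cfqq_boost_on_prio() is new: REQ_PRIO requests temporarily lift an idle-class or low-priority queue to at least BE/NORM, and the org_ioprio/org_ioprio_class fields added earlier restore the original values once unprioritised IO resumes. A stand-alone sketch of that state machine (the enum values and the REQ_PRIO bit below are illustrative stand-ins, not the kernel definitions):

#include <stdio.h>

enum { IOPRIO_CLASS_RT = 1, IOPRIO_CLASS_BE = 2, IOPRIO_CLASS_IDLE = 3 };
#define IOPRIO_NORM 4
#define REQ_PRIO    (1 << 0)   /* stand-in bit for the real request flag */

struct queue {
        unsigned short ioprio, org_ioprio;
        unsigned short ioprio_class, org_ioprio_class;
};

static void boost_on_prio(struct queue *q, int op_flags)
{
        if (!(op_flags & REQ_PRIO)) {
                q->ioprio_class = q->org_ioprio_class;   /* undo a prior boost */
                q->ioprio = q->org_ioprio;
        } else {
                if (q->ioprio_class == IOPRIO_CLASS_IDLE)
                        q->ioprio_class = IOPRIO_CLASS_BE; /* idle -> best effort */
                if (q->ioprio > IOPRIO_NORM)
                        q->ioprio = IOPRIO_NORM;           /* at least normal prio */
        }
}

int main(void)
{
        struct queue q = {
                .ioprio = 7, .org_ioprio = 7,
                .ioprio_class = IOPRIO_CLASS_IDLE,
                .org_ioprio_class = IOPRIO_CLASS_IDLE,
        };

        boost_on_prio(&q, REQ_PRIO);   /* prioritised IO: class/prio lifted   */
        printf("boosted:  class=%u prio=%u\n", q.ioprio_class, q.ioprio);

        boost_on_prio(&q, 0);          /* normal IO again: original values back */
        printf("restored: class=%u prio=%u\n", q.ioprio_class, q.ioprio);
        return 0;
}
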
+
 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
 {
        if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
@@ -4285,7 +4340,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
        return ELV_MQUEUE_MAY;
 }
 
-static int cfq_may_queue(struct request_queue *q, int rw)
+static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct task_struct *tsk = current;
@@ -4302,9 +4357,10 @@ static int cfq_may_queue(struct request_queue *q, int rw)
        if (!cic)
                return ELV_MQUEUE_MAY;
 
-       cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
+       cfqq = cic_to_cfqq(cic, rw_is_sync(op, op_flags));
        if (cfqq) {
                cfq_init_prio_data(cfqq, cic);
+               cfqq_boost_on_prio(cfqq, op_flags);
 
                return __cfq_may_queue(cfqq);
        }
@@ -4435,9 +4491,10 @@ static void cfq_kick_queue(struct work_struct *work)
 /*
  * Timer running if the active_queue is currently idling inside its time slice
  */
-static void cfq_idle_slice_timer(unsigned long data)
+static enum hrtimer_restart cfq_idle_slice_timer(struct hrtimer *timer)
 {
-       struct cfq_data *cfqd = (struct cfq_data *) data;
+       struct cfq_data *cfqd = container_of(timer, struct cfq_data,
+                                            idle_slice_timer);
        struct cfq_queue *cfqq;
        unsigned long flags;
        int timed_out = 1;
@@ -4486,11 +4543,12 @@ out_kick:
        cfq_schedule_dispatch(cfqd);
 out_cont:
        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+       return HRTIMER_NORESTART;
 }
 
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
-       del_timer_sync(&cfqd->idle_slice_timer);
+       hrtimer_cancel(&cfqd->idle_slice_timer);
        cancel_work_sync(&cfqd->unplug_work);
 }
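
For context on the timer_list to hrtimer conversion threaded through this file: an hrtimer is initialised against a clock and mode, armed with a ktime_t deadline instead of a jiffies value, and its callback returns whether to restart. A minimal, hypothetical out-of-tree module (not part of this patch) exercising the same one-shot pattern:

#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
        pr_info("demo hrtimer fired\n");
        return HRTIMER_NORESTART;       /* one-shot, like the idle slice timer */
}

static int __init demo_init(void)
{
        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        demo_timer.function = demo_timer_fn;
        /* arm for 8 ms, roughly cfq_slice_idle's default */
        hrtimer_start(&demo_timer, ms_to_ktime(8), HRTIMER_MODE_REL);
        return 0;
}

static void __exit demo_exit(void)
{
        hrtimer_cancel(&demo_timer);    /* like cfq_shutdown_timer_wq() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
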
 
@@ -4586,9 +4644,9 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
        cfqg_put(cfqd->root_group);
        spin_unlock_irq(q->queue_lock);
 
-       init_timer(&cfqd->idle_slice_timer);
+       hrtimer_init(&cfqd->idle_slice_timer, CLOCK_MONOTONIC,
+                    HRTIMER_MODE_REL);
        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
-       cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
@@ -4609,7 +4667,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
         * we optimistically start assuming sync ops weren't delayed in last
         * second, in order to have larger depth for async operations.
         */
-       cfqd->last_delayed_sync = jiffies - HZ;
+       cfqd->last_delayed_sync = ktime_get_ns() - NSEC_PER_SEC;
        return 0;
 
 out_free:
@@ -4652,9 +4710,9 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
 static ssize_t __FUNC(struct elevator_queue *e, char *page)            \
 {                                                                      \
        struct cfq_data *cfqd = e->elevator_data;                       \
-       unsigned int __data = __VAR;                                    \
+       u64 __data = __VAR;                                             \
        if (__CONV)                                                     \
-               __data = jiffies_to_msecs(__data);                      \
+               __data = div_u64(__data, NSEC_PER_MSEC);                        \
        return cfq_var_show(__data, (page));                            \
 }
 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
@@ -4671,6 +4729,21 @@ SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
 SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
 #undef SHOW_FUNCTION
 
+#define USEC_SHOW_FUNCTION(__FUNC, __VAR)                              \
+static ssize_t __FUNC(struct elevator_queue *e, char *page)            \
+{                                                                      \
+       struct cfq_data *cfqd = e->elevator_data;                       \
+       u64 __data = __VAR;                                             \
+       __data = div_u64(__data, NSEC_PER_USEC);                        \
+       return cfq_var_show(__data, (page));                            \
+}
+USEC_SHOW_FUNCTION(cfq_slice_idle_us_show, cfqd->cfq_slice_idle);
+USEC_SHOW_FUNCTION(cfq_group_idle_us_show, cfqd->cfq_group_idle);
+USEC_SHOW_FUNCTION(cfq_slice_sync_us_show, cfqd->cfq_slice[1]);
+USEC_SHOW_FUNCTION(cfq_slice_async_us_show, cfqd->cfq_slice[0]);
+USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
+#undef USEC_SHOW_FUNCTION
+
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)        \
 {                                                                      \
@@ -4682,7 +4755,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
-               *(__PTR) = msecs_to_jiffies(__data);                    \
+               *(__PTR) = (u64)__data * NSEC_PER_MSEC;                 \
        else                                                            \
                *(__PTR) = __data;                                      \
        return ret;                                                     \
@@ -4705,6 +4778,26 @@ STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
 STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
 #undef STORE_FUNCTION
 
+#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)                   \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)        \
+{                                                                      \
+       struct cfq_data *cfqd = e->elevator_data;                       \
+       unsigned int __data;                                            \
+       int ret = cfq_var_store(&__data, (page), count);                \
+       if (__data < (MIN))                                             \
+               __data = (MIN);                                         \
+       else if (__data > (MAX))                                        \
+               __data = (MAX);                                         \
+       *(__PTR) = (u64)__data * NSEC_PER_USEC;                         \
+       return ret;                                                     \
+}
+USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX);
+USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX);
+USEC_STORE_FUNCTION(cfq_slice_sync_us_store, &cfqd->cfq_slice[1], 1, UINT_MAX);
+USEC_STORE_FUNCTION(cfq_slice_async_us_store, &cfqd->cfq_slice[0], 1, UINT_MAX);
+USEC_STORE_FUNCTION(cfq_target_latency_us_store, &cfqd->cfq_target_latency, 1, UINT_MAX);
+#undef USEC_STORE_FUNCTION
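
The new *_us variants store microsecond input as nanoseconds and divide back on display, so values round-trip exactly while the legacy millisecond files keep working. A trivial check of that conversion (the attribute name in the comment is just the example used here):

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL
#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
        uint64_t input_us  = 8000;                       /* echo 8000 > slice_idle_us */
        uint64_t stored_ns = input_us * NSEC_PER_USEC;   /* what the store hook keeps */

        assert(stored_ns / NSEC_PER_USEC == input_us);   /* slice_idle_us shows 8000  */
        assert(stored_ns / NSEC_PER_MSEC == 8);          /* legacy slice_idle shows 8 */
        return 0;
}
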
+
 #define CFQ_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
 
@@ -4715,12 +4808,17 @@ static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(back_seek_max),
        CFQ_ATTR(back_seek_penalty),
        CFQ_ATTR(slice_sync),
+       CFQ_ATTR(slice_sync_us),
        CFQ_ATTR(slice_async),
+       CFQ_ATTR(slice_async_us),
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
+       CFQ_ATTR(slice_idle_us),
        CFQ_ATTR(group_idle),
+       CFQ_ATTR(group_idle_us),
        CFQ_ATTR(low_latency),
        CFQ_ATTR(target_latency),
+       CFQ_ATTR(target_latency_us),
        __ATTR_NULL
 };
 
@@ -4729,7 +4827,8 @@ static struct elevator_type iosched_cfq = {
                .elevator_merge_fn =            cfq_merge,
                .elevator_merged_fn =           cfq_merged_request,
                .elevator_merge_req_fn =        cfq_merged_requests,
-               .elevator_allow_merge_fn =      cfq_allow_merge,
+               .elevator_allow_bio_merge_fn =  cfq_allow_bio_merge,
+               .elevator_allow_rq_merge_fn =   cfq_allow_rq_merge,
                .elevator_bio_merged_fn =       cfq_bio_merged,
                .elevator_dispatch_fn =         cfq_dispatch_requests,
                .elevator_add_req_fn =          cfq_insert_request,
@@ -4776,18 +4875,7 @@ static int __init cfq_init(void)
 {
        int ret;
 
-       /*
-        * could be 0 on HZ < 1000 setups
-        */
-       if (!cfq_slice_async)
-               cfq_slice_async = 1;
-       if (!cfq_slice_idle)
-               cfq_slice_idle = 1;
-
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-       if (!cfq_group_idle)
-               cfq_group_idle = 1;
-
        ret = blkcg_policy_register(&blkcg_policy_cfq);
        if (ret)
                return ret;
index d0dd7882d8c7fa7ffe80ea2e9a6ad3c0f5e97a68..55e0bb6d7da796e3b526a16832914ca88fcb879f 100644 (file)
@@ -137,7 +137,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
                if (__rq) {
                        BUG_ON(sector != blk_rq_pos(__rq));
 
-                       if (elv_rq_merge_ok(__rq, bio)) {
+                       if (elv_bio_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
                                goto out;
                        }
@@ -173,7 +173,8 @@ deadline_merged_requests(struct request_queue *q, struct request *req,
         * and move into next position (next will be deleted) in fifo
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
-               if (time_before(next->fifo_time, req->fifo_time)) {
+               if (time_before((unsigned long)next->fifo_time,
+                               (unsigned long)req->fifo_time)) {
                        list_move(&req->queuelist, &next->queuelist);
                        req->fifo_time = next->fifo_time;
                }
@@ -227,7 +228,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
        /*
         * rq is expired!
         */
-       if (time_after_eq(jiffies, rq->fifo_time))
+       if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                return 1;
 
        return 0;
index c3555c9c672f94c1f13c3cd3c75c037e7c8110a7..7096c22041e7e6680b320ca08e4be831591b6477 100644 (file)
@@ -53,13 +53,13 @@ static LIST_HEAD(elv_list);
  * Query io scheduler to see if the current process issuing bio may be
  * merged with rq.
  */
-static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
 {
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
 
-       if (e->type->ops.elevator_allow_merge_fn)
-               return e->type->ops.elevator_allow_merge_fn(q, rq, bio);
+       if (e->type->ops.elevator_allow_bio_merge_fn)
+               return e->type->ops.elevator_allow_bio_merge_fn(q, rq, bio);
 
        return 1;
 }
@@ -67,17 +67,17 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 /*
  * can we safely merge with this request?
  */
-bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
+bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
 {
        if (!blk_rq_merge_ok(rq, bio))
-               return 0;
+               return false;
 
-       if (!elv_iosched_allow_merge(rq, bio))
-               return 0;
+       if (!elv_iosched_allow_bio_merge(rq, bio))
+               return false;
 
-       return 1;
+       return true;
 }
-EXPORT_SYMBOL(elv_rq_merge_ok);
+EXPORT_SYMBOL(elv_bio_merge_ok);
 
 static struct elevator_type *elevator_find(const char *name)
 {
@@ -366,8 +366,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);
 
-               if ((rq->cmd_flags & REQ_DISCARD) !=
-                   (pos->cmd_flags & REQ_DISCARD))
+               if ((req_op(rq) == REQ_OP_DISCARD) != (req_op(pos) == REQ_OP_DISCARD))
                        break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
@@ -426,7 +425,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
        /*
         * First try one-hit cache.
         */
-       if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
+       if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
                ret = blk_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
@@ -441,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
-       if (__rq && elv_rq_merge_ok(__rq, bio)) {
+       if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }
@@ -717,12 +716,12 @@ void elv_put_request(struct request_queue *q, struct request *rq)
                e->type->ops.elevator_put_req_fn(rq);
 }
 
-int elv_may_queue(struct request_queue *q, int rw)
+int elv_may_queue(struct request_queue *q, int op, int op_flags)
 {
        struct elevator_queue *e = q->elevator;
 
        if (e->type->ops.elevator_may_queue_fn)
-               return e->type->ops.elevator_may_queue_fn(q, rw);
+               return e->type->ops.elevator_may_queue_fn(q, op, op_flags);
 
        return ELV_MQUEUE_MAY;
 }
index d7eb77e1e3a8f4be13f6016ed0e7f7ab7c051da8..71d9ed9df8daeae8b54234634a5eab45bf0df00d 100644 (file)
@@ -495,7 +495,6 @@ rescan:
        /* add partitions */
        for (p = 1; p < state->limit; p++) {
                sector_t size, from;
-               struct partition_meta_info *info = NULL;
 
                size = state->parts[p].size;
                if (!size)
@@ -530,8 +529,6 @@ rescan:
                        }
                }
 
-               if (state->parts[p].has_info)
-                       info = &state->parts[p].info;
                part = add_partition(disk, p, from, size,
                                     state->parts[p].flags,
                                     &state->parts[p].info);
index 9875b05e80a2cfb8bbdd859db09308cf70286814..ff1fb93712c1a84eb2fa605678d39feca7acc09e 100644 (file)
@@ -42,6 +42,13 @@ int atari_partition(struct parsed_partitions *state)
        int part_fmt = 0; /* 0:unknown, 1:AHDI, 2:ICD/Supra */
 #endif
 
+       /*
+        * ATARI partition scheme supports 512 lba only.  If this is not
+        * the case, bail early to avoid miscalculating hd_size.
+        */
+       if (bdev_logical_block_size(state->bdev) != 512)
+               return 0;
+
        rs = read_part_sector(state, 0, &sect);
        if (!rs)
                return -1;
index 2bdb5dab922b734ae2daa80961ecd1fe36fab89a..e207b33e4ce9d602ca5d72144a7332c91f2515c9 100644 (file)
@@ -1190,7 +1190,7 @@ static int atapi_drain_needed(struct request *rq)
        if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
                return 0;
 
-       if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
+       if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
                return 0;
 
        return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
index c04bd9bc39fd0565e1d11c6af9b15632c110148e..dd96a935fba04ea8a4d55dbe7d7038889cb08561 100644 (file)
@@ -339,7 +339,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
        if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
                goto io_error;
 
-       if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+       if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
                if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
                    bio->bi_iter.bi_size & ~PAGE_MASK)
                        goto io_error;
@@ -509,7 +509,9 @@ static struct brd_device *brd_alloc(int i)
        blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
        brd->brd_queue->limits.discard_zeroes_data = 1;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
-
+#ifdef CONFIG_BLK_DEV_RAM_DAX
+       queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
+#endif
        disk = brd->brd_disk = alloc_disk(max_part);
        if (!disk)
                goto out_free_queue;
index 10459a14506224ede01f9bbca97be127397142cd..d524973f94b3c7f94cba9cc712274d6cee815cf3 100644 (file)
@@ -137,19 +137,19 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b
 
 static int _drbd_md_sync_page_io(struct drbd_device *device,
                                 struct drbd_backing_dev *bdev,
-                                sector_t sector, int rw)
+                                sector_t sector, int op)
 {
        struct bio *bio;
        /* we do all our meta data IO in aligned 4k blocks. */
        const int size = 4096;
-       int err;
+       int err, op_flags = 0;
 
        device->md_io.done = 0;
        device->md_io.error = -ENODEV;
 
-       if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags))
-               rw |= REQ_FUA | REQ_FLUSH;
-       rw |= REQ_SYNC | REQ_NOIDLE;
+       if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
+               op_flags |= REQ_FUA | REQ_PREFLUSH;
+       op_flags |= REQ_SYNC | REQ_NOIDLE;
 
        bio = bio_alloc_drbd(GFP_NOIO);
        bio->bi_bdev = bdev->md_bdev;
@@ -159,9 +159,9 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
                goto out;
        bio->bi_private = device;
        bio->bi_end_io = drbd_md_endio;
-       bio->bi_rw = rw;
+       bio_set_op_attrs(bio, op, op_flags);
 
-       if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL)
+       if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
                /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
                ;
        else if (!get_ldev_if_state(device, D_ATTACHING)) {
@@ -174,10 +174,10 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
        bio_get(bio); /* one bio_put() is in the completion handler */
        atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
        device->md_io.submit_jif = jiffies;
-       if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
+       if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
                bio_io_error(bio);
        else
-               submit_bio(rw, bio);
+               submit_bio(bio);
        wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
        if (!bio->bi_error)
                err = device->md_io.error;
@@ -188,7 +188,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
 }
 
 int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
-                        sector_t sector, int rw)
+                        sector_t sector, int op)
 {
        int err;
        D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);
@@ -197,19 +197,21 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd
 
        dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
             current->comm, current->pid, __func__,
-            (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
+            (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ",
             (void*)_RET_IP_ );
 
        if (sector < drbd_md_first_sector(bdev) ||
            sector + 7 > drbd_md_last_sector(bdev))
                drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
                     current->comm, current->pid, __func__,
-                    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
+                    (unsigned long long)sector,
+                    (op == REQ_OP_WRITE) ? "WRITE" : "READ");
 
-       err = _drbd_md_sync_page_io(device, bdev, sector, rw);
+       err = _drbd_md_sync_page_io(device, bdev, sector, op);
        if (err) {
                drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
-                   (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
+                   (unsigned long long)sector,
+                   (op == REQ_OP_WRITE) ? "WRITE" : "READ", err);
        }
        return err;
 }
@@ -845,7 +847,7 @@ int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
        unsigned long count = 0;
        sector_t esector, nr_sectors;
 
-       /* This would be an empty REQ_FLUSH, be silent. */
+       /* This would be an empty REQ_PREFLUSH, be silent. */
        if ((mode == SET_OUT_OF_SYNC) && size == 0)
                return 0;
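
The conversion in this file shows the recurring pattern of the series: the combined rw integer becomes a separate op plus op_flags, and submit_bio() takes only the bio. A stand-alone sketch of that split, with simplified stand-in constants rather than the kernel's definitions:

#include <stdio.h>

/* Simplified stand-ins for the kernel's REQ_OP_* / REQ_* values;
 * the numbers are illustrative only. */
enum req_op { REQ_OP_READ = 0, REQ_OP_WRITE = 1 };
#define REQ_SYNC     (1u << 0)
#define REQ_FUA      (1u << 1)
#define REQ_PREFLUSH (1u << 2)

struct bio_model {
        enum req_op op;          /* what to do: read, write, ...       */
        unsigned int op_flags;   /* how to do it: sync, FUA, preflush  */
};

/* Mirrors the shape of bio_set_op_attrs(): op and flags are stored
 * separately instead of being or-ed into one "rw" word. */
static void set_op_attrs(struct bio_model *bio, enum req_op op, unsigned int flags)
{
        bio->op = op;
        bio->op_flags = flags;
}

/* After the conversion, submission needs only the bio; the operation
 * travels inside it. */
static void submit(const struct bio_model *bio)
{
        printf("op=%s flags=%#x\n",
               bio->op == REQ_OP_WRITE ? "WRITE" : "READ", bio->op_flags);
}

int main(void)
{
        struct bio_model md_io;

        /* A metadata write wants FUA plus a preflush unless the device
         * opts out, much like _drbd_md_sync_page_io() above. */
        set_op_attrs(&md_io, REQ_OP_WRITE, REQ_SYNC | REQ_FUA | REQ_PREFLUSH);
        submit(&md_io);
        return 0;
}
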
 
index 92d6fc020a657c0694cc02c99d134fdf47c85c12..e5d89f623b90f90c956a04c883a0f6213a023b6c 100644 (file)
@@ -980,7 +980,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
        struct drbd_bitmap *b = device->bitmap;
        struct page *page;
        unsigned int len;
-       unsigned int rw = (ctx->flags & BM_AIO_READ) ? READ : WRITE;
+       unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
 
        sector_t on_disk_sector =
                device->ldev->md.md_offset + device->ldev->md.bm_offset;
@@ -1011,12 +1011,12 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
        bio_add_page(bio, page, len, 0);
        bio->bi_private = ctx;
        bio->bi_end_io = drbd_bm_endio;
+       bio_set_op_attrs(bio, op, 0);
 
-       if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
-               bio->bi_rw |= rw;
+       if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
                bio_io_error(bio);
        } else {
-               submit_bio(rw, bio);
+               submit_bio(bio);
                /* this should not count as user activity and cause the
                 * resync to throttle -- see drbd_rs_should_slow_down(). */
                atomic_add(len >> 9, &device->rs_sect_ev);
index 7a1cf7eaa71dc8adccd1a5a5d332edacda8df40f..a64c645b4184b5170e02d043cb57dafafbee973b 100644 (file)
@@ -1327,14 +1327,14 @@ struct bm_extent {
 #endif
 #endif
 
-/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
+/* Estimate max bio size as 256 * PAGE_SIZE,
  * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
  * Since we may live in a mixed-platform cluster,
  * we limit us to a platform agnostic constant here for now.
  * A followup commit may allow even bigger BIO sizes,
  * once we thought that through. */
 #define DRBD_MAX_BIO_SIZE (1U << 20)
-#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
+#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
 #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
 #endif
 #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)       /* Works always = 4k */
@@ -1507,7 +1507,7 @@ extern int drbd_resync_finished(struct drbd_device *device);
 extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
 extern void drbd_md_put_buffer(struct drbd_device *device);
 extern int drbd_md_sync_page_io(struct drbd_device *device,
-               struct drbd_backing_dev *bdev, sector_t sector, int rw);
+               struct drbd_backing_dev *bdev, sector_t sector, int op);
 extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
 extern void wait_until_done_or_force_detached(struct drbd_device *device,
                struct drbd_backing_dev *bdev, unsigned int *done);
@@ -1557,7 +1557,7 @@ extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector
                bool throttle_if_app_is_waiting);
 extern int drbd_submit_peer_request(struct drbd_device *,
                                    struct drbd_peer_request *, const unsigned,
-                                   const int);
+                                   const unsigned, const int);
 extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
 extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
                                                     sector_t, unsigned int,
index 2ba1494b279997cb730f29c9e4600dcd8ac3f68e..2b37744db0fa3232443122dbe4958231ad9f47f4 100644 (file)
@@ -1603,15 +1603,16 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
        return 0;
 }
 
-static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw)
+static u32 bio_flags_to_wire(struct drbd_connection *connection,
+                            struct bio *bio)
 {
        if (connection->agreed_pro_version >= 95)
-               return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-                       (bi_rw & REQ_FUA ? DP_FUA : 0) |
-                       (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
-                       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
+               return  (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
+                       (bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
+                       (bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
+                       (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
        else
-               return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+               return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
 }
 
 /* Used to send write or TRIM aka REQ_DISCARD requests
@@ -1636,7 +1637,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
        p->sector = cpu_to_be64(req->i.sector);
        p->block_id = (unsigned long)req;
        p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
-       dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
+       dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
        if (device->state.conn >= C_SYNC_SOURCE &&
            device->state.conn <= C_PAUSED_SYNC_T)
                dp_flags |= DP_MAY_SET_IN_SYNC;
@@ -3061,7 +3062,7 @@ void drbd_md_write(struct drbd_device *device, void *b)
        D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
        sector = device->ldev->md.md_offset;
 
-       if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
+       if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
                /* this was a try anyways ... */
                drbd_err(device, "meta data update failed!\n");
                drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
@@ -3263,7 +3264,8 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
         * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
        bdev->md.md_size_sect = 8;
 
-       if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
+       if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
+                                REQ_OP_READ)) {
                /* NOTE: can't do normal error processing here as this is
                   called BEFORE disk is attached */
                drbd_err(device, "Error while reading metadata.\n");
index ef9245363dccc6183e680083d764404e2e6acd16..129f8c76c9b1047a6a755cae3bad38c634ce8393 100644 (file)
@@ -112,7 +112,7 @@ struct p_header100 {
 #define DP_MAY_SET_IN_SYNC    4
 #define DP_UNPLUG             8 /* not used anymore   */
 #define DP_FUA               16 /* equals REQ_FUA     */
-#define DP_FLUSH             32 /* equals REQ_FLUSH   */
+#define DP_FLUSH             32 /* equals REQ_PREFLUSH   */
 #define DP_DISCARD           64 /* equals REQ_DISCARD */
 #define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
 #define DP_SEND_WRITE_ACK   256 /* This is a proto C write request */
index 050aaa1c03504e7bb1f90be3628997385b8fe4cc..1ee002352ea2ace9f778458f2980185c09e4680b 100644 (file)
@@ -1398,7 +1398,8 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
 /* TODO allocate from our own bio_set. */
 int drbd_submit_peer_request(struct drbd_device *device,
                             struct drbd_peer_request *peer_req,
-                            const unsigned rw, const int fault_type)
+                            const unsigned op, const unsigned op_flags,
+                            const int fault_type)
 {
        struct bio *bios = NULL;
        struct bio *bio;
@@ -1450,7 +1451,7 @@ next_bio:
        /* > peer_req->i.sector, unless this is the first bio */
        bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = device->ldev->backing_bdev;
-       bio->bi_rw = rw;
+       bio_set_op_attrs(bio, op, op_flags);
        bio->bi_private = peer_req;
        bio->bi_end_io = drbd_peer_request_endio;
 
@@ -1458,7 +1459,7 @@ next_bio:
        bios = bio;
        ++n_bios;
 
-       if (rw & REQ_DISCARD) {
+       if (op == REQ_OP_DISCARD) {
                bio->bi_iter.bi_size = data_size;
                goto submit;
        }
@@ -1830,7 +1831,8 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
        spin_unlock_irq(&device->resource->req_lock);
 
        atomic_add(pi->size >> 9, &device->rs_sect_ev);
-       if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
+       if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
+                                    DRBD_FAULT_RS_WR) == 0)
                return 0;
 
        /* don't care for the reason here */
@@ -2152,12 +2154,19 @@ static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, co
 /* see also bio_flags_to_wire()
  * DRBD_REQ_*, because we need to semantically map the flags to data packet
  * flags and back. We may replicate to other kernel versions. */
-static unsigned long wire_flags_to_bio(u32 dpf)
+static unsigned long wire_flags_to_bio_flags(u32 dpf)
 {
        return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
                (dpf & DP_FUA ? REQ_FUA : 0) |
-               (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
-               (dpf & DP_DISCARD ? REQ_DISCARD : 0);
+               (dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
+}
+
+static unsigned long wire_flags_to_bio_op(u32 dpf)
+{
+       if (dpf & DP_DISCARD)
+               return REQ_OP_DISCARD;
+       else
+               return REQ_OP_WRITE;
 }
 
 static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
@@ -2303,7 +2312,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
        struct drbd_peer_request *peer_req;
        struct p_data *p = pi->data;
        u32 peer_seq = be32_to_cpu(p->seq_num);
-       int rw = WRITE;
+       int op, op_flags;
        u32 dp_flags;
        int err, tp;
 
@@ -2342,14 +2351,15 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
        peer_req->flags |= EE_APPLICATION;
 
        dp_flags = be32_to_cpu(p->dp_flags);
-       rw |= wire_flags_to_bio(dp_flags);
+       op = wire_flags_to_bio_op(dp_flags);
+       op_flags = wire_flags_to_bio_flags(dp_flags);
        if (pi->cmd == P_TRIM) {
                struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
                peer_req->flags |= EE_IS_TRIM;
                if (!blk_queue_discard(q))
                        peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT;
                D_ASSERT(peer_device, peer_req->i.size > 0);
-               D_ASSERT(peer_device, rw & REQ_DISCARD);
+               D_ASSERT(peer_device, op == REQ_OP_DISCARD);
                D_ASSERT(peer_device, peer_req->pages == NULL);
        } else if (peer_req->pages == NULL) {
                D_ASSERT(device, peer_req->i.size == 0);
@@ -2433,7 +2443,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
                peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
        }
 
-       err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR);
+       err = drbd_submit_peer_request(device, peer_req, op, op_flags,
+                                      DRBD_FAULT_DT_WR);
        if (!err)
                return 0;
 
@@ -2723,7 +2734,8 @@ submit_for_resync:
 submit:
        update_receiver_timing_details(connection, drbd_submit_peer_request);
        inc_unacked(device);
-       if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
+       if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+                                    fault_type) == 0)
                return 0;
 
        /* don't care for the reason here */
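
receive_Data() now derives the operation from DP_DISCARD and keeps FUA/FLUSH as modifier flags. A small self-contained sketch of that wire-to-bio mapping; the DP_* values follow the drbd_protocol.h hunk above, while the REQ_* constants are simplified stand-ins:

#include <stdio.h>

/* Wire flags as defined in drbd_protocol.h above. */
#define DP_FUA      16
#define DP_FLUSH    32
#define DP_DISCARD  64

/* Simplified stand-ins for the bio-side constants. */
enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD };
#define REQ_FUA      (1u << 0)
#define REQ_PREFLUSH (1u << 1)

/* The operation is decided by DP_DISCARD alone, as in
 * wire_flags_to_bio_op()... */
static enum req_op wire_to_op(unsigned int dpf)
{
        return (dpf & DP_DISCARD) ? REQ_OP_DISCARD : REQ_OP_WRITE;
}

/* ...while FUA/FLUSH stay modifier flags, as in wire_flags_to_bio_flags(). */
static unsigned int wire_to_flags(unsigned int dpf)
{
        return ((dpf & DP_FUA) ? REQ_FUA : 0) |
               ((dpf & DP_FLUSH) ? REQ_PREFLUSH : 0);
}

int main(void)
{
        unsigned int dpf = DP_FUA | DP_FLUSH;   /* e.g. a flushing write */

        printf("op=%d flags=%#x\n", wire_to_op(dpf), wire_to_flags(dpf));
        printf("discard -> op=%d\n", wire_to_op(DP_DISCARD));
        return 0;
}
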
index 2255dcfebd2b514d2373424718e1ad4831a0c3ab..eef6e9575b4e17c5d2b54e810206b416511a4d46 100644 (file)
@@ -1132,7 +1132,7 @@ static int drbd_process_write_request(struct drbd_request *req)
         * replicating, in which case there is no point. */
        if (unlikely(req->i.size == 0)) {
                /* The only size==0 bios we expect are empty flushes. */
-               D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
+               D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
                if (remote)
                        _req_mod(req, QUEUE_AS_DRBD_BARRIER);
                return remote;
index 4d87499f0d54829bd22594f97202f77fdd533f2e..51fab978eb61587903ec08f2f08972c26337b581 100644 (file)
@@ -174,7 +174,7 @@ void drbd_peer_request_endio(struct bio *bio)
        struct drbd_peer_request *peer_req = bio->bi_private;
        struct drbd_device *device = peer_req->peer_device->device;
        int is_write = bio_data_dir(bio) == WRITE;
-       int is_discard = !!(bio->bi_rw & REQ_DISCARD);
+       int is_discard = !!(bio_op(bio) == REQ_OP_DISCARD);
 
        if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
                drbd_warn(device, "%s: error=%d s=%llus\n",
@@ -248,7 +248,7 @@ void drbd_request_endio(struct bio *bio)
 
        /* to avoid recursion in __req_mod */
        if (unlikely(bio->bi_error)) {
-               if (bio->bi_rw & REQ_DISCARD)
+               if (bio_op(bio) == REQ_OP_DISCARD)
                        what = (bio->bi_error == -EOPNOTSUPP)
                                ? DISCARD_COMPLETED_NOTSUPP
                                : DISCARD_COMPLETED_WITH_ERROR;
@@ -397,7 +397,8 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
        spin_unlock_irq(&device->resource->req_lock);
 
        atomic_add(size >> 9, &device->rs_sect_ev);
-       if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
+       if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+                                    DRBD_FAULT_RS_RD) == 0)
                return 0;
 
        /* If it failed because of ENOMEM, retry should help.  If it failed
index 84708a5f8c520cb86db4ad62a4b845a5ec83b88f..f9bfecd733a8fe28a2cb6bfb56871576044fef74 100644 (file)
@@ -3822,8 +3822,9 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
        bio.bi_flags |= (1 << BIO_QUIET);
        bio.bi_private = &cbdata;
        bio.bi_end_io = floppy_rb0_cb;
+       bio_set_op_attrs(&bio, REQ_OP_READ, 0);
 
-       submit_bio(READ, &bio);
+       submit_bio(&bio);
        process_fd_request();
 
        init_completion(&cbdata.complete);
index 1fa8cc235977f404bc995d73659714fbccad7066..364d491d4bdd4643d93329c57ea3589bc7840ddc 100644 (file)
@@ -447,7 +447,7 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
 
 static inline void handle_partial_read(struct loop_cmd *cmd, long bytes)
 {
-       if (bytes < 0 || (cmd->rq->cmd_flags & REQ_WRITE))
+       if (bytes < 0 || op_is_write(req_op(cmd->rq)))
                return;
 
        if (unlikely(bytes < blk_rq_bytes(cmd->rq))) {
@@ -541,10 +541,10 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 
        pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
 
-       if (rq->cmd_flags & REQ_WRITE) {
-               if (rq->cmd_flags & REQ_FLUSH)
+       if (op_is_write(req_op(rq))) {
+               if (req_op(rq) == REQ_OP_FLUSH)
                        ret = lo_req_flush(lo, rq);
-               else if (rq->cmd_flags & REQ_DISCARD)
+               else if (req_op(rq) == REQ_OP_DISCARD)
                        ret = lo_discard(lo, rq, pos);
                else if (lo->transfer)
                        ret = lo_write_transfer(lo, rq, pos);
@@ -1659,8 +1659,8 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (lo->lo_state != Lo_bound)
                return -EIO;
 
-       if (lo->use_dio && !(cmd->rq->cmd_flags & (REQ_FLUSH |
-                                       REQ_DISCARD)))
+       if (lo->use_dio && (req_op(cmd->rq) != REQ_OP_FLUSH ||
+           req_op(cmd->rq) == REQ_OP_DISCARD))
                cmd->use_aio = true;
        else
                cmd->use_aio = false;
@@ -1672,7 +1672,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static void loop_handle_cmd(struct loop_cmd *cmd)
 {
-       const bool write = cmd->rq->cmd_flags & REQ_WRITE;
+       const bool write = op_is_write(req_op(cmd->rq));
        struct loop_device *lo = cmd->rq->q->queuedata;
        int ret = 0;
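
With the write/flush/discard bits gone from the request flags, loop dispatches on req_op() instead. A rough stand-alone sketch of that dispatch shape, using stand-in constants rather than the kernel API:

#include <stdio.h>

/* Simplified stand-ins for the kernel's request operations. */
enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_FLUSH, REQ_OP_DISCARD };

static int op_is_write(enum req_op op)
{
        /* Stand-in convention: everything except READ is write-direction,
         * which matches how the loop hunks above branch on it. */
        return op != REQ_OP_READ;
}

/* Rough shape of do_req_filebacked() after the conversion: the handler
 * is picked from the op, not from REQ_WRITE/REQ_FLUSH/REQ_DISCARD bits. */
static const char *dispatch(enum req_op op)
{
        if (op_is_write(op)) {
                if (op == REQ_OP_FLUSH)
                        return "flush backing file";
                else if (op == REQ_OP_DISCARD)
                        return "punch hole";
                else
                        return "write pages";
        }
        return "read pages";
}

int main(void)
{
        enum req_op ops[] = { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_FLUSH, REQ_OP_DISCARD };

        for (unsigned int i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
                printf("%d -> %s\n", ops[i], dispatch(ops[i]));
        return 0;
}
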
 
index 6053e4659fa2f3d014ca125644992a3d676879e0..8e3e708cb9ee9e85fbf449bec55647541fb6b848 100644 (file)
@@ -3765,7 +3765,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
                        return -ENODATA;
        }
 
-       if (rq->cmd_flags & REQ_DISCARD) {
+       if (req_op(rq) == REQ_OP_DISCARD) {
                int err;
 
                err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
index 6a48ed41963ff9d215665b1423d20567fc50ee06..6f55b262b5ced7c371115091e248421248b35c18 100644 (file)
@@ -282,9 +282,9 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 
        if (req->cmd_type == REQ_TYPE_DRV_PRIV)
                type = NBD_CMD_DISC;
-       else if (req->cmd_flags & REQ_DISCARD)
+       else if (req_op(req) == REQ_OP_DISCARD)
                type = NBD_CMD_TRIM;
-       else if (req->cmd_flags & REQ_FLUSH)
+       else if (req_op(req) == REQ_OP_FLUSH)
                type = NBD_CMD_FLUSH;
        else if (rq_data_dir(req) == WRITE)
                type = NBD_CMD_WRITE;
index c2854a2bfdb0bd1027fb4f10f2c39c8761d7d709..92900f5f0b4725ebc1d1edbf3da30138a3e2979b 100644 (file)
@@ -321,7 +321,7 @@ static void osdblk_rq_fn(struct request_queue *q)
                 * driver-specific, etc.
                 */
 
-               do_flush = rq->cmd_flags & REQ_FLUSH;
+               do_flush = (req_op(rq) == REQ_OP_FLUSH);
                do_write = (rq_data_dir(rq) == WRITE);
 
                if (!do_flush) { /* osd_flush does not use a bio */
index d06c62eccdf00b81241fba6b3e6d3a05e4e4c923..9393bc730acf96e83f1ae9412f5304c27c2ec8ae 100644 (file)
@@ -1074,7 +1074,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
                        BUG();
 
                atomic_inc(&pkt->io_wait);
-               bio->bi_rw = READ;
+               bio_set_op_attrs(bio, REQ_OP_READ, 0);
                pkt_queue_bio(pd, bio);
                frames_read++;
        }
@@ -1336,7 +1336,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 
        /* Start the write request */
        atomic_set(&pkt->io_wait, 1);
-       pkt->w_bio->bi_rw = WRITE;
+       bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
        pkt_queue_bio(pd, pkt->w_bio);
 }
 
index 4b7e405830d7ec097fcf26a51cb5edc268a74812..acb44529c05e8e6a50d8ee707000042a5fc817d0 100644 (file)
@@ -196,7 +196,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
        dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
 
        while ((req = blk_fetch_request(q))) {
-               if (req->cmd_flags & REQ_FLUSH) {
+               if (req_op(req) == REQ_OP_FLUSH) {
                        if (ps3disk_submit_flush_request(dev, req))
                                break;
                } else if (req->cmd_type == REQ_TYPE_FS) {
@@ -256,7 +256,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
                return IRQ_HANDLED;
        }
 
-       if (req->cmd_flags & REQ_FLUSH) {
+       if (req_op(req) == REQ_OP_FLUSH) {
                read = 0;
                op = "flush";
        } else {
index 81666a56415e2bc6f960a3f5ac2220108c2b7ed1..450662055d97338996720d4aa93efa0aefc16e65 100644 (file)
@@ -3286,9 +3286,9 @@ static void rbd_queue_workfn(struct work_struct *work)
                goto err;
        }
 
-       if (rq->cmd_flags & REQ_DISCARD)
+       if (req_op(rq) == REQ_OP_DISCARD)
                op_type = OBJ_OP_DISCARD;
-       else if (rq->cmd_flags & REQ_WRITE)
+       else if (req_op(rq) == REQ_OP_WRITE)
                op_type = OBJ_OP_WRITE;
        else
                op_type = OBJ_OP_READ;
index cf8cd293abb51d338cd6d0ae7762070c80be99a2..5a20385f87d045af1704205dea18b0aa9e7a1260 100644 (file)
@@ -705,7 +705,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                dma_cnt[i] = 0;
        }
 
-       if (bio->bi_rw & REQ_DISCARD) {
+       if (bio_op(bio) == REQ_OP_DISCARD) {
                bv_len = bio->bi_iter.bi_size;
 
                while (bv_len > 0) {
index 910e065918af13d2536f6fe6736a4fad3303e243..5c07a23e2adabf51ae2ed7463d6a27fdd9e0bf25 100644 (file)
@@ -597,7 +597,7 @@ static void skd_request_fn(struct request_queue *q)
                data_dir = rq_data_dir(req);
                io_flags = req->cmd_flags;
 
-               if (io_flags & REQ_FLUSH)
+               if (req_op(req) == REQ_OP_FLUSH)
                        flush++;
 
                if (io_flags & REQ_FUA)
index 7939b9f8744135d137e222c6593cc0a96b94c18a..4b3ba74e9d22f3ec90ad4d8b39cdb0fcfac2ab83 100644 (file)
@@ -462,7 +462,7 @@ static void process_page(unsigned long data)
                                le32_to_cpu(desc->local_addr)>>9,
                                le32_to_cpu(desc->transfer_size));
                        dump_dmastat(card, control);
-               } else if ((bio->bi_rw & REQ_WRITE) &&
+               } else if (op_is_write(bio_op(bio)) &&
                           le32_to_cpu(desc->local_addr) >> 9 ==
                                card->init_size) {
                        card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
index 42758b52768cf894119e4de41902b4de7ecdd9d3..18e4069dd24b2fe7d3fdd80e290994832fbe55de 100644 (file)
@@ -172,7 +172,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
        vbr->req = req;
-       if (req->cmd_flags & REQ_FLUSH) {
+       if (req_op(req) == REQ_OP_FLUSH) {
                vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
                vbr->out_hdr.sector = 0;
                vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
index 4809c1501d7eda82081d922fa1944d8cd7c76c84..4a80ee752597f02adfc8096e5d14ce9a27e03f0e 100644 (file)
@@ -501,7 +501,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;
 
-       if ((operation != READ) && vbd->readonly)
+       if ((operation != REQ_OP_READ) && vbd->readonly)
                goto out;
 
        if (likely(req->nr_sects)) {
@@ -1014,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
        preq.sector_number = req->u.discard.sector_number;
        preq.nr_sects      = req->u.discard.nr_sectors;
 
-       err = xen_vbd_translate(&preq, blkif, WRITE);
+       err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
        if (err) {
                pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
                        preq.sector_number,
@@ -1229,6 +1229,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        struct bio **biolist = pending_req->biolist;
        int i, nbio = 0;
        int operation;
+       int operation_flags = 0;
        struct blk_plug plug;
        bool drain = false;
        struct grant_page **pages = pending_req->segments;
@@ -1247,17 +1248,19 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        switch (req_operation) {
        case BLKIF_OP_READ:
                ring->st_rd_req++;
-               operation = READ;
+               operation = REQ_OP_READ;
                break;
        case BLKIF_OP_WRITE:
                ring->st_wr_req++;
-               operation = WRITE_ODIRECT;
+               operation = REQ_OP_WRITE;
+               operation_flags = WRITE_ODIRECT;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                drain = true;
        case BLKIF_OP_FLUSH_DISKCACHE:
                ring->st_f_req++;
-               operation = WRITE_FLUSH;
+               operation = REQ_OP_WRITE;
+               operation_flags = WRITE_FLUSH;
                break;
        default:
                operation = 0; /* make gcc happy */
@@ -1269,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        nseg = req->operation == BLKIF_OP_INDIRECT ?
               req->u.indirect.nr_segments : req->u.rw.nr_segments;
 
-       if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
+       if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
            unlikely((req->operation != BLKIF_OP_INDIRECT) &&
                     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
            unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1310,7 +1313,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 
        if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
                pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
-                        operation == READ ? "read" : "write",
+                        operation == REQ_OP_READ ? "read" : "write",
                         preq.sector_number,
                         preq.sector_number + preq.nr_sects,
                         ring->blkif->vbd.pdevice);
@@ -1369,6 +1372,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_iter.bi_sector  = preq.sector_number;
+                       bio_set_op_attrs(bio, operation, operation_flags);
                }
 
                preq.sector_number += seg[i].nsec;
@@ -1376,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 
        /* This will be hit if the operation was a flush or discard. */
        if (!bio) {
-               BUG_ON(operation != WRITE_FLUSH);
+               BUG_ON(operation_flags != WRITE_FLUSH);
 
                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
@@ -1386,20 +1390,21 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
+               bio_set_op_attrs(bio, operation, operation_flags);
        }
 
        atomic_set(&pending_req->pendcnt, nbio);
        blk_start_plug(&plug);
 
        for (i = 0; i < nbio; i++)
-               submit_bio(operation, biolist[i]);
+               submit_bio(biolist[i]);
 
        /* Let the I/Os go.. */
        blk_finish_plug(&plug);
 
-       if (operation == READ)
+       if (operation == REQ_OP_READ)
                ring->st_rd_sect += preq.nr_sects;
-       else if (operation & WRITE)
+       else if (operation == REQ_OP_WRITE)
                ring->st_wr_sect += preq.nr_sects;
 
        return 0;
index fcc5b4e0aef29ed8d5e863e0277687e1abe700e6..da05d3f9bad206b193135e44b7c979894b50e42a 100644 (file)
@@ -196,6 +196,7 @@ struct blkfront_info
        unsigned int nr_ring_pages;
        struct request_queue *rq;
        unsigned int feature_flush;
+       unsigned int feature_fua;
        unsigned int feature_discard:1;
        unsigned int feature_secdiscard:1;
        unsigned int discard_granularity;
@@ -746,7 +747,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
                 * The indirect operation can only be a BLKIF_OP_READ or
                 * BLKIF_OP_WRITE
                 */
-               BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
+               BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
                ring_req->operation = BLKIF_OP_INDIRECT;
                ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
                        BLKIF_OP_WRITE : BLKIF_OP_READ;
@@ -758,7 +759,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
                ring_req->u.rw.handle = info->handle;
                ring_req->operation = rq_data_dir(req) ?
                        BLKIF_OP_WRITE : BLKIF_OP_READ;
-               if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+               if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
                        /*
                         * Ideally we can do an unordered flush-to-disk.
                         * In case the backend only supports barriers, use that.
@@ -766,19 +767,14 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
                         * implement it the same way.  (It's also a FLUSH+FUA,
                         * since it is guaranteed ordered WRT previous writes.)
                         */
-                       switch (info->feature_flush &
-                               ((REQ_FLUSH|REQ_FUA))) {
-                       case REQ_FLUSH|REQ_FUA:
+                       if (info->feature_flush && info->feature_fua)
                                ring_req->operation =
                                        BLKIF_OP_WRITE_BARRIER;
-                               break;
-                       case REQ_FLUSH:
+                       else if (info->feature_flush)
                                ring_req->operation =
                                        BLKIF_OP_FLUSH_DISKCACHE;
-                               break;
-                       default:
+                       else
                                ring_req->operation = 0;
-                       }
                }
                ring_req->u.rw.nr_segments = num_grant;
                if (unlikely(require_extra_req)) {
@@ -847,7 +843,8 @@ static int blkif_queue_request(struct request *req, struct blkfront_ring_info *r
        if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
                return 1;
 
-       if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
+       if (unlikely(req_op(req) == REQ_OP_DISCARD ||
+                    req->cmd_flags & REQ_SECURE))
                return blkif_queue_discard_req(req, rinfo);
        else
                return blkif_queue_rw_req(req, rinfo);
@@ -867,10 +864,10 @@ static inline bool blkif_request_flush_invalid(struct request *req,
                                               struct blkfront_info *info)
 {
        return ((req->cmd_type != REQ_TYPE_FS) ||
-               ((req->cmd_flags & REQ_FLUSH) &&
-                !(info->feature_flush & REQ_FLUSH)) ||
+               ((req_op(req) == REQ_OP_FLUSH) &&
+                !info->feature_flush) ||
                ((req->cmd_flags & REQ_FUA) &&
-                !(info->feature_flush & REQ_FUA)));
+                !info->feature_fua));
 }
 
 static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -981,24 +978,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
        return 0;
 }
 
-static const char *flush_info(unsigned int feature_flush)
+static const char *flush_info(struct blkfront_info *info)
 {
-       switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
-       case REQ_FLUSH|REQ_FUA:
+       if (info->feature_flush && info->feature_fua)
                return "barrier: enabled;";
-       case REQ_FLUSH:
+       else if (info->feature_flush)
                return "flush diskcache: enabled;";
-       default:
+       else
                return "barrier or flush: disabled;";
-       }
 }
 
 static void xlvbd_flush(struct blkfront_info *info)
 {
-       blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH,
-                               info->feature_flush & REQ_FUA);
+       blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
+                             info->feature_fua ? true : false);
        pr_info("blkfront: %s: %s %s %s %s %s\n",
-               info->gd->disk_name, flush_info(info->feature_flush),
+               info->gd->disk_name, flush_info(info),
                "persistent grants:", info->feature_persistent ?
                "enabled;" : "disabled;", "indirect descriptors:",
                info->max_indirect_segments ? "enabled;" : "disabled;");
@@ -1617,6 +1612,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                        if (unlikely(error)) {
                                if (error == -EOPNOTSUPP)
                                        error = 0;
+                               info->feature_fua = 0;
                                info->feature_flush = 0;
                                xlvbd_flush(info);
                        }
@@ -2064,7 +2060,7 @@ static int blkif_recover(struct blkfront_info *info)
                                bio_trim(cloned_bio, offset, size);
                                cloned_bio->bi_private = split_bio;
                                cloned_bio->bi_end_io = split_bio_end;
-                               submit_bio(cloned_bio->bi_rw, cloned_bio);
+                               submit_bio(cloned_bio);
                        }
                        /*
                         * Now we have to wait for all those smaller bios to
@@ -2073,7 +2069,7 @@ static int blkif_recover(struct blkfront_info *info)
                        continue;
                }
                /* We don't need to split this bio */
-               submit_bio(bio->bi_rw, bio);
+               submit_bio(bio);
        }
 
        return 0;
@@ -2108,8 +2104,10 @@ static int blkfront_resume(struct xenbus_device *dev)
                        /*
                         * Get the bios in the request so we can re-queue them.
                         */
-                       if (shadow[j].request->cmd_flags &
-                                       (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+                       if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+                           req_op(shadow[j].request) == REQ_OP_DISCARD ||
+                           shadow[j].request->cmd_flags & (REQ_FUA | REQ_SECURE)) {
+                           
                                /*
                                 * Flush operations don't contain bios, so
                                 * we need to requeue the whole request
@@ -2298,6 +2296,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
        unsigned int indirect_segments;
 
        info->feature_flush = 0;
+       info->feature_fua = 0;
 
        err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                        "feature-barrier", "%d", &barrier,
@@ -2310,8 +2309,11 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
         *
         * If there are barriers, then we use flush.
         */
-       if (!err && barrier)
-               info->feature_flush = REQ_FLUSH | REQ_FUA;
+       if (!err && barrier) {
+               info->feature_flush = 1;
+               info->feature_fua = 1;
+       }
+
        /*
         * And if there is "feature-flush-cache" use that above
         * barriers.
@@ -2320,8 +2322,10 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
                        "feature-flush-cache", "%d", &flush,
                        NULL);
 
-       if (!err && flush)
-               info->feature_flush = REQ_FLUSH;
+       if (!err && flush) {
+               info->feature_flush = 1;
+               info->feature_fua = 0;
+       }
 
        err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                        "feature-discard", "%d", &discard,
index 8fcad8b761f14b5686644317d3da018d9bc326e0..e5e5d19f2172b78575f2dc61b3ebadd6cc8f08b6 100644 (file)
@@ -874,7 +874,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
        offset = (bio->bi_iter.bi_sector &
                  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
-       if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+       if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
                zram_bio_discard(zram, index, offset, bio);
                bio_endio(bio);
                return;
index 474173eb31bb345c86bf31fcb195a44432ee14ad..5887a7a09e3702a1d13f14e7adf9e04f1eea4250 100644 (file)
@@ -459,9 +459,6 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
           layer. the packet must be complete, as we do not
           touch it at all. */
 
-       if (cgc->data_direction == CGC_DATA_WRITE)
-               flags |= REQ_WRITE;
-
        if (cgc->sense)
                memset(cgc->sense, 0, sizeof(struct request_sense));
 
index 05dbcce70b0e33736c9ac33f9b5103c40d6a45cc..e378ef70ed638f040c4e25bb4765f6bb86c1cfd9 100644 (file)
@@ -431,7 +431,7 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
        ide_drive_t *drive = q->queuedata;
        struct ide_cmd *cmd;
 
-       if (!(rq->cmd_flags & REQ_FLUSH))
+       if (req_op(rq) != REQ_OP_FLUSH)
                return BLKPREP_OK;
 
        if (rq->special) {
index 2fb5350c54105000d57bf1672b101e3e40fdd9ca..f079d8d1d8569a43a035adebbba2a63ee8ca5379 100644 (file)
@@ -206,7 +206,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
        memcpy(rq->cmd, pc->c, 12);
 
        pc->rq = rq;
-       if (rq->cmd_flags & REQ_WRITE)
+       if (cmd == WRITE)
                pc->flags |= PC_FLAG_WRITING;
 
        pc->flags |= PC_FLAG_DMA_OK;
index 2103e97a974f6e3598f38bceea3852577a52f1b9..de86d72dcdf0bd90b63c317dd6de6e6bc736c062 100644 (file)
@@ -342,7 +342,7 @@ try:
 
                /* Perform read to do GC */
                bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
-               bio->bi_rw = READ;
+               bio_set_op_attrs(bio,  REQ_OP_READ, 0);
                bio->bi_private = &wait;
                bio->bi_end_io = rrpc_end_sync_bio;
 
@@ -364,7 +364,7 @@ try:
                reinit_completion(&wait);
 
                bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
-               bio->bi_rw = WRITE;
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                bio->bi_private = &wait;
                bio->bi_end_io = rrpc_end_sync_bio;
 
@@ -908,7 +908,7 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
        struct nvm_rq *rqd;
        int err;
 
-       if (bio->bi_rw & REQ_DISCARD) {
+       if (bio_op(bio) == REQ_OP_DISCARD) {
                rrpc_discard(rrpc, bio);
                return BLK_QC_T_NONE;
        }
index eab505ee002758c4a895732eb9e4e80a340ce556..76f7534d1dd1586810341283c0eb82deabcdb405 100644 (file)
@@ -294,10 +294,10 @@ static void bch_btree_node_read(struct btree *b)
        closure_init_stack(&cl);
 
        bio = bch_bbio_alloc(b->c);
-       bio->bi_rw      = REQ_META|READ_SYNC;
        bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
        bio->bi_end_io  = btree_node_read_endio;
        bio->bi_private = &cl;
+       bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
 
        bch_bio_map(bio, b->keys.set[0].data);
 
@@ -396,8 +396,8 @@ static void do_btree_node_write(struct btree *b)
 
        b->bio->bi_end_io       = btree_node_write_endio;
        b->bio->bi_private      = cl;
-       b->bio->bi_rw           = REQ_META|WRITE_SYNC|REQ_FUA;
        b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
+       bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
        bch_bio_map(b->bio, i);
 
        /*
index 8b1f1d5c18198f078dea917f40303c98fce1ee94..c28df164701e7b4ec472d676e7285ef40e5a30c4 100644 (file)
@@ -52,9 +52,10 @@ void bch_btree_verify(struct btree *b)
        bio->bi_bdev            = PTR_CACHE(b->c, &b->key, 0)->bdev;
        bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
        bio->bi_iter.bi_size    = KEY_SIZE(&v->key) << 9;
+       bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
        bch_bio_map(bio, sorted);
 
-       submit_bio_wait(REQ_META|READ_SYNC, bio);
+       submit_bio_wait(bio);
        bch_bbio_free(bio, b->c);
 
        memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);
@@ -113,11 +114,12 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
        check = bio_clone(bio, GFP_NOIO);
        if (!check)
                return;
+       bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);
 
        if (bio_alloc_pages(check, GFP_NOIO))
                goto out_put;
 
-       submit_bio_wait(READ_SYNC, check);
+       submit_bio_wait(check);
 
        bio_for_each_segment(bv, bio, iter) {
                void *p1 = kmap_atomic(bv.bv_page);
index 86a0bb87124e3849df567a49d0de6afbfe1a7175..fd885cc2afad4a5caae3042f73c6d862db2e3bf1 100644 (file)
@@ -111,7 +111,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct cache *ca = PTR_CACHE(c, &b->key, 0);
 
-       unsigned threshold = bio->bi_rw & REQ_WRITE
+       unsigned threshold = op_is_write(bio_op(bio))
                ? c->congested_write_threshold_us
                : c->congested_read_threshold_us;
 
index 29eba7219b01a17fd013109eff759b06d6fac34e..6925023e12d45656ce6802ea5237c0069b9beb29 100644 (file)
@@ -54,11 +54,11 @@ reread:             left = ca->sb.bucket_size - offset;
                bio_reset(bio);
                bio->bi_iter.bi_sector  = bucket + offset;
                bio->bi_bdev    = ca->bdev;
-               bio->bi_rw      = READ;
                bio->bi_iter.bi_size    = len << 9;
 
                bio->bi_end_io  = journal_read_endio;
                bio->bi_private = &cl;
+               bio_set_op_attrs(bio, REQ_OP_READ, 0);
                bch_bio_map(bio, data);
 
                closure_bio_submit(bio, &cl);
@@ -418,7 +418,7 @@ static void journal_discard_work(struct work_struct *work)
        struct journal_device *ja =
                container_of(work, struct journal_device, discard_work);
 
-       submit_bio(0, &ja->discard_bio);
+       submit_bio(&ja->discard_bio);
 }
 
 static void do_journal_discard(struct cache *ca)
@@ -449,10 +449,10 @@ static void do_journal_discard(struct cache *ca)
                atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
                bio_init(bio);
+               bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
                bio->bi_iter.bi_sector  = bucket_to_sector(ca->set,
                                                ca->sb.d[ja->discard_idx]);
                bio->bi_bdev            = ca->bdev;
-               bio->bi_rw              = REQ_WRITE|REQ_DISCARD;
                bio->bi_max_vecs        = 1;
                bio->bi_io_vec          = bio->bi_inline_vecs;
                bio->bi_iter.bi_size    = bucket_bytes(ca);
@@ -626,11 +626,12 @@ static void journal_write_unlocked(struct closure *cl)
                bio_reset(bio);
                bio->bi_iter.bi_sector  = PTR_OFFSET(k, i);
                bio->bi_bdev    = ca->bdev;
-               bio->bi_rw      = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
                bio->bi_iter.bi_size = sectors << 9;
 
                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
+               bio_set_op_attrs(bio, REQ_OP_WRITE,
+                                REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
                bch_bio_map(bio, w->data);
 
                trace_bcache_journal_write(bio);
index b929fc944e9c605a523766d20e711747a5f31fd0..1881319f2298f6d6e4ee901e5edf331924b96359 100644 (file)
@@ -163,7 +163,7 @@ static void read_moving(struct cache_set *c)
                moving_init(io);
                bio = &io->bio.bio;
 
-               bio->bi_rw      = READ;
+               bio_set_op_attrs(bio, REQ_OP_READ, 0);
                bio->bi_end_io  = read_moving_endio;
 
                if (bio_alloc_pages(bio, GFP_KERNEL))
index 25fa8445bb2422ba29f745aa2c0b68e37b3ec326..69f16f43f8ab12fda3ddb9a2cfefa1d2abd1bf7b 100644 (file)
@@ -205,10 +205,10 @@ static void bch_data_insert_start(struct closure *cl)
                return bch_data_invalidate(cl);
 
        /*
-        * Journal writes are marked REQ_FLUSH; if the original write was a
+        * Journal writes are marked REQ_PREFLUSH; if the original write was a
         * flush, it'll wait on the journal write.
         */
-       bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
+       bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
 
        do {
                unsigned i;
@@ -253,7 +253,7 @@ static void bch_data_insert_start(struct closure *cl)
                trace_bcache_cache_insert(k);
                bch_keylist_push(&op->insert_keys);
 
-               n->bi_rw |= REQ_WRITE;
+               bio_set_op_attrs(n, REQ_OP_WRITE, 0);
                bch_submit_bbio(n, op->c, k, 0);
        } while (n != bio);
 
@@ -378,12 +378,12 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
-           (bio->bi_rw & REQ_DISCARD))
+           (bio_op(bio) == REQ_OP_DISCARD))
                goto skip;
 
        if (mode == CACHE_MODE_NONE ||
            (mode == CACHE_MODE_WRITEAROUND &&
-            (bio->bi_rw & REQ_WRITE)))
+            op_is_write(bio_op(bio))))
                goto skip;
 
        if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
@@ -404,7 +404,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
        if (!congested &&
            mode == CACHE_MODE_WRITEBACK &&
-           (bio->bi_rw & REQ_WRITE) &&
+           op_is_write(bio_op(bio)) &&
            (bio->bi_rw & REQ_SYNC))
                goto rescale;
 
@@ -657,7 +657,7 @@ static inline struct search *search_alloc(struct bio *bio,
        s->cache_miss           = NULL;
        s->d                    = d;
        s->recoverable          = 1;
-       s->write                = (bio->bi_rw & REQ_WRITE) != 0;
+       s->write                = op_is_write(bio_op(bio));
        s->read_dirty_data      = 0;
        s->start_time           = jiffies;
 
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
        s->iop.write_prio       = 0;
        s->iop.error            = 0;
        s->iop.flags            = 0;
-       s->iop.flush_journal    = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+       s->iop.flush_journal    = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
        s->iop.wq               = bcache_wq;
 
        return s;
@@ -899,7 +899,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
         * But check_overlapping drops dirty keys for which io hasn't started,
         * so we still want to call it.
         */
-       if (bio->bi_rw & REQ_DISCARD)
+       if (bio_op(bio) == REQ_OP_DISCARD)
                s->iop.bypass = true;
 
        if (should_writeback(dc, s->orig_bio,
@@ -913,22 +913,22 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                s->iop.bio = s->orig_bio;
                bio_get(s->iop.bio);
 
-               if (!(bio->bi_rw & REQ_DISCARD) ||
+               if ((bio_op(bio) != REQ_OP_DISCARD) ||
                    blk_queue_discard(bdev_get_queue(dc->bdev)))
                        closure_bio_submit(bio, cl);
        } else if (s->iop.writeback) {
                bch_writeback_add(dc);
                s->iop.bio = bio;
 
-               if (bio->bi_rw & REQ_FLUSH) {
+               if (bio->bi_rw & REQ_PREFLUSH) {
                        /* Also need to send a flush to the backing device */
                        struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
                                                             dc->disk.bio_split);
 
-                       flush->bi_rw    = WRITE_FLUSH;
                        flush->bi_bdev  = bio->bi_bdev;
                        flush->bi_end_io = request_endio;
                        flush->bi_private = cl;
+                       bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
 
                        closure_bio_submit(flush, cl);
                }
@@ -992,7 +992,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
                                cached_dev_read(dc, s);
                }
        } else {
-               if ((bio->bi_rw & REQ_DISCARD) &&
+               if ((bio_op(bio) == REQ_OP_DISCARD) &&
                    !blk_queue_discard(bdev_get_queue(dc->bdev)))
                        bio_endio(bio);
                else
@@ -1103,7 +1103,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
                                        &KEY(d->id, bio->bi_iter.bi_sector, 0),
                                        &KEY(d->id, bio_end_sector(bio), 0));
 
-               s->iop.bypass           = (bio->bi_rw & REQ_DISCARD) != 0;
+               s->iop.bypass           = (bio_op(bio) == REQ_OP_DISCARD) != 0;
                s->iop.writeback        = true;
                s->iop.bio              = bio;
 
index f5dbb4e884d893240472ff7dff17cc3489936c66..c944daf75dd000bdc1c5d5a64254f8e58d74bfc9 100644 (file)
@@ -212,8 +212,8 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
        unsigned i;
 
        bio->bi_iter.bi_sector  = SB_SECTOR;
-       bio->bi_rw              = REQ_SYNC|REQ_META;
        bio->bi_iter.bi_size    = SB_SIZE;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
        bch_bio_map(bio, NULL);
 
        out->offset             = cpu_to_le64(sb->offset);
@@ -238,7 +238,7 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
        pr_debug("ver %llu, flags %llu, seq %llu",
                 sb->version, sb->flags, sb->seq);
 
-       submit_bio(REQ_WRITE, bio);
+       submit_bio(bio);
 }
 
 static void bch_write_bdev_super_unlock(struct closure *cl)
@@ -333,7 +333,7 @@ static void uuid_io_unlock(struct closure *cl)
        up(&c->uuid_write_mutex);
 }
 
-static void uuid_io(struct cache_set *c, unsigned long rw,
+static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
                    struct bkey *k, struct closure *parent)
 {
        struct closure *cl = &c->uuid_write;
@@ -348,21 +348,22 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
        for (i = 0; i < KEY_PTRS(k); i++) {
                struct bio *bio = bch_bbio_alloc(c);
 
-               bio->bi_rw      = REQ_SYNC|REQ_META|rw;
+               bio->bi_rw      = REQ_SYNC|REQ_META|op_flags;
                bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
                bio->bi_end_io  = uuid_endio;
                bio->bi_private = cl;
+               bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
                bch_bio_map(bio, c->uuids);
 
                bch_submit_bbio(bio, c, k, i);
 
-               if (!(rw & WRITE))
+               if (op != REQ_OP_WRITE)
                        break;
        }
 
        bch_extent_to_text(buf, sizeof(buf), k);
-       pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
+       pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);
 
        for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
                if (!bch_is_zero(u->uuid, 16))
@@ -381,7 +382,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
                return "bad uuid pointer";
 
        bkey_copy(&c->uuid_bucket, k);
-       uuid_io(c, READ_SYNC, k, cl);
+       uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
 
        if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
                struct uuid_entry_v0    *u0 = (void *) c->uuids;
@@ -426,7 +427,7 @@ static int __uuid_write(struct cache_set *c)
                return 1;
 
        SET_KEY_SIZE(&k.key, c->sb.bucket_size);
-       uuid_io(c, REQ_WRITE, &k.key, &cl);
+       uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
        closure_sync(&cl);
 
        bkey_copy(&c->uuid_bucket, &k.key);
@@ -498,7 +499,8 @@ static void prio_endio(struct bio *bio)
        closure_put(&ca->prio);
 }
 
-static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
+static void prio_io(struct cache *ca, uint64_t bucket, int op,
+                   unsigned long op_flags)
 {
        struct closure *cl = &ca->prio;
        struct bio *bio = bch_bbio_alloc(ca->set);
@@ -507,11 +509,11 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
 
        bio->bi_iter.bi_sector  = bucket * ca->sb.bucket_size;
        bio->bi_bdev            = ca->bdev;
-       bio->bi_rw              = REQ_SYNC|REQ_META|rw;
        bio->bi_iter.bi_size    = bucket_bytes(ca);
 
        bio->bi_end_io  = prio_endio;
        bio->bi_private = ca;
+       bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
        bch_bio_map(bio, ca->disk_buckets);
 
        closure_bio_submit(bio, &ca->prio);
@@ -557,7 +559,7 @@ void bch_prio_write(struct cache *ca)
                BUG_ON(bucket == -1);
 
                mutex_unlock(&ca->set->bucket_lock);
-               prio_io(ca, bucket, REQ_WRITE);
+               prio_io(ca, bucket, REQ_OP_WRITE, 0);
                mutex_lock(&ca->set->bucket_lock);
 
                ca->prio_buckets[i] = bucket;
@@ -599,7 +601,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
                        ca->prio_last_buckets[bucket_nr] = bucket;
                        bucket_nr++;
 
-                       prio_io(ca, bucket, READ_SYNC);
+                       prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
 
                        if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
                                pr_warn("bad csum reading priorities");
index 60123677b382a6c55fb00d734ca433adb7770f7a..d9fd2a62e5f6f926a898d5550805f9013efe477f 100644 (file)
@@ -182,7 +182,7 @@ static void write_dirty(struct closure *cl)
        struct keybuf_key *w = io->bio.bi_private;
 
        dirty_init(w);
-       io->bio.bi_rw           = WRITE;
+       bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
        io->bio.bi_iter.bi_sector = KEY_START(&w->key);
        io->bio.bi_bdev         = io->dc->bdev;
        io->bio.bi_end_io       = dirty_endio;
@@ -251,10 +251,10 @@ static void read_dirty(struct cached_dev *dc)
                io->dc          = dc;
 
                dirty_init(w);
+               bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
                io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
                io->bio.bi_bdev         = PTR_CACHE(dc->disk.c,
                                                    &w->key, 0)->bdev;
-               io->bio.bi_rw           = READ;
                io->bio.bi_end_io       = read_dirty_endio;
 
                if (bio_alloc_pages(&io->bio, GFP_KERNEL))
index d8129ec93ebdebea943aac2b43147a786f445055..6fff794e0c723c0d8e11d1f1899ba109c0bcef29 100644 (file)
@@ -162,7 +162,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
 
                if (sync_page_io(rdev, target,
                                 roundup(size, bdev_logical_block_size(rdev->bdev)),
-                                page, READ, true)) {
+                                page, REQ_OP_READ, 0, true)) {
                        page->index = index;
                        return 0;
                }
@@ -297,7 +297,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
                        atomic_inc(&bitmap->pending_writes);
                        set_buffer_locked(bh);
                        set_buffer_mapped(bh);
-                       submit_bh(WRITE | REQ_SYNC, bh);
+                       submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
                        bh = bh->b_this_page;
                }
 
@@ -392,7 +392,7 @@ static int read_page(struct file *file, unsigned long index,
                        atomic_inc(&bitmap->pending_writes);
                        set_buffer_locked(bh);
                        set_buffer_mapped(bh);
-                       submit_bh(READ, bh);
+                       submit_bh(REQ_OP_READ, 0, bh);
                }
                block++;
                bh = bh->b_this_page;
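
In the md bitmap code the same op/flags split reaches submit_bh(): its first argument is now the REQ_OP_* operation and the second the flags, rather than a single combined rw word. An illustrative sketch mirroring the write_page() hunk above (the wrapper itself is hypothetical):

#include <linux/buffer_head.h>

/* Illustrative: queue a buffer-head write; the caller is assumed to have
 * set bh->b_end_io, as write_page() in the hunk above does. */
static void example_submit_bh_write(struct buffer_head *bh)
{
        set_buffer_locked(bh);
        set_buffer_mapped(bh);
        submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);   /* was submit_bh(WRITE | REQ_SYNC, bh) */
}
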
index cd77216beff166651c67b0881b747dcdd720c9e7..6571c81465e1867772d042316a42042afb2a702c 100644 (file)
@@ -574,7 +574,8 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
 {
        int r;
        struct dm_io_request io_req = {
-               .bi_rw = rw,
+               .bi_op = rw,
+               .bi_op_flags = 0,
                .notify.fn = dmio_complete,
                .notify.context = b,
                .client = b->c->dm_io,
@@ -634,6 +635,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
         * the dm_buffer's inline bio is local to bufio.
         */
        b->bio.bi_private = end_io;
+       bio_set_op_attrs(&b->bio, rw, 0);
 
        /*
         * We assume that if len >= PAGE_SIZE ptr is page-aligned.
@@ -660,7 +662,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
                ptr += PAGE_SIZE;
        } while (len > 0);
 
-       submit_bio(rw, &b->bio);
+       submit_bio(&b->bio);
 }
 
 static void submit_io(struct dm_buffer *b, int rw, sector_t block,
@@ -1326,7 +1328,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
 int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
        struct dm_io_request io_req = {
-               .bi_rw = WRITE_FLUSH,
+               .bi_op = REQ_OP_WRITE,
+               .bi_op_flags = WRITE_FLUSH,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
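
struct dm_io_request likewise splits its old bi_rw field into bi_op and bi_op_flags, so a composite value such as WRITE_FLUSH becomes REQ_OP_WRITE plus WRITE_FLUSH in the flags slot. A minimal sketch of an empty flush through dm_io(), modeled on the dm_bufio_issue_flush() hunk above (the wrapper name is hypothetical):

#include <linux/blk_types.h>
#include <linux/dm-io.h>

/* Hypothetical wrapper: issue an empty flush; a NULL notify.fn makes dm_io() synchronous. */
static int example_dm_io_flush(struct dm_io_client *client,
                               struct dm_io_region *region)
{
        struct dm_io_request io_req = {
                .bi_op        = REQ_OP_WRITE,
                .bi_op_flags  = WRITE_FLUSH,
                .mem.type     = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client       = client,
        };

        return dm_io(&io_req, 1, region, NULL);
}
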
index ee0510f9a85e9f36eb10d309bd2962ae3cfa0c2b..718744db62df7f37ddbdaaebecd87bd9b7707002 100644 (file)
@@ -788,7 +788,8 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 
        spin_lock_irqsave(&cache->lock, flags);
        if (cache->need_tick_bio &&
-           !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
+           !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) &&
+           bio_op(bio) != REQ_OP_DISCARD) {
                pb->tick = true;
                cache->need_tick_bio = false;
        }
@@ -829,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 
 static int bio_triggers_commit(struct cache *cache, struct bio *bio)
 {
-       return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+       return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
 }
 
 /*
@@ -851,7 +852,7 @@ static void inc_ds(struct cache *cache, struct bio *bio,
 static bool accountable_bio(struct cache *cache, struct bio *bio)
 {
        return ((bio->bi_bdev == cache->origin_dev->bdev) &&
-               !(bio->bi_rw & REQ_DISCARD));
+               bio_op(bio) != REQ_OP_DISCARD);
 }
 
 static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -1067,7 +1068,8 @@ static void dec_io_migrations(struct cache *cache)
 
 static bool discard_or_flush(struct bio *bio)
 {
-       return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
+       return bio_op(bio) == REQ_OP_DISCARD ||
+              bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
 }
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1612,8 +1614,8 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
                remap_to_cache(cache, bio, 0);
 
        /*
-        * REQ_FLUSH is not directed at any particular block so we don't
-        * need to inc_ds().  REQ_FUA's are split into a write + REQ_FLUSH
+        * REQ_PREFLUSH is not directed at any particular block so we don't
+        * need to inc_ds().  REQ_FUA's are split into a write + REQ_PREFLUSH
         * by dm-core.
         */
        issue(cache, bio);
@@ -1978,9 +1980,9 @@ static void process_deferred_bios(struct cache *cache)
 
                bio = bio_list_pop(&bios);
 
-               if (bio->bi_rw & REQ_FLUSH)
+               if (bio->bi_rw & REQ_PREFLUSH)
                        process_flush_bio(cache, bio);
-               else if (bio->bi_rw & REQ_DISCARD)
+               else if (bio_op(bio) == REQ_OP_DISCARD)
                        process_discard_bio(cache, &structs, bio);
                else
                        process_bio(cache, &structs, bio);
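
Discard is no longer a bi_rw flag but a distinct operation, so tests such as bio->bi_rw & REQ_DISCARD become bio_op(bio) == REQ_OP_DISCARD, while preflush and FUA remain flag tests. A small illustrative predicate, equivalent to the reworked discard_or_flush() above:

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Illustrative predicate: true for discards and for flush/FUA-marked bios. */
static bool example_discard_or_flush(struct bio *bio)
{
        return bio_op(bio) == REQ_OP_DISCARD ||          /* operation check     */
               (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA));  /* flags are unchanged */
}
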
index 4f3cb355494469e4abfc259a0042cafd187e36cc..96dd5d7e454ac01a5c062f1f30a4d5294be2822c 100644 (file)
@@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
        clone->bi_private = io;
        clone->bi_end_io  = crypt_endio;
        clone->bi_bdev    = cc->dev->bdev;
-       clone->bi_rw      = io->base_bio->bi_rw;
+       bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw);
 }
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1911,11 +1911,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
        struct crypt_config *cc = ti->private;
 
        /*
-        * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
-        * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
-        * - for REQ_DISCARD caller must use flush if IO ordering matters
+        * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
+        * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
+        * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
         */
-       if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
+       if (unlikely(bio->bi_rw & REQ_PREFLUSH ||
+           bio_op(bio) == REQ_OP_DISCARD)) {
                bio->bi_bdev = cc->dev->bdev;
                if (bio_sectors(bio))
                        bio->bi_iter.bi_sector = cc->start +
index 665bf32856182e73bb390e3446b8280982bae020..2faf49d8f4d768467dbf2c376bce6d784ea3fed8 100644 (file)
@@ -1540,9 +1540,9 @@ static int era_map(struct dm_target *ti, struct bio *bio)
        remap_to_origin(era, bio);
 
        /*
-        * REQ_FLUSH bios carry no data, so we're not interested in them.
+        * REQ_PREFLUSH bios carry no data, so we're not interested in them.
         */
-       if (!(bio->bi_rw & REQ_FLUSH) &&
+       if (!(bio->bi_rw & REQ_PREFLUSH) &&
            (bio_data_dir(bio) == WRITE) &&
            !metadata_current_marked(era->md, block)) {
                defer_bio(era, bio);
index b7341de87015e774c30e93324c5e05560b7f6c3f..29b99fb6a16a4c29670d947eb32ec40f1f270f98 100644 (file)
@@ -266,7 +266,7 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
                data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
 
                DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
-                       "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
+                       "(rw=%c bi_rw=%u bi_sector=%llu cur_bytes=%u)\n",
                        bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
                        (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
                        (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
index 06d426eb5a306b79c1ef9bbac00c9a36578226fb..0e225fd4a8d1142bc6eee5cb159afe1c771b3fc2 100644 (file)
@@ -278,8 +278,9 @@ static void km_dp_init(struct dpages *dp, void *data)
 /*-----------------------------------------------------------------
  * IO routines that accept a list of pages.
  *---------------------------------------------------------------*/
-static void do_region(int rw, unsigned region, struct dm_io_region *where,
-                     struct dpages *dp, struct io *io)
+static void do_region(int op, int op_flags, unsigned region,
+                     struct dm_io_region *where, struct dpages *dp,
+                     struct io *io)
 {
        struct bio *bio;
        struct page *page;
@@ -295,24 +296,25 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
        /*
         * Reject unsupported discard and write same requests.
         */
-       if (rw & REQ_DISCARD)
+       if (op == REQ_OP_DISCARD)
                special_cmd_max_sectors = q->limits.max_discard_sectors;
-       else if (rw & REQ_WRITE_SAME)
+       else if (op == REQ_OP_WRITE_SAME)
                special_cmd_max_sectors = q->limits.max_write_same_sectors;
-       if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
+       if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) &&
+           special_cmd_max_sectors == 0) {
                dec_count(io, region, -EOPNOTSUPP);
                return;
        }
 
        /*
-        * where->count may be zero if rw holds a flush and we need to
+        * where->count may be zero if op holds a flush and we need to
         * send a zero-sized flush.
         */
        do {
                /*
                 * Allocate a suitably sized-bio.
                 */
-               if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
+               if ((op == REQ_OP_DISCARD) || (op == REQ_OP_WRITE_SAME))
                        num_bvecs = 1;
                else
                        num_bvecs = min_t(int, BIO_MAX_PAGES,
@@ -322,13 +324,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
                bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
                bio->bi_bdev = where->bdev;
                bio->bi_end_io = endio;
+               bio_set_op_attrs(bio, op, op_flags);
                store_io_and_region_in_bio(bio, io, region);
 
-               if (rw & REQ_DISCARD) {
+               if (op == REQ_OP_DISCARD) {
                        num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
                        bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
                        remaining -= num_sectors;
-               } else if (rw & REQ_WRITE_SAME) {
+               } else if (op == REQ_OP_WRITE_SAME) {
                        /*
                         * WRITE SAME only uses a single page.
                         */
@@ -355,11 +358,11 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
                }
 
                atomic_inc(&io->count);
-               submit_bio(rw, bio);
+               submit_bio(bio);
        } while (remaining);
 }
 
-static void dispatch_io(int rw, unsigned int num_regions,
+static void dispatch_io(int op, int op_flags, unsigned int num_regions,
                        struct dm_io_region *where, struct dpages *dp,
                        struct io *io, int sync)
 {
@@ -369,7 +372,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
        BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
        if (sync)
-               rw |= REQ_SYNC;
+               op_flags |= REQ_SYNC;
 
        /*
         * For multiple regions we need to be careful to rewind
@@ -377,8 +380,8 @@ static void dispatch_io(int rw, unsigned int num_regions,
         */
        for (i = 0; i < num_regions; i++) {
                *dp = old_pages;
-               if (where[i].count || (rw & REQ_FLUSH))
-                       do_region(rw, i, where + i, dp, io);
+               if (where[i].count || (op_flags & REQ_PREFLUSH))
+                       do_region(op, op_flags, i, where + i, dp, io);
        }
 
        /*
@@ -402,13 +405,13 @@ static void sync_io_complete(unsigned long error, void *context)
 }
 
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
-                  struct dm_io_region *where, int rw, struct dpages *dp,
-                  unsigned long *error_bits)
+                  struct dm_io_region *where, int op, int op_flags,
+                  struct dpages *dp, unsigned long *error_bits)
 {
        struct io *io;
        struct sync_io sio;
 
-       if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
+       if (num_regions > 1 && !op_is_write(op)) {
                WARN_ON(1);
                return -EIO;
        }
@@ -425,7 +428,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
        io->vma_invalidate_address = dp->vma_invalidate_address;
        io->vma_invalidate_size = dp->vma_invalidate_size;
 
-       dispatch_io(rw, num_regions, where, dp, io, 1);
+       dispatch_io(op, op_flags, num_regions, where, dp, io, 1);
 
        wait_for_completion_io(&sio.wait);
 
@@ -436,12 +439,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 }
 
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
-                   struct dm_io_region *where, int rw, struct dpages *dp,
-                   io_notify_fn fn, void *context)
+                   struct dm_io_region *where, int op, int op_flags,
+                   struct dpages *dp, io_notify_fn fn, void *context)
 {
        struct io *io;
 
-       if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
+       if (num_regions > 1 && !op_is_write(op)) {
                WARN_ON(1);
                fn(1, context);
                return -EIO;
@@ -457,7 +460,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
        io->vma_invalidate_address = dp->vma_invalidate_address;
        io->vma_invalidate_size = dp->vma_invalidate_size;
 
-       dispatch_io(rw, num_regions, where, dp, io, 0);
+       dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
        return 0;
 }
 
@@ -480,7 +483,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
 
        case DM_IO_VMA:
                flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
-               if ((io_req->bi_rw & RW_MASK) == READ) {
+               if (io_req->bi_op == REQ_OP_READ) {
                        dp->vma_invalidate_address = io_req->mem.ptr.vma;
                        dp->vma_invalidate_size = size;
                }
@@ -518,10 +521,12 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 
        if (!io_req->notify.fn)
                return sync_io(io_req->client, num_regions, where,
-                              io_req->bi_rw, &dp, sync_error_bits);
+                              io_req->bi_op, io_req->bi_op_flags, &dp,
+                              sync_error_bits);
 
-       return async_io(io_req->client, num_regions, where, io_req->bi_rw,
-                       &dp, io_req->notify.fn, io_req->notify.context);
+       return async_io(io_req->client, num_regions, where, io_req->bi_op,
+                       io_req->bi_op_flags, &dp, io_req->notify.fn,
+                       io_req->notify.context);
 }
 EXPORT_SYMBOL(dm_io);
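
dm-io also stops masking with RW_MASK to decide direction; the op_is_write() helper answers that for any REQ_OP_* value (it is true for write-style operations such as write, write-same and discard). A short illustrative example of the multi-region sanity check used by sync_io()/async_io() above (the function name is hypothetical):

#include <linux/bio.h>
#include <linux/errno.h>

/* Illustrative: multiple regions are only allowed for write-style operations. */
static int example_check_multi_region(int op, unsigned int num_regions)
{
        if (num_regions > 1 && !op_is_write(op))   /* was (rw & RW_MASK) != WRITE */
                return -EIO;
        return 0;
}
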
 
index 1452ed9aacb4222e4ee86c28480ddf373ac5e3c7..9da1d54ac6cb0fb9e94d1834fd873d8564af1ecd 100644 (file)
@@ -465,7 +465,7 @@ static void complete_io(unsigned long error, void *context)
        io_job_finish(kc->throttle);
 
        if (error) {
-               if (job->rw & WRITE)
+               if (op_is_write(job->rw))
                        job->write_err |= error;
                else
                        job->read_err = 1;
@@ -477,7 +477,7 @@ static void complete_io(unsigned long error, void *context)
                }
        }
 
-       if (job->rw & WRITE)
+       if (op_is_write(job->rw))
                push(&kc->complete_jobs, job);
 
        else {
@@ -496,7 +496,8 @@ static int run_io_job(struct kcopyd_job *job)
 {
        int r;
        struct dm_io_request io_req = {
-               .bi_rw = job->rw,
+               .bi_op = job->rw,
+               .bi_op_flags = 0,
                .mem.type = DM_IO_PAGE_LIST,
                .mem.ptr.pl = job->pages,
                .mem.offset = 0,
@@ -550,7 +551,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
 
                if (r < 0) {
                        /* error this rogue job */
-                       if (job->rw & WRITE)
+                       if (op_is_write(job->rw))
                                job->write_err = (unsigned long) -1L;
                        else
                                job->read_err = 1;
@@ -734,7 +735,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
                /*
                 * Use WRITE SAME to optimize zeroing if all dests support it.
                 */
-               job->rw = WRITE | REQ_WRITE_SAME;
+               job->rw = REQ_OP_WRITE_SAME;
                for (i = 0; i < job->num_dests; i++)
                        if (!bdev_write_same(job->dests[i].bdev)) {
                                job->rw = WRITE;
index 608302e222af0c3016c5d6d59ceb330294d1644b..b5dbf7a0515e0c7cb18ab149ba887dae93bfac39 100644 (file)
@@ -205,6 +205,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
        bio->bi_bdev = lc->logdev->bdev;
        bio->bi_end_io = log_end_io;
        bio->bi_private = lc;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
        page = alloc_page(GFP_KERNEL);
        if (!page) {
@@ -226,7 +227,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
                DMERR("Couldn't add page to the log block");
                goto error_bio;
        }
-       submit_bio(WRITE, bio);
+       submit_bio(bio);
        return 0;
 error_bio:
        bio_put(bio);
@@ -269,6 +270,7 @@ static int log_one_block(struct log_writes_c *lc,
        bio->bi_bdev = lc->logdev->bdev;
        bio->bi_end_io = log_end_io;
        bio->bi_private = lc;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
        for (i = 0; i < block->vec_cnt; i++) {
                /*
@@ -279,7 +281,7 @@ static int log_one_block(struct log_writes_c *lc,
                                   block->vecs[i].bv_len, 0);
                if (ret != block->vecs[i].bv_len) {
                        atomic_inc(&lc->io_blocks);
-                       submit_bio(WRITE, bio);
+                       submit_bio(bio);
                        bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
                        if (!bio) {
                                DMERR("Couldn't alloc log bio");
@@ -290,6 +292,7 @@ static int log_one_block(struct log_writes_c *lc,
                        bio->bi_bdev = lc->logdev->bdev;
                        bio->bi_end_io = log_end_io;
                        bio->bi_private = lc;
+                       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
                        ret = bio_add_page(bio, block->vecs[i].bv_page,
                                           block->vecs[i].bv_len, 0);
@@ -301,7 +304,7 @@ static int log_one_block(struct log_writes_c *lc,
                }
                sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
        }
-       submit_bio(WRITE, bio);
+       submit_bio(bio);
 out:
        kfree(block->data);
        kfree(block);
@@ -552,9 +555,9 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
        struct bio_vec bv;
        size_t alloc_size;
        int i = 0;
-       bool flush_bio = (bio->bi_rw & REQ_FLUSH);
+       bool flush_bio = (bio->bi_rw & REQ_PREFLUSH);
        bool fua_bio = (bio->bi_rw & REQ_FUA);
-       bool discard_bio = (bio->bi_rw & REQ_DISCARD);
+       bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
 
        pb->block = NULL;
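
On the submission side, submit_bio() loses its rw argument entirely, so a bio must carry its op before it is sent, as these log-writes hunks show. A minimal sketch of building and submitting a one-page write (all names illustrative, error handling trimmed; not part of the patch):

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Illustrative: write one page at 'sector' on 'bdev', completing via 'end_io'. */
static int example_write_one_page(struct block_device *bdev, sector_t sector,
                                  struct page *page, bio_end_io_t *end_io)
{
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);

        if (!bio)
                return -ENOMEM;

        bio->bi_iter.bi_sector = sector;
        bio->bi_bdev           = bdev;
        bio->bi_end_io         = end_io;
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
                bio_put(bio);
                return -EIO;
        }

        submit_bio(bio);        /* was submit_bio(WRITE, bio) */
        return 0;
}
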
 
index 627d19186d5a1719f1780aaa21f7406edf6e99fd..4ca2d1df5b44c0811555d35651d59d62513a9584 100644 (file)
@@ -293,7 +293,7 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis
 
 static int rw_header(struct log_c *lc, int rw)
 {
-       lc->io_req.bi_rw = rw;
+       lc->io_req.bi_op = rw;
 
        return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
 }
@@ -306,7 +306,8 @@ static int flush_header(struct log_c *lc)
                .count = 0,
        };
 
-       lc->io_req.bi_rw = WRITE_FLUSH;
+       lc->io_req.bi_op = REQ_OP_WRITE;
+       lc->io_req.bi_op_flags = WRITE_FLUSH;
 
        return dm_io(&lc->io_req, 1, &null_location, NULL);
 }
index 52532745a50f85d6eb765d8d6fcaa838301334b6..8cbac62b1602986ac19d652db93d50b9d2cfd8ea 100644 (file)
@@ -792,7 +792,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
        if (rdev->sb_loaded)
                return 0;
 
-       if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
+       if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) {
                DMERR("Failed to read superblock of device at position %d",
                      rdev->raid_disk);
                md_error(rdev->mddev, rdev);
@@ -1651,7 +1651,8 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
        for (i = 0; i < rs->md.raid_disks; i++) {
                r = &rs->dev[i].rdev;
                if (test_bit(Faulty, &r->flags) && r->sb_page &&
-                   sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
+                   sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0,
+                                1)) {
                        DMINFO("Faulty %s device #%d has readable super block."
                               "  Attempting to revive it.",
                               rs->raid_type->name, i);
index b3ccf1e0d4f218d8cb55f5d895e1bf46ac26ac8b..9f5f460c0e920af5bb49338e2c47a70c525c9262 100644 (file)
@@ -260,7 +260,8 @@ static int mirror_flush(struct dm_target *ti)
        struct dm_io_region io[ms->nr_mirrors];
        struct mirror *m;
        struct dm_io_request io_req = {
-               .bi_rw = WRITE_FLUSH,
+               .bi_op = REQ_OP_WRITE,
+               .bi_op_flags = WRITE_FLUSH,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = ms->io_client,
@@ -541,7 +542,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
 {
        struct dm_io_region io;
        struct dm_io_request io_req = {
-               .bi_rw = READ,
+               .bi_op = REQ_OP_READ,
+               .bi_op_flags = 0,
                .mem.type = DM_IO_BIO,
                .mem.ptr.bio = bio,
                .notify.fn = read_callback,
@@ -624,7 +626,7 @@ static void write_callback(unsigned long error, void *context)
         * If the bio is discard, return an error, but do not
         * degrade the array.
         */
-       if (bio->bi_rw & REQ_DISCARD) {
+       if (bio_op(bio) == REQ_OP_DISCARD) {
                bio->bi_error = -EOPNOTSUPP;
                bio_endio(bio);
                return;
@@ -654,7 +656,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
        struct dm_io_region io[ms->nr_mirrors], *dest = io;
        struct mirror *m;
        struct dm_io_request io_req = {
-               .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
+               .bi_op = REQ_OP_WRITE,
+               .bi_op_flags = bio->bi_rw & WRITE_FLUSH_FUA,
                .mem.type = DM_IO_BIO,
                .mem.ptr.bio = bio,
                .notify.fn = write_callback,
@@ -662,8 +665,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
                .client = ms->io_client,
        };
 
-       if (bio->bi_rw & REQ_DISCARD) {
-               io_req.bi_rw |= REQ_DISCARD;
+       if (bio_op(bio) == REQ_OP_DISCARD) {
+               io_req.bi_op = REQ_OP_DISCARD;
                io_req.mem.type = DM_IO_KMEM;
                io_req.mem.ptr.addr = NULL;
        }
@@ -701,8 +704,8 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
        bio_list_init(&requeue);
 
        while ((bio = bio_list_pop(writes))) {
-               if ((bio->bi_rw & REQ_FLUSH) ||
-                   (bio->bi_rw & REQ_DISCARD)) {
+               if ((bio->bi_rw & REQ_PREFLUSH) ||
+                   (bio_op(bio) == REQ_OP_DISCARD)) {
                        bio_list_add(&sync, bio);
                        continue;
                }
@@ -1250,7 +1253,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
         * We need to dec pending if this was a write.
         */
        if (rw == WRITE) {
-               if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
+               if (!(bio->bi_rw & REQ_PREFLUSH) &&
+                   bio_op(bio) != REQ_OP_DISCARD)
                        dm_rh_dec(ms->rh, bio_record->write_region);
                return error;
        }
index 74cb7b991d41d80384e3aaf34baf030579cc93d6..b11813431f31eb170955ee1a0b14329f403b15f1 100644 (file)
@@ -398,12 +398,12 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
        region_t region = dm_rh_bio_to_region(rh, bio);
        int recovering = 0;
 
-       if (bio->bi_rw & REQ_FLUSH) {
+       if (bio->bi_rw & REQ_PREFLUSH) {
                rh->flush_failure = 1;
                return;
        }
 
-       if (bio->bi_rw & REQ_DISCARD)
+       if (bio_op(bio) == REQ_OP_DISCARD)
                return;
 
        /* We must inform the log that the sync count has changed. */
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
        struct bio *bio;
 
        for (bio = bios->head; bio; bio = bio->bi_next) {
-               if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
+               if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
                        continue;
                rh_inc(rh, dm_rh_bio_to_region(rh, bio));
        }
index 4d3909393f2cce5488ced8843ccab1375d40d2d9..b8cf956b577b4a2f235f4910b15ab2ef1942dd9b 100644 (file)
@@ -226,8 +226,8 @@ static void do_metadata(struct work_struct *work)
 /*
  * Read or write a chunk aligned and sized block of data from a device.
  */
-static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
-                   int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
+                   int op_flags, int metadata)
 {
        struct dm_io_region where = {
                .bdev = dm_snap_cow(ps->store->snap)->bdev,
@@ -235,7 +235,8 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
                .count = ps->store->chunk_size,
        };
        struct dm_io_request io_req = {
-               .bi_rw = rw,
+               .bi_op = op,
+               .bi_op_flags = op_flags,
                .mem.type = DM_IO_VMA,
                .mem.ptr.vma = area,
                .client = ps->io_client,
@@ -281,14 +282,14 @@ static void skip_metadata(struct pstore *ps)
  * Read or write a metadata area.  Remembering to skip the first
  * chunk which holds the header.
  */
-static int area_io(struct pstore *ps, int rw)
+static int area_io(struct pstore *ps, int op, int op_flags)
 {
        int r;
        chunk_t chunk;
 
        chunk = area_location(ps, ps->current_area);
 
-       r = chunk_io(ps, ps->area, chunk, rw, 0);
+       r = chunk_io(ps, ps->area, chunk, op, op_flags, 0);
        if (r)
                return r;
 
@@ -302,7 +303,8 @@ static void zero_memory_area(struct pstore *ps)
 
 static int zero_disk_area(struct pstore *ps, chunk_t area)
 {
-       return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
+       return chunk_io(ps, ps->zero_area, area_location(ps, area),
+                       REQ_OP_WRITE, 0, 0);
 }
 
 static int read_header(struct pstore *ps, int *new_snapshot)
@@ -334,7 +336,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
        if (r)
                return r;
 
-       r = chunk_io(ps, ps->header_area, 0, READ, 1);
+       r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1);
        if (r)
                goto bad;
 
@@ -395,7 +397,7 @@ static int write_header(struct pstore *ps)
        dh->version = cpu_to_le32(ps->version);
        dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
 
-       return chunk_io(ps, ps->header_area, 0, WRITE, 1);
+       return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1);
 }
 
 /*
@@ -739,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
        /*
         * Commit exceptions to disk.
         */
-       if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
+       if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA))
                ps->valid = 0;
 
        /*
@@ -779,7 +781,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
                        return 0;
 
                ps->current_area--;
-               r = area_io(ps, READ);
+               r = area_io(ps, REQ_OP_READ, 0);
                if (r < 0)
                        return r;
                ps->current_committed = ps->exceptions_per_area;
@@ -816,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
        for (i = 0; i < nr_merged; i++)
                clear_exception(ps, ps->current_committed - 1 - i);
 
-       r = area_io(ps, WRITE_FLUSH_FUA);
+       r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA);
        if (r < 0)
                return r;
 
index 70bb0e8b62ce38c36975949923a6be13ef789fe5..69ab1ff5f5c996ce542d0025943231ed54e6fe59 100644 (file)
@@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
        init_tracked_chunk(bio);
 
-       if (bio->bi_rw & REQ_FLUSH) {
+       if (bio->bi_rw & REQ_PREFLUSH) {
                bio->bi_bdev = s->cow->bdev;
                return DM_MAPIO_REMAPPED;
        }
@@ -1799,7 +1799,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 
        init_tracked_chunk(bio);
 
-       if (bio->bi_rw & REQ_FLUSH) {
+       if (bio->bi_rw & REQ_PREFLUSH) {
                if (!dm_bio_get_target_bio_nr(bio))
                        bio->bi_bdev = s->origin->bdev;
                else
@@ -2285,7 +2285,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
 
        bio->bi_bdev = o->dev->bdev;
 
-       if (unlikely(bio->bi_rw & REQ_FLUSH))
+       if (unlikely(bio->bi_rw & REQ_PREFLUSH))
                return DM_MAPIO_REMAPPED;
 
        if (bio_rw(bio) != WRITE)
index 8289804ccd998a1d4d9be9fce2a32bd4bfa6cad8..4fba26cd6bdb2bb44b94d60c72b6863e21e27086 100644 (file)
@@ -514,11 +514,10 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
 }
 
 static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
-                             unsigned long bi_rw, sector_t len,
+                             int idx, sector_t len,
                              struct dm_stats_aux *stats_aux, bool end,
                              unsigned long duration_jiffies)
 {
-       unsigned long idx = bi_rw & REQ_WRITE;
        struct dm_stat_shared *shared = &s->stat_shared[entry];
        struct dm_stat_percpu *p;
 
@@ -584,7 +583,7 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
 #endif
 }
 
-static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
+static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
                          sector_t bi_sector, sector_t end_sector,
                          bool end, unsigned long duration_jiffies,
                          struct dm_stats_aux *stats_aux)
@@ -645,8 +644,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
                last = raw_cpu_ptr(stats->last);
                stats_aux->merged =
                        (bi_sector == (ACCESS_ONCE(last->last_sector) &&
-                                      ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
-                                       (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
+                                      ((bi_rw == WRITE) ==
+                                       (ACCESS_ONCE(last->last_rw) == WRITE))
                                       ));
                ACCESS_ONCE(last->last_sector) = end_sector;
                ACCESS_ONCE(last->last_rw) = bi_rw;
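
Callers of dm_stats_account_io() now pass only the data direction, obtained from bio_data_dir() or rq_data_dir(), rather than the raw bi_rw/cmd_flags word, since the op bits no longer live there. A minimal sketch of the bio-side call (the wrapper is hypothetical and assumes the in-tree dm-stats.h header; it mirrors the start_io_acct() hunk later in this diff):

#include <linux/bio.h>
#include "dm-stats.h"   /* assumed: in-tree drivers/md/dm-stats.h */

/* Hypothetical wrapper: account the start of a bio, passing only its direction. */
static void example_account_bio_start(struct dm_stats *stats, struct bio *bio,
                                      struct dm_stats_aux *aux)
{
        dm_stats_account_io(stats, bio_data_dir(bio),   /* READ or WRITE only */
                            bio->bi_iter.bi_sector, bio_sectors(bio),
                            false, 0, aux);
}
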
index 797ddb900b062079cae7aca92b0b7bad2ead619f..48f1c01d7b9fbeba59984ddcc7b948c58b62a31a 100644 (file)
@@ -286,14 +286,14 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
        uint32_t stripe;
        unsigned target_bio_nr;
 
-       if (bio->bi_rw & REQ_FLUSH) {
+       if (bio->bi_rw & REQ_PREFLUSH) {
                target_bio_nr = dm_bio_get_target_bio_nr(bio);
                BUG_ON(target_bio_nr >= sc->stripes);
                bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
                return DM_MAPIO_REMAPPED;
        }
-       if (unlikely(bio->bi_rw & REQ_DISCARD) ||
-           unlikely(bio->bi_rw & REQ_WRITE_SAME)) {
+       if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
+           unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
                target_bio_nr = dm_bio_get_target_bio_nr(bio);
                BUG_ON(target_bio_nr >= sc->stripes);
                return stripe_map_range(sc, bio, target_bio_nr);
index fc803d50f9f026e45932f8b131764ad479831237..5f9e3d799d66903b52649c6f8047ced0d84c73b3 100644 (file)
@@ -360,7 +360,7 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da
        sector_t len = block_to_sectors(tc->pool, data_e - data_b);
 
        return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
-                                     GFP_NOWAIT, REQ_WRITE | REQ_DISCARD, &op->bio);
+                                     GFP_NOWAIT, 0, &op->bio);
 }
 
 static void end_discard(struct discard_op *op, int r)
@@ -371,7 +371,8 @@ static void end_discard(struct discard_op *op, int r)
                 * need to wait for the chain to complete.
                 */
                bio_chain(op->bio, op->parent_bio);
-               submit_bio(REQ_WRITE | REQ_DISCARD, op->bio);
+               bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
+               submit_bio(op->bio);
        }
 
        blk_finish_plug(&op->plug);
@@ -696,7 +697,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 
 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
 {
-       return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+       return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
                dm_thin_changed_this_transaction(tc->td);
 }
 
@@ -704,7 +705,7 @@ static void inc_all_io_entry(struct pool *pool, struct bio *bio)
 {
        struct dm_thin_endio_hook *h;
 
-       if (bio->bi_rw & REQ_DISCARD)
+       if (bio_op(bio) == REQ_OP_DISCARD)
                return;
 
        h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -867,7 +868,8 @@ static void __inc_remap_and_issue_cell(void *context,
        struct bio *bio;
 
        while ((bio = bio_list_pop(&cell->bios))) {
-               if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
+               if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+                   bio_op(bio) == REQ_OP_DISCARD)
                        bio_list_add(&info->defer_bios, bio);
                else {
                        inc_all_io_entry(info->tc->pool, bio);
@@ -1639,7 +1641,8 @@ static void __remap_and_issue_shared_cell(void *context,
 
        while ((bio = bio_list_pop(&cell->bios))) {
                if ((bio_data_dir(bio) == WRITE) ||
-                   (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
+                   (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+                    bio_op(bio) == REQ_OP_DISCARD))
                        bio_list_add(&info->defer_bios, bio);
                else {
                        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;
@@ -2028,7 +2031,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
                        break;
                }
 
-               if (bio->bi_rw & REQ_DISCARD)
+               if (bio_op(bio) == REQ_OP_DISCARD)
                        pool->process_discard(tc, bio);
                else
                        pool->process_bio(tc, bio);
@@ -2115,7 +2118,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
                                return;
                        }
 
-                       if (cell->holder->bi_rw & REQ_DISCARD)
+                       if (bio_op(cell->holder) == REQ_OP_DISCARD)
                                pool->process_discard_cell(tc, cell);
                        else
                                pool->process_cell(tc, cell);
@@ -2553,7 +2556,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_SUBMITTED;
        }
 
-       if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
+       if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+           bio_op(bio) == REQ_OP_DISCARD) {
                thin_defer_bio_with_throttle(tc, bio);
                return DM_MAPIO_SUBMITTED;
        }
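
dm-thin's discard path illustrates two related details: __blkdev_issue_discard() no longer takes REQ_WRITE | REQ_DISCARD as its flags argument, and the bio it hands back is finished by setting REQ_OP_DISCARD and calling submit_bio(), as end_discard() above does. An illustrative sketch combining the two steps (the helper name is hypothetical):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative: build a discard for [sector, sector + nr_sects) and chain it behind 'parent'. */
static int example_issue_chained_discard(struct block_device *bdev, sector_t sector,
                                         sector_t nr_sects, struct bio *parent)
{
        struct bio *bio = NULL;
        int r;

        r = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOWAIT, 0, &bio);

        if (bio) {
                bio_chain(bio, parent);
                bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
                submit_bio(bio);
        }
        return r;
}
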
index 1b2f96205361e28d3332fe3b71dbd5947cb4b9c8..aba7ed9abb3ab774dfcac7d710a50290888cf230 100644 (file)
@@ -723,8 +723,9 @@ static void start_io_acct(struct dm_io *io)
                atomic_inc_return(&md->pending[rw]));
 
        if (unlikely(dm_stats_used(&md->stats)))
-               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
-                                   bio_sectors(bio), false, 0, &io->stats_aux);
+               dm_stats_account_io(&md->stats, bio_data_dir(bio),
+                                   bio->bi_iter.bi_sector, bio_sectors(bio),
+                                   false, 0, &io->stats_aux);
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -738,8 +739,9 @@ static void end_io_acct(struct dm_io *io)
        generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
 
        if (unlikely(dm_stats_used(&md->stats)))
-               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
-                                   bio_sectors(bio), true, duration, &io->stats_aux);
+               dm_stats_account_io(&md->stats, bio_data_dir(bio),
+                                   bio->bi_iter.bi_sector, bio_sectors(bio),
+                                   true, duration, &io->stats_aux);
 
        /*
         * After this is decremented the bio must not be touched if it is
@@ -1001,12 +1003,12 @@ static void dec_pending(struct dm_io *io, int error)
                if (io_error == DM_ENDIO_REQUEUE)
                        return;
 
-               if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
+               if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
                        /*
                         * Preflush done for flush with data, reissue
-                        * without REQ_FLUSH.
+                        * without REQ_PREFLUSH.
                         */
-                       bio->bi_rw &= ~REQ_FLUSH;
+                       bio->bi_rw &= ~REQ_PREFLUSH;
                        queue_io(md, bio);
                } else {
                        /* done with normal IO or empty flush */
@@ -1051,7 +1053,7 @@ static void clone_endio(struct bio *bio)
                }
        }
 
-       if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
+       if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) &&
                     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
                disable_write_same(md);
 
@@ -1121,9 +1123,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies - tio->duration_jiffies;
-               dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
-                                   tio->n_sectors, true, tio->duration_jiffies,
-                                   &tio->stats_aux);
+               dm_stats_account_io(&md->stats, rq_data_dir(orig),
+                                   blk_rq_pos(orig), tio->n_sectors, true,
+                                   tio->duration_jiffies, &tio->stats_aux);
        }
 }
 
@@ -1320,7 +1322,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }
 
-       if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
+       if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
                     !clone->q->limits.max_write_same_sectors))
                disable_write_same(tio->md);
 
@@ -1475,7 +1477,7 @@ EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
 /*
  * A target may call dm_accept_partial_bio only from the map routine.  It is
- * allowed for all bio types except REQ_FLUSH.
+ * allowed for all bio types except REQ_PREFLUSH.
  *
  * dm_accept_partial_bio informs the dm that the target only wants to process
  * additional n_sectors sectors of the bio and the rest of the data should be
@@ -1505,7 +1507,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 {
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
        unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
-       BUG_ON(bio->bi_rw & REQ_FLUSH);
+       BUG_ON(bio->bi_rw & REQ_PREFLUSH);
        BUG_ON(bi_size > *tio->len_ptr);
        BUG_ON(n_sectors > bi_size);
        *tio->len_ptr -= bi_size - n_sectors;
@@ -1746,9 +1748,9 @@ static int __split_and_process_non_flush(struct clone_info *ci)
        unsigned len;
        int r;
 
-       if (unlikely(bio->bi_rw & REQ_DISCARD))
+       if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
                return __send_discard(ci);
-       else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+       else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
                return __send_write_same(ci);
 
        ti = dm_table_find_target(ci->map, ci->sector);
@@ -1793,7 +1795,7 @@ static void __split_and_process_bio(struct mapped_device *md,
 
        start_io_acct(ci.io);
 
-       if (bio->bi_rw & REQ_FLUSH) {
+       if (bio->bi_rw & REQ_PREFLUSH) {
                ci.bio = &ci.md->flush_bio;
                ci.sector_count = 0;
                error = __send_empty_flush(&ci);
@@ -2082,8 +2084,9 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies;
                tio->n_sectors = blk_rq_sectors(orig);
-               dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
-                                   tio->n_sectors, false, 0, &tio->stats_aux);
+               dm_stats_account_io(&md->stats, rq_data_dir(orig),
+                                   blk_rq_pos(orig), tio->n_sectors, false, 0,
+                                   &tio->stats_aux);
        }
 
        /*
@@ -2168,7 +2171,7 @@ static void dm_request_fn(struct request_queue *q)
 
                /* always use block 0 to find the target for flushes for now */
                pos = 0;
-               if (!(rq->cmd_flags & REQ_FLUSH))
+               if (req_op(rq) != REQ_OP_FLUSH)
                        pos = blk_rq_pos(rq);
 
                if ((dm_request_peeked_before_merge_deadline(md) &&
@@ -2412,7 +2415,7 @@ static struct mapped_device *alloc_dev(int minor)
 
        bio_init(&md->flush_bio);
        md->flush_bio.bi_bdev = md->bdev;
-       md->flush_bio.bi_rw = WRITE_FLUSH;
+       bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
 
        dm_stats_init(&md->stats);
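
The device-mapper core hunks also absorb the flush renaming: on bios the preflush hint is REQ_PREFLUSH (formerly REQ_FLUSH), while an empty flush request is now recognised by its operation, req_op(rq) == REQ_OP_FLUSH, rather than a cmd_flags bit. Two illustrative predicates summarising the checks used above (names hypothetical):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative predicates only. */
static bool example_bio_has_preflush(struct bio *bio)
{
        return bio->bi_rw & REQ_PREFLUSH;        /* was bio->bi_rw & REQ_FLUSH */
}

static bool example_rq_is_flush(struct request *rq)
{
        return req_op(rq) == REQ_OP_FLUSH;       /* was rq->cmd_flags & REQ_FLUSH */
}
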
 
index b7fe7e9fc77730a4f7a1aca6e932aa1420b90967..70ff888d25d0864e1df9f181430e75ba42e89ccd 100644 (file)
@@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
        struct bio *split;
        sector_t start_sector, end_sector, data_offset;
 
-       if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+       if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
@@ -252,7 +252,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
                split->bi_iter.bi_sector = split->bi_iter.bi_sector -
                        start_sector + data_offset;
 
-               if (unlikely((split->bi_rw & REQ_DISCARD) &&
+               if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
                         !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
                        /* Just ignore it */
                        bio_endio(split);
index 866825f10b4c933f75eb4cbaff46c624f6267826..1f123f5a29da2cb4859060fab44d69e32c388606 100644 (file)
@@ -394,8 +394,9 @@ static void submit_flushes(struct work_struct *ws)
                        bi->bi_end_io = md_end_flush;
                        bi->bi_private = rdev;
                        bi->bi_bdev = rdev->bdev;
+                       bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
                        atomic_inc(&mddev->flush_pending);
-                       submit_bio(WRITE_FLUSH, bi);
+                       submit_bio(bi);
                        rcu_read_lock();
                        rdev_dec_pending(rdev, mddev);
                }
@@ -413,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws)
                /* an empty barrier - all done */
                bio_endio(bio);
        else {
-               bio->bi_rw &= ~REQ_FLUSH;
+               bio->bi_rw &= ~REQ_PREFLUSH;
                mddev->pers->make_request(mddev, bio);
        }
 
@@ -742,9 +743,10 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
 
        atomic_inc(&mddev->pending_writes);
-       submit_bio(WRITE_FLUSH_FUA, bio);
+       submit_bio(bio);
 }
 
 void md_super_wait(struct mddev *mddev)
@@ -754,13 +756,14 @@ void md_super_wait(struct mddev *mddev)
 }
 
 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
-                struct page *page, int rw, bool metadata_op)
+                struct page *page, int op, int op_flags, bool metadata_op)
 {
        struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
        int ret;
 
        bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
                rdev->meta_bdev : rdev->bdev;
+       bio_set_op_attrs(bio, op, op_flags);
        if (metadata_op)
                bio->bi_iter.bi_sector = sector + rdev->sb_start;
        else if (rdev->mddev->reshape_position != MaxSector &&
@@ -770,7 +773,8 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
        else
                bio->bi_iter.bi_sector = sector + rdev->data_offset;
        bio_add_page(bio, page, size, 0);
-       submit_bio_wait(rw, bio);
+
+       submit_bio_wait(bio);
 
        ret = !bio->bi_error;
        bio_put(bio);
@@ -785,7 +789,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
        if (rdev->sb_loaded)
                return 0;
 
-       if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
+       if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
                goto fail;
        rdev->sb_loaded = 1;
        return 0;
@@ -1471,7 +1475,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
                        return -EINVAL;
                bb_sector = (long long)offset;
                if (!sync_page_io(rdev, bb_sector, sectors << 9,
-                                 rdev->bb_page, READ, true))
+                                 rdev->bb_page, REQ_OP_READ, 0, true))
                        return -EIO;
                bbp = (u64 *)page_address(rdev->bb_page);
                rdev->badblocks.shift = sb->bblog_shift;
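
md's sync_page_io() rework also shows the synchronous submission variant: submit_bio_wait() drops its rw argument, so the op and flags are attached with bio_set_op_attrs() before waiting. A minimal sketch of a synchronous one-page read (names illustrative, error handling trimmed; not part of the patch):

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Illustrative: synchronously read one page from 'bdev' at 'sector'. */
static int example_sync_read_page(struct block_device *bdev, sector_t sector,
                                  struct page *page)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int ret;

        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
        bio_add_page(bio, page, PAGE_SIZE, 0);

        ret = submit_bio_wait(bio);     /* the old rw argument is gone */
        bio_put(bio);
        return ret;
}
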
index b5c4be73e6e425ead8cca186abdd100e735326e0..b4f335245bd60f851fac610592bd447dcfe434b4 100644 (file)
@@ -424,7 +424,7 @@ struct mddev {
 
        /* Generic flush handling.
         * The last to finish preflush schedules a worker to submit
-        * the rest of the request (without the REQ_FLUSH flag).
+        * the rest of the request (without the REQ_PREFLUSH flag).
         */
        struct bio *flush_bio;
        atomic_t flush_pending;
@@ -618,7 +618,8 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
                           sector_t sector, int size, struct page *page);
 extern void md_super_wait(struct mddev *mddev);
 extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
-                       struct page *page, int rw, bool metadata_op);
+                       struct page *page, int op, int op_flags,
+                       bool metadata_op);
 extern void md_do_sync(struct md_thread *thread);
 extern void md_new_event(struct mddev *mddev);
 extern int md_allow_write(struct mddev *mddev);
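
With the prototype above, sync_page_io() callers spell out the operation and flags separately, e.g. (REQ_OP_READ, 0) where they previously passed READ. A minimal wrapper showing the new call (the helper itself is hypothetical and assumes the in-tree md.h header):

#include "md.h"         /* assumed: drivers/md/md.h, for sync_page_io() and struct md_rdev */

/* Hypothetical helper: read 'size' bytes of metadata at 'sector' into 'page'. */
static int example_read_meta(struct md_rdev *rdev, sector_t sector, int size,
                             struct page *page)
{
        /* sync_page_io() returns nonzero on success, as the callers above expect */
        if (!sync_page_io(rdev, sector, size, page, REQ_OP_READ, 0, true))
                return -EIO;
        return 0;
}
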
index dd483bb2e111ee400eee173b4a21846332e53977..72ea98e89e5787fd96b7ebfef2cc68cbf1a02107 100644 (file)
@@ -111,7 +111,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
        struct multipath_bh * mp_bh;
        struct multipath_info *multipath;
 
-       if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+       if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
index 34783a3c8b3c1af780a092b8d11018f702541481..c3d439083212357e1b8a1f26f92f45546a121884 100644 (file)
@@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
        struct md_rdev *tmp_dev;
        struct bio *split;
 
-       if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+       if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
@@ -488,7 +488,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
                split->bi_iter.bi_sector = sector + zone->dev_start +
                        tmp_dev->data_offset;
 
-               if (unlikely((split->bi_rw & REQ_DISCARD) &&
+               if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
                         !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
                        /* Just ignore it */
                        bio_endio(split);
index c7c8cde0ab21128527cd74a1d7df4a0e388dc718..10e53cd6a995aff9d142301b1ace556b2186be36 100644 (file)
@@ -759,7 +759,7 @@ static void flush_pending_writes(struct r1conf *conf)
                while (bio) { /* submit pending writes */
                        struct bio *next = bio->bi_next;
                        bio->bi_next = NULL;
-                       if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+                       if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
                            !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
                                /* Just ignore it */
                                bio_endio(bio);
@@ -1033,7 +1033,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
        while (bio) { /* submit pending writes */
                struct bio *next = bio->bi_next;
                bio->bi_next = NULL;
-               if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+               if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
                        /* Just ignore it */
                        bio_endio(bio);
@@ -1053,12 +1053,12 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
        int i, disks;
        struct bitmap *bitmap;
        unsigned long flags;
+       const int op = bio_op(bio);
        const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
-       const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
-       const unsigned long do_discard = (bio->bi_rw
-                                         & (REQ_DISCARD | REQ_SECURE));
-       const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
+       const unsigned long do_flush_fua = (bio->bi_rw &
+                                               (REQ_PREFLUSH | REQ_FUA));
+       const unsigned long do_sec = (bio->bi_rw & REQ_SECURE);
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
        struct raid1_plug_cb *plug = NULL;
@@ -1166,7 +1166,7 @@ read_again:
                        mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid1_end_read_request;
-               read_bio->bi_rw = READ | do_sync;
+               bio_set_op_attrs(read_bio, op, do_sync);
                read_bio->bi_private = r1_bio;
 
                if (max_sectors < r1_bio->sectors) {
@@ -1376,8 +1376,7 @@ read_again:
                                   conf->mirrors[i].rdev->data_offset);
                mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
                mbio->bi_end_io = raid1_end_write_request;
-               mbio->bi_rw =
-                       WRITE | do_flush_fua | do_sync | do_discard | do_same;
+               bio_set_op_attrs(mbio, op, do_flush_fua | do_sync | do_sec);
                mbio->bi_private = r1_bio;
 
                atomic_inc(&r1_bio->remaining);
@@ -1771,7 +1770,7 @@ static void end_sync_write(struct bio *bio)
 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
                            int sectors, struct page *page, int rw)
 {
-       if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+       if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
                /* success */
                return 1;
        if (rw == WRITE) {
@@ -1825,7 +1824,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
                                rdev = conf->mirrors[d].rdev;
                                if (sync_page_io(rdev, sect, s<<9,
                                                 bio->bi_io_vec[idx].bv_page,
-                                                READ, false)) {
+                                                REQ_OP_READ, 0, false)) {
                                        success = 1;
                                        break;
                                }
@@ -2030,7 +2029,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
                      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
                        continue;
 
-               wbio->bi_rw = WRITE;
+               bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
                wbio->bi_end_io = end_sync_write;
                atomic_inc(&r1_bio->remaining);
                md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
@@ -2090,7 +2089,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                            is_badblock(rdev, sect, s,
                                        &first_bad, &bad_sectors) == 0 &&
                            sync_page_io(rdev, sect, s<<9,
-                                        conf->tmppage, READ, false))
+                                        conf->tmppage, REQ_OP_READ, 0, false))
                                success = 1;
                        else {
                                d++;
@@ -2201,14 +2200,15 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
                        wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
                }
 
-               wbio->bi_rw = WRITE;
+               bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
                wbio->bi_iter.bi_sector = r1_bio->sector;
                wbio->bi_iter.bi_size = r1_bio->sectors << 9;
 
                bio_trim(wbio, sector - r1_bio->sector, sectors);
                wbio->bi_iter.bi_sector += rdev->data_offset;
                wbio->bi_bdev = rdev->bdev;
-               if (submit_bio_wait(WRITE, wbio) < 0)
+
+               if (submit_bio_wait(wbio) < 0)
                        /* failure! */
                        ok = rdev_set_badblocks(rdev, sector,
                                                sectors, 0)
@@ -2343,7 +2343,7 @@ read_more:
                bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
                bio->bi_bdev = rdev->bdev;
                bio->bi_end_io = raid1_end_read_request;
-               bio->bi_rw = READ | do_sync;
+               bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
                bio->bi_private = r1_bio;
                if (max_sectors < r1_bio->sectors) {
                        /* Drat - have to split this up more */
@@ -2571,7 +2571,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                        if (i < conf->raid_disks)
                                still_degraded = 1;
                } else if (!test_bit(In_sync, &rdev->flags)) {
-                       bio->bi_rw = WRITE;
+                       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                        bio->bi_end_io = end_sync_write;
                        write_targets ++;
                } else {
@@ -2598,7 +2598,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                                        if (disk < 0)
                                                disk = i;
                                }
-                               bio->bi_rw = READ;
+                               bio_set_op_attrs(bio, REQ_OP_READ, 0);
                                bio->bi_end_io = end_sync_read;
                                read_targets++;
                        } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
@@ -2610,7 +2610,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                                 * if we are doing resync or repair. Otherwise, leave
                                 * this device alone for this sync request.
                                 */
-                               bio->bi_rw = WRITE;
+                               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                                bio->bi_end_io = end_sync_write;
                                write_targets++;
                        }
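
The raid1 hunks above show the core of the op/flags cleanup: the operation is no longer encoded in the READ/WRITE bits of bi_rw but set once with bio_set_op_attrs(), and submit_bio()/submit_bio_wait() drop their rw argument. A minimal before/after sketch of the pattern, drawn from the hunks above and not an additional change (err is just a local int in this fragment):

        /* before: op and flags packed into bi_rw, op repeated at submit time */
        wbio->bi_rw = WRITE;
        err = submit_bio_wait(WRITE, wbio);

        /* after: op and flags set once on the bio; submit takes only the bio */
        bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
        err = submit_bio_wait(wbio);
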
index c7de2a53e6259499dc18f64d7783d3cacad5c029..245640b501539c439dca218d45b68ad15601c682 100644 (file)
@@ -865,7 +865,7 @@ static void flush_pending_writes(struct r10conf *conf)
                while (bio) { /* submit pending writes */
                        struct bio *next = bio->bi_next;
                        bio->bi_next = NULL;
-                       if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+                       if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
                            !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
                                /* Just ignore it */
                                bio_endio(bio);
@@ -1041,7 +1041,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
        while (bio) { /* submit pending writes */
                struct bio *next = bio->bi_next;
                bio->bi_next = NULL;
-               if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+               if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
                        /* Just ignore it */
                        bio_endio(bio);
@@ -1058,12 +1058,11 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
        struct r10bio *r10_bio;
        struct bio *read_bio;
        int i;
+       const int op = bio_op(bio);
        const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
-       const unsigned long do_discard = (bio->bi_rw
-                                         & (REQ_DISCARD | REQ_SECURE));
-       const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
+       const unsigned long do_sec = (bio->bi_rw & REQ_SECURE);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
@@ -1156,7 +1155,7 @@ read_again:
                        choose_data_offset(r10_bio, rdev);
                read_bio->bi_bdev = rdev->bdev;
                read_bio->bi_end_io = raid10_end_read_request;
-               read_bio->bi_rw = READ | do_sync;
+               bio_set_op_attrs(read_bio, op, do_sync);
                read_bio->bi_private = r10_bio;
 
                if (max_sectors < r10_bio->sectors) {
@@ -1363,8 +1362,7 @@ retry_write:
                                                              rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
-                       mbio->bi_rw =
-                               WRITE | do_sync | do_fua | do_discard | do_same;
+                       bio_set_op_attrs(mbio, op, do_sync | do_fua | do_sec);
                        mbio->bi_private = r10_bio;
 
                        atomic_inc(&r10_bio->remaining);
@@ -1406,8 +1404,7 @@ retry_write:
                                                   r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
-                       mbio->bi_rw =
-                               WRITE | do_sync | do_fua | do_discard | do_same;
+                       bio_set_op_attrs(mbio, op, do_sync | do_fua | do_sec);
                        mbio->bi_private = r10_bio;
 
                        atomic_inc(&r10_bio->remaining);
@@ -1450,7 +1447,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 
        struct bio *split;
 
-       if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+       if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
@@ -1992,10 +1989,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 
                tbio->bi_vcnt = vcnt;
                tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
-               tbio->bi_rw = WRITE;
                tbio->bi_private = r10_bio;
                tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
                tbio->bi_end_io = end_sync_write;
+               bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
 
                bio_copy_data(tbio, fbio);
 
@@ -2078,7 +2075,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
                                  addr,
                                  s << 9,
                                  bio->bi_io_vec[idx].bv_page,
-                                 READ, false);
+                                 REQ_OP_READ, 0, false);
                if (ok) {
                        rdev = conf->mirrors[dw].rdev;
                        addr = r10_bio->devs[1].addr + sect;
@@ -2086,7 +2083,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
                                          addr,
                                          s << 9,
                                          bio->bi_io_vec[idx].bv_page,
-                                         WRITE, false);
+                                         REQ_OP_WRITE, 0, false);
                        if (!ok) {
                                set_bit(WriteErrorSeen, &rdev->flags);
                                if (!test_and_set_bit(WantReplacement,
@@ -2213,7 +2210,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
        if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
            && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
                return -1;
-       if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+       if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
                /* success */
                return 1;
        if (rw == WRITE) {
@@ -2299,7 +2296,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                                                       r10_bio->devs[sl].addr +
                                                       sect,
                                                       s<<9,
-                                                      conf->tmppage, READ, false);
+                                                      conf->tmppage,
+                                                      REQ_OP_READ, 0, false);
                                rdev_dec_pending(rdev, mddev);
                                rcu_read_lock();
                                if (success)
@@ -2474,7 +2472,9 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
                                   choose_data_offset(r10_bio, rdev) +
                                   (sector - r10_bio->sector));
                wbio->bi_bdev = rdev->bdev;
-               if (submit_bio_wait(WRITE, wbio) < 0)
+               bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+
+               if (submit_bio_wait(wbio) < 0)
                        /* Failure! */
                        ok = rdev_set_badblocks(rdev, sector,
                                                sectors, 0)
@@ -2548,7 +2548,7 @@ read_more:
        bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
                + choose_data_offset(r10_bio, rdev);
        bio->bi_bdev = rdev->bdev;
-       bio->bi_rw = READ | do_sync;
+       bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
        bio->bi_private = r10_bio;
        bio->bi_end_io = raid10_end_read_request;
        if (max_sectors < r10_bio->sectors) {
@@ -3038,7 +3038,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                                biolist = bio;
                                bio->bi_private = r10_bio;
                                bio->bi_end_io = end_sync_read;
-                               bio->bi_rw = READ;
+                               bio_set_op_attrs(bio, REQ_OP_READ, 0);
                                from_addr = r10_bio->devs[j].addr;
                                bio->bi_iter.bi_sector = from_addr +
                                        rdev->data_offset;
@@ -3064,7 +3064,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                                        biolist = bio;
                                        bio->bi_private = r10_bio;
                                        bio->bi_end_io = end_sync_write;
-                                       bio->bi_rw = WRITE;
+                                       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                                        bio->bi_iter.bi_sector = to_addr
                                                + rdev->data_offset;
                                        bio->bi_bdev = rdev->bdev;
@@ -3093,7 +3093,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                                biolist = bio;
                                bio->bi_private = r10_bio;
                                bio->bi_end_io = end_sync_write;
-                               bio->bi_rw = WRITE;
+                               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                                bio->bi_iter.bi_sector = to_addr +
                                        rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
@@ -3213,7 +3213,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                        biolist = bio;
                        bio->bi_private = r10_bio;
                        bio->bi_end_io = end_sync_read;
-                       bio->bi_rw = READ;
+                       bio_set_op_attrs(bio, REQ_OP_READ, 0);
                        bio->bi_iter.bi_sector = sector +
                                conf->mirrors[d].rdev->data_offset;
                        bio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -3235,7 +3235,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                        biolist = bio;
                        bio->bi_private = r10_bio;
                        bio->bi_end_io = end_sync_write;
-                       bio->bi_rw = WRITE;
+                       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                        bio->bi_iter.bi_sector = sector +
                                conf->mirrors[d].replacement->data_offset;
                        bio->bi_bdev = conf->mirrors[d].replacement->bdev;
@@ -4320,7 +4320,7 @@ read_more:
                               + rdev->data_offset);
        read_bio->bi_private = r10_bio;
        read_bio->bi_end_io = end_sync_read;
-       read_bio->bi_rw = READ;
+       bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
        read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
        read_bio->bi_error = 0;
        read_bio->bi_vcnt = 0;
@@ -4354,7 +4354,7 @@ read_more:
                        rdev2->new_data_offset;
                b->bi_private = r10_bio;
                b->bi_end_io = end_reshape_write;
-               b->bi_rw = WRITE;
+               bio_set_op_attrs(b, REQ_OP_WRITE, 0);
                b->bi_next = blist;
                blist = b;
        }
@@ -4522,7 +4522,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
                                               addr,
                                               s << 9,
                                               bvec[idx].bv_page,
-                                              READ, false);
+                                              REQ_OP_READ, 0, false);
                        if (success)
                                break;
                failed:
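
The raid10 hunks apply the same treatment to the synchronous helper: sync_page_io() now takes the request op and its flags as two separate arguments instead of one combined rw value. The old and new calls, side by side (illustrative only, taken from the conversions above):

        /* before: a single rw argument carried READ/WRITE plus any flags */
        sync_page_io(rdev, sector, sectors << 9, page, READ, false);

        /* after: an explicit op plus a (possibly zero) set of op flags */
        sync_page_io(rdev, sector, sectors << 9, page, REQ_OP_READ, 0, false);
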
index e889e2deb7b3525ff226a7d7053df1fe8c54f115..5504ce2bac06302712eedee6992f92b717def2dc 100644 (file)
@@ -254,14 +254,14 @@ static void r5l_submit_current_io(struct r5l_log *log)
        __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
        spin_unlock_irqrestore(&log->io_list_lock, flags);
 
-       submit_bio(WRITE, io->current_bio);
+       submit_bio(io->current_bio);
 }
 
 static struct bio *r5l_bio_alloc(struct r5l_log *log)
 {
        struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);
 
-       bio->bi_rw = WRITE;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        bio->bi_bdev = log->rdev->bdev;
        bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
 
@@ -373,7 +373,7 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
                io->current_bio = r5l_bio_alloc(log);
                bio_chain(io->current_bio, prev);
 
-               submit_bio(WRITE, prev);
+               submit_bio(prev);
        }
 
        if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
@@ -536,7 +536,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
                bio_endio(bio);
                return 0;
        }
-       bio->bi_rw &= ~REQ_FLUSH;
+       bio->bi_rw &= ~REQ_PREFLUSH;
        return -EAGAIN;
 }
 
@@ -686,7 +686,8 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
        bio_reset(&log->flush_bio);
        log->flush_bio.bi_bdev = log->rdev->bdev;
        log->flush_bio.bi_end_io = r5l_log_flush_endio;
-       submit_bio(WRITE_FLUSH, &log->flush_bio);
+       bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+       submit_bio(&log->flush_bio);
 }
 
 static void r5l_write_super(struct r5l_log *log, sector_t cp);
@@ -881,7 +882,8 @@ static int r5l_read_meta_block(struct r5l_log *log,
        struct r5l_meta_block *mb;
        u32 crc, stored_crc;
 
-       if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
+       if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_READ, 0,
+                         false))
                return -EIO;
 
        mb = page_address(page);
@@ -926,7 +928,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
                                             &disk_index, sh);
 
                        sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
-                                    sh->dev[disk_index].page, READ, false);
+                                    sh->dev[disk_index].page, REQ_OP_READ, 0,
+                                    false);
                        sh->dev[disk_index].log_checksum =
                                le32_to_cpu(payload->checksum[0]);
                        set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
@@ -934,7 +937,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
                } else {
                        disk_index = sh->pd_idx;
                        sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
-                                    sh->dev[disk_index].page, READ, false);
+                                    sh->dev[disk_index].page, REQ_OP_READ, 0,
+                                    false);
                        sh->dev[disk_index].log_checksum =
                                le32_to_cpu(payload->checksum[0]);
                        set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
@@ -944,7 +948,7 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
                                sync_page_io(log->rdev,
                                             r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
                                             PAGE_SIZE, sh->dev[disk_index].page,
-                                            READ, false);
+                                            REQ_OP_READ, 0, false);
                                sh->dev[disk_index].log_checksum =
                                        le32_to_cpu(payload->checksum[1]);
                                set_bit(R5_Wantwrite,
@@ -986,11 +990,13 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
                rdev = rcu_dereference(conf->disks[disk_index].rdev);
                if (rdev)
                        sync_page_io(rdev, stripe_sect, PAGE_SIZE,
-                                    sh->dev[disk_index].page, WRITE, false);
+                                    sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+                                    false);
                rrdev = rcu_dereference(conf->disks[disk_index].replacement);
                if (rrdev)
                        sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
-                                    sh->dev[disk_index].page, WRITE, false);
+                                    sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+                                    false);
        }
        raid5_release_stripe(sh);
        return 0;
@@ -1062,7 +1068,8 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
        crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        mb->checksum = cpu_to_le32(crc);
 
-       if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
+       if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
+                         WRITE_FUA, false)) {
                __free_page(page);
                return -EIO;
        }
@@ -1137,7 +1144,7 @@ static int r5l_load_log(struct r5l_log *log)
        if (!page)
                return -ENOMEM;
 
-       if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
+       if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
                ret = -EIO;
                goto ioerr;
        }
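
In raid5-cache the flush path picks up two related changes: the REQ_FLUSH bit is renamed REQ_PREFLUSH, and the log's own flush is issued as a REQ_OP_WRITE bio carrying WRITE_FLUSH flags rather than through submit_bio(WRITE_FLUSH, ...). Condensed from the hunks above:

        /* incoming flush requests are now detected (and stripped) via REQ_PREFLUSH */
        if (bio->bi_rw & REQ_PREFLUSH)
                bio->bi_rw &= ~REQ_PREFLUSH;

        /* the log flush becomes a write op with flush flags, submitted without rw */
        bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
        submit_bio(&log->flush_bio);
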
index 8959e6dd31dd1c056f8ef6e89d466738a1ade881..7aacf5b55e1540f9bacab18c06e39a1ea8105417 100644 (file)
@@ -806,7 +806,8 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
        dd_idx = 0;
        while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
                dd_idx++;
-       if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
+       if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
+           bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
                goto unlock_out;
 
        if (head->batch_head) {
@@ -891,29 +892,28 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
        if (r5l_write_stripe(conf->log, sh) == 0)
                return;
        for (i = disks; i--; ) {
-               int rw;
+               int op, op_flags = 0;
                int replace_only = 0;
                struct bio *bi, *rbi;
                struct md_rdev *rdev, *rrdev = NULL;
 
                sh = head_sh;
                if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
+                       op = REQ_OP_WRITE;
                        if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
-                               rw = WRITE_FUA;
-                       else
-                               rw = WRITE;
+                               op_flags = WRITE_FUA;
                        if (test_bit(R5_Discard, &sh->dev[i].flags))
-                               rw |= REQ_DISCARD;
+                               op = REQ_OP_DISCARD;
                } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
-                       rw = READ;
+                       op = REQ_OP_READ;
                else if (test_and_clear_bit(R5_WantReplace,
                                            &sh->dev[i].flags)) {
-                       rw = WRITE;
+                       op = REQ_OP_WRITE;
                        replace_only = 1;
                } else
                        continue;
                if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
-                       rw |= REQ_SYNC;
+                       op_flags |= REQ_SYNC;
 
 again:
                bi = &sh->dev[i].req;
@@ -927,7 +927,7 @@ again:
                        rdev = rrdev;
                        rrdev = NULL;
                }
-               if (rw & WRITE) {
+               if (op_is_write(op)) {
                        if (replace_only)
                                rdev = NULL;
                        if (rdev == rrdev)
@@ -953,7 +953,7 @@ again:
                 * need to check for writes.  We never accept write errors
                 * on the replacement, so we don't to check rrdev.
                 */
-               while ((rw & WRITE) && rdev &&
+               while (op_is_write(op) && rdev &&
                       test_bit(WriteErrorSeen, &rdev->flags)) {
                        sector_t first_bad;
                        int bad_sectors;
@@ -995,13 +995,13 @@ again:
 
                        bio_reset(bi);
                        bi->bi_bdev = rdev->bdev;
-                       bi->bi_rw = rw;
-                       bi->bi_end_io = (rw & WRITE)
+                       bio_set_op_attrs(bi, op, op_flags);
+                       bi->bi_end_io = op_is_write(op)
                                ? raid5_end_write_request
                                : raid5_end_read_request;
                        bi->bi_private = sh;
 
-                       pr_debug("%s: for %llu schedule op %ld on disc %d\n",
+                       pr_debug("%s: for %llu schedule op %d on disc %d\n",
                                __func__, (unsigned long long)sh->sector,
                                bi->bi_rw, i);
                        atomic_inc(&sh->count);
@@ -1027,7 +1027,7 @@ again:
                         * If this is discard request, set bi_vcnt 0. We don't
                         * want to confuse SCSI because SCSI will replace payload
                         */
-                       if (rw & REQ_DISCARD)
+                       if (op == REQ_OP_DISCARD)
                                bi->bi_vcnt = 0;
                        if (rrdev)
                                set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
@@ -1047,12 +1047,12 @@ again:
 
                        bio_reset(rbi);
                        rbi->bi_bdev = rrdev->bdev;
-                       rbi->bi_rw = rw;
-                       BUG_ON(!(rw & WRITE));
+                       bio_set_op_attrs(rbi, op, op_flags);
+                       BUG_ON(!op_is_write(op));
                        rbi->bi_end_io = raid5_end_write_request;
                        rbi->bi_private = sh;
 
-                       pr_debug("%s: for %llu schedule op %ld on "
+                       pr_debug("%s: for %llu schedule op %d on "
                                 "replacement disc %d\n",
                                __func__, (unsigned long long)sh->sector,
                                rbi->bi_rw, i);
@@ -1076,7 +1076,7 @@ again:
                         * If this is discard request, set bi_vcnt 0. We don't
                         * want to confuse SCSI because SCSI will replace payload
                         */
-                       if (rw & REQ_DISCARD)
+                       if (op == REQ_OP_DISCARD)
                                rbi->bi_vcnt = 0;
                        if (conf->mddev->gendisk)
                                trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
@@ -1085,9 +1085,9 @@ again:
                        generic_make_request(rbi);
                }
                if (!rdev && !rrdev) {
-                       if (rw & WRITE)
+                       if (op_is_write(op))
                                set_bit(STRIPE_DEGRADED, &sh->state);
-                       pr_debug("skip op %ld on disc %d for sector %llu\n",
+                       pr_debug("skip op %d on disc %d for sector %llu\n",
                                bi->bi_rw, i, (unsigned long long)sh->sector);
                        clear_bit(R5_LOCKED, &sh->dev[i].flags);
                        set_bit(STRIPE_HANDLE, &sh->state);
@@ -1623,7 +1623,7 @@ again:
                                        set_bit(R5_WantFUA, &dev->flags);
                                if (wbi->bi_rw & REQ_SYNC)
                                        set_bit(R5_SyncIO, &dev->flags);
-                               if (wbi->bi_rw & REQ_DISCARD)
+                               if (bio_op(wbi) == REQ_OP_DISCARD)
                                        set_bit(R5_Discard, &dev->flags);
                                else {
                                        tx = async_copy_data(1, wbi, &dev->page,
@@ -5150,7 +5150,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
        DEFINE_WAIT(w);
        bool do_prepare;
 
-       if (unlikely(bi->bi_rw & REQ_FLUSH)) {
+       if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
                int ret = r5l_handle_flush_request(conf->log, bi);
 
                if (ret == 0)
@@ -5176,7 +5176,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                        return;
        }
 
-       if (unlikely(bi->bi_rw & REQ_DISCARD)) {
+       if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
                make_discard_request(mddev, bi);
                return;
        }
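
The raid5 hunks show the other half of the cleanup: instead of testing REQ_WRITE or REQ_DISCARD bits in a combined rw value, code compares the discrete op returned by bio_op()/req_op(), with op_is_write() covering any write-style operation. In outline, using the patterns from the hunks above (handle_write_side() is only a placeholder for the write-only branch, not a real helper):

        const int op = bio_op(bio);     /* REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD, ... */

        if (op_is_write(op))            /* was: if (rw & WRITE) */
                handle_write_side();    /* placeholder for the write-only handling above */

        if (op == REQ_OP_DISCARD)       /* was: if (rw & REQ_DISCARD) */
                bi->bi_vcnt = 0;
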
index c5472e3c923126097fd93f2abf31402a1717c228..11ee4145983b4397875f576d6a0b7398c480c09f 100644 (file)
@@ -1724,8 +1724,8 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
                    !IS_ALIGNED(blk_rq_sectors(next), 8))
                        break;
 
-               if (next->cmd_flags & REQ_DISCARD ||
-                   next->cmd_flags & REQ_FLUSH)
+               if (req_op(next) == REQ_OP_DISCARD ||
+                   req_op(next) == REQ_OP_FLUSH)
                        break;
 
                if (rq_data_dir(cur) != rq_data_dir(next))
@@ -2150,7 +2150,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        struct mmc_card *card = md->queue.card;
        struct mmc_host *host = card->host;
        unsigned long flags;
-       unsigned int cmd_flags = req ? req->cmd_flags : 0;
 
        if (req && !mq->mqrq_prev->req)
                /* claim host only for the first request */
@@ -2166,7 +2165,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        }
 
        mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
-       if (cmd_flags & REQ_DISCARD) {
+       if (req && req_op(req) == REQ_OP_DISCARD) {
                /* complete ongoing async transfer before issuing discard */
                if (card->host->areq)
                        mmc_blk_issue_rw_rq(mq, NULL);
@@ -2174,7 +2173,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        ret = mmc_blk_issue_discard_rq(mq, req);
-       } else if (cmd_flags & REQ_FLUSH) {
+       } else if (req && req_op(req) == REQ_OP_FLUSH) {
                /* complete ongoing async transfer before issuing flush */
                if (card->host->areq)
                        mmc_blk_issue_rw_rq(mq, NULL);
@@ -2190,7 +2189,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 out:
        if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
-            (cmd_flags & MMC_REQ_SPECIAL_MASK))
+           mmc_req_is_special(req))
                /*
                 * Release host when there are no more requests
                 * and after special request(discard, flush) is done.
index 6f4323c6d6536c8855ca2a8bef8f8087bb5d252e..c2d5f6f35145e7c7f56cb96b8d907c0102ceb752 100644 (file)
@@ -33,7 +33,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
        /*
         * We only like normal block requests and discards.
         */
-       if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
+       if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }
@@ -56,7 +56,6 @@ static int mmc_queue_thread(void *d)
        down(&mq->thread_sem);
        do {
                struct request *req = NULL;
-               unsigned int cmd_flags = 0;
 
                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
@@ -66,7 +65,6 @@ static int mmc_queue_thread(void *d)
 
                if (req || mq->mqrq_prev->req) {
                        set_current_state(TASK_RUNNING);
-                       cmd_flags = req ? req->cmd_flags : 0;
                        mq->issue_fn(mq, req);
                        cond_resched();
                        if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
@@ -81,7 +79,7 @@ static int mmc_queue_thread(void *d)
                         * has been finished. Do not assign it to previous
                         * request.
                         */
-                       if (cmd_flags & MMC_REQ_SPECIAL_MASK)
+                       if (mmc_req_is_special(req))
                                mq->mqrq_cur->req = NULL;
 
                        mq->mqrq_prev->brq.mrq.data = NULL;
index 36cddab57d776322c3912241f274e06620a77251..d62531124d542c0ff82893ed04226e2a0c04016e 100644 (file)
@@ -1,7 +1,11 @@
 #ifndef MMC_QUEUE_H
 #define MMC_QUEUE_H
 
-#define MMC_REQ_SPECIAL_MASK   (REQ_DISCARD | REQ_FLUSH)
+static inline bool mmc_req_is_special(struct request *req)
+{
+       return req &&
+               (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD);
+}
 
 struct request;
 struct task_struct;
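
The new mmc_req_is_special() helper above replaces the open-coded MMC_REQ_SPECIAL_MASK test and is NULL-safe, which is why the callers in block.c and queue.c no longer need to snapshot cmd_flags before the request may be completed. Typical use, as in the queue.c hunk above:

        if (mmc_req_is_special(req))
                mq->mqrq_cur->req = NULL;   /* don't carry a discard/flush over as the previous request */
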
index 74ae24364a8db33e4b76e027957951e18b44aec0..78b3eb45faf602ebe4681590ce2c30cd77228349 100644 (file)
@@ -87,14 +87,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
        if (req->cmd_type != REQ_TYPE_FS)
                return -EIO;
 
-       if (req->cmd_flags & REQ_FLUSH)
+       if (req_op(req) == REQ_OP_FLUSH)
                return tr->flush(dev);
 
        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
                return -EIO;
 
-       if (req->cmd_flags & REQ_DISCARD)
+       if (req_op(req) == REQ_OP_DISCARD)
                return tr->discard(dev, block, nsect);
 
        if (rq_data_dir(req) == READ) {
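
On the struct request side the same conversion shows up across mtd, mmc, nvme and sd: drivers ask req_op() for the operation instead of testing REQ_DISCARD/REQ_FLUSH bits in cmd_flags. The mtd_blkdevs hunk above reduces to:

        if (req_op(req) == REQ_OP_FLUSH)        /* was: req->cmd_flags & REQ_FLUSH */
                return tr->flush(dev);

        if (req_op(req) == REQ_OP_DISCARD)      /* was: req->cmd_flags & REQ_DISCARD */
                return tr->discard(dev, block, nsect);
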
index 608fc4464574e1d9edb4037c53b6998afdb11ebe..53b701b2f73ee7d6a75e5407a18bdce8c3cbae13 100644 (file)
@@ -283,6 +283,7 @@ static int pmem_attach_disk(struct device *dev,
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+       queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
        q->queuedata = pmem;
 
        disk = alloc_disk_node(0, nid);
index d5fb55c0a9d95cdd2e6ac9cc99ca17f44b2d6ef7..1c5a032d490d22116433ac8ff59945a7bdf749b3 100644 (file)
@@ -290,9 +290,9 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 
        if (req->cmd_type == REQ_TYPE_DRV_PRIV)
                memcpy(cmd, req->cmd, sizeof(*cmd));
-       else if (req->cmd_flags & REQ_FLUSH)
+       else if (req_op(req) == REQ_OP_FLUSH)
                nvme_setup_flush(ns, cmd);
-       else if (req->cmd_flags & REQ_DISCARD)
+       else if (req_op(req) == REQ_OP_DISCARD)
                ret = nvme_setup_discard(ns, req, cmd);
        else
                nvme_setup_rw(ns, req, cmd);
index 1daa0482de0e70616c4c97c34ad1e572c2ece649..4d196d2d57da6ac4306fc5947c3cbb38b7e018ac 100644 (file)
@@ -177,7 +177,7 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
 
 static inline unsigned nvme_map_len(struct request *rq)
 {
-       if (rq->cmd_flags & REQ_DISCARD)
+       if (req_op(rq) == REQ_OP_DISCARD)
                return sizeof(struct nvme_dsm_range);
        else
                return blk_rq_bytes(rq);
@@ -185,7 +185,7 @@ static inline unsigned nvme_map_len(struct request *rq)
 
 static inline void nvme_cleanup_cmd(struct request *req)
 {
-       if (req->cmd_flags & REQ_DISCARD)
+       if (req_op(req) == REQ_OP_DISCARD)
                kfree(req->completion_data);
 }
 
index bed53c46dd90657f5432dd053f4cee8e774763b8..093e9e18e7e745246454b9bf02a2a70a4898e68a 100644 (file)
@@ -618,6 +618,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
        dev_info->gd->driverfs_dev = &dev_info->dev;
        blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
        blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
+       queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
 
        seg_byte_size = (dev_info->end - dev_info->start + 1);
        set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
index 3b11aad0375226d2afed99157546abe98200c972..daa4dc17f172f9cc638644066b5ea871ccb520b7 100644 (file)
@@ -726,7 +726,7 @@ static int _osd_req_list_objects(struct osd_request *or,
                return PTR_ERR(bio);
        }
 
-       bio->bi_rw &= ~REQ_WRITE;
+       bio_set_op_attrs(bio, REQ_OP_READ, 0);
        or->in.bio = bio;
        or->in.total_bytes = bio->bi_iter.bi_size;
        return 0;
@@ -824,7 +824,7 @@ void osd_req_write(struct osd_request *or,
 {
        _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
        WARN_ON(or->out.bio || or->out.total_bytes);
-       WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
+       WARN_ON(!op_is_write(bio_op(bio)));
        or->out.bio = bio;
        or->out.total_bytes = len;
 }
@@ -839,7 +839,7 @@ int osd_req_write_kern(struct osd_request *or,
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
+       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        osd_req_write(or, obj, offset, bio, len);
        return 0;
 }
@@ -875,7 +875,7 @@ void osd_req_read(struct osd_request *or,
 {
        _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
        WARN_ON(or->in.bio || or->in.total_bytes);
-       WARN_ON(bio->bi_rw & REQ_WRITE);
+       WARN_ON(op_is_write(bio_op(bio)));
        or->in.bio = bio;
        or->in.total_bytes = len;
 }
@@ -956,7 +956,7 @@ static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       bio->bi_rw |= REQ_WRITE;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
        /* integrity check the continuation before the bio is linked
         * with the other data segments since the continuation
@@ -1077,7 +1077,7 @@ int osd_req_write_sg_kern(struct osd_request *or,
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       bio->bi_rw |= REQ_WRITE;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        osd_req_write_sg(or, obj, bio, sglist, numentries);
 
        return 0;
index 60bff78e9ead8703e1e912307c504330d1a76d1f..0609d6802d9371322857e6aca4909829426949eb 100644 (file)
@@ -1012,7 +1012,8 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
        } else if (rq_data_dir(rq) == READ) {
                SCpnt->cmnd[0] = READ_6;
        } else {
-               scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
+               scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n",
+                           req_op(rq), (unsigned long long) rq->cmd_flags);
                goto out;
        }
 
@@ -1137,21 +1138,26 @@ static int sd_init_command(struct scsi_cmnd *cmd)
 {
        struct request *rq = cmd->request;
 
-       if (rq->cmd_flags & REQ_DISCARD)
+       switch (req_op(rq)) {
+       case REQ_OP_DISCARD:
                return sd_setup_discard_cmnd(cmd);
-       else if (rq->cmd_flags & REQ_WRITE_SAME)
+       case REQ_OP_WRITE_SAME:
                return sd_setup_write_same_cmnd(cmd);
-       else if (rq->cmd_flags & REQ_FLUSH)
+       case REQ_OP_FLUSH:
                return sd_setup_flush_cmnd(cmd);
-       else
+       case REQ_OP_READ:
+       case REQ_OP_WRITE:
                return sd_setup_read_write_cmnd(cmd);
+       default:
+               BUG();
+       }
 }
 
 static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 {
        struct request *rq = SCpnt->request;
 
-       if (rq->cmd_flags & REQ_DISCARD)
+       if (req_op(rq) == REQ_OP_DISCARD)
                __free_page(rq->completion_data);
 
        if (SCpnt->cmnd != rq->cmd) {
@@ -1774,7 +1780,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
        unsigned char op = SCpnt->cmnd[0];
        unsigned char unmap = SCpnt->cmnd[1] & 8;
 
-       if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) {
+       if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) {
                if (!result) {
                        good_bytes = blk_rq_bytes(req);
                        scsi_set_resid(SCpnt, 0);
index 7c4efb4417b0fc5e9c2cd9700eb83dd024e3db92..22af12f8b8eb7ff13d079fb3291c680bb3d6dec0 100644 (file)
@@ -312,7 +312,8 @@ static void iblock_bio_done(struct bio *bio)
 }
 
 static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
+              int op_flags)
 {
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        struct bio *bio;
@@ -334,18 +335,19 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_iter.bi_sector = lba;
+       bio_set_op_attrs(bio, op, op_flags);
 
        return bio;
 }
 
-static void iblock_submit_bios(struct bio_list *list, int rw)
+static void iblock_submit_bios(struct bio_list *list)
 {
        struct blk_plug plug;
        struct bio *bio;
 
        blk_start_plug(&plug);
        while ((bio = bio_list_pop(list)))
-               submit_bio(rw, bio);
+               submit_bio(bio);
        blk_finish_plug(&plug);
 }
 
@@ -387,9 +389,10 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
        bio = bio_alloc(GFP_KERNEL, 0);
        bio->bi_end_io = iblock_end_io_flush;
        bio->bi_bdev = ib_dev->ibd_bd;
+       bio->bi_rw = WRITE_FLUSH;
        if (!immed)
                bio->bi_private = cmd;
-       submit_bio(WRITE_FLUSH, bio);
+       submit_bio(bio);
        return 0;
 }
 
@@ -478,7 +481,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
                goto fail;
        cmd->priv = ibr;
 
-       bio = iblock_get_bio(cmd, block_lba, 1);
+       bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
        if (!bio)
                goto fail_free_ibr;
 
@@ -491,7 +494,8 @@ iblock_execute_write_same(struct se_cmd *cmd)
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
 
-                       bio = iblock_get_bio(cmd, block_lba, 1);
+                       bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
+                                            0);
                        if (!bio)
                                goto fail_put_bios;
 
@@ -504,7 +508,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
                sectors -= 1;
        }
 
-       iblock_submit_bios(&list, WRITE);
+       iblock_submit_bios(&list);
        return 0;
 
 fail_put_bios:
@@ -677,8 +681,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
        unsigned bio_cnt;
-       int rw = 0;
-       int i;
+       int i, op, op_flags = 0;
 
        if (data_direction == DMA_TO_DEVICE) {
                struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -687,18 +690,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                 * Force writethrough using WRITE_FUA if a volatile write cache
                 * is not enabled, or if initiator set the Force Unit Access bit.
                 */
+               op = REQ_OP_WRITE;
                if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
                        if (cmd->se_cmd_flags & SCF_FUA)
-                               rw = WRITE_FUA;
+                               op_flags = WRITE_FUA;
                        else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
-                               rw = WRITE_FUA;
-                       else
-                               rw = WRITE;
-               } else {
-                       rw = WRITE;
+                               op_flags = WRITE_FUA;
                }
        } else {
-               rw = READ;
+               op = REQ_OP_READ;
        }
 
        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
@@ -712,7 +712,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                return 0;
        }
 
-       bio = iblock_get_bio(cmd, block_lba, sgl_nents);
+       bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
        if (!bio)
                goto fail_free_ibr;
 
@@ -732,11 +732,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
-                               iblock_submit_bios(&list, rw);
+                               iblock_submit_bios(&list);
                                bio_cnt = 0;
                        }
 
-                       bio = iblock_get_bio(cmd, block_lba, sg_num);
+                       bio = iblock_get_bio(cmd, block_lba, sg_num, op,
+                                            op_flags);
                        if (!bio)
                                goto fail_put_bios;
 
@@ -756,7 +757,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                        goto fail_put_bios;
        }
 
-       iblock_submit_bios(&list, rw);
+       iblock_submit_bios(&list);
        iblock_complete_cmd(cmd);
        return 0;
 
index de18790eb21c0d79cc36e7479a2876b13ca195dc..81564c87f24b7be3d3a1c8713099c76c3ea4e702 100644 (file)
@@ -922,7 +922,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                                        goto fail;
 
                                if (rw)
-                                       bio->bi_rw |= REQ_WRITE;
+                                       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
                                pr_debug("PSCSI: Allocated bio: %p,"
                                        " dir: %s nr_vecs: %d\n", bio,
index 71ccab1d22c6133623ac640dffe30ad858afabe4..d012be4ab977970edd889653e5edc89c03aa45e6 100644 (file)
@@ -493,7 +493,7 @@ long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
 
        if (size < 0)
                return size;
-       if (!ops->direct_access)
+       if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
                return -EOPNOTSUPP;
        if ((sector + DIV_ROUND_UP(size, 512)) >
                                        part_nr_sects_read(bdev->bd_part))
@@ -1287,7 +1287,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                bdev->bd_disk = disk;
                bdev->bd_queue = disk->queue;
                bdev->bd_contains = bdev;
-               if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && disk->fops->direct_access)
+               if (IS_ENABLED(CONFIG_BLK_DEV_DAX) &&
+                   blk_queue_dax(disk->queue))
                        bdev->bd_inode->i_flags = S_DAX;
                else
                        bdev->bd_inode->i_flags = 0;
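
The DAX part of the series is independent of the op/flags work: a driver that supports DAX now advertises it by setting QUEUE_FLAG_DAX on its request queue (pmem and dcssblk above), and fs/block_dev.c keys off blk_queue_dax() rather than the mere presence of a ->direct_access method. The two sides, as in the hunks above:

        /* driver side: advertise DAX support on the request queue */
        queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);

        /* block layer side: refuse direct access unless the flag is set */
        if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
                return -EOPNOTSUPP;
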
index 7706c8dc5fa637632dce3c9e31f50aa2c9d5f400..5d5cae05818da1a450ae7e1eafdc4db233ca8186 100644 (file)
@@ -1673,6 +1673,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
                }
                bio->bi_bdev = block_ctx->dev->bdev;
                bio->bi_iter.bi_sector = dev_bytenr >> 9;
+               bio_set_op_attrs(bio, REQ_OP_READ, 0);
 
                for (j = i; j < num_pages; j++) {
                        ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -1685,7 +1686,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
                               "btrfsic: error, failed to add a single page!\n");
                        return -1;
                }
-               if (submit_bio_wait(READ, bio)) {
+               if (submit_bio_wait(bio)) {
                        printk(KERN_INFO
                               "btrfsic: read error at logical %llu dev %s!\n",
                               block_ctx->start, block_ctx->dev->name);
@@ -2206,7 +2207,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
                               block->dev_bytenr, block->mirror_num);
                next_block = block->next_in_same_bio;
                block->iodone_w_error = iodone_w_error;
-               if (block->submit_bio_bh_rw & REQ_FLUSH) {
+               if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
                        dev_state->last_flush_gen++;
                        if ((dev_state->state->print_mask &
                             BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
@@ -2242,7 +2243,7 @@ static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
                       block->dev_bytenr, block->mirror_num);
 
        block->iodone_w_error = iodone_w_error;
-       if (block->submit_bio_bh_rw & REQ_FLUSH) {
+       if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
                dev_state->last_flush_gen++;
                if ((dev_state->state->print_mask &
                     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
@@ -2855,12 +2856,12 @@ static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
        return ds;
 }
 
-int btrfsic_submit_bh(int rw, struct buffer_head *bh)
+int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
 {
        struct btrfsic_dev_state *dev_state;
 
        if (!btrfsic_is_initialized)
-               return submit_bh(rw, bh);
+               return submit_bh(op, op_flags, bh);
 
        mutex_lock(&btrfsic_mutex);
        /* since btrfsic_submit_bh() might also be called before
@@ -2869,26 +2870,26 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
 
        /* Only called to write the superblock (incl. FLUSH/FUA) */
        if (NULL != dev_state &&
-           (rw & WRITE) && bh->b_size > 0) {
+           (op == REQ_OP_WRITE) && bh->b_size > 0) {
                u64 dev_bytenr;
 
                dev_bytenr = 4096 * bh->b_blocknr;
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
                        printk(KERN_INFO
-                              "submit_bh(rw=0x%x, blocknr=%llu (bytenr %llu),"
-                              " size=%zu, data=%p, bdev=%p)\n",
-                              rw, (unsigned long long)bh->b_blocknr,
+                              "submit_bh(op=0x%x,0x%x, blocknr=%llu "
+                              "(bytenr %llu), size=%zu, data=%p, bdev=%p)\n",
+                              op, op_flags, (unsigned long long)bh->b_blocknr,
                               dev_bytenr, bh->b_size, bh->b_data, bh->b_bdev);
                btrfsic_process_written_block(dev_state, dev_bytenr,
                                              &bh->b_data, 1, NULL,
-                                             NULL, bh, rw);
-       } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
+                                             NULL, bh, op_flags);
+       } else if (NULL != dev_state && (op_flags & REQ_PREFLUSH)) {
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
                        printk(KERN_INFO
-                              "submit_bh(rw=0x%x FLUSH, bdev=%p)\n",
-                              rw, bh->b_bdev);
+                              "submit_bh(op=0x%x,0x%x FLUSH, bdev=%p)\n",
+                              op, op_flags, bh->b_bdev);
                if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
                        if ((dev_state->state->print_mask &
                             (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
@@ -2906,7 +2907,7 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
                        block->never_written = 0;
                        block->iodone_w_error = 0;
                        block->flush_gen = dev_state->last_flush_gen + 1;
-                       block->submit_bio_bh_rw = rw;
+                       block->submit_bio_bh_rw = op_flags;
                        block->orig_bio_bh_private = bh->b_private;
                        block->orig_bio_bh_end_io.bh = bh->b_end_io;
                        block->next_in_same_bio = NULL;
@@ -2915,10 +2916,10 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
                }
        }
        mutex_unlock(&btrfsic_mutex);
-       return submit_bh(rw, bh);
+       return submit_bh(op, op_flags, bh);
 }
 
-static void __btrfsic_submit_bio(int rw, struct bio *bio)
+static void __btrfsic_submit_bio(struct bio *bio)
 {
        struct btrfsic_dev_state *dev_state;
 
@@ -2930,7 +2931,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
         * btrfsic_mount(), this might return NULL */
        dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
        if (NULL != dev_state &&
-           (rw & WRITE) && NULL != bio->bi_io_vec) {
+           (bio_op(bio) == REQ_OP_WRITE) && NULL != bio->bi_io_vec) {
                unsigned int i;
                u64 dev_bytenr;
                u64 cur_bytenr;
@@ -2942,9 +2943,9 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
                        printk(KERN_INFO
-                              "submit_bio(rw=0x%x, bi_vcnt=%u,"
+                              "submit_bio(rw=%d,0x%x, bi_vcnt=%u,"
                               " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
-                              rw, bio->bi_vcnt,
+                              bio_op(bio), bio->bi_rw, bio->bi_vcnt,
                               (unsigned long long)bio->bi_iter.bi_sector,
                               dev_bytenr, bio->bi_bdev);
 
@@ -2975,18 +2976,18 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
                btrfsic_process_written_block(dev_state, dev_bytenr,
                                              mapped_datav, bio->bi_vcnt,
                                              bio, &bio_is_patched,
-                                             NULL, rw);
+                                             NULL, bio->bi_rw);
                while (i > 0) {
                        i--;
                        kunmap(bio->bi_io_vec[i].bv_page);
                }
                kfree(mapped_datav);
-       } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
+       } else if (NULL != dev_state && (bio->bi_rw & REQ_PREFLUSH)) {
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
                        printk(KERN_INFO
-                              "submit_bio(rw=0x%x FLUSH, bdev=%p)\n",
-                              rw, bio->bi_bdev);
+                              "submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
+                              bio_op(bio), bio->bi_rw, bio->bi_bdev);
                if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
                        if ((dev_state->state->print_mask &
                             (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
@@ -3004,7 +3005,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
                        block->never_written = 0;
                        block->iodone_w_error = 0;
                        block->flush_gen = dev_state->last_flush_gen + 1;
-                       block->submit_bio_bh_rw = rw;
+                       block->submit_bio_bh_rw = bio->bi_rw;
                        block->orig_bio_bh_private = bio->bi_private;
                        block->orig_bio_bh_end_io.bio = bio->bi_end_io;
                        block->next_in_same_bio = NULL;
@@ -3016,16 +3017,16 @@ leave:
        mutex_unlock(&btrfsic_mutex);
 }
 
-void btrfsic_submit_bio(int rw, struct bio *bio)
+void btrfsic_submit_bio(struct bio *bio)
 {
-       __btrfsic_submit_bio(rw, bio);
-       submit_bio(rw, bio);
+       __btrfsic_submit_bio(bio);
+       submit_bio(bio);
 }
 
-int btrfsic_submit_bio_wait(int rw, struct bio *bio)
+int btrfsic_submit_bio_wait(struct bio *bio)
 {
-       __btrfsic_submit_bio(rw, bio);
-       return submit_bio_wait(rw, bio);
+       __btrfsic_submit_bio(bio);
+       return submit_bio_wait(bio);
 }
 
 int btrfsic_mount(struct btrfs_root *root,
index 13b8566c97ab433f7455eeb6762942d5e623d16a..f78dff1c7e86c29c0df865f7d19227e4af510a38 100644 (file)
@@ -20,9 +20,9 @@
 #define __BTRFS_CHECK_INTEGRITY__
 
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
-int btrfsic_submit_bh(int rw, struct buffer_head *bh);
-void btrfsic_submit_bio(int rw, struct bio *bio);
-int btrfsic_submit_bio_wait(int rw, struct bio *bio);
+int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh);
+void btrfsic_submit_bio(struct bio *bio);
+int btrfsic_submit_bio_wait(struct bio *bio);
 #else
 #define btrfsic_submit_bh submit_bh
 #define btrfsic_submit_bio submit_bio
index 658c39b70fba2e5878693aadbb317389b0970969..cefedabf0a92fd6aaa4c4986d4899fc9cac3b859 100644 (file)
@@ -363,6 +363,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                kfree(cb);
                return -ENOMEM;
        }
+       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        bio->bi_private = cb;
        bio->bi_end_io = end_compressed_bio_write;
        atomic_inc(&cb->pending_bios);
@@ -373,7 +374,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                page = compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                if (bio->bi_iter.bi_size)
-                       ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
+                       ret = io_tree->ops->merge_bio_hook(page, 0,
                                                           PAGE_SIZE,
                                                           bio, 0);
                else
@@ -401,13 +402,14 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                BUG_ON(ret); /* -ENOMEM */
                        }
 
-                       ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
+                       ret = btrfs_map_bio(root, bio, 0, 1);
                        BUG_ON(ret); /* -ENOMEM */
 
                        bio_put(bio);
 
                        bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
                        BUG_ON(!bio);
+                       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
                        bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -431,7 +433,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                BUG_ON(ret); /* -ENOMEM */
        }
 
-       ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
+       ret = btrfs_map_bio(root, bio, 0, 1);
        BUG_ON(ret); /* -ENOMEM */
 
        bio_put(bio);
@@ -646,6 +648,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
        if (!comp_bio)
                goto fail2;
+       bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
        comp_bio->bi_private = cb;
        comp_bio->bi_end_io = end_compressed_bio_read;
        atomic_inc(&cb->pending_bios);
@@ -656,7 +659,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                page->index = em_start >> PAGE_SHIFT;
 
                if (comp_bio->bi_iter.bi_size)
-                       ret = tree->ops->merge_bio_hook(READ, page, 0,
+                       ret = tree->ops->merge_bio_hook(page, 0,
                                                        PAGE_SIZE,
                                                        comp_bio, 0);
                else
@@ -687,8 +690,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                        sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
                                             root->sectorsize);
 
-                       ret = btrfs_map_bio(root, READ, comp_bio,
-                                           mirror_num, 0);
+                       ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
                        if (ret) {
                                bio->bi_error = ret;
                                bio_endio(comp_bio);
@@ -699,6 +701,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                        comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
                                                        GFP_NOFS);
                        BUG_ON(!comp_bio);
+                       bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
                        comp_bio->bi_private = cb;
                        comp_bio->bi_end_io = end_compressed_bio_read;
 
@@ -717,7 +720,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                BUG_ON(ret); /* -ENOMEM */
        }
 
-       ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
+       ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
        if (ret) {
                bio->bi_error = ret;
                bio_endio(comp_bio);
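Throughout compression.c the pattern is the same: every bio returned by compressed_bio_alloc() is stamped with bio_set_op_attrs(bio, REQ_OP_WRITE or REQ_OP_READ, 0) before it is merged against or handed to btrfs_map_bio(), and merge_bio_hook() loses its rw argument because the bio already knows its direction. A simplified userspace model of the helper pair, keeping op and flags as two plain fields rather than the kernel's packed bi_rw encoding:

#include <assert.h>

/* simplified model: op and flags kept as two plain fields instead of the
 * kernel's packed bi_rw encoding */
enum req_op { REQ_OP_READ, REQ_OP_WRITE };

struct bio {
        enum req_op     op;
        unsigned long   flags;          /* REQ_* flag bits in the real code */
};

static inline void bio_set_op_attrs(struct bio *bio, enum req_op op,
                                    unsigned long flags)
{
        bio->op = op;
        bio->flags = flags;
}

static inline enum req_op bio_op(const struct bio *bio)
{
        return bio->op;
}

int main(void)
{
        struct bio b = { 0 };

        /* the pattern used right after compressed_bio_alloc() above */
        bio_set_op_attrs(&b, REQ_OP_WRITE, 0);
        assert(bio_op(&b) == REQ_OP_WRITE);
        return 0;
}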
index 4274a7bfdaed8db3ef94d07d38e755db9125b4d0..b2620d1f883ff14ebb23788db6888d1877191f3a 100644
@@ -3091,7 +3091,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
                             struct btrfs_root *new_root,
                             struct btrfs_root *parent_root,
                             u64 new_dirid);
-int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
+int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
                         size_t size, struct bio *bio,
                         unsigned long bio_flags);
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
index 60ce1190307bb976a8fc9229e05ad21b8bedba55..9a726ded2c6d150e73bd2719a09cb95a8634ab57 100644
@@ -124,7 +124,6 @@ struct async_submit_bio {
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_start;
        extent_submit_bio_hook_t *submit_bio_done;
-       int rw;
        int mirror_num;
        unsigned long bio_flags;
        /*
@@ -727,7 +726,7 @@ static void end_workqueue_bio(struct bio *bio)
        fs_info = end_io_wq->info;
        end_io_wq->error = bio->bi_error;
 
-       if (bio->bi_rw & REQ_WRITE) {
+       if (bio_op(bio) == REQ_OP_WRITE) {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
                        wq = fs_info->endio_meta_write_workers;
                        func = btrfs_endio_meta_write_helper;
@@ -797,7 +796,7 @@ static void run_one_async_start(struct btrfs_work *work)
        int ret;
 
        async = container_of(work, struct  async_submit_bio, work);
-       ret = async->submit_bio_start(async->inode, async->rw, async->bio,
+       ret = async->submit_bio_start(async->inode, async->bio,
                                      async->mirror_num, async->bio_flags,
                                      async->bio_offset);
        if (ret)
@@ -830,9 +829,8 @@ static void run_one_async_done(struct btrfs_work *work)
                return;
        }
 
-       async->submit_bio_done(async->inode, async->rw, async->bio,
-                              async->mirror_num, async->bio_flags,
-                              async->bio_offset);
+       async->submit_bio_done(async->inode, async->bio, async->mirror_num,
+                              async->bio_flags, async->bio_offset);
 }
 
 static void run_one_async_free(struct btrfs_work *work)
@@ -844,7 +842,7 @@ static void run_one_async_free(struct btrfs_work *work)
 }
 
 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
-                       int rw, struct bio *bio, int mirror_num,
+                       struct bio *bio, int mirror_num,
                        unsigned long bio_flags,
                        u64 bio_offset,
                        extent_submit_bio_hook_t *submit_bio_start,
@@ -857,7 +855,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                return -ENOMEM;
 
        async->inode = inode;
-       async->rw = rw;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;
@@ -873,7 +870,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
        atomic_inc(&fs_info->nr_async_submits);
 
-       if (rw & REQ_SYNC)
+       if (bio->bi_rw & REQ_SYNC)
                btrfs_set_work_high_priority(&async->work);
 
        btrfs_queue_work(fs_info->workers, &async->work);
@@ -903,9 +900,8 @@ static int btree_csum_one_bio(struct bio *bio)
        return ret;
 }
 
-static int __btree_submit_bio_start(struct inode *inode, int rw,
-                                   struct bio *bio, int mirror_num,
-                                   unsigned long bio_flags,
+static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
+                                   int mirror_num, unsigned long bio_flags,
                                    u64 bio_offset)
 {
        /*
@@ -915,7 +911,7 @@ static int __btree_submit_bio_start(struct inode *inode, int rw,
        return btree_csum_one_bio(bio);
 }
 
-static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
+static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
 {
@@ -925,7 +921,7 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
-       ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
+       ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 1);
        if (ret) {
                bio->bi_error = ret;
                bio_endio(bio);
@@ -944,14 +940,14 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
        return 1;
 }
 
-static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
 {
        int async = check_async_write(inode, bio_flags);
        int ret;
 
-       if (!(rw & REQ_WRITE)) {
+       if (bio_op(bio) != REQ_OP_WRITE) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
@@ -960,21 +956,19 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                          bio, BTRFS_WQ_ENDIO_METADATA);
                if (ret)
                        goto out_w_error;
-               ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
-                                   mirror_num, 0);
+               ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
        } else if (!async) {
                ret = btree_csum_one_bio(bio);
                if (ret)
                        goto out_w_error;
-               ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
-                                   mirror_num, 0);
+               ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
        } else {
                /*
                 * kthread helpers are used to submit writes so that
                 * checksumming can happen in parallel across all CPUs
                 */
                ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
-                                         inode, rw, bio, mirror_num, 0,
+                                         inode, bio, mirror_num, 0,
                                          bio_offset,
                                          __btree_submit_bio_start,
                                          __btree_submit_bio_done);
@@ -3418,9 +3412,9 @@ static int write_dev_supers(struct btrfs_device *device,
                 * to go down lazy.
                 */
                if (i == 0)
-                       ret = btrfsic_submit_bh(WRITE_FUA, bh);
+                       ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh);
                else
-                       ret = btrfsic_submit_bh(WRITE_SYNC, bh);
+                       ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
                if (ret)
                        errors++;
        }
@@ -3484,12 +3478,13 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 
        bio->bi_end_io = btrfs_end_empty_barrier;
        bio->bi_bdev = device->bdev;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
        init_completion(&device->flush_wait);
        bio->bi_private = &device->flush_wait;
        device->flush_bio = bio;
 
        bio_get(bio);
-       btrfsic_submit_bio(WRITE_FLUSH, bio);
+       btrfsic_submit_bio(bio);
 
        return 0;
 }
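In disk-io.c the async submit machinery stops carrying its own rw copy: struct async_submit_bio drops the rw field, the high-priority boost reads REQ_SYNC straight from bio->bi_rw, and write_dev_flush() tags its empty barrier bio with REQ_OP_WRITE plus the flush flags before btrfsic_submit_bio(). A rough model of those two checks, with placeholder flag values rather than the real block-layer bits:

#include <stdbool.h>
#include <stdio.h>

#define REQ_SYNC        (1UL << 0)      /* placeholder bit, not the kernel value */
#define WRITE_FLUSH     (1UL << 1)      /* placeholder bit, not the kernel value */

enum req_op { REQ_OP_READ, REQ_OP_WRITE };

struct bio {
        enum req_op     op;             /* bio_set_op_attrs() sets this in the kernel */
        unsigned long   bi_rw;          /* flag bits */
};

/* was: if (rw & REQ_SYNC); the flag is now read off the bio itself */
static bool high_priority(const struct bio *bio)
{
        return bio->bi_rw & REQ_SYNC;
}

int main(void)
{
        /* the write_dev_flush() pattern: an empty write bio tagged as a flush */
        struct bio flush = { .op = REQ_OP_WRITE, .bi_rw = WRITE_FLUSH };
        /* a bio whose submitter marked it synchronous */
        struct bio sync  = { .op = REQ_OP_WRITE, .bi_rw = REQ_SYNC };

        printf("flush tagged=%d, boosted=%d\n",
               !!(flush.bi_rw & WRITE_FLUSH), high_priority(&sync));
        return 0;
}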
index acba821499a909e3a8d42e62e34c8f8e3f27bfa2..dbf3e1aab69e904a75a6828bbcb6f955166c8b25 100644
@@ -122,7 +122,7 @@ void btrfs_csum_final(u32 crc, char *result);
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        enum btrfs_wq_endio_type metadata);
 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
-                       int rw, struct bio *bio, int mirror_num,
+                       struct bio *bio, int mirror_num,
                        unsigned long bio_flags, u64 bio_offset,
                        extent_submit_bio_hook_t *submit_bio_start,
                        extent_submit_bio_hook_t *submit_bio_done);
index 82b912a293ab3f2f06cd54aacf14fcda0827e85d..b480fd55577480d11b7bdf3440eac2cb954af785 100644
@@ -2048,7 +2048,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
         */
        btrfs_bio_counter_inc_blocked(root->fs_info);
        /* Tell the block device(s) that the sectors can be discarded */
-       ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
+       ret = btrfs_map_block(root->fs_info, REQ_OP_DISCARD,
                              bytenr, &num_bytes, &bbio, 0);
        /* Error condition is -ENOMEM */
        if (!ret) {
index 75533adef9988ae18b5a3ccab9a1b6128d7d02d8..27c214941004e4db45ae1b0b2740b8b88baac250 100644
@@ -2049,9 +2049,10 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
                return -EIO;
        }
        bio->bi_bdev = dev->bdev;
+       bio->bi_rw = WRITE_SYNC;
        bio_add_page(bio, page, length, pg_offset);
 
-       if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
+       if (btrfsic_submit_bio_wait(bio)) {
                /* try to remap that extent elsewhere? */
                btrfs_bio_counter_dec(fs_info);
                bio_put(bio);
@@ -2386,7 +2387,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
        int read_mode;
        int ret;
 
-       BUG_ON(failed_bio->bi_rw & REQ_WRITE);
+       BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
 
        ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
        if (ret)
@@ -2412,12 +2413,12 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
                free_io_failure(inode, failrec);
                return -EIO;
        }
+       bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
 
        pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
                 read_mode, failrec->this_mirror, failrec->in_validation);
 
-       ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
-                                        failrec->this_mirror,
+       ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
                                         failrec->bio_flags, 0);
        if (ret) {
                free_io_failure(inode, failrec);
@@ -2723,8 +2724,8 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 }
 
 
-static int __must_check submit_one_bio(int rw, struct bio *bio,
-                                      int mirror_num, unsigned long bio_flags)
+static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
+                                      unsigned long bio_flags)
 {
        int ret = 0;
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -2735,33 +2736,32 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
        start = page_offset(page) + bvec->bv_offset;
 
        bio->bi_private = NULL;
-
        bio_get(bio);
 
        if (tree->ops && tree->ops->submit_bio_hook)
-               ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
+               ret = tree->ops->submit_bio_hook(page->mapping->host, bio,
                                           mirror_num, bio_flags, start);
        else
-               btrfsic_submit_bio(rw, bio);
+               btrfsic_submit_bio(bio);
 
        bio_put(bio);
        return ret;
 }
 
-static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
+static int merge_bio(struct extent_io_tree *tree, struct page *page,
                     unsigned long offset, size_t size, struct bio *bio,
                     unsigned long bio_flags)
 {
        int ret = 0;
        if (tree->ops && tree->ops->merge_bio_hook)
-               ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
+               ret = tree->ops->merge_bio_hook(page, offset, size, bio,
                                                bio_flags);
        BUG_ON(ret < 0);
        return ret;
 
 }
 
-static int submit_extent_page(int rw, struct extent_io_tree *tree,
+static int submit_extent_page(int op, int op_flags, struct extent_io_tree *tree,
                              struct writeback_control *wbc,
                              struct page *page, sector_t sector,
                              size_t size, unsigned long offset,
@@ -2789,10 +2789,9 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 
                if (prev_bio_flags != bio_flags || !contig ||
                    force_bio_submit ||
-                   merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
+                   merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
                    bio_add_page(bio, page, page_size, offset) < page_size) {
-                       ret = submit_one_bio(rw, bio, mirror_num,
-                                            prev_bio_flags);
+                       ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
                        if (ret < 0) {
                                *bio_ret = NULL;
                                return ret;
@@ -2813,6 +2812,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
        bio_add_page(bio, page, page_size, offset);
        bio->bi_end_io = end_io_func;
        bio->bi_private = tree;
+       bio_set_op_attrs(bio, op, op_flags);
        if (wbc) {
                wbc_init_bio(wbc, bio);
                wbc_account_io(wbc, page, page_size);
@@ -2821,7 +2821,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
        if (bio_ret)
                *bio_ret = bio;
        else
-               ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
+               ret = submit_one_bio(bio, mirror_num, bio_flags);
 
        return ret;
 }
@@ -2885,7 +2885,7 @@ static int __do_readpage(struct extent_io_tree *tree,
                         get_extent_t *get_extent,
                         struct extent_map **em_cached,
                         struct bio **bio, int mirror_num,
-                        unsigned long *bio_flags, int rw,
+                        unsigned long *bio_flags, int read_flags,
                         u64 *prev_em_start)
 {
        struct inode *inode = page->mapping->host;
@@ -3068,8 +3068,8 @@ static int __do_readpage(struct extent_io_tree *tree,
                }
 
                pnr -= page->index;
-               ret = submit_extent_page(rw, tree, NULL, page,
-                                        sector, disk_io_size, pg_offset,
+               ret = submit_extent_page(REQ_OP_READ, read_flags, tree, NULL,
+                                        page, sector, disk_io_size, pg_offset,
                                         bdev, bio, pnr,
                                         end_bio_extent_readpage, mirror_num,
                                         *bio_flags,
@@ -3100,7 +3100,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
                                             get_extent_t *get_extent,
                                             struct extent_map **em_cached,
                                             struct bio **bio, int mirror_num,
-                                            unsigned long *bio_flags, int rw,
+                                            unsigned long *bio_flags,
                                             u64 *prev_em_start)
 {
        struct inode *inode;
@@ -3121,7 +3121,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 
        for (index = 0; index < nr_pages; index++) {
                __do_readpage(tree, pages[index], get_extent, em_cached, bio,
-                             mirror_num, bio_flags, rw, prev_em_start);
+                             mirror_num, bio_flags, 0, prev_em_start);
                put_page(pages[index]);
        }
 }
@@ -3131,7 +3131,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
                               int nr_pages, get_extent_t *get_extent,
                               struct extent_map **em_cached,
                               struct bio **bio, int mirror_num,
-                              unsigned long *bio_flags, int rw,
+                              unsigned long *bio_flags,
                               u64 *prev_em_start)
 {
        u64 start = 0;
@@ -3153,7 +3153,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
                                                  index - first_index, start,
                                                  end, get_extent, em_cached,
                                                  bio, mirror_num, bio_flags,
-                                                 rw, prev_em_start);
+                                                 prev_em_start);
                        start = page_start;
                        end = start + PAGE_SIZE - 1;
                        first_index = index;
@@ -3164,7 +3164,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
                __do_contiguous_readpages(tree, &pages[first_index],
                                          index - first_index, start,
                                          end, get_extent, em_cached, bio,
-                                         mirror_num, bio_flags, rw,
+                                         mirror_num, bio_flags,
                                          prev_em_start);
 }
 
@@ -3172,7 +3172,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                                   struct page *page,
                                   get_extent_t *get_extent,
                                   struct bio **bio, int mirror_num,
-                                  unsigned long *bio_flags, int rw)
+                                  unsigned long *bio_flags, int read_flags)
 {
        struct inode *inode = page->mapping->host;
        struct btrfs_ordered_extent *ordered;
@@ -3192,7 +3192,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
        }
 
        ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
-                           bio_flags, rw, NULL);
+                           bio_flags, read_flags, NULL);
        return ret;
 }
 
@@ -3204,9 +3204,9 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
        int ret;
 
        ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
-                                     &bio_flags, READ);
+                                     &bio_flags, 0);
        if (bio)
-               ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
+               ret = submit_one_bio(bio, mirror_num, bio_flags);
        return ret;
 }
 
@@ -3440,8 +3440,8 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
                               page->index, cur, end);
                }
 
-               ret = submit_extent_page(write_flags, tree, wbc, page,
-                                        sector, iosize, pg_offset,
+               ret = submit_extent_page(REQ_OP_WRITE, write_flags, tree, wbc,
+                                        page, sector, iosize, pg_offset,
                                         bdev, &epd->bio, max_nr,
                                         end_bio_extent_writepage,
                                         0, 0, 0, false);
@@ -3480,13 +3480,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
        size_t pg_offset = 0;
        loff_t i_size = i_size_read(inode);
        unsigned long end_index = i_size >> PAGE_SHIFT;
-       int write_flags;
+       int write_flags = 0;
        unsigned long nr_written = 0;
 
        if (wbc->sync_mode == WB_SYNC_ALL)
                write_flags = WRITE_SYNC;
-       else
-               write_flags = WRITE;
 
        trace___extent_writepage(page, inode, wbc);
 
@@ -3730,7 +3728,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
        u64 offset = eb->start;
        unsigned long i, num_pages;
        unsigned long bio_flags = 0;
-       int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
+       int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
        int ret = 0;
 
        clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
@@ -3744,9 +3742,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 
                clear_page_dirty_for_io(p);
                set_page_writeback(p);
-               ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
-                                        PAGE_SIZE, 0, bdev, &epd->bio,
-                                        -1, end_bio_extent_buffer_writepage,
+               ret = submit_extent_page(REQ_OP_WRITE, write_flags, tree, wbc,
+                                        p, offset >> 9, PAGE_SIZE, 0, bdev,
+                                        &epd->bio, -1,
+                                        end_bio_extent_buffer_writepage,
                                         0, epd->bio_flags, bio_flags, false);
                epd->bio_flags = bio_flags;
                if (ret) {
@@ -4056,13 +4055,12 @@ retry:
 static void flush_epd_write_bio(struct extent_page_data *epd)
 {
        if (epd->bio) {
-               int rw = WRITE;
                int ret;
 
-               if (epd->sync_io)
-                       rw = WRITE_SYNC;
+               bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
+                                epd->sync_io ? WRITE_SYNC : 0);
 
-               ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
+               ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
                BUG_ON(ret < 0); /* -ENOMEM */
                epd->bio = NULL;
        }
@@ -4189,19 +4187,19 @@ int extent_readpages(struct extent_io_tree *tree,
                if (nr < ARRAY_SIZE(pagepool))
                        continue;
                __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-                                  &bio, 0, &bio_flags, READ, &prev_em_start);
+                                  &bio, 0, &bio_flags, &prev_em_start);
                nr = 0;
        }
        if (nr)
                __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-                                  &bio, 0, &bio_flags, READ, &prev_em_start);
+                                  &bio, 0, &bio_flags, &prev_em_start);
 
        if (em_cached)
                free_extent_map(em_cached);
 
        BUG_ON(!list_empty(pages));
        if (bio)
-               return submit_one_bio(READ, bio, 0, bio_flags);
+               return submit_one_bio(bio, 0, bio_flags);
        return 0;
 }
 
@@ -5236,7 +5234,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
                        err = __extent_read_full_page(tree, page,
                                                      get_extent, &bio,
                                                      mirror_num, &bio_flags,
-                                                     READ | REQ_META);
+                                                     REQ_META);
                        if (err)
                                ret = err;
                } else {
@@ -5245,8 +5243,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
        }
 
        if (bio) {
-               err = submit_one_bio(READ | REQ_META, bio, mirror_num,
-                                    bio_flags);
+               err = submit_one_bio(bio, mirror_num, bio_flags);
                if (err)
                        return err;
        }
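The extent_io.c conversion splits the old rw parameter into an op and a set of op flags: submit_extent_page() now takes both, applies them with bio_set_op_attrs() only when it actually builds a bio, and its callers pass REQ_OP_READ with read flags such as REQ_META for metadata, or REQ_OP_WRITE with WRITE_SYNC when the writeback control asks for synchronous writeout. A small model of that split, with placeholder flag values:

#include <stdio.h>

#define REQ_META        (1UL << 0)      /* placeholder values, not the kernel bits */
#define WRITE_SYNC      (1UL << 1)

enum req_op { REQ_OP_READ, REQ_OP_WRITE };

struct bio {
        enum req_op     op;
        unsigned long   flags;
};

/* models submit_extent_page(op, op_flags, ...): op and flags stay separate
 * until the bio is built, then are applied together (bio_set_op_attrs() in
 * the real code) */
static void submit_extent_page(enum req_op op, unsigned long op_flags, struct bio *bio)
{
        bio->op = op;
        bio->flags = op_flags;
        printf("op=%d flags=%#lx\n", bio->op, bio->flags);
}

int main(void)
{
        struct bio rd = { 0 }, wr = { 0 };

        submit_extent_page(REQ_OP_READ, REQ_META, &rd);         /* metadata read */
        submit_extent_page(REQ_OP_WRITE, WRITE_SYNC, &wr);      /* WB_SYNC_ALL write */
        return 0;
}

Keeping the op out of the flag word is also what lets __extent_writepage() default write_flags to 0 instead of WRITE in the hunks above.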
index c0c1c4fef6cea0a6ab542a38596093b186b5d7d1..bc2729a7612db5e472fda6b359a7862bcead0cd4 100644
@@ -63,16 +63,16 @@ struct btrfs_root;
 struct btrfs_io_bio;
 struct io_failure_record;
 
-typedef        int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
-                                      struct bio *bio, int mirror_num,
-                                      unsigned long bio_flags, u64 bio_offset);
+typedef        int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio,
+                                      int mirror_num, unsigned long bio_flags,
+                                      u64 bio_offset);
 struct extent_io_ops {
        int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
                             u64 start, u64 end, int *page_started,
                             unsigned long *nr_written);
        int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
        extent_submit_bio_hook_t *submit_bio_hook;
-       int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset,
+       int (*merge_bio_hook)(struct page *page, unsigned long offset,
                              size_t size, struct bio *bio,
                              unsigned long bio_flags);
        int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
index 4421954720b8b719a5c91dcc747c3a494884143c..df731c0ebec7964844121eafa45a12d7bec1408b 100644
@@ -1823,7 +1823,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
  * we don't create bios that span stripes or chunks
  */
-int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
+int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
                         size_t size, struct bio *bio,
                         unsigned long bio_flags)
 {
@@ -1838,7 +1838,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
 
        length = bio->bi_iter.bi_size;
        map_length = length;
-       ret = btrfs_map_block(root->fs_info, rw, logical,
+       ret = btrfs_map_block(root->fs_info, bio_op(bio), logical,
                              &map_length, NULL, 0);
        /* Will always return 0 with map_multi == NULL */
        BUG_ON(ret < 0);
@@ -1855,9 +1855,8 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
  * At IO completion time the cums attached on the ordered extent record
  * are inserted into the btree
  */
-static int __btrfs_submit_bio_start(struct inode *inode, int rw,
-                                   struct bio *bio, int mirror_num,
-                                   unsigned long bio_flags,
+static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
+                                   int mirror_num, unsigned long bio_flags,
                                    u64 bio_offset)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -1876,14 +1875,14 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw,
  * At IO completion time the cums attached on the ordered extent record
  * are inserted into the btree
  */
-static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
+static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
                          int mirror_num, unsigned long bio_flags,
                          u64 bio_offset)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;
 
-       ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
+       ret = btrfs_map_bio(root, bio, mirror_num, 1);
        if (ret) {
                bio->bi_error = ret;
                bio_endio(bio);
@@ -1895,7 +1894,7 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
  * extent_io.c submission hook. This does the right thing for csum calculation
  * on write, or reading the csums from the tree before a read
  */
-static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
                          int mirror_num, unsigned long bio_flags,
                          u64 bio_offset)
 {
@@ -1910,7 +1909,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
        if (btrfs_is_free_space_inode(inode))
                metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
 
-       if (!(rw & REQ_WRITE)) {
+       if (bio_op(bio) != REQ_OP_WRITE) {
                ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
                if (ret)
                        goto out;
@@ -1932,7 +1931,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                        goto mapit;
                /* we're doing a write, do the async checksumming */
                ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
-                                  inode, rw, bio, mirror_num,
+                                  inode, bio, mirror_num,
                                   bio_flags, bio_offset,
                                   __btrfs_submit_bio_start,
                                   __btrfs_submit_bio_done);
@@ -1944,7 +1943,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
        }
 
 mapit:
-       ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
+       ret = btrfs_map_bio(root, bio, mirror_num, 0);
 
 out:
        if (ret < 0) {
@@ -7790,12 +7789,12 @@ err:
 }
 
 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
-                                       int rw, int mirror_num)
+                                       int mirror_num)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;
 
-       BUG_ON(rw & REQ_WRITE);
+       BUG_ON(bio_op(bio) == REQ_OP_WRITE);
 
        bio_get(bio);
 
@@ -7804,7 +7803,7 @@ static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
        if (ret)
                goto err;
 
-       ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
+       ret = btrfs_map_bio(root, bio, mirror_num, 0);
 err:
        bio_put(bio);
        return ret;
@@ -7855,7 +7854,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
        int read_mode;
        int ret;
 
-       BUG_ON(failed_bio->bi_rw & REQ_WRITE);
+       BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
 
        ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
        if (ret)
@@ -7883,13 +7882,13 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
                free_io_failure(inode, failrec);
                return -EIO;
        }
+       bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
 
        btrfs_debug(BTRFS_I(inode)->root->fs_info,
                    "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
                    read_mode, failrec->this_mirror, failrec->in_validation);
 
-       ret = submit_dio_repair_bio(inode, bio, read_mode,
-                                   failrec->this_mirror);
+       ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
        if (ret) {
                free_io_failure(inode, failrec);
                bio_put(bio);
@@ -8179,7 +8178,7 @@ static void btrfs_endio_direct_write(struct bio *bio)
        bio_put(bio);
 }
 
-static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
+static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags, u64 offset)
 {
@@ -8197,8 +8196,8 @@ static void btrfs_end_dio_bio(struct bio *bio)
 
        if (err)
                btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
-                          "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
-                          btrfs_ino(dip->inode), bio->bi_rw,
+                          "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
+                          btrfs_ino(dip->inode), bio_op(bio), bio->bi_rw,
                           (unsigned long long)bio->bi_iter.bi_sector,
                           bio->bi_iter.bi_size, err);
 
@@ -8272,11 +8271,11 @@ static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
 }
 
 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
-                                        int rw, u64 file_offset, int skip_sum,
+                                        u64 file_offset, int skip_sum,
                                         int async_submit)
 {
        struct btrfs_dio_private *dip = bio->bi_private;
-       int write = rw & REQ_WRITE;
+       bool write = bio_op(bio) == REQ_OP_WRITE;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;
 
@@ -8297,8 +8296,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
 
        if (write && async_submit) {
                ret = btrfs_wq_submit_bio(root->fs_info,
-                                  inode, rw, bio, 0, 0,
-                                  file_offset,
+                                  inode, bio, 0, 0, file_offset,
                                   __btrfs_submit_bio_start_direct_io,
                                   __btrfs_submit_bio_done);
                goto err;
@@ -8317,13 +8315,13 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
                        goto err;
        }
 map:
-       ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
+       ret = btrfs_map_bio(root, bio, 0, async_submit);
 err:
        bio_put(bio);
        return ret;
 }
 
-static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
+static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
                                    int skip_sum)
 {
        struct inode *inode = dip->inode;
@@ -8342,8 +8340,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        int i;
 
        map_length = orig_bio->bi_iter.bi_size;
-       ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
-                             &map_length, NULL, 0);
+       ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
+                             start_sector << 9, &map_length, NULL, 0);
        if (ret)
                return -EIO;
 
@@ -8363,6 +8361,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        if (!bio)
                return -ENOMEM;
 
+       bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
        bio->bi_private = dip;
        bio->bi_end_io = btrfs_end_dio_bio;
        btrfs_io_bio(bio)->logical = file_offset;
@@ -8382,7 +8381,7 @@ next_block:
                         * before we're done setting it up
                         */
                        atomic_inc(&dip->pending_bios);
-                       ret = __btrfs_submit_dio_bio(bio, inode, rw,
+                       ret = __btrfs_submit_dio_bio(bio, inode,
                                                     file_offset, skip_sum,
                                                     async_submit);
                        if (ret) {
@@ -8400,12 +8399,13 @@ next_block:
                                                  start_sector, GFP_NOFS);
                        if (!bio)
                                goto out_err;
+                       bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
                        bio->bi_private = dip;
                        bio->bi_end_io = btrfs_end_dio_bio;
                        btrfs_io_bio(bio)->logical = file_offset;
 
                        map_length = orig_bio->bi_iter.bi_size;
-                       ret = btrfs_map_block(root->fs_info, rw,
+                       ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
                                              start_sector << 9,
                                              &map_length, NULL, 0);
                        if (ret) {
@@ -8425,7 +8425,7 @@ next_block:
        }
 
 submit:
-       ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
+       ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
                                     async_submit);
        if (!ret)
                return 0;
@@ -8445,14 +8445,14 @@ out_err:
        return 0;
 }
 
-static void btrfs_submit_direct(int rw, struct bio *dio_bio,
-                               struct inode *inode, loff_t file_offset)
+static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
+                               loff_t file_offset)
 {
        struct btrfs_dio_private *dip = NULL;
        struct bio *io_bio = NULL;
        struct btrfs_io_bio *btrfs_bio;
        int skip_sum;
-       int write = rw & REQ_WRITE;
+       bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
        int ret = 0;
 
        skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
@@ -8503,7 +8503,7 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
                        dio_data->unsubmitted_oe_range_end;
        }
 
-       ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
+       ret = btrfs_submit_direct_hook(dip, skip_sum);
        if (!ret)
                return;
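For direct IO, inode.c splits the original bio into per-stripe bios; each new bio now inherits its direction from orig_bio via bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw), and the later write/read decisions become equality tests on bio_op() instead of 'rw & REQ_WRITE'. A toy illustration of that inheritance (field layout simplified, the real helper calls shown only in comments):

#include <stdbool.h>
#include <stdio.h>

enum req_op { REQ_OP_READ, REQ_OP_WRITE };

struct bio {
        enum req_op     op;             /* bio_set_op_attrs()/bio_op() wrap this in the kernel */
        unsigned long   bi_rw;          /* flag bits */
};

int main(void)
{
        struct bio orig  = { .op = REQ_OP_WRITE, .bi_rw = 0 };
        struct bio split = { .op = REQ_OP_READ, .bi_rw = 0 };

        /* as in btrfs_submit_direct_hook(): each per-stripe bio inherits the
         * op and flags of orig_bio; the real code spells this
         * bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw) */
        split.op = orig.op;
        split.bi_rw = orig.bi_rw;

        /* replaces the old 'rw & REQ_WRITE' test */
        bool write = (split.op == REQ_OP_WRITE);

        printf("split bio is a %s\n", write ? "write" : "read");
        return 0;
}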
 
index f8b6d411a034b46a565f7df4066e0f6ecee8f189..cd8d302a1f61588bf24e8a563a53b93ec30f9318 100644
@@ -1320,7 +1320,9 @@ write_data:
 
                bio->bi_private = rbio;
                bio->bi_end_io = raid_write_end_io;
-               submit_bio(WRITE, bio);
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+               submit_bio(bio);
        }
        return;
 
@@ -1573,11 +1575,12 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
 
                bio->bi_private = rbio;
                bio->bi_end_io = raid_rmw_end_io;
+               bio_set_op_attrs(bio, REQ_OP_READ, 0);
 
                btrfs_bio_wq_end_io(rbio->fs_info, bio,
                                    BTRFS_WQ_ENDIO_RAID56);
 
-               submit_bio(READ, bio);
+               submit_bio(bio);
        }
        /* the actual write will happen once the reads are done */
        return 0;
@@ -2097,11 +2100,12 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
 
                bio->bi_private = rbio;
                bio->bi_end_io = raid_recover_end_io;
+               bio_set_op_attrs(bio, REQ_OP_READ, 0);
 
                btrfs_bio_wq_end_io(rbio->fs_info, bio,
                                    BTRFS_WQ_ENDIO_RAID56);
 
-               submit_bio(READ, bio);
+               submit_bio(bio);
        }
 out:
        return 0;
@@ -2433,7 +2437,9 @@ submit_write:
 
                bio->bi_private = rbio;
                bio->bi_end_io = raid_write_end_io;
-               submit_bio(WRITE, bio);
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+               submit_bio(bio);
        }
        return;
 
@@ -2610,11 +2616,12 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
 
                bio->bi_private = rbio;
                bio->bi_end_io = raid56_parity_scrub_end_io;
+               bio_set_op_attrs(bio, REQ_OP_READ, 0);
 
                btrfs_bio_wq_end_io(rbio->fs_info, bio,
                                    BTRFS_WQ_ENDIO_RAID56);
 
-               submit_bio(READ, bio);
+               submit_bio(bio);
        }
        /* the actual write will happen once the reads are done */
        return;
index 70427ef66b044db3baadd071e9b58b3fa9ce4bfd..e08b6bc676e3faa4d93c211b11a59a90c1a62891 100644
@@ -1504,8 +1504,9 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
                                sblock->no_io_error_seen = 0;
                } else {
                        bio->bi_iter.bi_sector = page->physical >> 9;
+                       bio_set_op_attrs(bio, REQ_OP_READ, 0);
 
-                       if (btrfsic_submit_bio_wait(READ, bio))
+                       if (btrfsic_submit_bio_wait(bio))
                                sblock->no_io_error_seen = 0;
                }
 
@@ -1583,6 +1584,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                        return -EIO;
                bio->bi_bdev = page_bad->dev->bdev;
                bio->bi_iter.bi_sector = page_bad->physical >> 9;
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
                ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
                if (PAGE_SIZE != ret) {
@@ -1590,7 +1592,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                        return -EIO;
                }
 
-               if (btrfsic_submit_bio_wait(WRITE, bio)) {
+               if (btrfsic_submit_bio_wait(bio)) {
                        btrfs_dev_stat_inc_and_print(page_bad->dev,
                                BTRFS_DEV_STAT_WRITE_ERRS);
                        btrfs_dev_replace_stats_inc(
@@ -1684,6 +1686,7 @@ again:
                bio->bi_end_io = scrub_wr_bio_end_io;
                bio->bi_bdev = sbio->dev->bdev;
                bio->bi_iter.bi_sector = sbio->physical >> 9;
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                sbio->err = 0;
        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                   spage->physical_for_dev_replace ||
@@ -1731,7 +1734,7 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
         * orders the requests before sending them to the driver which
         * doubled the write performance on spinning disks when measured
         * with Linux 3.5 */
-       btrfsic_submit_bio(WRITE, sbio->bio);
+       btrfsic_submit_bio(sbio->bio);
 }
 
 static void scrub_wr_bio_end_io(struct bio *bio)
@@ -2041,7 +2044,7 @@ static void scrub_submit(struct scrub_ctx *sctx)
        sbio = sctx->bios[sctx->curr];
        sctx->curr = -1;
        scrub_pending_bio_inc(sctx);
-       btrfsic_submit_bio(READ, sbio->bio);
+       btrfsic_submit_bio(sbio->bio);
 }
 
 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
@@ -2088,6 +2091,7 @@ again:
                bio->bi_end_io = scrub_bio_end_io;
                bio->bi_bdev = sbio->dev->bdev;
                bio->bi_iter.bi_sector = sbio->physical >> 9;
+               bio_set_op_attrs(bio, REQ_OP_READ, 0);
                sbio->err = 0;
        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                   spage->physical ||
@@ -4436,6 +4440,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
        bio->bi_bdev = dev->bdev;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
        ret = bio_add_page(bio, page, PAGE_SIZE, 0);
        if (ret != PAGE_SIZE) {
 leave_with_eio:
@@ -4444,7 +4449,7 @@ leave_with_eio:
                return -EIO;
        }
 
-       if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
+       if (btrfsic_submit_bio_wait(bio))
                goto leave_with_eio;
 
        bio_put(bio);
index 589f128173b10e1648ce2714cd61c042d5c9e6da..0fb4a959012e0db9cff59828d6734d0cdfc761f5 100644
@@ -462,7 +462,7 @@ loop_lock:
                        sync_pending = 0;
                }
 
-               btrfsic_submit_bio(cur->bi_rw, cur);
+               btrfsic_submit_bio(cur);
                num_run++;
                batch_run++;
 
@@ -5260,7 +5260,7 @@ void btrfs_put_bbio(struct btrfs_bio *bbio)
                kfree(bbio);
 }
 
-static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
                             u64 logical, u64 *length,
                             struct btrfs_bio **bbio_ret,
                             int mirror_num, int need_raid_map)
@@ -5346,7 +5346,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                raid56_full_stripe_start *= full_stripe_len;
        }
 
-       if (rw & REQ_DISCARD) {
+       if (op == REQ_OP_DISCARD) {
                /* we don't discard raid56 yet */
                if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                        ret = -EOPNOTSUPP;
@@ -5359,7 +5359,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                   For other RAID types and for RAID[56] reads, just allow a single
                   stripe (on a single disk). */
                if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
-                   (rw & REQ_WRITE)) {
+                   (op == REQ_OP_WRITE)) {
                        max_len = stripe_len * nr_data_stripes(map) -
                                (offset - raid56_full_stripe_start);
                } else {
@@ -5384,8 +5384,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                btrfs_dev_replace_set_lock_blocking(dev_replace);
 
        if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
-           !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
-           dev_replace->tgtdev != NULL) {
+           op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
+           op != REQ_GET_READ_MIRRORS && dev_replace->tgtdev != NULL) {
                /*
                 * in dev-replace case, for repair case (that's the only
                 * case where the mirror is selected explicitly when
@@ -5472,15 +5472,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                            (offset + *length);
 
        if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
-               if (rw & REQ_DISCARD)
+               if (op == REQ_OP_DISCARD)
                        num_stripes = min_t(u64, map->num_stripes,
                                            stripe_nr_end - stripe_nr_orig);
                stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
                                &stripe_index);
-               if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
+               if (op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
+                   op != REQ_GET_READ_MIRRORS)
                        mirror_num = 1;
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-               if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
+               if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
+                   op == REQ_GET_READ_MIRRORS)
                        num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
@@ -5493,7 +5495,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                }
 
        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
-               if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
+               if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
+                   op == REQ_GET_READ_MIRRORS) {
                        num_stripes = map->num_stripes;
                } else if (mirror_num) {
                        stripe_index = mirror_num - 1;
@@ -5507,9 +5510,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
                stripe_index *= map->sub_stripes;
 
-               if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
+               if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
                        num_stripes = map->sub_stripes;
-               else if (rw & REQ_DISCARD)
+               else if (op == REQ_OP_DISCARD)
                        num_stripes = min_t(u64, map->sub_stripes *
                                            (stripe_nr_end - stripe_nr_orig),
                                            map->num_stripes);
@@ -5527,7 +5530,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                if (need_raid_map &&
-                   ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
+                   (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS ||
                     mirror_num > 1)) {
                        /* push stripe_nr back to the start of the full stripe */
                        stripe_nr = div_u64(raid56_full_stripe_start,
@@ -5555,8 +5558,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                        /* We distribute the parity blocks across stripes */
                        div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
                                        &stripe_index);
-                       if (!(rw & (REQ_WRITE | REQ_DISCARD |
-                                   REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
+                       if ((op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
+                           op != REQ_GET_READ_MIRRORS) && mirror_num <= 1)
                                mirror_num = 1;
                }
        } else {
@@ -5579,9 +5582,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 
        num_alloc_stripes = num_stripes;
        if (dev_replace_is_ongoing) {
-               if (rw & (REQ_WRITE | REQ_DISCARD))
+               if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD)
                        num_alloc_stripes <<= 1;
-               if (rw & REQ_GET_READ_MIRRORS)
+               if (op == REQ_GET_READ_MIRRORS)
                        num_alloc_stripes++;
                tgtdev_indexes = num_stripes;
        }
@@ -5596,7 +5599,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 
        /* build raid_map */
        if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
-           need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
+           need_raid_map &&
+           ((op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) ||
            mirror_num > 1)) {
                u64 tmp;
                unsigned rot;
@@ -5621,7 +5625,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                                RAID6_Q_STRIPE;
        }
 
-       if (rw & REQ_DISCARD) {
+       if (op == REQ_OP_DISCARD) {
                u32 factor = 0;
                u32 sub_stripes = 0;
                u64 stripes_per_dev = 0;
@@ -5701,14 +5705,15 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                }
        }
 
-       if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
+       if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
                max_errors = btrfs_chunk_max_errors(map);
 
        if (bbio->raid_map)
                sort_parity_stripes(bbio, num_stripes);
 
        tgtdev_indexes = 0;
-       if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
+       if (dev_replace_is_ongoing &&
+          (op == REQ_OP_WRITE || op == REQ_OP_DISCARD) &&
            dev_replace->tgtdev != NULL) {
                int index_where_to_add;
                u64 srcdev_devid = dev_replace->srcdev->devid;
@@ -5743,7 +5748,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                        }
                }
                num_stripes = index_where_to_add;
-       } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
+       } else if (dev_replace_is_ongoing && (op == REQ_GET_READ_MIRRORS) &&
                   dev_replace->tgtdev != NULL) {
                u64 srcdev_devid = dev_replace->srcdev->devid;
                int index_srcdev = 0;
@@ -5815,21 +5820,21 @@ out:
        return ret;
 }
 
-int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
                      u64 logical, u64 *length,
                      struct btrfs_bio **bbio_ret, int mirror_num)
 {
-       return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
+       return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
                                 mirror_num, 0);
 }
 
 /* For Scrub/replace */
-int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
                     u64 logical, u64 *length,
                     struct btrfs_bio **bbio_ret, int mirror_num,
                     int need_raid_map)
 {
-       return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
+       return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
                                 mirror_num, need_raid_map);
 }
 
@@ -5943,7 +5948,7 @@ static void btrfs_end_bio(struct bio *bio)
                        BUG_ON(stripe_index >= bbio->num_stripes);
                        dev = bbio->stripes[stripe_index].dev;
                        if (dev->bdev) {
-                               if (bio->bi_rw & WRITE)
+                               if (bio_op(bio) == REQ_OP_WRITE)
                                        btrfs_dev_stat_inc(dev,
                                                BTRFS_DEV_STAT_WRITE_ERRS);
                                else
@@ -5997,7 +6002,7 @@ static void btrfs_end_bio(struct bio *bio)
  */
 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
                                        struct btrfs_device *device,
-                                       int rw, struct bio *bio)
+                                       struct bio *bio)
 {
        int should_queue = 1;
        struct btrfs_pending_bios *pending_bios;
@@ -6008,9 +6013,9 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
        }
 
        /* don't bother with additional async steps for reads, right now */
-       if (!(rw & REQ_WRITE)) {
+       if (bio_op(bio) == REQ_OP_READ) {
                bio_get(bio);
-               btrfsic_submit_bio(rw, bio);
+               btrfsic_submit_bio(bio);
                bio_put(bio);
                return;
        }
@@ -6024,7 +6029,6 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
        atomic_inc(&root->fs_info->nr_async_bios);
        WARN_ON(bio->bi_next);
        bio->bi_next = NULL;
-       bio->bi_rw |= rw;
 
        spin_lock(&device->io_lock);
        if (bio->bi_rw & REQ_SYNC)
@@ -6050,7 +6054,7 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
 
 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
                              struct bio *bio, u64 physical, int dev_nr,
-                             int rw, int async)
+                             int async)
 {
        struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
 
@@ -6064,8 +6068,8 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
 
                rcu_read_lock();
                name = rcu_dereference(dev->name);
-               pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
-                        "(%s id %llu), size=%u\n", rw,
+               pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu "
+                        "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_rw,
                         (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
                         name->str, dev->devid, bio->bi_iter.bi_size);
                rcu_read_unlock();
@@ -6076,9 +6080,9 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
        btrfs_bio_counter_inc_noblocked(root->fs_info);
 
        if (async)
-               btrfs_schedule_bio(root, dev, rw, bio);
+               btrfs_schedule_bio(root, dev, bio);
        else
-               btrfsic_submit_bio(rw, bio);
+               btrfsic_submit_bio(bio);
 }
 
 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
@@ -6095,7 +6099,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
        }
 }
 
-int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
+int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
                  int mirror_num, int async_submit)
 {
        struct btrfs_device *dev;
@@ -6112,8 +6116,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
        map_length = length;
 
        btrfs_bio_counter_inc_blocked(root->fs_info);
-       ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
-                             mirror_num, 1);
+       ret = __btrfs_map_block(root->fs_info, bio_op(bio), logical,
+                               &map_length, &bbio, mirror_num, 1);
        if (ret) {
                btrfs_bio_counter_dec(root->fs_info);
                return ret;
@@ -6127,10 +6131,10 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
        atomic_set(&bbio->stripes_pending, bbio->num_stripes);
 
        if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
-           ((rw & WRITE) || (mirror_num > 1))) {
+           ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
                /* In this case, map_length has been set to the length of
                   a single stripe; not the whole write */
-               if (rw & WRITE) {
+               if (bio_op(bio) == REQ_OP_WRITE) {
                        ret = raid56_parity_write(root, bio, bbio, map_length);
                } else {
                        ret = raid56_parity_recover(root, bio, bbio, map_length,
@@ -6149,7 +6153,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 
        for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
                dev = bbio->stripes[dev_nr].dev;
-               if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
+               if (!dev || !dev->bdev ||
+                   (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) {
                        bbio_error(bbio, first_bio, logical);
                        continue;
                }
@@ -6161,7 +6166,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                        bio = first_bio;
 
                submit_stripe_bio(root, bbio, bio,
-                                 bbio->stripes[dev_nr].physical, dev_nr, rw,
+                                 bbio->stripes[dev_nr].physical, dev_nr,
                                  async_submit);
        }
        btrfs_bio_counter_dec(root->fs_info);
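
The btrfs hunks above stop passing a separate rw value down the stack; the direction now travels inside the bio and is recovered with bio_op(). A minimal sketch of the resulting dispatch, assuming the caller has already built the bio; the helper name example_submit_or_queue is illustrative and not part of the patch:

    /* Sketch only: the operation is carried by the bio itself. */
    static void example_submit_or_queue(struct btrfs_root *root,
                                        struct btrfs_device *dev,
                                        struct bio *bio, int async)
    {
            /* reads, and synchronous submits, go straight to the device */
            if (!async || bio_op(bio) == REQ_OP_READ)
                    btrfsic_submit_bio(bio);        /* no rw argument */
            else
                    btrfs_schedule_bio(root, dev, bio);
    }
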
index 0ac90f8d85bdc8565acd1efee6c7c3228c83a5b5..6613e6335ca29e12e4ebd8fc6cc515638ffc416d 100644 (file)
@@ -375,10 +375,10 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
                                   u64 end, u64 *length);
 void btrfs_get_bbio(struct btrfs_bio *bbio);
 void btrfs_put_bbio(struct btrfs_bio *bbio);
-int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
                    u64 logical, u64 *length,
                    struct btrfs_bio **bbio_ret, int mirror_num);
-int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
                     u64 logical, u64 *length,
                     struct btrfs_bio **bbio_ret, int mirror_num,
                     int need_raid_map);
@@ -391,7 +391,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                      struct btrfs_root *extent_root, u64 type);
 void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
-int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
+int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
                  int mirror_num, int async_submit);
 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder);
index 6c15012a75d9c6a986710961c07f23fd1dec2b46..e156a36463a1658060c99d6edee0dc32be7d41aa 100644 (file)
@@ -45,7 +45,7 @@
 #include <trace/events/block.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static int submit_bh_wbc(int rw, struct buffer_head *bh,
+static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
                         unsigned long bio_flags,
                         struct writeback_control *wbc);
 
@@ -588,7 +588,7 @@ void write_boundary_block(struct block_device *bdev,
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
-                       ll_rw_block(WRITE, 1, &bh);
+                       ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
                put_bh(bh);
        }
 }
@@ -1225,7 +1225,7 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh)
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
-               submit_bh(READ, bh);
+               submit_bh(REQ_OP_READ, 0, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
@@ -1395,7 +1395,7 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *bh = __getblk(bdev, block, size);
        if (likely(bh)) {
-               ll_rw_block(READA, 1, &bh);
+               ll_rw_block(REQ_OP_READ, READA, 1, &bh);
                brelse(bh);
        }
 }
@@ -1697,7 +1697,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
        struct buffer_head *bh, *head;
        unsigned int blocksize, bbits;
        int nr_underway = 0;
-       int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+       int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
 
        head = create_page_buffers(page, inode,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));
@@ -1786,7 +1786,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh_wbc(write_op, bh, 0, wbc);
+                       submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc);
                        nr_underway++;
                }
                bh = next;
@@ -1840,7 +1840,7 @@ recover:
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        clear_buffer_dirty(bh);
-                       submit_bh_wbc(write_op, bh, 0, wbc);
+                       submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc);
                        nr_underway++;
                }
                bh = next;
@@ -1956,7 +1956,7 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
                if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
                    !buffer_unwritten(bh) &&
                     (block_start < from || block_end > to)) {
-                       ll_rw_block(READ, 1, &bh);
+                       ll_rw_block(REQ_OP_READ, 0, 1, &bh);
                        *wait_bh++=bh;
                }
        }
@@ -2249,7 +2249,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                if (buffer_uptodate(bh))
                        end_buffer_async_read(bh, 1);
                else
-                       submit_bh(READ, bh);
+                       submit_bh(REQ_OP_READ, 0, bh);
        }
        return 0;
 }
@@ -2583,7 +2583,7 @@ int nobh_write_begin(struct address_space *mapping,
                if (block_start < from || block_end > to) {
                        lock_buffer(bh);
                        bh->b_end_io = end_buffer_read_nobh;
-                       submit_bh(READ, bh);
+                       submit_bh(REQ_OP_READ, 0, bh);
                        nr_reads++;
                }
        }
@@ -2853,7 +2853,7 @@ int block_truncate_page(struct address_space *mapping,
 
        if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
                err = -EIO;
-               ll_rw_block(READ, 1, &bh);
+               ll_rw_block(REQ_OP_READ, 0, 1, &bh);
                wait_on_buffer(bh);
                /* Uhhuh. Read error. Complain and punt. */
                if (!buffer_uptodate(bh))
@@ -2950,7 +2950,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
  * errors, this only handles the "we need to be able to
  * do IO at the final sector" case.
  */
-void guard_bio_eod(int rw, struct bio *bio)
+void guard_bio_eod(int op, struct bio *bio)
 {
        sector_t maxsector;
        struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
@@ -2980,13 +2980,13 @@ void guard_bio_eod(int rw, struct bio *bio)
        bvec->bv_len -= truncated_bytes;
 
        /* ..and clear the end of the buffer for reads */
-       if ((rw & RW_MASK) == READ) {
+       if (op == REQ_OP_READ) {
                zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
                                truncated_bytes);
        }
 }
 
-static int submit_bh_wbc(int rw, struct buffer_head *bh,
+static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
                         unsigned long bio_flags, struct writeback_control *wbc)
 {
        struct bio *bio;
@@ -3000,7 +3000,7 @@ static int submit_bh_wbc(int rw, struct buffer_head *bh,
        /*
         * Only clear out a write error when rewriting
         */
-       if (test_set_buffer_req(bh) && (rw & WRITE))
+       if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
                clear_buffer_write_io_error(bh);
 
        /*
@@ -3025,32 +3025,35 @@ static int submit_bh_wbc(int rw, struct buffer_head *bh,
        bio->bi_flags |= bio_flags;
 
        /* Take care of bh's that straddle the end of the device */
-       guard_bio_eod(rw, bio);
+       guard_bio_eod(op, bio);
 
        if (buffer_meta(bh))
-               rw |= REQ_META;
+               op_flags |= REQ_META;
        if (buffer_prio(bh))
-               rw |= REQ_PRIO;
+               op_flags |= REQ_PRIO;
+       bio_set_op_attrs(bio, op, op_flags);
 
-       submit_bio(rw, bio);
+       submit_bio(bio);
        return 0;
 }
 
-int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
+int _submit_bh(int op, int op_flags, struct buffer_head *bh,
+              unsigned long bio_flags)
 {
-       return submit_bh_wbc(rw, bh, bio_flags, NULL);
+       return submit_bh_wbc(op, op_flags, bh, bio_flags, NULL);
 }
 EXPORT_SYMBOL_GPL(_submit_bh);
 
-int submit_bh(int rw, struct buffer_head *bh)
+int submit_bh(int op, int op_flags,  struct buffer_head *bh)
 {
-       return submit_bh_wbc(rw, bh, 0, NULL);
+       return submit_bh_wbc(op, op_flags, bh, 0, NULL);
 }
 EXPORT_SYMBOL(submit_bh);
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @op: whether to %READ or %WRITE
+ * @op_flags: rq_flag_bits or %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
@@ -3073,7 +3076,7 @@ EXPORT_SYMBOL(submit_bh);
  * All of the buffers must be for the same device, and must also be a
  * multiple of the current approved size for the device.
  */
-void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+void ll_rw_block(int op, int op_flags,  int nr, struct buffer_head *bhs[])
 {
        int i;
 
@@ -3082,18 +3085,18 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 
                if (!trylock_buffer(bh))
                        continue;
-               if (rw == WRITE) {
+               if (op == WRITE) {
                        if (test_clear_buffer_dirty(bh)) {
                                bh->b_end_io = end_buffer_write_sync;
                                get_bh(bh);
-                               submit_bh(WRITE, bh);
+                               submit_bh(op, op_flags, bh);
                                continue;
                        }
                } else {
                        if (!buffer_uptodate(bh)) {
                                bh->b_end_io = end_buffer_read_sync;
                                get_bh(bh);
-                               submit_bh(rw, bh);
+                               submit_bh(op, op_flags, bh);
                                continue;
                        }
                }
@@ -3102,7 +3105,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 }
 EXPORT_SYMBOL(ll_rw_block);
 
-void write_dirty_buffer(struct buffer_head *bh, int rw)
+void write_dirty_buffer(struct buffer_head *bh, int op_flags)
 {
        lock_buffer(bh);
        if (!test_clear_buffer_dirty(bh)) {
@@ -3111,7 +3114,7 @@ void write_dirty_buffer(struct buffer_head *bh, int rw)
        }
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
-       submit_bh(rw, bh);
+       submit_bh(REQ_OP_WRITE, op_flags, bh);
 }
 EXPORT_SYMBOL(write_dirty_buffer);
 
@@ -3120,7 +3123,7 @@ EXPORT_SYMBOL(write_dirty_buffer);
  * and then start new I/O and then wait upon it.  The caller must have a ref on
  * the buffer_head.
  */
-int __sync_dirty_buffer(struct buffer_head *bh, int rw)
+int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
 {
        int ret = 0;
 
@@ -3129,7 +3132,7 @@ int __sync_dirty_buffer(struct buffer_head *bh, int rw)
        if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_write_sync;
-               ret = submit_bh(rw, bh);
+               ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
                wait_on_buffer(bh);
                if (!ret && !buffer_uptodate(bh))
                        ret = -EIO;
@@ -3392,7 +3395,7 @@ int bh_submit_read(struct buffer_head *bh)
 
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
-       submit_bh(READ, bh);
+       submit_bh(REQ_OP_READ, 0, bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return 0;
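
fs/buffer.c now takes the REQ_OP_ value and the rq_flag_bits as separate arguments in submit_bh(), _submit_bh() and ll_rw_block(). A small sketch of a synchronous metadata read under the new convention, assuming the caller holds a reference on a mapped buffer head; the function name is made up for illustration:

    #include <linux/buffer_head.h>

    /* Sketch: synchronous read of one buffer head with split op/flags. */
    static int example_read_bh_sync(struct buffer_head *bh)
    {
            lock_buffer(bh);
            if (buffer_uptodate(bh)) {
                    unlock_buffer(bh);
                    return 0;
            }
            get_bh(bh);
            bh->b_end_io = end_buffer_read_sync;    /* unlocks and drops ref */
            submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
            wait_on_buffer(bh);
            return buffer_uptodate(bh) ? 0 : -EIO;
    }
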
index 2fc8c43ce531de02379c93e85c03f95c0d3ca793..c502c116924ca1bd603184089bff67be8f5100a5 100644 (file)
@@ -318,6 +318,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
                bio->bi_bdev = inode->i_sb->s_bdev;
                bio->bi_iter.bi_sector =
                        pblk << (inode->i_sb->s_blocksize_bits - 9);
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                ret = bio_add_page(bio, ciphertext_page,
                                        inode->i_sb->s_blocksize, 0);
                if (ret != inode->i_sb->s_blocksize) {
@@ -327,7 +328,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
                        err = -EIO;
                        goto errout;
                }
-               err = submit_bio_wait(WRITE, bio);
+               err = submit_bio_wait(bio);
                if ((err == 0) && bio->bi_error)
                        err = -EIO;
                bio_put(bio);
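
The fscrypt zero-out path shows the bio side of the conversion: the operation is attached with bio_set_op_attrs() and submit_bio_wait() no longer takes an rw argument. A self-contained sketch of the same pattern for one page; bdev, sector and page are assumed to be supplied by the caller:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Sketch: synchronous single-page write with the bio-carried op. */
    static int example_write_page_sync(struct block_device *bdev,
                                       sector_t sector, struct page *page)
    {
            struct bio *bio = bio_alloc(GFP_NOFS, 1);
            int err;

            if (!bio)
                    return -ENOMEM;
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = sector;
            bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
            if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
                    bio_put(bio);
                    return -EIO;
            }
            err = submit_bio_wait(bio);     /* op and flags come from the bio */
            bio_put(bio);
            return err;
    }
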
index f3b4408be5904a996920397960b2df322fde07f3..7c3ce73cb6170ee820aec560ece2fafc8b48b83c 100644 (file)
@@ -108,7 +108,8 @@ struct dio_submit {
 /* dio_state communicated between submission path and end_io */
 struct dio {
        int flags;                      /* doesn't change */
-       int rw;
+       int op;
+       int op_flags;
        blk_qc_t bio_cookie;
        struct block_device *bio_bdev;
        struct inode *inode;
@@ -163,7 +164,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
        ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
                                &sdio->from);
 
-       if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) {
+       if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
                struct page *page = ZERO_PAGE(0);
                /*
                 * A memory fault, but the filesystem has some outstanding
@@ -242,7 +243,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
                transferred = dio->result;
 
                /* Check for short read case */
-               if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))
+               if ((dio->op == REQ_OP_READ) &&
+                   ((offset + transferred) > dio->i_size))
                        transferred = dio->i_size - offset;
        }
 
@@ -273,7 +275,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
                 */
                dio->iocb->ki_pos += transferred;
 
-               if (dio->rw & WRITE)
+               if (dio->op == REQ_OP_WRITE)
                        ret = generic_write_sync(dio->iocb,  transferred);
                dio->iocb->ki_complete(dio->iocb, ret, 0);
        }
@@ -375,6 +377,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
 
        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = first_sector;
+       bio_set_op_attrs(bio, dio->op, dio->op_flags);
        if (dio->is_async)
                bio->bi_end_io = dio_bio_end_aio;
        else
@@ -402,17 +405,16 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
        dio->refcount++;
        spin_unlock_irqrestore(&dio->bio_lock, flags);
 
-       if (dio->is_async && dio->rw == READ && dio->should_dirty)
+       if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
                bio_set_pages_dirty(bio);
 
        dio->bio_bdev = bio->bi_bdev;
 
        if (sdio->submit_io) {
-               sdio->submit_io(dio->rw, bio, dio->inode,
-                              sdio->logical_offset_in_bio);
+               sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
                dio->bio_cookie = BLK_QC_T_NONE;
        } else
-               dio->bio_cookie = submit_bio(dio->rw, bio);
+               dio->bio_cookie = submit_bio(bio);
 
        sdio->bio = NULL;
        sdio->boundary = 0;
@@ -478,14 +480,14 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
        if (bio->bi_error)
                dio->io_error = -EIO;
 
-       if (dio->is_async && dio->rw == READ && dio->should_dirty) {
+       if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
                err = bio->bi_error;
                bio_check_pages_dirty(bio);     /* transfers ownership */
        } else {
                bio_for_each_segment_all(bvec, bio, i) {
                        struct page *page = bvec->bv_page;
 
-                       if (dio->rw == READ && !PageCompound(page) &&
+                       if (dio->op == REQ_OP_READ && !PageCompound(page) &&
                                        dio->should_dirty)
                                set_page_dirty_lock(page);
                        put_page(page);
@@ -638,7 +640,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
                 * which may decide to handle it or also return an unmapped
                 * buffer head.
                 */
-               create = dio->rw & WRITE;
+               create = dio->op == REQ_OP_WRITE;
                if (dio->flags & DIO_SKIP_HOLES) {
                        if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
                                                        i_blkbits))
@@ -788,7 +790,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
 {
        int ret = 0;
 
-       if (dio->rw & WRITE) {
+       if (dio->op == REQ_OP_WRITE) {
                /*
                 * Read accounting is performed in submit_bio()
                 */
@@ -988,7 +990,7 @@ do_holes:
                                loff_t i_size_aligned;
 
                                /* AKPM: eargh, -ENOTBLK is a hack */
-                               if (dio->rw & WRITE) {
+                               if (dio->op == REQ_OP_WRITE) {
                                        put_page(page);
                                        return -ENOTBLK;
                                }
@@ -1202,7 +1204,12 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                dio->is_async = true;
 
        dio->inode = inode;
-       dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
+       if (iov_iter_rw(iter) == WRITE) {
+               dio->op = REQ_OP_WRITE;
+               dio->op_flags = WRITE_ODIRECT;
+       } else {
+               dio->op = REQ_OP_READ;
+       }
 
        /*
         * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
index 7bd8ac8dfb280ca4c2339f6297385d579b23b93d..8bb72807e70d46ae2ab32e7e8d29de4d034454f5 100644 (file)
@@ -878,7 +878,7 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
                        } else {
                                bio = master_dev->bio;
                                /* FIXME: bio_set_dir() */
-                               bio->bi_rw |= REQ_WRITE;
+                               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                        }
 
                        osd_req_write(or, _ios_obj(ios, cur_comp),
index 3020fd70c392d1f2b55913e374ed11831b2aa967..a806b58e4646573beb31290f97952d4e5b258ec8 100644 (file)
@@ -470,7 +470,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
        trace_ext4_read_block_bitmap_load(sb, block_group);
        bh->b_end_io = ext4_end_bitmap_read;
        get_bh(bh);
-       submit_bh(READ | REQ_META | REQ_PRIO, bh);
+       submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
        return bh;
 verify:
        err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
index 6a6c27373b5467d11dcfc056b3c17dec5cccde3b..d3fa47c2b8a3c926830bc1c717a69f41bba477e4 100644 (file)
@@ -428,6 +428,7 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
                bio->bi_bdev = inode->i_sb->s_bdev;
                bio->bi_iter.bi_sector =
                        pblk << (inode->i_sb->s_blocksize_bits - 9);
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                ret = bio_add_page(bio, ciphertext_page,
                                   inode->i_sb->s_blocksize, 0);
                if (ret != inode->i_sb->s_blocksize) {
@@ -439,7 +440,7 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
                        err = -EIO;
                        goto errout;
                }
-               err = submit_bio_wait(WRITE, bio);
+               err = submit_bio_wait(bio);
                if ((err == 0) && bio->bi_error)
                        err = -EIO;
                bio_put(bio);
index 3da4cf8d18b68ccae8b93984ee1d0d154903a863..1e4b0b7425e5df010f7c6626692cd447610a533c 100644 (file)
@@ -214,7 +214,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
        trace_ext4_load_inode_bitmap(sb, block_group);
        bh->b_end_io = ext4_end_bitmap_read;
        get_bh(bh);
-       submit_bh(READ | REQ_META | REQ_PRIO, bh);
+       submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                put_bh(bh);
index f7140ca66e3bf2751eb8103a37f249b49521b6ff..ae44916d40e2fb3c949b031a4a0bd51b8af3346c 100644 (file)
@@ -981,7 +981,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
                return bh;
        if (!bh || buffer_uptodate(bh))
                return bh;
-       ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
+       ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
@@ -1135,7 +1135,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
                if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
                    !buffer_unwritten(bh) &&
                    (block_start < from || block_end > to)) {
-                       ll_rw_block(READ, 1, &bh);
+                       ll_rw_block(REQ_OP_READ, 0, 1, &bh);
                        *wait_bh++ = bh;
                        decrypt = ext4_encrypted_inode(inode) &&
                                S_ISREG(inode->i_mode);
@@ -3698,7 +3698,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 
        if (!buffer_uptodate(bh)) {
                err = -EIO;
-               ll_rw_block(READ, 1, &bh);
+               ll_rw_block(REQ_OP_READ, 0, 1, &bh);
                wait_on_buffer(bh);
                /* Uhhuh. Read error. Complain and punt. */
                if (!buffer_uptodate(bh))
@@ -4281,7 +4281,7 @@ make_io:
                trace_ext4_load_inode(inode);
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
-               submit_bh(READ | REQ_META | REQ_PRIO, bh);
+               submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        EXT4_ERROR_INODE_BLOCK(inode, block,
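
The ext4 metadata paths above also move ll_rw_block() to the split form; its first two arguments are now the operation and the flag bits. A short sketch of batched reads with the new signature, assuming an array of mapped buffer heads; the helper name is illustrative:

    #include <linux/buffer_head.h>

    /* Sketch: start reads on a batch of buffer heads, then wait on each.
     * ll_rw_block() skips buffers it cannot lock or that are already
     * up to date. */
    static int example_read_bhs(struct buffer_head *bhs[], int nr)
    {
            int i;

            ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, nr, bhs);
            for (i = 0; i < nr; i++) {
                    wait_on_buffer(bhs[i]);
                    if (!buffer_uptodate(bhs[i]))
                            return -EIO;
            }
            return 0;
    }
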
index 23d436d6f8b8fe1c0e69cbe2a957dcd141448abb..d89754ef1aab72075c378fc64300259e73a12ccc 100644 (file)
@@ -52,7 +52,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
-       submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
+       submit_bh(REQ_OP_WRITE, WRITE_SYNC | REQ_META | REQ_PRIO, bh);
        wait_on_buffer(bh);
        sb_end_write(sb);
        if (unlikely(!buffer_uptodate(bh)))
@@ -88,7 +88,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
        get_bh(*bh);
        lock_buffer(*bh);
        (*bh)->b_end_io = end_buffer_read_sync;
-       submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
+       submit_bh(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, *bh);
        wait_on_buffer(*bh);
        if (!buffer_uptodate(*bh)) {
                ret = -EIO;
index ec4c39952e847462c9a4a62f7d4bcc1afb74fd37..6569c6b47da43b045e499d1523e04dbf0f4418b5 100644 (file)
@@ -1443,7 +1443,8 @@ restart:
                                }
                                bh_use[ra_max] = bh;
                                if (bh)
-                                       ll_rw_block(READ | REQ_META | REQ_PRIO,
+                                       ll_rw_block(REQ_OP_READ,
+                                                   REQ_META | REQ_PRIO,
                                                    1, &bh);
                        }
                }
index 2a01df9cc1c3214ee0e106eee262e7a3d1cee284..5185fed40fab60b204a3f4674faa9789a8b8de53 100644 (file)
@@ -340,9 +340,10 @@ void ext4_io_submit(struct ext4_io_submit *io)
        struct bio *bio = io->io_bio;
 
        if (bio) {
-               int io_op = io->io_wbc->sync_mode == WB_SYNC_ALL ?
-                           WRITE_SYNC : WRITE;
-               submit_bio(io_op, io->io_bio);
+               int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
+                                 WRITE_SYNC : 0;
+               bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
+               submit_bio(io->io_bio);
        }
        io->io_bio = NULL;
 }
index dc54a4b60eba0faf0b4f8afb290925b5b57e355a..2ced5a823354bcc314b8c737277aeebe3e0b4e3e 100644 (file)
@@ -271,7 +271,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
                 */
                if (bio && (last_block_in_bio != blocks[0] - 1)) {
                submit_and_realloc:
-                       submit_bio(READ, bio);
+                       submit_bio(bio);
                        bio = NULL;
                }
                if (bio == NULL) {
@@ -294,6 +294,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
                        bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
                        bio->bi_end_io = mpage_end_io;
                        bio->bi_private = ctx;
+                       bio_set_op_attrs(bio, REQ_OP_READ, 0);
                }
 
                length = first_hole << blkbits;
@@ -303,14 +304,14 @@ int ext4_mpage_readpages(struct address_space *mapping,
                if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
                     (relative_block == map.m_len)) ||
                    (first_hole != blocks_per_page)) {
-                       submit_bio(READ, bio);
+                       submit_bio(bio);
                        bio = NULL;
                } else
                        last_block_in_bio = blocks[blocks_per_page - 1];
                goto next_page;
        confused:
                if (bio) {
-                       submit_bio(READ, bio);
+                       submit_bio(bio);
                        bio = NULL;
                }
                if (!PageUptodate(page))
@@ -323,6 +324,6 @@ int ext4_mpage_readpages(struct address_space *mapping,
        }
        BUG_ON(pages && !list_empty(pages));
        if (bio)
-               submit_bio(READ, bio);
+               submit_bio(bio);
        return 0;
 }
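
ext4_mpage_readpages() keeps one read bio open across pages, tags it once with bio_set_op_attrs(bio, REQ_OP_READ, 0), and hands it to the single-argument submit_bio() whenever the chain breaks. A condensed, asynchronous counterpart to the synchronous sketch earlier; the helper and its parameters are assumed, not taken from ext4:

    #include <linux/bio.h>

    /* Sketch: one read bio covering several pages, completed via end_io. */
    static int example_read_pages(struct block_device *bdev, sector_t sector,
                                  struct page **pages, int nr,
                                  bio_end_io_t *end_io)
    {
            struct bio *bio = bio_alloc(GFP_KERNEL, nr);
            int i;

            if (!bio)
                    return -ENOMEM;
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = sector;
            bio->bi_end_io = end_io;
            bio_set_op_attrs(bio, REQ_OP_READ, 0);
            for (i = 0; i < nr; i++)
                    if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
                            break;
            submit_bio(bio);        /* direction and flags ride in the bio */
            return 0;
    }
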
index 3822a5aedc61b241d937c474192bfa57e5b3b2c5..b1a347100d5483a208c97ec2e70be420f67f94e7 100644 (file)
@@ -4204,7 +4204,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
                goto out_bdev;
        }
        journal->j_private = sb;
-       ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
+       ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
        wait_on_buffer(journal->j_sb_buffer);
        if (!buffer_uptodate(journal->j_sb_buffer)) {
                ext4_msg(sb, KERN_ERR, "I/O error on journal device");
index 3891600499939895600bfc9aed37c6f2b5e1d53d..b6d600e91f39dfe18fa0435fd13a05c5c0d90058 100644 (file)
@@ -63,14 +63,15 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = META,
-               .rw = READ_SYNC | REQ_META | REQ_PRIO,
+               .op = REQ_OP_READ,
+               .op_flags = READ_SYNC | REQ_META | REQ_PRIO,
                .old_blkaddr = index,
                .new_blkaddr = index,
                .encrypted_page = NULL,
        };
 
        if (unlikely(!is_meta))
-               fio.rw &= ~REQ_META;
+               fio.op_flags &= ~REQ_META;
 repeat:
        page = f2fs_grab_cache_page(mapping, index, false);
        if (!page) {
@@ -157,13 +158,14 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = META,
-               .rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
+               .op = REQ_OP_READ,
+               .op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
                .encrypted_page = NULL,
        };
        struct blk_plug plug;
 
        if (unlikely(type == META_POR))
-               fio.rw &= ~REQ_META;
+               fio.op_flags &= ~REQ_META;
 
        blk_start_plug(&plug);
        for (; nrpages-- > 0; blkno++) {
index 9a8bbc1fb1faa6865665285b37217f7993692431..8769e8349dff868fc822190d7bd769bfbab3814a 100644 (file)
@@ -97,12 +97,11 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
        return bio;
 }
 
-static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
-                                               struct bio *bio)
+static inline void __submit_bio(struct f2fs_sb_info *sbi, struct bio *bio)
 {
-       if (!is_read_io(rw))
+       if (!is_read_io(bio_op(bio)))
                atomic_inc(&sbi->nr_wb_bios);
-       submit_bio(rw, bio);
+       submit_bio(bio);
 }
 
 static void __submit_merged_bio(struct f2fs_bio_info *io)
@@ -112,12 +111,14 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
        if (!io->bio)
                return;
 
-       if (is_read_io(fio->rw))
+       if (is_read_io(fio->op))
                trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
        else
                trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);
 
-       __submit_bio(io->sbi, fio->rw, io->bio);
+       bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
+
+       __submit_bio(io->sbi, io->bio);
        io->bio = NULL;
 }
 
@@ -183,10 +184,12 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
        /* change META to META_FLUSH in the checkpoint procedure */
        if (type >= META_FLUSH) {
                io->fio.type = META_FLUSH;
+               io->fio.op = REQ_OP_WRITE;
                if (test_opt(sbi, NOBARRIER))
-                       io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
+                       io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
                else
-                       io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
+                       io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
+                                                               REQ_PRIO;
        }
        __submit_merged_bio(io);
 out:
@@ -228,14 +231,16 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
        f2fs_trace_ios(fio, 0);
 
        /* Allocate a new bio */
-       bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
+       bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));
 
        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }
+       bio->bi_rw = fio->op_flags;
+       bio_set_op_attrs(bio, fio->op, fio->op_flags);
 
-       __submit_bio(fio->sbi, fio->rw, bio);
+       __submit_bio(fio->sbi, bio);
        return 0;
 }
 
@@ -244,7 +249,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
        struct f2fs_sb_info *sbi = fio->sbi;
        enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
        struct f2fs_bio_info *io;
-       bool is_read = is_read_io(fio->rw);
+       bool is_read = is_read_io(fio->op);
        struct page *bio_page;
 
        io = is_read ? &sbi->read_io : &sbi->write_io[btype];
@@ -256,7 +261,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
        down_write(&io->io_rwsem);
 
        if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
-                                               io->fio.rw != fio->rw))
+           (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags)))
                __submit_merged_bio(io);
 alloc_new:
        if (io->bio == NULL) {
@@ -390,7 +395,7 @@ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
 }
 
 struct page *get_read_data_page(struct inode *inode, pgoff_t index,
-                                               int rw, bool for_write)
+                                               int op_flags, bool for_write)
 {
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
@@ -400,7 +405,8 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index,
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .type = DATA,
-               .rw = rw,
+               .op = REQ_OP_READ,
+               .op_flags = op_flags,
                .encrypted_page = NULL,
        };
 
@@ -1051,7 +1057,7 @@ got_it:
                 */
                if (bio && (last_block_in_bio != block_nr - 1)) {
 submit_and_realloc:
-                       __submit_bio(F2FS_I_SB(inode), READ, bio);
+                       __submit_bio(F2FS_I_SB(inode), bio);
                        bio = NULL;
                }
                if (bio == NULL) {
@@ -1080,6 +1086,7 @@ submit_and_realloc:
                        bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
                        bio->bi_end_io = f2fs_read_end_io;
                        bio->bi_private = ctx;
+                       bio_set_op_attrs(bio, REQ_OP_READ, 0);
                }
 
                if (bio_add_page(bio, page, blocksize, 0) < blocksize)
@@ -1094,7 +1101,7 @@ set_error_page:
                goto next_page;
 confused:
                if (bio) {
-                       __submit_bio(F2FS_I_SB(inode), READ, bio);
+                       __submit_bio(F2FS_I_SB(inode), bio);
                        bio = NULL;
                }
                unlock_page(page);
@@ -1104,7 +1111,7 @@ next_page:
        }
        BUG_ON(pages && !list_empty(pages));
        if (bio)
-               __submit_bio(F2FS_I_SB(inode), READ, bio);
+               __submit_bio(F2FS_I_SB(inode), bio);
        return 0;
 }
 
@@ -1221,7 +1228,8 @@ static int f2fs_write_data_page(struct page *page,
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = DATA,
-               .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
+               .op = REQ_OP_WRITE,
+               .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
                .page = page,
                .encrypted_page = NULL,
        };
@@ -1662,7 +1670,8 @@ repeat:
                struct f2fs_io_info fio = {
                        .sbi = sbi,
                        .type = DATA,
-                       .rw = READ_SYNC,
+                       .op = REQ_OP_READ,
+                       .op_flags = READ_SYNC,
                        .old_blkaddr = blkaddr,
                        .new_blkaddr = blkaddr,
                        .page = page,
index 916e7c238e3d14a9da4a91e965ff570d0dc91232..23ae6a81ccd6c4f0ba419d9b735eb08f1bcab7df 100644 (file)
@@ -686,14 +686,15 @@ enum page_type {
 struct f2fs_io_info {
        struct f2fs_sb_info *sbi;       /* f2fs_sb_info pointer */
        enum page_type type;    /* contains DATA/NODE/META/META_FLUSH */
-       int rw;                 /* contains R/RS/W/WS with REQ_META/REQ_PRIO */
+       int op;                 /* contains REQ_OP_ */
+       int op_flags;           /* rq_flag_bits */
        block_t new_blkaddr;    /* new block address to be written */
        block_t old_blkaddr;    /* old block address before Cow */
        struct page *page;      /* page to be written */
        struct page *encrypted_page;    /* encrypted page */
 };
 
-#define is_read_io(rw) (((rw) & 1) == READ)
+#define is_read_io(rw) (rw == READ)
 struct f2fs_bio_info {
        struct f2fs_sb_info *sbi;       /* f2fs superblock */
        struct bio *bio;                /* bios to merge */
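
f2fs replaces the single rw field of struct f2fs_io_info with op and op_flags, and __submit_merged_bio() copies the pair into the bio with bio_set_op_attrs() just before submission. A sketch of how a data write is now described, mirroring the call sites in this series; sbi is assumed to be the usual f2fs_sb_info pointer, and the page and block-address fields are filled in later by the write path:

    /* Sketch: the request descriptor carries the op and its flags separately. */
    struct f2fs_io_info fio = {
            .sbi            = sbi,
            .type           = DATA,
            .op             = REQ_OP_WRITE,
            .op_flags       = WRITE_SYNC | REQ_PRIO,
            .encrypted_page = NULL,
    };
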
index 38d56f678912358e701d301d7cde694beda56563..3649d86bb431e208f5645ed962c5ce9d1bbc817a 100644 (file)
@@ -538,7 +538,8 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .type = DATA,
-               .rw = READ_SYNC,
+               .op = REQ_OP_READ,
+               .op_flags = READ_SYNC,
                .encrypted_page = NULL,
        };
        struct dnode_of_data dn;
@@ -612,7 +613,8 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
        /* allocate block address */
        f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
 
-       fio.rw = WRITE_SYNC;
+       fio.op = REQ_OP_WRITE;
+       fio.op_flags = WRITE_SYNC;
        fio.new_blkaddr = newaddr;
        f2fs_submit_page_mbio(&fio);
 
@@ -649,7 +651,8 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
                struct f2fs_io_info fio = {
                        .sbi = F2FS_I_SB(inode),
                        .type = DATA,
-                       .rw = WRITE_SYNC,
+                       .op = REQ_OP_WRITE,
+                       .op_flags = WRITE_SYNC,
                        .page = page,
                        .encrypted_page = NULL,
                };
index a4bb155dd00aeaf796ad143f8677590e660eeb77..c15e53c1d794c482faab4626f089af531e91fc5b 100644 (file)
@@ -108,7 +108,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(dn->inode),
                .type = DATA,
-               .rw = WRITE_SYNC | REQ_PRIO,
+               .op = REQ_OP_WRITE,
+               .op_flags = WRITE_SYNC | REQ_PRIO,
                .page = page,
                .encrypted_page = NULL,
        };
index 1f21aae80c4015ac4f96be6d268a0613ee3405e0..e53403987f6d1cfcc237f29e17f9b896cb245b80 100644 (file)
@@ -1070,14 +1070,15 @@ fail:
  * 0: f2fs_put_page(page, 0)
  * LOCKED_PAGE or error: f2fs_put_page(page, 1)
  */
-static int read_node_page(struct page *page, int rw)
+static int read_node_page(struct page *page, int op_flags)
 {
        struct f2fs_sb_info *sbi = F2FS_P_SB(page);
        struct node_info ni;
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = NODE,
-               .rw = rw,
+               .op = REQ_OP_READ,
+               .op_flags = op_flags,
                .page = page,
                .encrypted_page = NULL,
        };
@@ -1568,7 +1569,8 @@ static int f2fs_write_node_page(struct page *page,
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = NODE,
-               .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
+               .op = REQ_OP_WRITE,
+               .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
                .page = page,
                .encrypted_page = NULL,
        };
index 2e6f537a0e7df7df4b449df05f222ff4bc5a90b4..4c2d1fa1e0e2e9e581b09397da28e0e8a54e4d32 100644 (file)
@@ -257,7 +257,8 @@ static int __commit_inmem_pages(struct inode *inode,
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = DATA,
-               .rw = WRITE_SYNC | REQ_PRIO,
+               .op = REQ_OP_WRITE,
+               .op_flags = WRITE_SYNC | REQ_PRIO,
                .encrypted_page = NULL,
        };
        bool submit_bio = false;
@@ -406,7 +407,8 @@ repeat:
                fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
 
                bio->bi_bdev = sbi->sb->s_bdev;
-               ret = submit_bio_wait(WRITE_FLUSH, bio);
+               bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+               ret = submit_bio_wait(bio);
 
                llist_for_each_entry_safe(cmd, next,
                                          fcc->dispatch_list, llnode) {
@@ -438,7 +440,8 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
                int ret;
 
                bio->bi_bdev = sbi->sb->s_bdev;
-               ret = submit_bio_wait(WRITE_FLUSH, bio);
+               bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+               ret = submit_bio_wait(bio);
                bio_put(bio);
                return ret;
        }
@@ -1401,7 +1404,8 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = META,
-               .rw = WRITE_SYNC | REQ_META | REQ_PRIO,
+               .op = REQ_OP_WRITE,
+               .op_flags = WRITE_SYNC | REQ_META | REQ_PRIO,
                .old_blkaddr = page->index,
                .new_blkaddr = page->index,
                .page = page,
@@ -1409,7 +1413,7 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
        };
 
        if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
-               fio.rw &= ~REQ_META;
+               fio.op_flags &= ~REQ_META;
 
        set_page_writeback(page);
        f2fs_submit_page_mbio(&fio);
index 562ce0821559f5e8483117032a645dd96d9e8335..73b4e1d1912a7ff7370c1c6f2d649b770ed70acc 100644 (file)
@@ -25,11 +25,11 @@ static inline void __print_last_io(void)
        if (!last_io.len)
                return;
 
-       trace_printk("%3x:%3x %4x %-16s %2x %5x %12x %4x\n",
+       trace_printk("%3x:%3x %4x %-16s %2x %5x %5x %12x %4x\n",
                        last_io.major, last_io.minor,
                        last_io.pid, "----------------",
                        last_io.type,
-                       last_io.fio.rw,
+                       last_io.fio.op, last_io.fio.op_flags,
                        last_io.fio.new_blkaddr,
                        last_io.len);
        memset(&last_io, 0, sizeof(last_io));
@@ -101,7 +101,8 @@ void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
        if (last_io.major == major && last_io.minor == minor &&
                        last_io.pid == pid &&
                        last_io.type == __file_type(inode, pid) &&
-                       last_io.fio.rw == fio->rw &&
+                       last_io.fio.op == fio->op &&
+                       last_io.fio.op_flags == fio->op_flags &&
                        last_io.fio.new_blkaddr + last_io.len ==
                                                        fio->new_blkaddr) {
                last_io.len++;
index c4589e9817602ea992bb73c35e68146a7be81a4e..8a8698119ff74dad0081792f0effc0483f2e0a69 100644 (file)
@@ -267,7 +267,7 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
        int i, err = 0;
 
        for (i = 0; i < nr_bhs; i++)
-               write_dirty_buffer(bhs[i], WRITE);
+               write_dirty_buffer(bhs[i], 0);
 
        for (i = 0; i < nr_bhs; i++) {
                wait_on_buffer(bhs[i]);
index 24ce1cdd434abf6a8539d4d326d4774deb351256..fd6389cf0f144f0b0942caef47c1b1a587e83ec7 100644 (file)
@@ -285,7 +285,7 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl,
                if (trylock_buffer(rabh)) {
                        if (!buffer_uptodate(rabh)) {
                                rabh->b_end_io = end_buffer_read_sync;
-                               submit_bh(READA | REQ_META, rabh);
+                               submit_bh(REQ_OP_READ, READA | REQ_META, rabh);
                                continue;
                        }
                        unlock_buffer(rabh);
@@ -974,7 +974,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
 
        if (!buffer_uptodate(bh)) {
                err = -EIO;
-               ll_rw_block(READ, 1, &bh);
+               ll_rw_block(REQ_OP_READ, 0, 1, &bh);
                wait_on_buffer(bh);
                /* Uhhuh. Read error. Complain and punt. */
                if (!buffer_uptodate(bh))
index e30cc9fb2befb3a1344ac03f9b40e6356c10fa32..4d68530d6636f5716b35ba401967b6b816d8b969 100644 (file)
@@ -1513,7 +1513,7 @@ static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
                                continue;
                        }
                        bh->b_end_io = end_buffer_read_sync;
-                       submit_bh(READA | REQ_META, bh);
+                       submit_bh(REQ_OP_READ, READA | REQ_META, bh);
                        continue;
                }
                brelse(bh);
index 0ff028c15199a31e5adc4d66acd06fa375d02eb6..e58ccef09c917baa70542036b171dddeceae9d4b 100644 (file)
@@ -657,7 +657,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
        struct gfs2_log_header *lh;
        unsigned int tail;
        u32 hash;
-       int rw = WRITE_FLUSH_FUA | REQ_META;
+       int op_flags = WRITE_FLUSH_FUA | REQ_META;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
        lh = page_address(page);
@@ -682,12 +682,12 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
                gfs2_ordered_wait(sdp);
                log_flush_wait(sdp);
-               rw = WRITE_SYNC | REQ_META | REQ_PRIO;
+               op_flags = WRITE_SYNC | REQ_META | REQ_PRIO;
        }
 
        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        gfs2_log_write_page(sdp, page);
-       gfs2_log_flush_bio(sdp, rw);
+       gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
        log_flush_wait(sdp);
 
        if (sdp->sd_log_tail != tail)
@@ -738,7 +738,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
 
        gfs2_ordered_write(sdp);
        lops_before_commit(sdp, tr);
-       gfs2_log_flush_bio(sdp, WRITE);
+       gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
 
        if (sdp->sd_log_head != sdp->sd_log_flush_head) {
                log_flush_wait(sdp);
index 8e3ba20d5e9dfcf4d7d4450a3bc2c0d9a9c5db44..49d5a1b61b06920ca35b5b4664a3e8fc1c20aaf3 100644 (file)
@@ -230,17 +230,19 @@ static void gfs2_end_log_write(struct bio *bio)
 /**
  * gfs2_log_flush_bio - Submit any pending log bio
  * @sdp: The superblock
- * @rw: The rw flags
+ * @op: REQ_OP
+ * @op_flags: rq_flag_bits
  *
  * Submit any pending part-built or full bio to the block device. If
  * there is no pending bio, then this is a no-op.
  */
 
-void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
+void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags)
 {
        if (sdp->sd_log_bio) {
                atomic_inc(&sdp->sd_log_in_flight);
-               submit_bio(rw, sdp->sd_log_bio);
+               bio_set_op_attrs(sdp->sd_log_bio, op, op_flags);
+               submit_bio(sdp->sd_log_bio);
                sdp->sd_log_bio = NULL;
        }
 }
@@ -299,7 +301,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
                nblk >>= sdp->sd_fsb2bb_shift;
                if (blkno == nblk)
                        return bio;
-               gfs2_log_flush_bio(sdp, WRITE);
+               gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
        }
 
        return gfs2_log_alloc_bio(sdp, blkno);
@@ -328,7 +330,7 @@ static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
        bio = gfs2_log_get_bio(sdp, blkno);
        ret = bio_add_page(bio, page, size, offset);
        if (ret == 0) {
-               gfs2_log_flush_bio(sdp, WRITE);
+               gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
                bio = gfs2_log_alloc_bio(sdp, blkno);
                ret = bio_add_page(bio, page, size, offset);
                WARN_ON(ret == 0);
index a65a7ba32ffdf5f9eaaa4be0d25bad4c67db3c74..e529f536c1179aea749e04cb8638ce1ad1f444ba 100644 (file)
@@ -27,7 +27,7 @@ extern const struct gfs2_log_operations gfs2_databuf_lops;
 
 extern const struct gfs2_log_operations *gfs2_log_ops[];
 extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
-extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw);
+extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags);
 extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
 
 static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
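
gfs2_log_flush_bio() now receives the operation and the flags separately and applies them with bio_set_op_attrs() before the single-argument submit_bio(). A sketch of a caller choosing flags the way log_write_header() does above; example_flush_log is an illustrative name only:

    /* Sketch: flush the pending log bio with an explicit op and flags. */
    static void example_flush_log(struct gfs2_sbd *sdp)
    {
            int op_flags = WRITE_FLUSH_FUA | REQ_META;

            if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
                    op_flags = WRITE_SYNC | REQ_META | REQ_PRIO;

            gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
    }
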
index 8eaadabbc77100bea1906de90b70e201cb4c57cd..052c1132e5b6e821b4f6a75539660d8a1655b1f0 100644 (file)
@@ -37,8 +37,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 {
        struct buffer_head *bh, *head;
        int nr_underway = 0;
-       int write_op = REQ_META | REQ_PRIO |
-               (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+       int write_flags = REQ_META | REQ_PRIO |
+               (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
 
        BUG_ON(!PageLocked(page));
        BUG_ON(!page_has_buffers(page));
@@ -79,7 +79,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh(write_op, bh);
+                       submit_bh(REQ_OP_WRITE, write_flags, bh);
                        nr_underway++;
                }
                bh = next;
@@ -213,7 +213,8 @@ static void gfs2_meta_read_endio(struct bio *bio)
  * Submit several consecutive buffer head I/O requests as a single bio I/O
  * request.  (See submit_bh_wbc.)
  */
-static void gfs2_submit_bhs(int rw, struct buffer_head *bhs[], int num)
+static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
+                           int num)
 {
        struct buffer_head *bh = bhs[0];
        struct bio *bio;
@@ -230,7 +231,8 @@ static void gfs2_submit_bhs(int rw, struct buffer_head *bhs[], int num)
                bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
        }
        bio->bi_end_io = gfs2_meta_read_endio;
-       submit_bio(rw, bio);
+       bio_set_op_attrs(bio, op, op_flags);
+       submit_bio(bio);
 }
 
 /**
@@ -280,7 +282,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
                }
        }
 
-       gfs2_submit_bhs(READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
+       gfs2_submit_bhs(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
        if (!(flags & DIO_WAIT))
                return 0;
 
@@ -448,7 +450,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
        if (buffer_uptodate(first_bh))
                goto out;
        if (!buffer_locked(first_bh))
-               ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
+               ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh);
 
        dblock++;
        extlen--;
@@ -457,7 +459,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
                bh = gfs2_getbuf(gl, dblock, CREATE);
 
                if (!buffer_uptodate(bh) && !buffer_locked(bh))
-                       ll_rw_block(READA | REQ_META, 1, &bh);
+                       ll_rw_block(REQ_OP_READ, READA | REQ_META, 1, &bh);
                brelse(bh);
                dblock++;
                extlen--;
index b8f6fc9513ef1e1aac4db567c8a4bdf003f2b30b..ef1e1822977f1589e6989a9f5c91d0afa298e000 100644 (file)
@@ -246,7 +246,8 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 
        bio->bi_end_io = end_bio_io_page;
        bio->bi_private = page;
-       submit_bio(READ_SYNC | REQ_META, bio);
+       bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC | REQ_META);
+       submit_bio(bio);
        wait_on_page_locked(page);
        bio_put(bio);
        if (!PageUptodate(page)) {
index 6c657b202501511afa69ef64a9871339dca86bf8..77930ca25303d9c1e040de629ff10bf449574420 100644 (file)
@@ -730,7 +730,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
                if (PageUptodate(page))
                        set_buffer_uptodate(bh);
                if (!buffer_uptodate(bh)) {
-                       ll_rw_block(READ | REQ_META, 1, &bh);
+                       ll_rw_block(REQ_OP_READ, REQ_META, 1, &bh);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                goto unlock_out;
index fdc3446d934ad17523a6fba3490343e85756702a..047245bd2cd64550bc00cf827473456507b53a08 100644 (file)
@@ -526,7 +526,7 @@ int hfsplus_compare_dentry(const struct dentry *parent,
 
 /* wrapper.c */
 int hfsplus_submit_bio(struct super_block *sb, sector_t sector, void *buf,
-                      void **data, int rw);
+                      void **data, int op, int op_flags);
 int hfsplus_read_wrapper(struct super_block *sb);
 
 /* time macros */
index eb355d81e2798343720051c19e9d5559142a68e1..63164ebc52fa14f56570984908e409d5d0a4ba69 100644 (file)
@@ -112,7 +112,8 @@ static int hfs_parse_new_pmap(struct super_block *sb, void *buf,
                if ((u8 *)pm - (u8 *)buf >= buf_size) {
                        res = hfsplus_submit_bio(sb,
                                                 *part_start + HFS_PMAP_BLK + i,
-                                                buf, (void **)&pm, READ);
+                                                buf, (void **)&pm, REQ_OP_READ,
+                                                0);
                        if (res)
                                return res;
                }
@@ -136,7 +137,7 @@ int hfs_part_find(struct super_block *sb,
                return -ENOMEM;
 
        res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK,
-                                buf, &data, READ);
+                                buf, &data, REQ_OP_READ, 0);
        if (res)
                goto out;
 
index 755bf30ba1ce5971df4c7693a0afbb2600cd2b00..11854dd84572639e86fc70290be9ff2ee4bd725a 100644 (file)
@@ -220,7 +220,8 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
 
        error2 = hfsplus_submit_bio(sb,
                                   sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
-                                  sbi->s_vhdr_buf, NULL, WRITE_SYNC);
+                                  sbi->s_vhdr_buf, NULL, REQ_OP_WRITE,
+                                  WRITE_SYNC);
        if (!error)
                error = error2;
        if (!write_backup)
@@ -228,7 +229,8 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
 
        error2 = hfsplus_submit_bio(sb,
                                  sbi->part_start + sbi->sect_count - 2,
-                                 sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC);
+                                 sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE,
+                                 WRITE_SYNC);
        if (!error)
                error2 = error;
 out:
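
The hfsplus_submit_bio() prototype gains the same split, so callers pass REQ_OP_READ or REQ_OP_WRITE plus separate flags, as the part_tbl.c and super.c hunks do. A sketch of the caller side, assuming a correctly sized buffer as described in wrapper.c below:

#include <linux/fs.h>
#include "hfsplus_fs.h"		/* declares hfsplus_submit_bio() */

/* Illustrative sketch only: read a sector, then write it back synchronously. */
static int demo_hfsplus_rw(struct super_block *sb, sector_t sector,
			   void *buf, void **data)
{
	int err;

	/* old call: hfsplus_submit_bio(sb, sector, buf, data, READ); */
	err = hfsplus_submit_bio(sb, sector, buf, data, REQ_OP_READ, 0);
	if (err)
		return err;

	/* old call: hfsplus_submit_bio(sb, sector, buf, NULL, WRITE_SYNC); */
	return hfsplus_submit_bio(sb, sector, buf, NULL, REQ_OP_WRITE,
				  WRITE_SYNC);
}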
index cc623567143769b95d60a45a46b2f17e889773d0..ebb85e5f65499f3098fdecdc76333b40d7ad3d6b 100644 (file)
@@ -30,7 +30,8 @@ struct hfsplus_wd {
  * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
  * @buf: buffer for I/O
  * @data: output pointer for location of requested data
- * @rw: direction of I/O
+ * @op: direction of I/O
+ * @op_flags: request op flags
  *
  * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
  * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
@@ -44,7 +45,7 @@ struct hfsplus_wd {
  * will work correctly.
  */
 int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
-               void *buf, void **data, int rw)
+                      void *buf, void **data, int op, int op_flags)
 {
        struct bio *bio;
        int ret = 0;
@@ -65,8 +66,9 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
        bio = bio_alloc(GFP_NOIO, 1);
        bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = sb->s_bdev;
+       bio_set_op_attrs(bio, op, op_flags);
 
-       if (!(rw & WRITE) && data)
+       if (op != WRITE && data)
                *data = (u8 *)buf + offset;
 
        while (io_size > 0) {
@@ -83,7 +85,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
                buf = (u8 *)buf + len;
        }
 
-       ret = submit_bio_wait(rw, bio);
+       ret = submit_bio_wait(bio);
 out:
        bio_put(bio);
        return ret < 0 ? ret : 0;
@@ -181,7 +183,7 @@ int hfsplus_read_wrapper(struct super_block *sb)
 reread:
        error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
                                   sbi->s_vhdr_buf, (void **)&sbi->s_vhdr,
-                                  READ);
+                                  REQ_OP_READ, 0);
        if (error)
                goto out_free_backup_vhdr;
 
@@ -213,7 +215,8 @@ reread:
 
        error = hfsplus_submit_bio(sb, part_start + part_size - 2,
                                   sbi->s_backup_vhdr_buf,
-                                  (void **)&sbi->s_backup_vhdr, READ);
+                                  (void **)&sbi->s_backup_vhdr, REQ_OP_READ,
+                                  0);
        if (error)
                goto out_free_backup_vhdr;
 
index 2e4e834d1a9871377137a1735f7f28548b965304..2ce5b75ee9a5b7d3bc14d00d47464632e8d285a7 100644 (file)
@@ -81,7 +81,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
        blocknum = block_start >> bufshift;
        memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
        haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
-       ll_rw_block(READ, haveblocks, bhs);
+       ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
 
        curbh = 0;
        curpage = 0;
index 70078096117d3e956e86be7ba358b724286989d9..8f7d1339c973df4f27e580ab294423f8d1390e05 100644 (file)
@@ -155,9 +155,9 @@ static int journal_submit_commit_record(journal_t *journal,
 
        if (journal->j_flags & JBD2_BARRIER &&
            !jbd2_has_feature_async_commit(journal))
-               ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
+               ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC | WRITE_FLUSH_FUA, bh);
        else
-               ret = submit_bh(WRITE_SYNC, bh);
+               ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
 
        *cbh = bh;
        return ret;
@@ -718,7 +718,7 @@ start_journal_io:
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
-                               submit_bh(WRITE_SYNC, bh);
+                               submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
                        }
                        cond_resched();
                        stats.run.rs_blocks_logged += bufs;
index e3ca4b4cac84a4597b913888359cce6a4540ff09..a7c4c101fe3eb2c5029e5d02a45ebca39373821c 100644 (file)
@@ -1346,15 +1346,15 @@ static int journal_reset(journal_t *journal)
        return jbd2_journal_start_thread(journal);
 }
 
-static int jbd2_write_superblock(journal_t *journal, int write_op)
+static int jbd2_write_superblock(journal_t *journal, int write_flags)
 {
        struct buffer_head *bh = journal->j_sb_buffer;
        journal_superblock_t *sb = journal->j_superblock;
        int ret;
 
-       trace_jbd2_write_superblock(journal, write_op);
+       trace_jbd2_write_superblock(journal, write_flags);
        if (!(journal->j_flags & JBD2_BARRIER))
-               write_op &= ~(REQ_FUA | REQ_FLUSH);
+               write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
        lock_buffer(bh);
        if (buffer_write_io_error(bh)) {
                /*
@@ -1374,7 +1374,7 @@ static int jbd2_write_superblock(journal_t *journal, int write_op)
        jbd2_superblock_csum_set(journal, sb);
        get_bh(bh);
        bh->b_end_io = end_buffer_write_sync;
-       ret = submit_bh(write_op, bh);
+       ret = submit_bh(REQ_OP_WRITE, write_flags, bh);
        wait_on_buffer(bh);
        if (buffer_write_io_error(bh)) {
                clear_buffer_write_io_error(bh);
@@ -1498,7 +1498,7 @@ static int journal_get_superblock(journal_t *journal)
 
        J_ASSERT(bh != NULL);
        if (!buffer_uptodate(bh)) {
-               ll_rw_block(READ, 1, &bh);
+               ll_rw_block(REQ_OP_READ, 0, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        printk(KERN_ERR
index 805bc6bcd8abb0a6484ef1e808be98bbd51a6295..02dd3360cb20cb39eb37b13e0f8eb4627d6ad1a9 100644 (file)
@@ -104,7 +104,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
                if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
                        bufs[nbufs++] = bh;
                        if (nbufs == MAXBUF) {
-                               ll_rw_block(READ, nbufs, bufs);
+                               ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
                                journal_brelse_array(bufs, nbufs);
                                nbufs = 0;
                        }
@@ -113,7 +113,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
        }
 
        if (nbufs)
-               ll_rw_block(READ, nbufs, bufs);
+               ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
        err = 0;
 
 failed:
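
Two conversions run through the jbd2 hunks: submit_bh() now takes the op separately from the flags, and the REQ_FLUSH bit stripped for non-barrier journals is now spelled REQ_PREFLUSH. A condensed sketch of a superblock write under those rules (buffer state handling trimmed down, so treat it as an outline rather than the jbd2 code itself):

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Illustrative sketch only: write a superblock buffer, honouring barrier support. */
static int demo_write_sb(struct buffer_head *bh, int write_flags, bool barriers)
{
	if (!barriers)
		write_flags &= ~(REQ_FUA | REQ_PREFLUSH);	/* was REQ_FLUSH */

	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_write_sync;
	/* old call: submit_bh(write_flags, bh); */
	return submit_bh(REQ_OP_WRITE, write_flags, bh);
}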
index 63759d723920c3c76020a684c6cbe52f1c630a52..a74752146ec901d133323537e4ceeb0d253364ed 100644 (file)
@@ -2002,12 +2002,13 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
 
        bio->bi_end_io = lbmIODone;
        bio->bi_private = bp;
+       bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
        /*check if journaling to disk has been disabled*/
        if (log->no_integrity) {
                bio->bi_iter.bi_size = 0;
                lbmIODone(bio);
        } else {
-               submit_bio(READ_SYNC, bio);
+               submit_bio(bio);
        }
 
        wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));
@@ -2145,13 +2146,14 @@ static void lbmStartIO(struct lbuf * bp)
 
        bio->bi_end_io = lbmIODone;
        bio->bi_private = bp;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
 
        /* check if journaling to disk has been disabled */
        if (log->no_integrity) {
                bio->bi_iter.bi_size = 0;
                lbmIODone(bio);
        } else {
-               submit_bio(WRITE_SYNC, bio);
+               submit_bio(bio);
                INCREMENT(lmStat.submitted);
        }
 }
index b60e015cc757505096176114980cdc5461ad9239..e7fa9e5130403dae8e19ec8405d1eb6837e59db6 100644 (file)
@@ -411,7 +411,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
                        inc_io(page);
                        if (!bio->bi_iter.bi_size)
                                goto dump_bio;
-                       submit_bio(WRITE, bio);
+                       submit_bio(bio);
                        nr_underway++;
                        bio = NULL;
                } else
@@ -434,6 +434,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
                bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
                bio->bi_end_io = metapage_write_end_io;
                bio->bi_private = page;
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
                /* Don't call bio_add_page yet, we may add to this vec */
                bio_offset = offset;
@@ -448,7 +449,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
                if (!bio->bi_iter.bi_size)
                        goto dump_bio;
 
-               submit_bio(WRITE, bio);
+               submit_bio(bio);
                nr_underway++;
        }
        if (redirty)
@@ -506,7 +507,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
                                insert_metapage(page, NULL);
                        inc_io(page);
                        if (bio)
-                               submit_bio(READ, bio);
+                               submit_bio(bio);
 
                        bio = bio_alloc(GFP_NOFS, 1);
                        bio->bi_bdev = inode->i_sb->s_bdev;
@@ -514,6 +515,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
                                pblock << (inode->i_blkbits - 9);
                        bio->bi_end_io = metapage_read_end_io;
                        bio->bi_private = page;
+                       bio_set_op_attrs(bio, REQ_OP_READ, 0);
                        len = xlen << inode->i_blkbits;
                        offset = block_offset << inode->i_blkbits;
                        if (bio_add_page(bio, page, len, offset) < len)
@@ -523,7 +525,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
                        block_offset++;
        }
        if (bio)
-               submit_bio(READ, bio);
+               submit_bio(bio);
        else
                unlock_page(page);
 
index cc26f8f215f5058b1face75ef5460694bc47fa48..a8329cc47decd627ca26cf608e6f15276697a4f6 100644 (file)
@@ -14,7 +14,7 @@
 
 #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
 
-static int sync_request(struct page *page, struct block_device *bdev, int rw)
+static int sync_request(struct page *page, struct block_device *bdev, int op)
 {
        struct bio bio;
        struct bio_vec bio_vec;
@@ -29,8 +29,9 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
        bio.bi_bdev = bdev;
        bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
        bio.bi_iter.bi_size = PAGE_SIZE;
+       bio_set_op_attrs(&bio, op, 0);
 
-       return submit_bio_wait(rw, &bio);
+       return submit_bio_wait(&bio);
 }
 
 static int bdev_readpage(void *_sb, struct page *page)
@@ -95,8 +96,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                        bio->bi_iter.bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = writeseg_end_io;
+                       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                        atomic_inc(&super->s_pending_writes);
-                       submit_bio(WRITE, bio);
+                       submit_bio(bio);
 
                        ofs += i * PAGE_SIZE;
                        index += i;
@@ -122,8 +124,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
        bio->bi_iter.bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = writeseg_end_io;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        atomic_inc(&super->s_pending_writes);
-       submit_bio(WRITE, bio);
+       submit_bio(bio);
        return 0;
 }
 
@@ -185,8 +188,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
                        bio->bi_iter.bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = erase_end_io;
+                       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                        atomic_inc(&super->s_pending_writes);
-                       submit_bio(WRITE, bio);
+                       submit_bio(bio);
 
                        ofs += i * PAGE_SIZE;
                        index += i;
@@ -206,8 +210,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
        bio->bi_iter.bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = erase_end_io;
+       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        atomic_inc(&super->s_pending_writes);
-       submit_bio(WRITE, bio);
+       submit_bio(bio);
        return 0;
 }
 
index eedc644b78d78338ebb960339f3d1d224b837b9b..37b28280ad04065cebdebe73c309581fb256863b 100644 (file)
@@ -56,11 +56,12 @@ static void mpage_end_io(struct bio *bio)
        bio_put(bio);
 }
 
-static struct bio *mpage_bio_submit(int rw, struct bio *bio)
+static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
 {
        bio->bi_end_io = mpage_end_io;
-       guard_bio_eod(rw, bio);
-       submit_bio(rw, bio);
+       bio_set_op_attrs(bio, op, op_flags);
+       guard_bio_eod(op, bio);
+       submit_bio(bio);
        return NULL;
 }
 
@@ -269,7 +270,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && (*last_block_in_bio != blocks[0] - 1))
-               bio = mpage_bio_submit(READ, bio);
+               bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
 
 alloc_new:
        if (bio == NULL) {
@@ -286,7 +287,7 @@ alloc_new:
 
        length = first_hole << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
-               bio = mpage_bio_submit(READ, bio);
+               bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
                goto alloc_new;
        }
 
@@ -294,7 +295,7 @@ alloc_new:
        nblocks = map_bh->b_size >> blkbits;
        if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
            (first_hole != blocks_per_page))
-               bio = mpage_bio_submit(READ, bio);
+               bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
        else
                *last_block_in_bio = blocks[blocks_per_page - 1];
 out:
@@ -302,7 +303,7 @@ out:
 
 confused:
        if (bio)
-               bio = mpage_bio_submit(READ, bio);
+               bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
        if (!PageUptodate(page))
                block_read_full_page(page, get_block);
        else
@@ -384,7 +385,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
        }
        BUG_ON(!list_empty(pages));
        if (bio)
-               mpage_bio_submit(READ, bio);
+               mpage_bio_submit(REQ_OP_READ, 0, bio);
        return 0;
 }
 EXPORT_SYMBOL(mpage_readpages);
@@ -405,7 +406,7 @@ int mpage_readpage(struct page *page, get_block_t get_block)
        bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
                        &map_bh, &first_logical_block, get_block, gfp);
        if (bio)
-               mpage_bio_submit(READ, bio);
+               mpage_bio_submit(REQ_OP_READ, 0, bio);
        return 0;
 }
 EXPORT_SYMBOL(mpage_readpage);
@@ -486,7 +487,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
        struct buffer_head map_bh;
        loff_t i_size = i_size_read(inode);
        int ret = 0;
-       int wr = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
+       int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : 0);
 
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
@@ -595,7 +596,7 @@ page_is_mapped:
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && mpd->last_block_in_bio != blocks[0] - 1)
-               bio = mpage_bio_submit(wr, bio);
+               bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
 
 alloc_new:
        if (bio == NULL) {
@@ -622,7 +623,7 @@ alloc_new:
        wbc_account_io(wbc, page, PAGE_SIZE);
        length = first_unmapped << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
-               bio = mpage_bio_submit(wr, bio);
+               bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
                goto alloc_new;
        }
 
@@ -632,7 +633,7 @@ alloc_new:
        set_page_writeback(page);
        unlock_page(page);
        if (boundary || (first_unmapped != blocks_per_page)) {
-               bio = mpage_bio_submit(wr, bio);
+               bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
                if (boundary_block) {
                        write_boundary_block(boundary_bdev,
                                        boundary_block, 1 << blkbits);
@@ -644,7 +645,7 @@ alloc_new:
 
 confused:
        if (bio)
-               bio = mpage_bio_submit(wr, bio);
+               bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
 
        if (mpd->use_writepage) {
                ret = mapping->a_ops->writepage(page, wbc);
@@ -701,9 +702,9 @@ mpage_writepages(struct address_space *mapping,
 
                ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
                if (mpd.bio) {
-                       int wr = (wbc->sync_mode == WB_SYNC_ALL ?
-                                 WRITE_SYNC : WRITE);
-                       mpage_bio_submit(wr, mpd.bio);
+                       int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
+                                 WRITE_SYNC : 0);
+                       mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
                }
        }
        blk_finish_plug(&plug);
@@ -722,9 +723,9 @@ int mpage_writepage(struct page *page, get_block_t get_block,
        };
        int ret = __mpage_writepage(page, wbc, &mpd);
        if (mpd.bio) {
-               int wr = (wbc->sync_mode == WB_SYNC_ALL ?
-                         WRITE_SYNC : WRITE);
-               mpage_bio_submit(wr, mpd.bio);
+               int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
+                         WRITE_SYNC : 0);
+               mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
        }
        return ret;
 }
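
In fs/mpage.c the submit helper takes op and op_flags and applies them with bio_set_op_attrs(); since WRITE is no longer a flag, writers only derive WRITE_SYNC (or nothing) from the writeback mode. A minimal sketch of that flag selection, assuming a prepared write bio:

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/writeback.h>

/* Illustrative sketch only: pick op_flags from the writeback mode and submit. */
static void demo_mpage_write_submit(struct bio *bio,
				    struct writeback_control *wbc)
{
	/* old callers passed wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE */
	int op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0;

	bio_set_op_attrs(bio, REQ_OP_WRITE, op_flags);
	submit_bio(bio);
}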
index 17a42e4eb8728371f4aec957a545c47d332e7fcd..f55a4e7560470d7a1ef8ab790beb6db59232b35f 100644 (file)
@@ -102,14 +102,15 @@ static inline void put_parallel(struct parallel_io *p)
 }
 
 static struct bio *
-bl_submit_bio(int rw, struct bio *bio)
+bl_submit_bio(struct bio *bio)
 {
        if (bio) {
                get_parallel(bio->bi_private);
                dprintk("%s submitting %s bio %u@%llu\n", __func__,
-                       rw == READ ? "read" : "write", bio->bi_iter.bi_size,
+                       bio_op(bio) == READ ? "read" : "write",
+                       bio->bi_iter.bi_size,
                        (unsigned long long)bio->bi_iter.bi_sector);
-               submit_bio(rw, bio);
+               submit_bio(bio);
        }
        return NULL;
 }
@@ -158,7 +159,7 @@ do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
        if (disk_addr < map->start || disk_addr >= map->start + map->len) {
                if (!dev->map(dev, disk_addr, map))
                        return ERR_PTR(-EIO);
-               bio = bl_submit_bio(rw, bio);
+               bio = bl_submit_bio(bio);
        }
        disk_addr += map->disk_offset;
        disk_addr -= map->start;
@@ -174,9 +175,10 @@ retry:
                                disk_addr >> SECTOR_SHIFT, end_io, par);
                if (!bio)
                        return ERR_PTR(-ENOMEM);
+               bio_set_op_attrs(bio, rw, 0);
        }
        if (bio_add_page(bio, page, *len, offset) < *len) {
-               bio = bl_submit_bio(rw, bio);
+               bio = bl_submit_bio(bio);
                goto retry;
        }
        return bio;
@@ -252,7 +254,7 @@ bl_read_pagelist(struct nfs_pgio_header *header)
        for (i = pg_index; i < header->page_array.npages; i++) {
                if (extent_length <= 0) {
                        /* We've used up the previous extent */
-                       bio = bl_submit_bio(READ, bio);
+                       bio = bl_submit_bio(bio);
 
                        /* Get the next one */
                        if (!ext_tree_lookup(bl, isect, &be, false)) {
@@ -273,7 +275,7 @@ bl_read_pagelist(struct nfs_pgio_header *header)
                }
 
                if (is_hole(&be)) {
-                       bio = bl_submit_bio(READ, bio);
+                       bio = bl_submit_bio(bio);
                        /* Fill hole w/ zeroes w/o accessing device */
                        dprintk("%s Zeroing page for hole\n", __func__);
                        zero_user_segment(pages[i], pg_offset, pg_len);
@@ -306,7 +308,7 @@ bl_read_pagelist(struct nfs_pgio_header *header)
                header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
        }
 out:
-       bl_submit_bio(READ, bio);
+       bl_submit_bio(bio);
        blk_finish_plug(&plug);
        put_parallel(par);
        return PNFS_ATTEMPTED;
@@ -398,7 +400,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
        for (i = pg_index; i < header->page_array.npages; i++) {
                if (extent_length <= 0) {
                        /* We've used up the previous extent */
-                       bio = bl_submit_bio(WRITE, bio);
+                       bio = bl_submit_bio(bio);
                        /* Get the next one */
                        if (!ext_tree_lookup(bl, isect, &be, true)) {
                                header->pnfs_error = -EINVAL;
@@ -427,7 +429,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
 
        header->res.count = header->args.count;
 out:
-       bl_submit_bio(WRITE, bio);
+       bl_submit_bio(bio);
        blk_finish_plug(&plug);
        put_parallel(par);
        return PNFS_ATTEMPTED;
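
Because the direction now lives on the bio itself, the pNFS block layout code drops the rw argument from bl_submit_bio() and recovers it with bio_op() where it is still needed for logging. A hedged sketch of that accessor-based pattern:

#include <linux/bio.h>
#include <linux/printk.h>

/* Illustrative sketch only: log the op recorded on the bio, then submit it. */
static struct bio *demo_submit_logged(struct bio *bio)
{
	if (bio) {
		pr_debug("submitting %s bio %u@%llu\n",
			 bio_op(bio) == REQ_OP_READ ? "read" : "write",
			 bio->bi_iter.bi_size,
			 (unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(bio);
	}
	return NULL;
}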
index 0576033699bc621f525bf744ec32ca387bd4c6a8..4cca998ec7a0068cd55ad1c85e50d1ea0c9736cd 100644 (file)
@@ -62,7 +62,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
 }
 
 int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
-                             sector_t pblocknr, int mode,
+                             sector_t pblocknr, int mode, int mode_flags,
                              struct buffer_head **pbh, sector_t *submit_ptr)
 {
        struct buffer_head *bh;
@@ -95,7 +95,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
                }
        }
 
-       if (mode == READA) {
+       if (mode_flags & REQ_RAHEAD) {
                if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
                        err = -EBUSY; /* internal code */
                        brelse(bh);
@@ -114,7 +114,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
        bh->b_blocknr = pblocknr; /* set block address for read */
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
-       submit_bh(mode, bh);
+       submit_bh(mode, mode_flags, bh);
        bh->b_blocknr = blocknr; /* set back to the given block address */
        *submit_ptr = pblocknr;
        err = 0;
index 2cc1b80e18f72a84003f4c16c90a4d603469c439..4e8aaa1aeb65db70bc1f7fcc9faae7a81f96d09c 100644 (file)
@@ -43,7 +43,7 @@ void nilfs_btnode_cache_clear(struct address_space *);
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
                                              __u64 blocknr);
 int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t, int,
-                             struct buffer_head **, sector_t *);
+                             int, struct buffer_head **, sector_t *);
 void nilfs_btnode_delete(struct buffer_head *);
 int nilfs_btnode_prepare_change_key(struct address_space *,
                                    struct nilfs_btnode_chkey_ctxt *);
index eccb1c89ccbb20b2a597798a11cbe7c4e802a0e9..982d1e3df3a5c4723eebbccca4454e713abbb249 100644 (file)
@@ -476,7 +476,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
        sector_t submit_ptr = 0;
        int ret;
 
-       ret = nilfs_btnode_submit_block(btnc, ptr, 0, READ, &bh, &submit_ptr);
+       ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, 0, &bh,
+                                       &submit_ptr);
        if (ret) {
                if (ret != -EEXIST)
                        return ret;
@@ -492,7 +493,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
                     n > 0 && i < ra->ncmax; n--, i++) {
                        ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax);
 
-                       ret = nilfs_btnode_submit_block(btnc, ptr2, 0, READA,
+                       ret = nilfs_btnode_submit_block(btnc, ptr2, 0,
+                                                       REQ_OP_READ, REQ_RAHEAD,
                                                        &ra_bh, &submit_ptr);
                        if (likely(!ret || ret == -EEXIST))
                                brelse(ra_bh);
index 693aded72498e2019fd36e96630b114db89ea172..e9148f94d696883a8748c7b0474347b186e7060b 100644 (file)
@@ -101,7 +101,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
        bh->b_blocknr = pbn;
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
-       submit_bh(READ, bh);
+       submit_bh(REQ_OP_READ, 0, bh);
        if (vbn)
                bh->b_blocknr = vbn;
  out:
@@ -138,7 +138,8 @@ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
        int ret;
 
        ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
-                                       vbn ? : pbn, pbn, READ, out_bh, &pbn);
+                                       vbn ? : pbn, pbn, REQ_OP_READ, 0,
+                                       out_bh, &pbn);
        if (ret == -EEXIST) /* internal code (cache hit) */
                ret = 0;
        return ret;
index 3417d859a03cec6cc0d4455d2eaabbae08967e9a..0d7b71fbeff8fb5aa4915a652091d95f8f06dd04 100644 (file)
@@ -121,7 +121,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
 
 static int
 nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
-                      int mode, struct buffer_head **out_bh)
+                      int mode, int mode_flags, struct buffer_head **out_bh)
 {
        struct buffer_head *bh;
        __u64 blknum = 0;
@@ -135,7 +135,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
        if (buffer_uptodate(bh))
                goto out;
 
-       if (mode == READA) {
+       if (mode_flags & REQ_RAHEAD) {
                if (!trylock_buffer(bh)) {
                        ret = -EBUSY;
                        goto failed_bh;
@@ -157,7 +157,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
 
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
-       submit_bh(mode, bh);
+       submit_bh(mode, mode_flags, bh);
        ret = 0;
 
        trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff, mode);
@@ -181,7 +181,7 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
        int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
        int err;
 
-       err = nilfs_mdt_submit_block(inode, block, READ, &first_bh);
+       err = nilfs_mdt_submit_block(inode, block, REQ_OP_READ, 0, &first_bh);
        if (err == -EEXIST) /* internal code */
                goto out;
 
@@ -191,7 +191,8 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
        if (readahead) {
                blkoff = block + 1;
                for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
-                       err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
+                       err = nilfs_mdt_submit_block(inode, blkoff, REQ_OP_READ,
+                                                    REQ_RAHEAD, &bh);
                        if (likely(!err || err == -EEXIST))
                                brelse(bh);
                        else if (err != -EBUSY)
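
The nilfs2 hunks retire READA: readahead is now an ordinary REQ_OP_READ carrying the REQ_RAHEAD flag, and callers test the flag instead of comparing the mode. A minimal sketch of a submit helper that may be called either way (uptodate and error handling omitted):

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Illustrative sketch only: skip busy buffers for readahead, block otherwise. */
static int demo_submit_read_bh(struct buffer_head *bh, int mode, int mode_flags)
{
	if (mode_flags & REQ_RAHEAD) {		/* was: if (mode == READA) */
		if (!trylock_buffer(bh))
			return -EBUSY;
	} else {
		lock_buffer(bh);
	}

	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	return submit_bh(mode, mode_flags, bh);
}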
index bf36df10540b692dc2eee698a07296edcbd0267b..a962d7d834478714cacd52a04c4a78f932f3466f 100644 (file)
@@ -346,7 +346,8 @@ static void nilfs_end_bio_write(struct bio *bio)
 }
 
 static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
-                                  struct nilfs_write_info *wi, int mode)
+                                  struct nilfs_write_info *wi, int mode,
+                                  int mode_flags)
 {
        struct bio *bio = wi->bio;
        int err;
@@ -364,7 +365,8 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
 
        bio->bi_end_io = nilfs_end_bio_write;
        bio->bi_private = segbuf;
-       submit_bio(mode, bio);
+       bio_set_op_attrs(bio, mode, mode_flags);
+       submit_bio(bio);
        segbuf->sb_nbio++;
 
        wi->bio = NULL;
@@ -437,7 +439,7 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
                return 0;
        }
        /* bio is FULL */
-       err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
+       err = nilfs_segbuf_submit_bio(segbuf, wi, mode, 0);
        /* never submit current bh */
        if (likely(!err))
                goto repeat;
@@ -461,19 +463,19 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
 {
        struct nilfs_write_info wi;
        struct buffer_head *bh;
-       int res = 0, rw = WRITE;
+       int res = 0;
 
        wi.nilfs = nilfs;
        nilfs_segbuf_prepare_write(segbuf, &wi);
 
        list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
-               res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
+               res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE);
                if (unlikely(res))
                        goto failed_bio;
        }
 
        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
-               res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
+               res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE);
                if (unlikely(res))
                        goto failed_bio;
        }
@@ -483,8 +485,8 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
                 * Last BIO is always sent through the following
                 * submission.
                 */
-               rw |= REQ_SYNC;
-               res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
+               res = nilfs_segbuf_submit_bio(segbuf, &wi, REQ_OP_WRITE,
+                                             REQ_SYNC);
        }
 
  failed_bio:
index 97768a1379f2e60fcaab520e800cb332ee28265c..fe251f187ff8ff2a4f88210517470a1c2a0426f8 100644 (file)
@@ -362,7 +362,7 @@ handle_zblock:
                for (i = 0; i < nr; i++) {
                        tbh = arr[i];
                        if (likely(!buffer_uptodate(tbh)))
-                               submit_bh(READ, tbh);
+                               submit_bh(REQ_OP_READ, 0, tbh);
                        else
                                ntfs_end_buffer_async_read(tbh, 1);
                }
@@ -877,7 +877,7 @@ lock_retry_remap:
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh(WRITE, bh);
+                       submit_bh(REQ_OP_WRITE, 0, bh);
                        need_end_writeback = false;
                }
                bh = next;
@@ -1202,7 +1202,7 @@ lock_retry_remap:
                BUG_ON(!buffer_mapped(tbh));
                get_bh(tbh);
                tbh->b_end_io = end_buffer_write_sync;
-               submit_bh(WRITE, tbh);
+               submit_bh(REQ_OP_WRITE, 0, tbh);
        }
        /* Synchronize the mft mirror now if not @sync. */
        if (is_mft && !sync)
index f2b5e746f49b747c5c968fbf09d1a09dbd116269..f8eb04387ca4372ee8acce8d28b29c89618477ae 100644 (file)
@@ -670,7 +670,7 @@ lock_retry_remap:
                }
                get_bh(tbh);
                tbh->b_end_io = end_buffer_read_sync;
-               submit_bh(READ, tbh);
+               submit_bh(REQ_OP_READ, 0, tbh);
        }
 
        /* Wait for io completion on all buffer heads. */
index 5622ed5a201e3c23c9fdbf611a6c9dab0a50eb0d..f548629dfaacb426d4d31da3908c1af8a0f4e10d 100644 (file)
@@ -553,7 +553,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
        lock_buffer(bh);
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
-       return submit_bh(READ, bh);
+       return submit_bh(REQ_OP_READ, 0, bh);
 }
 
 /**
index 9d71213ca81e71818c76775b4ebead72ac0670dc..761f12f7f3efcfac4b39247589c1797342c62fcc 100644 (file)
@@ -821,7 +821,7 @@ map_vcn:
                         * completed ignore errors afterwards as we can assume
                         * that if one buffer worked all of them will work.
                         */
-                       submit_bh(WRITE, bh);
+                       submit_bh(REQ_OP_WRITE, 0, bh);
                        if (should_wait) {
                                should_wait = false;
                                wait_on_buffer(bh);
index 37b2501caaa43e31eb3a20dd7ea75f2d0429d945..d15d492ce47b1ea500a4a774577536c9e053c535 100644 (file)
@@ -592,7 +592,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
                        clear_buffer_dirty(tbh);
                        get_bh(tbh);
                        tbh->b_end_io = end_buffer_write_sync;
-                       submit_bh(WRITE, tbh);
+                       submit_bh(REQ_OP_WRITE, 0, tbh);
                }
                /* Wait on i/o completion of buffers. */
                for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
@@ -785,7 +785,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
                clear_buffer_dirty(tbh);
                get_bh(tbh);
                tbh->b_end_io = end_buffer_write_sync;
-               submit_bh(WRITE, tbh);
+               submit_bh(REQ_OP_WRITE, 0, tbh);
        }
        /* Synchronize the mft mirror now if not @sync. */
        if (!sync && ni->mft_no < vol->mftmirr_size)
index c034edf3ef38ed8b14b1054cf6e8753a0c77d063..e97a37179614e78427733046de6e837520603d18 100644 (file)
@@ -640,7 +640,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
                           !buffer_new(bh) &&
                           ocfs2_should_read_blk(inode, page, block_start) &&
                           (block_start < from || block_end > to)) {
-                       ll_rw_block(READ, 1, &bh);
+                       ll_rw_block(REQ_OP_READ, 0, 1, &bh);
                        *wait_bh++=bh;
                }
 
index 498641eed2db83efe31923c2f7d7517a499f836c..8f040f88ade44dd7cb85feacd782f3b31d0fac82 100644 (file)
@@ -79,7 +79,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
 
        get_bh(bh); /* for end_buffer_write_sync() */
        bh->b_end_io = end_buffer_write_sync;
-       submit_bh(WRITE, bh);
+       submit_bh(REQ_OP_WRITE, 0, bh);
 
        wait_on_buffer(bh);
 
@@ -154,7 +154,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
                clear_buffer_uptodate(bh);
                get_bh(bh); /* for end_buffer_read_sync() */
                bh->b_end_io = end_buffer_read_sync;
-               submit_bh(READ, bh);
+               submit_bh(REQ_OP_READ, 0, bh);
        }
 
        for (i = nr; i > 0; i--) {
@@ -310,7 +310,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                        if (validate)
                                set_buffer_needs_validate(bh);
                        bh->b_end_io = end_buffer_read_sync;
-                       submit_bh(READ, bh);
+                       submit_bh(REQ_OP_READ, 0, bh);
                        continue;
                }
        }
@@ -424,7 +424,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
        get_bh(bh); /* for end_buffer_write_sync() */
        bh->b_end_io = end_buffer_write_sync;
        ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
-       submit_bh(WRITE, bh);
+       submit_bh(REQ_OP_WRITE, 0, bh);
 
        wait_on_buffer(bh);
 
index 6aaf3e35139109cff852999e38d4c909b0d7f520..636abcbd46501b9c00ef5e3155d6656a161d7d49 100644 (file)
@@ -530,7 +530,8 @@ static void o2hb_bio_end_io(struct bio *bio)
 static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
                                      struct o2hb_bio_wait_ctxt *wc,
                                      unsigned int *current_slot,
-                                     unsigned int max_slots)
+                                     unsigned int max_slots, int op,
+                                     int op_flags)
 {
        int len, current_page;
        unsigned int vec_len, vec_start;
@@ -556,6 +557,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
        bio->bi_bdev = reg->hr_bdev;
        bio->bi_private = wc;
        bio->bi_end_io = o2hb_bio_end_io;
+       bio_set_op_attrs(bio, op, op_flags);
 
        vec_start = (cs << bits) % PAGE_SIZE;
        while(cs < max_slots) {
@@ -591,7 +593,8 @@ static int o2hb_read_slots(struct o2hb_region *reg,
        o2hb_bio_wait_init(&wc);
 
        while(current_slot < max_slots) {
-               bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots);
+               bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots,
+                                        REQ_OP_READ, 0);
                if (IS_ERR(bio)) {
                        status = PTR_ERR(bio);
                        mlog_errno(status);
@@ -599,7 +602,7 @@ static int o2hb_read_slots(struct o2hb_region *reg,
                }
 
                atomic_inc(&wc.wc_num_reqs);
-               submit_bio(READ, bio);
+               submit_bio(bio);
        }
 
        status = 0;
@@ -623,7 +626,8 @@ static int o2hb_issue_node_write(struct o2hb_region *reg,
 
        slot = o2nm_this_node();
 
-       bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1);
+       bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE,
+                                WRITE_SYNC);
        if (IS_ERR(bio)) {
                status = PTR_ERR(bio);
                mlog_errno(status);
@@ -631,7 +635,7 @@ static int o2hb_issue_node_write(struct o2hb_region *reg,
        }
 
        atomic_inc(&write_wc->wc_num_reqs);
-       submit_bio(WRITE_SYNC, bio);
+       submit_bio(bio);
 
        status = 0;
 bail:
index d7cae3327de5caab7c23b64c9e68460010b4317f..3971146228d3ee92c9e889a41ca35609c46003b3 100644 (file)
@@ -1819,7 +1819,7 @@ static int ocfs2_get_sector(struct super_block *sb,
        if (!buffer_dirty(*bh))
                clear_buffer_uptodate(*bh);
        unlock_buffer(*bh);
-       ll_rw_block(READ, 1, bh);
+       ll_rw_block(REQ_OP_READ, 0, 1, bh);
        wait_on_buffer(*bh);
        if (!buffer_uptodate(*bh)) {
                mlog_errno(-EIO);
index 825455d3e4ba1d545cc1085a457ee0ab22035a80..c2c59f9ff04beec2a71b28e30c9b0340df0ede6a 100644 (file)
@@ -2668,7 +2668,7 @@ static int reiserfs_write_full_page(struct page *page,
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh(WRITE, bh);
+                       submit_bh(REQ_OP_WRITE, 0, bh);
                        nr++;
                }
                put_bh(bh);
@@ -2728,7 +2728,7 @@ fail:
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        clear_buffer_dirty(bh);
-                       submit_bh(WRITE, bh);
+                       submit_bh(REQ_OP_WRITE, 0, bh);
                        nr++;
                }
                put_bh(bh);
index 2ace90e981f07a51c69c38cc16b415e221ca35d0..bc2dde2423c2eb3fdb464031d18c22e4586e81cb 100644 (file)
@@ -652,7 +652,7 @@ static void submit_logged_buffer(struct buffer_head *bh)
                BUG();
        if (!buffer_uptodate(bh))
                BUG();
-       submit_bh(WRITE, bh);
+       submit_bh(REQ_OP_WRITE, 0, bh);
 }
 
 static void submit_ordered_buffer(struct buffer_head *bh)
@@ -662,7 +662,7 @@ static void submit_ordered_buffer(struct buffer_head *bh)
        clear_buffer_dirty(bh);
        if (!buffer_uptodate(bh))
                BUG();
-       submit_bh(WRITE, bh);
+       submit_bh(REQ_OP_WRITE, 0, bh);
 }
 
 #define CHUNK_SIZE 32
@@ -870,7 +870,7 @@ loop_next:
                 */
                if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
                        spin_unlock(lock);
-                       ll_rw_block(WRITE, 1, &bh);
+                       ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
                        spin_lock(lock);
                }
                put_bh(bh);
@@ -1057,7 +1057,7 @@ static int flush_commit_list(struct super_block *s,
                if (tbh) {
                        if (buffer_dirty(tbh)) {
                            depth = reiserfs_write_unlock_nested(s);
-                           ll_rw_block(WRITE, 1, &tbh);
+                           ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh);
                            reiserfs_write_lock_nested(s, depth);
                        }
                        put_bh(tbh) ;
@@ -2244,7 +2244,7 @@ abort_replay:
                }
        }
        /* read in the log blocks, memcpy to the corresponding real block */
-       ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
+       ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks);
        for (i = 0; i < get_desc_trans_len(desc); i++) {
 
                wait_on_buffer(log_blocks[i]);
@@ -2269,7 +2269,7 @@ abort_replay:
        /* flush out the real blocks */
        for (i = 0; i < get_desc_trans_len(desc); i++) {
                set_buffer_dirty(real_blocks[i]);
-               write_dirty_buffer(real_blocks[i], WRITE);
+               write_dirty_buffer(real_blocks[i], 0);
        }
        for (i = 0; i < get_desc_trans_len(desc); i++) {
                wait_on_buffer(real_blocks[i]);
@@ -2346,7 +2346,7 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
                } else
                        bhlist[j++] = bh;
        }
-       ll_rw_block(READ, j, bhlist);
+       ll_rw_block(REQ_OP_READ, 0, j, bhlist);
        for (i = 1; i < j; i++)
                brelse(bhlist[i]);
        bh = bhlist[0];
index 5feacd689241e25f346756ee2528b4b42b845d42..64b29b592d86e70eb98422db1baebb0d1ace13f6 100644 (file)
@@ -551,7 +551,7 @@ static int search_by_key_reada(struct super_block *s,
                if (!buffer_uptodate(bh[j])) {
                        if (depth == -1)
                                depth = reiserfs_write_unlock_nested(s);
-                       ll_rw_block(READA, 1, bh + j);
+                       ll_rw_block(REQ_OP_READ, READA, 1, bh + j);
                }
                brelse(bh[j]);
        }
@@ -660,7 +660,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
                        if (!buffer_uptodate(bh) && depth == -1)
                                depth = reiserfs_write_unlock_nested(sb);
 
-                       ll_rw_block(READ, 1, &bh);
+                       ll_rw_block(REQ_OP_READ, 0, 1, &bh);
                        wait_on_buffer(bh);
 
                        if (depth != -1)
index c72c16c5a60f9dfd0a151022b7885073fa872270..7a4a85a6821e721f463257c1891c8279ad8ba1ad 100644 (file)
@@ -1666,7 +1666,7 @@ static int read_super_block(struct super_block *s, int offset)
 /* after journal replay, reread all bitmap and super blocks */
 static int reread_meta_blocks(struct super_block *s)
 {
-       ll_rw_block(READ, 1, &SB_BUFFER_WITH_SB(s));
+       ll_rw_block(REQ_OP_READ, 0, 1, &SB_BUFFER_WITH_SB(s));
        wait_on_buffer(SB_BUFFER_WITH_SB(s));
        if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
                reiserfs_warning(s, "reiserfs-2504", "error reading the super");
index 2c2618410d51b92113fe9a00173078abace498f5..ce62a380314f09e036c9468b73812c4fc872676f 100644 (file)
@@ -124,7 +124,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
                                goto block_release;
                        bytes += msblk->devblksize;
                }
-               ll_rw_block(READ, b, bh);
+               ll_rw_block(REQ_OP_READ, 0, b, bh);
        } else {
                /*
                 * Metadata block.
@@ -156,7 +156,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
                                goto block_release;
                        bytes += msblk->devblksize;
                }
-               ll_rw_block(READ, b - 1, bh + 1);
+               ll_rw_block(REQ_OP_READ, 0, b - 1, bh + 1);
        }
 
        for (i = 0; i < b; i++) {
index 4c5593abc553e6113a3c423a877f9565a73b8635..80c8a21daed917f445fa10aced32c602cd2b401e 100644 (file)
@@ -113,7 +113,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                                        brelse(tmp);
                        }
                        if (num) {
-                               ll_rw_block(READA, num, bha);
+                               ll_rw_block(REQ_OP_READ, READA, num, bha);
                                for (i = 0; i < num; i++)
                                        brelse(bha[i]);
                        }
index c763fda257bf371ad04a5b4b5aff0a1269a9366e..71f3e0b5b8ab8be8eaecd91bb0d374f002b41b8a 100644 (file)
@@ -87,7 +87,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                                        brelse(tmp);
                        }
                        if (num) {
-                               ll_rw_block(READA, num, bha);
+                               ll_rw_block(REQ_OP_READ, READA, num, bha);
                                for (i = 0; i < num; i++)
                                        brelse(bha[i]);
                        }
index f323aff740effceb669c04c42dfe53b2b326e8f1..55aa587bbc385f162e71536b35d5e826808f3a8b 100644 (file)
@@ -1199,7 +1199,7 @@ struct buffer_head *udf_bread(struct inode *inode, int block,
        if (buffer_uptodate(bh))
                return bh;
 
-       ll_rw_block(READ, 1, &bh);
+       ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
index 0447b949c7f5c65555603dced6de8cfac84eba4a..67e085d591d8398aaf07ab0102e602b70ca9ae8e 100644 (file)
@@ -292,7 +292,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
                        if (!buffer_mapped(bh))
                                        map_bh(bh, inode->i_sb, oldb + pos);
                        if (!buffer_uptodate(bh)) {
-                               ll_rw_block(READ, 1, &bh);
+                               ll_rw_block(REQ_OP_READ, 0, 1, &bh);
                                wait_on_buffer(bh);
                                if (!buffer_uptodate(bh)) {
                                        ufs_error(inode->i_sb, __func__,
index a409e3e7827ab09ee26547cbe3a299ecc5731ece..f41ad0a6106f28a2165c94068f6c7c3d71a67b3e 100644 (file)
@@ -118,7 +118,7 @@ void ubh_sync_block(struct ufs_buffer_head *ubh)
                unsigned i;
 
                for (i = 0; i < ubh->count; i++)
-                       write_dirty_buffer(ubh->bh[i], WRITE);
+                       write_dirty_buffer(ubh->bh[i], 0);
 
                for (i = 0; i < ubh->count; i++)
                        wait_on_buffer(ubh->bh[i]);
index 4c463b99fe574341043cb1f6ab612a59df881d31..87d2b215cbbd665d1d0f60bebae34e294e5f617a 100644 (file)
@@ -438,7 +438,8 @@ xfs_submit_ioend(
 
        ioend->io_bio->bi_private = ioend;
        ioend->io_bio->bi_end_io = xfs_end_bio;
-
+       bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
+                        (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
        /*
         * If we are failing the IO now, just mark the ioend with an
         * error and finish it. This will run IO completion immediately
@@ -451,8 +452,7 @@ xfs_submit_ioend(
                return status;
        }
 
-       submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
-                  ioend->io_bio);
+       submit_bio(ioend->io_bio);
        return 0;
 }
 
@@ -510,8 +510,9 @@ xfs_chain_bio(
 
        bio_chain(ioend->io_bio, new);
        bio_get(ioend->io_bio);         /* for xfs_destroy_ioend */
-       submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
-                  ioend->io_bio);
+       bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
+                         (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
+       submit_bio(ioend->io_bio);
        ioend->io_bio = new;
 }
 
index e71cfbd5acb3c74df23be024c91420a07291a493..a87a0d5477bdf89f18572b265bb62e4d982ead36 100644 (file)
@@ -1127,7 +1127,8 @@ xfs_buf_ioapply_map(
        int             map,
        int             *buf_offset,
        int             *count,
-       int             rw)
+       int             op,
+       int             op_flags)
 {
        int             page_index;
        int             total_nr_pages = bp->b_page_count;
@@ -1157,16 +1158,14 @@ xfs_buf_ioapply_map(
 
 next_chunk:
        atomic_inc(&bp->b_io_remaining);
-       nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
-       if (nr_pages > total_nr_pages)
-               nr_pages = total_nr_pages;
+       nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
 
        bio = bio_alloc(GFP_NOIO, nr_pages);
        bio->bi_bdev = bp->b_target->bt_bdev;
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = xfs_buf_bio_end_io;
        bio->bi_private = bp;
-
+       bio_set_op_attrs(bio, op, op_flags);
 
        for (; size && nr_pages; nr_pages--, page_index++) {
                int     rbytes, nbytes = PAGE_SIZE - offset;
@@ -1190,7 +1189,7 @@ next_chunk:
                        flush_kernel_vmap_range(bp->b_addr,
                                                xfs_buf_vmap_len(bp));
                }
-               submit_bio(rw, bio);
+               submit_bio(bio);
                if (size)
                        goto next_chunk;
        } else {
@@ -1210,7 +1209,8 @@ _xfs_buf_ioapply(
        struct xfs_buf  *bp)
 {
        struct blk_plug plug;
-       int             rw;
+       int             op;
+       int             op_flags = 0;
        int             offset;
        int             size;
        int             i;
@@ -1229,14 +1229,13 @@ _xfs_buf_ioapply(
                bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
 
        if (bp->b_flags & XBF_WRITE) {
+               op = REQ_OP_WRITE;
                if (bp->b_flags & XBF_SYNCIO)
-                       rw = WRITE_SYNC;
-               else
-                       rw = WRITE;
+                       op_flags = WRITE_SYNC;
                if (bp->b_flags & XBF_FUA)
-                       rw |= REQ_FUA;
+                       op_flags |= REQ_FUA;
                if (bp->b_flags & XBF_FLUSH)
-                       rw |= REQ_FLUSH;
+                       op_flags |= REQ_PREFLUSH;
 
                /*
                 * Run the write verifier callback function if it exists. If
@@ -1266,13 +1265,14 @@ _xfs_buf_ioapply(
                        }
                }
        } else if (bp->b_flags & XBF_READ_AHEAD) {
-               rw = READA;
+               op = REQ_OP_READ;
+               op_flags = REQ_RAHEAD;
        } else {
-               rw = READ;
+               op = REQ_OP_READ;
        }
 
        /* we only use the buffer cache for meta-data */
-       rw |= REQ_META;
+       op_flags |= REQ_META;
 
        /*
         * Walk all the vectors issuing IO on them. Set up the initial offset
@@ -1284,7 +1284,7 @@ _xfs_buf_ioapply(
        size = BBTOB(bp->b_io_length);
        blk_start_plug(&plug);
        for (i = 0; i < bp->b_map_count; i++) {
-               xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
+               xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
                if (bp->b_error)
                        break;
                if (size <= 0)
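
The xfs_buf conversion shows the general shape of the split: the op is REQ_OP_READ or REQ_OP_WRITE, everything that used to be OR'd into rw (WRITE_SYNC, REQ_FUA, REQ_PREFLUSH, REQ_RAHEAD, REQ_META) moves into op_flags, and the per-bio size is simply clamped to BIO_MAX_PAGES now that BIO_MAX_SECTORS is gone. A condensed sketch of the flag selection, assuming the XBF_* buffer flags from fs/xfs/xfs_buf.h:

#include <linux/fs.h>		/* REQ_* flags and WRITE_SYNC */
#include "xfs_buf.h"		/* assumed include, for the XBF_* buffer flags */

/* Illustrative sketch only: derive op/op_flags from xfs_buf-style flags. */
static void demo_pick_op(unsigned int b_flags, int *op, int *op_flags)
{
	*op_flags = 0;

	if (b_flags & XBF_WRITE) {
		*op = REQ_OP_WRITE;
		if (b_flags & XBF_SYNCIO)
			*op_flags |= WRITE_SYNC;
		if (b_flags & XBF_FUA)
			*op_flags |= REQ_FUA;
		if (b_flags & XBF_FLUSH)
			*op_flags |= REQ_PREFLUSH;	/* was REQ_FLUSH */
	} else if (b_flags & XBF_READ_AHEAD) {
		*op = REQ_OP_READ;
		*op_flags |= REQ_RAHEAD;		/* was rw = READA */
	} else {
		*op = REQ_OP_READ;
	}

	*op_flags |= REQ_META;	/* the buffer cache is metadata only */
}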
index 9faebf7f9a33c04a73506e7e379432ce1cce93bf..b7e1a00810f272e6649a109784625c78219f54e7 100644 (file)
 #endif
 
 #define BIO_MAX_PAGES          256
-#define BIO_MAX_SIZE           (BIO_MAX_PAGES << PAGE_SHIFT)
-#define BIO_MAX_SECTORS                (BIO_MAX_SIZE >> 9)
 
-/*
- * upper 16 bits of bi_rw define the io priority of this bio
- */
-#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS)
-#define bio_prio(bio)  ((bio)->bi_rw >> BIO_PRIO_SHIFT)
-#define bio_prio_valid(bio)    ioprio_valid(bio_prio(bio))
-
-#define bio_set_prio(bio, prio)                do {                    \
-       WARN_ON(prio >= (1 << IOPRIO_BITS));                    \
-       (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);          \
-       (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);     \
-} while (0)
-
-/*
- * various member access, note that bio_data should of course not be used
- * on highmem page vectors
- */
-#define __bvec_iter_bvec(bvec, iter)   (&(bvec)[(iter).bi_idx])
-
-#define bvec_iter_page(bvec, iter)                             \
-       (__bvec_iter_bvec((bvec), (iter))->bv_page)
-
-#define bvec_iter_len(bvec, iter)                              \
-       min((iter).bi_size,                                     \
-           __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
-
-#define bvec_iter_offset(bvec, iter)                           \
-       (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
-
-#define bvec_iter_bvec(bvec, iter)                             \
-((struct bio_vec) {                                            \
-       .bv_page        = bvec_iter_page((bvec), (iter)),       \
-       .bv_len         = bvec_iter_len((bvec), (iter)),        \
-       .bv_offset      = bvec_iter_offset((bvec), (iter)),     \
-})
+#define bio_prio(bio)                  (bio)->bi_ioprio
+#define bio_set_prio(bio, prio)                ((bio)->bi_ioprio = prio)
 
 #define bio_iter_iovec(bio, iter)                              \
        bvec_iter_bvec((bio)->bi_io_vec, (iter))
@@ -106,18 +71,23 @@ static inline bool bio_has_data(struct bio *bio)
 {
        if (bio &&
            bio->bi_iter.bi_size &&
-           !(bio->bi_rw & REQ_DISCARD))
+           bio_op(bio) != REQ_OP_DISCARD)
                return true;
 
        return false;
 }
 
+static inline bool bio_no_advance_iter(struct bio *bio)
+{
+       return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_WRITE_SAME;
+}
+
 static inline bool bio_is_rw(struct bio *bio)
 {
        if (!bio_has_data(bio))
                return false;
 
-       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+       if (bio_no_advance_iter(bio))
                return false;
 
        return true;
@@ -193,39 +163,12 @@ static inline void *bio_data(struct bio *bio)
 #define bio_for_each_segment_all(bvl, bio, i)                          \
        for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
 
-static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
-                                    unsigned bytes)
-{
-       WARN_ONCE(bytes > iter->bi_size,
-                 "Attempted to advance past end of bvec iter\n");
-
-       while (bytes) {
-               unsigned len = min(bytes, bvec_iter_len(bv, *iter));
-
-               bytes -= len;
-               iter->bi_size -= len;
-               iter->bi_bvec_done += len;
-
-               if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
-                       iter->bi_bvec_done = 0;
-                       iter->bi_idx++;
-               }
-       }
-}
-
-#define for_each_bvec(bvl, bio_vec, iter, start)                       \
-       for (iter = (start);                                            \
-            (iter).bi_size &&                                          \
-               ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
-            bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
-
-
 static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
                                    unsigned bytes)
 {
        iter->bi_sector += bytes >> 9;
 
-       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+       if (bio_no_advance_iter(bio))
                iter->bi_size -= bytes;
        else
                bvec_iter_advance(bio->bi_io_vec, iter, bytes);
@@ -253,10 +196,10 @@ static inline unsigned bio_segments(struct bio *bio)
         * differently:
         */
 
-       if (bio->bi_rw & REQ_DISCARD)
+       if (bio_op(bio) == REQ_OP_DISCARD)
                return 1;
 
-       if (bio->bi_rw & REQ_WRITE_SAME)
+       if (bio_op(bio) == REQ_OP_WRITE_SAME)
                return 1;
 
        bio_for_each_segment(bv, bio, iter)
@@ -473,7 +416,7 @@ static inline void bio_io_error(struct bio *bio)
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
 
-extern int submit_bio_wait(int rw, struct bio *bio);
+extern int submit_bio_wait(struct bio *bio);
 extern void bio_advance(struct bio *, unsigned);
 
 extern void bio_init(struct bio *);
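
A minimal usage sketch for the new prototype (bio and ret are assumed to be set up; bio_set_op_attrs comes from the blk_types.h hunk further below): the operation and flags travel inside the bio, so the former rw argument disappears.

        bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);  /* op + flags live in the bio */
        ret = submit_bio_wait(bio);                     /* was: submit_bio_wait(READ_SYNC, bio) */
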
index c02e669945e9279bceb796eb9a1adb938ebf863b..f77150a4a96aca88e2508e18a50973877a8cd0c4 100644 (file)
@@ -590,25 +590,26 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
 /**
  * blkg_rwstat_add - add a value to a blkg_rwstat
  * @rwstat: target blkg_rwstat
- * @rw: mask of REQ_{WRITE|SYNC}
+ * @op: REQ_OP
+ * @op_flags: rq_flag_bits
  * @val: value to add
  *
  * Add @val to @rwstat.  The counters are chosen according to @op and @op_flags.  The
  * caller is responsible for synchronizing calls to this function.
  */
 static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
-                                  int rw, uint64_t val)
+                                  int op, int op_flags, uint64_t val)
 {
        struct percpu_counter *cnt;
 
-       if (rw & REQ_WRITE)
+       if (op_is_write(op))
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
 
        __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
 
-       if (rw & REQ_SYNC)
+       if (op_flags & REQ_SYNC)
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
@@ -713,9 +714,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 
        if (!throtl) {
                blkg = blkg ?: q->root_blkg;
-               blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
+               blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rw,
                                bio->bi_iter.bi_size);
-               blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
+               blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1);
        }
 
        rcu_read_unlock();
index 77e5d81f07aaf36533e3e016267229ae64a21a74..b588e968dc01f6199e9858e555fdeb6c684fbaaf 100644 (file)
@@ -6,6 +6,7 @@
 #define __LINUX_BLK_TYPES_H
 
 #include <linux/types.h>
+#include <linux/bvec.h>
 
 struct bio_set;
 struct bio;
@@ -17,28 +18,7 @@ struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *);
 typedef void (bio_destructor_t) (struct bio *);
 
-/*
- * was unsigned short, but we might as well be ready for > 64kB I/O pages
- */
-struct bio_vec {
-       struct page     *bv_page;
-       unsigned int    bv_len;
-       unsigned int    bv_offset;
-};
-
 #ifdef CONFIG_BLOCK
-
-struct bvec_iter {
-       sector_t                bi_sector;      /* device address in 512 byte
-                                                  sectors */
-       unsigned int            bi_size;        /* residual I/O count */
-
-       unsigned int            bi_idx;         /* current index into bvl_vec */
-
-       unsigned int            bi_bvec_done;   /* number of bytes completed in
-                                                  current bvec */
-};
-
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
  * stacking drivers)
@@ -48,9 +28,10 @@ struct bio {
        struct block_device     *bi_bdev;
        unsigned int            bi_flags;       /* status, command, etc */
        int                     bi_error;
-       unsigned long           bi_rw;          /* bottom bits READ/WRITE,
-                                                * top bits priority
+       unsigned int            bi_rw;          /* bottom bits req flags,
+                                                * top bits REQ_OP
                                                 */
+       unsigned short          bi_ioprio;
 
        struct bvec_iter        bi_iter;
 
@@ -107,6 +88,16 @@ struct bio {
        struct bio_vec          bi_inline_vecs[0];
 };
 
+#define BIO_OP_SHIFT   (8 * sizeof(unsigned int) - REQ_OP_BITS)
+#define bio_op(bio)    ((bio)->bi_rw >> BIO_OP_SHIFT)
+
+#define bio_set_op_attrs(bio, op, op_flags) do {               \
+       WARN_ON(op >= (1 << REQ_OP_BITS));                      \
+       (bio)->bi_rw &= ((1 << BIO_OP_SHIFT) - 1);              \
+       (bio)->bi_rw |= ((unsigned int) (op) << BIO_OP_SHIFT);  \
+       (bio)->bi_rw |= op_flags;                               \
+} while (0)
+
 #define BIO_RESET_BYTES                offsetof(struct bio, bi_max_vecs)
 
 /*
@@ -145,7 +136,6 @@ struct bio {
  */
 enum rq_flag_bits {
        /* common flags */
-       __REQ_WRITE,            /* not set, read. set, write */
        __REQ_FAILFAST_DEV,     /* no driver retries of device errors */
        __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
        __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
@@ -153,14 +143,12 @@ enum rq_flag_bits {
        __REQ_SYNC,             /* request is sync (sync write or read) */
        __REQ_META,             /* metadata io request */
        __REQ_PRIO,             /* boost priority in cfq */
-       __REQ_DISCARD,          /* request to discard sectors */
-       __REQ_SECURE,           /* secure discard (used with __REQ_DISCARD) */
-       __REQ_WRITE_SAME,       /* write same block many times */
+       __REQ_SECURE,           /* secure discard (used with REQ_OP_DISCARD) */
 
        __REQ_NOIDLE,           /* don't anticipate more IO after this one */
        __REQ_INTEGRITY,        /* I/O includes block integrity payload */
        __REQ_FUA,              /* forced unit access */
-       __REQ_FLUSH,            /* request for cache flush */
+       __REQ_PREFLUSH,         /* request for cache flush */
 
        /* bio only flags */
        __REQ_RAHEAD,           /* read ahead, can fail anytime */
@@ -191,31 +179,25 @@ enum rq_flag_bits {
        __REQ_NR_BITS,          /* stops here */
 };
 
-#define REQ_WRITE              (1ULL << __REQ_WRITE)
 #define REQ_FAILFAST_DEV       (1ULL << __REQ_FAILFAST_DEV)
 #define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
 #define REQ_FAILFAST_DRIVER    (1ULL << __REQ_FAILFAST_DRIVER)
 #define REQ_SYNC               (1ULL << __REQ_SYNC)
 #define REQ_META               (1ULL << __REQ_META)
 #define REQ_PRIO               (1ULL << __REQ_PRIO)
-#define REQ_DISCARD            (1ULL << __REQ_DISCARD)
-#define REQ_WRITE_SAME         (1ULL << __REQ_WRITE_SAME)
 #define REQ_NOIDLE             (1ULL << __REQ_NOIDLE)
 #define REQ_INTEGRITY          (1ULL << __REQ_INTEGRITY)
 
 #define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
-       (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
-        REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
-        REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE)
+       (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
+        REQ_PREFLUSH | REQ_FUA | REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE)
 #define REQ_CLONE_MASK         REQ_COMMON_MASK
 
-#define BIO_NO_ADVANCE_ITER_MASK       (REQ_DISCARD|REQ_WRITE_SAME)
-
 /* This mask is used for both bio and request merge checking */
 #define REQ_NOMERGE_FLAGS \
-       (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)
+       (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ)
 
 #define REQ_RAHEAD             (1ULL << __REQ_RAHEAD)
 #define REQ_THROTTLED          (1ULL << __REQ_THROTTLED)
@@ -233,7 +215,7 @@ enum rq_flag_bits {
 #define REQ_PREEMPT            (1ULL << __REQ_PREEMPT)
 #define REQ_ALLOCED            (1ULL << __REQ_ALLOCED)
 #define REQ_COPY_USER          (1ULL << __REQ_COPY_USER)
-#define REQ_FLUSH              (1ULL << __REQ_FLUSH)
+#define REQ_PREFLUSH           (1ULL << __REQ_PREFLUSH)
 #define REQ_FLUSH_SEQ          (1ULL << __REQ_FLUSH_SEQ)
 #define REQ_IO_STAT            (1ULL << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE                (1ULL << __REQ_MIXED_MERGE)
@@ -242,6 +224,16 @@ enum rq_flag_bits {
 #define REQ_HASHED             (1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT                (1ULL << __REQ_MQ_INFLIGHT)
 
+enum req_op {
+       REQ_OP_READ,
+       REQ_OP_WRITE,
+       REQ_OP_DISCARD,         /* request to discard sectors */
+       REQ_OP_WRITE_SAME,      /* write same block many times */
+       REQ_OP_FLUSH,           /* request for cache flush */
+};
+
+#define REQ_OP_BITS 3
+
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE  -1U
 #define BLK_QC_T_SHIFT 16
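
A hedged sketch of the new encoding in use (bdev, page and sector are assumed; the identifiers come from the hunks above): the REQ_OP_* value occupies the top REQ_OP_BITS of bi_rw while the remaining bits keep the rq_flag_bits modifiers, and bio_op() decodes it again.

        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        bio->bi_bdev = bdev;                            /* assumed target device */
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, PAGE_SIZE, 0);
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_FUA);

        if (bio_op(bio) == REQ_OP_WRITE)                /* decodes the top bits */
                submit_bio(bio);                        /* no separate rw argument */
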
index 3d9cf326574fbb423de0c02617cddb464d6b4156..48f05d768a53bb4801d26d3a6c6a24d2fd628c27 100644 (file)
@@ -90,18 +90,17 @@ struct request {
        struct list_head queuelist;
        union {
                struct call_single_data csd;
-               unsigned long fifo_time;
+               u64 fifo_time;
        };
 
        struct request_queue *q;
        struct blk_mq_ctx *mq_ctx;
 
-       u64 cmd_flags;
+       int cpu;
        unsigned cmd_type;
+       u64 cmd_flags;
        unsigned long atomic_flags;
 
-       int cpu;
-
        /* the following two fields are internal, NEVER access directly */
        unsigned int __data_len;        /* total data len */
        sector_t __sector;              /* sector cursor */
@@ -200,6 +199,20 @@ struct request {
        struct request *next_rq;
 };
 
+#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
+#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)
+
+#define req_set_op(req, op) do {                               \
+       WARN_ON(op >= (1 << REQ_OP_BITS));                      \
+       (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);       \
+       (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);       \
+} while (0)
+
+#define req_set_op_attrs(req, op, flags) do {  \
+       req_set_op(req, op);                    \
+       (req)->cmd_flags |= flags;              \
+} while (0)
+
 static inline unsigned short req_get_ioprio(struct request *req)
 {
        return req->ioprio;
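
Sketch of the request side, inside a hypothetical driver's request handler (the mydrv_* helpers are made up): drivers now dispatch on req_op() instead of testing REQ_DISCARD/REQ_WRITE_SAME/REQ_FLUSH bits in cmd_flags.

        switch (req_op(rq)) {
        case REQ_OP_FLUSH:
                return mydrv_flush(rq);                 /* hypothetical helpers */
        case REQ_OP_DISCARD:
                return mydrv_discard(rq);
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                return mydrv_read_write(rq, rq_data_dir(rq));
        default:
                return -EIO;
        }
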
@@ -492,6 +505,7 @@ struct request_queue {
 #define QUEUE_FLAG_WC         23       /* Write back caching */
 #define QUEUE_FLAG_FUA        24       /* device supports FUA writes */
 #define QUEUE_FLAG_FLUSH_NQ    25      /* flush not queueuable */
+#define QUEUE_FLAG_DAX         26      /* device supports DAX */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -581,6 +595,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_discard(q)   test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 #define blk_queue_secdiscard(q)        (blk_queue_discard(q) && \
        test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+#define blk_queue_dax(q)       test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
 
 #define blk_noretry_request(rq) \
        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@@ -597,7 +612,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define list_entry_rq(ptr)     list_entry((ptr), struct request, queuelist)
 
-#define rq_data_dir(rq)                ((int)((rq)->cmd_flags & 1))
+#define rq_data_dir(rq)                (op_is_write(req_op(rq)) ? WRITE : READ)
 
 /*
  * Driver can handle struct request, if it either has an old style
@@ -616,14 +631,14 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q)
 /*
  * We regard a request as sync, if either a read or a sync write
  */
-static inline bool rw_is_sync(unsigned int rw_flags)
+static inline bool rw_is_sync(int op, unsigned int rw_flags)
 {
-       return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
+       return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
 }
 
 static inline bool rq_is_sync(struct request *rq)
 {
-       return rw_is_sync(rq->cmd_flags);
+       return rw_is_sync(req_op(rq), rq->cmd_flags);
 }
 
 static inline bool blk_rl_full(struct request_list *rl, bool sync)
@@ -652,22 +667,25 @@ static inline bool rq_mergeable(struct request *rq)
        if (rq->cmd_type != REQ_TYPE_FS)
                return false;
 
+       if (req_op(rq) == REQ_OP_FLUSH)
+               return false;
+
        if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
                return false;
 
        return true;
 }
 
-static inline bool blk_check_merge_flags(unsigned int flags1,
-                                        unsigned int flags2)
+static inline bool blk_check_merge_flags(unsigned int flags1, unsigned int op1,
+                                        unsigned int flags2, unsigned int op2)
 {
-       if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
+       if ((op1 == REQ_OP_DISCARD) != (op2 == REQ_OP_DISCARD))
                return false;
 
        if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
                return false;
 
-       if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+       if ((op1 == REQ_OP_WRITE_SAME) != (op2 == REQ_OP_WRITE_SAME))
                return false;
 
        return true;
@@ -879,12 +897,12 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 }
 
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
-                                                    unsigned int cmd_flags)
+                                                    int op)
 {
-       if (unlikely(cmd_flags & REQ_DISCARD))
+       if (unlikely(op == REQ_OP_DISCARD))
                return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 
-       if (unlikely(cmd_flags & REQ_WRITE_SAME))
+       if (unlikely(op == REQ_OP_WRITE_SAME))
                return q->limits.max_write_same_sectors;
 
        return q->limits.max_sectors;
@@ -904,18 +922,19 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
                        (offset & (q->limits.chunk_sectors - 1));
 }
 
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
+                                                 sector_t offset)
 {
        struct request_queue *q = rq->q;
 
        if (unlikely(rq->cmd_type != REQ_TYPE_FS))
                return q->limits.max_hw_sectors;
 
-       if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
-               return blk_queue_get_max_sectors(q, rq->cmd_flags);
+       if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
+               return blk_queue_get_max_sectors(q, req_op(rq));
 
-       return min(blk_max_size_offset(q, blk_rq_pos(rq)),
-                       blk_queue_get_max_sectors(q, rq->cmd_flags));
+       return min(blk_max_size_offset(q, offset),
+                       blk_queue_get_max_sectors(q, req_op(rq)));
 }
 
 static inline unsigned int blk_rq_count_bios(struct request *rq)
@@ -1141,7 +1160,8 @@ extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-               sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
+               sector_t nr_sects, gfp_t gfp_mask, int op_flags,
+               struct bio **biop);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
index 0f3172b8b22597309cacf6aa8567786996ce7aa5..cceb72f9e29f539b4d61df0de2250f2c514cfa3a 100644 (file)
@@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq)
 }
 
 extern void blk_dump_cmd(char *buf, struct request *rq);
-extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
+extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes);
 
 #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
 
index 7e14e545c4b6aa24218f680f978ae7818d725ee1..ebbacd14d4504a192d7c3f7443012edac433485d 100644 (file)
@@ -187,12 +187,13 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(int, int, struct buffer_head * bh[]);
+void ll_rw_block(int, int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
-int __sync_dirty_buffer(struct buffer_head *bh, int rw);
-void write_dirty_buffer(struct buffer_head *bh, int rw);
-int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags);
-int submit_bh(int, struct buffer_head *);
+int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
+void write_dirty_buffer(struct buffer_head *bh, int op_flags);
+int _submit_bh(int op, int op_flags, struct buffer_head *bh,
+              unsigned long bio_flags);
+int submit_bh(int, int, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize);
 int bh_uptodate_or_lock(struct buffer_head *bh);
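
A hedged example of the converted buffer-layer calls (the buffer_head bh is assumed): the operation is now the first argument, followed by the remaining rq_flag_bits.

        submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);        /* was: submit_bh(WRITE_SYNC, bh) */
        ll_rw_block(REQ_OP_READ, 0, 1, &bh);            /* was: ll_rw_block(READ, 1, &bh) */
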
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
new file mode 100644 (file)
index 0000000..701b64a
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * bvec iterator
+ *
+ * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public Licens
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
+ */
+#ifndef __LINUX_BVEC_ITER_H
+#define __LINUX_BVEC_ITER_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+/*
+ * was unsigned short, but we might as well be ready for > 64kB I/O pages
+ */
+struct bio_vec {
+       struct page     *bv_page;
+       unsigned int    bv_len;
+       unsigned int    bv_offset;
+};
+
+struct bvec_iter {
+       sector_t                bi_sector;      /* device address in 512 byte
+                                                  sectors */
+       unsigned int            bi_size;        /* residual I/O count */
+
+       unsigned int            bi_idx;         /* current index into bvl_vec */
+
+       unsigned int            bi_bvec_done;   /* number of bytes completed in
+                                                  current bvec */
+};
+
+/*
+ * various member access, note that bio_data should of course not be used
+ * on highmem page vectors
+ */
+#define __bvec_iter_bvec(bvec, iter)   (&(bvec)[(iter).bi_idx])
+
+#define bvec_iter_page(bvec, iter)                             \
+       (__bvec_iter_bvec((bvec), (iter))->bv_page)
+
+#define bvec_iter_len(bvec, iter)                              \
+       min((iter).bi_size,                                     \
+           __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
+
+#define bvec_iter_offset(bvec, iter)                           \
+       (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
+
+#define bvec_iter_bvec(bvec, iter)                             \
+((struct bio_vec) {                                            \
+       .bv_page        = bvec_iter_page((bvec), (iter)),       \
+       .bv_len         = bvec_iter_len((bvec), (iter)),        \
+       .bv_offset      = bvec_iter_offset((bvec), (iter)),     \
+})
+
+static inline void bvec_iter_advance(const struct bio_vec *bv,
+                                    struct bvec_iter *iter,
+                                    unsigned bytes)
+{
+       WARN_ONCE(bytes > iter->bi_size,
+                 "Attempted to advance past end of bvec iter\n");
+
+       while (bytes) {
+               unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+
+               bytes -= len;
+               iter->bi_size -= len;
+               iter->bi_bvec_done += len;
+
+               if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
+                       iter->bi_bvec_done = 0;
+                       iter->bi_idx++;
+               }
+       }
+}
+
+#define for_each_bvec(bvl, bio_vec, iter, start)                       \
+       for (iter = (start);                                            \
+            (iter).bi_size &&                                          \
+               ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
+            bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+
+#endif /* __LINUX_BVEC_ITER_H */
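
A small sketch of the relocated iterator (the bvecs array and the total byte count are assumed): for_each_bvec() walks the segments using only the bvec_iter cursor, leaving the array itself untouched.

        struct bio_vec bv;
        struct bvec_iter iter;
        struct bvec_iter start = { .bi_size = total };  /* bi_idx/bi_bvec_done start at 0 */

        for_each_bvec(bv, bvecs, iter, start)
                pr_debug("segment: %u bytes at page offset %u\n",
                         bv.bv_len, bv.bv_offset);
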
index a68cbe59e6ad190023e410cb32784b1fb6a67d2a..b91b023deffbb3ab035e3fbc0955f5393cde4027 100644 (file)
@@ -57,7 +57,8 @@ struct dm_io_notify {
  */
 struct dm_io_client;
 struct dm_io_request {
-       int bi_rw;                      /* READ|WRITE - not READA */
+       int bi_op;                      /* REQ_OP */
+       int bi_op_flags;                /* rq_flag_bits */
        struct dm_io_memory mem;        /* Memory to use for io */
        struct dm_io_notify notify;     /* Synchronous if notify.fn is NULL */
        struct dm_io_client *client;    /* Client memory handler */
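
Sketch of a dm-io submission with the split fields (io_client, region and the data buffer are assumed to be set up elsewhere):

        struct dm_io_request io_req = {
                .bi_op          = REQ_OP_WRITE,
                .bi_op_flags    = REQ_SYNC,             /* rq_flag_bits only, no op here */
                .mem.type       = DM_IO_KMEM,
                .mem.ptr.addr   = data,
                .notify.fn      = NULL,                 /* synchronous */
                .client         = io_client,
        };

        int r = dm_io(&io_req, 1, &region, NULL);
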
index 638b324f0291c2466fe285a0c4e94dee11fd87f0..e7f358d2e5fc6ee4dba3853f0657f1c3599b7cf2 100644 (file)
@@ -16,7 +16,11 @@ typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *,
 
 typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int);
 
-typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
+typedef int (elevator_allow_bio_merge_fn) (struct request_queue *,
+                                          struct request *, struct bio *);
+
+typedef int (elevator_allow_rq_merge_fn) (struct request_queue *,
+                                         struct request *, struct request *);
 
 typedef void (elevator_bio_merged_fn) (struct request_queue *,
                                                struct request *, struct bio *);
@@ -26,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int);
 typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
 typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
 typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_may_queue_fn) (struct request_queue *, int);
+typedef int (elevator_may_queue_fn) (struct request_queue *, int, int);
 
 typedef void (elevator_init_icq_fn) (struct io_cq *);
 typedef void (elevator_exit_icq_fn) (struct io_cq *);
@@ -46,7 +50,8 @@ struct elevator_ops
        elevator_merge_fn *elevator_merge_fn;
        elevator_merged_fn *elevator_merged_fn;
        elevator_merge_req_fn *elevator_merge_req_fn;
-       elevator_allow_merge_fn *elevator_allow_merge_fn;
+       elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn;
+       elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn;
        elevator_bio_merged_fn *elevator_bio_merged_fn;
 
        elevator_dispatch_fn *elevator_dispatch_fn;
@@ -134,7 +139,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request
 extern struct request *elv_latter_request(struct request_queue *, struct request *);
 extern int elv_register_queue(struct request_queue *q);
 extern void elv_unregister_queue(struct request_queue *q);
-extern int elv_may_queue(struct request_queue *, int);
+extern int elv_may_queue(struct request_queue *, int, int);
 extern void elv_completed_request(struct request_queue *, struct request *);
 extern int elv_set_request(struct request_queue *q, struct request *rq,
                           struct bio *bio, gfp_t gfp_mask);
@@ -157,7 +162,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(struct elevator_queue *);
 extern int elevator_change(struct request_queue *, const char *);
-extern bool elv_rq_merge_ok(struct request *, struct bio *);
+extern bool elv_bio_merge_ok(struct request *, struct bio *);
 extern struct elevator_queue *elevator_alloc(struct request_queue *,
                                        struct elevator_type *);
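
An illustrative, purely hypothetical scheduler hook for the new split callbacks; it would be wired up through .elevator_allow_rq_merge_fn in the scheduler's elevator_ops, alongside the existing bio-merge hook.

static int example_allow_rq_merge(struct request_queue *q, struct request *rq,
                                  struct request *next)
{
        return req_op(rq) == req_op(next);      /* hypothetical policy: merge same ops only */
}
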
 
index dd288148a6b15f2ea958441e628f5ed2a729faff..183024525d40404fdc1300a6053e98cd6c02ef8f 100644 (file)
@@ -152,9 +152,10 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define CHECK_IOVEC_ONLY -1
 
 /*
- * The below are the various read and write types that we support. Some of
+ * The below are the various read and write flags that we support. Some of
  * them include behavioral modifiers that send information down to the
- * block layer and IO scheduler. Terminology:
+ * block layer and IO scheduler. They should be used along with a req_op.
+ * Terminology:
  *
  *     The block layer uses device plugging to defer IO a little bit, in
  *     the hope that we will see more IO very shortly. This increases
@@ -193,19 +194,19 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
  *                     non-volatile media on completion.
  *
  */
-#define RW_MASK                        REQ_WRITE
+#define RW_MASK                        REQ_OP_WRITE
 #define RWA_MASK               REQ_RAHEAD
 
-#define READ                   0
+#define READ                   REQ_OP_READ
 #define WRITE                  RW_MASK
 #define READA                  RWA_MASK
 
-#define READ_SYNC              (READ | REQ_SYNC)
-#define WRITE_SYNC             (WRITE | REQ_SYNC | REQ_NOIDLE)
-#define WRITE_ODIRECT          (WRITE | REQ_SYNC)
-#define WRITE_FLUSH            (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
-#define WRITE_FUA              (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
-#define WRITE_FLUSH_FUA                (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+#define READ_SYNC              REQ_SYNC
+#define WRITE_SYNC             (REQ_SYNC | REQ_NOIDLE)
+#define WRITE_ODIRECT          REQ_SYNC
+#define WRITE_FLUSH            (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH)
+#define WRITE_FUA              (REQ_SYNC | REQ_NOIDLE | REQ_FUA)
+#define WRITE_FLUSH_FUA                (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA)
 
 /*
  * Attribute flags.  These should be or-ed together to figure out what
@@ -2464,15 +2465,29 @@ extern void make_bad_inode(struct inode *);
 extern bool is_bad_inode(struct inode *);
 
 #ifdef CONFIG_BLOCK
+static inline bool op_is_write(unsigned int op)
+{
+       return op == REQ_OP_READ ? false : true;
+}
+
 /*
  * return READ, READA, or WRITE
  */
-#define bio_rw(bio)            ((bio)->bi_rw & (RW_MASK | RWA_MASK))
+static inline int bio_rw(struct bio *bio)
+{
+       if (op_is_write(bio_op(bio)))
+               return WRITE;
+
+       return bio->bi_rw & RWA_MASK;
+}
 
 /*
  * return data direction, READ or WRITE
  */
-#define bio_data_dir(bio)      ((bio)->bi_rw & 1)
+static inline int bio_data_dir(struct bio *bio)
+{
+       return op_is_write(bio_op(bio)) ? WRITE : READ;
+}
 
 extern void check_disk_size_change(struct gendisk *disk,
                                   struct block_device *bdev);
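
Sketch of the direction helpers after the conversion (the counters are assumed): bit 0 of bi_rw no longer means "write", so both helpers derive the answer from the encoded op, while readahead is still reported through the flag bits.

        if (bio_data_dir(bio) == WRITE)         /* op_is_write(bio_op(bio)) underneath */
                stats->writes++;                /* assumed per-device counter */
        else if (bio_rw(bio) == READA)          /* REQ_RAHEAD surfaced via RWA_MASK */
                stats->readaheads++;
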
@@ -2747,7 +2762,7 @@ static inline void remove_inode_hash(struct inode *inode)
 extern void inode_sb_list_add(struct inode *inode);
 
 #ifdef CONFIG_BLOCK
-extern blk_qc_t submit_bio(int, struct bio *);
+extern blk_qc_t submit_bio(struct bio *);
 extern int bdev_read_only(struct block_device *);
 #endif
 extern int set_blocksize(struct block_device *, int);
@@ -2802,7 +2817,7 @@ extern int generic_file_open(struct inode * inode, struct file * filp);
 extern int nonseekable_open(struct inode * inode, struct file * filp);
 
 #ifdef CONFIG_BLOCK
-typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
+typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
                            loff_t file_offset);
 
 enum {
index 981acf74b14f1fdbf00fc2c7bd915c82a9b37c3c..65673d8b81ac4fc30306c8e1a5bd70a05fbeb64b 100644 (file)
@@ -27,7 +27,8 @@ DECLARE_EVENT_CLASS(bcache_request,
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->orig_sector    = bio->bi_iter.bi_sector - 16;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+                             bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -101,7 +102,8 @@ DECLARE_EVENT_CLASS(bcache_bio,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+                             bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d  %s %llu + %u",
@@ -136,7 +138,8 @@ TRACE_EVENT(bcache_read,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+                             bio->bi_iter.bi_size);
                __entry->cache_hit = hit;
                __entry->bypass = bypass;
        ),
@@ -167,7 +170,8 @@ TRACE_EVENT(bcache_write,
                __entry->inode          = inode;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+                             bio->bi_iter.bi_size);
                __entry->writeback = writeback;
                __entry->bypass = bypass;
        ),
index e8a5eca1dbe5787be56b31a317b0b6ac9b421a8e..5a2a7592068f2d3fc9d345c498f85183bc65053a 100644 (file)
@@ -84,7 +84,8 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
                                        0 : blk_rq_sectors(rq);
                __entry->errors    = rq->errors;
 
-               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+               blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags,
+                             blk_rq_bytes(rq));
                blk_dump_cmd(__get_str(cmd), rq);
        ),
 
@@ -162,7 +163,7 @@ TRACE_EVENT(block_rq_complete,
                __entry->nr_sector = nr_bytes >> 9;
                __entry->errors    = rq->errors;
 
-               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
+               blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, nr_bytes);
                blk_dump_cmd(__get_str(cmd), rq);
        ),
 
@@ -198,7 +199,8 @@ DECLARE_EVENT_CLASS(block_rq,
                __entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
                                        blk_rq_bytes(rq) : 0;
 
-               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+               blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags,
+                             blk_rq_bytes(rq));
                blk_dump_cmd(__get_str(cmd), rq);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
@@ -272,7 +274,8 @@ TRACE_EVENT(block_bio_bounce,
                                          bio->bi_bdev->bd_dev : 0;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+                             bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -310,7 +313,8 @@ TRACE_EVENT(block_bio_complete,
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->error          = error;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+                             bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d %s %llu + %u [%d]",
@@ -337,7 +341,8 @@ DECLARE_EVENT_CLASS(block_bio_merge,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+                             bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -404,7 +409,8 @@ TRACE_EVENT(block_bio_queue,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+                             bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -432,7 +438,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
                __entry->dev            = bio ? bio->bi_bdev->bd_dev : 0;
                __entry->sector         = bio ? bio->bi_iter.bi_sector : 0;
                __entry->nr_sector      = bio ? bio_sectors(bio) : 0;
-               blk_fill_rwbs(__entry->rwbs,
+               blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0,
                              bio ? bio->bi_rw : 0, __entry->nr_sector);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
         ),
@@ -567,7 +573,8 @@ TRACE_EVENT(block_split,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->new_sector     = new_sector;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+                             bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -610,7 +617,8 @@ TRACE_EVENT(block_bio_remap,
                __entry->nr_sector      = bio_sectors(bio);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+                             bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
@@ -656,7 +664,8 @@ TRACE_EVENT(block_rq_remap,
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
                __entry->nr_bios        = blk_rq_count_bios(rq);
-               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+               blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags,
+                             blk_rq_bytes(rq));
        ),
 
        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
index 3a09bb4dc3b24080bbcbff9160ab95073eb9b48e..878963a1f0587220d1ca0443c0fccd2d76ba8634 100644 (file)
@@ -31,10 +31,9 @@ TRACE_DEFINE_ENUM(BG_GC);
 TRACE_DEFINE_ENUM(LFS);
 TRACE_DEFINE_ENUM(SSR);
 TRACE_DEFINE_ENUM(__REQ_RAHEAD);
-TRACE_DEFINE_ENUM(__REQ_WRITE);
 TRACE_DEFINE_ENUM(__REQ_SYNC);
 TRACE_DEFINE_ENUM(__REQ_NOIDLE);
-TRACE_DEFINE_ENUM(__REQ_FLUSH);
+TRACE_DEFINE_ENUM(__REQ_PREFLUSH);
 TRACE_DEFINE_ENUM(__REQ_FUA);
 TRACE_DEFINE_ENUM(__REQ_PRIO);
 TRACE_DEFINE_ENUM(__REQ_META);
@@ -56,17 +55,21 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
                { IPU,          "IN-PLACE" },                           \
                { OPU,          "OUT-OF-PLACE" })
 
-#define F2FS_BIO_MASK(t)       (t & (READA | WRITE_FLUSH_FUA))
+#define F2FS_BIO_FLAG_MASK(t)  (t & (READA | WRITE_FLUSH_FUA))
 #define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO))
 
-#define show_bio_type(type)    show_bio_base(type), show_bio_extra(type)
+#define show_bio_type(op, op_flags) show_bio_op(op),                   \
+                       show_bio_op_flags(op_flags), show_bio_extra(op_flags)
 
-#define show_bio_base(type)                                            \
-       __print_symbolic(F2FS_BIO_MASK(type),                           \
+#define show_bio_op(op)                                                        \
+       __print_symbolic(op,                                            \
                { READ,                 "READ" },                       \
+               { WRITE,                "WRITE" })
+
+#define show_bio_op_flags(flags)                                       \
+       __print_symbolic(F2FS_BIO_FLAG_MASK(flags),                     \
                { READA,                "READAHEAD" },                  \
                { READ_SYNC,            "READ_SYNC" },                  \
-               { WRITE,                "WRITE" },                      \
                { WRITE_SYNC,           "WRITE_SYNC" },                 \
                { WRITE_FLUSH,          "WRITE_FLUSH" },                \
                { WRITE_FUA,            "WRITE_FUA" },                  \
@@ -734,7 +737,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
                __field(pgoff_t, index)
                __field(block_t, old_blkaddr)
                __field(block_t, new_blkaddr)
-               __field(int, rw)
+               __field(int, op)
+               __field(int, op_flags)
                __field(int, type)
        ),
 
@@ -744,17 +748,18 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
                __entry->index          = page->index;
                __entry->old_blkaddr    = fio->old_blkaddr;
                __entry->new_blkaddr    = fio->new_blkaddr;
-               __entry->rw             = fio->rw;
+               __entry->op             = fio->op;
+               __entry->op_flags       = fio->op_flags;
                __entry->type           = fio->type;
        ),
 
        TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
-               "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%s, type = %s",
+               "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%si%s, type = %s",
                show_dev_ino(__entry),
                (unsigned long)__entry->index,
                (unsigned long long)__entry->old_blkaddr,
                (unsigned long long)__entry->new_blkaddr,
-               show_bio_type(__entry->rw),
+               show_bio_type(__entry->op, __entry->op_flags),
                show_block_type(__entry->type))
 );
 
@@ -785,7 +790,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
 
        TP_STRUCT__entry(
                __field(dev_t,  dev)
-               __field(int,    rw)
+               __field(int,    op)
+               __field(int,    op_flags)
                __field(int,    type)
                __field(sector_t,       sector)
                __field(unsigned int,   size)
@@ -793,15 +799,16 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
 
        TP_fast_assign(
                __entry->dev            = sb->s_dev;
-               __entry->rw             = fio->rw;
+               __entry->op             = fio->op;
+               __entry->op_flags       = fio->op_flags;
                __entry->type           = fio->type;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->size           = bio->bi_iter.bi_size;
        ),
 
-       TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u",
+       TP_printk("dev = (%d,%d), %s%s%s, %s, sector = %lld, size = %u",
                show_dev(__entry),
-               show_bio_type(__entry->rw),
+               show_bio_type(__entry->op, __entry->op_flags),
                show_block_type(__entry->type),
                (unsigned long long)__entry->sector,
                __entry->size)
index 160e1006640d585f417ae37ecab304e407971e67..c1aaac4310550a4feb6fe70c07c6ec2060ae5587 100644 (file)
@@ -261,7 +261,7 @@ static void hib_end_io(struct bio *bio)
        bio_put(bio);
 }
 
-static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
+static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
                struct hib_bio_batch *hb)
 {
        struct page *page = virt_to_page(addr);
@@ -271,6 +271,7 @@ static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
        bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
        bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
        bio->bi_bdev = hib_resume_bdev;
+       bio_set_op_attrs(bio, op, op_flags);
 
        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
@@ -283,9 +284,9 @@ static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
                bio->bi_end_io = hib_end_io;
                bio->bi_private = hb;
                atomic_inc(&hb->count);
-               submit_bio(rw, bio);
+               submit_bio(bio);
        } else {
-               error = submit_bio_wait(rw, bio);
+               error = submit_bio_wait(bio);
                bio_put(bio);
        }
 
@@ -306,7 +307,8 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 {
        int error;
 
-       hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
+       hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block,
+                     swsusp_header, NULL);
        if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
            !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
                memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
@@ -315,8 +317,8 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
                swsusp_header->flags = flags;
                if (flags & SF_CRC32_MODE)
                        swsusp_header->crc32 = handle->crc32;
-               error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
-                                       swsusp_header, NULL);
+               error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
+                                     swsusp_resume_block, swsusp_header, NULL);
        } else {
                printk(KERN_ERR "PM: Swap header not found!\n");
                error = -ENODEV;
@@ -389,7 +391,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
        } else {
                src = buf;
        }
-       return hib_submit_io(WRITE_SYNC, offset, src, hb);
+       return hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, offset, src, hb);
 }
 
 static void release_swap_writer(struct swap_map_handle *handle)
@@ -992,7 +994,8 @@ static int get_swap_reader(struct swap_map_handle *handle,
                        return -ENOMEM;
                }
 
-               error = hib_submit_io(READ_SYNC, offset, tmp->map, NULL);
+               error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset,
+                                     tmp->map, NULL);
                if (error) {
                        release_swap_reader(handle);
                        return error;
@@ -1016,7 +1019,7 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
        offset = handle->cur->entries[handle->k];
        if (!offset)
                return -EFAULT;
-       error = hib_submit_io(READ_SYNC, offset, buf, hb);
+       error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset, buf, hb);
        if (error)
                return error;
        if (++handle->k >= MAP_PAGE_ENTRIES) {
@@ -1525,7 +1528,8 @@ int swsusp_check(void)
        if (!IS_ERR(hib_resume_bdev)) {
                set_blocksize(hib_resume_bdev, PAGE_SIZE);
                clear_page(swsusp_header);
-               error = hib_submit_io(READ_SYNC, swsusp_resume_block,
+               error = hib_submit_io(REQ_OP_READ, READ_SYNC,
+                                       swsusp_resume_block,
                                        swsusp_header, NULL);
                if (error)
                        goto put;
@@ -1533,7 +1537,8 @@ int swsusp_check(void)
                if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
                        memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
                        /* Reset swap signature now */
-                       error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
+                       error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
+                                               swsusp_resume_block,
                                                swsusp_header, NULL);
                } else {
                        error = -EINVAL;
@@ -1577,10 +1582,12 @@ int swsusp_unmark(void)
 {
        int error;
 
-       hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
+       hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block,
+                     swsusp_header, NULL);
        if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) {
                memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10);
-               error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
+               error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
+                                       swsusp_resume_block,
                                        swsusp_header, NULL);
        } else {
                printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
index 9aef8654e90d12f954368e52d5d11b381ec9724f..bedb84d168d1c7ac85af8baafc4ad0b6bd6da8cc 100644 (file)
@@ -127,12 +127,13 @@ static void trace_note_tsk(struct task_struct *tsk)
 
 static void trace_note_time(struct blk_trace *bt)
 {
-       struct timespec now;
+       struct timespec64 now;
        unsigned long flags;
        u32 words[2];
 
-       getnstimeofday(&now);
-       words[0] = now.tv_sec;
+       /* need to check user space to see if this breaks in y2038 or y2106 */
+       ktime_get_real_ts64(&now);
+       words[0] = (u32)now.tv_sec;
        words[1] = now.tv_nsec;
 
        local_irq_save(flags);
@@ -189,6 +190,7 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
                                 BLK_TC_ACT(BLK_TC_WRITE) };
 
 #define BLK_TC_RAHEAD          BLK_TC_AHEAD
+#define BLK_TC_PREFLUSH                BLK_TC_FLUSH
 
 /* The ilog2() calls fall out because they're constant */
 #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
@@ -199,7 +201,8 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
  * blk_io_trace structure and places it in a per-cpu subbuffer.
  */
 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
-                    int rw, u32 what, int error, int pdu_len, void *pdu_data)
+                    int op, int op_flags, u32 what, int error, int pdu_len,
+                    void *pdu_data)
 {
        struct task_struct *tsk = current;
        struct ring_buffer_event *event = NULL;
@@ -214,13 +217,16 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
        if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
                return;
 
-       what |= ddir_act[rw & WRITE];
-       what |= MASK_TC_BIT(rw, SYNC);
-       what |= MASK_TC_BIT(rw, RAHEAD);
-       what |= MASK_TC_BIT(rw, META);
-       what |= MASK_TC_BIT(rw, DISCARD);
-       what |= MASK_TC_BIT(rw, FLUSH);
-       what |= MASK_TC_BIT(rw, FUA);
+       what |= ddir_act[op_is_write(op) ? WRITE : READ];
+       what |= MASK_TC_BIT(op_flags, SYNC);
+       what |= MASK_TC_BIT(op_flags, RAHEAD);
+       what |= MASK_TC_BIT(op_flags, META);
+       what |= MASK_TC_BIT(op_flags, PREFLUSH);
+       what |= MASK_TC_BIT(op_flags, FUA);
+       if (op == REQ_OP_DISCARD)
+               what |= BLK_TC_ACT(BLK_TC_DISCARD);
+       if (op == REQ_OP_FLUSH)
+               what |= BLK_TC_ACT(BLK_TC_FLUSH);
 
        pid = tsk->pid;
        if (act_log_check(bt, what, sector, pid))
@@ -708,11 +714,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 
        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                what |= BLK_TC_ACT(BLK_TC_PC);
-               __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
+               __blk_add_trace(bt, 0, nr_bytes, req_op(rq), rq->cmd_flags,
                                what, rq->errors, rq->cmd_len, rq->cmd);
        } else  {
                what |= BLK_TC_ACT(BLK_TC_FS);
-               __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
+               __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, req_op(rq),
                                rq->cmd_flags, what, rq->errors, 0, NULL);
        }
 }
@@ -770,7 +776,7 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
                return;
 
        __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-                       bio->bi_rw, what, error, 0, NULL);
+                       bio_op(bio), bio->bi_rw, what, error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -818,7 +824,8 @@ static void blk_add_trace_getrq(void *ignore,
                struct blk_trace *bt = q->blk_trace;
 
                if (bt)
-                       __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
+                       __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
+                                       NULL);
        }
 }
 
@@ -833,7 +840,7 @@ static void blk_add_trace_sleeprq(void *ignore,
                struct blk_trace *bt = q->blk_trace;
 
                if (bt)
-                       __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
+                       __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
                                        0, 0, NULL);
        }
 }
@@ -843,7 +850,7 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
        struct blk_trace *bt = q->blk_trace;
 
        if (bt)
-               __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
+               __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
 static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
@@ -860,7 +867,7 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
                else
                        what = BLK_TA_UNPLUG_TIMER;
 
-               __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+               __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
        }
 }
 
@@ -874,8 +881,9 @@ static void blk_add_trace_split(void *ignore,
                __be64 rpdu = cpu_to_be64(pdu);
 
                __blk_add_trace(bt, bio->bi_iter.bi_sector,
-                               bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
-                               bio->bi_error, sizeof(rpdu), &rpdu);
+                               bio->bi_iter.bi_size, bio_op(bio), bio->bi_rw,
+                               BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu),
+                               &rpdu);
        }
 }
 
@@ -907,7 +915,7 @@ static void blk_add_trace_bio_remap(void *ignore,
        r.sector_from = cpu_to_be64(from);
 
        __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-                       bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
+                       bio_op(bio), bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
                        sizeof(r), &r);
 }
 
@@ -940,7 +948,7 @@ static void blk_add_trace_rq_remap(void *ignore,
        r.sector_from = cpu_to_be64(from);
 
        __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
-                       rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
+                       rq_data_dir(rq), 0, BLK_TA_REMAP, !!rq->errors,
                        sizeof(r), &r);
 }
 
@@ -965,10 +973,10 @@ void blk_add_driver_data(struct request_queue *q,
                return;
 
        if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
-               __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
+               __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, 0,
                                BLK_TA_DRV_DATA, rq->errors, len, data);
        else
-               __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
+               __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0, 0,
                                BLK_TA_DRV_DATA, rq->errors, len, data);
 }
 EXPORT_SYMBOL_GPL(blk_add_driver_data);
@@ -1769,21 +1777,30 @@ void blk_dump_cmd(char *buf, struct request *rq)
        }
 }
 
-void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
+void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes)
 {
        int i = 0;
 
-       if (rw & REQ_FLUSH)
+       if (rw & REQ_PREFLUSH)
                rwbs[i++] = 'F';
 
-       if (rw & WRITE)
+       switch (op) {
+       case REQ_OP_WRITE:
+       case REQ_OP_WRITE_SAME:
                rwbs[i++] = 'W';
-       else if (rw & REQ_DISCARD)
+               break;
+       case REQ_OP_DISCARD:
                rwbs[i++] = 'D';
-       else if (bytes)
+               break;
+       case REQ_OP_FLUSH:
+               rwbs[i++] = 'F';
+               break;
+       case REQ_OP_READ:
                rwbs[i++] = 'R';
-       else
+               break;
+       default:
                rwbs[i++] = 'N';
+       }
 
        if (rw & REQ_FUA)
                rwbs[i++] = 'F';
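
Illustrative calls (the 8-byte buffer matches the RWBS_LEN the trace events use, and the unshown tail of the function is assumed to keep appending 'S'/'M'/'A'/'E' for the remaining flags):

        char rwbs[8];

        blk_fill_rwbs(rwbs, REQ_OP_WRITE, REQ_SYNC, 4096);      /* -> "WS" */
        blk_fill_rwbs(rwbs, REQ_OP_DISCARD, 0, 4096);           /* -> "D" */
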
index 0cd522753ff5c6830673e6d9f0cb46d70b4c9c23..d67c8288d95dba435bf608b6258c3168c8206d7a 100644 (file)
        n = wanted;                                     \
 }
 
-#define iterate_bvec(i, n, __v, __p, skip, STEP) {     \
-       size_t wanted = n;                              \
-       __p = i->bvec;                                  \
-       __v.bv_len = min_t(size_t, n, __p->bv_len - skip);      \
-       if (likely(__v.bv_len)) {                       \
-               __v.bv_page = __p->bv_page;             \
-               __v.bv_offset = __p->bv_offset + skip;  \
-               (void)(STEP);                           \
-               skip += __v.bv_len;                     \
-               n -= __v.bv_len;                        \
-       }                                               \
-       while (unlikely(n)) {                           \
-               __p++;                                  \
-               __v.bv_len = min_t(size_t, n, __p->bv_len);     \
-               if (unlikely(!__v.bv_len))              \
+#define iterate_bvec(i, n, __v, __bi, skip, STEP) {    \
+       struct bvec_iter __start;                       \
+       __start.bi_size = n;                            \
+       __start.bi_bvec_done = skip;                    \
+       __start.bi_idx = 0;                             \
+       for_each_bvec(__v, i->bvec, __bi, __start) {    \
+               if (!__v.bv_len)                        \
                        continue;                       \
-               __v.bv_page = __p->bv_page;             \
-               __v.bv_offset = __p->bv_offset;         \
                (void)(STEP);                           \
-               skip = __v.bv_len;                      \
-               n -= __v.bv_len;                        \
        }                                               \
-       n = wanted;                                     \
 }
 
 #define iterate_all_kinds(i, n, v, I, B, K) {                  \
        size_t skip = i->iov_offset;                            \
        if (unlikely(i->type & ITER_BVEC)) {                    \
-               const struct bio_vec *bvec;                     \
                struct bio_vec v;                               \
-               iterate_bvec(i, n, v, bvec, skip, (B))          \
+               struct bvec_iter __bi;                          \
+               iterate_bvec(i, n, v, __bi, skip, (B))          \
        } else if (unlikely(i->type & ITER_KVEC)) {             \
                const struct kvec *kvec;                        \
                struct kvec v;                                  \
        if (i->count) {                                         \
                size_t skip = i->iov_offset;                    \
                if (unlikely(i->type & ITER_BVEC)) {            \
-                       const struct bio_vec *bvec;             \
+                       const struct bio_vec *bvec = i->bvec;   \
                        struct bio_vec v;                       \
-                       iterate_bvec(i, n, v, bvec, skip, (B))  \
-                       if (skip == bvec->bv_len) {             \
-                               bvec++;                         \
-                               skip = 0;                       \
-                       }                                       \
-                       i->nr_segs -= bvec - i->bvec;           \
-                       i->bvec = bvec;                         \
+                       struct bvec_iter __bi;                  \
+                       iterate_bvec(i, n, v, __bi, skip, (B))  \
+                       i->bvec = __bvec_iter_bvec(i->bvec, __bi);      \
+                       i->nr_segs -= i->bvec - bvec;           \
+                       skip = __bi.bi_bvec_done;               \
                } else if (unlikely(i->type & ITER_KVEC)) {     \
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
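
The rewritten iterate_bvec() above (in what looks like lib/iov_iter.c) replaces the hand-rolled segment walk with a struct bvec_iter seeded from the byte count and the offset into the first segment, letting for_each_bvec() do the advancing; the advance-side hunk then uses __bvec_iter_bvec() and bi_bvec_done to recover the new segment pointer and intra-segment offset. A standalone sketch of that iteration pattern, assuming an iov_iter 'i' with ITER_BVEC set and 'n'/'skip' defined as in the macros (the pr_debug body is illustrative only):

	struct bvec_iter start;
	struct bvec_iter iter;
	struct bio_vec bv;

	start.bi_size = n;		/* bytes to walk                     */
	start.bi_bvec_done = skip;	/* bytes already consumed in bvec[0] */
	start.bi_idx = 0;

	for_each_bvec(bv, i->bvec, iter, start)
		pr_debug("page %p offset %u len %u\n",
			 bv.bv_page, bv.bv_offset, bv.bv_len);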
index 242dba07545bc61104a7713f2246f8e0fccebfc4..dcc5d3769608088a8c100f04f4e01dcccf5b74da 100644 (file)
@@ -259,7 +259,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
                bio_end_io_t end_write_func)
 {
        struct bio *bio;
-       int ret, rw = WRITE;
+       int ret;
        struct swap_info_struct *sis = page_swap_info(page);
 
        if (sis->flags & SWP_FILE) {
@@ -317,12 +317,13 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
                ret = -ENOMEM;
                goto out;
        }
+       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        if (wbc->sync_mode == WB_SYNC_ALL)
-               rw |= REQ_SYNC;
+               bio->bi_rw |= REQ_SYNC;
        count_vm_event(PSWPOUT);
        set_page_writeback(page);
        unlock_page(page);
-       submit_bio(rw, bio);
+       submit_bio(bio);
 out:
        return ret;
 }
@@ -369,8 +370,9 @@ int swap_readpage(struct page *page)
                ret = -ENOMEM;
                goto out;
        }
+       bio_set_op_attrs(bio, REQ_OP_READ, 0);
        count_vm_event(PSWPIN);
-       submit_bio(READ, bio);
+       submit_bio(bio);
 out:
        return ret;
 }
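
Both swap paths above (apparently mm/page_io.c) follow the new submission convention from this series: the operation and its flags are recorded on the bio with bio_set_op_attrs(), and submit_bio() takes only the bio. A minimal write-side sketch under those assumptions (bdev, sector and page are placeholders, not taken from this diff):

	struct bio *bio = bio_alloc(GFP_KERNEL, 1);

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	/* formerly submit_bio(WRITE | REQ_SYNC, bio); the op and flags
	 * now live on the bio before the single-argument submit */
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
	submit_bio(bio);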