git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blobdiff - drivers/md/dm.c
block: unify flags for struct bio and struct request
[mirror_ubuntu-artful-kernel.git] / drivers / md / dm.c
index 1e0e6dd51501036a4993e96714ca20a3ede1c2c8..d6f77baeafd605527217dd62a056f34be2bc2aed 100644 (file)
@@ -614,7 +614,7 @@ static void dec_pending(struct dm_io *io, int error)
                         */
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md)) {
-                               if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+                               if (!(io->bio->bi_rw & REQ_HARDBARRIER))
                                        bio_list_add_head(&md->deferred,
                                                          io->bio);
                        } else
@@ -626,7 +626,7 @@ static void dec_pending(struct dm_io *io, int error)
                io_error = io->error;
                bio = io->bio;
 
-               if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+               if (bio->bi_rw & REQ_HARDBARRIER) {
                        /*
                         * There can be just one barrier request so we use
                         * a per-device variable for error reporting.
@@ -1106,7 +1106,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 
        clone->bi_sector = sector;
        clone->bi_bdev = bio->bi_bdev;
-       clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+       clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
        clone->bi_vcnt = 1;
        clone->bi_size = to_bytes(len);
        clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1133,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
        clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
        __bio_clone(clone, bio);
-       clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+       clone->bi_rw &= ~REQ_HARDBARRIER;
        clone->bi_destructor = dm_bio_destructor;
        clone->bi_sector = sector;
        clone->bi_idx = idx;
@@ -1301,7 +1301,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
        ci.map = dm_get_live_table(md);
        if (unlikely(!ci.map)) {
-               if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+               if (!(bio->bi_rw & REQ_HARDBARRIER))
                        bio_io_error(bio);
                else
                        if (!md->barrier_error)
@@ -1414,7 +1414,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
         * we have to queue this io for later.
         */
        if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-           unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+           unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
                up_read(&md->io_lock);
 
                if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -2296,7 +2296,7 @@ static void dm_wq_work(struct work_struct *work)
                if (dm_request_based(md))
                        generic_make_request(c);
                else {
-                       if (bio_rw_flagged(c, BIO_RW_BARRIER))
+                       if (c->bi_rw & REQ_HARDBARRIER)
                                process_barrier(md, c);
                        else
                                __split_and_process_bio(md, c);