git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
block: replace bi_bdev with a gendisk pointer and partitions index
author Christoph Hellwig <hch@lst.de>
Wed, 23 Aug 2017 17:10:32 +0000 (19:10 +0200)
committer Jens Axboe <axboe@kernel.dk>
Wed, 23 Aug 2017 18:49:55 +0000 (12:49 -0600)
This way we don't need a block_device structure to submit I/O.  The
block_device has different lifetime rules from the gendisk and
request_queue and is usually only available when the block device node
is open.  Other callers need to explicitly create one (e.g. the lightnvm
passthrough code, or the new nvme multipathing code).

For the actual I/O path all that we need is the gendisk, which exists
once per block device.  But given that the block layer also does
partition remapping we additionally need a partition index, which is
used for said remapping in generic_make_request.
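
As a rough sketch of what this means for struct bio (field names and types as they appear in the dm-bio-record.h hunk below; the real declaration lives in include/linux/blk_types.h, which this patch touches but which is not quoted in this excerpt), together with the remap step as implemented by the new blk_partition_remap() in the blk-core.c hunk:

	/* sketch only: bi_bdev is gone, replaced by a disk pointer plus a partition index */
	struct bio {
		struct gendisk		*bi_disk;
		u8			bi_partno;	/* 0 means the whole disk */
		/* ... bi_opf, bi_iter, bi_end_io, ... unchanged ... */
	};

	struct hd_struct *p;

	/* remap, done under rcu_read_lock() from generic_make_request_checks() */
	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
	bio->bi_iter.bi_sector += p->start_sect;
	bio->bi_partno = 0;			/* the bio now addresses the whole disk */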

Note that all the block drivers generally want request_queue or
sometimes the gendisk, so this removes a layer of indirection all
over the stack.
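
The per-driver conversions go through a handful of new helpers from the include/linux/bio.h part of this patch. Their definitions are not quoted in this excerpt, so the following is only a sketch consistent with how the hunks below use them; in particular it assumes struct block_device carries a matching bd_partno field (added by this series):

	/* sketch only -- see include/linux/bio.h in this patch for the real macros */
	#define bio_set_dev(bio, bdev)				\
	do {							\
		(bio)->bi_disk   = (bdev)->bd_disk;		\
		(bio)->bi_partno = (bdev)->bd_partno;		\
	} while (0)

	#define bio_copy_dev(dst, src)				\
	do {							\
		(dst)->bi_disk   = (src)->bi_disk;		\
		(dst)->bi_partno = (src)->bi_partno;		\
	} while (0)

	#define bio_dev(bio)	disk_devt((bio)->bi_disk)

With these, the driver-side changes are mechanical: bio->bi_bdev = bdev becomes bio_set_dev(bio, bdev), bdev_get_queue(bio->bi_bdev) becomes bio->bi_disk->queue, and bdevname(bio->bi_bdev, b) becomes bio_devname(bio, b), as the per-file hunks below show.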

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
99 files changed:
arch/powerpc/sysdev/axonram.c
block/bio-integrity.c
block/bio.c
block/blk-core.c
block/blk-flush.c
block/blk-lib.c
block/blk-merge.c
block/blk-zoned.c
drivers/block/brd.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_worker.c
drivers/block/floppy.c
drivers/block/pktcdvd.c
drivers/block/xen-blkback/blkback.c
drivers/md/bcache/debug.c
drivers/md/bcache/io.c
drivers/md/bcache/journal.c
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/md/bcache/writeback.c
drivers/md/dm-bio-record.h
drivers/md/dm-bufio.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-delay.c
drivers/md/dm-era-target.c
drivers/md/dm-flakey.c
drivers/md/dm-integrity.c
drivers/md/dm-io.c
drivers/md/dm-linear.c
drivers/md/dm-log-writes.c
drivers/md/dm-mpath.c
drivers/md/dm-raid1.c
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-switch.c
drivers/md/dm-thin.c
drivers/md/dm-verity-target.c
drivers/md/dm-zoned-metadata.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/md/faulty.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/md.h
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5-ppl.c
drivers/md/raid5.c
drivers/nvdimm/nd.h
drivers/nvme/host/core.c
drivers/nvme/host/lightnvm.c
drivers/nvme/target/io-cmd.c
drivers/s390/block/dcssblk.c
drivers/s390/block/xpram.c
drivers/target/target_core_iblock.c
fs/block_dev.c
fs/btrfs/check-integrity.c
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/raid56.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/buffer.c
fs/crypto/bio.c
fs/direct-io.c
fs/exofs/ore.c
fs/ext4/page-io.c
fs/ext4/readpage.c
fs/f2fs/data.c
fs/f2fs/segment.c
fs/gfs2/lops.c
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/hfsplus/wrapper.c
fs/iomap.c
fs/jfs/jfs_logmgr.c
fs/jfs/jfs_metapage.c
fs/mpage.c
fs/nfs/blocklayout/blocklayout.c
fs/nilfs2/segbuf.c
fs/ocfs2/cluster/heartbeat.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_buf.c
include/linux/bio.h
include/linux/blk_types.h
include/trace/events/bcache.h
include/trace/events/block.h
include/trace/events/f2fs.h
kernel/power/swap.c
kernel/trace/blktrace.c
mm/page_io.c

index 2799706106c63a3f5e3bc377d10385f3149a065d..1e15deacccafc73ad94d04472c9d3bcdc20127b6 100644 (file)
@@ -110,7 +110,7 @@ axon_ram_irq_handler(int irq, void *dev)
 static blk_qc_t
 axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 {
-       struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
+       struct axon_ram_bank *bank = bio->bi_disk->private_data;
        unsigned long phys_mem, phys_end;
        void *user_mem;
        struct bio_vec vec;
index 5fa9a740fd99ab4dec3e48e673c90436b7e907b4..fc71e61728696b2560c389cc7f75e49ea34daa0a 100644 (file)
@@ -146,7 +146,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
        iv = bip->bip_vec + bip->bip_vcnt;
 
        if (bip->bip_vcnt &&
-           bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
+           bvec_gap_to_prev(bio->bi_disk->queue,
                             &bip->bip_vec[bip->bip_vcnt - 1], offset))
                return 0;
 
@@ -190,7 +190,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
 static blk_status_t bio_integrity_process(struct bio *bio,
                struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
 {
-       struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+       struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
        struct blk_integrity_iter iter;
        struct bvec_iter bviter;
        struct bio_vec bv;
@@ -199,7 +199,7 @@ static blk_status_t bio_integrity_process(struct bio *bio,
        void *prot_buf = page_address(bip->bip_vec->bv_page) +
                bip->bip_vec->bv_offset;
 
-       iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
+       iter.disk_name = bio->bi_disk->disk_name;
        iter.interval = 1 << bi->interval_exp;
        iter.seed = proc_iter->bi_sector;
        iter.prot_buf = prot_buf;
@@ -236,8 +236,8 @@ static blk_status_t bio_integrity_process(struct bio *bio,
 bool bio_integrity_prep(struct bio *bio)
 {
        struct bio_integrity_payload *bip;
-       struct blk_integrity *bi;
-       struct request_queue *q;
+       struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+       struct request_queue *q = bio->bi_disk->queue;
        void *buf;
        unsigned long start, end;
        unsigned int len, nr_pages;
@@ -245,11 +245,9 @@ bool bio_integrity_prep(struct bio *bio)
        unsigned int intervals;
        blk_status_t status;
 
-       bi = bdev_get_integrity(bio->bi_bdev);
        if (!bi)
                return true;
 
-       q = bdev_get_queue(bio->bi_bdev);
        if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
                return true;
 
@@ -354,7 +352,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
        struct bio_integrity_payload *bip =
                container_of(work, struct bio_integrity_payload, bip_work);
        struct bio *bio = bip->bip_bio;
-       struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+       struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
        struct bvec_iter iter = bio->bi_iter;
 
        /*
@@ -411,7 +409,7 @@ bool __bio_integrity_endio(struct bio *bio)
 void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
 {
        struct bio_integrity_payload *bip = bio_integrity(bio);
-       struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+       struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
        unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
 
        bip->bip_iter.bi_sector += bytes_done >> 9;
@@ -428,7 +426,7 @@ EXPORT_SYMBOL(bio_integrity_advance);
 void bio_integrity_trim(struct bio *bio)
 {
        struct bio_integrity_payload *bip = bio_integrity(bio);
-       struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+       struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
 
        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
 }
index ecd1a9c7a30160925779cd30a6702721035866c9..6745759028da5eab4f2bcad90c7a480564f38d7b 100644 (file)
@@ -593,10 +593,10 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
        BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
 
        /*
-        * most users will be overriding ->bi_bdev with a new target,
+        * most users will be overriding ->bi_disk with a new target,
         * so we don't set nor calculate new physical/hw segment counts here
         */
-       bio->bi_bdev = bio_src->bi_bdev;
+       bio->bi_disk = bio_src->bi_disk;
        bio_set_flag(bio, BIO_CLONED);
        bio->bi_opf = bio_src->bi_opf;
        bio->bi_write_hint = bio_src->bi_write_hint;
@@ -681,7 +681,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
        bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
        if (!bio)
                return NULL;
-       bio->bi_bdev            = bio_src->bi_bdev;
+       bio->bi_disk            = bio_src->bi_disk;
        bio->bi_opf             = bio_src->bi_opf;
        bio->bi_write_hint      = bio_src->bi_write_hint;
        bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
@@ -1830,8 +1830,8 @@ again:
                goto again;
        }
 
-       if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
-               trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio,
+       if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+               trace_block_bio_complete(bio->bi_disk->queue, bio,
                                         blk_status_to_errno(bio->bi_status));
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
        }
index d579501f24ba7b60c16c6ed7bb1d032e485ce0cc..fc1af9097dff8feacf552392c8c705e00cfa6bde 100644 (file)
@@ -1910,40 +1910,15 @@ out_unlock:
        return BLK_QC_T_NONE;
 }
 
-/*
- * If bio->bi_dev is a partition, remap the location
- */
-static inline void blk_partition_remap(struct bio *bio)
-{
-       struct block_device *bdev = bio->bi_bdev;
-
-       /*
-        * Zone reset does not include bi_size so bio_sectors() is always 0.
-        * Include a test for the reset op code and perform the remap if needed.
-        */
-       if (bdev != bdev->bd_contains &&
-           (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) {
-               struct hd_struct *p = bdev->bd_part;
-
-               bio->bi_iter.bi_sector += p->start_sect;
-               bio->bi_bdev = bdev->bd_contains;
-
-               trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
-                                     bdev->bd_dev,
-                                     bio->bi_iter.bi_sector - p->start_sect);
-       }
-}
-
 static void handle_bad_sector(struct bio *bio)
 {
        char b[BDEVNAME_SIZE];
 
        printk(KERN_INFO "attempt to access beyond end of device\n");
        printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
-                       bdevname(bio->bi_bdev, b),
-                       bio->bi_opf,
+                       bio_devname(bio, b), bio->bi_opf,
                        (unsigned long long)bio_end_sector(bio),
-                       (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
+                       (long long)get_capacity(bio->bi_disk));
 }
 
 #ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -1981,6 +1956,38 @@ static inline bool should_fail_request(struct hd_struct *part,
 
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
+/*
+ * Remap block n of partition p to block n+start(p) of the disk.
+ */
+static inline int blk_partition_remap(struct bio *bio)
+{
+       struct hd_struct *p;
+       int ret = 0;
+
+       /*
+        * Zone reset does not include bi_size so bio_sectors() is always 0.
+        * Include a test for the reset op code and perform the remap if needed.
+        */
+       if (!bio->bi_partno ||
+           (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET))
+               return 0;
+
+       rcu_read_lock();
+       p = __disk_get_part(bio->bi_disk, bio->bi_partno);
+       if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) {
+               bio->bi_iter.bi_sector += p->start_sect;
+               bio->bi_partno = 0;
+               trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
+                               bio->bi_iter.bi_sector - p->start_sect);
+       } else {
+               printk("%s: fail for partition %d\n", __func__, bio->bi_partno);
+               ret = -EIO;
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 /*
  * Check whether this bio extends beyond the end of the device.
  */
@@ -1992,7 +1999,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
                return 0;
 
        /* Test device or partition size, when known. */
-       maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
+       maxsector = get_capacity(bio->bi_disk);
        if (maxsector) {
                sector_t sector = bio->bi_iter.bi_sector;
 
@@ -2017,20 +2024,18 @@ generic_make_request_checks(struct bio *bio)
        int nr_sectors = bio_sectors(bio);
        blk_status_t status = BLK_STS_IOERR;
        char b[BDEVNAME_SIZE];
-       struct hd_struct *part;
 
        might_sleep();
 
        if (bio_check_eod(bio, nr_sectors))
                goto end_io;
 
-       q = bdev_get_queue(bio->bi_bdev);
+       q = bio->bi_disk->queue;
        if (unlikely(!q)) {
                printk(KERN_ERR
                       "generic_make_request: Trying to access "
                        "nonexistent block-device %s (%Lu)\n",
-                       bdevname(bio->bi_bdev, b),
-                       (long long) bio->bi_iter.bi_sector);
+                       bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
                goto end_io;
        }
 
@@ -2042,17 +2047,11 @@ generic_make_request_checks(struct bio *bio)
        if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
                goto not_supported;
 
-       part = bio->bi_bdev->bd_part;
-       if (should_fail_request(part, bio->bi_iter.bi_size) ||
-           should_fail_request(&part_to_disk(part)->part0,
-                               bio->bi_iter.bi_size))
+       if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
                goto end_io;
 
-       /*
-        * If this device has partitions, remap block n
-        * of partition p to block n+start(p) of the disk.
-        */
-       blk_partition_remap(bio);
+       if (blk_partition_remap(bio))
+               goto end_io;
 
        if (bio_check_eod(bio, nr_sectors))
                goto end_io;
@@ -2081,16 +2080,16 @@ generic_make_request_checks(struct bio *bio)
                        goto not_supported;
                break;
        case REQ_OP_WRITE_SAME:
-               if (!bdev_write_same(bio->bi_bdev))
+               if (!q->limits.max_write_same_sectors)
                        goto not_supported;
                break;
        case REQ_OP_ZONE_REPORT:
        case REQ_OP_ZONE_RESET:
-               if (!bdev_is_zoned(bio->bi_bdev))
+               if (!blk_queue_is_zoned(q))
                        goto not_supported;
                break;
        case REQ_OP_WRITE_ZEROES:
-               if (!bdev_write_zeroes_sectors(bio->bi_bdev))
+               if (!q->limits.max_write_zeroes_sectors)
                        goto not_supported;
                break;
        default:
@@ -2197,7 +2196,7 @@ blk_qc_t generic_make_request(struct bio *bio)
        bio_list_init(&bio_list_on_stack[0]);
        current->bio_list = bio_list_on_stack;
        do {
-               struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+               struct request_queue *q = bio->bi_disk->queue;
 
                if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
                        struct bio_list lower, same;
@@ -2215,7 +2214,7 @@ blk_qc_t generic_make_request(struct bio *bio)
                        bio_list_init(&lower);
                        bio_list_init(&same);
                        while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
-                               if (q == bdev_get_queue(bio->bi_bdev))
+                               if (q == bio->bi_disk->queue)
                                        bio_list_add(&same, bio);
                                else
                                        bio_list_add(&lower, bio);
@@ -2258,7 +2257,7 @@ blk_qc_t submit_bio(struct bio *bio)
                unsigned int count;
 
                if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-                       count = bdev_logical_block_size(bio->bi_bdev) >> 9;
+                       count = queue_logical_block_size(bio->bi_disk->queue);
                else
                        count = bio_sectors(bio);
 
@@ -2275,8 +2274,7 @@ blk_qc_t submit_bio(struct bio *bio)
                        current->comm, task_pid_nr(current),
                                op_is_write(bio_op(bio)) ? "WRITE" : "READ",
                                (unsigned long long)bio->bi_iter.bi_sector,
-                               bdevname(bio->bi_bdev, b),
-                               count);
+                               bio_devname(bio, b), count);
                }
        }
 
@@ -3049,8 +3047,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;
 
-       if (bio->bi_bdev)
-               rq->rq_disk = bio->bi_bdev->bd_disk;
+       if (bio->bi_disk)
+               rq->rq_disk = bio->bi_disk;
 }
 
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
index ed5fe322abba59df898405fdd5c20305a5b8bf68..83b7d5b41c790c9dee2b7eaacbb5186d98d5cdce 100644 (file)
@@ -525,7 +525,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                return -ENXIO;
 
        bio = bio_alloc(gfp_mask, 0);
-       bio->bi_bdev = bdev;
+       bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
        ret = submit_bio_wait(bio);
index 3fe0aec90597294a15e04b1f89144631b976ab94..e01adb5145b3a0e82e17aa3f72c58074c6a05893 100644 (file)
@@ -77,7 +77,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
-               bio->bi_bdev = bdev;
+               bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, op, 0);
 
                bio->bi_iter.bi_size = req_sects << 9;
@@ -168,7 +168,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
        while (nr_sects) {
                bio = next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
-               bio->bi_bdev = bdev;
+               bio_set_dev(bio, bdev);
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
@@ -241,7 +241,7 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
        while (nr_sects) {
                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
-               bio->bi_bdev = bdev;
+               bio_set_dev(bio, bdev);
                bio->bi_opf = REQ_OP_WRITE_ZEROES;
                if (flags & BLKDEV_ZERO_NOUNMAP)
                        bio->bi_opf |= REQ_NOUNMAP;
@@ -323,7 +323,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
                               gfp_mask);
                bio->bi_iter.bi_sector = sector;
-               bio->bi_bdev   = bdev;
+               bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
                while (nr_sects != 0) {
index 05f116bfb99dac3763c61536573c49f72c1079d0..aa524cad5bea3a0d46232f6becbfe970947df39e 100644 (file)
@@ -786,7 +786,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
                return false;
 
        /* must be same device and not a special request */
-       if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
+       if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
                return false;
 
        /* only merge integrity protected bio into ditto rq */
index 3bd15d8095b101233455d7985b834f828d89be92..ff57fb51b3380bb14d7c6e449f4dada1cf6ab904 100644 (file)
@@ -116,7 +116,7 @@ int blkdev_report_zones(struct block_device *bdev,
        if (!bio)
                return -ENOMEM;
 
-       bio->bi_bdev = bdev;
+       bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = blk_zone_start(q, sector);
        bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0);
 
@@ -234,7 +234,7 @@ int blkdev_reset_zones(struct block_device *bdev,
 
                bio = bio_alloc(gfp_mask, 0);
                bio->bi_iter.bi_sector = sector;
-               bio->bi_bdev = bdev;
+               bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
 
                ret = submit_bio_wait(bio);
index 104b71c0490dd0cf377c0bc7d138c0c226fc02c9..006e1cb7e6f0652e81f5ea164fcad032dbf55d70 100644 (file)
@@ -294,14 +294,13 @@ out:
 
 static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 {
-       struct block_device *bdev = bio->bi_bdev;
-       struct brd_device *brd = bdev->bd_disk->private_data;
+       struct brd_device *brd = bio->bi_disk->private_data;
        struct bio_vec bvec;
        sector_t sector;
        struct bvec_iter iter;
 
        sector = bio->bi_iter.bi_sector;
-       if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
+       if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
                goto io_error;
 
        bio_for_each_segment(bvec, bio, iter) {
index e02c45cd3c5a7302054ff63ab95fb4247b368443..5f0eaee8c8a7cc65a8e970c27f0ebe8149545558 100644 (file)
@@ -151,7 +151,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
        op_flags |= REQ_SYNC;
 
        bio = bio_alloc_drbd(GFP_NOIO);
-       bio->bi_bdev = bdev->md_bdev;
+       bio_set_dev(bio, bdev->md_bdev);
        bio->bi_iter.bi_sector = sector;
        err = -EIO;
        if (bio_add_page(bio, device->md_io.page, size, 0) != size)
index 809fd245c3dc8b21240a91de649d253e4ab3f6cd..bd97908c766f5c998e76f54662a270a17972d7d3 100644 (file)
@@ -1019,7 +1019,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
                bm_store_page_idx(page, page_nr);
        } else
                page = b->bm_pages[page_nr];
-       bio->bi_bdev = device->ldev->md_bdev;
+       bio_set_dev(bio, device->ldev->md_bdev);
        bio->bi_iter.bi_sector = on_disk_sector;
        /* bio_add_page of a single page to an empty bio will always succeed,
         * according to api.  Do we want to assert that? */
index d17b6e6393c785fbaafd1b6c564aecda1b594d46..819f9d0bc8756ff2c3fdc23a62b27b5d6dab35ea 100644 (file)
@@ -1628,8 +1628,8 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
                                             int fault_type, struct bio *bio)
 {
        __release(local);
-       if (!bio->bi_bdev) {
-               drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
+       if (!bio->bi_disk) {
+               drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
                bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return;
index c7e95e6380fb46d1503c6a595f6cb716c4d0f6ef..ece6e5d7dc3f24f763718c0145b0f30b612ec517 100644 (file)
@@ -1265,7 +1265,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
 
        octx->device = device;
        octx->ctx = ctx;
-       bio->bi_bdev = device->ldev->backing_bdev;
+       bio_set_dev(bio, device->ldev->backing_bdev);
        bio->bi_private = octx;
        bio->bi_end_io = one_flush_endio;
        bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
@@ -1548,7 +1548,7 @@ next_bio:
        }
        /* > peer_req->i.sector, unless this is the first bio */
        bio->bi_iter.bi_sector = sector;
-       bio->bi_bdev = device->ldev->backing_bdev;
+       bio_set_dev(bio, device->ldev->backing_bdev);
        bio_set_op_attrs(bio, op, op_flags);
        bio->bi_private = peer_req;
        bio->bi_end_io = drbd_peer_request_endio;
index 8d6b5d137b5e44428a1f3a64c4a43c004b6a53f2..447c975f54819bed6dbd5f44d8898234e0ce9e03 100644 (file)
@@ -1179,7 +1179,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
        else
                type = DRBD_FAULT_DT_RD;
 
-       bio->bi_bdev = device->ldev->backing_bdev;
+       bio_set_dev(bio, device->ldev->backing_bdev);
 
        /* State may have changed since we grabbed our reference on the
         * ->ldev member. Double check, and short-circuit to endio.
index 1d8726a8df340513a6c9006aaebcc12c99d5b284..c268d886c4f0ecaed33a2c83826be8e4725f0dc7 100644 (file)
@@ -1513,7 +1513,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
                drbd_al_begin_io(device, &req->i);
 
        drbd_req_make_private_bio(req, req->master_bio);
-       req->private_bio->bi_bdev = device->ldev->backing_bdev;
+       bio_set_dev(req->private_bio, device->ldev->backing_bdev);
        generic_make_request(req->private_bio);
 
        return 0;
index 9c00f29e40c10e824052b43372d2ff80d1ed9339..60c086a536094d4c19ad31a023d49274d54c3dd5 100644 (file)
@@ -4134,7 +4134,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
        cbdata.drive = drive;
 
        bio_init(&bio, &bio_vec, 1);
-       bio.bi_bdev = bdev;
+       bio_set_dev(&bio, bdev);
        bio_add_page(&bio, page, size, 0);
 
        bio.bi_iter.bi_sector = 0;
index 6b8b097abbb93c9c446d6901dfc41540e7ee0673..67974796c350941c9e7cebdf144d8c13a10b8528 100644 (file)
@@ -1028,7 +1028,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
                bio = pkt->r_bios[f];
                bio_reset(bio);
                bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
-               bio->bi_bdev = pd->bdev;
+               bio_set_dev(bio, pd->bdev);
                bio->bi_end_io = pkt_end_io_read;
                bio->bi_private = pkt;
 
@@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
        pkt->sector = new_sector;
 
        bio_reset(pkt->bio);
-       pkt->bio->bi_bdev = pd->bdev;
+       bio_set_dev(pkt->bio, pd->bdev);
        bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
        pkt->bio->bi_iter.bi_sector = new_sector;
        pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
@@ -1267,7 +1267,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 
        bio_reset(pkt->w_bio);
        pkt->w_bio->bi_iter.bi_sector = pkt->sector;
-       pkt->w_bio->bi_bdev = pd->bdev;
+       bio_set_dev(pkt->w_bio, pd->bdev);
        pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
        pkt->w_bio->bi_private = pkt;
 
@@ -2314,7 +2314,7 @@ static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
 
        psd->pd = pd;
        psd->bio = bio;
-       cloned_bio->bi_bdev = pd->bdev;
+       bio_set_dev(cloned_bio, pd->bdev);
        cloned_bio->bi_private = psd;
        cloned_bio->bi_end_io = pkt_end_io_read_cloned;
        pd->stats.secs_r += bio_sectors(bio);
@@ -2415,8 +2415,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
 
        pd = q->queuedata;
        if (!pd) {
-               pr_err("%s incorrect request queue\n",
-                      bdevname(bio->bi_bdev, b));
+               pr_err("%s incorrect request queue\n", bio_devname(bio, b));
                goto end_io;
        }
 
index 5f3a813e7ae0af7de7e0e7b110cb6703b365ab20..987d665e82de4778ef0b646fb8a5ebd3c1403e21 100644 (file)
@@ -1363,7 +1363,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                                goto fail_put_bio;
 
                        biolist[nbio++] = bio;
-                       bio->bi_bdev    = preq.bdev;
+                       bio_set_dev(bio, preq.bdev);
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_iter.bi_sector  = preq.sector_number;
@@ -1382,7 +1382,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                        goto fail_put_bio;
 
                biolist[nbio++] = bio;
-               bio->bi_bdev    = preq.bdev;
+               bio_set_dev(bio, preq.bdev);
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
                bio_set_op_attrs(bio, operation, operation_flags);
index 35a5a7210e51c0dc3b5c0e9baa9b1ed878c1a39b..61076eda2e6ddc24924c2d0886c3092eb50841ae 100644 (file)
@@ -49,7 +49,7 @@ void bch_btree_verify(struct btree *b)
        v->keys.ops = b->keys.ops;
 
        bio = bch_bbio_alloc(b->c);
-       bio->bi_bdev            = PTR_CACHE(b->c, &b->key, 0)->bdev;
+       bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
        bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
        bio->bi_iter.bi_size    = KEY_SIZE(&v->key) << 9;
        bio->bi_opf             = REQ_OP_READ | REQ_META;
index 6a9b85095e7b5948d1403830c9b3bd734a1e7485..7e871bdc009755e942da9fc08441fafdd1950d72 100644 (file)
@@ -34,7 +34,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
        struct bbio *b = container_of(bio, struct bbio, bio);
 
        bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
-       bio->bi_bdev            = PTR_CACHE(c, &b->key, 0)->bdev;
+       bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
 
        b->submit_time_us = local_clock_us();
        closure_bio_submit(bio, bio->bi_private);
index 0352d05e495c14509fbb0bd22e13771ad872196b..7e1d1c3ba33abe7edde492a6592d7fc112b35ed7 100644 (file)
@@ -53,7 +53,7 @@ reread:               left = ca->sb.bucket_size - offset;
 
                bio_reset(bio);
                bio->bi_iter.bi_sector  = bucket + offset;
-               bio->bi_bdev    = ca->bdev;
+               bio_set_dev(bio, ca->bdev);
                bio->bi_iter.bi_size    = len << 9;
 
                bio->bi_end_io  = journal_read_endio;
@@ -452,7 +452,7 @@ static void do_journal_discard(struct cache *ca)
                bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
                bio->bi_iter.bi_sector  = bucket_to_sector(ca->set,
                                                ca->sb.d[ja->discard_idx]);
-               bio->bi_bdev            = ca->bdev;
+               bio_set_dev(bio, ca->bdev);
                bio->bi_iter.bi_size    = bucket_bytes(ca);
                bio->bi_end_io          = journal_discard_endio;
 
@@ -623,7 +623,7 @@ static void journal_write_unlocked(struct closure *cl)
 
                bio_reset(bio);
                bio->bi_iter.bi_sector  = PTR_OFFSET(k, i);
-               bio->bi_bdev    = ca->bdev;
+               bio_set_dev(bio, ca->bdev);
                bio->bi_iter.bi_size = sectors << 9;
 
                bio->bi_end_io  = journal_write_endio;
index 72eb97176403be7838b19258d8f36c5f4dc88157..0e1463d0c334d779d6a62b6e592c66c1035f2afd 100644 (file)
@@ -607,7 +607,7 @@ static void request_endio(struct bio *bio)
 static void bio_complete(struct search *s)
 {
        if (s->orig_bio) {
-               struct request_queue *q = bdev_get_queue(s->orig_bio->bi_bdev);
+               struct request_queue *q = s->orig_bio->bi_disk->queue;
                generic_end_io_acct(q, bio_data_dir(s->orig_bio),
                                    &s->d->disk->part0, s->start_time);
 
@@ -735,7 +735,7 @@ static void cached_dev_read_done(struct closure *cl)
        if (s->iop.bio) {
                bio_reset(s->iop.bio);
                s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
-               s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
+               bio_copy_dev(s->iop.bio, s->cache_miss);
                s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
                bch_bio_map(s->iop.bio, NULL);
 
@@ -794,7 +794,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
            !(bio->bi_opf & REQ_META) &&
            s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
                reada = min_t(sector_t, dc->readahead >> 9,
-                             bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
+                             get_capacity(bio->bi_disk) - bio_end_sector(bio));
 
        s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
@@ -820,7 +820,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
                goto out_submit;
 
        cache_bio->bi_iter.bi_sector    = miss->bi_iter.bi_sector;
-       cache_bio->bi_bdev              = miss->bi_bdev;
+       bio_copy_dev(cache_bio, miss);
        cache_bio->bi_iter.bi_size      = s->insert_bio_sectors << 9;
 
        cache_bio->bi_end_io    = request_endio;
@@ -919,7 +919,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                        struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
                                                             dc->disk.bio_split);
 
-                       flush->bi_bdev  = bio->bi_bdev;
+                       bio_copy_dev(flush, bio);
                        flush->bi_end_io = request_endio;
                        flush->bi_private = cl;
                        flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
@@ -956,13 +956,13 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
                                        struct bio *bio)
 {
        struct search *s;
-       struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+       struct bcache_device *d = bio->bi_disk->private_data;
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        int rw = bio_data_dir(bio);
 
        generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
 
-       bio->bi_bdev = dc->bdev;
+       bio_set_dev(bio, dc->bdev);
        bio->bi_iter.bi_sector += dc->sb.data_offset;
 
        if (cached_dev_get(dc)) {
@@ -1072,7 +1072,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
 {
        struct search *s;
        struct closure *cl;
-       struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+       struct bcache_device *d = bio->bi_disk->private_data;
        int rw = bio_data_dir(bio);
 
        generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
index 8352fad765f61991c9725007ecbedff323ddacdb..974d832e54a62635c08b21c98a72fe25963b09a6 100644 (file)
@@ -257,7 +257,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
        closure_init(cl, parent);
 
        bio_reset(bio);
-       bio->bi_bdev    = dc->bdev;
+       bio_set_dev(bio, dc->bdev);
        bio->bi_end_io  = write_bdev_super_endio;
        bio->bi_private = dc;
 
@@ -303,7 +303,7 @@ void bcache_write_super(struct cache_set *c)
                SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
 
                bio_reset(bio);
-               bio->bi_bdev    = ca->bdev;
+               bio_set_dev(bio, ca->bdev);
                bio->bi_end_io  = write_super_endio;
                bio->bi_private = ca;
 
@@ -508,7 +508,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
        closure_init_stack(cl);
 
        bio->bi_iter.bi_sector  = bucket * ca->sb.bucket_size;
-       bio->bi_bdev            = ca->bdev;
+       bio_set_dev(bio, ca->bdev);
        bio->bi_iter.bi_size    = bucket_bytes(ca);
 
        bio->bi_end_io  = prio_endio;
index 42c66e76f05e519ba05dc910695011071f929d0b..c49022a8dc9da227afb02994a7dd231479078656 100644 (file)
@@ -181,7 +181,7 @@ static void write_dirty(struct closure *cl)
        dirty_init(w);
        bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
        io->bio.bi_iter.bi_sector = KEY_START(&w->key);
-       io->bio.bi_bdev         = io->dc->bdev;
+       bio_set_dev(&io->bio, io->dc->bdev);
        io->bio.bi_end_io       = dirty_endio;
 
        closure_bio_submit(&io->bio, cl);
@@ -250,8 +250,7 @@ static void read_dirty(struct cached_dev *dc)
                dirty_init(w);
                bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
                io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
-               io->bio.bi_bdev         = PTR_CACHE(dc->disk.c,
-                                                   &w->key, 0)->bdev;
+               bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
                io->bio.bi_end_io       = read_dirty_endio;
 
                if (bio_alloc_pages(&io->bio, GFP_KERNEL))
index dd3646111561512f50728aa915b8a279be1c26ac..c82578af56a5bbff035a272f258327a7fce6ecfd 100644 (file)
  */
 
 struct dm_bio_details {
-       struct block_device *bi_bdev;
+       struct gendisk *bi_disk;
+       u8 bi_partno;
        unsigned long bi_flags;
        struct bvec_iter bi_iter;
 };
 
 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
 {
-       bd->bi_bdev = bio->bi_bdev;
+       bd->bi_disk = bio->bi_disk;
+       bd->bi_partno = bio->bi_partno;
        bd->bi_flags = bio->bi_flags;
        bd->bi_iter = bio->bi_iter;
 }
 
 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
 {
-       bio->bi_bdev = bd->bi_bdev;
+       bio->bi_disk = bd->bi_disk;
+       bio->bi_partno = bd->bi_partno;
        bio->bi_flags = bd->bi_flags;
        bio->bi_iter = bd->bi_iter;
 }
index 44f4a8ac95bd5a3a0f7742291c4827a27e27b3b5..9601225e0ae9add198ed874598fb91121e6c0ede 100644 (file)
@@ -616,7 +616,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
 
        bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
        b->bio.bi_iter.bi_sector = sector;
-       b->bio.bi_bdev = b->c->bdev;
+       bio_set_dev(&b->bio, b->c->bdev);
        b->bio.bi_end_io = inline_endio;
        /*
         * Use of .bi_private isn't a problem here because
index c5ea03fc7ee1537914f222753b5018bf34e4a169..dcac25c2be7a25ef6ba0d67f24d69362e1abe79b 100644 (file)
@@ -833,7 +833,7 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
  *--------------------------------------------------------------*/
 static void remap_to_origin(struct cache *cache, struct bio *bio)
 {
-       bio->bi_bdev = cache->origin_dev->bdev;
+       bio_set_dev(bio, cache->origin_dev->bdev);
 }
 
 static void remap_to_cache(struct cache *cache, struct bio *bio,
@@ -842,7 +842,7 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
        sector_t bi_sector = bio->bi_iter.bi_sector;
        sector_t block = from_cblock(cblock);
 
-       bio->bi_bdev = cache->cache_dev->bdev;
+       bio_set_dev(bio, cache->cache_dev->bdev);
        if (!block_size_is_power_of_two(cache))
                bio->bi_iter.bi_sector =
                        (block * cache->sectors_per_block) +
index 73c2e270cda61113c9e43600368ddfc99b4fd639..ca99147208a99e5be5415ed697c5681ce7c5d3a4 100644 (file)
@@ -1544,7 +1544,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 
        clone->bi_private = io;
        clone->bi_end_io  = crypt_endio;
-       clone->bi_bdev    = cc->dev->bdev;
+       bio_set_dev(clone, cc->dev->bdev);
        clone->bi_opf     = io->base_bio->bi_opf;
 }
 
@@ -2793,7 +2793,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
         */
        if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
            bio_op(bio) == REQ_OP_DISCARD)) {
-               bio->bi_bdev = cc->dev->bdev;
+               bio_set_dev(bio, cc->dev->bdev);
                if (bio_sectors(bio))
                        bio->bi_iter.bi_sector = cc->start +
                                dm_target_offset(ti, bio->bi_iter.bi_sector);
index ae3158795d26bd0695d254d6d1a0cf25bc7278fd..2209a9700acd655b93038d3ddc06d4c204f1a441 100644 (file)
@@ -282,7 +282,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
        struct delay_c *dc = ti->private;
 
        if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
-               bio->bi_bdev = dc->dev_write->bdev;
+               bio_set_dev(bio, dc->dev_write->bdev);
                if (bio_sectors(bio))
                        bio->bi_iter.bi_sector = dc->start_write +
                                dm_target_offset(ti, bio->bi_iter.bi_sector);
@@ -290,7 +290,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
                return delay_bio(dc, dc->write_delay, bio);
        }
 
-       bio->bi_bdev = dc->dev_read->bdev;
+       bio_set_dev(bio, dc->dev_read->bdev);
        bio->bi_iter.bi_sector = dc->start_read +
                dm_target_offset(ti, bio->bi_iter.bi_sector);
 
index e7ba89f98d8df8e8fbe5589505a33e3debc3cef2..ba84b8d62cd00f00668006449c4d8f2b2c39b019 100644 (file)
@@ -1192,7 +1192,7 @@ static dm_block_t get_block(struct era *era, struct bio *bio)
 
 static void remap_to_origin(struct era *era, struct bio *bio)
 {
-       bio->bi_bdev = era->origin_dev->bdev;
+       bio_set_dev(bio, era->origin_dev->bdev);
 }
 
 /*----------------------------------------------------------------
index e2c7234931bc1fda713f8d12db1918f7efcc5b5c..7146c2d9762dfdc14f9815b651d59b992c0583e0 100644 (file)
@@ -274,7 +274,7 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
 {
        struct flakey_c *fc = ti->private;
 
-       bio->bi_bdev = fc->dev->bdev;
+       bio_set_dev(bio, fc->dev->bdev);
        if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
                bio->bi_iter.bi_sector =
                        flakey_map_sector(ti, bio->bi_iter.bi_sector);
index 3acce09bba35c54b1afe4e8af962766bfd90eb73..27c0f223f8ea8f6164293283f2da8ad50d2d034d 100644 (file)
@@ -250,7 +250,8 @@ struct dm_integrity_io {
 
        struct completion *completion;
 
-       struct block_device *orig_bi_bdev;
+       struct gendisk *orig_bi_disk;
+       u8 orig_bi_partno;
        bio_end_io_t *orig_bi_end_io;
        struct bio_integrity_payload *orig_bi_integrity;
        struct bvec_iter orig_bi_iter;
@@ -1164,7 +1165,8 @@ static void integrity_end_io(struct bio *bio)
        struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
 
        bio->bi_iter = dio->orig_bi_iter;
-       bio->bi_bdev = dio->orig_bi_bdev;
+       bio->bi_disk = dio->orig_bi_disk;
+       bio->bi_partno = dio->orig_bi_partno;
        if (dio->orig_bi_integrity) {
                bio->bi_integrity = dio->orig_bi_integrity;
                bio->bi_opf |= REQ_INTEGRITY;
@@ -1681,8 +1683,9 @@ sleep:
 
        dio->orig_bi_iter = bio->bi_iter;
 
-       dio->orig_bi_bdev = bio->bi_bdev;
-       bio->bi_bdev = ic->dev->bdev;
+       dio->orig_bi_disk = bio->bi_disk;
+       dio->orig_bi_partno = bio->bi_partno;
+       bio_set_dev(bio, ic->dev->bdev);
 
        dio->orig_bi_integrity = bio_integrity(bio);
        bio->bi_integrity = NULL;
index 25039607f3cb629cdd9a5d432075ebc609455fb9..b4357ed4d541621fa69eec798fdb3a7d85ae68a4 100644 (file)
@@ -347,7 +347,7 @@ static void do_region(int op, int op_flags, unsigned region,
 
                bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
                bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
-               bio->bi_bdev = where->bdev;
+               bio_set_dev(bio, where->bdev);
                bio->bi_end_io = endio;
                bio_set_op_attrs(bio, op, op_flags);
                store_io_and_region_in_bio(bio, io, region);
index 41971a090e34d02ea26a348af0c66fc9615b44a2..405eca206d67c3e8b877f61140bd89a5b7f33320 100644 (file)
@@ -88,7 +88,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
 {
        struct linear_c *lc = ti->private;
 
-       bio->bi_bdev = lc->dev->bdev;
+       bio_set_dev(bio, lc->dev->bdev);
        if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
                bio->bi_iter.bi_sector =
                        linear_map_sector(ti, bio->bi_iter.bi_sector);
index a1da0eb58a93e51355c680bc3a056211042f3f54..534a254eb977381cd6589921e33953f29cfa91fb 100644 (file)
@@ -198,7 +198,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
        }
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = sector;
-       bio->bi_bdev = lc->logdev->bdev;
+       bio_set_dev(bio, lc->logdev->bdev);
        bio->bi_end_io = log_end_io;
        bio->bi_private = lc;
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -263,7 +263,7 @@ static int log_one_block(struct log_writes_c *lc,
        }
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = sector;
-       bio->bi_bdev = lc->logdev->bdev;
+       bio_set_dev(bio, lc->logdev->bdev);
        bio->bi_end_io = log_end_io;
        bio->bi_private = lc;
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -285,7 +285,7 @@ static int log_one_block(struct log_writes_c *lc,
                        }
                        bio->bi_iter.bi_size = 0;
                        bio->bi_iter.bi_sector = sector;
-                       bio->bi_bdev = lc->logdev->bdev;
+                       bio_set_dev(bio, lc->logdev->bdev);
                        bio->bi_end_io = log_end_io;
                        bio->bi_private = lc;
                        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -539,7 +539,7 @@ static void normal_map_bio(struct dm_target *ti, struct bio *bio)
 {
        struct log_writes_c *lc = ti->private;
 
-       bio->bi_bdev = lc->dev->bdev;
+       bio_set_dev(bio, lc->dev->bdev);
 }
 
 static int log_writes_map(struct dm_target *ti, struct bio *bio)
index 0e8ab5bb3575fccf24a5734d1f5fe8149210b6aa..573046bd5c460c62c08fade18a742ab719a35078 100644 (file)
@@ -566,7 +566,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
        mpio->nr_bytes = nr_bytes;
 
        bio->bi_status = 0;
-       bio->bi_bdev = pgpath->path.dev->bdev;
+       bio_set_dev(bio, pgpath->path.dev->bdev);
        bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 
        if (pgpath->pg->ps.type->start_io)
index a4fbd911d566e5d3bfc6637f0ebdfc0b15472b73..c0b82136b2d1fb91803f991bf564353ee7471d9a 100644 (file)
@@ -145,7 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
 
 struct dm_raid1_bio_record {
        struct mirror *m;
-       /* if details->bi_bdev == NULL, details were not saved */
+       /* if details->bi_disk == NULL, details were not saved */
        struct dm_bio_details details;
        region_t write_region;
 };
@@ -464,7 +464,7 @@ static sector_t map_sector(struct mirror *m, struct bio *bio)
 
 static void map_bio(struct mirror *m, struct bio *bio)
 {
-       bio->bi_bdev = m->dev->bdev;
+       bio_set_dev(bio, m->dev->bdev);
        bio->bi_iter.bi_sector = map_sector(m, bio);
 }
 
@@ -1199,7 +1199,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
        struct dm_raid1_bio_record *bio_record =
          dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
 
-       bio_record->details.bi_bdev = NULL;
+       bio_record->details.bi_disk = NULL;
 
        if (rw == WRITE) {
                /* Save region for mirror_end_io() handler */
@@ -1266,7 +1266,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
                goto out;
 
        if (unlikely(*error)) {
-               if (!bio_record->details.bi_bdev) {
+               if (!bio_record->details.bi_disk) {
                        /*
                         * There wasn't enough memory to record necessary
                         * information for a retry or there was no other
@@ -1291,7 +1291,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
                        bd = &bio_record->details;
 
                        dm_bio_restore(bd, bio);
-                       bio_record->details.bi_bdev = NULL;
+                       bio_record->details.bi_disk = NULL;
                        bio->bi_status = 0;
 
                        queue_bio(ms, bio, rw);
@@ -1301,7 +1301,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
        }
 
 out:
-       bio_record->details.bi_bdev = NULL;
+       bio_record->details.bi_disk = NULL;
 
        return DM_ENDIO_DONE;
 }
index 1ba41048b438b2fb3c470380387f6c16fea9bc55..1113b42e1edae4029f550b71c635ea80c76a46b9 100644 (file)
@@ -1663,7 +1663,7 @@ __find_pending_exception(struct dm_snapshot *s,
 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
 {
-       bio->bi_bdev = s->cow->bdev;
+       bio_set_dev(bio, s->cow->bdev);
        bio->bi_iter.bi_sector =
                chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
                                (chunk - e->old_chunk)) +
@@ -1681,7 +1681,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
        init_tracked_chunk(bio);
 
        if (bio->bi_opf & REQ_PREFLUSH) {
-               bio->bi_bdev = s->cow->bdev;
+               bio_set_dev(bio, s->cow->bdev);
                return DM_MAPIO_REMAPPED;
        }
 
@@ -1769,7 +1769,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
                        goto out;
                }
        } else {
-               bio->bi_bdev = s->origin->bdev;
+               bio_set_dev(bio, s->origin->bdev);
                track_chunk(s, bio, chunk);
        }
 
@@ -1802,9 +1802,9 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 
        if (bio->bi_opf & REQ_PREFLUSH) {
                if (!dm_bio_get_target_bio_nr(bio))
-                       bio->bi_bdev = s->origin->bdev;
+                       bio_set_dev(bio, s->origin->bdev);
                else
-                       bio->bi_bdev = s->cow->bdev;
+                       bio_set_dev(bio, s->cow->bdev);
                return DM_MAPIO_REMAPPED;
        }
 
@@ -1824,7 +1824,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
                    chunk >= s->first_merging_chunk &&
                    chunk < (s->first_merging_chunk +
                             s->num_merging_chunks)) {
-                       bio->bi_bdev = s->origin->bdev;
+                       bio_set_dev(bio, s->origin->bdev);
                        bio_list_add(&s->bios_queued_during_merge, bio);
                        r = DM_MAPIO_SUBMITTED;
                        goto out_unlock;
@@ -1838,7 +1838,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
        }
 
 redirect_to_origin:
-       bio->bi_bdev = s->origin->bdev;
+       bio_set_dev(bio, s->origin->bdev);
 
        if (bio_data_dir(bio) == WRITE) {
                up_write(&s->lock);
@@ -2285,7 +2285,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
        struct dm_origin *o = ti->private;
        unsigned available_sectors;
 
-       bio->bi_bdev = o->dev->bdev;
+       bio_set_dev(bio, o->dev->bdev);
 
        if (unlikely(bio->bi_opf & REQ_PREFLUSH))
                return DM_MAPIO_REMAPPED;
index a0375530b07f6afc7713f4ac2904e84490027d67..ab50d7c4377f8fd95fe86f875e23c70d22919fb6 100644 (file)
@@ -270,7 +270,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
        stripe_map_range_sector(sc, bio_end_sector(bio),
                                target_stripe, &end);
        if (begin < end) {
-               bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
+               bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev);
                bio->bi_iter.bi_sector = begin +
                        sc->stripe[target_stripe].physical_start;
                bio->bi_iter.bi_size = to_bytes(end - begin);
@@ -291,7 +291,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
        if (bio->bi_opf & REQ_PREFLUSH) {
                target_bio_nr = dm_bio_get_target_bio_nr(bio);
                BUG_ON(target_bio_nr >= sc->stripes);
-               bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
+               bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev);
                return DM_MAPIO_REMAPPED;
        }
        if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
@@ -306,7 +306,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
                          &stripe, &bio->bi_iter.bi_sector);
 
        bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
-       bio->bi_bdev = sc->stripe[stripe].dev->bdev;
+       bio_set_dev(bio, sc->stripe[stripe].dev->bdev);
 
        return DM_MAPIO_REMAPPED;
 }
@@ -430,9 +430,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
                return DM_ENDIO_DONE;
 
        memset(major_minor, 0, sizeof(major_minor));
-       sprintf(major_minor, "%d:%d",
-               MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
-               MINOR(disk_devt(bio->bi_bdev->bd_disk)));
+       sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));
 
        /*
         * Test to see which stripe drive triggered the event
index 871c18fe000dfae2ef016ad64a5d8c4e0fc04e66..2dcea4c56f37f7cf0c0fd9c5b8eea8f68b229707 100644 (file)
@@ -322,7 +322,7 @@ static int switch_map(struct dm_target *ti, struct bio *bio)
        sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
        unsigned path_nr = switch_get_path_nr(sctx, offset);
 
-       bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
+       bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
        bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
 
        return DM_MAPIO_REMAPPED;
index 9dec2f8cc739393e9f2e97124bd0e85125eb1406..69d88aee30554d017a6f7164d343bb17ee13f716 100644 (file)
@@ -679,7 +679,7 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
        struct pool *pool = tc->pool;
        sector_t bi_sector = bio->bi_iter.bi_sector;
 
-       bio->bi_bdev = tc->pool_dev->bdev;
+       bio_set_dev(bio, tc->pool_dev->bdev);
        if (block_size_is_power_of_two(pool))
                bio->bi_iter.bi_sector =
                        (block << pool->sectors_per_block_shift) |
@@ -691,7 +691,7 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 
 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 {
-       bio->bi_bdev = tc->origin_dev->bdev;
+       bio_set_dev(bio, tc->origin_dev->bdev);
 }
 
 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
@@ -3313,7 +3313,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
         * As this is a singleton target, ti->begin is always zero.
         */
        spin_lock_irqsave(&pool->lock, flags);
-       bio->bi_bdev = pt->data_dev->bdev;
+       bio_set_dev(bio, pt->data_dev->bdev);
        r = DM_MAPIO_REMAPPED;
        spin_unlock_irqrestore(&pool->lock, flags);
 
index b46705ebf01f6d55cfeb0cff8327d767d4fc7572..1c5b6185c79d049e6621a495ba27e76bbafc5916 100644 (file)
@@ -637,7 +637,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
        struct dm_verity *v = ti->private;
        struct dm_verity_io *io;
 
-       bio->bi_bdev = v->data_dev->bdev;
+       bio_set_dev(bio, v->data_dev->bdev);
        bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
 
        if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
index a4fa2ada688365da1f8f3f896bc3475eccc2c9d6..70485de37b669abc7503f759109d4dde6856cfe0 100644 (file)
@@ -409,7 +409,7 @@ static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
        }
 
        bio->bi_iter.bi_sector = dmz_blk2sect(block);
-       bio->bi_bdev = zmd->dev->bdev;
+       bio_set_dev(bio, zmd->dev->bdev);
        bio->bi_private = mblk;
        bio->bi_end_io = dmz_mblock_bio_end_io;
        bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
@@ -564,7 +564,7 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
        set_bit(DMZ_META_WRITING, &mblk->state);
 
        bio->bi_iter.bi_sector = dmz_blk2sect(block);
-       bio->bi_bdev = zmd->dev->bdev;
+       bio_set_dev(bio, zmd->dev->bdev);
        bio->bi_private = mblk;
        bio->bi_end_io = dmz_mblock_bio_end_io;
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
@@ -586,7 +586,7 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
                return -ENOMEM;
 
        bio->bi_iter.bi_sector = dmz_blk2sect(block);
-       bio->bi_bdev = zmd->dev->bdev;
+       bio_set_dev(bio, zmd->dev->bdev);
        bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
        bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
        ret = submit_bio_wait(bio);
index b08bbbd4d9027d8c4a7d07fd5e4786b25462f740..b87c1741da4b88c7b21ae7e4a8deaacf667a7d45 100644 (file)
@@ -238,7 +238,7 @@ static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 
        /* Setup and submit the BIO */
-       bio->bi_bdev = dmz->dev->bdev;
+       bio_set_dev(bio, dmz->dev->bdev);
        bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
        atomic_inc(&bioctx->ref);
        generic_make_request(bio);
@@ -586,7 +586,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
                      (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
                      (unsigned int)dmz_bio_blocks(bio));
 
-       bio->bi_bdev = dev->bdev;
+       bio_set_dev(bio, dev->bdev);
 
        if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
                return DM_MAPIO_REMAPPED;
index 8612a2d1ccd9c6e5fbe9652a57c403823d597f99..b28b9ce8f4ffc45c801aa97a679b48866c43fed7 100644 (file)
@@ -851,10 +851,10 @@ static void clone_endio(struct bio *bio)
 
        if (unlikely(error == BLK_STS_TARGET)) {
                if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-                   !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
+                   !bio->bi_disk->queue->limits.max_write_same_sectors)
                        disable_write_same(md);
                if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-                   !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
+                   !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(md);
        }
 
@@ -1215,8 +1215,8 @@ static void __map_bio(struct dm_target_io *tio)
                break;
        case DM_MAPIO_REMAPPED:
                /* the bio has been remapped so dispatch it */
-               trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
-                                     tio->io->bio->bi_bdev->bd_dev, sector);
+               trace_block_bio_remap(clone->bi_disk->queue, clone,
+                                     bio_dev(tio->io->bio), sector);
                generic_make_request(clone);
                break;
        case DM_MAPIO_KILL:
@@ -1796,7 +1796,7 @@ static struct mapped_device *alloc_dev(int minor)
                goto bad;
 
        bio_init(&md->flush_bio, NULL, 0);
-       md->flush_bio.bi_bdev = md->bdev;
+       bio_set_dev(&md->flush_bio, md->bdev);
        md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
 
        dm_stats_init(&md->stats);
index 06a64d5d8c6c4831be50daf4e93bff7a55ce2dd4..38264b38420fd9e227f5df9c611977b4e49f487b 100644 (file)
@@ -216,12 +216,12 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
        if (failit) {
                struct bio *b = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
 
-               b->bi_bdev = conf->rdev->bdev;
+               bio_set_dev(b, conf->rdev->bdev);
                b->bi_private = bio;
                b->bi_end_io = faulty_fail;
                bio = b;
        } else
-               bio->bi_bdev = conf->rdev->bdev;
+               bio_set_dev(bio, conf->rdev->bdev);
 
        generic_make_request(bio);
        return true;
index 5f1eb91895429fa1efea50f57356b5cff575ba68..c464fb48039acf6a4039a535b91d0591c2d16cc2 100644 (file)
@@ -275,17 +275,17 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
                bio = split;
        }
 
-       bio->bi_bdev = tmp_dev->rdev->bdev;
+       bio_set_dev(bio, tmp_dev->rdev->bdev);
        bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
                start_sector + data_offset;
 
        if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+                    !blk_queue_discard(bio->bi_disk->queue))) {
                /* Just ignore it */
                bio_endio(bio);
        } else {
                if (mddev->gendisk)
-                       trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+                       trace_block_bio_remap(bio->bi_disk->queue,
                                              bio, disk_devt(mddev->gendisk),
                                              bio_sector);
                mddev_check_writesame(mddev, bio);
index c99634612fc408fbc97df9c7c8ef8e028efe2fad..0afdc1bfd7cba9ccd6d4f8f5d3219a5271890211 100644 (file)
@@ -422,7 +422,7 @@ static void submit_flushes(struct work_struct *ws)
                        bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
                        bi->bi_end_io = md_end_flush;
                        bi->bi_private = rdev;
-                       bi->bi_bdev = rdev->bdev;
+                       bio_set_dev(bi, rdev->bdev);
                        bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
                        atomic_inc(&mddev->flush_pending);
                        submit_bio(bi);
@@ -772,7 +772,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 
        atomic_inc(&rdev->nr_pending);
 
-       bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
+       bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
@@ -803,8 +803,10 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
        struct bio *bio = md_bio_alloc_sync(rdev->mddev);
        int ret;
 
-       bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
-               rdev->meta_bdev : rdev->bdev;
+       if (metadata_op && rdev->meta_bdev)
+               bio_set_dev(bio, rdev->meta_bdev);
+       else
+               bio_set_dev(bio, rdev->bdev);
        bio_set_op_attrs(bio, op, op_flags);
        if (metadata_op)
                bio->bi_iter.bi_sector = sector + rdev->sb_start;
index 09db034558017e3d327960e8333d5defa35acdcb..c0d436fb88f0f571eee8b2e8427e03b3cdd51919 100644 (file)
@@ -509,6 +509,11 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
        atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
 }
 
+static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
+{
+       atomic_add(nr_sectors, &bio->bi_disk->sync_io);
+}
+
 struct md_personality
 {
        char *name;
@@ -721,14 +726,14 @@ static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
 static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
 {
        if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-           !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
+           !bio->bi_disk->queue->limits.max_write_same_sectors)
                mddev->queue->limits.max_write_same_sectors = 0;
 }
 
 static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
 {
        if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-           !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
+           !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
                mddev->queue->limits.max_write_zeroes_sectors = 0;
 }
 #endif /* _MD_MD_H */
index 23a162ba6c56399b2fa9cecae58c72d48939b1a6..b68e0666b9b0bb6addccec7ba08d1e014e198dc2 100644 (file)
@@ -134,7 +134,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
        __bio_clone_fast(&mp_bh->bio, bio);
 
        mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
-       mp_bh->bio.bi_bdev = multipath->rdev->bdev;
+       bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
        mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
        mp_bh->bio.bi_end_io = multipath_end_request;
        mp_bh->bio.bi_private = mp_bh;
@@ -345,17 +345,17 @@ static void multipathd(struct md_thread *thread)
 
                if ((mp_bh->path = multipath_map (conf))<0) {
                        pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
-                              bdevname(bio->bi_bdev,b),
+                              bio_devname(bio, b),
                               (unsigned long long)bio->bi_iter.bi_sector);
                        multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
                } else {
                        pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
-                              bdevname(bio->bi_bdev,b),
+                              bio_devname(bio, b),
                               (unsigned long long)bio->bi_iter.bi_sector);
                        *bio = *(mp_bh->master_bio);
                        bio->bi_iter.bi_sector +=
                                conf->multipaths[mp_bh->path].rdev->data_offset;
-                       bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
+                       bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev);
                        bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
                        bio->bi_end_io = multipath_end_request;
                        bio->bi_private = mp_bh;
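
Several hunks (multipath.c here, raid1.c and raid5-ppl.c below, ext4 further down) replace bdevname(bio->bi_bdev, b) with bio_devname(bio, b). That helper also comes from the include/linux/bio.h hunk of this patch; presumably it is a thin wrapper along these lines:

    /* Sketch: format the bio's device name into buf via its dev_t. */
    #define bio_devname(bio, buf) \
            __bdevname(bio_dev(bio), (buf))
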
index 94d9ae9b0fd09030daf663c5817d0b2566ea271d..05a4521b832f8d6040081bc40abbc6a46707ef6d 100644 (file)
@@ -588,14 +588,13 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
 
        zone = find_zone(mddev->private, &sector);
        tmp_dev = map_sector(mddev, zone, sector, &sector);
-       bio->bi_bdev = tmp_dev->bdev;
+       bio_set_dev(bio, tmp_dev->bdev);
        bio->bi_iter.bi_sector = sector + zone->dev_start +
                tmp_dev->data_offset;
 
        if (mddev->gendisk)
-               trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
-                                     bio, disk_devt(mddev->gendisk),
-                                     bio_sector);
+               trace_block_bio_remap(bio->bi_disk->queue, bio,
+                               disk_devt(mddev->gendisk), bio_sector);
        mddev_check_writesame(mddev, bio);
        mddev_check_write_zeroes(mddev, bio);
        generic_make_request(bio);
index f50958ded9f0c4dd9982c440b20d4e8854697b0d..baf5e358d22a9bc60e10896b69160d06ede5e16f 100644 (file)
@@ -786,13 +786,13 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
 
        while (bio) { /* submit pending writes */
                struct bio *next = bio->bi_next;
-               struct md_rdev *rdev = (void*)bio->bi_bdev;
+               struct md_rdev *rdev = (void *)bio->bi_disk;
                bio->bi_next = NULL;
-               bio->bi_bdev = rdev->bdev;
+               bio_set_dev(bio, rdev->bdev);
                if (test_bit(Faulty, &rdev->flags)) {
                        bio_io_error(bio);
                } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-                                   !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+                                   !blk_queue_discard(bio->bi_disk->queue)))
                        /* Just ignore it */
                        bio_endio(bio);
                else
@@ -1273,7 +1273,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 
        read_bio->bi_iter.bi_sector = r1_bio->sector +
                mirror->rdev->data_offset;
-       read_bio->bi_bdev = mirror->rdev->bdev;
+       bio_set_dev(read_bio, mirror->rdev->bdev);
        read_bio->bi_end_io = raid1_end_read_request;
        bio_set_op_attrs(read_bio, op, do_sync);
        if (test_bit(FailFast, &mirror->rdev->flags) &&
@@ -1282,9 +1282,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        read_bio->bi_private = r1_bio;
 
        if (mddev->gendisk)
-               trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
-                                     read_bio, disk_devt(mddev->gendisk),
-                                     r1_bio->sector);
+               trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
+                               disk_devt(mddev->gendisk), r1_bio->sector);
 
        generic_make_request(read_bio);
 }
@@ -1496,7 +1495,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
                mbio->bi_iter.bi_sector = (r1_bio->sector +
                                   conf->mirrors[i].rdev->data_offset);
-               mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+               bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
                mbio->bi_end_io = raid1_end_write_request;
                mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
                if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
@@ -1508,11 +1507,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
                atomic_inc(&r1_bio->remaining);
 
                if (mddev->gendisk)
-                       trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+                       trace_block_bio_remap(mbio->bi_disk->queue,
                                              mbio, disk_devt(mddev->gendisk),
                                              r1_bio->sector);
                /* flush_pending_writes() needs access to the rdev so...*/
-               mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
+               mbio->bi_disk = (void *)conf->mirrors[i].rdev;
 
                cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
                if (cb)
@@ -1990,8 +1989,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
                         * Don't fail devices as that won't really help.
                         */
                        pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
-                                           mdname(mddev),
-                                           bdevname(bio->bi_bdev, b),
+                                           mdname(mddev), bio_devname(bio, b),
                                            (unsigned long long)r1_bio->sector);
                        for (d = 0; d < conf->raid_disks * 2; d++) {
                                rdev = conf->mirrors[d].rdev;
@@ -2082,7 +2080,7 @@ static void process_checks(struct r1bio *r1_bio)
                b->bi_status = status;
                b->bi_iter.bi_sector = r1_bio->sector +
                        conf->mirrors[i].rdev->data_offset;
-               b->bi_bdev = conf->mirrors[i].rdev->bdev;
+               bio_set_dev(b, conf->mirrors[i].rdev->bdev);
                b->bi_end_io = end_sync_read;
                rp->raid_bio = r1_bio;
                b->bi_private = rp;
@@ -2350,7 +2348,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 
                bio_trim(wbio, sector - r1_bio->sector, sectors);
                wbio->bi_iter.bi_sector += rdev->data_offset;
-               wbio->bi_bdev = rdev->bdev;
+               bio_set_dev(wbio, rdev->bdev);
 
                if (submit_bio_wait(wbio) < 0)
                        /* failure! */
@@ -2440,7 +2438,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
        struct mddev *mddev = conf->mddev;
        struct bio *bio;
        struct md_rdev *rdev;
-       dev_t bio_dev;
        sector_t bio_sector;
 
        clear_bit(R1BIO_ReadError, &r1_bio->state);
@@ -2454,7 +2451,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
         */
 
        bio = r1_bio->bios[r1_bio->read_disk];
-       bio_dev = bio->bi_bdev->bd_dev;
        bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
        bio_put(bio);
        r1_bio->bios[r1_bio->read_disk] = NULL;
@@ -2727,7 +2723,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                if (bio->bi_end_io) {
                        atomic_inc(&rdev->nr_pending);
                        bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
-                       bio->bi_bdev = rdev->bdev;
+                       bio_set_dev(bio, rdev->bdev);
                        if (test_bit(FailFast, &rdev->flags))
                                bio->bi_opf |= MD_FAILFAST;
                }
@@ -2853,7 +2849,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                        bio = r1_bio->bios[i];
                        if (bio->bi_end_io == end_sync_read) {
                                read_targets--;
-                               md_sync_acct(bio->bi_bdev, nr_sectors);
+                               md_sync_acct_bio(bio, nr_sectors);
                                if (read_targets == 1)
                                        bio->bi_opf &= ~MD_FAILFAST;
                                generic_make_request(bio);
@@ -2862,7 +2858,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
        } else {
                atomic_set(&r1_bio->remaining, 1);
                bio = r1_bio->bios[r1_bio->read_disk];
-               md_sync_acct(bio->bi_bdev, nr_sectors);
+               md_sync_acct_bio(bio, nr_sectors);
                if (read_targets == 1)
                        bio->bi_opf &= ~MD_FAILFAST;
                generic_make_request(bio);
index f55d4cc085f60daa6b25c9f398e462420dbc6c18..d1f948e371e08efb0d909022cd1d238cb578850d 100644 (file)
@@ -901,13 +901,13 @@ static void flush_pending_writes(struct r10conf *conf)
 
                while (bio) { /* submit pending writes */
                        struct bio *next = bio->bi_next;
-                       struct md_rdev *rdev = (void*)bio->bi_bdev;
+                       struct md_rdev *rdev = (void*)bio->bi_disk;
                        bio->bi_next = NULL;
-                       bio->bi_bdev = rdev->bdev;
+                       bio_set_dev(bio, rdev->bdev);
                        if (test_bit(Faulty, &rdev->flags)) {
                                bio_io_error(bio);
                        } else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
-                                           !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+                                           !blk_queue_discard(bio->bi_disk->queue)))
                                /* Just ignore it */
                                bio_endio(bio);
                        else
@@ -1085,13 +1085,13 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 
        while (bio) { /* submit pending writes */
                struct bio *next = bio->bi_next;
-               struct md_rdev *rdev = (void*)bio->bi_bdev;
+               struct md_rdev *rdev = (void*)bio->bi_disk;
                bio->bi_next = NULL;
-               bio->bi_bdev = rdev->bdev;
+               bio_set_dev(bio, rdev->bdev);
                if (test_bit(Faulty, &rdev->flags)) {
                        bio_io_error(bio);
                } else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
-                                   !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+                                   !blk_queue_discard(bio->bi_disk->queue)))
                        /* Just ignore it */
                        bio_endio(bio);
                else
@@ -1200,7 +1200,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 
        read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
                choose_data_offset(r10_bio, rdev);
-       read_bio->bi_bdev = rdev->bdev;
+       bio_set_dev(read_bio, rdev->bdev);
        read_bio->bi_end_io = raid10_end_read_request;
        bio_set_op_attrs(read_bio, op, do_sync);
        if (test_bit(FailFast, &rdev->flags) &&
@@ -1209,7 +1209,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
        read_bio->bi_private = r10_bio;
 
        if (mddev->gendisk)
-               trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+               trace_block_bio_remap(read_bio->bi_disk->queue,
                                      read_bio, disk_devt(mddev->gendisk),
                                      r10_bio->sector);
        generic_make_request(read_bio);
@@ -1249,7 +1249,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 
        mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
                                   choose_data_offset(r10_bio, rdev));
-       mbio->bi_bdev = rdev->bdev;
+       bio_set_dev(mbio, rdev->bdev);
        mbio->bi_end_io = raid10_end_write_request;
        bio_set_op_attrs(mbio, op, do_sync | do_fua);
        if (!replacement && test_bit(FailFast,
@@ -1259,11 +1259,11 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
        mbio->bi_private = r10_bio;
 
        if (conf->mddev->gendisk)
-               trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+               trace_block_bio_remap(mbio->bi_disk->queue,
                                      mbio, disk_devt(conf->mddev->gendisk),
                                      r10_bio->sector);
        /* flush_pending_writes() needs access to the rdev so...*/
-       mbio->bi_bdev = (void *)rdev;
+       mbio->bi_disk = (void *)rdev;
 
        atomic_inc(&r10_bio->remaining);
 
@@ -2094,7 +2094,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
                        tbio->bi_opf |= MD_FAILFAST;
                tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
-               tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
+               bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
                generic_make_request(tbio);
        }
 
@@ -2552,7 +2552,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
                wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
                wbio->bi_iter.bi_sector = wsector +
                                   choose_data_offset(r10_bio, rdev);
-               wbio->bi_bdev = rdev->bdev;
+               bio_set_dev(wbio, rdev->bdev);
                bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 
                if (submit_bio_wait(wbio) < 0)
@@ -2575,7 +2575,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
        struct bio *bio;
        struct r10conf *conf = mddev->private;
        struct md_rdev *rdev = r10_bio->devs[slot].rdev;
-       dev_t bio_dev;
        sector_t bio_last_sector;
 
        /* we got a read error. Maybe the drive is bad.  Maybe just
@@ -2587,7 +2586,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
         * frozen.
         */
        bio = r10_bio->devs[slot].bio;
-       bio_dev = bio->bi_bdev->bd_dev;
        bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
        bio_put(bio);
        r10_bio->devs[slot].bio = NULL;
@@ -2950,7 +2948,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 
        /* Again, very different code for resync and recovery.
         * Both must result in an r10bio with a list of bios that
-        * have bi_end_io, bi_sector, bi_bdev set,
+        * have bi_end_io, bi_sector, bi_disk set,
         * and bi_private set to the r10bio.
         * For recovery, we may actually create several r10bios
         * with 2 bios in each, that correspond to the bios in the main one.
@@ -3095,7 +3093,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                                from_addr = r10_bio->devs[j].addr;
                                bio->bi_iter.bi_sector = from_addr +
                                        rdev->data_offset;
-                               bio->bi_bdev = rdev->bdev;
+                               bio_set_dev(bio, rdev->bdev);
                                atomic_inc(&rdev->nr_pending);
                                /* and we write to 'i' (if not in_sync) */
 
@@ -3117,7 +3115,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                                        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                                        bio->bi_iter.bi_sector = to_addr
                                                + mrdev->data_offset;
-                                       bio->bi_bdev = mrdev->bdev;
+                                       bio_set_dev(bio, mrdev->bdev);
                                        atomic_inc(&r10_bio->remaining);
                                } else
                                        r10_bio->devs[1].bio->bi_end_io = NULL;
@@ -3143,7 +3141,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                                bio->bi_iter.bi_sector = to_addr +
                                        mreplace->data_offset;
-                               bio->bi_bdev = mreplace->bdev;
+                               bio_set_dev(bio, mreplace->bdev);
                                atomic_inc(&r10_bio->remaining);
                                break;
                        }
@@ -3289,7 +3287,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                        if (test_bit(FailFast, &rdev->flags))
                                bio->bi_opf |= MD_FAILFAST;
                        bio->bi_iter.bi_sector = sector + rdev->data_offset;
-                       bio->bi_bdev = rdev->bdev;
+                       bio_set_dev(bio, rdev->bdev);
                        count++;
 
                        rdev = rcu_dereference(conf->mirrors[d].replacement);
@@ -3311,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                        if (test_bit(FailFast, &rdev->flags))
                                bio->bi_opf |= MD_FAILFAST;
                        bio->bi_iter.bi_sector = sector + rdev->data_offset;
-                       bio->bi_bdev = rdev->bdev;
+                       bio_set_dev(bio, rdev->bdev);
                        count++;
                        rcu_read_unlock();
                }
@@ -3367,7 +3365,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                r10_bio->sectors = nr_sectors;
 
                if (bio->bi_end_io == end_sync_read) {
-                       md_sync_acct(bio->bi_bdev, nr_sectors);
+                       md_sync_acct_bio(bio, nr_sectors);
                        bio->bi_status = 0;
                        generic_make_request(bio);
                }
@@ -4383,7 +4381,7 @@ read_more:
 
        read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
 
-       read_bio->bi_bdev = rdev->bdev;
+       bio_set_dev(read_bio, rdev->bdev);
        read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
                               + rdev->data_offset);
        read_bio->bi_private = r10_bio;
@@ -4417,7 +4415,7 @@ read_more:
                if (!rdev2 || test_bit(Faulty, &rdev2->flags))
                        continue;
 
-               b->bi_bdev = rdev2->bdev;
+               bio_set_dev(b, rdev2->bdev);
                b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
                        rdev2->new_data_offset;
                b->bi_end_io = end_reshape_write;
@@ -4449,7 +4447,7 @@ read_more:
        r10_bio->sectors = nr_sectors;
 
        /* Now submit the read */
-       md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
+       md_sync_acct_bio(read_bio, r10_bio->sectors);
        atomic_inc(&r10_bio->remaining);
        read_bio->bi_next = NULL;
        generic_make_request(read_bio);
@@ -4511,7 +4509,7 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                }
                atomic_inc(&rdev->nr_pending);
                rcu_read_unlock();
-               md_sync_acct(b->bi_bdev, r10_bio->sectors);
+               md_sync_acct_bio(b, r10_bio->sectors);
                atomic_inc(&r10_bio->remaining);
                b->bi_next = NULL;
                generic_make_request(b);
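
Note the trick carried over in the raid1 and raid10 hunks above: while a write bio sits on the md plug list, bi_disk temporarily holds an rdev pointer rather than a gendisk, and the flush paths cast it back before repairing the bio with bio_set_dev(). A condensed view of the pattern, using only identifiers from the hunks themselves:

    /* queue side: stash the rdev so flush_pending_writes() can find it */
    mbio->bi_disk = (void *)rdev;

    /* flush side: recover the rdev, then make the bio valid again */
    struct md_rdev *rdev = (void *)bio->bi_disk;
    bio->bi_next = NULL;
    bio_set_dev(bio, rdev->bdev);
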
index bfa1e907c472e49855f9e0abb4c307cd45514d4d..f253a9c583c138b44ca58144088e205073ddc70c 100644 (file)
@@ -728,7 +728,7 @@ static struct bio *r5l_bio_alloc(struct r5l_log *log)
        struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);
 
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-       bio->bi_bdev = log->rdev->bdev;
+       bio_set_dev(bio, log->rdev->bdev);
        bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
 
        return bio;
@@ -1291,7 +1291,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
        if (!do_flush)
                return;
        bio_reset(&log->flush_bio);
-       log->flush_bio.bi_bdev = log->rdev->bdev;
+       bio_set_dev(&log->flush_bio, log->rdev->bdev);
        log->flush_bio.bi_end_io = r5l_log_flush_endio;
        log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        submit_bio(&log->flush_bio);
@@ -1669,7 +1669,7 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
                                      sector_t offset)
 {
        bio_reset(ctx->ra_bio);
-       ctx->ra_bio->bi_bdev = log->rdev->bdev;
+       bio_set_dev(ctx->ra_bio, log->rdev->bdev);
        bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
        ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
 
index 44ad5baf320684e61b1aee1549d15b6e68c0a732..1e237c40d6fa26c816825b6f99631ce76bd0e8db 100644 (file)
@@ -415,7 +415,7 @@ static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
        pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
                 __func__, io->seq, bio->bi_iter.bi_size,
                 (unsigned long long)bio->bi_iter.bi_sector,
-                bdevname(bio->bi_bdev, b));
+                bio_devname(bio, b));
 
        submit_bio(bio);
 }
@@ -453,7 +453,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
 
        bio->bi_end_io = ppl_log_endio;
        bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
-       bio->bi_bdev = log->rdev->bdev;
+       bio_set_dev(bio, log->rdev->bdev);
        bio->bi_iter.bi_sector = log->rdev->ppl.sector;
        bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
 
@@ -468,7 +468,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
                        bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
                                               ppl_conf->bs);
                        bio->bi_opf = prev->bi_opf;
-                       bio->bi_bdev = prev->bi_bdev;
+                       bio_copy_dev(bio, prev);
                        bio->bi_iter.bi_sector = bio_end_sector(prev);
                        bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
 
index d687aeb1b5382e00d44fbb2b4ebe17f2ba0303b2..3ae8bbceb6c4762a6bb3b76022b0fc589c0e9bd4 100644 (file)
@@ -1096,7 +1096,7 @@ again:
 
                        set_bit(STRIPE_IO_STARTED, &sh->state);
 
-                       bi->bi_bdev = rdev->bdev;
+                       bio_set_dev(bi, rdev->bdev);
                        bio_set_op_attrs(bi, op, op_flags);
                        bi->bi_end_io = op_is_write(op)
                                ? raid5_end_write_request
@@ -1145,7 +1145,7 @@ again:
                                set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
 
                        if (conf->mddev->gendisk)
-                               trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+                               trace_block_bio_remap(bi->bi_disk->queue,
                                                      bi, disk_devt(conf->mddev->gendisk),
                                                      sh->dev[i].sector);
                        if (should_defer && op_is_write(op))
@@ -1160,7 +1160,7 @@ again:
 
                        set_bit(STRIPE_IO_STARTED, &sh->state);
 
-                       rbi->bi_bdev = rrdev->bdev;
+                       bio_set_dev(rbi, rrdev->bdev);
                        bio_set_op_attrs(rbi, op, op_flags);
                        BUG_ON(!op_is_write(op));
                        rbi->bi_end_io = raid5_end_write_request;
@@ -1193,7 +1193,7 @@ again:
                        if (op == REQ_OP_DISCARD)
                                rbi->bi_vcnt = 0;
                        if (conf->mddev->gendisk)
-                               trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+                               trace_block_bio_remap(rbi->bi_disk->queue,
                                                      rbi, disk_devt(conf->mddev->gendisk),
                                                      sh->dev[i].sector);
                        if (should_defer && op_is_write(op))
@@ -5233,7 +5233,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
                atomic_inc(&rdev->nr_pending);
                rcu_read_unlock();
                raid_bio->bi_next = (void*)rdev;
-               align_bi->bi_bdev =  rdev->bdev;
+               bio_set_dev(align_bi, rdev->bdev);
                bio_clear_flag(align_bi, BIO_SEG_VALID);
 
                if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
@@ -5255,7 +5255,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
                spin_unlock_irq(&conf->device_lock);
 
                if (mddev->gendisk)
-                       trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+                       trace_block_bio_remap(align_bi->bi_disk->queue,
                                              align_bi, disk_devt(mddev->gendisk),
                                              raid_bio->bi_iter.bi_sector);
                generic_make_request(align_bi);
index 73062da3177f631c98a7d9b94ad2734ed57ea125..a87f793f2945ec4b46a283fa482f746b94996c15 100644 (file)
@@ -390,7 +390,7 @@ int nd_region_activate(struct nd_region *nd_region);
 void __nd_iostat_start(struct bio *bio, unsigned long *start);
 static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
 {
-       struct gendisk *disk = bio->bi_bdev->bd_disk;
+       struct gendisk *disk = bio->bi_disk;
 
        if (!blk_queue_io_stat(disk->queue))
                return false;
@@ -402,7 +402,7 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
 }
 static inline void nd_iostat_end(struct bio *bio, unsigned long start)
 {
-       struct gendisk *disk = bio->bi_bdev->bd_disk;
+       struct gendisk *disk = bio->bi_disk;
 
        generic_end_io_acct(disk->queue, bio_data_dir(bio), &disk->part0,
                                start);
index c49f1f8b2e57459deb2605f242c9324e8960a95b..f03452db793862e2ccef11df8bda84541919ecfe 100644 (file)
@@ -613,11 +613,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 
                if (!disk)
                        goto submit;
-               bio->bi_bdev = bdget_disk(disk, 0);
-               if (!bio->bi_bdev) {
-                       ret = -ENODEV;
-                       goto out_unmap;
-               }
+               bio->bi_disk = disk;
 
                if (meta_buffer && meta_len) {
                        struct bio_integrity_payload *bip;
@@ -668,11 +664,8 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
  out_free_meta:
        kfree(meta);
  out_unmap:
-       if (bio) {
-               if (disk && bio->bi_bdev)
-                       bdput(bio->bi_bdev);
+       if (bio)
                blk_rq_unmap_user(bio);
-       }
  out:
        blk_mq_free_request(req);
        return ret;
index be8541335e31edb0d621aa13e0dd3e74670fa168..c1a28569e843c67f6e17c97bb1cf1497ff48b02d 100644 (file)
@@ -643,17 +643,9 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
                        vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
                }
 
-               if (!disk)
-                       goto submit;
-
-               bio->bi_bdev = bdget_disk(disk, 0);
-               if (!bio->bi_bdev) {
-                       ret = -ENODEV;
-                       goto err_meta;
-               }
+               bio->bi_disk = disk;
        }
 
-submit:
        blk_execute_rq(q, NULL, rq, 0);
 
        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
@@ -673,11 +665,8 @@ err_meta:
        if (meta_buf && meta_len)
                dma_pool_free(dev->dma_pool, metadata, metadata_dma);
 err_map:
-       if (bio) {
-               if (disk && bio->bi_bdev)
-                       bdput(bio->bi_bdev);
+       if (bio)
                blk_rq_unmap_user(bio);
-       }
 err_ppa:
        if (ppa_buf && ppa_len)
                dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
index 3b4d47a6abdb8337419f3e8e054c9a00e1147091..0d4c23dc453247eb79c795895f0b829912ff18fd 100644 (file)
@@ -68,7 +68,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
 
        nvmet_inline_bio_init(req);
        bio = &req->inline_bio;
-       bio->bi_bdev = req->ns->bdev;
+       bio_set_dev(bio, req->ns->bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
@@ -80,7 +80,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
                        struct bio *prev = bio;
 
                        bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
-                       bio->bi_bdev = req->ns->bdev;
+                       bio_set_dev(bio, req->ns->bdev);
                        bio->bi_iter.bi_sector = sector;
                        bio_set_op_attrs(bio, op, op_flags);
 
@@ -104,7 +104,7 @@ static void nvmet_execute_flush(struct nvmet_req *req)
        nvmet_inline_bio_init(req);
        bio = &req->inline_bio;
 
-       bio->bi_bdev = req->ns->bdev;
+       bio_set_dev(bio, req->ns->bdev);
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
index 68bae4f6bd8881be00fd57a5ec3c691e1d2424bf..7abb240847c07dd0b24f3f2e7f03d221a1416f5f 100644 (file)
@@ -856,14 +856,14 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
        blk_queue_split(q, &bio);
 
        bytes_done = 0;
-       dev_info = bio->bi_bdev->bd_disk->private_data;
+       dev_info = bio->bi_disk->private_data;
        if (dev_info == NULL)
                goto fail;
        if ((bio->bi_iter.bi_sector & 7) != 0 ||
            (bio->bi_iter.bi_size & 4095) != 0)
                /* Request is not page-aligned. */
                goto fail;
-       if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
+       if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) {
                /* Request beyond end of DCSS segment. */
                goto fail;
        }
index a48f0d40c1d253caf6d2d994307cd08a9e43de5c..571a0709e1e5b98ba14708d13e9f944e5ad85a6a 100644 (file)
@@ -183,7 +183,7 @@ static unsigned long xpram_highest_page_index(void)
  */
 static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
 {
-       xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
+       xpram_device_t *xdev = bio->bi_disk->private_data;
        struct bio_vec bvec;
        struct bvec_iter iter;
        unsigned int index;
index ee7c7fa55dad16ffc43d30fa701062c5f3c84aa4..07c814c42648faa00d7bca39ad339d7ef916ec99 100644 (file)
@@ -338,7 +338,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
                return NULL;
        }
 
-       bio->bi_bdev = ib_dev->ibd_bd;
+       bio_set_dev(bio, ib_dev->ibd_bd);
        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_iter.bi_sector = lba;
@@ -395,7 +395,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
 
        bio = bio_alloc(GFP_KERNEL, 0);
        bio->bi_end_io = iblock_end_io_flush;
-       bio->bi_bdev = ib_dev->ibd_bd;
+       bio_set_dev(bio, ib_dev->ibd_bd);
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        if (!immed)
                bio->bi_private = cmd;
index d29d1c70f833c93d16444383574cd847cc483b1b..bb715b2fcfb80f7fb50903a61d1b22d405af7ddf 100644 (file)
@@ -223,7 +223,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
        }
 
        bio_init(&bio, vecs, nr_pages);
-       bio.bi_bdev = bdev;
+       bio_set_dev(&bio, bdev);
        bio.bi_iter.bi_sector = pos >> 9;
        bio.bi_write_hint = iocb->ki_hint;
        bio.bi_private = current;
@@ -362,7 +362,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 
        blk_start_plug(&plug);
        for (;;) {
-               bio->bi_bdev = bdev;
+               bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = pos >> 9;
                bio->bi_write_hint = iocb->ki_hint;
                bio->bi_private = dio;
index 9d385483903812963c21e316a3e07d23d8adad13..fb07e3c22b9aaa7362861cd3451ee27f184a7d77 100644 (file)
@@ -1635,7 +1635,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
                unsigned int j;
 
                bio = btrfs_io_bio_alloc(num_pages - i);
-               bio->bi_bdev = block_ctx->dev->bdev;
+               bio_set_dev(bio, block_ctx->dev->bdev);
                bio->bi_iter.bi_sector = dev_bytenr >> 9;
                bio_set_op_attrs(bio, REQ_OP_READ, 0);
 
@@ -2803,7 +2803,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
        mutex_lock(&btrfsic_mutex);
        /* since btrfsic_submit_bio() is also called before
         * btrfsic_mount(), this might return NULL */
-       dev_state = btrfsic_dev_state_lookup(bio->bi_bdev->bd_dev);
+       dev_state = btrfsic_dev_state_lookup(bio_dev(bio));
        if (NULL != dev_state &&
            (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
                unsigned int i = 0;
@@ -2819,10 +2819,10 @@ static void __btrfsic_submit_bio(struct bio *bio)
                bio_is_patched = 0;
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
-                       pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
+                       pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_disk=%p)\n",
                               bio_op(bio), bio->bi_opf, segs,
                               (unsigned long long)bio->bi_iter.bi_sector,
-                              dev_bytenr, bio->bi_bdev);
+                              dev_bytenr, bio->bi_disk);
 
                mapped_datav = kmalloc_array(segs,
                                             sizeof(*mapped_datav), GFP_NOFS);
@@ -2851,8 +2851,8 @@ static void __btrfsic_submit_bio(struct bio *bio)
        } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
-                       pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
-                              bio_op(bio), bio->bi_opf, bio->bi_bdev);
+                       pr_info("submit_bio(rw=%d,0x%x FLUSH, disk=%p)\n",
+                              bio_op(bio), bio->bi_opf, bio->bi_disk);
                if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
                        if ((dev_state->state->print_mask &
                             (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
index 080e2ebb8aa0137baef69edda45aa895bf8b7c7c..0640c27e63e98e634d35be6f3a374c54388d02a2 100644 (file)
@@ -3499,7 +3499,7 @@ static void write_dev_flush(struct btrfs_device *device)
 
        bio_reset(bio);
        bio->bi_end_io = btrfs_end_empty_barrier;
-       bio->bi_bdev = device->bdev;
+       bio_set_dev(bio, device->bdev);
        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
        init_completion(&device->flush_wait);
        bio->bi_private = &device->flush_wait;
index 0aff9b278c1990f55feb2693a9fff65d5bf693ed..42b12a85ab49d929e29742fcb97be67d0f2d45cc 100644 (file)
@@ -2033,7 +2033,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
                bio_put(bio);
                return -EIO;
        }
-       bio->bi_bdev = dev->bdev;
+       bio_set_dev(bio, dev->bdev);
        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
        bio_add_page(bio, page, length, pg_offset);
 
@@ -2335,7 +2335,7 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
        bio = btrfs_io_bio_alloc(1);
        bio->bi_end_io = endio_func;
        bio->bi_iter.bi_sector = failrec->logical >> 9;
-       bio->bi_bdev = fs_info->fs_devices->latest_bdev;
+       bio_set_dev(bio, fs_info->fs_devices->latest_bdev);
        bio->bi_iter.bi_size = 0;
        bio->bi_private = data;
 
@@ -2675,7 +2675,7 @@ struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte)
        struct bio *bio;
 
        bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, btrfs_bioset);
-       bio->bi_bdev = bdev;
+       bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = first_byte >> 9;
        btrfs_io_bio_init(btrfs_io_bio(bio));
        return bio;
index 208638384cd2abfb1206b2f5927b5763a6330283..d268cb633735bd1d1f3f63b149e4a4b597a86eae 100644 (file)
@@ -1090,7 +1090,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
                 */
                if (last_end == disk_start && stripe->dev->bdev &&
                    !last->bi_status &&
-                   last->bi_bdev == stripe->dev->bdev) {
+                   last->bi_disk == stripe->dev->bdev->bd_disk &&
+                   last->bi_partno == stripe->dev->bdev->bd_partno) {
                        ret = bio_add_page(last, page, PAGE_SIZE, 0);
                        if (ret == PAGE_SIZE)
                                return 0;
@@ -1100,7 +1101,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
        /* put a new bio on the list */
        bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
        bio->bi_iter.bi_size = 0;
-       bio->bi_bdev = stripe->dev->bdev;
+       bio_set_dev(bio, stripe->dev->bdev);
        bio->bi_iter.bi_sector = disk_start >> 9;
 
        bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -1347,7 +1348,8 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
                stripe_start = stripe->physical;
                if (physical >= stripe_start &&
                    physical < stripe_start + rbio->stripe_len &&
-                   bio->bi_bdev == stripe->dev->bdev) {
+                   bio->bi_disk == stripe->dev->bdev->bd_disk &&
+                   bio->bi_partno == stripe->dev->bdev->bd_partno) {
                        return i;
                }
        }
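
With no block_device pointer left in the bio, the btrfs raid56 hunks above (and the f2fs __same_bdev hunk below) compare targets by checking bi_disk and bi_partno individually. The patch does not add a helper for this; a hypothetical wrapper, if one wanted to factor the open-coded comparison out, could look like:

    /* Hypothetical helper, not part of this patch. */
    static inline bool bio_same_bdev(struct bio *bio, struct block_device *bdev)
    {
            return bio->bi_disk == bdev->bd_disk &&
                   bio->bi_partno == bdev->bd_partno;
    }
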
index 6f1e4c984b94a5ed479530bc606ba8d9653bff9a..b0b71e8e4c36d23b50133f4c10073bfcccefcc41 100644 (file)
@@ -1738,7 +1738,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 
                WARN_ON(!page->page);
                bio = btrfs_io_bio_alloc(1);
-               bio->bi_bdev = page->dev->bdev;
+               bio_set_dev(bio, page->dev->bdev);
 
                bio_add_page(bio, page->page, PAGE_SIZE, 0);
                if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
@@ -1826,7 +1826,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                }
 
                bio = btrfs_io_bio_alloc(1);
-               bio->bi_bdev = page_bad->dev->bdev;
+               bio_set_dev(bio, page_bad->dev->bdev);
                bio->bi_iter.bi_sector = page_bad->physical >> 9;
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
@@ -1921,7 +1921,7 @@ again:
 
                bio->bi_private = sbio;
                bio->bi_end_io = scrub_wr_bio_end_io;
-               bio->bi_bdev = sbio->dev->bdev;
+               bio_set_dev(bio, sbio->dev->bdev);
                bio->bi_iter.bi_sector = sbio->physical >> 9;
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                sbio->status = 0;
@@ -1964,7 +1964,7 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
 
        sbio = sctx->wr_curr_bio;
        sctx->wr_curr_bio = NULL;
-       WARN_ON(!sbio->bio->bi_bdev);
+       WARN_ON(!sbio->bio->bi_disk);
        scrub_pending_bio_inc(sctx);
        /* process all writes in a single worker thread. Then the block layer
         * orders the requests before sending them to the driver which
@@ -2321,7 +2321,7 @@ again:
 
                bio->bi_private = sbio;
                bio->bi_end_io = scrub_bio_end_io;
-               bio->bi_bdev = sbio->dev->bdev;
+               bio_set_dev(bio, sbio->dev->bdev);
                bio->bi_iter.bi_sector = sbio->physical >> 9;
                bio_set_op_attrs(bio, REQ_OP_READ, 0);
                sbio->status = 0;
@@ -4627,7 +4627,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
        bio = btrfs_io_bio_alloc(1);
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
-       bio->bi_bdev = dev->bdev;
+       bio_set_dev(bio, dev->bdev);
        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
        ret = bio_add_page(bio, page, PAGE_SIZE, 0);
        if (ret != PAGE_SIZE) {
index e8b9a269fddec78fdf42adec32eabaffdf9c3636..f9f0f474a64f07e7b7c1fc3754a086a300cc4ef3 100644 (file)
@@ -6188,7 +6188,7 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
                rcu_read_unlock();
        }
 #endif
-       bio->bi_bdev = dev->bdev;
+       bio_set_dev(bio, dev->bdev);
 
        btrfs_bio_counter_inc_noblocked(fs_info);
 
index 5715dac7821fe1c49a1c1ecd8f6b12192f3f1d1c..50e51a67dc783c3eb51dc42166ba178b47769d23 100644 (file)
@@ -3057,7 +3057,7 @@ void guard_bio_eod(int op, struct bio *bio)
        struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
        unsigned truncated_bytes;
 
-       maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
+       maxsector = get_capacity(bio->bi_disk);
        if (!maxsector)
                return;
 
@@ -3116,7 +3116,7 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
        }
 
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-       bio->bi_bdev = bh->b_bdev;
+       bio_set_dev(bio, bh->b_bdev);
        bio->bi_write_hint = write_hint;
 
        bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
index 6181e9526860708df8f906830f76ba454e2bb2af..483784d5eb7362b0a682d7ba745ccbf1eec4a511 100644 (file)
@@ -115,7 +115,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
                        err = -ENOMEM;
                        goto errout;
                }
-               bio->bi_bdev = inode->i_sb->s_bdev;
+               bio_set_dev(bio, inode->i_sb->s_bdev);
                bio->bi_iter.bi_sector =
                        pblk << (inode->i_sb->s_blocksize_bits - 9);
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
index 08cf27811e5af22b62a5330a2f21b0d0af25de77..5fa2211e49aee2186546d8db7a70c5295c7591f0 100644 (file)
@@ -111,7 +111,7 @@ struct dio {
        int op;
        int op_flags;
        blk_qc_t bio_cookie;
-       struct block_device *bio_bdev;
+       struct gendisk *bio_disk;
        struct inode *inode;
        loff_t i_size;                  /* i_size when submitted */
        dio_iodone_t *end_io;           /* IO completion function */
@@ -377,7 +377,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
         */
        bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
-       bio->bi_bdev = bdev;
+       bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = first_sector;
        bio_set_op_attrs(bio, dio->op, dio->op_flags);
        if (dio->is_async)
@@ -412,7 +412,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
        if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
                bio_set_pages_dirty(bio);
 
-       dio->bio_bdev = bio->bi_bdev;
+       dio->bio_disk = bio->bi_disk;
 
        if (sdio->submit_io) {
                sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
@@ -458,7 +458,7 @@ static struct bio *dio_await_one(struct dio *dio)
                dio->waiter = current;
                spin_unlock_irqrestore(&dio->bio_lock, flags);
                if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_mq_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
+                   !blk_mq_poll(dio->bio_disk->queue, dio->bio_cookie))
                        io_schedule();
                /* wake up sets us TASK_RUNNING */
                spin_lock_irqsave(&dio->bio_lock, flags);
index 8bb72807e70d46ae2ab32e7e8d29de4d034454f5..3c6a9c156b7accb06d96905bc454fe41b2e7c857 100644 (file)
@@ -869,7 +869,7 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
                                        goto out;
                                }
 
-                               bio->bi_bdev = NULL;
+                               bio->bi_disk = NULL;
                                bio->bi_next = NULL;
                                per_dev->offset = master_dev->offset;
                                per_dev->length = master_dev->length;
index c2fce4478cca26445c0605ff61a1f1b00302c41c..55ad7dd149d005dca26109fd0f018fdd0e075220 100644 (file)
@@ -300,7 +300,7 @@ static void ext4_end_bio(struct bio *bio)
        char b[BDEVNAME_SIZE];
 
        if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n",
-                     bdevname(bio->bi_bdev, b),
+                     bio_devname(bio, b),
                      (long long) bio->bi_iter.bi_sector,
                      (unsigned) bio_sectors(bio),
                      bio->bi_status)) {
@@ -375,7 +375,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
                return -ENOMEM;
        wbc_init_bio(io->io_wbc, bio);
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-       bio->bi_bdev = bh->b_bdev;
+       bio_set_dev(bio, bh->b_bdev);
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
        io->io_bio = bio;
index 40a5497b0f605c8bbc9b2192591bb49acdb0d2b7..04c90643af7a4763647919188e67f5e0479cec1b 100644 (file)
@@ -254,7 +254,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
                                        fscrypt_release_ctx(ctx);
                                goto set_error_page;
                        }
-                       bio->bi_bdev = bdev;
+                       bio_set_dev(bio, bdev);
                        bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
                        bio->bi_end_io = mpage_end_io;
                        bio->bi_private = ctx;
index 87c1f4150c645c83c64cd7f90243d347121f8886..a791aac4c5af154051becb0b183c448acaf96c2d 100644 (file)
@@ -142,7 +142,7 @@ struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
                }
        }
        if (bio) {
-               bio->bi_bdev = bdev;
+               bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
        }
        return bdev;
@@ -161,7 +161,8 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
 static bool __same_bdev(struct f2fs_sb_info *sbi,
                                block_t blk_addr, struct bio *bio)
 {
-       return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
+       struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
+       return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
 }
 
 /*
index f964b68718c18f5953971351a5358ccaa841a6ca..6f8fc4a6e701b355c00c5430f3e2db90e6576e56 100644 (file)
@@ -447,7 +447,7 @@ static int __submit_flush_wait(struct f2fs_sb_info *sbi,
        int ret;
 
        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
-       bio->bi_bdev = bdev;
+       bio_set_dev(bio, bdev);
        ret = submit_bio_wait(bio);
        bio_put(bio);
 
index 3010f9edd177dffeb3e270515a15e643e37bdd22..720c19ada0f919b8bc1d3b57f7d9e894fe29f960 100644 (file)
@@ -265,7 +265,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
 
        bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
        bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
-       bio->bi_bdev = sb->s_bdev;
+       bio_set_dev(bio, sb->s_bdev);
        bio->bi_end_io = gfs2_end_log_write;
        bio->bi_private = sdp;
 
index fabe1614f879525827290d05eb79ca53ad76a57e..39433a173baace9268e312b296818a05b3fd35aa 100644 (file)
@@ -221,7 +221,7 @@ static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
 
                bio = bio_alloc(GFP_NOIO, num);
                bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-               bio->bi_bdev = bh->b_bdev;
+               bio_set_dev(bio, bh->b_bdev);
                while (num > 0) {
                        bh = *bhs;
                        if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
index e76058d34b7468b3762a6955f401af822c7a8e9a..8155e16076e13aea0e4002af2de052225c569bbc 100644 (file)
@@ -242,7 +242,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 
        bio = bio_alloc(GFP_NOFS, 1);
        bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
-       bio->bi_bdev = sb->s_bdev;
+       bio_set_dev(bio, sb->s_bdev);
        bio_add_page(bio, page, PAGE_SIZE, 0);
 
        bio->bi_end_io = end_bio_io_page;
index e254fa0f069710aae74e88f41484de4d39ef7d0a..10032b919a85c21be47678d7117d61f49fcc81b2 100644 (file)
@@ -65,7 +65,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
 
        bio = bio_alloc(GFP_NOIO, 1);
        bio->bi_iter.bi_sector = sector;
-       bio->bi_bdev = sb->s_bdev;
+       bio_set_dev(bio, sb->s_bdev);
        bio_set_op_attrs(bio, op, op_flags);
 
        if (op != WRITE && data)
diff --git a/fs/iomap.c b/fs/iomap.c
index 039266128b7ff0b09ed2a55e621a9a0dc9e38720..77be8850997bbc7fd37d9a29ac8925cd476ccb2d 100644 (file)
@@ -805,7 +805,7 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
        struct bio *bio;
 
        bio = bio_alloc(GFP_KERNEL, 1);
-       bio->bi_bdev = iomap->bdev;
+       bio_set_dev(bio, iomap->bdev);
        bio->bi_iter.bi_sector =
                iomap->blkno + ((pos - iomap->offset) >> 9);
        bio->bi_private = dio;
@@ -884,7 +884,7 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
                        return 0;
 
                bio = bio_alloc(GFP_KERNEL, nr_pages);
-               bio->bi_bdev = iomap->bdev;
+               bio_set_dev(bio, iomap->bdev);
                bio->bi_iter.bi_sector =
                        iomap->blkno + ((pos - iomap->offset) >> 9);
                bio->bi_write_hint = dio->iocb->ki_hint;
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index a21f0e9eecd45ef34765a4ae567cd4e3df498dbf..0e5d412c0b01a45cdc67a164f471e8b8b4a193c9 100644 (file)
@@ -1995,7 +1995,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
        bio = bio_alloc(GFP_NOFS, 1);
 
        bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
-       bio->bi_bdev = log->bdev;
+       bio_set_dev(bio, log->bdev);
 
        bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
        BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
@@ -2139,7 +2139,7 @@ static void lbmStartIO(struct lbuf * bp)
 
        bio = bio_alloc(GFP_NOFS, 1);
        bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
-       bio->bi_bdev = log->bdev;
+       bio_set_dev(bio, log->bdev);
 
        bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
        BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 65120a4717290ae3ec109cb116172e7f091f9f79..1c4b9ad4d7ab66446dae9241f44abe69429d33fb 100644 (file)
@@ -430,7 +430,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
                len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);
 
                bio = bio_alloc(GFP_NOFS, 1);
-               bio->bi_bdev = inode->i_sb->s_bdev;
+               bio_set_dev(bio, inode->i_sb->s_bdev);
                bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
                bio->bi_end_io = metapage_write_end_io;
                bio->bi_private = page;
@@ -510,7 +510,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
                                submit_bio(bio);
 
                        bio = bio_alloc(GFP_NOFS, 1);
-                       bio->bi_bdev = inode->i_sb->s_bdev;
+                       bio_set_dev(bio, inode->i_sb->s_bdev);
                        bio->bi_iter.bi_sector =
                                pblock << (inode->i_blkbits - 9);
                        bio->bi_end_io = metapage_read_end_io;
diff --git a/fs/mpage.c b/fs/mpage.c
index 2e4c41ccb5c91a4824465678215d190e228134e6..37bb77c1302c354adf0386a32e3b678fdc93f54c 100644 (file)
@@ -83,7 +83,7 @@ mpage_alloc(struct block_device *bdev,
        }
 
        if (bio) {
-               bio->bi_bdev = bdev;
+               bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = first_sector;
        }
        return bio;
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index d8863a804b15756632a15dcd3d0076db4a0bf136..995d707537dab942ce9aae0fc1388d114d68699b 100644 (file)
@@ -130,7 +130,7 @@ bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
 
        if (bio) {
                bio->bi_iter.bi_sector = disk_sector;
-               bio->bi_bdev = bdev;
+               bio_set_dev(bio, bdev);
                bio->bi_end_io = end_io;
                bio->bi_private = par;
        }
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index e73c86d9855ccd186b4c3f75f7a56b6e61060e47..6c5009cc4e6f619ab8869097f0b7ea379d6b3423 100644 (file)
@@ -400,7 +400,7 @@ static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
                        bio = bio_alloc(GFP_NOIO, nr_vecs);
        }
        if (likely(bio)) {
-               bio->bi_bdev = nilfs->ns_bdev;
+               bio_set_dev(bio, nilfs->ns_bdev);
                bio->bi_iter.bi_sector =
                        start << (nilfs->ns_blocksize_bits - 9);
        }
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index ffe003982d95622decb1ff65ddaa22605cedea58..6aea15746a5672ca25fb7b6a61731137a02ce98f 100644 (file)
@@ -554,7 +554,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
 
        /* Must put everything in 512 byte sectors for the bio... */
        bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
-       bio->bi_bdev = reg->hr_bdev;
+       bio_set_dev(bio, reg->hr_bdev);
        bio->bi_private = wc;
        bio->bi_end_io = o2hb_bio_end_io;
        bio_set_op_attrs(bio, op, op_flags);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 6bf120bb1a17da7f3b571964ee58719c9315fae3..c8ca03a5a08feeb7aff455f70e84eb528873580d 100644 (file)
@@ -517,7 +517,7 @@ xfs_init_bio_from_bh(
        struct buffer_head      *bh)
 {
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-       bio->bi_bdev = bh->b_bdev;
+       bio_set_dev(bio, bh->b_bdev);
 }
 
 static struct xfs_ioend *
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 72f038492ba8cdb2bd609fd19949d96b95c8ede0..b1c9711e79a46051801a0fe34e95dd0515ff4fef 100644 (file)
@@ -1281,7 +1281,7 @@ next_chunk:
        nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
 
        bio = bio_alloc(GFP_NOIO, nr_pages);
-       bio->bi_bdev = bp->b_target->bt_bdev;
+       bio_set_dev(bio, bp->b_target->bt_bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = xfs_buf_bio_end_io;
        bio->bi_private = bp;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 9276788a9b249013c74cd2bbe4946b6a3bb75404..a8fe7935332f34869ba2c554575ecfdd4041ad25 100644 (file)
@@ -494,6 +494,24 @@ extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
 extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
+#define bio_set_dev(bio, bdev)                         \
+do {                                           \
+       (bio)->bi_disk = (bdev)->bd_disk;       \
+       (bio)->bi_partno = (bdev)->bd_partno;   \
+} while (0)
+
+#define bio_copy_dev(dst, src)                 \
+do {                                           \
+       (dst)->bi_disk = (src)->bi_disk;        \
+       (dst)->bi_partno = (src)->bi_partno;    \
+} while (0)
+
+#define bio_dev(bio) \
+       disk_devt((bio)->bi_disk)
+
+#define bio_devname(bio, buf) \
+       __bdevname(bio_dev(bio), (buf))
+
 #ifdef CONFIG_BLK_CGROUP
 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
 int bio_associate_current(struct bio *bio);
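
The four helpers added above are the only way the rest of the tree now touches a bio's device: bio_set_dev() points a bio at a block_device, bio_copy_dev() propagates the disk/partition pair between bios, and bio_dev()/bio_devname() recover the dev_t and a printable name for diagnostics. As a rough illustration (a hedged sketch, not code from this patch; clone_and_remap and the lower device are made up for the example), a remapping driver might use them like this:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch only: clone a bio and point the clone at a lower device.
 * bio_set_dev() fills in both bi_disk and bi_partno from the bdev;
 * bio_copy_dev(clone, src) would instead keep the original target. */
static struct bio *clone_and_remap(struct bio *src,
				   struct block_device *lower,
				   struct bio_set *bs)
{
	char name[BDEVNAME_SIZE];
	struct bio *clone = bio_clone_fast(src, GFP_NOIO, bs);

	if (!clone)
		return NULL;

	bio_set_dev(clone, lower);
	pr_debug("remapped bio to %s\n", bio_devname(clone, name));
	return clone;
}
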
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d2eb87c84d82b7eb737d9f8a4e052d687a2dca2e..a2d2aa709cef4afc0bb5c3875df3073b81b368fa 100644 (file)
@@ -48,7 +48,8 @@ struct blk_issue_stat {
  */
 struct bio {
        struct bio              *bi_next;       /* request queue link */
-       struct block_device     *bi_bdev;
+       struct gendisk          *bi_disk;
+       u8                      bi_partno;
        blk_status_t            bi_status;
        unsigned int            bi_opf;         /* bottom bits req flags,
                                                 * top bits REQ_OP. Use
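
With bi_bdev gone, the partition offset is no longer baked into the bio by the caller; generic_make_request resolves bi_partno against bi_disk when it remaps the bio, as the commit message notes. A simplified sketch of that remapping step, using the existing disk_get_part()/disk_put_part() genhd helpers (the function itself is illustrative, not the in-tree implementation):

#include <linux/bio.h>
#include <linux/genhd.h>

/* Illustrative only: translate a bio addressed to (bi_disk, bi_partno)
 * into one addressed to the whole disk by adding the partition start. */
static int remap_to_whole_disk(struct bio *bio)
{
	struct hd_struct *p;

	if (!bio->bi_partno)
		return 0;	/* already whole-disk addressed */

	p = disk_get_part(bio->bi_disk, bio->bi_partno);
	if (!p)
		return -EIO;	/* partition disappeared underneath us */

	bio->bi_iter.bi_sector += p->start_sect;
	bio->bi_partno = 0;
	disk_put_part(p);
	return 0;
}
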
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index df3e9ae5ad8d2b4e2aace80bd8c189fd09098c28..daf749138ff80c9fb6c12b1940168fa2f33cea7f 100644 (file)
@@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(bcache_request,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev->bd_dev;
+               __entry->dev            = bio_dev(bio);
                __entry->orig_major     = d->disk->major;
                __entry->orig_minor     = d->disk->first_minor;
                __entry->sector         = bio->bi_iter.bi_sector;
@@ -98,7 +98,7 @@ DECLARE_EVENT_CLASS(bcache_bio,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev->bd_dev;
+               __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
@@ -133,7 +133,7 @@ TRACE_EVENT(bcache_read,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev->bd_dev;
+               __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index d0dbe60d8a6dd5ccbb89029796fcabd23d970ceb..f815aaaef755af6dae63d71b154f5012e6a2a360 100644 (file)
@@ -236,8 +236,7 @@ TRACE_EVENT(block_bio_bounce,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev ?
-                                         bio->bi_bdev->bd_dev : 0;
+               __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
@@ -274,7 +273,7 @@ TRACE_EVENT(block_bio_complete,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev->bd_dev;
+               __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->error          = error;
@@ -302,7 +301,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev->bd_dev;
+               __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
@@ -369,7 +368,7 @@ TRACE_EVENT(block_bio_queue,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev->bd_dev;
+               __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
@@ -397,7 +396,8 @@ DECLARE_EVENT_CLASS(block_get_rq,
         ),
 
        TP_fast_assign(
-               __entry->dev            = bio ? bio->bi_bdev->bd_dev : 0;
+               __entry->dev            = bio ? bio_dev(bio) : 0;
+               __entry->dev            = bio_dev(bio);
                __entry->sector         = bio ? bio->bi_iter.bi_sector : 0;
                __entry->nr_sector      = bio ? bio_sectors(bio) : 0;
                blk_fill_rwbs(__entry->rwbs,
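
Note that, as merged, the block_get_rq hunk above leaves two assignments to __entry->dev; the second calls bio_dev() unconditionally, although block_getrq/block_sleeprq can be traced with a NULL bio, which is why the old code carried the ternary. A NULL-tolerant wrapper of the kind sketched below (hypothetical, not in the tree) preserves the old guard:

#include <linux/bio.h>

/* Hypothetical helper: resolve a bio's device, tolerating bio == NULL,
 * since the request-allocation tracepoints can fire without a bio. */
static inline dev_t bio_dev_or_zero(struct bio *bio)
{
	return bio ? bio_dev(bio) : 0;
}
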
@@ -532,7 +532,7 @@ TRACE_EVENT(block_split,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev->bd_dev;
+               __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->new_sector     = new_sector;
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
@@ -573,7 +573,7 @@ TRACE_EVENT(block_bio_remap,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev->bd_dev;
+               __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->old_dev        = dev;
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 6f77a2755abbfa89b4ab93467d69d36364650693..bc4dd7837e4c6651c607955d1d605a0f732be28a 100644 (file)
@@ -829,7 +829,7 @@ DECLARE_EVENT_CLASS(f2fs__bio,
 
        TP_fast_assign(
                __entry->dev            = sb->s_dev;
-               __entry->target         = bio->bi_bdev->bd_dev;
+               __entry->target         = bio_dev(bio);
                __entry->op             = bio_op(bio);
                __entry->op_flags       = bio->bi_opf;
                __entry->type           = type;
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 57d22571f3068bdecf36a3ac0f8d8566d5f7c455..d7cdc426ee3809bbaafdfd9d5698f83db654e23a 100644 (file)
@@ -242,8 +242,7 @@ static void hib_end_io(struct bio *bio)
 
        if (bio->bi_status) {
                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
-                               imajor(bio->bi_bdev->bd_inode),
-                               iminor(bio->bi_bdev->bd_inode),
+                               MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                                (unsigned long long)bio->bi_iter.bi_sector);
        }
 
@@ -270,7 +269,7 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
 
        bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
        bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
-       bio->bi_bdev = hib_resume_bdev;
+       bio_set_dev(bio, hib_resume_bdev);
        bio_set_op_attrs(bio, op, op_flags);
 
        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
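
The swsusp error path above no longer needs to reach through bd_inode for the device numbers: MAJOR()/MINOR() of bio_dev() recover them from the bio itself. A minimal sketch of that reporting pattern outside this file (the function name is invented for the example):

#include <linux/bio.h>
#include <linux/kdev_t.h>
#include <linux/printk.h>

/* Sketch: report an I/O error using only information held in the bio. */
static void report_bio_error(struct bio *bio)
{
	if (bio->bi_status)
		pr_err("I/O error on device %u:%u, sector %llu\n",
		       MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
		       (unsigned long long)bio->bi_iter.bi_sector);
}
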
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7724de18d2feb3eb376610862ae2377f48363ee4..2a685b45b73be4159bd310d8c4530a87df9e4cec 100644 (file)
@@ -963,7 +963,7 @@ static void blk_add_trace_bio_remap(void *ignore,
                return;
 
        r.device_from = cpu_to_be32(dev);
-       r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
+       r.device_to   = cpu_to_be32(bio_dev(bio));
        r.sector_from = cpu_to_be64(from);
 
        __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
diff --git a/mm/page_io.c b/mm/page_io.c
index b6c4ac388209c945d744f2541326411314a1cceb..9cf1bc751d790ab154332cf7de2af9007f56c7ef 100644 (file)
@@ -31,7 +31,10 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
 
        bio = bio_alloc(gfp_flags, 1);
        if (bio) {
-               bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
+               struct block_device *bdev;
+
+               bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
+               bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
                bio->bi_end_io = end_io;
 
@@ -57,8 +60,7 @@ void end_swap_bio_write(struct bio *bio)
                 */
                set_page_dirty(page);
                pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
-                        imajor(bio->bi_bdev->bd_inode),
-                        iminor(bio->bi_bdev->bd_inode),
+                        MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                         (unsigned long long)bio->bi_iter.bi_sector);
                ClearPageReclaim(page);
        }
@@ -123,8 +125,7 @@ static void end_swap_bio_read(struct bio *bio)
                SetPageError(page);
                ClearPageUptodate(page);
                pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
-                        imajor(bio->bi_bdev->bd_inode),
-                        iminor(bio->bi_bdev->bd_inode),
+                        MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                         (unsigned long long)bio->bi_iter.bi_sector);
                goto out;
        }
@@ -338,7 +339,7 @@ int swap_readpage(struct page *page, bool do_poll)
        int ret = 0;
        struct swap_info_struct *sis = page_swap_info(page);
        blk_qc_t qc;
-       struct block_device *bdev;
+       struct gendisk *disk;
 
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -377,7 +378,7 @@ int swap_readpage(struct page *page, bool do_poll)
                ret = -ENOMEM;
                goto out;
        }
-       bdev = bio->bi_bdev;
+       disk = bio->bi_disk;
        bio->bi_private = current;
        bio_set_op_attrs(bio, REQ_OP_READ, 0);
        count_vm_event(PSWPIN);
@@ -388,7 +389,7 @@ int swap_readpage(struct page *page, bool do_poll)
                if (!READ_ONCE(bio->bi_private))
                        break;
 
-               if (!blk_mq_poll(bdev_get_queue(bdev), qc))
+               if (!blk_mq_poll(disk->queue, qc))
                        break;
        }
        __set_current_state(TASK_RUNNING);
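
The swap_readpage change shows the polling side of the conversion: the request_queue is now reached through bio->bi_disk->queue rather than through bdev_get_queue() on a block_device. A condensed sketch of that submit-and-poll pattern, assuming (as swap_readpage does) that the bio's end_io handler clears bi_private on completion; the scheduling details of the real loop are omitted:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched.h>

/* Sketch only: submit @bio and poll its queue until the end_io handler
 * (not shown) clears bi_private to signal completion. */
static void submit_and_poll(struct bio *bio)
{
	struct gendisk *disk = bio->bi_disk;
	blk_qc_t qc;

	bio->bi_private = current;	/* cleared by the end_io handler */
	qc = submit_bio(bio);

	while (READ_ONCE(bio->bi_private)) {
		if (!blk_mq_poll(disk->queue, qc))
			break;
	}
}
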