block, drivers: add REQ_OP_FLUSH operation
author    Mike Christie <mchristi@redhat.com>
          Sun, 5 Jun 2016 19:32:23 +0000 (14:32 -0500)
committer Jens Axboe <axboe@fb.com>
          Tue, 7 Jun 2016 19:41:38 +0000 (13:41 -0600)
This adds a REQ_OP_FLUSH operation that is sent to request_fn-based
drivers by the block layer's flush code, instead of sending requests
with the request->cmd_flags REQ_FLUSH bit set.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
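
For orientation, here is a minimal userspace model of the op/flags split the
hunks below rely on. req_op() and req_set_op_attrs() are the helper names the
converted code uses; the bit placement and the REQ_FUA value chosen here are
illustrative only (the real layout lives in include/linux/blk_types.h):

#include <assert.h>
#include <stdio.h>

enum req_op {
	REQ_OP_READ,
	REQ_OP_WRITE,
	REQ_OP_DISCARD,
	REQ_OP_WRITE_SAME,
	REQ_OP_FLUSH,
};

#define REQ_OP_BITS	3
#define REQ_OP_SHIFT	(32 - REQ_OP_BITS)	/* illustrative placement */
#define REQ_FUA		(1u << 0)		/* illustrative flag bit */

struct request {
	unsigned int cmd_flags;			/* op field plus flag bits */
};

/* extract the operation, as the converted drivers now do */
static inline enum req_op req_op(const struct request *rq)
{
	return (enum req_op)(rq->cmd_flags >> REQ_OP_SHIFT);
}

/* pack an op and its flags together, as blk_kick_flush() now does */
static inline void req_set_op_attrs(struct request *rq, enum req_op op,
				    unsigned int flags)
{
	rq->cmd_flags = ((unsigned int)op << REQ_OP_SHIFT) | flags;
}

int main(void)
{
	struct request rq;

	req_set_op_attrs(&rq, REQ_OP_FLUSH, REQ_FUA);
	assert(req_op(&rq) == REQ_OP_FLUSH);	/* op survives next to flags */
	assert(rq.cmd_flags & REQ_FUA);
	printf("op=%d\n", (int)req_op(&rq));
	return 0;
}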
20 files changed:
Documentation/block/writeback_cache_control.txt
arch/um/drivers/ubd_kern.c
block/blk-flush.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/osdblk.c
drivers/block/ps3disk.c
drivers/block/skd_main.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/ide/ide-disk.c
drivers/md/dm.c
drivers/mmc/card/block.c
drivers/mmc/card/queue.h
drivers/mtd/mtd_blkdevs.c
drivers/nvme/host/core.c
drivers/scsi/sd.c
include/linux/blk_types.h
include/linux/blkdev.h
kernel/trace/blktrace.c


diff --git a/Documentation/block/writeback_cache_control.txt b/Documentation/block/writeback_cache_control.txt
index 59e0516cbf6b68618b692979ed1c14b30da3fb2f..da70bdacd5032c3d32df37a0021aeef99c960265 100644
@@ -73,9 +73,9 @@ doing:
 
        blk_queue_write_cache(sdkp->disk->queue, true, false);
 
-and handle empty REQ_FLUSH requests in its prep_fn/request_fn.  Note that
+and handle empty REQ_OP_FLUSH requests in its prep_fn/request_fn.  Note that
 REQ_FLUSH requests with a payload are automatically turned into a sequence
-of an empty REQ_FLUSH request followed by the actual write by the block
+of an empty REQ_OP_FLUSH request followed by the actual write by the block
 layer.  For devices that also support the FUA bit the block layer needs
 to be told to pass through the REQ_FUA bit using:
 
@@ -83,4 +83,4 @@ to be told to pass through the REQ_FUA bit using:
 
 and the driver must handle write requests that have the REQ_FUA bit set
 in prep_fn/request_fn.  If the FUA bit is not natively supported the block
-layer turns it into an empty REQ_FLUSH request after the actual write.
+layer turns it into an empty REQ_OP_FLUSH request after the actual write.
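
As a sketch of what this documentation asks of a driver (not compilable as-is;
the mydev_* helpers are hypothetical), a request_fn might dispatch roughly
like this:

static void mydev_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		if (req_op(req) == REQ_OP_FLUSH) {
			/* empty flush request: no payload, drain the cache */
			mydev_flush_cache(req);
		} else if (req->cmd_flags & REQ_FUA) {
			/* write that must reach stable media on completion */
			mydev_write_fua(req);
		} else {
			mydev_read_write(req);
		}
	}
}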

diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 17e96dc29596ccefd8120a01a9f6c20ff6f17886..ef6b4d960badeba6f477eca9b745f4c0f104cd4a 100644
@@ -1286,7 +1286,7 @@ static void do_ubd_request(struct request_queue *q)
 
                req = dev->request;
 
-               if (req->cmd_flags & REQ_FLUSH) {
+               if (req_op(req) == REQ_OP_FLUSH) {
                        io_req = kmalloc(sizeof(struct io_thread_req),
                                         GFP_ATOMIC);
                        if (io_req == NULL) {

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9fd1f63a634816b238d6b04b23fe328886a22a07..21f0d5b0d2cacf02b5dfc05f81be9554640502c8 100644
@@ -29,7 +29,7 @@
  * The actual execution of flush is double buffered.  Whenever a request
  * needs to execute PRE or POSTFLUSH, it queues at
  * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
- * flush is issued and the pending_idx is toggled.  When the flush
+ * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
  * completes, all the requests which were pending are proceeded to the next
  * step.  This allows arbitrary merging of different types of FLUSH/FUA
  * requests.
@@ -330,7 +330,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
        }
 
        flush_rq->cmd_type = REQ_TYPE_FS;
-       flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+       req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH | REQ_FLUSH_SEQ);
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;
 

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b9b737cafd5f1836aaf16eb20e3d11154c3e0d88..364d491d4bdd4643d93329c57ea3589bc7840ddc 100644
@@ -542,7 +542,7 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
        pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
 
        if (op_is_write(req_op(rq))) {
-               if (rq->cmd_flags & REQ_FLUSH)
+               if (req_op(rq) == REQ_OP_FLUSH)
                        ret = lo_req_flush(lo, rq);
                else if (req_op(rq) == REQ_OP_DISCARD)
                        ret = lo_discard(lo, rq, pos);
@@ -1659,7 +1659,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (lo->lo_state != Lo_bound)
                return -EIO;
 
-       if (lo->use_dio && (!(cmd->rq->cmd_flags & REQ_FLUSH) ||
+       if (lo->use_dio && (req_op(cmd->rq) != REQ_OP_FLUSH ||
            req_op(cmd->rq) == REQ_OP_DISCARD))
                cmd->use_aio = true;
        else

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6c2c28d124d0bdee954cae41a75a4afe10b777bd..d6f3c9336f2984d05db82d4acede48c1cb440dae 100644
@@ -284,7 +284,7 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
                type = NBD_CMD_DISC;
        else if (req_op(req) == REQ_OP_DISCARD)
                type = NBD_CMD_TRIM;
-       else if (req->cmd_flags & REQ_FLUSH)
+       else if (req_op(req) == REQ_OP_FLUSH)
                type = NBD_CMD_FLUSH;
        else if (rq_data_dir(req) == WRITE)
                type = NBD_CMD_WRITE;

diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index c2854a2bfdb0bd1027fb4f10f2c39c8761d7d709..92900f5f0b4725ebc1d1edbf3da30138a3e2979b 100644
@@ -321,7 +321,7 @@ static void osdblk_rq_fn(struct request_queue *q)
                 * driver-specific, etc.
                 */
 
-               do_flush = rq->cmd_flags & REQ_FLUSH;
+               do_flush = (req_op(rq) == REQ_OP_FLUSH);
                do_write = (rq_data_dir(rq) == WRITE);
 
                if (!do_flush) { /* osd_flush does not use a bio */

diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 4b7e405830d7ec097fcf26a51cb5edc268a74812..acb44529c05e8e6a50d8ee707000042a5fc817d0 100644
@@ -196,7 +196,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
        dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
 
        while ((req = blk_fetch_request(q))) {
-               if (req->cmd_flags & REQ_FLUSH) {
+               if (req_op(req) == REQ_OP_FLUSH) {
                        if (ps3disk_submit_flush_request(dev, req))
                                break;
                } else if (req->cmd_type == REQ_TYPE_FS) {
@@ -256,7 +256,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
                return IRQ_HANDLED;
        }
 
-       if (req->cmd_flags & REQ_FLUSH) {
+       if (req_op(req) == REQ_OP_FLUSH) {
                read = 0;
                op = "flush";
        } else {

diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 910e065918af13d2536f6fe6736a4fad3303e243..5c07a23e2adabf51ae2ed7463d6a27fdd9e0bf25 100644
@@ -597,7 +597,7 @@ static void skd_request_fn(struct request_queue *q)
                data_dir = rq_data_dir(req);
                io_flags = req->cmd_flags;
 
-               if (io_flags & REQ_FLUSH)
+               if (req_op(req) == REQ_OP_FLUSH)
                        flush++;
 
                if (io_flags & REQ_FUA)

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 42758b52768cf894119e4de41902b4de7ecdd9d3..18e4069dd24b2fe7d3fdd80e290994832fbe55de 100644
@@ -172,7 +172,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
        vbr->req = req;
-       if (req->cmd_flags & REQ_FLUSH) {
+       if (req_op(req) == REQ_OP_FLUSH) {
                vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
                vbr->out_hdr.sector = 0;
                vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6fd160197b7a465d647c5dfe2d0849a6b341e6ad..3aeb25bd505789f14d3dbedc79d48dba28be8781 100644
@@ -743,7 +743,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
                 * The indirect operation can only be a BLKIF_OP_READ or
                 * BLKIF_OP_WRITE
                 */
-               BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
+               BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
                ring_req->operation = BLKIF_OP_INDIRECT;
                ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
                        BLKIF_OP_WRITE : BLKIF_OP_READ;
@@ -755,7 +755,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
                ring_req->u.rw.handle = info->handle;
                ring_req->operation = rq_data_dir(req) ?
                        BLKIF_OP_WRITE : BLKIF_OP_READ;
-               if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+               if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
                        /*
                         * Ideally we can do an unordered flush-to-disk.
                         * In case the backend only supports barriers, use that.
@@ -865,7 +865,7 @@ static inline bool blkif_request_flush_invalid(struct request *req,
                                               struct blkfront_info *info)
 {
        return ((req->cmd_type != REQ_TYPE_FS) ||
-               ((req->cmd_flags & REQ_FLUSH) &&
+               ((req_op(req) == REQ_OP_FLUSH) &&
                 !(info->feature_flush & REQ_FLUSH)) ||
                ((req->cmd_flags & REQ_FUA) &&
                 !(info->feature_flush & REQ_FUA)));
@@ -2055,7 +2055,7 @@ static int blkif_recover(struct blkfront_info *info)
                        /*
                         * Get the bios in the request so we can re-queue them.
                         */
-                       if (copy[i].request->cmd_flags & REQ_FLUSH ||
+                       if (req_op(copy[i].request) == REQ_OP_FLUSH ||
                            req_op(copy[i].request) == REQ_OP_DISCARD ||
                            copy[i].request->cmd_flags & (REQ_FUA | REQ_SECURE)) {
                                /*

diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 05dbcce70b0e33736c9ac33f9b5103c40d6a45cc..e378ef70ed638f040c4e25bb4765f6bb86c1cfd9 100644
@@ -431,7 +431,7 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
        ide_drive_t *drive = q->queuedata;
        struct ide_cmd *cmd;
 
-       if (!(rq->cmd_flags & REQ_FLUSH))
+       if (req_op(rq) != REQ_OP_FLUSH)
                return BLKPREP_OK;
 
        if (rq->special) {

diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f6b104c77b6d3d47e46d58f3bc34a4dea346cfef..fcc68c8edba0231c759295d985e410e6b23823bb 100644
@@ -2171,7 +2171,7 @@ static void dm_request_fn(struct request_queue *q)
 
                /* always use block 0 to find the target for flushes for now */
                pos = 0;
-               if (!(rq->cmd_flags & REQ_FLUSH))
+               if (req_op(rq) != REQ_OP_FLUSH)
                        pos = blk_rq_pos(rq);
 
                if ((dm_request_peeked_before_merge_deadline(md) &&

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 201a8719f6c417473b3e9ee70752d8e6fb58645a..bca20f88a8b2a16d0491d2bf9f37492967d1c2ab 100644
@@ -1722,7 +1722,8 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
                    !IS_ALIGNED(blk_rq_sectors(next), 8))
                        break;
 
-               if (req_op(next) == REQ_OP_DISCARD || next->cmd_flags & REQ_FLUSH)
+               if (req_op(next) == REQ_OP_DISCARD ||
+                   req_op(next) == REQ_OP_FLUSH)
                        break;
 
                if (rq_data_dir(cur) != rq_data_dir(next))
@@ -2147,7 +2148,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        struct mmc_card *card = md->queue.card;
        struct mmc_host *host = card->host;
        unsigned long flags;
-       unsigned int cmd_flags = req ? req->cmd_flags : 0;
 
        if (req && !mq->mqrq_prev->req)
                /* claim host only for the first request */
@@ -2171,7 +2171,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        ret = mmc_blk_issue_discard_rq(mq, req);
-       } else if (cmd_flags & REQ_FLUSH) {
+       } else if (req && req_op(req) == REQ_OP_FLUSH) {
                /* complete ongoing async transfer before issuing flush */
                if (card->host->areq)
                        mmc_blk_issue_rw_rq(mq, NULL);

diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 9fb26f20a44d4d5bc414e284c4a2ed456a49c3ef..d62531124d542c0ff82893ed04226e2a0c04016e 100644
@@ -3,7 +3,8 @@
 
 static inline bool mmc_req_is_special(struct request *req)
 {
-       return req && (req->cmd_flags & REQ_FLUSH || req_op(req) == REQ_OP_DISCARD);
+       return req &&
+               (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD);
 }
 
 struct request;

diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 4eb9a5fb151ccd8623919caf1031e65c48765fe4..78b3eb45faf602ebe4681590ce2c30cd77228349 100644
@@ -87,7 +87,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
        if (req->cmd_type != REQ_TYPE_FS)
                return -EIO;
 
-       if (req->cmd_flags & REQ_FLUSH)
+       if (req_op(req) == REQ_OP_FLUSH)
                return tr->flush(dev);
 
        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 089b8b8aad4f947876c969123d047a746ac8798c..abdfdcfb66f4a1ffdcf8bf7d88de79d909c75209 100644
@@ -290,7 +290,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 
        if (req->cmd_type == REQ_TYPE_DRV_PRIV)
                memcpy(cmd, req->cmd, sizeof(*cmd));
-       else if (req->cmd_flags & REQ_FLUSH)
+       else if (req_op(req) == REQ_OP_FLUSH)
                nvme_setup_flush(ns, cmd);
        else if (req_op(req) == REQ_OP_DISCARD)
                ret = nvme_setup_discard(ns, req, cmd);

diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fad86ad89e6419753ebf5a723345003d148303f9..5a9db0fe1ee0d9cb12954f70545666e3e25ad45b 100644
@@ -1143,12 +1143,11 @@ static int sd_init_command(struct scsi_cmnd *cmd)
                return sd_setup_discard_cmnd(cmd);
        case REQ_OP_WRITE_SAME:
                return sd_setup_write_same_cmnd(cmd);
+       case REQ_OP_FLUSH:
+               return sd_setup_flush_cmnd(cmd);
        case REQ_OP_READ:
        case REQ_OP_WRITE:
-               if (rq->cmd_flags & REQ_FLUSH)
-                       return sd_setup_flush_cmnd(cmd);
-               else
-                       return sd_setup_read_write_cmnd(cmd);
+               return sd_setup_read_write_cmnd(cmd);
        default:
                BUG();
        }

diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 23c1ab2a94753142e71fd02a912d4c788f5cd786..32d87522f349738e445180c3fbee7b44bf7c9e54 100644
@@ -249,9 +249,10 @@ enum req_op {
        REQ_OP_WRITE,
        REQ_OP_DISCARD,         /* request to discard sectors */
        REQ_OP_WRITE_SAME,      /* write same block many times */
+       REQ_OP_FLUSH,           /* request for cache flush */
 };
 
-#define REQ_OP_BITS 2
+#define REQ_OP_BITS 3
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE  -1U
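
The REQ_OP_BITS bump is just arithmetic: two bits encode at most four
operations, and REQ_OP_FLUSH is the fifth, so the field grows to three bits.
A compile-time check of that reasoning (REQ_OP_NR is a hypothetical counter,
not a kernel symbol):

enum req_op {
	REQ_OP_READ,
	REQ_OP_WRITE,
	REQ_OP_DISCARD,
	REQ_OP_WRITE_SAME,
	REQ_OP_FLUSH,
	REQ_OP_NR,	/* hypothetical: evaluates to 5, the number of ops */
};

#define REQ_OP_BITS 3

_Static_assert(REQ_OP_NR <= (1 << REQ_OP_BITS),
	       "five ops need three bits; two bits only encode four");

int main(void) { return 0; }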

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 78ae3dbf2de174dc6b4ea852aa4b2d10028f02ea..0c9f8793c87e9e09a2ec0cac13f2a781cffb57b6 100644
@@ -666,6 +666,9 @@ static inline bool rq_mergeable(struct request *rq)
        if (rq->cmd_type != REQ_TYPE_FS)
                return false;
 
+       if (req_op(rq) == REQ_OP_FLUSH)
+               return false;
+
        if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
                return false;
 

diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 2d16fad519b2019bc8985a9dda785acd678cf358..0c70fbb6ea8df673bd80cf449869a1edc0d558a1 100644
@@ -223,6 +223,8 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
        what |= MASK_TC_BIT(op_flags, FUA);
        if (op == REQ_OP_DISCARD)
                what |= BLK_TC_ACT(BLK_TC_DISCARD);
+       if (op == REQ_OP_FLUSH)
+               what |= BLK_TC_ACT(BLK_TC_FLUSH);
 
        pid = tsk->pid;
        if (act_log_check(bt, what, sector, pid))
@@ -1788,6 +1790,9 @@ void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes)
        case REQ_OP_DISCARD:
                rwbs[i++] = 'D';
                break;
+       case REQ_OP_FLUSH:
+               rwbs[i++] = 'F';
+               break;
        case REQ_OP_READ:
                rwbs[i++] = 'R';
                break;