block: hook up writeback throttling
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 19795886d46e8a1e2f357d85295a2cc9bf153608..d180c989a0e52238ab140ffdb0648801a662b30d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -31,6 +31,7 @@
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 #include "blk-stat.h"
+#include "blk-wbt.h"
 
 static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
@@ -326,6 +327,8 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);
+
+       wbt_done(q->rq_wb, &rq->issue_stat);
        rq->rq_flags = 0;
 
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
@@ -354,6 +357,7 @@ inline void __blk_mq_end_request(struct request *rq, int error)
        blk_account_io_done(rq);
 
        if (rq->end_io) {
+               wbt_done(rq->q->rq_wb, &rq->issue_stat);
                rq->end_io(rq, error);
        } else {
                if (unlikely(blk_bidi_rq(rq)))
@@ -471,6 +475,7 @@ void blk_mq_start_request(struct request *rq)
        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
                blk_stat_set_issue_time(&rq->issue_stat);
                rq->rq_flags |= RQF_STATS;
+               wbt_issue(q->rq_wb, &rq->issue_stat);
        }
 
        blk_add_timer(rq);
@@ -508,6 +513,7 @@ static void __blk_mq_requeue_request(struct request *rq)
        struct request_queue *q = rq->q;
 
        trace_block_rq_requeue(q, rq);
+       wbt_requeue(q->rq_wb, &rq->issue_stat);
 
        if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
                if (q->dma_drain_size && blk_rq_bytes(rq))
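The hunks above wire up the per-request accounting: wbt_done() fires when a request is freed or finished through ->end_io, wbt_issue() stamps the request when it is started (under the same QUEUE_FLAG_STATS check as blk_stat_set_issue_time(), so it presumably relies on queue stats being enabled), and wbt_requeue() backs the issue accounting out when a started request goes back on the queue. A minimal userspace model of that balance is sketched below; the toy_* names are illustrative stand-ins, not the kernel's rq_wb implementation.

#include <assert.h>
#include <stdbool.h>

/*
 * Toy model of the issue/requeue/done pairing: every issue must be
 * balanced by either a requeue (the request will be issued again) or
 * a done, so the inflight count returns to zero.
 */
struct toy_stat {
	bool issued;		/* stands in for the state kept in issue_stat */
};

static int inflight;

static void toy_issue(struct toy_stat *s)
{
	s->issued = true;
	inflight++;
}

static void toy_requeue(struct toy_stat *s)
{
	if (s->issued) {	/* undo the issue accounting */
		s->issued = false;
		inflight--;
	}
}

static void toy_done(struct toy_stat *s)
{
	if (s->issued) {	/* complete the accounting exactly once */
		s->issued = false;
		inflight--;
	}
}

int main(void)
{
	struct toy_stat rq = { false };

	toy_issue(&rq);
	toy_requeue(&rq);	/* e.g. the driver could not dispatch */
	toy_issue(&rq);		/* issued again */
	toy_done(&rq);
	assert(inflight == 0);	/* issue balanced by requeue/done */
	return 0;
}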
@@ -1339,6 +1345,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        struct blk_plug *plug;
        struct request *same_queue_rq = NULL;
        blk_qc_t cookie;
+       unsigned int wb_acct;
 
        blk_queue_bounce(q, &bio);
 
@@ -1353,9 +1360,15 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
            blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                return BLK_QC_T_NONE;
 
+       wb_acct = wbt_wait(q->rq_wb, bio, NULL);
+
        rq = blk_mq_map_request(q, bio, &data);
-       if (unlikely(!rq))
+       if (unlikely(!rq)) {
+               __wbt_done(q->rq_wb, wb_acct);
                return BLK_QC_T_NONE;
+       }
+
+       wbt_track(&rq->issue_stat, wb_acct);
 
        cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
 
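This hunk sets up the token discipline on the submission side: wbt_wait() may throttle the submitter and returns an accounting value; if no request can be allocated that value is handed straight back with __wbt_done(), otherwise wbt_track() stores it in the request's issue_stat so the completion hooks above can release it via wbt_done(). A compact userspace sketch of that "return the token exactly once" rule follows; the toy_* helpers are hypothetical, not the wbt API.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

static int tokens_out;			/* accounting tokens currently outstanding */

static unsigned int toy_wait(void)	/* stands in for wbt_wait() */
{
	tokens_out++;
	return 1;
}

static void toy_put(unsigned int tok)	/* stands in for __wbt_done()/wbt_done() */
{
	if (tok)
		tokens_out--;
}

struct toy_rq {
	unsigned int wb_acct;		/* what wbt_track() would record */
};

static bool toy_submit(struct toy_rq *rq)
{
	unsigned int wb_acct = toy_wait();

	if (!rq) {			/* request allocation failed */
		toy_put(wb_acct);	/* give the token back immediately */
		return false;
	}
	rq->wb_acct = wb_acct;		/* "track": completion releases it later */
	return true;
}

static void toy_complete(struct toy_rq *rq)
{
	toy_put(rq->wb_acct);
	rq->wb_acct = 0;
}

int main(void)
{
	struct toy_rq rq = { 0 };

	toy_submit(NULL);		/* allocation-failure path */
	if (toy_submit(&rq))		/* normal path */
		toy_complete(&rq);
	assert(tokens_out == 0);	/* no token leaks on either path */
	return 0;
}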
@@ -1439,6 +1452,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
        struct blk_mq_alloc_data data;
        struct request *rq;
        blk_qc_t cookie;
+       unsigned int wb_acct;
 
        blk_queue_bounce(q, &bio);
 
@@ -1455,9 +1469,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
        } else
                request_count = blk_plug_queued_count(q);
 
+       wb_acct = wbt_wait(q->rq_wb, bio, NULL);
+
        rq = blk_mq_map_request(q, bio, &data);
-       if (unlikely(!rq))
+       if (unlikely(!rq)) {
+               __wbt_done(q->rq_wb, wb_acct);
                return BLK_QC_T_NONE;
+       }
+
+       wbt_track(&rq->issue_stat, wb_acct);
 
        cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
 
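The single-queue path gets the same treatment as blk_mq_make_request() above: wbt_wait() before the request is mapped, __wbt_done() if mapping fails, wbt_track() otherwise, so the sketch after the previous hunk applies here unchanged.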
@@ -2139,6 +2159,8 @@ void blk_mq_free_queue(struct request_queue *q)
        list_del_init(&q->all_q_node);
        mutex_unlock(&all_q_mutex);
 
+       wbt_exit(q);
+
        blk_mq_del_queue_tag_set(q);
 
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
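wbt_exit() tears the throttling state down as the queue is freed, after it has been taken off all_q_list. Note that every hook added in this patch is called unconditionally, without checking q->rq_wb first, which only works if the wbt entry points treat a NULL rq_wb as a no-op; the sketch below models that assumption (toy_* names are illustrative, not the wbt API).

#include <stdio.h>
#include <stdlib.h>

struct toy_wb {
	int window;		/* placeholder for real throttling state */
};

static void toy_done(struct toy_wb *wb)
{
	if (!wb)		/* throttling never enabled, or already torn down */
		return;
	printf("accounted a completion\n");
}

static void toy_exit(struct toy_wb **wbp)
{
	free(*wbp);
	*wbp = NULL;		/* later hook calls fall through the NULL check */
}

int main(void)
{
	struct toy_wb *wb = calloc(1, sizeof(*wb));

	toy_done(wb);		/* normal accounting while the queue is live */
	toy_exit(&wb);
	toy_done(wb);		/* harmless no-op after teardown */
	return 0;
}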