/* block/blk-mq-sched.h: framework for MQ capable IO schedulers */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

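/* Allocate and free per-hardware-queue private data for the attached scheduler. */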
int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
                                int (*init)(struct blk_mq_hw_ctx *),
                                void (*exit)(struct blk_mq_hw_ctx *));

void blk_mq_sched_free_hctx_data(struct request_queue *q,
                                 void (*exit)(struct blk_mq_hw_ctx *));

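/* Scheduler-aware request allocation and release. */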
struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bio, unsigned int op, struct blk_mq_alloc_data *data);
void blk_mq_sched_put_request(struct request *rq);

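/* Merge and insertion helpers used on the bio submission path. */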
void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);

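/* Dispatch: move requests from the scheduler onto the hctx dispatch list. */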
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
                                   struct list_head *rq_list,
                                   struct request *(*get_rq)(struct blk_mq_hw_ctx *));

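/* Set up and tear down per-queue scheduler state (such as scheduler tags). */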
int blk_mq_sched_setup(struct request_queue *q);
void blk_mq_sched_teardown(struct request_queue *q);

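/*
 * Try to merge @bio into an already queued request. Skip the attempt
 * entirely if there is no elevator, the queue has merging disabled, or
 * the bio itself is not mergeable.
 */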
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
                return false;

        return __blk_mq_sched_bio_merge(q, bio);
}

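/* Let the scheduler attach private data to @rq; returns 0 if there is no hook. */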
static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
                                           struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.get_rq_priv)
                return e->type->ops.mq.get_rq_priv(q, rq);

        return 0;
}

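/* Release any scheduler-private data attached to @rq. */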
static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
                                            struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.put_rq_priv)
                e->type->ops.mq.put_rq_priv(q, rq);
}

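/*
 * Insert a single request: hand it to the scheduler as a one-entry list if
 * an ->insert_requests() hook exists, otherwise add it to the software
 * queue directly. Optionally run the hardware queue afterwards.
 */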
static inline void
blk_mq_sched_insert_request(struct request *rq, bool at_head, bool run_queue,
                            bool async)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

        if (e && e->type->ops.mq.insert_requests) {
                LIST_HEAD(list);

                list_add(&rq->queuelist, &list);
                e->type->ops.mq.insert_requests(hctx, &list, at_head);
        } else {
                spin_lock(&ctx->lock);
                __blk_mq_insert_request(hctx, rq, at_head);
                spin_unlock(&ctx->lock);
        }

        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
}

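/* Insert a batch of requests from a software queue, then run the hctx. */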
static inline void
blk_mq_sched_insert_requests(struct request_queue *q, struct blk_mq_ctx *ctx,
                             struct list_head *list, bool run_queue_async)
{
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.insert_requests)
                e->type->ops.mq.insert_requests(hctx, list, false);
        else
                blk_mq_insert_requests(hctx, ctx, list);

        blk_mq_run_hw_queue(hctx, run_queue_async);
}

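/* Ask the scheduler whether @bio may be merged into @rq; default is to allow it. */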
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
                         struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.allow_merge)
                return e->type->ops.mq.allow_merge(q, rq, bio);

        return true;
}

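/*
 * Completion hook: notify the scheduler, return the internal scheduler tag,
 * and re-run the hardware queue if a restart was flagged in the meantime
 * (see blk_mq_sched_mark_restart()).
 */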
static inline void
blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.completed_request)
                e->type->ops.mq.completed_request(hctx, rq);

        BUG_ON(rq->internal_tag == -1);

        blk_mq_put_tag(hctx, hctx->sched_tags, rq->mq_ctx, rq->internal_tag);

        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
                clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
                blk_mq_run_hw_queue(hctx, true);
        }
}

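/* Notify the scheduler that @rq has been started. */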
static inline void blk_mq_sched_started_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.started_request)
                e->type->ops.mq.started_request(rq);
}

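/* Notify the scheduler that @rq is being requeued. */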
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.requeue_request)
                e->type->ops.mq.requeue_request(rq);
}

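/* True if the scheduler still holds requests for this hardware queue. */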
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.has_work)
                return e->type->ops.mq.has_work(hctx);

        return false;
}

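/* Flag the hctx so that a later completion re-runs the queue. */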
static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

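/* Check whether a queue restart has been requested. */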
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif