#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
                                int (*init)(struct blk_mq_hw_ctx *),
                                void (*exit)(struct blk_mq_hw_ctx *));

void blk_mq_sched_free_hctx_data(struct request_queue *q,
                                 void (*exit)(struct blk_mq_hw_ctx *));

struct request *blk_mq_sched_get_request(struct request_queue *q,
                                         struct bio *bio, unsigned int op,
                                         struct blk_mq_alloc_data *data);
void blk_mq_sched_put_request(struct request *rq);

void blk_mq_sched_request_inserted(struct request *rq);
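
/*
 * Merge handling: the helpers below let the blk-mq core ask the attached
 * elevator whether, and where, a bio can be merged into a queued request.
 */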
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
                            struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);

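/*
 * Insertion and dispatch: requests enter through the elevator (when one is
 * attached) and are later pulled from it onto the hardware dispatch list.
 */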
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async, bool can_block);
void blk_mq_sched_insert_requests(struct request_queue *q,
                                  struct blk_mq_ctx *ctx,
                                  struct list_head *list, bool run_queue_async);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
                                   struct list_head *rq_list,
                                   struct request *(*get_rq)(struct blk_mq_hw_ctx *));

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);

int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
                           unsigned int hctx_idx);
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
                            unsigned int hctx_idx);

int blk_mq_sched_init(struct request_queue *q);

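/*
 * Fast-path merge check: bail out cheaply when there is no elevator, when
 * merging is disabled on the queue, or when the bio itself is not mergeable.
 */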
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
                return false;

        return __blk_mq_sched_bio_merge(q, bio);
}

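/*
 * Per-request private data: let the elevator attach and release its own
 * state for each request, when it implements the hooks.
 */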
static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
                                           struct request *rq,
                                           struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.get_rq_priv)
                return e->type->ops.mq.get_rq_priv(q, rq, bio);

        return 0;
}

static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
                                            struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.put_rq_priv)
                e->type->ops.mq.put_rq_priv(q, rq);
}

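/*
 * allow_merge lets the elevator veto a proposed merge; without a hook,
 * merging is always permitted.
 */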
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
                         struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.allow_merge)
                return e->type->ops.mq.allow_merge(q, rq, bio);

        return true;
}

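/*
 * Completion: notify the elevator, then return the request's internal
 * scheduler tag to the per-hctx sched_tags set.
 */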
static inline void
blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.completed_request)
                e->type->ops.mq.completed_request(hctx, rq);

        BUG_ON(rq->internal_tag == -1);

        blk_mq_put_tag(hctx, hctx->sched_tags, rq->mq_ctx, rq->internal_tag);
}

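/*
 * Start, requeue and has_work hooks are simply forwarded to the elevator
 * when it implements them.
 */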
static inline void blk_mq_sched_started_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.started_request)
                e->type->ops.mq.started_request(rq);
}

static inline void blk_mq_sched_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.requeue_request)
                e->type->ops.mq.requeue_request(rq);
}

static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.has_work)
                return e->type->ops.mq.has_work(hctx);

        return false;
}

/*
 * Mark a hardware queue as needing a restart.
 */
static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

/*
 * Mark a hardware queue and the request queue it belongs to as needing a
 * restart.
 */
static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
        if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
                set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
}

static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif
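
/*
 * Usage sketch (illustrative only, not part of this header): an mq I/O
 * scheduler publishes its hooks through struct elevator_type, and the inline
 * helpers above forward into ops.mq whenever the corresponding hook is
 * non-NULL. The "example_*" names below are hypothetical placeholders.
 *
 *	static struct elevator_type example_sched = {
 *		.ops.mq = {
 *			.get_rq_priv		= example_get_rq_priv,
 *			.put_rq_priv		= example_put_rq_priv,
 *			.allow_merge		= example_allow_merge,
 *			.has_work		= example_has_work,
 *			.completed_request	= example_completed_request,
 *		},
 *		.uses_mq	= true,
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 * Registration then goes through elv_register(&example_sched) from the
 * module init path, as the existing mq-deadline scheduler does.
 */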