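/*
 * blk-mq scheduler (elevator) interface: setup/teardown, request
 * allocation, merging, insertion and dispatch hooks. The inline
 * helpers below forward to the attached elevator's ops.mq callbacks
 * when present and fall back to cheap defaults otherwise.
 */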
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
				int (*init)(struct blk_mq_hw_ctx *),
				void (*exit)(struct blk_mq_hw_ctx *));

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *));

struct request *blk_mq_sched_get_request(struct request_queue *q,
					 struct bio *bio, unsigned int op,
					 struct blk_mq_alloc_data *data);
void blk_mq_sched_put_request(struct request *rq);

void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block);
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
				   struct list_head *rq_list,
				   struct request *(*get_rq)(struct blk_mq_hw_ctx *));

int blk_mq_sched_setup(struct request_queue *q);
void blk_mq_sched_teardown(struct request_queue *q);

int blk_mq_sched_init(struct request_queue *q);

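/*
 * Fast-path merge check: skip the scheduler entirely when no elevator
 * is attached, merging is disabled on the queue, or the bio itself is
 * not mergeable.
 */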
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
		return false;

	return __blk_mq_sched_bio_merge(q, bio);
}

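/*
 * Scheduler hooks for attaching/releasing per-request private data.
 * With no hook registered, allocation trivially succeeds.
 */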
static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
					   struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.get_rq_priv)
		return e->type->ops.mq.get_rq_priv(q, rq);

	return 0;
}

static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
					    struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.put_rq_priv)
		e->type->ops.mq.put_rq_priv(q, rq);
}

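/*
 * Ask the scheduler whether @bio may be merged into @rq; merging is
 * permitted by default when the elevator provides no hook.
 */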
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);

	return true;
}

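/*
 * Completion hook: notify the scheduler, then release the request's
 * internal scheduler tag. Reaching here without an internal tag is a
 * bug, hence the BUG_ON().
 */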
static inline void
blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.completed_request)
		e->type->ops.mq.completed_request(hctx, rq);

	BUG_ON(rq->internal_tag == -1);

	blk_mq_put_tag(hctx, hctx->sched_tags, rq->mq_ctx, rq->internal_tag);
}

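/* Notify the scheduler that the driver has started executing @rq. */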
static inline void blk_mq_sched_started_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.started_request)
		e->type->ops.mq.started_request(rq);
}

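/* Notify the scheduler that @rq is being requeued. */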
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.requeue_request)
		e->type->ops.mq.requeue_request(rq);
}

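/*
 * True if the scheduler still holds requests for this hardware context;
 * without an elevator there is never scheduler-internal work.
 */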
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.has_work)
		return e->type->ops.mq.has_work(hctx);

	return false;
}

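/*
 * Mark this hctx for a dispatch restart, so a later completion can kick
 * the queue again. With a shared tag set the whole queue is flagged as
 * well, since other queues sharing the tags may need the restart; see
 * blk_mq_sched_restart_queues().
 */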
static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
		if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
			struct request_queue *q = hctx->queue;

			if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
				set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
		}
	}
}

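/* True if a dispatch restart has been requested for this hctx. */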
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif
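
/*
 * Hypothetical usage sketch (not part of this header): a blk-mq aware
 * I/O scheduler registers an elevator_type whose ops.mq callbacks are
 * the hooks dispatched by the helpers above, typically via
 * elv_register() at module init. All "example_*" names below are
 * invented for illustration; only the ops.mq members referenced in
 * this header are assumed.
 *
 *	static struct elevator_type example_sched = {
 *		.ops.mq = {
 *			.get_rq_priv		= example_get_rq_priv,
 *			.put_rq_priv		= example_put_rq_priv,
 *			.allow_merge		= example_allow_merge,
 *			.completed_request	= example_completed_request,
 *			.started_request	= example_started_request,
 *			.requeue_request	= example_requeue_request,
 *			.has_work		= example_has_work,
 *		},
 *		.elevator_name	= "example",
 *	};
 */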