/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

/*
 * Bits for request->gstate.  The lower two bits carry MQ_RQ_* state value
 * and the upper bits the generation number.
 */
enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,

	MQ_RQ_STATE_BITS	= 2,
	MQ_RQ_STATE_MASK	= (1 << MQ_RQ_STATE_BITS) - 1,
	MQ_RQ_GEN_INC		= 1 << MQ_RQ_STATE_BITS,
};

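/*
 * Illustrative note (not from the original header): request->gstate packs
 * both fields into one word.  For example, a gstate value of 0x9 (0b1001)
 * decodes as generation 2 (0x9 >> MQ_RQ_STATE_BITS) in state
 * MQ_RQ_IN_FLIGHT (0x9 & MQ_RQ_STATE_MASK); each IDLE -> IN_FLIGHT
 * transition adds MQ_RQ_GEN_INC, i.e. bumps the generation by one.
 */
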
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

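/*
 * Illustrative note (not from the original header): q->mq_map[] is the
 * per-CPU table built from the tag set's CPU-to-queue mapping, so
 * blk_mq_map_queue(q, cpu) yields the hardware queue that requests
 * submitted from that CPU are dispatched to.
 */
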
/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline int blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
}

/**
 * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
 * @rq: target request.
 * @state: new state to set.
 *
 * Set @rq's state to @state.  The caller is responsible for ensuring that
 * there are no other updaters.  A request can transition into IN_FLIGHT
 * only from IDLE and doing so increments the generation number.
 */
static inline void blk_mq_rq_update_state(struct request *rq,
					  enum mq_rq_state state)
{
	u64 old_val = READ_ONCE(rq->gstate);
	u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;

	if (state == MQ_RQ_IN_FLIGHT) {
		WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
		new_val += MQ_RQ_GEN_INC;
	}

	/* avoid exposing interim values */
	WRITE_ONCE(rq->gstate, new_val);
}

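/*
 * Illustrative note (not from the original header): over its lifetime a
 * request typically moves IDLE -> IN_FLIGHT (at dispatch, which also bumps
 * the generation) -> COMPLETE, and back to IDLE when it is recycled; the
 * generation number lets the timeout code tell a recycled request apart
 * from the instance it originally armed a timer for.
 */
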
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

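/*
 * Illustrative usage (not from the original header): blk_mq_get_ctx()
 * pins the caller to a CPU via get_cpu(), so every call must be paired
 * with blk_mq_put_ctx() to drop the preemption count again:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	... use ctx ...
 *	blk_mq_put_ctx(ctx);
 */
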
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

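/*
 * Illustrative note (not from the original header): BLK_MQ_REQ_INTERNAL
 * marks allocations made on behalf of an I/O scheduler, which draw from
 * the per-hctx scheduler tag space (sched_tags) rather than from the
 * driver tag space (tags).
 */
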
static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}

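/*
 * Illustrative note (not from the original header): ->get_budget and
 * ->put_budget are optional blk_mq_ops callbacks; when a driver does not
 * provide them, blk_mq_get_dispatch_budget() always grants budget.  A
 * false return tells the dispatch path that the driver cannot accept more
 * requests for now, so dispatching stops.
 */
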
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

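/*
 * Illustrative note (not from the original header): rq->internal_tag is
 * the scheduler tag and rq->tag the driver tag.  The early return skips
 * requests that either never got a driver tag (rq->tag == -1) or were
 * allocated straight from the driver tag space without a scheduler
 * (rq->internal_tag == -1), in which case the tag is only released when
 * the request itself is freed.
 */
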
#endif