/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
 */
struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;

	struct delayed_work	run_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	void			*sched_data;
	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct sbitmap		ctx_map;

	struct blk_mq_ctx	*dispatch_from;
	unsigned int		dispatch_busy;

	unsigned short		type;
	unsigned short		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	spinlock_t		dispatch_wait_lock;
	wait_queue_entry_t	dispatch_wait;
	atomic_t		wait_index;

	struct blk_mq_tags	*tags;
	struct blk_mq_tags	*sched_tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;
	unsigned int		nr_expired;

	struct hlist_node	cpuhp_dead;
	struct kobject		kobj;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
#endif

	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
	struct srcu_struct	srcu[0];
};

struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
};

enum {
	HCTX_MAX_TYPES = 1,
};

struct blk_mq_tag_set {
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;	/* nr hw queues across maps */
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};

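/*
 * Example (illustrative sketch only, not part of this header): a minimal
 * driver might fill in a tag set and create a queue roughly like this.
 * The names "my_mq_ops", "my_cmd" and "my_dev" are hypothetical.
 *
 *	static struct blk_mq_tag_set set;
 *
 *	set.ops          = &my_mq_ops;
 *	set.nr_hw_queues = 1;
 *	set.queue_depth  = 64;
 *	set.numa_node    = NUMA_NO_NODE;
 *	set.cmd_size     = sizeof(struct my_cmd);	// per-request PDU
 *	set.flags        = BLK_MQ_F_SHOULD_MERGE;
 *
 *	if (blk_mq_alloc_tag_set(&set))
 *		goto fail;
 *	my_dev->queue = blk_mq_init_queue(&set);
 *	if (IS_ERR(my_dev->queue)) {
 *		blk_mq_free_tag_set(&set);
 *		goto fail;
 *	}
 */
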
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
		const struct blk_mq_queue_data *);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
typedef bool (busy_fn)(struct request_queue *);
typedef void (complete_fn)(struct request *);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Reserve budget before queueing a request. Once ->queue_rq() has
	 * run, it is the driver's responsibility to release the reserved
	 * budget. The failure case of ->get_budget() must also be handled
	 * to avoid I/O deadlock.
	 */
	get_budget_fn		*get_budget;
	put_budget_fn		*put_budget;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	complete_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is used for setting
	 * up the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
	/* Called from inside blk_get_request() */
	void (*initialize_rq_fn)(struct request *rq);

	/*
	 * If set, returns whether or not this queue is currently busy
	 */
	busy_fn			*busy;

	map_queues_fn		*map_queues;

#ifdef CONFIG_BLK_DEBUG_FS
	/*
	 * Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};

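/*
 * Example (illustrative sketch only): the heart of a driver is its
 * ->queue_rq() implementation. The helpers "my_hw_full" and "my_submit"
 * below are hypothetical.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (my_hw_full(hctx->driver_data))
 *			return BLK_STS_RESOURCE;	// retried later
 *
 *		blk_mq_start_request(rq);
 *		my_submit(hctx->driver_data, rq);
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 */
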
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

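/*
 * Example (illustrative): BLK_ALLOC_POLICY_TO_MQ_FLAG() packs a tag
 * allocation policy into the BLK_MQ_F_* flag word, and
 * BLK_MQ_FLAG_TO_ALLOC_POLICY() recovers it:
 *
 *	set->flags = BLK_MQ_F_SHOULD_MERGE |
 *		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
 *	// BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags) == BLK_TAG_ALLOC_FIFO
 */
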
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
						const struct blk_mq_ops *ops,
						unsigned int queue_depth,
						unsigned int set_flags);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* allocate internal/sched tag */
	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
	/* set RQF_PREEMPT */
	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

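/*
 * Example (illustrative): allocate a driver-private request outside the
 * normal I/O path, e.g. for an internal command, without sleeping and
 * from the reserved tag pool:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN,
 *				  BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 */
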
enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

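/*
 * Example (illustrative): blk_mq_unique_tag() encodes the hardware queue
 * index in the upper 16 bits and the per-queue tag in the lower 16 bits,
 * so the two halves can be recovered independently:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */
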
int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);

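/*
 * Example (illustrative): the usual pattern for changing queue or device
 * state that must not race with in-flight requests is freeze/update/unfreeze:
 *
 *	blk_mq_freeze_queue(q);		// drain all in-flight requests
 *	// ... update queue or device state ...
 *	blk_mq_unfreeze_queue(q);	// resume request processing
 */
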
int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

/**
 * blk_mq_mark_complete() - Set request state to complete
 * @rq: request to set to complete state
 *
 * Returns true if request state was successfully set to complete. If
 * successful, the caller is responsible for seeing this request is ended, as
 * blk_mq_complete_request will not work again.
 */
static inline bool blk_mq_mark_complete(struct request *rq)
{
	return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
			MQ_RQ_IN_FLIGHT;
}

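/*
 * Example (illustrative sketch, not a prescribed pattern): a driver's
 * ->timeout() handler could use blk_mq_mark_complete() to win the race
 * against a normal completion before ending the request itself:
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq,
 *						   bool reserved)
 *	{
 *		if (blk_mq_mark_complete(rq)) {
 *			blk_mq_end_request(rq, BLK_STS_TIMEOUT);
 *			return BLK_EH_DONE;
 *		}
 *		return BLK_EH_RESET_TIMER;	// completion already won
 *	}
 */
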
/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

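/*
 * Example (illustrative): with set->cmd_size = sizeof(struct my_cmd)
 * ("struct my_cmd" is hypothetical), each request is followed in memory
 * by a driver PDU of that size:
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);	// == rq
 */
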
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

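/*
 * Example (illustrative): walk every hardware queue of a request queue,
 * and every software context mapped to one hardware queue:
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	struct blk_mq_ctx *ctx;
 *	int i, j;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		hctx_for_each_ctx(hctx, ctx, j)
 *			pr_info("hctx %d: ctx %p\n", i, ctx);
 */
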
320ae51f | 349 | |
320ae51f | 350 | #endif |