/* SPDX-License-Identifier: GPL-2.0 */
#include "blk-mq-tag.h"

struct blk_mq_ctxs {
        struct kobject kobj;
        struct blk_mq_ctx __percpu      *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_lists[HCTX_MAX_TYPES];
        } ____cacheline_aligned_in_smp;
        unsigned short          index_hw[HCTX_MAX_TYPES];
        struct blk_mq_hw_ctx    *hctxs[HCTX_MAX_TYPES];

        /* incremented at dispatch time */
        unsigned long           rq_dispatched[2];
        unsigned long           rq_merged;

        /* incremented at completion time */
        unsigned long           ____cacheline_aligned_in_smp rq_completed[2];

        struct request_queue    *queue;
        struct blk_mq_ctxs      *ctxs;
} ____cacheline_aligned_in_smp;

void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
                             unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
                                        unsigned int hctx_idx,
                                        unsigned int nr_tags,
                                        unsigned int reserved_tags,
                                        unsigned int flags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                             bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
                                  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                            struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type, cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                          enum hctx_type type,
                                                          unsigned int cpu)
{
        return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}
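
/*
 * Illustrative sketch (not part of the header): resolving the default-type
 * hardware queue for, say, CPU 3 walks two tables, roughly:
 *
 *      unsigned int hw_idx = q->tag_set->map[HCTX_TYPE_DEFAULT].mq_map[3];
 *      struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[hw_idx];
 *
 * i.e. the per-type CPU map picks a hardware queue index, which then indexes
 * the queue's hctx array.
 */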

/*
 * blk_mq_map_queue() - map (cmd_flags, type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     unsigned int flags,
                                                     struct blk_mq_ctx *ctx)
{
        enum hctx_type type = HCTX_TYPE_DEFAULT;

        /*
         * The caller ensures that if REQ_HIPRI is set, polling must be enabled.
         */
        if (flags & REQ_HIPRI)
                type = HCTX_TYPE_POLL;
        else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
                type = HCTX_TYPE_READ;

        return ctx->hctxs[type];
}
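
/*
 * Illustrative sketch (not part of the header): a read bio submitted without
 * REQ_HIPRI resolves to the read queue map, roughly:
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);    // defined below
 *      struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, REQ_OP_READ, ctx);
 *
 * Here type ends up as HCTX_TYPE_READ and ctx->hctxs[type] returns the
 * hardware queue the submitting CPU was mapped to for that type.
 */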

extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                                  unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could just as well be per-node,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctxs are persistent. This does mean
 * that we can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
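
/*
 * Illustrative sketch (not part of the header): the allocation path pairs the
 * two helpers above, binding a request to the submitting CPU's software queue
 * and the matching hardware queue, roughly:
 *
 *      data->ctx = blk_mq_get_ctx(q);
 *      data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
 *
 * Because preemption is not disabled, the CPU may change right after the
 * lookup; that is tolerated since the ctx is persistent and only acts as a
 * per-CPU staging list.
 */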

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
        unsigned int cmd_flags;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
{
        return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (data->q->elevator)
                return data->hctx->sched_tags;

        return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
                         unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
{
        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(q);
}

static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
{
        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(q);
        return true;
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_sbitmap_shared(hctx->flags))
                atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
        else
                atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_sbitmap_shared(hctx->flags))
                atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
        else
                atomic_dec(&hctx->nr_active);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_sbitmap_shared(hctx->flags))
                return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
        return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = BLK_MQ_NO_TAG;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                __blk_mq_dec_active_requests(hctx);
        }
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
        int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices, this
 * ordering change can cause write BIO failures with zoned block devices as
 * these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the target
 * request queue is for a zoned block device and the BIO to plug is a write
 * operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
                                           struct bio *bio)
{
        /*
         * For regular block devices or read operations, use the context plug
         * which may be NULL if blk_start_plug() was not executed.
         */
        if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
                return current->plug;

        /* Zoned block device write operation case: do not plug the BIO */
        return NULL;
}
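
/*
 * Illustrative sketch (not part of the header): submission code consults
 * blk_mq_plug() instead of dereferencing current->plug directly, so that
 * writes to zoned devices bypass plugging, roughly:
 *
 *      struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *      if (plug)
 *              ;       // queue the request on the plug list for a later flush
 *      else
 *              ;       // insert/issue immediately, preserving write order
 */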

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        if (blk_mq_is_sbitmap_shared(hctx->flags)) {
                struct request_queue *q = hctx->queue;
                struct blk_mq_tag_set *set = q->tag_set;

                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return true;
                users = atomic_read(&set->active_queues_shared_sbitmap);
        } else {
                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return true;
                users = atomic_read(&hctx->tags->active_queues);
        }

        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return __blk_mq_active_requests(hctx) < depth;
}