#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;
struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;
	struct work_struct	run_work;
	cpumask_var_t		cpumask;

	unsigned long		flags;		/* BLK_MQ_F_* flags */
	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct sbitmap		ctx_map;

	struct blk_mq_ctx	**ctxs;
	unsigned int		nr_ctx;

	wait_queue_t		dispatch_wait;
	struct blk_mq_tags	*tags;
	struct blk_mq_tags	*sched_tags;

	struct srcu_struct	queue_rq_srcu;

#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
	unsigned int		numa_node;
	unsigned int		queue_num;

	struct delayed_work	delay_work;

	struct hlist_node	cpuhp_dead;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;
};
struct blk_mq_tag_set {
	unsigned int		*mq_map;
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
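
/*
 * Example (editor's sketch, not part of the original header): minimal tag set
 * and queue setup, in the style of drivers such as null_blk.  "struct my_dev",
 * its members, "struct my_cmd" and "my_mq_ops" are hypothetical.
 */
static int my_dev_init_queue(struct my_dev *dev)
{
	int ret;

	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
	dev->tag_set.ops		= &my_mq_ops;
	dev->tag_set.nr_hw_queues	= 1;
	dev->tag_set.queue_depth	= 64;
	dev->tag_set.numa_node		= NUMA_NO_NODE;
	dev->tag_set.cmd_size		= sizeof(struct my_cmd); /* per-request PDU */
	dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.driver_data	= dev;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	dev->queue = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(dev->queue)) {
		blk_mq_free_tag_set(&dev->tag_set);
		return PTR_ERR(dev->queue);
	}
	return 0;
}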
struct blk_mq_queue_data {
	struct request *rq;
	struct list_head *list;
	bool last;
};
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);
typedef int (reinit_request_fn)(void *, struct request *);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 *
	 * Tag greater than or equal to queue_depth is for setting up
	 * flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
	reinit_request_fn	*reinit_request;

	map_queues_fn		*map_queues;
};
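
/*
 * Example (editor's sketch): the shape of a minimal ->queue_rq()
 * implementation.  "my_submit_to_hw" is a hypothetical driver submission
 * helper that returns false when the hardware ring is full; a real driver
 * typically also stops the queue before returning BUSY.
 */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
		       const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);

	if (!my_submit_to_hw(hctx->driver_data, rq))
		return BLK_MQ_RQ_QUEUE_BUSY;	/* requeue for later */

	return BLK_MQ_RQ_QUEUE_OK;
}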
enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,
	BLK_MQ_S_TAG_WAITING	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
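
/*
 * Example (editor's sketch): requesting a round-robin tag allocation policy
 * via the macro above; BLK_TAG_ALLOC_RR comes from <linux/blkdev.h>.
 */
static void my_set_rr_alloc_policy(struct blk_mq_tag_set *set)
{
	set->flags |= BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
}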
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
enum {
	BLK_MQ_REQ_NOWAIT	= (1 << 0), /* return when out of requests */
	BLK_MQ_REQ_RESERVED	= (1 << 1), /* allocate from reserved pool */
	BLK_MQ_REQ_INTERNAL	= (1 << 2), /* allocate internal/sched tag */
};
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
		unsigned int flags, unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
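
/*
 * Example (editor's sketch): allocating a request outside the bio path, as
 * drivers do for internal commands.  With BLK_MQ_REQ_NOWAIT the call returns
 * an ERR_PTR immediately instead of sleeping when no tag is available; the
 * WRITE direction here is only illustrative.
 */
static int my_issue_internal_cmd(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, WRITE, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... set up and issue the request here ... */

	blk_mq_free_request(rq);
	return 0;
}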
enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};
u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
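
/*
 * Example (editor's sketch): round-tripping the encoding above.  A SCSI LLD
 * can carry blk_mq_unique_tag(rq) in a hardware completion and later split
 * it back into the hardware queue index and the per-queue tag.
 */
static inline void my_split_unique_tag(u32 unique_tag, u16 *hwq, u16 *tag)
{
	*hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	*tag = blk_mq_unique_tag_to_tag(unique_tag);
}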
int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
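
/*
 * Example (editor's sketch): a busy_tag_iter_fn that fails every in-flight
 * request, the pattern drivers use in reset handlers together with
 * blk_mq_tagset_busy_iter().
 */
static void my_cancel_rq(struct request *rq, void *priv, bool reserved)
{
	pr_warn("cancelling tag %d\n", rq->tag);
	blk_mq_complete_request(rq, -EIO);
}
/* typically invoked as: blk_mq_tagset_busy_iter(&dev->tag_set, my_cancel_rq, NULL); */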
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);
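
/*
 * Example (editor's sketch): the freeze/unfreeze bracket used when changing
 * queue state that must not race with I/O.  blk_mq_freeze_queue() blocks new
 * submissions and waits for all in-flight requests to complete.
 */
static void my_reconfigure(struct request_queue *q)
{
	blk_mq_freeze_queue(q);
	/* no requests are in flight here; reconfigure safely */
	blk_mq_unfreeze_queue(q);
}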
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);

int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
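
/*
 * Example (editor's sketch): with tag_set.cmd_size set to
 * sizeof(struct my_cmd) (a hypothetical per-request PDU), the two helpers
 * above convert between a request and its PDU with no extra allocation.
 */
static void my_complete_cmd(struct my_cmd *cmd, int error)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);

	blk_mq_end_request(rq, error);
}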
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
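
/*
 * Example (editor's sketch): the canonical use of the iterator above,
 * walking every hardware queue of a request_queue; this mirrors what
 * blk_mq_stop_hw_queues() does internally.
 */
static void my_stop_all_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}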