#include <linux/blkdev.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_cpu_notifier {
	struct list_head	list;
	void			*data;
	int			(*notify)(void *data, unsigned long action,
					  unsigned int cpu);
};
struct blk_mq_ctxmap {
	unsigned int		size;
	unsigned int		bits_per_word;
	struct blk_align_bitmap	*map;
};
struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	run_work;
	struct delayed_work	delay_work;
	cpumask_var_t		cpumask;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct blk_mq_ctxmap	ctx_map;

	unsigned int		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	struct blk_mq_tags	*tags;

#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	struct blk_mq_cpu_notifier	cpu_notifier;
};
struct blk_mq_tag_set {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	unsigned int		flags;		/* BLK_MQ_F_* */

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
struct blk_mq_queue_data {
	struct request		*rq;
	struct list_head	*list;
	bool			last;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *,
		const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 *
	 * Tag greater than or equal to queue_depth is for setting up
	 * flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
};
enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_SYSFS_UP	= 1 << 3,
	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
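/*
 * Worked example (illustrative): with BLK_MQ_F_ALLOC_POLICY_START_BIT = 8
 * and BLK_MQ_F_ALLOC_POLICY_BITS = 1, a policy value of 1 maps to flag
 * bit 8 and back:
 *
 *	BLK_ALLOC_POLICY_TO_MQ_FLAG(1)		== 0x100
 *	BLK_MQ_FLAG_TO_ALLOC_POLICY(0x100)	== 1
 */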
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
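/*
 * Illustrative sketch (not part of this header): typical bring-up order
 * for a driver. my_mq_ops is the hypothetical ops table sketched above,
 * struct my_cmd is a hypothetical per-request PDU, and error handling
 * is abbreviated.
 */
#if 0	/* example only */
static struct blk_mq_tag_set my_set;
static struct request_queue *my_queue;

static int my_driver_init(void)
{
	int ret;

	my_set.ops		= &my_mq_ops;
	my_set.nr_hw_queues	= 1;
	my_set.queue_depth	= 64;
	my_set.cmd_size		= sizeof(struct my_cmd);
	my_set.flags		= BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&my_set);
	if (ret)
		return ret;

	my_queue = blk_mq_init_queue(&my_set);
	if (IS_ERR(my_queue)) {
		blk_mq_free_tag_set(&my_set);
		return PTR_ERR(my_queue);
	}
	return 0;
}
#endif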
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
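/*
 * Illustrative sketch (not part of this header): allocating a request
 * outside the normal bio path, e.g. for a driver-private command. The
 * queue q is assumed to come from blk_mq_init_queue().
 */
#if 0	/* example only */
struct request *rq = blk_mq_alloc_request(q, READ, GFP_KERNEL, false);

if (IS_ERR(rq))
	return PTR_ERR(rq);
/* ... set up and issue the request ... */
blk_mq_free_request(rq);
#endif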
enum {
	BLK_MQ_UNIQUE_TAG_BITS	= 16,
	BLK_MQ_UNIQUE_TAG_MASK	= (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
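/*
 * Worked example (illustrative): the unique tag packs the hardware queue
 * index into the upper 16 bits and the per-queue tag into the lower 16.
 * For unique_tag == 0x00030005:
 *
 *	blk_mq_unique_tag_to_hwq(0x00030005)	== 3
 *	blk_mq_unique_tag_to_tag(0x00030005)	== 5
 */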
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *,
		unsigned int, int);
int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq);
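/*
 * Illustrative sketch (not part of this header): a typical completion
 * path. The hardware interrupt reports a tag, the driver resolves it to
 * the request and hands completion back to the block layer. my_dev and
 * its tags pointer are hypothetical.
 */
#if 0	/* example only */
static void my_irq_complete(struct my_dev *dev, unsigned int tag)
{
	struct request *rq = blk_mq_tag_to_rq(dev->tags, tag);

	blk_mq_complete_request(rq);	/* ends the request, via ->complete if set */
}
#endif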
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
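/*
 * Illustrative sketch (not part of this header): bracketing a queue or
 * device reconfiguration with a freeze so no requests are in flight
 * while state changes.
 */
#if 0	/* example only */
blk_mq_freeze_queue(q);		/* blocks until in-flight requests drain */
/* ... update device/queue state ... */
blk_mq_unfreeze_queue(q);
#endif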
/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
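/*
 * Illustrative sketch (not part of this header): with cmd_size set to
 * sizeof(struct my_cmd) in the tag set, every request carries a my_cmd
 * PDU directly behind it. struct my_cmd is hypothetical.
 */
#if 0	/* example only */
struct my_cmd {			/* per-request driver data (PDU) */
	u32	opcode;
	u64	lba;
};

/* e.g. inside queue_rq: */
struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* rq + 1 */
struct request *back = blk_mq_rq_from_pdu(cmd);	/* back == rq */
#endif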
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
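/*
 * Illustrative sketch (not part of this header): iterating all hardware
 * queues of a request queue, here to stop each one.
 */
#if 0	/* example only */
struct blk_mq_hw_ctx *hctx;
unsigned int i;

queue_for_each_hw_ctx(q, hctx, i)
	blk_mq_stop_hw_queue(hctx);
#endif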
#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})
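/*
 * Illustrative note (not part of this header): blk_ctx_sum evaluates the
 * "sum" expression once per software context and totals the results; the
 * current context is exposed as __x, so a hypothetical per-ctx counter
 * could be summed as:
 *
 *	total = blk_ctx_sum(q, __x->some_counter);
 */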