#include <linux/blkdev.h>

struct blk_mq_cpu_notifier {
	void (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	run_work;
	struct delayed_work	delay_work;
	cpumask_var_t		cpumask;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	unsigned int		queue_num;

	unsigned int		nr_ctx;
	struct blk_mq_ctx	**ctxs;
	unsigned int		nr_ctx_map;
	unsigned long		*ctx_map;

	struct blk_mq_tags	*tags;

#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		cmd_size;	/* per-request extra data */

	struct blk_mq_cpu_notifier	cpu_notifier;
};

struct blk_mq_tag_set {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	unsigned int		flags;		/* BLK_MQ_F_* */

	struct blk_mq_tags	**tags;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
		unsigned int);
typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	rq_timed_out_fn		*timeout;

	softirq_done_fn		*complete;

	/*
	 * Override for hctx allocations (should probably go)
	 */
	alloc_hctx_fn		*alloc_hctx;
	free_hctx_fn		*free_hctx;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
};

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_SHOULD_SORT	= 1 << 1,
	BLK_MQ_F_SHOULD_IPI	= 1 << 2,

	BLK_MQ_S_STOPPED	= 0,

	BLK_MQ_MAX_DEPTH	= 2048,
};

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

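/*
 * A hedged setup sketch, not part of the header: filling in a tag set
 * and creating a queue, in the style of early blk-mq drivers. Depth and
 * flag values are arbitrary, error unwinding is minimal, and my_mq_ops /
 * struct my_cmd refer to the hypothetical sketch above.
 */
static struct blk_mq_tag_set my_tag_set;

static struct request_queue *my_create_queue(void)
{
	struct request_queue *q;

	memset(&my_tag_set, 0, sizeof(my_tag_set));
	my_tag_set.ops		= &my_mq_ops;
	my_tag_set.nr_hw_queues	= 1;
	my_tag_set.queue_depth	= 64;
	my_tag_set.cmd_size	= sizeof(struct my_cmd);
	my_tag_set.flags	= BLK_MQ_F_SHOULD_MERGE;

	if (blk_mq_alloc_tag_set(&my_tag_set))
		return NULL;

	q = blk_mq_init_queue(&my_tag_set);	/* ERR_PTR on failure */
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&my_tag_set);
		return NULL;
	}
	return q;
}
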
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);

void blk_mq_end_io(struct request *rq, int error);
void __blk_mq_end_io(struct request *rq, int error);

void blk_mq_complete_request(struct request *rq);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);

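/*
 * A hedged completion sketch, not part of the header: a driver's
 * interrupt path typically hands the finished request back with
 * blk_mq_complete_request(), which bounces completion to the submitting
 * CPU and invokes ops->complete, and then restarts any hardware queues
 * it had stopped while the device was full. "struct my_dev" and its
 * queue member are hypothetical.
 */
static void my_irq_done(struct my_dev *dev, struct request *rq)
{
	blk_mq_complete_request(rq);

	/* a device slot freed up; kick queues stopped by queue_rq */
	blk_mq_start_stopped_hw_queues(dev->queue, true);
}
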
/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return (void *) rq + sizeof(*rq);
}

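/*
 * A hedged sketch, not part of the header: the PDU helpers are plain
 * pointer arithmetic because the cmd_size extra bytes sit directly
 * behind each struct request. An init_request hook might use them like
 * this; "struct my_cmd" and its rq member are hypothetical.
 */
static int my_init_request(void *data, struct request *rq,
			   unsigned int hctx_idx, unsigned int rq_idx,
			   unsigned int numa_node)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->rq = rq;	/* same value as blk_mq_rq_from_pdu(cmd) */
	return 0;
}
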
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})

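/*
 * A hedged iteration sketch, not part of the header: the macros visit
 * every hardware context of a queue (or every software context of an
 * hctx), and blk_ctx_sum() accumulates the "sum" expression across all
 * software contexts. A stop-everything helper could be written as below;
 * the header's blk_mq_stop_hw_queues() does exactly this internally.
 */
static void my_stop_all(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}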