#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;

/* Per-hctx CPU hotplug notifier */
struct blk_mq_cpu_notifier {
	struct list_head	list;
	void			*data;
	int			(*notify)(void *data, unsigned long action,
					  unsigned int cpu);
};
13 | ||
14 | struct blk_mq_ctxmap { | |
15 | unsigned int map_size; | |
16 | unsigned int bits_per_word; | |
17 | struct blk_align_bitmap *map; | |
18 | }; | |
19 | ||
20 | struct blk_mq_hw_ctx { | |
21 | struct { | |
22 | spinlock_t lock; | |
23 | struct list_head dispatch; | |
24 | } ____cacheline_aligned_in_smp; | |
25 | ||
26 | unsigned long state; /* BLK_MQ_S_* flags */ | |
27 | struct delayed_work run_work; | |
28 | struct delayed_work delay_work; | |
29 | cpumask_var_t cpumask; | |
30 | int next_cpu; | |
31 | int next_cpu_batch; | |
32 | ||
33 | unsigned long flags; /* BLK_MQ_F_* flags */ | |
34 | ||
35 | struct request_queue *queue; | |
36 | unsigned int queue_num; | |
37 | ||
38 | void *driver_data; | |
39 | ||
40 | struct blk_mq_ctxmap ctx_map; | |
41 | ||
42 | unsigned int nr_ctx; | |
43 | struct blk_mq_ctx **ctxs; | |
44 | ||
45 | atomic_t wait_index; | |
46 | ||
47 | struct blk_mq_tags *tags; | |
48 | ||
49 | unsigned long queued; | |
50 | unsigned long run; | |
51 | #define BLK_MQ_MAX_DISPATCH_ORDER 10 | |
52 | unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; | |
53 | ||
54 | unsigned int numa_node; | |
55 | unsigned int cmd_size; /* per-request extra data */ | |
56 | ||
57 | atomic_t nr_active; | |
58 | ||
59 | struct blk_mq_cpu_notifier cpu_notifier; | |
60 | struct kobject kobj; | |
61 | }; | |
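
/*
 * Example: a driver can hang per-hctx state off ->driver_data from its
 * init_hctx callback. An illustrative sketch only; "struct my_hctx_data"
 * and "my_init_hctx" are hypothetical driver names, not part of this API:
 *
 *	static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 *				unsigned int index)
 *	{
 *		struct my_hctx_data *d;
 *
 *		d = kzalloc_node(sizeof(*d), GFP_KERNEL, hctx->numa_node);
 *		if (!d)
 *			return -ENOMEM;
 *		hctx->driver_data = d;
 *		return 0;
 *	}
 */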
62 | ||
63 | struct blk_mq_tag_set { | |
64 | struct blk_mq_ops *ops; | |
65 | unsigned int nr_hw_queues; | |
66 | unsigned int queue_depth; /* max hw supported */ | |
67 | unsigned int reserved_tags; | |
68 | unsigned int cmd_size; /* per-request extra data */ | |
69 | int numa_node; | |
70 | unsigned int timeout; | |
71 | unsigned int flags; /* BLK_MQ_F_* */ | |
72 | void *driver_data; | |
73 | ||
74 | struct blk_mq_tags **tags; | |
75 | ||
76 | struct mutex tag_list_lock; | |
77 | struct list_head tag_list; | |
78 | }; | |
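
/*
 * Example: a typical tag set setup for a single-queue driver (illustrative
 * sketch; "my_mq_ops" and "struct my_cmd" are hypothetical driver names):
 *
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->reserved_tags = 0;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	set->numa_node = NUMA_NO_NODE;
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *
 * The filled-in set is then handed to blk_mq_alloc_tag_set() and
 * blk_mq_init_queue(), declared below.
 */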
79 | ||
80 | typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool); | |
81 | typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); | |
82 | typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); | |
83 | typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); | |
84 | typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); | |
85 | typedef int (init_request_fn)(void *, struct request *, unsigned int, | |
86 | unsigned int, unsigned int); | |
87 | typedef void (exit_request_fn)(void *, struct request *, unsigned int, | |
88 | unsigned int); | |
89 | ||
90 | typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, | |
91 | bool); | |
92 | ||
93 | struct blk_mq_ops { | |
94 | /* | |
95 | * Queue request | |
96 | */ | |
97 | queue_rq_fn *queue_rq; | |
98 | ||
99 | /* | |
100 | * Map to specific hardware queue | |
101 | */ | |
102 | map_queue_fn *map_queue; | |
103 | ||
104 | /* | |
105 | * Called on request timeout | |
106 | */ | |
107 | timeout_fn *timeout; | |
108 | ||
109 | softirq_done_fn *complete; | |
110 | ||
111 | /* | |
112 | * Called when the block layer side of a hardware queue has been | |
113 | * set up, allowing the driver to allocate/init matching structures. | |
114 | * Ditto for exit/teardown. | |
115 | */ | |
116 | init_hctx_fn *init_hctx; | |
117 | exit_hctx_fn *exit_hctx; | |
118 | ||
119 | /* | |
120 | * Called for every command allocated by the block layer to allow | |
121 | * the driver to set up driver specific data. | |
122 | * Ditto for exit/teardown. | |
123 | */ | |
124 | init_request_fn *init_request; | |
125 | exit_request_fn *exit_request; | |
126 | }; | |
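
/*
 * Example: a minimal ops table. Only queue_rq and map_queue are strictly
 * required; blk_mq_map_queue() (declared below) is the stock mapping used
 * by drivers without special routing needs. "my_queue_rq" is a hypothetical
 * driver function:
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 */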
127 | ||
128 | enum { | |
129 | BLK_MQ_RQ_QUEUE_OK = 0, /* queued fine */ | |
130 | BLK_MQ_RQ_QUEUE_BUSY = 1, /* requeue IO for later */ | |
131 | BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */ | |
132 | ||
133 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, | |
134 | BLK_MQ_F_TAG_SHARED = 1 << 1, | |
135 | BLK_MQ_F_SG_MERGE = 1 << 2, | |
136 | BLK_MQ_F_SYSFS_UP = 1 << 3, | |
137 | ||
138 | BLK_MQ_S_STOPPED = 0, | |
139 | BLK_MQ_S_TAG_ACTIVE = 1, | |
140 | ||
141 | BLK_MQ_MAX_DEPTH = 10240, | |
142 | ||
143 | BLK_MQ_CPU_WORK_BATCH = 8, | |
144 | }; | |
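
/*
 * Example: the BLK_MQ_RQ_QUEUE_* codes are what ->queue_rq() returns to
 * tell the core how submission went. A sketch under hypothetical names;
 * my_hw_submit() stands in for the driver's actual hardware submission:
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
 *			       bool last)
 *	{
 *		blk_mq_start_request(rq);
 *
 *		if (my_hw_submit(hctx->driver_data, rq) == -EBUSY)
 *			return BLK_MQ_RQ_QUEUE_BUSY;	/* core requeues it */
 *
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */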
145 | ||
146 | struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); | |
147 | int blk_mq_register_disk(struct gendisk *); | |
148 | void blk_mq_unregister_disk(struct gendisk *); | |
149 | ||
150 | int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); | |
151 | void blk_mq_free_tag_set(struct blk_mq_tag_set *set); | |
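
/*
 * Example: the usual bring-up order, continuing the tag set sketch above
 * (error handling abbreviated; "set" is the driver's blk_mq_tag_set):
 *
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 *
 * Teardown reverses this: blk_cleanup_queue(q), then blk_mq_free_tag_set().
 */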
152 | ||
153 | void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); | |
154 | ||
155 | void blk_mq_insert_request(struct request *, bool, bool, bool); | |
156 | void blk_mq_run_queues(struct request_queue *q, bool async); | |
157 | void blk_mq_free_request(struct request *rq); | |
158 | bool blk_mq_can_queue(struct blk_mq_hw_ctx *); | |
159 | struct request *blk_mq_alloc_request(struct request_queue *q, int rw, | |
160 | gfp_t gfp, bool reserved); | |
161 | struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); | |
162 | ||
163 | struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); | |
164 | struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); | |
165 | ||
166 | void blk_mq_start_request(struct request *rq); | |
167 | void blk_mq_end_request(struct request *rq, int error); | |
168 | void __blk_mq_end_request(struct request *rq, int error); | |
169 | ||
170 | void blk_mq_requeue_request(struct request *rq); | |
171 | void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); | |
172 | void blk_mq_kick_requeue_list(struct request_queue *q); | |
173 | void blk_mq_complete_request(struct request *rq); | |
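
/*
 * Example: the typical completion flow. The driver's IRQ handler calls
 * blk_mq_complete_request(); the core then invokes the ->complete callback
 * (typically via softirq), which finishes the request. "my_complete" and
 * "my_rq_error" are hypothetical driver names:
 *
 *	static void my_complete(struct request *rq)
 *	{
 *		int error = my_rq_error(blk_mq_rq_to_pdu(rq));
 *
 *		blk_mq_end_request(rq, error);
 *	}
 */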
174 | ||
175 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); | |
176 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); | |
177 | void blk_mq_stop_hw_queues(struct request_queue *q); | |
178 | void blk_mq_start_hw_queues(struct request_queue *q); | |
179 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); | |
180 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); | |
181 | void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, | |
182 | void *priv); | |
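
/*
 * Example: walking all in-flight requests on a hardware queue, here just
 * counting them (a sketch; "my_count_rq" is a hypothetical helper):
 *
 *	static void my_count_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
 *				void *priv, bool reserved)
 *	{
 *		(*(unsigned int *)priv)++;
 *	}
 *
 *	unsigned int busy = 0;
 *	blk_mq_tag_busy_iter(hctx, my_count_rq, &busy);
 */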
183 | ||
184 | /* | |
185 | * Driver command data is immediately after the request. So subtract request | |
186 | * size to get back to the original request. | |
187 | */ | |
188 | static inline struct request *blk_mq_rq_from_pdu(void *pdu) | |
189 | { | |
190 | return pdu - sizeof(struct request); | |
191 | } | |
192 | static inline void *blk_mq_rq_to_pdu(struct request *rq) | |
193 | { | |
194 | return (void *) rq + sizeof(*rq); | |
195 | } | |
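
/*
 * Example: with cmd_size set to sizeof(struct my_cmd) in the tag set, every
 * request carries a driver PDU right behind it ("struct my_cmd" is a
 * hypothetical driver type):
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 * and blk_mq_rq_from_pdu(cmd) returns the original rq.
 */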
196 | ||
197 | #define queue_for_each_hw_ctx(q, hctx, i) \ | |
198 | for ((i) = 0; (i) < (q)->nr_hw_queues && \ | |
199 | ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++) | |
200 | ||
201 | #define queue_for_each_ctx(q, ctx, i) \ | |
202 | for ((i) = 0; (i) < (q)->nr_queues && \ | |
203 | ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++) | |
204 | ||
205 | #define hctx_for_each_ctx(hctx, ctx, i) \ | |
206 | for ((i) = 0; (i) < (hctx)->nr_ctx && \ | |
207 | ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++) | |
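
/*
 * Example: iterating every hardware queue of a request_queue, e.g. from
 * teardown or debug code:
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	unsigned int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		pr_info("hctx %u: %lu queued\n", hctx->queue_num, hctx->queued);
 */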
208 | ||
209 | #define blk_ctx_sum(q, sum) \ | |
210 | ({ \ | |
211 | struct blk_mq_ctx *__x; \ | |
212 | unsigned int __ret = 0, __i; \ | |
213 | \ | |
214 | queue_for_each_ctx((q), __x, __i) \ | |
215 | __ret += sum; \ | |
216 | __ret; \ | |
217 | }) | |
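
/*
 * Example: blk_ctx_sum() totals an expression over all software queues; the
 * expression sees the iterator as __x. The ctx fields live in the private
 * block/blk-mq.h definition of struct blk_mq_ctx, e.g. (assuming its
 * rq_merged counter):
 *
 *	unsigned int merged = blk_ctx_sum(q, __x->rq_merged);
 */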
218 | ||
219 | #endif |