/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:  The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
        return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
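
/*
 * Example (illustrative sketch, not part of this file): a driver's
 * completion path can map a hardware-reported tag back to its request.
 * The hw_tag variable and my_finish_request() helper are hypothetical;
 * only blk_queue_find_tag() is defined here.
 *
 *        struct request *rq = blk_queue_find_tag(q, hw_tag);
 *
 *        if (rq)
 *                my_finish_request(q, rq);
 *        else
 *                printk(KERN_ERR "unexpected tag %d\n", hw_tag);
 */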

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * Tries to free the specified @bqt.  Returns true if it was
 * actually freed and false if there are still references using it.
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
        int retval;

        retval = atomic_dec_and_test(&bqt->refcnt);
        if (retval) {
                BUG_ON(bqt->busy);

                kfree(bqt->tag_index);
                bqt->tag_index = NULL;

                kfree(bqt->tag_map);
                bqt->tag_map = NULL;

                kfree(bqt);
        }

        return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
        struct blk_queue_tag *bqt = q->queue_tags;

        if (!bqt)
                return;

        __blk_free_tags(bqt);

        q->queue_tags = NULL;
        q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * For an externally managed @bqt, this frees the map.  Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
        if (unlikely(!__blk_free_tags(bqt)))
                BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 *    This is used to disable tagged queuing on a device, yet leave
 *    the queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
        clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
        struct request **tag_index;
        unsigned long *tag_map;
        int nr_ulongs;

        if (q && depth > q->nr_requests * 2) {
                depth = q->nr_requests * 2;
                printk(KERN_ERR "%s: adjusted depth to %d\n",
                       __FUNCTION__, depth);
        }

        tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
        if (!tag_index)
                goto fail;

        nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
        tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                goto fail;

        tags->real_max_depth = depth;
        tags->max_depth = depth;
        tags->tag_index = tag_index;
        tags->tag_map = tag_map;

        return 0;
fail:
        kfree(tag_index);
        return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
                                                   int depth)
{
        struct blk_queue_tag *tags;

        tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
        if (!tags)
                goto fail;

        if (init_tag_map(q, tags, depth))
                goto fail;

        tags->busy = 0;
        atomic_set(&tags->refcnt, 1);
        return tags;
fail:
        kfree(tags);
        return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth: the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
        return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);
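
/*
 * Example (illustrative sketch): a host driver sharing one tag map
 * across several queues.  MY_QUEUE_DEPTH, the host structure and the
 * error label are hypothetical; blk_init_tags(), blk_queue_init_tags()
 * and blk_free_tags() are the interfaces defined in this file.
 *
 *        host->bqt = blk_init_tags(MY_QUEUE_DEPTH);
 *        if (!host->bqt)
 *                return -ENOMEM;
 *
 *        (every queue on the host then reuses the same map)
 *        if (blk_queue_init_tags(q, MY_QUEUE_DEPTH, host->bqt))
 *                goto fail;
 *
 *        (at teardown, once all queues have been released)
 *        blk_free_tags(host->bqt);
 */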

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q: the request queue for the device
 * @depth: the maximum queue depth supported
 * @tags: the existing tag map to use, or NULL to allocate one
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
                        struct blk_queue_tag *tags)
{
        int rc;

        BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

        if (!tags && !q->queue_tags) {
                tags = __blk_queue_init_tags(q, depth);

                if (!tags)
                        goto fail;
        } else if (q->queue_tags) {
                rc = blk_queue_resize_tags(q, depth);
                if (rc)
                        return rc;
                set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
                return 0;
        } else
                atomic_inc(&tags->refcnt);

        /*
         * assign it, all done
         */
        q->queue_tags = tags;
        q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
        INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
fail:
        kfree(tags);
        return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);
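
/*
 * Example (illustrative sketch): enabling tagged queuing on a single
 * queue at probe time, letting the block layer allocate a private tag
 * map by passing a NULL @tags.  MY_QUEUE_DEPTH is hypothetical.
 *
 *        err = blk_queue_init_tags(q, MY_QUEUE_DEPTH, NULL);
 *        if (err)
 *                return err;
 */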

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        struct request **tag_index;
        unsigned long *tag_map;
        int max_depth, nr_ulongs;

        if (!bqt)
                return -ENXIO;

        /*
         * If we already have a large enough real_max_depth, just
         * adjust max_depth.  *NOTE* as requests with tag value
         * between new_depth and real_max_depth can be in-flight, the
         * tag map cannot be shrunk blindly here.
         */
        if (new_depth <= bqt->real_max_depth) {
                bqt->max_depth = new_depth;
                return 0;
        }

        /*
         * Currently cannot replace a shared tag map with a new
         * one, so error out if this is the case
         */
        if (atomic_read(&bqt->refcnt) != 1)
                return -EBUSY;

        /*
         * save the old state info, so we can copy it back
         */
        tag_index = bqt->tag_index;
        tag_map = bqt->tag_map;
        max_depth = bqt->real_max_depth;

        if (init_tag_map(q, bqt, new_depth))
                return -ENOMEM;

        memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
        nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
        memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

        kfree(tag_index);
        kfree(tag_map);
        return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
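
/*
 * Example (illustrative sketch): raising the depth after the device
 * reports how many commands it can actually queue.  The queue lock
 * must be held, per the Notes above; new_depth is hypothetical.
 *
 *        spin_lock_irqsave(q->queue_lock, flags);
 *        if (blk_queue_resize_tags(q, new_depth))
 *                printk(KERN_WARNING "could not resize tag map\n");
 *        spin_unlock_irqrestore(q->queue_lock, flags);
 */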

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 *    Typically called when end_that_request_first() returns 0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 * Notes:
 *    queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        int tag = rq->tag;

        BUG_ON(tag == -1);

        if (unlikely(tag >= bqt->real_max_depth))
                /*
                 * This can happen after tag depth has been reduced.
                 * FIXME: how about a warning or info message here?
                 */
                return;

        list_del_init(&rq->queuelist);
        rq->cmd_flags &= ~REQ_QUEUED;
        rq->tag = -1;

        if (unlikely(bqt->tag_index[tag] == NULL))
                printk(KERN_ERR "%s: tag %d is missing\n",
                       __FUNCTION__, tag);

        bqt->tag_index[tag] = NULL;

        if (unlikely(!test_bit(tag, bqt->tag_map))) {
                printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
                       __FUNCTION__, tag);
                return;
        }
        /*
         * The tag_map bit acts as a lock for tag_index[bit], so we need
         * unlock memory barrier semantics.
         */
        clear_bit_unlock(tag, bqt->tag_map);
        bqt->busy--;
}
EXPORT_SYMBOL(blk_queue_end_tag);
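
/*
 * Example (illustrative sketch): the completion ordering described
 * above, using the old end_that_request_* interface.  The uptodate
 * variable is hypothetical; the queue lock is assumed to be held.
 *
 *        if (!end_that_request_first(rq, uptodate, rq->hard_nr_sectors)) {
 *                if (blk_rq_tagged(rq))
 *                        blk_queue_end_tag(q, rq);
 *                end_that_request_last(rq, uptodate);
 *        }
 */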

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq: the block request that needs tagging
 *
 * Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 * Notes:
 *    queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        int tag;

        if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
                printk(KERN_ERR
                       "%s: request %p for device [%s] already tagged %d",
                       __FUNCTION__, rq,
                       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
                BUG();
        }

        /*
         * Protect against shared tag maps, as we may not have exclusive
         * access to the tag map.
         */
        do {
                tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
                if (tag >= bqt->max_depth)
                        return 1;

        } while (test_and_set_bit_lock(tag, bqt->tag_map));
        /*
         * We need lock ordering semantics given by test_and_set_bit_lock.
         * See blk_queue_end_tag for details.
         */

        rq->cmd_flags |= REQ_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blkdev_dequeue_request(rq);
        list_add(&rq->queuelist, &q->tag_busy_list);
        bqt->busy++;
        return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
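
/*
 * Example (illustrative sketch): using blk_queue_start_tag() from a
 * driver's request_fn, which runs with the queue lock held.  The
 * my_hw_queue_rq() helper is hypothetical; note that the request is
 * already dequeued by blk_queue_start_tag() on success.
 *
 *        while ((rq = elv_next_request(q)) != NULL) {
 *                if (blk_queue_start_tag(q, rq)) {
 *                        (out of tags - stop until a command completes)
 *                        blk_stop_queue(q);
 *                        break;
 *                }
 *                my_hw_queue_rq(rq);
 *        }
 */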

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 * Description:
 *    Hardware conditions may dictate a need to stop all pending requests.
 *    In this case, we will safely clear the block side of the tag queue and
 *    re-add all requests to the request queue in the right order.
 *
 * Notes:
 *    queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
        struct list_head *tmp, *n;

        list_for_each_safe(tmp, n, &q->tag_busy_list)
                blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
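
/*
 * Example (illustrative sketch): a bus-reset or error-recovery path
 * pushing every tagged command back onto the request queue, with the
 * locking the Notes above require.
 *
 *        spin_lock_irqsave(q->queue_lock, flags);
 *        blk_queue_invalidate_tags(q);
 *        spin_unlock_irqrestore(q->queue_lock, flags);
 */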