// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Drop the reference count on @bqt and free it when the last reference
 * is dropped.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (atomic_dec_and_test(&bqt->refcnt)) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:	the request queue for the device
 *
 * Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:	the request queue for the device
 *
 * Notes:
 *    This is used to disable tagged queuing on a device, while leaving
 *    the queue itself functional.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth, int alloc_policy)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	tags->alloc_policy = alloc_policy;
	tags->next_tag = 0;
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 * @alloc_policy: tag allocation policy
 **/
struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
{
	return __blk_queue_init_tags(NULL, depth, alloc_policy);
}
EXPORT_SYMBOL(blk_init_tags);

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag map to use (or %NULL to allocate a new one)
 * @alloc_policy: tag allocation policy
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags, int alloc_policy)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth, alloc_policy);

		if (!tags)
			return -ENOMEM;

	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:	the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * if we already have a large enough real_max_depth, just
	 * adjust max_depth.  *NOTE* as requests with tag value
	 * between new_depth and real_max_depth can be in-flight, the
	 * tag map cannot be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag; /* negative tags invalid */

	lockdep_assert_held(q->queue_lock);

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->rq_flags &= ~RQF_QUEUED;
	rq->tag = -1;
	rq->internal_tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 * Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	lockdep_assert_held(q->queue_lock);

	if (unlikely((rq->rq_flags & RQF_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		switch (max_depth) {
		case 2:
			max_depth = 1;
			break;
		case 3:
			max_depth = 2;
			break;
		default:
			max_depth -= 2;
		}
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
			tag = find_first_zero_bit(bqt->tag_map, max_depth);
			if (tag >= max_depth)
				return 1;
		} else {
			int start = bqt->next_tag;
			int size = min_t(int, bqt->max_depth, max_depth + start);
			tag = find_next_zero_bit(bqt->tag_map, size, start);
			if (tag >= size && start + size > bqt->max_depth) {
				size = start + size - bqt->max_depth;
				tag = find_first_zero_bit(bqt->tag_map, size);
			}
			if (tag >= size)
				return 1;
		}

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	bqt->next_tag = (tag + 1) % bqt->max_depth;
	rq->rq_flags |= RQF_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 * Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
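
/*
 * Example (illustrative sketch only, not part of this file): requeueing all
 * tagged requests after a controller reset, under the queue lock as required,
 * then kicking the queue so the requeued requests get re-issued.  The
 * my_foo_* names are hypothetical.
 *
 *	static void my_foo_reset(struct my_foo_dev *dev)
 *	{
 *		struct request_queue *q = dev->queue;
 *		unsigned long flags;
 *
 *		my_foo_hw_reset(dev);
 *
 *		spin_lock_irqsave(q->queue_lock, flags);
 *		blk_queue_invalidate_tags(q);
 *		spin_unlock_irqrestore(q->queue_lock, flags);
 *
 *		blk_run_queue(q);
 *	}
 */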