/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);

bool dm_use_blk_mq_default(void)
{
        return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
        return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);

unsigned dm_get_reserved_rq_based_ios(void)
{
        return __dm_get_module_param(&reserved_rq_based_ios,
                                     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
        return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
        return __dm_get_module_param(&dm_mq_queue_depth,
                                     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
        return queue_is_rq_based(md->queue);
}

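/*
 * Queue start/stop helpers.  dm_start_queue() and dm_stop_queue() pick the
 * legacy .request_fn or the blk-mq variant based on q->mq_ops.
 */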
static void dm_old_start_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q))
                blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_start_queue(struct request_queue *q)
{
        blk_mq_unquiesce_queue(q);
        blk_mq_kick_requeue_list(q);
}

void dm_start_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_start_queue(q);
        else
                dm_mq_start_queue(q);
}

static void dm_old_stop_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (!blk_queue_stopped(q))
                blk_stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_stop_queue(struct request_queue *q)
{
        if (blk_mq_queue_stopped(q))
                return;

        blk_mq_quiesce_queue(q);
}

void dm_stop_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_stop_queue(q);
        else
                dm_mq_stop_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
        struct dm_rq_clone_bio_info *info =
                container_of(clone, struct dm_rq_clone_bio_info, clone);
        struct dm_rq_target_io *tio = info->tio;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
        blk_status_t error = clone->bi_status;
        bool is_last = !clone->bi_next;

        bio_put(clone);

        if (tio->error)
                /*
                 * An error has already been detected on the request.
                 * Once an error has occurred, just let clone->end_io() handle
                 * the remainder.
                 */
                return;
        else if (error) {
                /*
                 * Do not notify the upper layer of the error yet.
                 * The error handling decision is made by the target driver
                 * when the request is completed.
                 */
                tio->error = error;
                goto exit;
        }

        /*
         * I/O for the bio successfully completed.
         * Notify the upper layer of the completed data.
         */
        tio->completed += nr_bytes;

        /*
         * Update the original request.
         * Do not use blk_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
        if (is_last)
 exit:
                blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
        return blk_mq_rq_to_pdu(rq);
}

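/*
 * Account a completed request in dm-stats, using the start time and sector
 * count recorded by dm_start_request().
 */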
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies - tio->duration_jiffies;
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, true,
                                    tio->duration_jiffies, &tio->stats_aux);
        }
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Alternatively, do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
        struct request_queue *q = md->queue;
        unsigned long flags;

        atomic_dec(&md->pending[rw]);

        /* nudge anyone waiting on suspend queue */
        if (!md_in_flight(md))
                wake_up(&md->wait);

        /*
         * Run this off this callpath, as drivers could invoke end_io while
         * inside their request_fn (and holding the queue lock). Calling
         * back into ->request_fn() could deadlock attempting to grab the
         * queue lock again.
         */
        if (!q->mq_ops && run_queue) {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_run_queue_async(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }

        /*
         * dm_put() must be at the end of this function. See the comment above.
         */
        dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without the clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;

        blk_rq_unprep_clone(clone);
        tio->ti->type->release_clone_rq(clone, NULL);

        rq_end_stats(md, rq);
        if (!rq->q->mq_ops)
                blk_end_request_all(rq, error);
        else
                blk_mq_end_request(rq, error);
        rq_completed(md, rw, true);
}

/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
        blk_delay_queue(q, delay_ms);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
        blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
        __dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
        blk_mq_requeue_request(rq, false);
        __dm_mq_kick_requeue_list(rq->q, msecs);
}

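/*
 * Unwind the clone (if one was set up) and requeue the original request on
 * the appropriate path, optionally after a short delay.
 */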
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        int rw = rq_data_dir(rq);
        unsigned long delay_ms = delay_requeue ? 100 : 0;

        rq_end_stats(md, rq);
        if (tio->clone) {
                blk_rq_unprep_clone(tio->clone);
                tio->ti->type->release_clone_rq(tio->clone, NULL);
        }

        if (!rq->q->mq_ops)
                dm_old_requeue_request(rq, delay_ms);
        else
                dm_mq_delay_requeue_request(rq, delay_ms);

        rq_completed(md, rw, false);
}

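/*
 * Handle completion of the clone: give the target's rq_end_io() hook the
 * final say, then complete, requeue or leave the original request pending
 * accordingly.
 */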
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
        int r = DM_ENDIO_DONE;
        struct dm_rq_target_io *tio = clone->end_io_data;
        dm_request_endio_fn rq_end_io = NULL;

        if (tio->ti) {
                rq_end_io = tio->ti->type->rq_end_io;

                if (mapped && rq_end_io)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }

        if (unlikely(error == BLK_STS_TARGET)) {
                if (req_op(clone) == REQ_OP_WRITE_SAME &&
                    !clone->q->limits.max_write_same_sectors)
                        disable_write_same(tio->md);
                if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
                    !clone->q->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(tio->md);
        }

        switch (r) {
        case DM_ENDIO_DONE:
                /* The target wants to complete the I/O */
                dm_end_request(clone, error);
                break;
        case DM_ENDIO_INCOMPLETE:
                /* The target will handle the I/O */
                return;
        case DM_ENDIO_REQUEUE:
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(tio, false);
                break;
        default:
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
        }
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
        bool mapped = true;
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;
        int rw;

        if (!clone) {
                struct mapped_device *md = tio->md;

                rq_end_stats(md, rq);
                rw = rq_data_dir(rq);
                if (!rq->q->mq_ops)
                        blk_end_request_all(rq, tio->error);
                else
                        blk_mq_end_request(rq, tio->error);
                rq_completed(md, rw, false);
                return;
        }

        if (rq->rq_flags & RQF_FAILED)
                mapped = false;

        dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);

        tio->error = error;
        if (!rq->q->mq_ops)
                blk_complete_request(rq);
        else
                blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * The target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
        rq->rq_flags |= RQF_FAILED;
        dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, blk_status_t error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;

        /*
         * Actual request completion is done in a softirq context which doesn't
         * hold the clone's queue lock.  Otherwise, deadlock could occur because:
         *     - another request may be submitted by the upper level driver
         *       of the stacking during the completion
         *     - the submission which requires queue lock may be done
         *       against this clone's queue
         */
        dm_complete_request(tio->orig, error);
}

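/*
 * Insert the clone into the underlying device's queue.  On a hard failure the
 * original request is completed with the error; BLK_STS_RESOURCE is passed
 * back to the caller so it can requeue.
 */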
static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
        blk_status_t r;

        if (blk_queue_io_stat(clone->q))
                clone->rq_flags |= RQF_IO_STAT;

        clone->start_time = jiffies;
        r = blk_insert_cloned_request(clone->q, clone);
        if (r != BLK_STS_OK && r != BLK_STS_RESOURCE)
                /* must complete clone in terms of original request */
                dm_complete_request(rq, r);
        return r;
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
{
        struct dm_rq_target_io *tio = data;
        struct dm_rq_clone_bio_info *info =
                container_of(bio, struct dm_rq_clone_bio_info, clone);

        info->orig = bio_orig;
        info->tio = tio;
        bio->bi_end_io = end_clone_bio;

        return 0;
}

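/*
 * Prepare the clone's bios from the original request and wire up the clone's
 * completion handling.
 */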
static int setup_clone(struct request *clone, struct request *rq,
                       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        int r;

        r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
                              dm_rq_bio_constructor, tio);
        if (r)
                return r;

        clone->end_io = end_clone_request;
        clone->end_io_data = tio;

        tio->clone = clone;

        return 0;
}

static void map_tio_request(struct kthread_work *work);

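/* Initialize the per-request tio before mapping the original request. */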
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
                     struct mapped_device *md)
{
        tio->md = md;
        tio->ti = NULL;
        tio->clone = NULL;
        tio->orig = rq;
        tio->error = 0;
        tio->completed = 0;
        /*
         * Avoid initializing info for blk-mq; it passes
         * target-specific data through info.ptr
         * (see: dm_mq_init_request)
         */
        if (!md->init_tio_pdu)
                memset(&tio->info, 0, sizeof(tio->info));
        if (md->kworker_task)
                kthread_init_work(&tio->work, map_tio_request);
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
        int r;
        struct dm_target *ti = tio->ti;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        struct request *clone = NULL;
        blk_status_t ret;

        r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
check_again:
        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /* The target has taken the I/O to submit by itself later */
                break;
        case DM_MAPIO_REMAPPED:
                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone, &tio->info);
                        return DM_MAPIO_REQUEUE;
                }

                /* The target has remapped the I/O so dispatch it */
                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                                     blk_rq_pos(rq));
                ret = dm_dispatch_clone_request(clone, rq);
                if (ret == BLK_STS_RESOURCE) {
                        blk_rq_unprep_clone(clone);
                        tio->ti->type->release_clone_rq(clone, &tio->info);
                        tio->clone = NULL;
                        if (!rq->q->mq_ops)
                                r = DM_MAPIO_DELAY_REQUEUE;
                        else
                                r = DM_MAPIO_REQUEUE;
                        goto check_again;
                }
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                break;
        case DM_MAPIO_DELAY_REQUEUE:
                /* The target wants to requeue the I/O after a delay */
                dm_requeue_original_request(tio, true);
                break;
        case DM_MAPIO_KILL:
                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(rq, BLK_STS_IOERR);
                break;
        default:
                DMWARN("unimplemented target map return value: %d", r);
                BUG();
        }

        return r;
}

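/*
 * Mark the original request in flight: start it on its queue, bump the
 * pending count, record merge-deadline and statistics state, and take an md
 * reference that rq_completed() drops again.
 */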
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
        if (!orig->q->mq_ops)
                blk_start_request(orig);
        else
                blk_mq_start_request(orig);
        atomic_inc(&md->pending[rq_data_dir(orig)]);

        if (md->seq_rq_merge_deadline_usecs) {
                md->last_rq_pos = rq_end_sector(orig);
                md->last_rq_rw = rq_data_dir(orig);
                md->last_rq_start_time = ktime_get();
        }

        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies;
                tio->n_sectors = blk_rq_sectors(orig);
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, false, 0,
                                    &tio->stats_aux);
        }

        /*
         * Hold the md reference here for the in-flight I/O.
         * We can't rely on the reference count taken by the device opener,
         * because the device may be closed during the request completion
         * when all bios are completed.
         * See the comment in rq_completed() too.
         */
        dm_get(md);
}

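/* Per-request (tio) setup shared by the .request_fn and blk-mq paths. */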
static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
{
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

        /*
         * Must initialize md member of tio, otherwise it won't
         * be available in dm_mq_queue_rq.
         */
        tio->md = md;

        if (md->init_tio_pdu) {
                /* target-specific per-io data is immediately after the tio */
                tio->info.ptr = tio + 1;
        }

        return 0;
}

static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
        return __dm_rq_init_rq(q->rq_alloc_data, rq);
}

static void map_tio_request(struct kthread_work *work)
{
        struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

        if (map_request(tio) == DM_MAPIO_REQUEUE)
                dm_requeue_original_request(tio, false);
}

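/*
 * sysfs interface for the sequential I/O merge deadline used by
 * dm_old_request_fn()'s merge heuristic (in microseconds, 0 disables it).
 */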
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
        return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
                                                     const char *buf, size_t count)
{
        unsigned deadline;

        if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
                return count;

        if (kstrtouint(buf, 10, &deadline))
                return -EINVAL;

        if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
                deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

        md->seq_rq_merge_deadline_usecs = deadline;

        return count;
}

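/*
 * Return true if the most recently started request is still within the
 * configured merge deadline, i.e. it may be worth waiting for a merge.
 */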
static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
        ktime_t kt_deadline;

        if (!md->seq_rq_merge_deadline_usecs)
                return false;

        kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
        kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

        return !ktime_after(ktime_get(), kt_deadline);
}

/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_target *ti = md->immutable_target;
        struct request *rq;
        struct dm_rq_target_io *tio;
        sector_t pos = 0;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                if (unlikely(!map)) {
                        dm_put_live_table(md, srcu_idx);
                        return;
                }
                ti = dm_table_find_target(map, pos);
                dm_put_live_table(md, srcu_idx);
        }

        /*
         * For suspend, check blk_queue_stopped() and increment
         * ->pending within a single queue_lock not to increment the
         * number of in-flight I/Os after the queue is stopped in
         * dm_suspend().
         */
        while (!blk_queue_stopped(q)) {
                rq = blk_peek_request(q);
                if (!rq)
                        return;

                /* always use block 0 to find the target for flushes for now */
                pos = 0;
                if (req_op(rq) != REQ_OP_FLUSH)
                        pos = blk_rq_pos(rq);

                if ((dm_old_request_peeked_before_merge_deadline(md) &&
                     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
                     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
                    (ti->type->busy && ti->type->busy(ti))) {
                        blk_delay_queue(q, 10);
                        return;
                }

                dm_start_request(md, rq);

                tio = tio_from_request(rq);
                init_tio(tio, rq, md);
                /* Establish tio->ti before queuing work (map_tio_request) */
                tio->ti = ti;
                kthread_queue_work(&md->kworker, &tio->work);
                BUG_ON(!irqs_disabled());
        }
}

/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct dm_target *immutable_tgt;

        /* Fully initialize the queue */
        md->queue->cmd_size = sizeof(struct dm_rq_target_io);
        md->queue->rq_alloc_data = md;
        md->queue->request_fn = dm_old_request_fn;
        md->queue->init_rq_fn = dm_rq_init_rq;

        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data is immediately after the tio */
                md->queue->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }
        if (blk_init_allocated_queue(md->queue) < 0)
                return -EINVAL;

        /* disable dm_old_request_fn's merge heuristic by default */
        md->seq_rq_merge_deadline_usecs = 0;

        dm_init_normal_md_queue(md);
        blk_queue_softirq_done(md->queue, dm_softirq_done);

        /* Initialize the request-based DM worker thread */
        kthread_init_worker(&md->kworker);
        md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
                                       "kdmwork-%s", dm_device_name(md));
        if (IS_ERR(md->kworker_task)) {
                int error = PTR_ERR(md->kworker_task);
                md->kworker_task = NULL;
                return error;
        }

        elv_register_queue(md->queue);

        return 0;
}

static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
                              unsigned int hctx_idx, unsigned int numa_node)
{
        return __dm_rq_init_rq(set->driver_data, rq);
}

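/* blk-mq .queue_rq handler: map and dispatch one original request. */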
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
        struct mapped_device *md = tio->md;
        struct dm_target *ti = md->immutable_target;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, 0);
                dm_put_live_table(md, srcu_idx);
        }

        if (ti->type->busy && ti->type->busy(ti))
                return BLK_STS_RESOURCE;

        dm_start_request(md, rq);

        /* Init tio using md established in .init_request */
        init_tio(tio, rq, md);

        /*
         * Establish tio->ti before calling map_request().
         */
        tio->ti = ti;

        /* Direct call is fine since .queue_rq allows allocations */
        if (map_request(tio) == DM_MAPIO_REQUEUE) {
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
                blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
                return BLK_STS_RESOURCE;
        }

        return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
        .queue_rq = dm_mq_queue_rq,
        .complete = dm_softirq_done,
        .init_request = dm_mq_init_request,
};

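/*
 * Allocate and set up the blk-mq tag set and initialize md->queue for a
 * request-based dm-mq mapped device.
 */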
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct request_queue *q;
        struct dm_target *immutable_tgt;
        int err;

        if (!dm_table_all_blk_mq_devices(t)) {
                DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
                return -EINVAL;
        }

        md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
        if (!md->tag_set)
                return -ENOMEM;

        md->tag_set->ops = &dm_mq_ops;
        md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
        md->tag_set->numa_node = md->numa_node_id;
        md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
        md->tag_set->driver_data = md;

        md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data is immediately after the tio */
                md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }

        err = blk_mq_alloc_tag_set(md->tag_set);
        if (err)
                goto out_kfree_tag_set;

        q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_tag_set;
        }
        dm_init_md_queue(md);

        /* backfill 'mq' sysfs registration normally done in blk_register_queue */
        err = blk_mq_register_dev(disk_to_dev(md->disk), q);
        if (err)
                goto out_cleanup_queue;

        return 0;

out_cleanup_queue:
        blk_cleanup_queue(q);
out_tag_set:
        blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
        kfree(md->tag_set);

        return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
        if (md->tag_set) {
                blk_mq_free_tag_set(md->tag_set);
                kfree(md->tag_set);
        }
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");