block/blk-core.c
1 /*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7 * - July 2000
8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
9 */
10
11 /*
12 * This handles all read/write requests to block devices
13 */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/backing-dev.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/blk-mq.h>
20 #include <linux/highmem.h>
21 #include <linux/mm.h>
22 #include <linux/kernel_stat.h>
23 #include <linux/string.h>
24 #include <linux/init.h>
25 #include <linux/completion.h>
26 #include <linux/slab.h>
27 #include <linux/swap.h>
28 #include <linux/writeback.h>
29 #include <linux/task_io_accounting_ops.h>
30 #include <linux/fault-inject.h>
31 #include <linux/list_sort.h>
32 #include <linux/delay.h>
33 #include <linux/ratelimit.h>
34 #include <linux/pm_runtime.h>
35 #include <linux/blk-cgroup.h>
36 #include <linux/debugfs.h>
37
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/block.h>
40
41 #include "blk.h"
42 #include "blk-mq.h"
43 #include "blk-mq-sched.h"
44 #include "blk-wbt.h"
45
46 #ifdef CONFIG_DEBUG_FS
47 struct dentry *blk_debugfs_root;
48 #endif
49
50 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
51 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
52 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
53 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
54 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
55
56 DEFINE_IDA(blk_queue_ida);
57
58 /*
59 * For the allocated request tables
60 */
61 struct kmem_cache *request_cachep;
62
63 /*
64 * For queue allocation
65 */
66 struct kmem_cache *blk_requestq_cachep;
67
68 /*
69 * Controlling structure to kblockd
70 */
71 static struct workqueue_struct *kblockd_workqueue;
72
73 static void blk_clear_congested(struct request_list *rl, int sync)
74 {
75 #ifdef CONFIG_CGROUP_WRITEBACK
76 clear_wb_congested(rl->blkg->wb_congested, sync);
77 #else
78 /*
79 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
80 * flip its congestion state for events on other blkcgs.
81 */
82 if (rl == &rl->q->root_rl)
83 clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
84 #endif
85 }
86
87 static void blk_set_congested(struct request_list *rl, int sync)
88 {
89 #ifdef CONFIG_CGROUP_WRITEBACK
90 set_wb_congested(rl->blkg->wb_congested, sync);
91 #else
92 /* see blk_clear_congested() */
93 if (rl == &rl->q->root_rl)
94 set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
95 #endif
96 }
97
98 void blk_queue_congestion_threshold(struct request_queue *q)
99 {
100 int nr;
101
102 nr = q->nr_requests - (q->nr_requests / 8) + 1;
103 if (nr > q->nr_requests)
104 nr = q->nr_requests;
105 q->nr_congestion_on = nr;
106
107 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
108 if (nr < 1)
109 nr = 1;
110 q->nr_congestion_off = nr;
111 }
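/*
 * Worked example, assuming the common default of q->nr_requests = 128
 * (BLKDEV_MAX_RQ):
 *
 *	nr_congestion_on  = 128 - 128/8 + 1           = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1  = 103
 *
 * i.e. a direction is flagged congested once 113 requests are allocated
 * and the flag is cleared again when the count drops below 103.
 */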
112
113 void blk_rq_init(struct request_queue *q, struct request *rq)
114 {
115 memset(rq, 0, sizeof(*rq));
116
117 INIT_LIST_HEAD(&rq->queuelist);
118 INIT_LIST_HEAD(&rq->timeout_list);
119 rq->cpu = -1;
120 rq->q = q;
121 rq->__sector = (sector_t) -1;
122 INIT_HLIST_NODE(&rq->hash);
123 RB_CLEAR_NODE(&rq->rb_node);
124 rq->tag = -1;
125 rq->internal_tag = -1;
126 rq->start_time = jiffies;
127 set_start_time_ns(rq);
128 rq->part = NULL;
129 }
130 EXPORT_SYMBOL(blk_rq_init);
131
132 static void req_bio_endio(struct request *rq, struct bio *bio,
133 unsigned int nbytes, int error)
134 {
135 if (error)
136 bio->bi_error = error;
137
138 if (unlikely(rq->rq_flags & RQF_QUIET))
139 bio_set_flag(bio, BIO_QUIET);
140
141 bio_advance(bio, nbytes);
142
143 /* don't actually finish bio if it's part of flush sequence */
144 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
145 bio_endio(bio);
146 }
147
148 void blk_dump_rq_flags(struct request *rq, char *msg)
149 {
150 printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
151 rq->rq_disk ? rq->rq_disk->disk_name : "?",
152 (unsigned long long) rq->cmd_flags);
153
154 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
155 (unsigned long long)blk_rq_pos(rq),
156 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
157 printk(KERN_INFO " bio %p, biotail %p, len %u\n",
158 rq->bio, rq->biotail, blk_rq_bytes(rq));
159 }
160 EXPORT_SYMBOL(blk_dump_rq_flags);
161
162 static void blk_delay_work(struct work_struct *work)
163 {
164 struct request_queue *q;
165
166 q = container_of(work, struct request_queue, delay_work.work);
167 spin_lock_irq(q->queue_lock);
168 __blk_run_queue(q);
169 spin_unlock_irq(q->queue_lock);
170 }
171
172 /**
173 * blk_delay_queue - restart queueing after defined interval
174 * @q: The &struct request_queue in question
175 * @msecs: Delay in msecs
176 *
177 * Description:
178 * Sometimes queueing needs to be postponed for a little while, to allow
179 * resources to come back. This function will make sure that queueing is
180 * restarted around the specified time. Queue lock must be held.
181 */
182 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
183 {
184 if (likely(!blk_queue_dead(q)))
185 queue_delayed_work(kblockd_workqueue, &q->delay_work,
186 msecs_to_jiffies(msecs));
187 }
188 EXPORT_SYMBOL(blk_delay_queue);
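/*
 * Illustrative sketch of the use described above: a legacy request_fn that
 * is temporarily out of resources can back off and let kblockd restart the
 * queue a few milliseconds later. The helper my_resources_low() and the use
 * of q->queuedata are hypothetical, not part of this file:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (my_resources_low(q->queuedata)) {
 *				blk_requeue_request(q, rq);
 *				blk_delay_queue(q, 3);
 *				return;
 *			}
 *			my_hw_submit(q->queuedata, rq);
 *		}
 *	}
 *
 * Both blk_requeue_request() and blk_delay_queue() are called with
 * q->queue_lock already held, since the request_fn runs under that lock.
 */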
189
190 /**
191 * blk_start_queue_async - asynchronously restart a previously stopped queue
192 * @q: The &struct request_queue in question
193 *
194 * Description:
195 * blk_start_queue_async() will clear the stop flag on the queue, and
196 * ensure that the request_fn for the queue is run from an async
197 * context.
198 **/
199 void blk_start_queue_async(struct request_queue *q)
200 {
201 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
202 blk_run_queue_async(q);
203 }
204 EXPORT_SYMBOL(blk_start_queue_async);
205
206 /**
207 * blk_start_queue - restart a previously stopped queue
208 * @q: The &struct request_queue in question
209 *
210 * Description:
211 * blk_start_queue() will clear the stop flag on the queue, and call
212 * the request_fn for the queue if it was in a stopped state when
213 * entered. Also see blk_stop_queue(). Queue lock must be held.
214 **/
215 void blk_start_queue(struct request_queue *q)
216 {
217 WARN_ON(!irqs_disabled());
218
219 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
220 __blk_run_queue(q);
221 }
222 EXPORT_SYMBOL(blk_start_queue);
223
224 /**
225 * blk_stop_queue - stop a queue
226 * @q: The &struct request_queue in question
227 *
228 * Description:
229 * The Linux block layer assumes that a block driver will consume all
230 * entries on the request queue when the request_fn strategy is called.
231 * Often this will not happen, because of hardware limitations (queue
232 * depth settings). If a device driver gets a 'queue full' response,
233 * or if it simply chooses not to queue more I/O at one point, it can
234 * call this function to prevent the request_fn from being called until
235 * the driver has signalled it's ready to go again. This happens by calling
236 * blk_start_queue() to restart queue operations. Queue lock must be held.
237 **/
238 void blk_stop_queue(struct request_queue *q)
239 {
240 cancel_delayed_work(&q->delay_work);
241 queue_flag_set(QUEUE_FLAG_STOPPED, q);
242 }
243 EXPORT_SYMBOL(blk_stop_queue);
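/*
 * A minimal sketch of the stop/start pairing described above, assuming a
 * hypothetical driver with helpers my_hw_full()/my_hw_submit() and a
 * struct my_dev holding the queue. The stop side runs from the request_fn
 * (queue_lock already held); the start side runs from the completion
 * interrupt once the hardware has room again:
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (my_hw_full(q->queuedata)) {
 *				blk_requeue_request(q, rq);
 *				blk_stop_queue(q);
 *				return;
 *			}
 *			my_hw_submit(q->queuedata, rq);
 *		}
 *	}
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(dev->queue->queue_lock, flags);
 *		blk_start_queue(dev->queue);
 *		spin_unlock_irqrestore(dev->queue->queue_lock, flags);
 *		return IRQ_HANDLED;
 *	}
 */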
244
245 /**
246 * blk_sync_queue - cancel any pending callbacks on a queue
247 * @q: the queue
248 *
249 * Description:
250 * The block layer may perform asynchronous callback activity
251 * on a queue, such as calling the unplug function after a timeout.
252 * A block device may call blk_sync_queue to ensure that any
253 * such activity is cancelled, thus allowing it to release resources
254 * that the callbacks might use. The caller must already have made sure
255 * that its ->make_request_fn will not re-add plugging prior to calling
256 * this function.
257 *
258 * This function does not cancel any asynchronous activity arising
259 * out of elevator or throttling code. That would require elevator_exit()
260 * and blkcg_exit_queue() to be called with queue lock initialized.
261 *
262 */
263 void blk_sync_queue(struct request_queue *q)
264 {
265 del_timer_sync(&q->timeout);
266
267 if (q->mq_ops) {
268 struct blk_mq_hw_ctx *hctx;
269 int i;
270
271 queue_for_each_hw_ctx(q, hctx, i) {
272 cancel_work_sync(&hctx->run_work);
273 cancel_delayed_work_sync(&hctx->delay_work);
274 }
275 } else {
276 cancel_delayed_work_sync(&q->delay_work);
277 }
278 }
279 EXPORT_SYMBOL(blk_sync_queue);
280
281 /**
282 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
283 * @q: The queue to run
284 *
285 * Description:
286 * Invoke request handling on a queue if there are any pending requests.
287 * May be used to restart request handling after a request has completed.
288 * This variant runs the queue whether or not the queue has been
289 * stopped. Must be called with the queue lock held and interrupts
290 * disabled. See also @blk_run_queue.
291 */
292 inline void __blk_run_queue_uncond(struct request_queue *q)
293 {
294 if (unlikely(blk_queue_dead(q)))
295 return;
296
297 /*
298 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
299 * the queue lock internally. As a result multiple threads may be
300 * running such a request function concurrently. Keep track of the
301 * number of active request_fn invocations such that blk_drain_queue()
302 * can wait until all these request_fn calls have finished.
303 */
304 q->request_fn_active++;
305 q->request_fn(q);
306 q->request_fn_active--;
307 }
308 EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
309
310 /**
311 * __blk_run_queue - run a single device queue
312 * @q: The queue to run
313 *
314 * Description:
315 * See @blk_run_queue. This variant must be called with the queue lock
316 * held and interrupts disabled.
317 */
318 void __blk_run_queue(struct request_queue *q)
319 {
320 if (unlikely(blk_queue_stopped(q)))
321 return;
322
323 __blk_run_queue_uncond(q);
324 }
325 EXPORT_SYMBOL(__blk_run_queue);
326
327 /**
328 * blk_run_queue_async - run a single device queue in workqueue context
329 * @q: The queue to run
330 *
331 * Description:
332 * Tells kblockd to perform the equivalent of @blk_run_queue on behalf
333 * of us. The caller must hold the queue lock.
334 */
335 void blk_run_queue_async(struct request_queue *q)
336 {
337 if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
338 mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
339 }
340 EXPORT_SYMBOL(blk_run_queue_async);
341
342 /**
343 * blk_run_queue - run a single device queue
344 * @q: The queue to run
345 *
346 * Description:
347 * Invoke request handling on this queue, if it has pending work to do.
348 * May be used to restart queueing when a request has completed.
349 */
350 void blk_run_queue(struct request_queue *q)
351 {
352 unsigned long flags;
353
354 spin_lock_irqsave(q->queue_lock, flags);
355 __blk_run_queue(q);
356 spin_unlock_irqrestore(q->queue_lock, flags);
357 }
358 EXPORT_SYMBOL(blk_run_queue);
359
360 void blk_put_queue(struct request_queue *q)
361 {
362 kobject_put(&q->kobj);
363 }
364 EXPORT_SYMBOL(blk_put_queue);
365
366 /**
367 * __blk_drain_queue - drain requests from request_queue
368 * @q: queue to drain
369 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
370 *
371 * Drain requests from @q. If @drain_all is set, all requests are drained.
372 * If not, only ELVPRIV requests are drained. The caller is responsible
373 * for ensuring that no new requests which need to be drained are queued.
374 */
375 static void __blk_drain_queue(struct request_queue *q, bool drain_all)
376 __releases(q->queue_lock)
377 __acquires(q->queue_lock)
378 {
379 int i;
380
381 lockdep_assert_held(q->queue_lock);
382
383 while (true) {
384 bool drain = false;
385
386 /*
387 * The caller might be trying to drain @q before its
388 * elevator is initialized.
389 */
390 if (q->elevator)
391 elv_drain_elevator(q);
392
393 blkcg_drain_queue(q);
394
395 /*
396 * This function might be called on a queue which failed
397 * driver init after queue creation or is not yet fully
398		 * active. Some drivers (e.g. fd and loop) get unhappy
399 * in such cases. Kick queue iff dispatch queue has
400 * something on it and @q has request_fn set.
401 */
402 if (!list_empty(&q->queue_head) && q->request_fn)
403 __blk_run_queue(q);
404
405 drain |= q->nr_rqs_elvpriv;
406 drain |= q->request_fn_active;
407
408 /*
409 * Unfortunately, requests are queued at and tracked from
410 * multiple places and there's no single counter which can
411 * be drained. Check all the queues and counters.
412 */
413 if (drain_all) {
414 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
415 drain |= !list_empty(&q->queue_head);
416 for (i = 0; i < 2; i++) {
417 drain |= q->nr_rqs[i];
418 drain |= q->in_flight[i];
419 if (fq)
420 drain |= !list_empty(&fq->flush_queue[i]);
421 }
422 }
423
424 if (!drain)
425 break;
426
427 spin_unlock_irq(q->queue_lock);
428
429 msleep(10);
430
431 spin_lock_irq(q->queue_lock);
432 }
433
434 /*
435 * With queue marked dead, any woken up waiter will fail the
436 * allocation path, so the wakeup chaining is lost and we're
437 * left with hung waiters. We need to wake up those waiters.
438 */
439 if (q->request_fn) {
440 struct request_list *rl;
441
442 blk_queue_for_each_rl(rl, q)
443 for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
444 wake_up_all(&rl->wait[i]);
445 }
446 }
447
448 /**
449 * blk_queue_bypass_start - enter queue bypass mode
450 * @q: queue of interest
451 *
452 * In bypass mode, only the dispatch FIFO queue of @q is used. This
453 * function makes @q enter bypass mode and drains all requests which were
454 * throttled or issued before. On return, it's guaranteed that no request
455  * is being throttled or has ELVPRIV set, and blk_queue_bypass() returns %true
456 * inside queue or RCU read lock.
457 */
458 void blk_queue_bypass_start(struct request_queue *q)
459 {
460 spin_lock_irq(q->queue_lock);
461 q->bypass_depth++;
462 queue_flag_set(QUEUE_FLAG_BYPASS, q);
463 spin_unlock_irq(q->queue_lock);
464
465 /*
466 * Queues start drained. Skip actual draining till init is
467	 * complete. This avoids lengthy delays during queue init which
468 * can happen many times during boot.
469 */
470 if (blk_queue_init_done(q)) {
471 spin_lock_irq(q->queue_lock);
472 __blk_drain_queue(q, false);
473 spin_unlock_irq(q->queue_lock);
474
475 /* ensure blk_queue_bypass() is %true inside RCU read lock */
476 synchronize_rcu();
477 }
478 }
479 EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
480
481 /**
482 * blk_queue_bypass_end - leave queue bypass mode
483 * @q: queue of interest
484 *
485 * Leave bypass mode and restore the normal queueing behavior.
486 */
487 void blk_queue_bypass_end(struct request_queue *q)
488 {
489 spin_lock_irq(q->queue_lock);
490 if (!--q->bypass_depth)
491 queue_flag_clear(QUEUE_FLAG_BYPASS, q);
492 WARN_ON_ONCE(q->bypass_depth < 0);
493 spin_unlock_irq(q->queue_lock);
494 }
495 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
496
497 void blk_set_queue_dying(struct request_queue *q)
498 {
499 spin_lock_irq(q->queue_lock);
500 queue_flag_set(QUEUE_FLAG_DYING, q);
501 spin_unlock_irq(q->queue_lock);
502
503 /*
504	 * When the queue DYING flag is set, we need to block new requests
505	 * from entering the queue, so we call blk_freeze_queue_start() to
506 * prevent I/O from crossing blk_queue_enter().
507 */
508 blk_freeze_queue_start(q);
509
510 if (q->mq_ops)
511 blk_mq_wake_waiters(q);
512 else {
513 struct request_list *rl;
514
515 spin_lock_irq(q->queue_lock);
516 blk_queue_for_each_rl(rl, q) {
517 if (rl->rq_pool) {
518 wake_up(&rl->wait[BLK_RW_SYNC]);
519 wake_up(&rl->wait[BLK_RW_ASYNC]);
520 }
521 }
522 spin_unlock_irq(q->queue_lock);
523 }
524 }
525 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
526
527 /**
528 * blk_cleanup_queue - shutdown a request queue
529 * @q: request queue to shutdown
530 *
531 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
532 * put it. All future requests will be failed immediately with -ENODEV.
533 */
534 void blk_cleanup_queue(struct request_queue *q)
535 {
536 spinlock_t *lock = q->queue_lock;
537
538 /* mark @q DYING, no new request or merges will be allowed afterwards */
539 mutex_lock(&q->sysfs_lock);
540 blk_set_queue_dying(q);
541 spin_lock_irq(lock);
542
543 /*
544 * A dying queue is permanently in bypass mode till released. Note
545 * that, unlike blk_queue_bypass_start(), we aren't performing
546 * synchronize_rcu() after entering bypass mode to avoid the delay
547 * as some drivers create and destroy a lot of queues while
548 * probing. This is still safe because blk_release_queue() will be
549 * called only after the queue refcnt drops to zero and nothing,
550 * RCU or not, would be traversing the queue by then.
551 */
552 q->bypass_depth++;
553 queue_flag_set(QUEUE_FLAG_BYPASS, q);
554
555 queue_flag_set(QUEUE_FLAG_NOMERGES, q);
556 queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
557 queue_flag_set(QUEUE_FLAG_DYING, q);
558 spin_unlock_irq(lock);
559 mutex_unlock(&q->sysfs_lock);
560
561 /*
562 * Drain all requests queued before DYING marking. Set DEAD flag to
563 * prevent that q->request_fn() gets invoked after draining finished.
564 */
565 blk_freeze_queue(q);
566 spin_lock_irq(lock);
567 if (!q->mq_ops)
568 __blk_drain_queue(q, true);
569 queue_flag_set(QUEUE_FLAG_DEAD, q);
570 spin_unlock_irq(lock);
571
572 /* for synchronous bio-based driver finish in-flight integrity i/o */
573 blk_flush_integrity();
574
575	/* @q won't process any more requests, flush async actions */
576 del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
577 blk_sync_queue(q);
578
579 if (q->mq_ops)
580 blk_mq_free_queue(q);
581 percpu_ref_exit(&q->q_usage_counter);
582
583 spin_lock_irq(lock);
584 if (q->queue_lock != &q->__queue_lock)
585 q->queue_lock = &q->__queue_lock;
586 spin_unlock_irq(lock);
587
588 /* @q is and will stay empty, shutdown and put */
589 blk_put_queue(q);
590 }
591 EXPORT_SYMBOL(blk_cleanup_queue);
592
593 /* Allocate memory local to the request queue */
594 static void *alloc_request_simple(gfp_t gfp_mask, void *data)
595 {
596 struct request_queue *q = data;
597
598 return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
599 }
600
601 static void free_request_simple(void *element, void *data)
602 {
603 kmem_cache_free(request_cachep, element);
604 }
605
606 static void *alloc_request_size(gfp_t gfp_mask, void *data)
607 {
608 struct request_queue *q = data;
609 struct request *rq;
610
611 rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
612 q->node);
613 if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
614 kfree(rq);
615 rq = NULL;
616 }
617 return rq;
618 }
619
620 static void free_request_size(void *element, void *data)
621 {
622 struct request_queue *q = data;
623
624 if (q->exit_rq_fn)
625 q->exit_rq_fn(q, element);
626 kfree(element);
627 }
628
629 int blk_init_rl(struct request_list *rl, struct request_queue *q,
630 gfp_t gfp_mask)
631 {
632 if (unlikely(rl->rq_pool))
633 return 0;
634
635 rl->q = q;
636 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
637 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
638 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
639 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
640
641 if (q->cmd_size) {
642 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
643 alloc_request_size, free_request_size,
644 q, gfp_mask, q->node);
645 } else {
646 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
647 alloc_request_simple, free_request_simple,
648 q, gfp_mask, q->node);
649 }
650 if (!rl->rq_pool)
651 return -ENOMEM;
652
653 return 0;
654 }
655
656 void blk_exit_rl(struct request_list *rl)
657 {
658 if (rl->rq_pool)
659 mempool_destroy(rl->rq_pool);
660 }
661
662 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
663 {
664 return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
665 }
666 EXPORT_SYMBOL(blk_alloc_queue);
667
668 int blk_queue_enter(struct request_queue *q, bool nowait)
669 {
670 while (true) {
671 int ret;
672
673 if (percpu_ref_tryget_live(&q->q_usage_counter))
674 return 0;
675
676 if (nowait)
677 return -EBUSY;
678
679 /*
680		 * This is the read-side pairing of the barrier in
681		 * blk_freeze_queue_start(): we need to order reading the
682		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
683		 * .mq_freeze_depth or the queue dying flag, otherwise the
684		 * following wait may never return if the two reads are reordered.
685 */
686 smp_rmb();
687
688 ret = wait_event_interruptible(q->mq_freeze_wq,
689 !atomic_read(&q->mq_freeze_depth) ||
690 blk_queue_dying(q));
691 if (blk_queue_dying(q))
692 return -ENODEV;
693 if (ret)
694 return ret;
695 }
696 }
697
698 void blk_queue_exit(struct request_queue *q)
699 {
700 percpu_ref_put(&q->q_usage_counter);
701 }
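/*
 * blk_queue_enter() and blk_queue_exit() bracket each bio submission so
 * that the queue cannot be frozen or torn down while a request is being
 * issued. A sketch of the usage pattern (generic_make_request() below does
 * essentially this):
 *
 *	if (blk_queue_enter(q, false) == 0) {
 *		ret = q->make_request_fn(q, bio);
 *		blk_queue_exit(q);
 *	} else {
 *		bio->bi_error = -ENODEV;
 *		bio_endio(bio);
 *	}
 */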
702
703 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
704 {
705 struct request_queue *q =
706 container_of(ref, struct request_queue, q_usage_counter);
707
708 wake_up_all(&q->mq_freeze_wq);
709 }
710
711 static void blk_rq_timed_out_timer(unsigned long data)
712 {
713 struct request_queue *q = (struct request_queue *)data;
714
715 kblockd_schedule_work(&q->timeout_work);
716 }
717
718 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
719 {
720 struct request_queue *q;
721
722 q = kmem_cache_alloc_node(blk_requestq_cachep,
723 gfp_mask | __GFP_ZERO, node_id);
724 if (!q)
725 return NULL;
726
727 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
728 if (q->id < 0)
729 goto fail_q;
730
731 q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
732 if (!q->bio_split)
733 goto fail_id;
734
735 q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
736 if (!q->backing_dev_info)
737 goto fail_split;
738
739 q->stats = blk_alloc_queue_stats();
740 if (!q->stats)
741 goto fail_stats;
742
743 q->backing_dev_info->ra_pages =
744 (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
745 q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
746 q->backing_dev_info->name = "block";
747 q->node = node_id;
748
749 setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
750 laptop_mode_timer_fn, (unsigned long) q);
751 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
752 INIT_LIST_HEAD(&q->queue_head);
753 INIT_LIST_HEAD(&q->timeout_list);
754 INIT_LIST_HEAD(&q->icq_list);
755 #ifdef CONFIG_BLK_CGROUP
756 INIT_LIST_HEAD(&q->blkg_list);
757 #endif
758 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
759
760 kobject_init(&q->kobj, &blk_queue_ktype);
761
762 mutex_init(&q->sysfs_lock);
763 spin_lock_init(&q->__queue_lock);
764
765 /*
766 * By default initialize queue_lock to internal lock and driver can
767 * override it later if need be.
768 */
769 q->queue_lock = &q->__queue_lock;
770
771 /*
772 * A queue starts its life with bypass turned on to avoid
773 * unnecessary bypass on/off overhead and nasty surprises during
774 * init. The initial bypass will be finished when the queue is
775 * registered by blk_register_queue().
776 */
777 q->bypass_depth = 1;
778 __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
779
780 init_waitqueue_head(&q->mq_freeze_wq);
781
782 /*
783 * Init percpu_ref in atomic mode so that it's faster to shutdown.
784 * See blk_register_queue() for details.
785 */
786 if (percpu_ref_init(&q->q_usage_counter,
787 blk_queue_usage_counter_release,
788 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
789 goto fail_bdi;
790
791 if (blkcg_init_queue(q))
792 goto fail_ref;
793
794 return q;
795
796 fail_ref:
797 percpu_ref_exit(&q->q_usage_counter);
798 fail_bdi:
799 blk_free_queue_stats(q->stats);
800 fail_stats:
801 bdi_put(q->backing_dev_info);
802 fail_split:
803 bioset_free(q->bio_split);
804 fail_id:
805 ida_simple_remove(&blk_queue_ida, q->id);
806 fail_q:
807 kmem_cache_free(blk_requestq_cachep, q);
808 return NULL;
809 }
810 EXPORT_SYMBOL(blk_alloc_queue_node);
811
812 /**
813 * blk_init_queue - prepare a request queue for use with a block device
814 * @rfn: The function to be called to process requests that have been
815 * placed on the queue.
816 * @lock: Request queue spin lock
817 *
818 * Description:
819 * If a block device wishes to use the standard request handling procedures,
820 * which sorts requests and coalesces adjacent requests, then it must
821 * call blk_init_queue(). The function @rfn will be called when there
822 * are requests on the queue that need to be processed. If the device
823 * supports plugging, then @rfn may not be called immediately when requests
824 * are available on the queue, but may be called at some time later instead.
825 * Plugged queues are generally unplugged when a buffer belonging to one
826 * of the requests on the queue is needed, or due to memory pressure.
827 *
828 * @rfn is not required, or even expected, to remove all requests off the
829 * queue, but only as many as it can handle at a time. If it does leave
830 * requests on the queue, it is responsible for arranging that the requests
831 * get dealt with eventually.
832 *
833 * The queue spin lock must be held while manipulating the requests on the
834 * request queue; this lock will be taken also from interrupt context, so irq
835 * disabling is needed for it.
836 *
837 * Function returns a pointer to the initialized request queue, or %NULL if
838 * it didn't succeed.
839 *
840 * Note:
841 * blk_init_queue() must be paired with a blk_cleanup_queue() call
842 * when the block device is deactivated (such as at module unload).
843 **/
844
845 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
846 {
847 return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
848 }
849 EXPORT_SYMBOL(blk_init_queue);
850
851 struct request_queue *
852 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
853 {
854 struct request_queue *q;
855
856 q = blk_alloc_queue_node(GFP_KERNEL, node_id);
857 if (!q)
858 return NULL;
859
860 q->request_fn = rfn;
861 if (lock)
862 q->queue_lock = lock;
863 if (blk_init_allocated_queue(q) < 0) {
864 blk_cleanup_queue(q);
865 return NULL;
866 }
867
868 return q;
869 }
870 EXPORT_SYMBOL(blk_init_queue_node);
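/*
 * Typical use by a legacy driver, per the kernel-doc above; my_request_fn
 * and my_dev are hypothetical names:
 *
 *	spin_lock_init(&my_dev->lock);
 *	q = blk_init_queue(my_request_fn, &my_dev->lock);
 *	if (!q)
 *		return -ENOMEM;
 *	q->queuedata = my_dev;
 *
 * and, when the device goes away (e.g. at module unload):
 *
 *	blk_cleanup_queue(q);
 */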
871
872 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
873
874
875 int blk_init_allocated_queue(struct request_queue *q)
876 {
877 q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
878 if (!q->fq)
879 return -ENOMEM;
880
881 if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
882 goto out_free_flush_queue;
883
884 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
885 goto out_exit_flush_rq;
886
887 INIT_WORK(&q->timeout_work, blk_timeout_work);
888 q->queue_flags |= QUEUE_FLAG_DEFAULT;
889
890 /*
891 * This also sets hw/phys segments, boundary and size
892 */
893 blk_queue_make_request(q, blk_queue_bio);
894
895 q->sg_reserved_size = INT_MAX;
896
897 /* Protect q->elevator from elevator_change */
898 mutex_lock(&q->sysfs_lock);
899
900 /* init elevator */
901 if (elevator_init(q, NULL)) {
902 mutex_unlock(&q->sysfs_lock);
903 goto out_exit_flush_rq;
904 }
905
906 mutex_unlock(&q->sysfs_lock);
907 return 0;
908
909 out_exit_flush_rq:
910 if (q->exit_rq_fn)
911 q->exit_rq_fn(q, q->fq->flush_rq);
912 out_free_flush_queue:
913 blk_free_flush_queue(q->fq);
914 return -ENOMEM;
915 }
916 EXPORT_SYMBOL(blk_init_allocated_queue);
917
918 bool blk_get_queue(struct request_queue *q)
919 {
920 if (likely(!blk_queue_dying(q))) {
921 __blk_get_queue(q);
922 return true;
923 }
924
925 return false;
926 }
927 EXPORT_SYMBOL(blk_get_queue);
928
929 static inline void blk_free_request(struct request_list *rl, struct request *rq)
930 {
931 if (rq->rq_flags & RQF_ELVPRIV) {
932 elv_put_request(rl->q, rq);
933 if (rq->elv.icq)
934 put_io_context(rq->elv.icq->ioc);
935 }
936
937 mempool_free(rq, rl->rq_pool);
938 }
939
940 /*
941 * ioc_batching returns true if the ioc is a valid batching request and
942 * should be given priority access to a request.
943 */
944 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
945 {
946 if (!ioc)
947 return 0;
948
949 /*
950 * Make sure the process is able to allocate at least 1 request
951 * even if the batch times out, otherwise we could theoretically
952 * lose wakeups.
953 */
954 return ioc->nr_batch_requests == q->nr_batching ||
955 (ioc->nr_batch_requests > 0
956 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
957 }
958
959 /*
960 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
961 * will cause the process to be a "batcher" on all queues in the system. This
962 * is the behaviour we want though - once it gets a wakeup it should be given
963 * a nice run.
964 */
965 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
966 {
967 if (!ioc || ioc_batching(q, ioc))
968 return;
969
970 ioc->nr_batch_requests = q->nr_batching;
971 ioc->last_waited = jiffies;
972 }
973
974 static void __freed_request(struct request_list *rl, int sync)
975 {
976 struct request_queue *q = rl->q;
977
978 if (rl->count[sync] < queue_congestion_off_threshold(q))
979 blk_clear_congested(rl, sync);
980
981 if (rl->count[sync] + 1 <= q->nr_requests) {
982 if (waitqueue_active(&rl->wait[sync]))
983 wake_up(&rl->wait[sync]);
984
985 blk_clear_rl_full(rl, sync);
986 }
987 }
988
989 /*
990 * A request has just been released. Account for it, update the full and
991 * congestion status, wake up any waiters. Called under q->queue_lock.
992 */
993 static void freed_request(struct request_list *rl, bool sync,
994 req_flags_t rq_flags)
995 {
996 struct request_queue *q = rl->q;
997
998 q->nr_rqs[sync]--;
999 rl->count[sync]--;
1000 if (rq_flags & RQF_ELVPRIV)
1001 q->nr_rqs_elvpriv--;
1002
1003 __freed_request(rl, sync);
1004
1005 if (unlikely(rl->starved[sync ^ 1]))
1006 __freed_request(rl, sync ^ 1);
1007 }
1008
1009 int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
1010 {
1011 struct request_list *rl;
1012 int on_thresh, off_thresh;
1013
1014 spin_lock_irq(q->queue_lock);
1015 q->nr_requests = nr;
1016 blk_queue_congestion_threshold(q);
1017 on_thresh = queue_congestion_on_threshold(q);
1018 off_thresh = queue_congestion_off_threshold(q);
1019
1020 blk_queue_for_each_rl(rl, q) {
1021 if (rl->count[BLK_RW_SYNC] >= on_thresh)
1022 blk_set_congested(rl, BLK_RW_SYNC);
1023 else if (rl->count[BLK_RW_SYNC] < off_thresh)
1024 blk_clear_congested(rl, BLK_RW_SYNC);
1025
1026 if (rl->count[BLK_RW_ASYNC] >= on_thresh)
1027 blk_set_congested(rl, BLK_RW_ASYNC);
1028 else if (rl->count[BLK_RW_ASYNC] < off_thresh)
1029 blk_clear_congested(rl, BLK_RW_ASYNC);
1030
1031 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
1032 blk_set_rl_full(rl, BLK_RW_SYNC);
1033 } else {
1034 blk_clear_rl_full(rl, BLK_RW_SYNC);
1035 wake_up(&rl->wait[BLK_RW_SYNC]);
1036 }
1037
1038 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
1039 blk_set_rl_full(rl, BLK_RW_ASYNC);
1040 } else {
1041 blk_clear_rl_full(rl, BLK_RW_ASYNC);
1042 wake_up(&rl->wait[BLK_RW_ASYNC]);
1043 }
1044 }
1045
1046 spin_unlock_irq(q->queue_lock);
1047 return 0;
1048 }
1049
1050 /**
1051 * __get_request - get a free request
1052 * @rl: request list to allocate from
1053 * @op: operation and flags
1054 * @bio: bio to allocate request for (can be %NULL)
1055 * @gfp_mask: allocation mask
1056 *
1057 * Get a free request from @q. This function may fail under memory
1058 * pressure or if @q is dead.
1059 *
1060 * Must be called with @q->queue_lock held.
1061 * Returns ERR_PTR on failure, with @q->queue_lock held.
1062 * Returns request pointer on success, with @q->queue_lock *not held*.
1063 */
1064 static struct request *__get_request(struct request_list *rl, unsigned int op,
1065 struct bio *bio, gfp_t gfp_mask)
1066 {
1067 struct request_queue *q = rl->q;
1068 struct request *rq;
1069 struct elevator_type *et = q->elevator->type;
1070 struct io_context *ioc = rq_ioc(bio);
1071 struct io_cq *icq = NULL;
1072 const bool is_sync = op_is_sync(op);
1073 int may_queue;
1074 req_flags_t rq_flags = RQF_ALLOCED;
1075
1076 if (unlikely(blk_queue_dying(q)))
1077 return ERR_PTR(-ENODEV);
1078
1079 may_queue = elv_may_queue(q, op);
1080 if (may_queue == ELV_MQUEUE_NO)
1081 goto rq_starved;
1082
1083 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
1084 if (rl->count[is_sync]+1 >= q->nr_requests) {
1085 /*
1086 * The queue will fill after this allocation, so set
1087 * it as full, and mark this process as "batching".
1088 * This process will be allowed to complete a batch of
1089 * requests, others will be blocked.
1090 */
1091 if (!blk_rl_full(rl, is_sync)) {
1092 ioc_set_batching(q, ioc);
1093 blk_set_rl_full(rl, is_sync);
1094 } else {
1095 if (may_queue != ELV_MQUEUE_MUST
1096 && !ioc_batching(q, ioc)) {
1097 /*
1098 * The queue is full and the allocating
1099 * process is not a "batcher", and not
1100 * exempted by the IO scheduler
1101 */
1102 return ERR_PTR(-ENOMEM);
1103 }
1104 }
1105 }
1106 blk_set_congested(rl, is_sync);
1107 }
1108
1109 /*
1110 * Only allow batching queuers to allocate up to 50% over the defined
1111 * limit of requests, otherwise we could have thousands of requests
1112 * allocated with any setting of ->nr_requests
1113 */
1114 if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
1115 return ERR_PTR(-ENOMEM);
1116
1117 q->nr_rqs[is_sync]++;
1118 rl->count[is_sync]++;
1119 rl->starved[is_sync] = 0;
1120
1121 /*
1122 * Decide whether the new request will be managed by elevator. If
1123 * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will
1124 * prevent the current elevator from being destroyed until the new
1125 * request is freed. This guarantees icq's won't be destroyed and
1126 * makes creating new ones safe.
1127 *
1128 * Flush requests do not use the elevator so skip initialization.
1129 * This allows a request to share the flush and elevator data.
1130 *
1131 * Also, lookup icq while holding queue_lock. If it doesn't exist,
1132 * it will be created after releasing queue_lock.
1133 */
1134 if (!op_is_flush(op) && !blk_queue_bypass(q)) {
1135 rq_flags |= RQF_ELVPRIV;
1136 q->nr_rqs_elvpriv++;
1137 if (et->icq_cache && ioc)
1138 icq = ioc_lookup_icq(ioc, q);
1139 }
1140
1141 if (blk_queue_io_stat(q))
1142 rq_flags |= RQF_IO_STAT;
1143 spin_unlock_irq(q->queue_lock);
1144
1145 /* allocate and init request */
1146 rq = mempool_alloc(rl->rq_pool, gfp_mask);
1147 if (!rq)
1148 goto fail_alloc;
1149
1150 blk_rq_init(q, rq);
1151 blk_rq_set_rl(rq, rl);
1152 rq->cmd_flags = op;
1153 rq->rq_flags = rq_flags;
1154
1155 /* init elvpriv */
1156 if (rq_flags & RQF_ELVPRIV) {
1157 if (unlikely(et->icq_cache && !icq)) {
1158 if (ioc)
1159 icq = ioc_create_icq(ioc, q, gfp_mask);
1160 if (!icq)
1161 goto fail_elvpriv;
1162 }
1163
1164 rq->elv.icq = icq;
1165 if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
1166 goto fail_elvpriv;
1167
1168 /* @rq->elv.icq holds io_context until @rq is freed */
1169 if (icq)
1170 get_io_context(icq->ioc);
1171 }
1172 out:
1173 /*
1174 * ioc may be NULL here, and ioc_batching will be false. That's
1175 * OK, if the queue is under the request limit then requests need
1176 * not count toward the nr_batch_requests limit. There will always
1177 * be some limit enforced by BLK_BATCH_TIME.
1178 */
1179 if (ioc_batching(q, ioc))
1180 ioc->nr_batch_requests--;
1181
1182 trace_block_getrq(q, bio, op);
1183 return rq;
1184
1185 fail_elvpriv:
1186 /*
1187 * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
1188 * and may fail indefinitely under memory pressure and thus
1189 * shouldn't stall IO. Treat this request as !elvpriv. This will
1190	 * disturb iosched and blkcg but weird is better than dead.
1191 */
1192 printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
1193 __func__, dev_name(q->backing_dev_info->dev));
1194
1195 rq->rq_flags &= ~RQF_ELVPRIV;
1196 rq->elv.icq = NULL;
1197
1198 spin_lock_irq(q->queue_lock);
1199 q->nr_rqs_elvpriv--;
1200 spin_unlock_irq(q->queue_lock);
1201 goto out;
1202
1203 fail_alloc:
1204 /*
1205 * Allocation failed presumably due to memory. Undo anything we
1206 * might have messed up.
1207 *
1208 * Allocating task should really be put onto the front of the wait
1209 * queue, but this is pretty rare.
1210 */
1211 spin_lock_irq(q->queue_lock);
1212 freed_request(rl, is_sync, rq_flags);
1213
1214 /*
1215 * in the very unlikely event that allocation failed and no
1216	 * requests for this direction were pending, mark us starved so that
1217	 * freeing of a request in the other direction will notice
1218	 * us. Another possible fix would be to split the rq mempool into
1219 * READ and WRITE
1220 */
1221 rq_starved:
1222 if (unlikely(rl->count[is_sync] == 0))
1223 rl->starved[is_sync] = 1;
1224 return ERR_PTR(-ENOMEM);
1225 }
1226
1227 /**
1228 * get_request - get a free request
1229 * @q: request_queue to allocate request from
1230 * @op: operation and flags
1231 * @bio: bio to allocate request for (can be %NULL)
1232 * @gfp_mask: allocation mask
1233 *
1234 * Get a free request from @q. If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
1235 * this function keeps retrying under memory pressure and fails iff @q is dead.
1236 *
1237 * Must be called with @q->queue_lock held.
1238 * Returns ERR_PTR on failure, with @q->queue_lock held.
1239 * Returns request pointer on success, with @q->queue_lock *not held*.
1240 */
1241 static struct request *get_request(struct request_queue *q, unsigned int op,
1242 struct bio *bio, gfp_t gfp_mask)
1243 {
1244 const bool is_sync = op_is_sync(op);
1245 DEFINE_WAIT(wait);
1246 struct request_list *rl;
1247 struct request *rq;
1248
1249 rl = blk_get_rl(q, bio); /* transferred to @rq on success */
1250 retry:
1251 rq = __get_request(rl, op, bio, gfp_mask);
1252 if (!IS_ERR(rq))
1253 return rq;
1254
1255 if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
1256 blk_put_rl(rl);
1257 return rq;
1258 }
1259
1260 /* wait on @rl and retry */
1261 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
1262 TASK_UNINTERRUPTIBLE);
1263
1264 trace_block_sleeprq(q, bio, op);
1265
1266 spin_unlock_irq(q->queue_lock);
1267 io_schedule();
1268
1269 /*
1270 * After sleeping, we become a "batching" process and will be able
1271 * to allocate at least one request, and up to a big batch of them
1272 * for a small period time. See ioc_batching, ioc_set_batching
1273 */
1274 ioc_set_batching(q, current->io_context);
1275
1276 spin_lock_irq(q->queue_lock);
1277 finish_wait(&rl->wait[is_sync], &wait);
1278
1279 goto retry;
1280 }
1281
1282 static struct request *blk_old_get_request(struct request_queue *q, int rw,
1283 gfp_t gfp_mask)
1284 {
1285 struct request *rq;
1286
1287 /* create ioc upfront */
1288 create_io_context(gfp_mask, q->node);
1289
1290 spin_lock_irq(q->queue_lock);
1291 rq = get_request(q, rw, NULL, gfp_mask);
1292 if (IS_ERR(rq)) {
1293 spin_unlock_irq(q->queue_lock);
1294 return rq;
1295 }
1296
1297 /* q->queue_lock is unlocked at this point */
1298 rq->__data_len = 0;
1299 rq->__sector = (sector_t) -1;
1300 rq->bio = rq->biotail = NULL;
1301 return rq;
1302 }
1303
1304 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1305 {
1306 if (q->mq_ops)
1307 return blk_mq_alloc_request(q, rw,
1308 (gfp_mask & __GFP_DIRECT_RECLAIM) ?
1309 0 : BLK_MQ_REQ_NOWAIT);
1310 else
1311 return blk_old_get_request(q, rw, gfp_mask);
1312 }
1313 EXPORT_SYMBOL(blk_get_request);
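/*
 * A minimal sketch of allocating a request directly and submitting it;
 * blk_execute_rq() is the usual way such a request is issued and waited
 * for. The setup of the request body is elided and depends on the caller:
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->timeout = 10 * HZ;
 *	... map data / set up the payload here ...
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 *
 * With GFP_KERNEL the allocation may sleep and retry; pass a non-blocking
 * mask if the caller cannot sleep.
 */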
1314
1315 /**
1316 * blk_requeue_request - put a request back on queue
1317 * @q: request queue where request should be inserted
1318 * @rq: request to be inserted
1319 *
1320 * Description:
1321 * Drivers often keep queueing requests until the hardware cannot accept
1322 * more, when that condition happens we need to put the request back
1323 * on the queue. Must be called with queue lock held.
1324 */
1325 void blk_requeue_request(struct request_queue *q, struct request *rq)
1326 {
1327 blk_delete_timer(rq);
1328 blk_clear_rq_complete(rq);
1329 trace_block_rq_requeue(q, rq);
1330 wbt_requeue(q->rq_wb, &rq->issue_stat);
1331
1332 if (rq->rq_flags & RQF_QUEUED)
1333 blk_queue_end_tag(q, rq);
1334
1335 BUG_ON(blk_queued_rq(rq));
1336
1337 elv_requeue_request(q, rq);
1338 }
1339 EXPORT_SYMBOL(blk_requeue_request);
1340
1341 static void add_acct_request(struct request_queue *q, struct request *rq,
1342 int where)
1343 {
1344 blk_account_io_start(rq, true);
1345 __elv_add_request(q, rq, where);
1346 }
1347
1348 static void part_round_stats_single(int cpu, struct hd_struct *part,
1349 unsigned long now)
1350 {
1351 int inflight;
1352
1353 if (now == part->stamp)
1354 return;
1355
1356 inflight = part_in_flight(part);
1357 if (inflight) {
1358 __part_stat_add(cpu, part, time_in_queue,
1359 inflight * (now - part->stamp));
1360 __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1361 }
1362 part->stamp = now;
1363 }
1364
1365 /**
1366 * part_round_stats() - Round off the performance stats on a struct disk_stats.
1367 * @cpu: cpu number for stats access
1368 * @part: target partition
1369 *
1370 * The average IO queue length and utilisation statistics are maintained
1371 * by observing the current state of the queue length and the amount of
1372 * time it has been in this state for.
1373 *
1374 * Normally, that accounting is done on IO completion, but that can result
1375 * in more than a second's worth of IO being accounted for within any one
1376 * second, leading to >100% utilisation. To deal with that, we call this
1377 * function to do a round-off before returning the results when reading
1378 * /proc/diskstats. This accounts immediately for all queue usage up to
1379 * the current jiffies and restarts the counters again.
1380 */
1381 void part_round_stats(int cpu, struct hd_struct *part)
1382 {
1383 unsigned long now = jiffies;
1384
1385 if (part->partno)
1386 part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1387 part_round_stats_single(cpu, part, now);
1388 }
1389 EXPORT_SYMBOL_GPL(part_round_stats);
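/*
 * Worked example of the accounting above: if 3 requests have been in
 * flight since part->stamp and 5 jiffies have elapsed, part_round_stats()
 * adds 3 * 5 = 15 to time_in_queue and 5 to io_ticks, then resets
 * part->stamp to the current jiffies so the interval is not counted twice.
 */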
1390
1391 #ifdef CONFIG_PM
1392 static void blk_pm_put_request(struct request *rq)
1393 {
1394 if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
1395 pm_runtime_mark_last_busy(rq->q->dev);
1396 }
1397 #else
1398 static inline void blk_pm_put_request(struct request *rq) {}
1399 #endif
1400
1401 /*
1402 * queue lock must be held
1403 */
1404 void __blk_put_request(struct request_queue *q, struct request *req)
1405 {
1406 req_flags_t rq_flags = req->rq_flags;
1407
1408 if (unlikely(!q))
1409 return;
1410
1411 if (q->mq_ops) {
1412 blk_mq_free_request(req);
1413 return;
1414 }
1415
1416 blk_pm_put_request(req);
1417
1418 elv_completed_request(q, req);
1419
1420 /* this is a bio leak */
1421 WARN_ON(req->bio != NULL);
1422
1423 wbt_done(q->rq_wb, &req->issue_stat);
1424
1425 /*
1426	 * Request may not have originated from ll_rw_blk. If not,
1427 * it didn't come out of our reserved rq pools
1428 */
1429 if (rq_flags & RQF_ALLOCED) {
1430 struct request_list *rl = blk_rq_rl(req);
1431 bool sync = op_is_sync(req->cmd_flags);
1432
1433 BUG_ON(!list_empty(&req->queuelist));
1434 BUG_ON(ELV_ON_HASH(req));
1435
1436 blk_free_request(rl, req);
1437 freed_request(rl, sync, rq_flags);
1438 blk_put_rl(rl);
1439 }
1440 }
1441 EXPORT_SYMBOL_GPL(__blk_put_request);
1442
1443 void blk_put_request(struct request *req)
1444 {
1445 struct request_queue *q = req->q;
1446
1447 if (q->mq_ops)
1448 blk_mq_free_request(req);
1449 else {
1450 unsigned long flags;
1451
1452 spin_lock_irqsave(q->queue_lock, flags);
1453 __blk_put_request(q, req);
1454 spin_unlock_irqrestore(q->queue_lock, flags);
1455 }
1456 }
1457 EXPORT_SYMBOL(blk_put_request);
1458
1459 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1460 struct bio *bio)
1461 {
1462 const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
1463
1464 if (!ll_back_merge_fn(q, req, bio))
1465 return false;
1466
1467 trace_block_bio_backmerge(q, req, bio);
1468
1469 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1470 blk_rq_set_mixed_merge(req);
1471
1472 req->biotail->bi_next = bio;
1473 req->biotail = bio;
1474 req->__data_len += bio->bi_iter.bi_size;
1475 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1476
1477 blk_account_io_start(req, false);
1478 return true;
1479 }
1480
1481 bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1482 struct bio *bio)
1483 {
1484 const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
1485
1486 if (!ll_front_merge_fn(q, req, bio))
1487 return false;
1488
1489 trace_block_bio_frontmerge(q, req, bio);
1490
1491 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1492 blk_rq_set_mixed_merge(req);
1493
1494 bio->bi_next = req->bio;
1495 req->bio = bio;
1496
1497 req->__sector = bio->bi_iter.bi_sector;
1498 req->__data_len += bio->bi_iter.bi_size;
1499 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1500
1501 blk_account_io_start(req, false);
1502 return true;
1503 }
1504
1505 bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
1506 struct bio *bio)
1507 {
1508 unsigned short segments = blk_rq_nr_discard_segments(req);
1509
1510 if (segments >= queue_max_discard_segments(q))
1511 goto no_merge;
1512 if (blk_rq_sectors(req) + bio_sectors(bio) >
1513 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1514 goto no_merge;
1515
1516 req->biotail->bi_next = bio;
1517 req->biotail = bio;
1518 req->__data_len += bio->bi_iter.bi_size;
1519 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1520 req->nr_phys_segments = segments + 1;
1521
1522 blk_account_io_start(req, false);
1523 return true;
1524 no_merge:
1525 req_set_nomerge(q, req);
1526 return false;
1527 }
1528
1529 /**
1530 * blk_attempt_plug_merge - try to merge with %current's plugged list
1531 * @q: request_queue new bio is being queued at
1532 * @bio: new bio being queued
1533 * @request_count: out parameter for number of traversed plugged requests
1534 * @same_queue_rq: pointer to &struct request that gets filled in when
1535 * another request associated with @q is found on the plug list
1536 * (optional, may be %NULL)
1537 *
1538 * Determine whether @bio being queued on @q can be merged with a request
1539 * on %current's plugged list. Returns %true if merge was successful,
1540 * otherwise %false.
1541 *
1542 * Plugging coalesces IOs from the same issuer for the same purpose without
1543 * going through @q->queue_lock. As such it's more of an issuing mechanism
1544 * than scheduling, and the request, while it may have elvpriv data, is not
1545 * added on the elevator at this point. In addition, we don't have
1546 * reliable access to the elevator outside queue lock. Only check basic
1547 * merging parameters without querying the elevator.
1548 *
1549 * Caller must ensure !blk_queue_nomerges(q) beforehand.
1550 */
1551 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1552 unsigned int *request_count,
1553 struct request **same_queue_rq)
1554 {
1555 struct blk_plug *plug;
1556 struct request *rq;
1557 struct list_head *plug_list;
1558
1559 plug = current->plug;
1560 if (!plug)
1561 return false;
1562 *request_count = 0;
1563
1564 if (q->mq_ops)
1565 plug_list = &plug->mq_list;
1566 else
1567 plug_list = &plug->list;
1568
1569 list_for_each_entry_reverse(rq, plug_list, queuelist) {
1570 bool merged = false;
1571
1572 if (rq->q == q) {
1573 (*request_count)++;
1574 /*
1575			 * Only the blk-mq case with multiple hardware queues
1576			 * checks for an rq on the same queue; there should be
1577			 * only one such rq in a queue.
1578			 */
1579 if (same_queue_rq)
1580 *same_queue_rq = rq;
1581 }
1582
1583 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
1584 continue;
1585
1586 switch (blk_try_merge(rq, bio)) {
1587 case ELEVATOR_BACK_MERGE:
1588 merged = bio_attempt_back_merge(q, rq, bio);
1589 break;
1590 case ELEVATOR_FRONT_MERGE:
1591 merged = bio_attempt_front_merge(q, rq, bio);
1592 break;
1593 case ELEVATOR_DISCARD_MERGE:
1594 merged = bio_attempt_discard_merge(q, rq, bio);
1595 break;
1596 default:
1597 break;
1598 }
1599
1600 if (merged)
1601 return true;
1602 }
1603
1604 return false;
1605 }
1606
1607 unsigned int blk_plug_queued_count(struct request_queue *q)
1608 {
1609 struct blk_plug *plug;
1610 struct request *rq;
1611 struct list_head *plug_list;
1612 unsigned int ret = 0;
1613
1614 plug = current->plug;
1615 if (!plug)
1616 goto out;
1617
1618 if (q->mq_ops)
1619 plug_list = &plug->mq_list;
1620 else
1621 plug_list = &plug->list;
1622
1623 list_for_each_entry(rq, plug_list, queuelist) {
1624 if (rq->q == q)
1625 ret++;
1626 }
1627 out:
1628 return ret;
1629 }
1630
1631 void blk_init_request_from_bio(struct request *req, struct bio *bio)
1632 {
1633 if (bio->bi_opf & REQ_RAHEAD)
1634 req->cmd_flags |= REQ_FAILFAST_MASK;
1635
1636 req->errors = 0;
1637 req->__sector = bio->bi_iter.bi_sector;
1638 blk_rq_set_prio(req, rq_ioc(bio));
1639 if (ioprio_valid(bio_prio(bio)))
1640 req->ioprio = bio_prio(bio);
1641 blk_rq_bio_prep(req->q, req, bio);
1642 }
1643 EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
1644
1645 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1646 {
1647 struct blk_plug *plug;
1648 int where = ELEVATOR_INSERT_SORT;
1649 struct request *req, *free;
1650 unsigned int request_count = 0;
1651 unsigned int wb_acct;
1652
1653 /*
1654 * low level driver can indicate that it wants pages above a
1655 * certain limit bounced to low memory (ie for highmem, or even
1656 * ISA dma in theory)
1657 */
1658 blk_queue_bounce(q, &bio);
1659
1660 blk_queue_split(q, &bio, q->bio_split);
1661
1662 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1663 bio->bi_error = -EIO;
1664 bio_endio(bio);
1665 return BLK_QC_T_NONE;
1666 }
1667
1668 if (op_is_flush(bio->bi_opf)) {
1669 spin_lock_irq(q->queue_lock);
1670 where = ELEVATOR_INSERT_FLUSH;
1671 goto get_rq;
1672 }
1673
1674 /*
1675 * Check if we can merge with the plugged list before grabbing
1676 * any locks.
1677 */
1678 if (!blk_queue_nomerges(q)) {
1679 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1680 return BLK_QC_T_NONE;
1681 } else
1682 request_count = blk_plug_queued_count(q);
1683
1684 spin_lock_irq(q->queue_lock);
1685
1686 switch (elv_merge(q, &req, bio)) {
1687 case ELEVATOR_BACK_MERGE:
1688 if (!bio_attempt_back_merge(q, req, bio))
1689 break;
1690 elv_bio_merged(q, req, bio);
1691 free = attempt_back_merge(q, req);
1692 if (free)
1693 __blk_put_request(q, free);
1694 else
1695 elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
1696 goto out_unlock;
1697 case ELEVATOR_FRONT_MERGE:
1698 if (!bio_attempt_front_merge(q, req, bio))
1699 break;
1700 elv_bio_merged(q, req, bio);
1701 free = attempt_front_merge(q, req);
1702 if (free)
1703 __blk_put_request(q, free);
1704 else
1705 elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
1706 goto out_unlock;
1707 default:
1708 break;
1709 }
1710
1711 get_rq:
1712 wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
1713
1714 /*
1715	 * Grab a free request. This might sleep but cannot fail.
1716 * Returns with the queue unlocked.
1717 */
1718 req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
1719 if (IS_ERR(req)) {
1720 __wbt_done(q->rq_wb, wb_acct);
1721 bio->bi_error = PTR_ERR(req);
1722 bio_endio(bio);
1723 goto out_unlock;
1724 }
1725
1726 wbt_track(&req->issue_stat, wb_acct);
1727
1728 /*
1729 * After dropping the lock and possibly sleeping here, our request
1730 * may now be mergeable after it had proven unmergeable (above).
1731 * We don't worry about that case for efficiency. It won't happen
1732 * often, and the elevators are able to handle it.
1733 */
1734 blk_init_request_from_bio(req, bio);
1735
1736 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
1737 req->cpu = raw_smp_processor_id();
1738
1739 plug = current->plug;
1740 if (plug) {
1741 /*
1742 * If this is the first request added after a plug, fire
1743 * of a plug trace.
1744 *
1745 * @request_count may become stale because of schedule
1746 * out, so check plug list again.
1747 */
1748 if (!request_count || list_empty(&plug->list))
1749 trace_block_plug(q);
1750 else {
1751 struct request *last = list_entry_rq(plug->list.prev);
1752 if (request_count >= BLK_MAX_REQUEST_COUNT ||
1753 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
1754 blk_flush_plug_list(plug, false);
1755 trace_block_plug(q);
1756 }
1757 }
1758 list_add_tail(&req->queuelist, &plug->list);
1759 blk_account_io_start(req, true);
1760 } else {
1761 spin_lock_irq(q->queue_lock);
1762 add_acct_request(q, req, where);
1763 __blk_run_queue(q);
1764 out_unlock:
1765 spin_unlock_irq(q->queue_lock);
1766 }
1767
1768 return BLK_QC_T_NONE;
1769 }
1770
1771 /*
1772 * If bio->bi_dev is a partition, remap the location
1773 */
1774 static inline void blk_partition_remap(struct bio *bio)
1775 {
1776 struct block_device *bdev = bio->bi_bdev;
1777
1778 /*
1779 * Zone reset does not include bi_size so bio_sectors() is always 0.
1780 * Include a test for the reset op code and perform the remap if needed.
1781 */
1782 if (bdev != bdev->bd_contains &&
1783 (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) {
1784 struct hd_struct *p = bdev->bd_part;
1785
1786 bio->bi_iter.bi_sector += p->start_sect;
1787 bio->bi_bdev = bdev->bd_contains;
1788
1789 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1790 bdev->bd_dev,
1791 bio->bi_iter.bi_sector - p->start_sect);
1792 }
1793 }
1794
1795 static void handle_bad_sector(struct bio *bio)
1796 {
1797 char b[BDEVNAME_SIZE];
1798
1799 printk(KERN_INFO "attempt to access beyond end of device\n");
1800 printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
1801 bdevname(bio->bi_bdev, b),
1802 bio->bi_opf,
1803 (unsigned long long)bio_end_sector(bio),
1804 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1805 }
1806
1807 #ifdef CONFIG_FAIL_MAKE_REQUEST
1808
1809 static DECLARE_FAULT_ATTR(fail_make_request);
1810
1811 static int __init setup_fail_make_request(char *str)
1812 {
1813 return setup_fault_attr(&fail_make_request, str);
1814 }
1815 __setup("fail_make_request=", setup_fail_make_request);
1816
1817 static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
1818 {
1819 return part->make_it_fail && should_fail(&fail_make_request, bytes);
1820 }
1821
1822 static int __init fail_make_request_debugfs(void)
1823 {
1824 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1825 NULL, &fail_make_request);
1826
1827 return PTR_ERR_OR_ZERO(dir);
1828 }
1829
1830 late_initcall(fail_make_request_debugfs);
1831
1832 #else /* CONFIG_FAIL_MAKE_REQUEST */
1833
1834 static inline bool should_fail_request(struct hd_struct *part,
1835 unsigned int bytes)
1836 {
1837 return false;
1838 }
1839
1840 #endif /* CONFIG_FAIL_MAKE_REQUEST */
1841
1842 /*
1843 * Check whether this bio extends beyond the end of the device.
1844 */
1845 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1846 {
1847 sector_t maxsector;
1848
1849 if (!nr_sectors)
1850 return 0;
1851
1852 /* Test device or partition size, when known. */
1853 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1854 if (maxsector) {
1855 sector_t sector = bio->bi_iter.bi_sector;
1856
1857 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1858 /*
1859 * This may well happen - the kernel calls bread()
1860 * without checking the size of the device, e.g., when
1861 * mounting a device.
1862 */
1863 handle_bad_sector(bio);
1864 return 1;
1865 }
1866 }
1867
1868 return 0;
1869 }
1870
1871 static noinline_for_stack bool
1872 generic_make_request_checks(struct bio *bio)
1873 {
1874 struct request_queue *q;
1875 int nr_sectors = bio_sectors(bio);
1876 int err = -EIO;
1877 char b[BDEVNAME_SIZE];
1878 struct hd_struct *part;
1879
1880 might_sleep();
1881
1882 if (bio_check_eod(bio, nr_sectors))
1883 goto end_io;
1884
1885 q = bdev_get_queue(bio->bi_bdev);
1886 if (unlikely(!q)) {
1887 printk(KERN_ERR
1888 "generic_make_request: Trying to access "
1889 "nonexistent block-device %s (%Lu)\n",
1890 bdevname(bio->bi_bdev, b),
1891 (long long) bio->bi_iter.bi_sector);
1892 goto end_io;
1893 }
1894
1895 part = bio->bi_bdev->bd_part;
1896 if (should_fail_request(part, bio->bi_iter.bi_size) ||
1897 should_fail_request(&part_to_disk(part)->part0,
1898 bio->bi_iter.bi_size))
1899 goto end_io;
1900
1901 /*
1902 * If this device has partitions, remap block n
1903 * of partition p to block n+start(p) of the disk.
1904 */
1905 blk_partition_remap(bio);
1906
1907 if (bio_check_eod(bio, nr_sectors))
1908 goto end_io;
1909
1910 /*
1911 * Filter flush bio's early so that make_request based
1912 * drivers without flush support don't have to worry
1913 * about them.
1914 */
1915 if (op_is_flush(bio->bi_opf) &&
1916 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
1917 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
1918 if (!nr_sectors) {
1919 err = 0;
1920 goto end_io;
1921 }
1922 }
1923
1924 switch (bio_op(bio)) {
1925 case REQ_OP_DISCARD:
1926 if (!blk_queue_discard(q))
1927 goto not_supported;
1928 break;
1929 case REQ_OP_SECURE_ERASE:
1930 if (!blk_queue_secure_erase(q))
1931 goto not_supported;
1932 break;
1933 case REQ_OP_WRITE_SAME:
1934 if (!bdev_write_same(bio->bi_bdev))
1935 goto not_supported;
1936 break;
1937 case REQ_OP_ZONE_REPORT:
1938 case REQ_OP_ZONE_RESET:
1939 if (!bdev_is_zoned(bio->bi_bdev))
1940 goto not_supported;
1941 break;
1942 case REQ_OP_WRITE_ZEROES:
1943 if (!bdev_write_zeroes_sectors(bio->bi_bdev))
1944 goto not_supported;
1945 break;
1946 default:
1947 break;
1948 }
1949
1950 /*
1951 * Various block parts want %current->io_context and lazy ioc
1952 * allocation ends up trading a lot of pain for a small amount of
1953 * memory. Just allocate it upfront. This may fail and the block
1954 * layer knows how to live with it.
1955 */
1956 create_io_context(GFP_ATOMIC, q->node);
1957
1958 if (!blkcg_bio_issue_check(q, bio))
1959 return false;
1960
1961 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1962 trace_block_bio_queue(q, bio);
1963 /* Now that enqueuing has been traced, we need to trace
1964 * completion as well.
1965 */
1966 bio_set_flag(bio, BIO_TRACE_COMPLETION);
1967 }
1968 return true;
1969
1970 not_supported:
1971 err = -EOPNOTSUPP;
1972 end_io:
1973 bio->bi_error = err;
1974 bio_endio(bio);
1975 return false;
1976 }
1977
1978 /**
1979 * generic_make_request - hand a buffer to its device driver for I/O
1980 * @bio: The bio describing the location in memory and on the device.
1981 *
1982 * generic_make_request() is used to make I/O requests of block
1983 * devices. It is passed a &struct bio, which describes the I/O that needs
1984 * to be done.
1985 *
1986 * generic_make_request() does not return any status. The
1987 * success/failure status of the request, along with notification of
1988 * completion, is delivered asynchronously through the bio->bi_end_io
1989 * function described (one day) elsewhere.
1990 *
1991 * The caller of generic_make_request must make sure that bi_io_vec
1992 * is set to describe the memory buffer, that bi_bdev and bi_iter.bi_sector
1993 * are set to describe the device address, and that
1994 * bi_end_io and optionally bi_private are set to describe how
1995 * completion notification should be signaled.
1996 *
1997 * generic_make_request and the drivers it calls may use bi_next if this
1998 * bio happens to be merged with someone else, and may resubmit the bio to
1999 * a lower device by calling into generic_make_request recursively, which
2000 * means the bio should NOT be touched after the call to ->make_request_fn.
2001 */
2002 blk_qc_t generic_make_request(struct bio *bio)
2003 {
2004 /*
2005 * bio_list_on_stack[0] contains bios submitted by the current
2006 * make_request_fn.
2007 * bio_list_on_stack[1] contains bios that were submitted before
2008 * the current make_request_fn, but that haven't been processed
2009 * yet.
2010 */
2011 struct bio_list bio_list_on_stack[2];
2012 blk_qc_t ret = BLK_QC_T_NONE;
2013
2014 if (!generic_make_request_checks(bio))
2015 goto out;
2016
2017 /*
2018 * We only want one ->make_request_fn to be active at a time, else
2019 * stack usage with stacked devices could be a problem. So use
2020 * current->bio_list to keep a list of requests submitted by a
2021 * make_request_fn function. current->bio_list is also used as a
2022 * flag to say if generic_make_request is currently active in this
2023 * task or not. If it is NULL, then no make_request is active. If
2024 * it is non-NULL, then a make_request is active, and new requests
2025 * should be added at the tail
2026 */
2027 if (current->bio_list) {
2028 bio_list_add(&current->bio_list[0], bio);
2029 goto out;
2030 }
2031
2032 /* The following loop may be a bit non-obvious, and so deserves some
2033 * explanation.
2034 * Before entering the loop, bio->bi_next is NULL (as all callers
2035 * ensure that) so we have a list with a single bio.
2036 * We pretend that we have just taken it off a longer list, so
2037 * we assign bio_list to a pointer to the bio_list_on_stack,
2038 * thus initialising the bio_list of new bios to be
2039 * added. ->make_request() may indeed add some more bios
2040 * through a recursive call to generic_make_request. If it
2041 * did, we find a non-NULL value in bio_list and re-enter the loop
2042 * from the top. In this case we really did just take the bio
2043 * off the top of the list (no pretending) and so remove it from
2044 * bio_list, and call into ->make_request() again.
2045 */
2046 BUG_ON(bio->bi_next);
2047 bio_list_init(&bio_list_on_stack[0]);
2048 current->bio_list = bio_list_on_stack;
2049 do {
2050 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2051
2052 if (likely(blk_queue_enter(q, false) == 0)) {
2053 struct bio_list lower, same;
2054
2055 /* Create a fresh bio_list for all subordinate requests */
2056 bio_list_on_stack[1] = bio_list_on_stack[0];
2057 bio_list_init(&bio_list_on_stack[0]);
2058 ret = q->make_request_fn(q, bio);
2059
2060 blk_queue_exit(q);
2061
2062 /* sort new bios into those for a lower level
2063 * and those for the same level
2064 */
2065 bio_list_init(&lower);
2066 bio_list_init(&same);
2067 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
2068 if (q == bdev_get_queue(bio->bi_bdev))
2069 bio_list_add(&same, bio);
2070 else
2071 bio_list_add(&lower, bio);
2072 /* now assemble so we handle the lowest level first */
2073 bio_list_merge(&bio_list_on_stack[0], &lower);
2074 bio_list_merge(&bio_list_on_stack[0], &same);
2075 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
2076 } else {
2077 bio_io_error(bio);
2078 }
2079 bio = bio_list_pop(&bio_list_on_stack[0]);
2080 } while (bio);
2081 current->bio_list = NULL; /* deactivate */
2082
2083 out:
2084 return ret;
2085 }
2086 EXPORT_SYMBOL(generic_make_request);
2087
2088 /**
2089 * submit_bio - submit a bio to the block device layer for I/O
2090 * @bio: The &struct bio which describes the I/O
2091 *
2092 * submit_bio() is very similar in purpose to generic_make_request(), and
2093 * uses that function to do most of the work. Both are fairly rough
2094 * interfaces; @bio must be presetup and ready for I/O.
2095 *
2096 */
2097 blk_qc_t submit_bio(struct bio *bio)
2098 {
2099 /*
2100 * If it's a regular read/write or a barrier with data attached,
2101 * go through the normal accounting stuff before submission.
2102 */
2103 if (bio_has_data(bio)) {
2104 unsigned int count;
2105
2106 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
2107 count = bdev_logical_block_size(bio->bi_bdev) >> 9;
2108 else
2109 count = bio_sectors(bio);
2110
2111 if (op_is_write(bio_op(bio))) {
2112 count_vm_events(PGPGOUT, count);
2113 } else {
2114 task_io_account_read(bio->bi_iter.bi_size);
2115 count_vm_events(PGPGIN, count);
2116 }
2117
2118 if (unlikely(block_dump)) {
2119 char b[BDEVNAME_SIZE];
2120 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
2121 current->comm, task_pid_nr(current),
2122 op_is_write(bio_op(bio)) ? "WRITE" : "READ",
2123 (unsigned long long)bio->bi_iter.bi_sector,
2124 bdevname(bio->bi_bdev, b),
2125 count);
2126 }
2127 }
2128
2129 return generic_make_request(bio);
2130 }
2131 EXPORT_SYMBOL(submit_bio);
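/*
 * Illustrative sketch (not part of blk-core): one way a caller might build
 * and submit a single-page read with submit_bio(), waiting for the
 * asynchronous completion delivered through ->bi_end_io as described above.
 * The function and its callers are hypothetical and error handling is
 * minimal; it relies only on the headers already included by this file.
 */
static void example_read_endio(struct bio *bio)
{
	/* completion is signaled through bi_private, as set up below */
	complete(bio->bi_private);
}

static int example_read_one_page(struct block_device *bdev, sector_t sector,
				 struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio;
	int err;

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio)
		return -ENOMEM;

	/* describe the device address, the data buffer and the completion */
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	bio->bi_private = &done;
	bio->bi_end_io = example_read_endio;

	submit_bio(bio);
	wait_for_completion(&done);

	err = bio->bi_error;
	bio_put(bio);
	return err;
}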
2132
2133 /**
2134 * blk_cloned_rq_check_limits - Helper function to check a cloned request
2135 * for the new queue limits
2136 * @q: the queue
2137 * @rq: the request being checked
2138 *
2139 * Description:
2140 * @rq may have been made based on weaker limitations of upper-level queues
2141 * in request stacking drivers, and it may violate the limitations of @q.
2142 * Since the block layer and the underlying device driver trust @rq
2143 * after it is inserted to @q, it should be checked against @q before
2144 * the insertion using this generic function.
2145 *
2146 * Request stacking drivers like request-based dm may change the queue
2147 * limits when retrying requests on other queues. Those requests need
2148 * to be checked against the new queue limits again during dispatch.
2149 */
2150 static int blk_cloned_rq_check_limits(struct request_queue *q,
2151 struct request *rq)
2152 {
2153 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
2154 printk(KERN_ERR "%s: over max size limit.\n", __func__);
2155 return -EIO;
2156 }
2157
2158 /*
2159 * queue's settings related to segment counting like q->bounce_pfn
2160 * may differ from those of other stacking queues.
2161 * Recalculate it to check the request correctly against this queue's
2162 * limits.
2163 */
2164 blk_recalc_rq_segments(rq);
2165 if (rq->nr_phys_segments > queue_max_segments(q)) {
2166 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
2167 return -EIO;
2168 }
2169
2170 return 0;
2171 }
2172
2173 /**
2174 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2175 * @q: the queue to submit the request
2176 * @rq: the request being queued
2177 */
2178 int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
2179 {
2180 unsigned long flags;
2181 int where = ELEVATOR_INSERT_BACK;
2182
2183 if (blk_cloned_rq_check_limits(q, rq))
2184 return -EIO;
2185
2186 if (rq->rq_disk &&
2187 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
2188 return -EIO;
2189
2190 if (q->mq_ops) {
2191 if (blk_queue_io_stat(q))
2192 blk_account_io_start(rq, true);
2193 blk_mq_sched_insert_request(rq, false, true, false, false);
2194 return 0;
2195 }
2196
2197 spin_lock_irqsave(q->queue_lock, flags);
2198 if (unlikely(blk_queue_dying(q))) {
2199 spin_unlock_irqrestore(q->queue_lock, flags);
2200 return -ENODEV;
2201 }
2202
2203 /*
2204 * The request being submitted must be dequeued before calling this
2205 * function because it will be linked to another request_queue.
2206 */
2207 BUG_ON(blk_queued_rq(rq));
2208
2209 if (op_is_flush(rq->cmd_flags))
2210 where = ELEVATOR_INSERT_FLUSH;
2211
2212 add_acct_request(q, rq, where);
2213 if (where == ELEVATOR_INSERT_FLUSH)
2214 __blk_run_queue(q);
2215 spin_unlock_irqrestore(q->queue_lock, flags);
2216
2217 return 0;
2218 }
2219 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
2220
2221 /**
2222 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
2223 * @rq: request to examine
2224 *
2225 * Description:
2226 * A request could be a merge of IOs which require different failure
2227 * handling. This function determines the number of bytes which
2228 * can be failed from the beginning of the request without
2229 * crossing into an area which needs to be retried further.
2230 *
2231 * Return:
2232 * The number of bytes to fail.
2233 *
2234 * Context:
2235 * queue_lock must be held.
2236 */
2237 unsigned int blk_rq_err_bytes(const struct request *rq)
2238 {
2239 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
2240 unsigned int bytes = 0;
2241 struct bio *bio;
2242
2243 if (!(rq->rq_flags & RQF_MIXED_MERGE))
2244 return blk_rq_bytes(rq);
2245
2246 /*
2247 * Currently the only 'mixing' which can happen is between
2248 * different failfast types. We can safely fail portions
2249 * which have all the failfast bits that the first one has -
2250 * the ones which are at least as eager to fail as the first
2251 * one.
2252 */
2253 for (bio = rq->bio; bio; bio = bio->bi_next) {
2254 if ((bio->bi_opf & ff) != ff)
2255 break;
2256 bytes += bio->bi_iter.bi_size;
2257 }
2258
2259 /* this could lead to infinite loop */
2260 BUG_ON(blk_rq_bytes(rq) && !bytes);
2261 return bytes;
2262 }
2263 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
2264
2265 void blk_account_io_completion(struct request *req, unsigned int bytes)
2266 {
2267 if (blk_do_io_stat(req)) {
2268 const int rw = rq_data_dir(req);
2269 struct hd_struct *part;
2270 int cpu;
2271
2272 cpu = part_stat_lock();
2273 part = req->part;
2274 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
2275 part_stat_unlock();
2276 }
2277 }
2278
2279 void blk_account_io_done(struct request *req)
2280 {
2281 /*
2282 * Account IO completion. flush_rq isn't accounted as a
2283 * normal IO on queueing or completion. Accounting the
2284 * containing request is enough.
2285 */
2286 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
2287 unsigned long duration = jiffies - req->start_time;
2288 const int rw = rq_data_dir(req);
2289 struct hd_struct *part;
2290 int cpu;
2291
2292 cpu = part_stat_lock();
2293 part = req->part;
2294
2295 part_stat_inc(cpu, part, ios[rw]);
2296 part_stat_add(cpu, part, ticks[rw], duration);
2297 part_round_stats(cpu, part);
2298 part_dec_in_flight(part, rw);
2299
2300 hd_struct_put(part);
2301 part_stat_unlock();
2302 }
2303 }
2304
2305 #ifdef CONFIG_PM
2306 /*
2307 * Don't process normal requests when queue is suspended
2308 * or in the process of suspending/resuming
2309 */
2310 static struct request *blk_pm_peek_request(struct request_queue *q,
2311 struct request *rq)
2312 {
2313 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
2314 (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
2315 return NULL;
2316 else
2317 return rq;
2318 }
2319 #else
2320 static inline struct request *blk_pm_peek_request(struct request_queue *q,
2321 struct request *rq)
2322 {
2323 return rq;
2324 }
2325 #endif
2326
2327 void blk_account_io_start(struct request *rq, bool new_io)
2328 {
2329 struct hd_struct *part;
2330 int rw = rq_data_dir(rq);
2331 int cpu;
2332
2333 if (!blk_do_io_stat(rq))
2334 return;
2335
2336 cpu = part_stat_lock();
2337
2338 if (!new_io) {
2339 part = rq->part;
2340 part_stat_inc(cpu, part, merges[rw]);
2341 } else {
2342 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
2343 if (!hd_struct_try_get(part)) {
2344 /*
2345 * The partition is already being removed;
2346 * the request will be accounted on the disk only.
2347 *
2348 * We take a reference on disk->part0, although that
2349 * partition will never be deleted, so that we can treat
2350 * it like any other partition.
2351 */
2352 part = &rq->rq_disk->part0;
2353 hd_struct_get(part);
2354 }
2355 part_round_stats(cpu, part);
2356 part_inc_in_flight(part, rw);
2357 rq->part = part;
2358 }
2359
2360 part_stat_unlock();
2361 }
2362
2363 /**
2364 * blk_peek_request - peek at the top of a request queue
2365 * @q: request queue to peek at
2366 *
2367 * Description:
2368 * Return the request at the top of @q. The returned request
2369 * should be started using blk_start_request() before LLD starts
2370 * processing it.
2371 *
2372 * Return:
2373 * Pointer to the request at the top of @q if available. Null
2374 * otherwise.
2375 *
2376 * Context:
2377 * queue_lock must be held.
2378 */
2379 struct request *blk_peek_request(struct request_queue *q)
2380 {
2381 struct request *rq;
2382 int ret;
2383
2384 while ((rq = __elv_next_request(q)) != NULL) {
2385
2386 rq = blk_pm_peek_request(q, rq);
2387 if (!rq)
2388 break;
2389
2390 if (!(rq->rq_flags & RQF_STARTED)) {
2391 /*
2392 * This is the first time the device driver
2393 * sees this request (possibly after
2394 * requeueing). Notify IO scheduler.
2395 */
2396 if (rq->rq_flags & RQF_SORTED)
2397 elv_activate_rq(q, rq);
2398
2399 /*
2400 * Just mark it as started even if we don't start
2401 * it; a request that has been delayed should
2402 * not be passed by new incoming requests.
2403 */
2404 rq->rq_flags |= RQF_STARTED;
2405 trace_block_rq_issue(q, rq);
2406 }
2407
2408 if (!q->boundary_rq || q->boundary_rq == rq) {
2409 q->end_sector = rq_end_sector(rq);
2410 q->boundary_rq = NULL;
2411 }
2412
2413 if (rq->rq_flags & RQF_DONTPREP)
2414 break;
2415
2416 if (q->dma_drain_size && blk_rq_bytes(rq)) {
2417 /*
2418 * make sure space for the drain appears; we
2419 * know we can do this because max_hw_segments
2420 * has been adjusted to be one fewer than the
2421 * device can handle
2422 */
2423 rq->nr_phys_segments++;
2424 }
2425
2426 if (!q->prep_rq_fn)
2427 break;
2428
2429 ret = q->prep_rq_fn(q, rq);
2430 if (ret == BLKPREP_OK) {
2431 break;
2432 } else if (ret == BLKPREP_DEFER) {
2433 /*
2434 * the request may have been (partially) prepped.
2435 * we need to keep this request in the front to
2436 * avoid resource deadlock. RQF_STARTED will
2437 * prevent other fs requests from passing this one.
2438 */
2439 if (q->dma_drain_size && blk_rq_bytes(rq) &&
2440 !(rq->rq_flags & RQF_DONTPREP)) {
2441 /*
2442 * remove the space for the drain we added
2443 * so that we don't add it again
2444 */
2445 --rq->nr_phys_segments;
2446 }
2447
2448 rq = NULL;
2449 break;
2450 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
2451 int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
2452
2453 rq->rq_flags |= RQF_QUIET;
2454 /*
2455 * Mark this request as started so we don't trigger
2456 * any debug logic in the end I/O path.
2457 */
2458 blk_start_request(rq);
2459 __blk_end_request_all(rq, err);
2460 } else {
2461 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2462 break;
2463 }
2464 }
2465
2466 return rq;
2467 }
2468 EXPORT_SYMBOL(blk_peek_request);
2469
2470 void blk_dequeue_request(struct request *rq)
2471 {
2472 struct request_queue *q = rq->q;
2473
2474 BUG_ON(list_empty(&rq->queuelist));
2475 BUG_ON(ELV_ON_HASH(rq));
2476
2477 list_del_init(&rq->queuelist);
2478
2479 /*
2480 * The time frame between a request being removed from the lists
2481 * and when it is freed is accounted as I/O that is in progress on
2482 * the driver side.
2483 */
2484 if (blk_account_rq(rq)) {
2485 q->in_flight[rq_is_sync(rq)]++;
2486 set_io_start_time_ns(rq);
2487 }
2488 }
2489
2490 /**
2491 * blk_start_request - start request processing on the driver
2492 * @req: request to dequeue
2493 *
2494 * Description:
2495 * Dequeue @req and start the timeout timer on it. This hands off the
2496 * request to the driver.
2497 *
2498 * Block internal functions which don't want to start the timer should
2499 * call blk_dequeue_request().
2500 *
2501 * Context:
2502 * queue_lock must be held.
2503 */
2504 void blk_start_request(struct request *req)
2505 {
2506 blk_dequeue_request(req);
2507
2508 if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
2509 blk_stat_set_issue(&req->issue_stat, blk_rq_sectors(req));
2510 req->rq_flags |= RQF_STATS;
2511 wbt_issue(req->q->rq_wb, &req->issue_stat);
2512 }
2513
2514 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
2515 blk_add_timer(req);
2516 }
2517 EXPORT_SYMBOL(blk_start_request);
2518
2519 /**
2520 * blk_fetch_request - fetch a request from a request queue
2521 * @q: request queue to fetch a request from
2522 *
2523 * Description:
2524 * Return the request at the top of @q. The request is started on
2525 * return and LLD can start processing it immediately.
2526 *
2527 * Return:
2528 * Pointer to the request at the top of @q if available. Null
2529 * otherwise.
2530 *
2531 * Context:
2532 * queue_lock must be held.
2533 */
2534 struct request *blk_fetch_request(struct request_queue *q)
2535 {
2536 struct request *rq;
2537
2538 rq = blk_peek_request(q);
2539 if (rq)
2540 blk_start_request(rq);
2541 return rq;
2542 }
2543 EXPORT_SYMBOL(blk_fetch_request);
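/*
 * Illustrative sketch (not part of blk-core): the classic dispatch loop of a
 * legacy (non-mq) driver's ->request_fn, built on blk_fetch_request().  The
 * request_fn is invoked with the queue lock held, which is why the lock-held
 * completion helper is used here.  example_handle() is a hypothetical
 * stand-in for real hardware submission; a real driver would usually
 * complete requests later, from its interrupt handler.
 */
static int example_handle(struct request *rq)
{
	/*
	 * A real driver would program the hardware here, using blk_rq_pos(),
	 * blk_rq_bytes() and rq_for_each_segment() to walk the data.
	 */
	return 0;
}

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* fetch both peeks at and starts the request */
	while ((rq = blk_fetch_request(q)) != NULL) {
		int err = example_handle(rq);

		/* complete the whole request; the queue lock is still held */
		__blk_end_request_all(rq, err);
	}
}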
2544
2545 /**
2546 * blk_update_request - Special helper function for request stacking drivers
2547 * @req: the request being processed
2548 * @error: %0 for success, < %0 for error
2549 * @nr_bytes: number of bytes to complete @req
2550 *
2551 * Description:
2552 * Ends I/O on a number of bytes attached to @req, but doesn't complete
2553 * the request structure even if @req doesn't have leftover.
2554 * If @req has leftover, sets it up for the next range of segments.
2555 *
2556 * This special helper function is only for request stacking drivers
2557 * (e.g. request-based dm) so that they can handle partial completion.
2558 * Actual device drivers should use blk_end_request instead.
2559 *
2560 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2561 * %false return from this function.
2562 *
2563 * Return:
2564 * %false - this request doesn't have any more data
2565 * %true - this request has more data
2566 **/
2567 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2568 {
2569 int total_bytes;
2570
2571 trace_block_rq_complete(req->q, req, nr_bytes);
2572
2573 if (!req->bio)
2574 return false;
2575
2576 /*
2577 * For fs requests, rq is just a carrier of independent bios
2578 * and each partial completion should be handled separately.
2579 * Reset per-request error on each partial completion.
2580 *
2581 * TODO: tj: This is too subtle. It would be better to let
2582 * low level drivers do what they see fit.
2583 */
2584 if (!blk_rq_is_passthrough(req))
2585 req->errors = 0;
2586
2587 if (error && !blk_rq_is_passthrough(req) &&
2588 !(req->rq_flags & RQF_QUIET)) {
2589 char *error_type;
2590
2591 switch (error) {
2592 case -ENOLINK:
2593 error_type = "recoverable transport";
2594 break;
2595 case -EREMOTEIO:
2596 error_type = "critical target";
2597 break;
2598 case -EBADE:
2599 error_type = "critical nexus";
2600 break;
2601 case -ETIMEDOUT:
2602 error_type = "timeout";
2603 break;
2604 case -ENOSPC:
2605 error_type = "critical space allocation";
2606 break;
2607 case -ENODATA:
2608 error_type = "critical medium";
2609 break;
2610 case -EIO:
2611 default:
2612 error_type = "I/O";
2613 break;
2614 }
2615 printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
2616 __func__, error_type, req->rq_disk ?
2617 req->rq_disk->disk_name : "?",
2618 (unsigned long long)blk_rq_pos(req));
2619
2620 }
2621
2622 blk_account_io_completion(req, nr_bytes);
2623
2624 total_bytes = 0;
2625 while (req->bio) {
2626 struct bio *bio = req->bio;
2627 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
2628
2629 if (bio_bytes == bio->bi_iter.bi_size)
2630 req->bio = bio->bi_next;
2631
2632 /* Completion has already been traced */
2633 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
2634 req_bio_endio(req, bio, bio_bytes, error);
2635
2636 total_bytes += bio_bytes;
2637 nr_bytes -= bio_bytes;
2638
2639 if (!nr_bytes)
2640 break;
2641 }
2642
2643 /*
2644 * completely done
2645 */
2646 if (!req->bio) {
2647 /*
2648 * Reset counters so that the request stacking driver
2649 * can find how many bytes remain in the request
2650 * later.
2651 */
2652 req->__data_len = 0;
2653 return false;
2654 }
2655
2656 WARN_ON_ONCE(req->rq_flags & RQF_SPECIAL_PAYLOAD);
2657
2658 req->__data_len -= total_bytes;
2659
2660 /* update sector only for requests with clear definition of sector */
2661 if (!blk_rq_is_passthrough(req))
2662 req->__sector += total_bytes >> 9;
2663
2664 /* mixed attributes always follow the first bio */
2665 if (req->rq_flags & RQF_MIXED_MERGE) {
2666 req->cmd_flags &= ~REQ_FAILFAST_MASK;
2667 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
2668 }
2669
2670 /*
2671 * If total number of sectors is less than the first segment
2672 * size, something has gone terribly wrong.
2673 */
2674 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2675 blk_dump_rq_flags(req, "request botched");
2676 req->__data_len = blk_rq_cur_bytes(req);
2677 }
2678
2679 /* recalculate the number of segments */
2680 blk_recalc_rq_segments(req);
2681
2682 return true;
2683 }
2684 EXPORT_SYMBOL_GPL(blk_update_request);
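/*
 * Illustrative sketch (not part of blk-core): how a driver could use
 * blk_update_request() directly when it learns that only part of a request
 * has finished.  This is essentially what blk_end_request() below does; the
 * function and its caller are hypothetical.
 */
static void example_complete_bytes(struct request *rq, int error,
				   unsigned int done_bytes)
{
	unsigned long flags;

	if (blk_update_request(rq, error, done_bytes))
		return;	/* bytes remain; @rq is set up for the next range */

	/* everything is done: finish the request under the queue lock */
	spin_lock_irqsave(rq->q->queue_lock, flags);
	blk_finish_request(rq, error);
	spin_unlock_irqrestore(rq->q->queue_lock, flags);
}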
2685
2686 static bool blk_update_bidi_request(struct request *rq, int error,
2687 unsigned int nr_bytes,
2688 unsigned int bidi_bytes)
2689 {
2690 if (blk_update_request(rq, error, nr_bytes))
2691 return true;
2692
2693 /* Bidi request must be completed as a whole */
2694 if (unlikely(blk_bidi_rq(rq)) &&
2695 blk_update_request(rq->next_rq, error, bidi_bytes))
2696 return true;
2697
2698 if (blk_queue_add_random(rq->q))
2699 add_disk_randomness(rq->rq_disk);
2700
2701 return false;
2702 }
2703
2704 /**
2705 * blk_unprep_request - unprepare a request
2706 * @req: the request
2707 *
2708 * This function makes a request ready for complete resubmission (or
2709 * completion). It happens only after all error handling is complete,
2710 * so it represents the appropriate moment to deallocate any resources
2711 * that were allocated to the request in the prep_rq_fn. The queue
2712 * lock is held when calling this.
2713 */
2714 void blk_unprep_request(struct request *req)
2715 {
2716 struct request_queue *q = req->q;
2717
2718 req->rq_flags &= ~RQF_DONTPREP;
2719 if (q->unprep_rq_fn)
2720 q->unprep_rq_fn(q, req);
2721 }
2722 EXPORT_SYMBOL_GPL(blk_unprep_request);
2723
2724 /*
2725 * queue lock must be held
2726 */
2727 void blk_finish_request(struct request *req, int error)
2728 {
2729 struct request_queue *q = req->q;
2730
2731 if (req->rq_flags & RQF_STATS)
2732 blk_stat_add(req);
2733
2734 if (req->rq_flags & RQF_QUEUED)
2735 blk_queue_end_tag(q, req);
2736
2737 BUG_ON(blk_queued_rq(req));
2738
2739 if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
2740 laptop_io_completion(req->q->backing_dev_info);
2741
2742 blk_delete_timer(req);
2743
2744 if (req->rq_flags & RQF_DONTPREP)
2745 blk_unprep_request(req);
2746
2747 blk_account_io_done(req);
2748
2749 if (req->end_io) {
2750 wbt_done(req->q->rq_wb, &req->issue_stat);
2751 req->end_io(req, error);
2752 } else {
2753 if (blk_bidi_rq(req))
2754 __blk_put_request(req->next_rq->q, req->next_rq);
2755
2756 __blk_put_request(q, req);
2757 }
2758 }
2759 EXPORT_SYMBOL(blk_finish_request);
2760
2761 /**
2762 * blk_end_bidi_request - Complete a bidi request
2763 * @rq: the request to complete
2764 * @error: %0 for success, < %0 for error
2765 * @nr_bytes: number of bytes to complete @rq
2766 * @bidi_bytes: number of bytes to complete @rq->next_rq
2767 *
2768 * Description:
2769 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2770 * Drivers that support bidi can safely call this function for any
2771 * type of request, bidi or uni. In the latter case @bidi_bytes is
2772 * just ignored.
2773 *
2774 * Return:
2775 * %false - we are done with this request
2776 * %true - still buffers pending for this request
2777 **/
2778 static bool blk_end_bidi_request(struct request *rq, int error,
2779 unsigned int nr_bytes, unsigned int bidi_bytes)
2780 {
2781 struct request_queue *q = rq->q;
2782 unsigned long flags;
2783
2784 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2785 return true;
2786
2787 spin_lock_irqsave(q->queue_lock, flags);
2788 blk_finish_request(rq, error);
2789 spin_unlock_irqrestore(q->queue_lock, flags);
2790
2791 return false;
2792 }
2793
2794 /**
2795 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2796 * @rq: the request to complete
2797 * @error: %0 for success, < %0 for error
2798 * @nr_bytes: number of bytes to complete @rq
2799 * @bidi_bytes: number of bytes to complete @rq->next_rq
2800 *
2801 * Description:
2802 * Identical to blk_end_bidi_request() except that queue lock is
2803 * assumed to be locked on entry and remains so on return.
2804 *
2805 * Return:
2806 * %false - we are done with this request
2807 * %true - still buffers pending for this request
2808 **/
2809 static bool __blk_end_bidi_request(struct request *rq, int error,
2810 unsigned int nr_bytes, unsigned int bidi_bytes)
2811 {
2812 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2813 return true;
2814
2815 blk_finish_request(rq, error);
2816
2817 return false;
2818 }
2819
2820 /**
2821 * blk_end_request - Helper function for drivers to complete the request.
2822 * @rq: the request being processed
2823 * @error: %0 for success, < %0 for error
2824 * @nr_bytes: number of bytes to complete
2825 *
2826 * Description:
2827 * Ends I/O on a number of bytes attached to @rq.
2828 * If @rq has leftover, sets it up for the next range of segments.
2829 *
2830 * Return:
2831 * %false - we are done with this request
2832 * %true - still buffers pending for this request
2833 **/
2834 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2835 {
2836 return blk_end_bidi_request(rq, error, nr_bytes, 0);
2837 }
2838 EXPORT_SYMBOL(blk_end_request);
2839
2840 /**
2841 * blk_end_request_all - Helper function for drivers to finish the request.
2842 * @rq: the request to finish
2843 * @error: %0 for success, < %0 for error
2844 *
2845 * Description:
2846 * Completely finish @rq.
2847 */
2848 void blk_end_request_all(struct request *rq, int error)
2849 {
2850 bool pending;
2851 unsigned int bidi_bytes = 0;
2852
2853 if (unlikely(blk_bidi_rq(rq)))
2854 bidi_bytes = blk_rq_bytes(rq->next_rq);
2855
2856 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2857 BUG_ON(pending);
2858 }
2859 EXPORT_SYMBOL(blk_end_request_all);
2860
2861 /**
2862 * __blk_end_request - Helper function for drivers to complete the request.
2863 * @rq: the request being processed
2864 * @error: %0 for success, < %0 for error
2865 * @nr_bytes: number of bytes to complete
2866 *
2867 * Description:
2868 * Must be called with queue lock held unlike blk_end_request().
2869 *
2870 * Return:
2871 * %false - we are done with this request
2872 * %true - still buffers pending for this request
2873 **/
2874 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2875 {
2876 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2877 }
2878 EXPORT_SYMBOL(__blk_end_request);
2879
2880 /**
2881 * __blk_end_request_all - Helper function for drivers to finish the request.
2882 * @rq: the request to finish
2883 * @error: %0 for success, < %0 for error
2884 *
2885 * Description:
2886 * Completely finish @rq. Must be called with queue lock held.
2887 */
2888 void __blk_end_request_all(struct request *rq, int error)
2889 {
2890 bool pending;
2891 unsigned int bidi_bytes = 0;
2892
2893 if (unlikely(blk_bidi_rq(rq)))
2894 bidi_bytes = blk_rq_bytes(rq->next_rq);
2895
2896 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2897 BUG_ON(pending);
2898 }
2899 EXPORT_SYMBOL(__blk_end_request_all);
2900
2901 /**
2902 * __blk_end_request_cur - Helper function to finish the current request chunk.
2903 * @rq: the request to finish the current chunk for
2904 * @error: %0 for success, < %0 for error
2905 *
2906 * Description:
2907 * Complete the current consecutively mapped chunk from @rq. Must
2908 * be called with queue lock held.
2909 *
2910 * Return:
2911 * %false - we are done with this request
2912 * %true - still buffers pending for this request
2913 */
2914 bool __blk_end_request_cur(struct request *rq, int error)
2915 {
2916 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2917 }
2918 EXPORT_SYMBOL(__blk_end_request_cur);
2919
2920 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2921 struct bio *bio)
2922 {
2923 if (bio_has_data(bio))
2924 rq->nr_phys_segments = bio_phys_segments(q, bio);
2925
2926 rq->__data_len = bio->bi_iter.bi_size;
2927 rq->bio = rq->biotail = bio;
2928
2929 if (bio->bi_bdev)
2930 rq->rq_disk = bio->bi_bdev->bd_disk;
2931 }
2932
2933 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2934 /**
2935 * rq_flush_dcache_pages - Helper function to flush all pages in a request
2936 * @rq: the request to be flushed
2937 *
2938 * Description:
2939 * Flush all pages in @rq.
2940 */
2941 void rq_flush_dcache_pages(struct request *rq)
2942 {
2943 struct req_iterator iter;
2944 struct bio_vec bvec;
2945
2946 rq_for_each_segment(bvec, rq, iter)
2947 flush_dcache_page(bvec.bv_page);
2948 }
2949 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2950 #endif
2951
2952 /**
2953 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2954 * @q : the queue of the device being checked
2955 *
2956 * Description:
2957 * Check if underlying low-level drivers of a device are busy.
2958 * If the drivers want to export their busy state, they must set their own
2959 * exporting function using blk_queue_lld_busy() first.
2960 *
2961 * Basically, this function is used only by request stacking drivers
2962 * to stop dispatching requests to underlying devices when underlying
2963 * devices are busy. This behavior allows more I/O merging on the queue
2964 * of the request stacking driver and prevents I/O throughput regressions
2965 * under bursty I/O load.
2966 *
2967 * Return:
2968 * 0 - Not busy (The request stacking driver should dispatch request)
2969 * 1 - Busy (The request stacking driver should stop dispatching request)
2970 */
2971 int blk_lld_busy(struct request_queue *q)
2972 {
2973 if (q->lld_busy_fn)
2974 return q->lld_busy_fn(q);
2975
2976 return 0;
2977 }
2978 EXPORT_SYMBOL_GPL(blk_lld_busy);
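/*
 * Illustrative sketch (not part of blk-core): a lower-level driver exporting
 * its busy state so that a stacking driver's blk_lld_busy() check works.
 * The busy test itself is hypothetical; the hook is registered with
 * blk_queue_lld_busy() during queue setup, as the comment above notes.
 */
static int example_lld_busy(struct request_queue *q)
{
	/* return non-zero while the hardware cannot accept more requests */
	return 0;
}

/* during driver initialisation: blk_queue_lld_busy(q, example_lld_busy); */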
2979
2980 /**
2981 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2982 * @rq: the clone request to be cleaned up
2983 *
2984 * Description:
2985 * Free all bios in @rq for a cloned request.
2986 */
2987 void blk_rq_unprep_clone(struct request *rq)
2988 {
2989 struct bio *bio;
2990
2991 while ((bio = rq->bio) != NULL) {
2992 rq->bio = bio->bi_next;
2993
2994 bio_put(bio);
2995 }
2996 }
2997 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2998
2999 /*
3000 * Copy attributes of the original request to the clone request.
3001 * The actual data parts (e.g. ->cmd, ->sense) are not copied.
3002 */
3003 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
3004 {
3005 dst->cpu = src->cpu;
3006 dst->__sector = blk_rq_pos(src);
3007 dst->__data_len = blk_rq_bytes(src);
3008 dst->nr_phys_segments = src->nr_phys_segments;
3009 dst->ioprio = src->ioprio;
3010 dst->extra_len = src->extra_len;
3011 }
3012
3013 /**
3014 * blk_rq_prep_clone - Helper function to setup clone request
3015 * @rq: the request to be setup
3016 * @rq_src: original request to be cloned
3017 * @bs: bio_set that bios for clone are allocated from
3018 * @gfp_mask: memory allocation mask for bio
3019 * @bio_ctr: setup function to be called for each clone bio.
3020 * Returns %0 for success, non %0 for failure.
3021 * @data: private data to be passed to @bio_ctr
3022 *
3023 * Description:
3024 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3025 * The actual data parts of @rq_src (e.g. ->cmd, ->sense)
3026 * are not copied, and copying such parts is the caller's responsibility.
3027 * Also, pages which the original bios are pointing to are not copied
3028 * and the cloned bios just point to the same pages.
3029 * So cloned bios must be completed before the original bios, which means
3030 * the caller must complete @rq before @rq_src.
3031 */
3032 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3033 struct bio_set *bs, gfp_t gfp_mask,
3034 int (*bio_ctr)(struct bio *, struct bio *, void *),
3035 void *data)
3036 {
3037 struct bio *bio, *bio_src;
3038
3039 if (!bs)
3040 bs = fs_bio_set;
3041
3042 __rq_for_each_bio(bio_src, rq_src) {
3043 bio = bio_clone_fast(bio_src, gfp_mask, bs);
3044 if (!bio)
3045 goto free_and_out;
3046
3047 if (bio_ctr && bio_ctr(bio, bio_src, data))
3048 goto free_and_out;
3049
3050 if (rq->bio) {
3051 rq->biotail->bi_next = bio;
3052 rq->biotail = bio;
3053 } else
3054 rq->bio = rq->biotail = bio;
3055 }
3056
3057 __blk_rq_prep_clone(rq, rq_src);
3058
3059 return 0;
3060
3061 free_and_out:
3062 if (bio)
3063 bio_put(bio);
3064 blk_rq_unprep_clone(rq);
3065
3066 return -ENOMEM;
3067 }
3068 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
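/*
 * Illustrative sketch (not part of blk-core): how a request stacking driver
 * such as request-based dm might clone a request and dispatch the clone to a
 * lower device.  @clone is assumed to have been allocated from @lower_q
 * (e.g. with blk_get_request()); no bio_set or per-bio constructor is passed,
 * so the bios are cloned from fs_bio_set as described above.
 */
static int example_clone_and_dispatch(struct request_queue *lower_q,
				      struct request *clone,
				      struct request *rq_src)
{
	int ret;

	ret = blk_rq_prep_clone(clone, rq_src, NULL, GFP_ATOMIC, NULL, NULL);
	if (ret)
		return ret;

	/*
	 * The clone is checked against @lower_q's limits and inserted; on
	 * failure the cloned bios are released again.
	 */
	ret = blk_insert_cloned_request(lower_q, clone);
	if (ret)
		blk_rq_unprep_clone(clone);
	return ret;
}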
3069
3070 int kblockd_schedule_work(struct work_struct *work)
3071 {
3072 return queue_work(kblockd_workqueue, work);
3073 }
3074 EXPORT_SYMBOL(kblockd_schedule_work);
3075
3076 int kblockd_schedule_work_on(int cpu, struct work_struct *work)
3077 {
3078 return queue_work_on(cpu, kblockd_workqueue, work);
3079 }
3080 EXPORT_SYMBOL(kblockd_schedule_work_on);
3081
3082 int kblockd_schedule_delayed_work(struct delayed_work *dwork,
3083 unsigned long delay)
3084 {
3085 return queue_delayed_work(kblockd_workqueue, dwork, delay);
3086 }
3087 EXPORT_SYMBOL(kblockd_schedule_delayed_work);
3088
3089 int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
3090 unsigned long delay)
3091 {
3092 return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
3093 }
3094 EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
3095
3096 /**
3097 * blk_start_plug - initialize blk_plug and track it inside the task_struct
3098 * @plug: The &struct blk_plug that needs to be initialized
3099 *
3100 * Description:
3101 * Tracking blk_plug inside the task_struct will help with auto-flushing the
3102 * pending I/O should the task end up blocking between blk_start_plug() and
3103 * blk_finish_plug(). This is important from a performance perspective, but
3104 * also ensures that we don't deadlock. For instance, if the task is blocking
3105 * for a memory allocation, memory reclaim could end up wanting to free a
3106 * page belonging to that request that is currently residing in our private
3107 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
3108 * this kind of deadlock.
3109 */
3110 void blk_start_plug(struct blk_plug *plug)
3111 {
3112 struct task_struct *tsk = current;
3113
3114 /*
3115 * If this is a nested plug, don't actually assign it.
3116 */
3117 if (tsk->plug)
3118 return;
3119
3120 INIT_LIST_HEAD(&plug->list);
3121 INIT_LIST_HEAD(&plug->mq_list);
3122 INIT_LIST_HEAD(&plug->cb_list);
3123 /*
3124 * Store ordering should not be needed here, since a potential
3125 * preempt will imply a full memory barrier
3126 */
3127 tsk->plug = plug;
3128 }
3129 EXPORT_SYMBOL(blk_start_plug);
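/*
 * Illustrative sketch (not part of blk-core): batching several submissions
 * under one on-stack plug, as described for blk_start_plug() above.  The
 * bios are assumed to be fully set up by the (hypothetical) caller.
 */
static void example_submit_batch(struct bio **bios, unsigned int nr)
{
	struct blk_plug plug;
	unsigned int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	/* flushes the plugged requests and unplugs, unless nested */
	blk_finish_plug(&plug);
}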
3130
3131 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
3132 {
3133 struct request *rqa = container_of(a, struct request, queuelist);
3134 struct request *rqb = container_of(b, struct request, queuelist);
3135
3136 return !(rqa->q < rqb->q ||
3137 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
3138 }
3139
3140 /*
3141 * If 'from_schedule' is true, then postpone the dispatch of requests
3142 * until a safe kblockd context. We do this to avoid accidentally adding
3143 * a lot of extra stack usage in driver dispatch, in places where the original
3144 * plugger did not intend it.
3145 */
3146 static void queue_unplugged(struct request_queue *q, unsigned int depth,
3147 bool from_schedule)
3148 __releases(q->queue_lock)
3149 {
3150 trace_block_unplug(q, depth, !from_schedule);
3151
3152 if (from_schedule)
3153 blk_run_queue_async(q);
3154 else
3155 __blk_run_queue(q);
3156 spin_unlock(q->queue_lock);
3157 }
3158
3159 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
3160 {
3161 LIST_HEAD(callbacks);
3162
3163 while (!list_empty(&plug->cb_list)) {
3164 list_splice_init(&plug->cb_list, &callbacks);
3165
3166 while (!list_empty(&callbacks)) {
3167 struct blk_plug_cb *cb = list_first_entry(&callbacks,
3168 struct blk_plug_cb,
3169 list);
3170 list_del(&cb->list);
3171 cb->callback(cb, from_schedule);
3172 }
3173 }
3174 }
3175
3176 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
3177 int size)
3178 {
3179 struct blk_plug *plug = current->plug;
3180 struct blk_plug_cb *cb;
3181
3182 if (!plug)
3183 return NULL;
3184
3185 list_for_each_entry(cb, &plug->cb_list, list)
3186 if (cb->callback == unplug && cb->data == data)
3187 return cb;
3188
3189 /* Not currently on the callback list */
3190 BUG_ON(size < sizeof(*cb));
3191 cb = kzalloc(size, GFP_ATOMIC);
3192 if (cb) {
3193 cb->data = data;
3194 cb->callback = unplug;
3195 list_add(&cb->list, &plug->cb_list);
3196 }
3197 return cb;
3198 }
3199 EXPORT_SYMBOL(blk_check_plugged);
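/*
 * Illustrative sketch (not part of blk-core): how a driver (md's raid
 * personalities use this pattern) can piggy-back its own batching on the
 * caller's plug via blk_check_plugged().  The private structure embeds
 * struct blk_plug_cb and is freed by the unplug callback; the batching
 * details are hypothetical.
 */
struct example_plug_cb {
	struct blk_plug_cb cb;		/* must be embedded for container_of */
	struct bio_list pending;	/* bios deferred until unplug */
};

static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct example_plug_cb *ecb = container_of(cb, struct example_plug_cb, cb);

	/* a real driver would submit everything on ecb->pending here */
	kfree(ecb);
}

static bool example_defer_to_plug(struct bio *bio, void *driver_data)
{
	struct blk_plug_cb *cb;
	struct example_plug_cb *ecb;

	cb = blk_check_plugged(example_unplug, driver_data,
			       sizeof(struct example_plug_cb));
	if (!cb)
		return false;	/* no plug active; caller must submit now */

	ecb = container_of(cb, struct example_plug_cb, cb);
	bio_list_add(&ecb->pending, bio);
	return true;
}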
3200
3201 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3202 {
3203 struct request_queue *q;
3204 unsigned long flags;
3205 struct request *rq;
3206 LIST_HEAD(list);
3207 unsigned int depth;
3208
3209 flush_plug_callbacks(plug, from_schedule);
3210
3211 if (!list_empty(&plug->mq_list))
3212 blk_mq_flush_plug_list(plug, from_schedule);
3213
3214 if (list_empty(&plug->list))
3215 return;
3216
3217 list_splice_init(&plug->list, &list);
3218
3219 list_sort(NULL, &list, plug_rq_cmp);
3220
3221 q = NULL;
3222 depth = 0;
3223
3224 /*
3225 * Save and disable interrupts here, to avoid doing it for every
3226 * queue lock we have to take.
3227 */
3228 local_irq_save(flags);
3229 while (!list_empty(&list)) {
3230 rq = list_entry_rq(list.next);
3231 list_del_init(&rq->queuelist);
3232 BUG_ON(!rq->q);
3233 if (rq->q != q) {
3234 /*
3235 * This drops the queue lock
3236 */
3237 if (q)
3238 queue_unplugged(q, depth, from_schedule);
3239 q = rq->q;
3240 depth = 0;
3241 spin_lock(q->queue_lock);
3242 }
3243
3244 /*
3245 * Short-circuit if @q is dead
3246 */
3247 if (unlikely(blk_queue_dying(q))) {
3248 __blk_end_request_all(rq, -ENODEV);
3249 continue;
3250 }
3251
3252 /*
3253 * rq is already accounted, so use raw insert
3254 */
3255 if (op_is_flush(rq->cmd_flags))
3256 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3257 else
3258 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
3259
3260 depth++;
3261 }
3262
3263 /*
3264 * This drops the queue lock
3265 */
3266 if (q)
3267 queue_unplugged(q, depth, from_schedule);
3268
3269 local_irq_restore(flags);
3270 }
3271
3272 void blk_finish_plug(struct blk_plug *plug)
3273 {
3274 if (plug != current->plug)
3275 return;
3276 blk_flush_plug_list(plug, false);
3277
3278 current->plug = NULL;
3279 }
3280 EXPORT_SYMBOL(blk_finish_plug);
3281
3282 #ifdef CONFIG_PM
3283 /**
3284 * blk_pm_runtime_init - Block layer runtime PM initialization routine
3285 * @q: the queue of the device
3286 * @dev: the device the queue belongs to
3287 *
3288 * Description:
3289 * Initialize runtime-PM-related fields for @q and start auto suspend for
3290 * @dev. Drivers that want to take advantage of request-based runtime PM
3291 * should call this function after @dev has been initialized, and its
3292 * request queue @q has been allocated, and runtime PM for it cannot happen
3293 * yet (either because it is disabled/forbidden or its usage_count > 0). In
3294 * most cases, the driver should call this function before any I/O has taken place.
3295 *
3296 * This function takes care of setting up autosuspend for the device; the
3297 * autosuspend delay is set to -1 to make runtime suspend impossible
3298 * until an updated value is set by either the user or the driver. Drivers do
3299 * not need to touch other autosuspend settings.
3300 *
3301 * The block layer runtime PM is request based, so it only works for drivers
3302 * that use requests as their I/O unit instead of those that directly use bios.
3303 */
3304 void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3305 {
3306 q->dev = dev;
3307 q->rpm_status = RPM_ACTIVE;
3308 pm_runtime_set_autosuspend_delay(q->dev, -1);
3309 pm_runtime_use_autosuspend(q->dev);
3310 }
3311 EXPORT_SYMBOL(blk_pm_runtime_init);
3312
3313 /**
3314 * blk_pre_runtime_suspend - Pre runtime suspend check
3315 * @q: the queue of the device
3316 *
3317 * Description:
3318 * This function will check if runtime suspend is allowed for the device
3319 * by examining if there are any requests pending in the queue. If there
3320 * are requests pending, the device can not be runtime suspended; otherwise,
3321 * the queue's status will be updated to SUSPENDING and the driver can
3322 * proceed to suspend the device.
3323 *
3324 * For the not-allowed case, we mark the device last busy so that the
3325 * runtime PM core will try to autosuspend it again some time later.
3326 *
3327 * This function should be called near the start of the device's
3328 * runtime_suspend callback.
3329 *
3330 * Return:
3331 * 0 - OK to runtime suspend the device
3332 * -EBUSY - Device should not be runtime suspended
3333 */
3334 int blk_pre_runtime_suspend(struct request_queue *q)
3335 {
3336 int ret = 0;
3337
3338 if (!q->dev)
3339 return ret;
3340
3341 spin_lock_irq(q->queue_lock);
3342 if (q->nr_pending) {
3343 ret = -EBUSY;
3344 pm_runtime_mark_last_busy(q->dev);
3345 } else {
3346 q->rpm_status = RPM_SUSPENDING;
3347 }
3348 spin_unlock_irq(q->queue_lock);
3349 return ret;
3350 }
3351 EXPORT_SYMBOL(blk_pre_runtime_suspend);
3352
3353 /**
3354 * blk_post_runtime_suspend - Post runtime suspend processing
3355 * @q: the queue of the device
3356 * @err: return value of the device's runtime_suspend function
3357 *
3358 * Description:
3359 * Update the queue's runtime status according to the return value of the
3360 * device's runtime suspend function and mark last busy for the device so
3361 * that PM core will try to auto suspend the device at a later time.
3362 *
3363 * This function should be called near the end of the device's
3364 * runtime_suspend callback.
3365 */
3366 void blk_post_runtime_suspend(struct request_queue *q, int err)
3367 {
3368 if (!q->dev)
3369 return;
3370
3371 spin_lock_irq(q->queue_lock);
3372 if (!err) {
3373 q->rpm_status = RPM_SUSPENDED;
3374 } else {
3375 q->rpm_status = RPM_ACTIVE;
3376 pm_runtime_mark_last_busy(q->dev);
3377 }
3378 spin_unlock_irq(q->queue_lock);
3379 }
3380 EXPORT_SYMBOL(blk_post_runtime_suspend);
3381
3382 /**
3383 * blk_pre_runtime_resume - Pre runtime resume processing
3384 * @q: the queue of the device
3385 *
3386 * Description:
3387 * Update the queue's runtime status to RESUMING in preparation for the
3388 * runtime resume of the device.
3389 *
3390 * This function should be called near the start of the device's
3391 * runtime_resume callback.
3392 */
3393 void blk_pre_runtime_resume(struct request_queue *q)
3394 {
3395 if (!q->dev)
3396 return;
3397
3398 spin_lock_irq(q->queue_lock);
3399 q->rpm_status = RPM_RESUMING;
3400 spin_unlock_irq(q->queue_lock);
3401 }
3402 EXPORT_SYMBOL(blk_pre_runtime_resume);
3403
3404 /**
3405 * blk_post_runtime_resume - Post runtime resume processing
3406 * @q: the queue of the device
3407 * @err: return value of the device's runtime_resume function
3408 *
3409 * Description:
3410 * Update the queue's runtime status according to the return value of the
3411 * device's runtime_resume function. If it is successfully resumed, process
3412 * the requests that are queued into the device's queue when it is resuming
3413 * and then mark last busy and initiate autosuspend for it.
3414 *
3415 * This function should be called near the end of the device's
3416 * runtime_resume callback.
3417 */
3418 void blk_post_runtime_resume(struct request_queue *q, int err)
3419 {
3420 if (!q->dev)
3421 return;
3422
3423 spin_lock_irq(q->queue_lock);
3424 if (!err) {
3425 q->rpm_status = RPM_ACTIVE;
3426 __blk_run_queue(q);
3427 pm_runtime_mark_last_busy(q->dev);
3428 pm_request_autosuspend(q->dev);
3429 } else {
3430 q->rpm_status = RPM_SUSPENDED;
3431 }
3432 spin_unlock_irq(q->queue_lock);
3433 }
3434 EXPORT_SYMBOL(blk_post_runtime_resume);
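/*
 * Illustrative sketch (not part of blk-core): how a legacy request-based
 * driver might use the four helpers above.  These functions would be called
 * from the driver's dev_pm_ops ->runtime_suspend()/->runtime_resume()
 * callbacks with its own queue; the driver is assumed to have called
 * blk_pm_runtime_init(q, dev) from its probe routine, and the hardware
 * power transitions are left as comments.
 */
static int example_runtime_suspend_queue(struct request_queue *q)
{
	int err;

	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;	/* requests pending: stay RPM_ACTIVE */

	/* ... power the hardware down here, setting err on failure ... */

	blk_post_runtime_suspend(q, err);
	return err;
}

static int example_runtime_resume_queue(struct request_queue *q)
{
	int err = 0;

	blk_pre_runtime_resume(q);

	/* ... power the hardware back up here, setting err on failure ... */

	blk_post_runtime_resume(q, err);
	return err;
}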
3435
3436 /**
3437 * blk_set_runtime_active - Force runtime status of the queue to be active
3438 * @q: the queue of the device
3439 *
3440 * If the device is left runtime suspended during system suspend, the resume
3441 * hook typically resumes the device and corrects runtime status
3442 * accordingly. However, that does not affect the queue runtime PM status
3443 * which is still "suspended". This prevents processing requests from the
3444 * queue.
3445 *
3446 * This function can be used in driver's resume hook to correct queue
3447 * runtime PM status and re-enable peeking requests from the queue. It
3448 * should be called before the first request is added to the queue.
3449 */
3450 void blk_set_runtime_active(struct request_queue *q)
3451 {
3452 spin_lock_irq(q->queue_lock);
3453 q->rpm_status = RPM_ACTIVE;
3454 pm_runtime_mark_last_busy(q->dev);
3455 pm_request_autosuspend(q->dev);
3456 spin_unlock_irq(q->queue_lock);
3457 }
3458 EXPORT_SYMBOL(blk_set_runtime_active);
3459 #endif
3460
3461 int __init blk_dev_init(void)
3462 {
3463 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
3464 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3465 FIELD_SIZEOF(struct request, cmd_flags));
3466 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3467 FIELD_SIZEOF(struct bio, bi_opf));
3468
3469 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3470 kblockd_workqueue = alloc_workqueue("kblockd",
3471 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
3472 if (!kblockd_workqueue)
3473 panic("Failed to create kblockd\n");
3474
3475 request_cachep = kmem_cache_create("blkdev_requests",
3476 sizeof(struct request), 0, SLAB_PANIC, NULL);
3477
3478 blk_requestq_cachep = kmem_cache_create("request_queue",
3479 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
3480
3481 #ifdef CONFIG_DEBUG_FS
3482 blk_debugfs_root = debugfs_create_dir("block", NULL);
3483 #endif
3484
3485 return 0;
3486 }