1 /*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7 * - July 2000
8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
9 */
10
11 /*
12 * This handles all read/write requests to block devices
13 */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/backing-dev.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/highmem.h>
20 #include <linux/mm.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/string.h>
23 #include <linux/init.h>
24 #include <linux/completion.h>
25 #include <linux/slab.h>
26 #include <linux/swap.h>
27 #include <linux/writeback.h>
28 #include <linux/task_io_accounting_ops.h>
29 #include <linux/fault-inject.h>
30 #include <linux/list_sort.h>
31 #include <linux/delay.h>
32
33 #define CREATE_TRACE_POINTS
34 #include <trace/events/block.h>
35
36 #include "blk.h"
37
38 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
39 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
40 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
41
42 DEFINE_IDA(blk_queue_ida);
43
44 /*
45 * For the allocated request tables
46 */
47 static struct kmem_cache *request_cachep;
48
49 /*
50 * For queue allocation
51 */
52 struct kmem_cache *blk_requestq_cachep;
53
54 /*
55 * Controlling structure to kblockd
56 */
57 static struct workqueue_struct *kblockd_workqueue;
58
59 static void drive_stat_acct(struct request *rq, int new_io)
60 {
61 struct hd_struct *part;
62 int rw = rq_data_dir(rq);
63 int cpu;
64
65 if (!blk_do_io_stat(rq))
66 return;
67
68 cpu = part_stat_lock();
69
70 if (!new_io) {
71 part = rq->part;
72 part_stat_inc(cpu, part, merges[rw]);
73 } else {
74 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
75 if (!hd_struct_try_get(part)) {
76 /*
77 * The partition is already being removed,
78 * the request will be accounted on the disk only
79 *
80 * We take a reference on disk->part0 even though that
81 * partition will never be deleted, so that it can be
82 * treated like any other partition.
83 */
84 part = &rq->rq_disk->part0;
85 hd_struct_get(part);
86 }
87 part_round_stats(cpu, part);
88 part_inc_in_flight(part, rw);
89 rq->part = part;
90 }
91
92 part_stat_unlock();
93 }
94
95 void blk_queue_congestion_threshold(struct request_queue *q)
96 {
97 int nr;
98
99 nr = q->nr_requests - (q->nr_requests / 8) + 1;
100 if (nr > q->nr_requests)
101 nr = q->nr_requests;
102 q->nr_congestion_on = nr;
103
104 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
105 if (nr < 1)
106 nr = 1;
107 q->nr_congestion_off = nr;
108 }
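
/*
 * Illustrative numbers (not from this file, assuming the usual default of
 * q->nr_requests == 128): the arithmetic above gives nr_congestion_on =
 * 128 - 16 + 1 = 113 and nr_congestion_off = 128 - 16 - 8 - 1 = 103, so the
 * queue is flagged congested slightly below the hard request limit and the
 * flag is only cleared once it has drained somewhat further.
 */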
109
110 /**
111 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
112 * @bdev: device
113 *
114 * Locates the passed device's request queue and returns the address of its
115 * backing_dev_info
116 *
117 * Will return NULL if the request queue cannot be located.
118 */
119 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
120 {
121 struct backing_dev_info *ret = NULL;
122 struct request_queue *q = bdev_get_queue(bdev);
123
124 if (q)
125 ret = &q->backing_dev_info;
126 return ret;
127 }
128 EXPORT_SYMBOL(blk_get_backing_dev_info);
129
130 void blk_rq_init(struct request_queue *q, struct request *rq)
131 {
132 memset(rq, 0, sizeof(*rq));
133
134 INIT_LIST_HEAD(&rq->queuelist);
135 INIT_LIST_HEAD(&rq->timeout_list);
136 rq->cpu = -1;
137 rq->q = q;
138 rq->__sector = (sector_t) -1;
139 INIT_HLIST_NODE(&rq->hash);
140 RB_CLEAR_NODE(&rq->rb_node);
141 rq->cmd = rq->__cmd;
142 rq->cmd_len = BLK_MAX_CDB;
143 rq->tag = -1;
144 rq->ref_count = 1;
145 rq->start_time = jiffies;
146 set_start_time_ns(rq);
147 rq->part = NULL;
148 }
149 EXPORT_SYMBOL(blk_rq_init);
150
151 static void req_bio_endio(struct request *rq, struct bio *bio,
152 unsigned int nbytes, int error)
153 {
154 if (error)
155 clear_bit(BIO_UPTODATE, &bio->bi_flags);
156 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
157 error = -EIO;
158
159 if (unlikely(nbytes > bio->bi_size)) {
160 printk(KERN_ERR "%s: want %u bytes done, %u left\n",
161 __func__, nbytes, bio->bi_size);
162 nbytes = bio->bi_size;
163 }
164
165 if (unlikely(rq->cmd_flags & REQ_QUIET))
166 set_bit(BIO_QUIET, &bio->bi_flags);
167
168 bio->bi_size -= nbytes;
169 bio->bi_sector += (nbytes >> 9);
170
171 if (bio_integrity(bio))
172 bio_integrity_advance(bio, nbytes);
173
174 /* don't actually finish bio if it's part of flush sequence */
175 if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
176 bio_endio(bio, error);
177 }
178
179 void blk_dump_rq_flags(struct request *rq, char *msg)
180 {
181 int bit;
182
183 printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
184 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
185 rq->cmd_flags);
186
187 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
188 (unsigned long long)blk_rq_pos(rq),
189 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
190 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
191 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
192
193 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
194 printk(KERN_INFO " cdb: ");
195 for (bit = 0; bit < BLK_MAX_CDB; bit++)
196 printk("%02x ", rq->cmd[bit]);
197 printk("\n");
198 }
199 }
200 EXPORT_SYMBOL(blk_dump_rq_flags);
201
202 static void blk_delay_work(struct work_struct *work)
203 {
204 struct request_queue *q;
205
206 q = container_of(work, struct request_queue, delay_work.work);
207 spin_lock_irq(q->queue_lock);
208 __blk_run_queue(q);
209 spin_unlock_irq(q->queue_lock);
210 }
211
212 /**
213 * blk_delay_queue - restart queueing after defined interval
214 * @q: The &struct request_queue in question
215 * @msecs: Delay in msecs
216 *
217 * Description:
218 * Sometimes queueing needs to be postponed for a little while, to allow
219 * resources to come back. This function will make sure that queueing is
220 * restarted around the specified time.
221 */
222 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
223 {
224 queue_delayed_work(kblockd_workqueue, &q->delay_work,
225 msecs_to_jiffies(msecs));
226 }
227 EXPORT_SYMBOL(blk_delay_queue);
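
/*
 * Example (illustrative sketch, not part of this file): a request_fn that
 * backs off with blk_delay_queue() while its per-device tag pool is
 * exhausted instead of busy-looping.  struct my_delay_dev and the tag
 * accounting are made up for illustration.
 */
struct my_delay_dev {                   /* hypothetical driver state */
        struct request_queue    *queue;
        atomic_t                free_tags;
};

static void my_delay_request_fn(struct request_queue *q)
{
        struct my_delay_dev *dev = q->queuedata;
        struct request *rq;

        while ((rq = blk_peek_request(q)) != NULL) {
                if (atomic_read(&dev->free_tags) == 0) {
                        /* no resources left; ask to be re-run shortly */
                        blk_delay_queue(q, 3);
                        return;
                }
                blk_start_request(rq);
                atomic_dec(&dev->free_tags);
                /* ... hand @rq to the hardware here ... */
        }
}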
228
229 /**
230 * blk_start_queue - restart a previously stopped queue
231 * @q: The &struct request_queue in question
232 *
233 * Description:
234 * blk_start_queue() will clear the stop flag on the queue, and call
235 * the request_fn for the queue if it was in a stopped state when
236 * entered. Also see blk_stop_queue(). Queue lock must be held.
237 **/
238 void blk_start_queue(struct request_queue *q)
239 {
240 WARN_ON(!irqs_disabled());
241
242 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
243 __blk_run_queue(q);
244 }
245 EXPORT_SYMBOL(blk_start_queue);
246
247 /**
248 * blk_stop_queue - stop a queue
249 * @q: The &struct request_queue in question
250 *
251 * Description:
252 * The Linux block layer assumes that a block driver will consume all
253 * entries on the request queue when the request_fn strategy is called.
254 * Often this will not happen, because of hardware limitations (queue
255 * depth settings). If a device driver gets a 'queue full' response,
256 * or if it simply chooses not to queue more I/O at one point, it can
257 * call this function to prevent the request_fn from being called until
258 * the driver has signalled it's ready to go again. This happens by calling
259 * blk_start_queue() to restart queue operations. Queue lock must be held.
260 **/
261 void blk_stop_queue(struct request_queue *q)
262 {
263 __cancel_delayed_work(&q->delay_work);
264 queue_flag_set(QUEUE_FLAG_STOPPED, q);
265 }
266 EXPORT_SYMBOL(blk_stop_queue);
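
/*
 * Example (illustrative sketch, not part of this file): stopping the queue
 * when a made-up hardware queue depth is reached and restarting it from the
 * completion path.  Both calls require the queue lock, which the request_fn
 * already holds and the completion handler takes explicitly.
 */
#define MY_HW_DEPTH     32              /* hypothetical hardware queue depth */

struct my_stop_dev {                    /* hypothetical driver state */
        struct request_queue    *queue;
        unsigned int            inflight;
};

static void my_stop_request_fn(struct request_queue *q)
{
        struct my_stop_dev *dev = q->queuedata;
        struct request *rq;

        while (dev->inflight < MY_HW_DEPTH &&
               (rq = blk_fetch_request(q)) != NULL) {
                dev->inflight++;
                /* ... issue @rq to the hardware here ... */
        }
        if (dev->inflight >= MY_HW_DEPTH)
                blk_stop_queue(q);      /* request_fn won't be called again */
}

static void my_stop_complete(struct my_stop_dev *dev, struct request *rq)
{
        unsigned long flags;

        spin_lock_irqsave(dev->queue->queue_lock, flags);
        __blk_end_request_all(rq, 0);
        dev->inflight--;
        if (blk_queue_stopped(dev->queue))
                blk_start_queue(dev->queue);    /* room again, resume */
        spin_unlock_irqrestore(dev->queue->queue_lock, flags);
}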
267
268 /**
269 * blk_sync_queue - cancel any pending callbacks on a queue
270 * @q: the queue
271 *
272 * Description:
273 * The block layer may perform asynchronous callback activity
274 * on a queue, such as calling the unplug function after a timeout.
275 * A block device may call blk_sync_queue to ensure that any
276 * such activity is cancelled, thus allowing it to release resources
277 * that the callbacks might use. The caller must already have made sure
278 * that its ->make_request_fn will not re-add plugging prior to calling
279 * this function.
280 *
281 * This function does not cancel any asynchronous activity arising
282 * out of elevator or throttling code. That would require elevator_exit()
283 * and blk_throtl_exit() to be called with queue lock initialized.
284 *
285 */
286 void blk_sync_queue(struct request_queue *q)
287 {
288 del_timer_sync(&q->timeout);
289 cancel_delayed_work_sync(&q->delay_work);
290 }
291 EXPORT_SYMBOL(blk_sync_queue);
292
293 /**
294 * __blk_run_queue - run a single device queue
295 * @q: The queue to run
296 *
297 * Description:
298 * See @blk_run_queue. This variant must be called with the queue lock
299 * held and interrupts disabled.
300 */
301 void __blk_run_queue(struct request_queue *q)
302 {
303 if (unlikely(blk_queue_stopped(q)))
304 return;
305
306 q->request_fn(q);
307 }
308 EXPORT_SYMBOL(__blk_run_queue);
309
310 /**
311 * blk_run_queue_async - run a single device queue in workqueue context
312 * @q: The queue to run
313 *
314 * Description:
315 * Tells kblockd to perform the equivalent of @blk_run_queue on our
316 * behalf.
317 */
318 void blk_run_queue_async(struct request_queue *q)
319 {
320 if (likely(!blk_queue_stopped(q))) {
321 __cancel_delayed_work(&q->delay_work);
322 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
323 }
324 }
325 EXPORT_SYMBOL(blk_run_queue_async);
326
327 /**
328 * blk_run_queue - run a single device queue
329 * @q: The queue to run
330 *
331 * Description:
332 * Invoke request handling on this queue, if it has pending work to do.
333 * May be used to restart queueing when a request has completed.
334 */
335 void blk_run_queue(struct request_queue *q)
336 {
337 unsigned long flags;
338
339 spin_lock_irqsave(q->queue_lock, flags);
340 __blk_run_queue(q);
341 spin_unlock_irqrestore(q->queue_lock, flags);
342 }
343 EXPORT_SYMBOL(blk_run_queue);
344
345 void blk_put_queue(struct request_queue *q)
346 {
347 kobject_put(&q->kobj);
348 }
349 EXPORT_SYMBOL(blk_put_queue);
350
351 /**
352 * blk_drain_queue - drain requests from request_queue
353 * @q: queue to drain
354 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
355 *
356 * Drain requests from @q. If @drain_all is set, all requests are drained.
357 * If not, only ELVPRIV requests are drained. The caller is responsible
358 * for ensuring that no new requests which need to be drained are queued.
359 */
360 void blk_drain_queue(struct request_queue *q, bool drain_all)
361 {
362 while (true) {
363 bool drain = false;
364 int i;
365
366 spin_lock_irq(q->queue_lock);
367
368 /*
369 * The caller might be trying to drain @q before its
370 * elevator is initialized.
371 */
372 if (q->elevator)
373 elv_drain_elevator(q);
374
375 blk_throtl_drain(q);
376
377 /*
378 * This function might be called on a queue which failed
379 * driver init after queue creation or is not yet fully
380 * active. Some drivers (e.g. fd and loop) get unhappy
381 * in such cases. Kick queue iff dispatch queue has
382 * something on it and @q has request_fn set.
383 */
384 if (!list_empty(&q->queue_head) && q->request_fn)
385 __blk_run_queue(q);
386
387 drain |= q->rq.elvpriv;
388
389 /*
390 * Unfortunately, requests are queued at and tracked from
391 * multiple places and there's no single counter which can
392 * be drained. Check all the queues and counters.
393 */
394 if (drain_all) {
395 drain |= !list_empty(&q->queue_head);
396 for (i = 0; i < 2; i++) {
397 drain |= q->rq.count[i];
398 drain |= q->in_flight[i];
399 drain |= !list_empty(&q->flush_queue[i]);
400 }
401 }
402
403 spin_unlock_irq(q->queue_lock);
404
405 if (!drain)
406 break;
407 msleep(10);
408 }
409 }
410
411 /**
412 * blk_queue_bypass_start - enter queue bypass mode
413 * @q: queue of interest
414 *
415 * In bypass mode, only the dispatch FIFO queue of @q is used. This
416 * function makes @q enter bypass mode and drains all requests which were
417 * throttled or issued before. On return, it's guaranteed that no request
418 * is being throttled or has ELVPRIV set.
419 */
420 void blk_queue_bypass_start(struct request_queue *q)
421 {
422 spin_lock_irq(q->queue_lock);
423 q->bypass_depth++;
424 queue_flag_set(QUEUE_FLAG_BYPASS, q);
425 spin_unlock_irq(q->queue_lock);
426
427 blk_drain_queue(q, false);
428 }
429 EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
430
431 /**
432 * blk_queue_bypass_end - leave queue bypass mode
433 * @q: queue of interest
434 *
435 * Leave bypass mode and restore the normal queueing behavior.
436 */
437 void blk_queue_bypass_end(struct request_queue *q)
438 {
439 spin_lock_irq(q->queue_lock);
440 if (!--q->bypass_depth)
441 queue_flag_clear(QUEUE_FLAG_BYPASS, q);
442 WARN_ON_ONCE(q->bypass_depth < 0);
443 spin_unlock_irq(q->queue_lock);
444 }
445 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
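
/*
 * Example (illustrative sketch, not part of this file): bracketing an
 * update that must not race with elevator- or blkcg-managed requests
 * (e.g. a policy change) with the bypass helpers above.
 */
static void my_quiesced_update(struct request_queue *q)
{
        blk_queue_bypass_start(q);      /* drain throttled/ELVPRIV requests */
        /* ... update elevator/blkcg policy state safely here ... */
        blk_queue_bypass_end(q);        /* restore normal queueing */
}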
446
447 /**
448 * blk_cleanup_queue - shutdown a request queue
449 * @q: request queue to shutdown
450 *
451 * Mark @q DEAD, drain all pending requests, destroy and put it. All
452 * future requests will be failed immediately with -ENODEV.
453 */
454 void blk_cleanup_queue(struct request_queue *q)
455 {
456 spinlock_t *lock = q->queue_lock;
457
458 /* mark @q DEAD, no new request or merges will be allowed afterwards */
459 mutex_lock(&q->sysfs_lock);
460 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
461
462 spin_lock_irq(lock);
463
464 /* dead queue is permanently in bypass mode till released */
465 q->bypass_depth++;
466 queue_flag_set(QUEUE_FLAG_BYPASS, q);
467
468 queue_flag_set(QUEUE_FLAG_NOMERGES, q);
469 queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
470 queue_flag_set(QUEUE_FLAG_DEAD, q);
471
472 if (q->queue_lock != &q->__queue_lock)
473 q->queue_lock = &q->__queue_lock;
474
475 spin_unlock_irq(lock);
476 mutex_unlock(&q->sysfs_lock);
477
478 /* drain all requests queued before DEAD marking */
479 blk_drain_queue(q, true);
480
481 /* @q won't process any more request, flush async actions */
482 del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
483 blk_sync_queue(q);
484
485 /* @q is and will stay empty, shutdown and put */
486 blk_put_queue(q);
487 }
488 EXPORT_SYMBOL(blk_cleanup_queue);
489
490 static int blk_init_free_list(struct request_queue *q)
491 {
492 struct request_list *rl = &q->rq;
493
494 if (unlikely(rl->rq_pool))
495 return 0;
496
497 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
498 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
499 rl->elvpriv = 0;
500 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
501 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
502
503 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
504 mempool_free_slab, request_cachep, q->node);
505
506 if (!rl->rq_pool)
507 return -ENOMEM;
508
509 return 0;
510 }
511
512 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
513 {
514 return blk_alloc_queue_node(gfp_mask, -1);
515 }
516 EXPORT_SYMBOL(blk_alloc_queue);
517
518 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
519 {
520 struct request_queue *q;
521 int err;
522
523 q = kmem_cache_alloc_node(blk_requestq_cachep,
524 gfp_mask | __GFP_ZERO, node_id);
525 if (!q)
526 return NULL;
527
528 q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
529 if (q->id < 0)
530 goto fail_q;
531
532 q->backing_dev_info.ra_pages =
533 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
534 q->backing_dev_info.state = 0;
535 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
536 q->backing_dev_info.name = "block";
537 q->node = node_id;
538
539 err = bdi_init(&q->backing_dev_info);
540 if (err)
541 goto fail_id;
542
543 if (blk_throtl_init(q))
544 goto fail_id;
545
546 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
547 laptop_mode_timer_fn, (unsigned long) q);
548 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
549 INIT_LIST_HEAD(&q->queue_head);
550 INIT_LIST_HEAD(&q->timeout_list);
551 INIT_LIST_HEAD(&q->icq_list);
552 INIT_LIST_HEAD(&q->flush_queue[0]);
553 INIT_LIST_HEAD(&q->flush_queue[1]);
554 INIT_LIST_HEAD(&q->flush_data_in_flight);
555 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
556
557 kobject_init(&q->kobj, &blk_queue_ktype);
558
559 mutex_init(&q->sysfs_lock);
560 spin_lock_init(&q->__queue_lock);
561
562 /*
563 * By default initialize queue_lock to internal lock and driver can
564 * override it later if need be.
565 */
566 q->queue_lock = &q->__queue_lock;
567
568 return q;
569
570 fail_id:
571 ida_simple_remove(&blk_queue_ida, q->id);
572 fail_q:
573 kmem_cache_free(blk_requestq_cachep, q);
574 return NULL;
575 }
576 EXPORT_SYMBOL(blk_alloc_queue_node);
577
578 /**
579 * blk_init_queue - prepare a request queue for use with a block device
580 * @rfn: The function to be called to process requests that have been
581 * placed on the queue.
582 * @lock: Request queue spin lock
583 *
584 * Description:
585 * If a block device wishes to use the standard request handling procedures,
586 * which sorts requests and coalesces adjacent requests, then it must
587 * call blk_init_queue(). The function @rfn will be called when there
588 * are requests on the queue that need to be processed. If the device
589 * supports plugging, then @rfn may not be called immediately when requests
590 * are available on the queue, but may be called at some time later instead.
591 * Plugged queues are generally unplugged when a buffer belonging to one
592 * of the requests on the queue is needed, or due to memory pressure.
593 *
594 * @rfn is not required, or even expected, to remove all requests off the
595 * queue, but only as many as it can handle at a time. If it does leave
596 * requests on the queue, it is responsible for arranging that the requests
597 * get dealt with eventually.
598 *
599 * The queue spin lock must be held while manipulating the requests on the
600 * request queue; this lock will be taken also from interrupt context, so irq
601 * disabling is needed for it.
602 *
603 * Function returns a pointer to the initialized request queue, or %NULL if
604 * it didn't succeed.
605 *
606 * Note:
607 * blk_init_queue() must be paired with a blk_cleanup_queue() call
608 * when the block device is deactivated (such as at module unload).
609 **/
610
611 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
612 {
613 return blk_init_queue_node(rfn, lock, -1);
614 }
615 EXPORT_SYMBOL(blk_init_queue);
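
/*
 * Example (illustrative sketch, not part of this file): the usual pairing of
 * blk_init_queue()/blk_cleanup_queue() in a simple driver, with a request_fn
 * that consumes requests via blk_fetch_request().  All my_* names and the
 * in-line "ramdisk-style" completion are hypothetical.
 */
static DEFINE_SPINLOCK(my_queue_lock);

static void my_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                if (rq->cmd_type != REQ_TYPE_FS) {
                        __blk_end_request_all(rq, -EIO);
                        continue;
                }
                /* transfer blk_rq_bytes(rq) starting at blk_rq_pos(rq) ... */
                __blk_end_request_all(rq, 0);   /* complete in-line */
        }
}

static struct request_queue *my_create_queue(void)
{
        struct request_queue *q;

        q = blk_init_queue(my_request_fn, &my_queue_lock);
        if (!q)
                return NULL;
        blk_queue_logical_block_size(q, 512);
        return q;
}

static void my_destroy_queue(struct request_queue *q)
{
        blk_cleanup_queue(q);           /* must pair with blk_init_queue() */
}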
616
617 struct request_queue *
618 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
619 {
620 struct request_queue *uninit_q, *q;
621
622 uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
623 if (!uninit_q)
624 return NULL;
625
626 q = blk_init_allocated_queue(uninit_q, rfn, lock);
627 if (!q)
628 blk_cleanup_queue(uninit_q);
629
630 return q;
631 }
632 EXPORT_SYMBOL(blk_init_queue_node);
633
634 struct request_queue *
635 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
636 spinlock_t *lock)
637 {
638 if (!q)
639 return NULL;
640
641 if (blk_init_free_list(q))
642 return NULL;
643
644 q->request_fn = rfn;
645 q->prep_rq_fn = NULL;
646 q->unprep_rq_fn = NULL;
647 q->queue_flags = QUEUE_FLAG_DEFAULT;
648
649 /* Override internal queue lock with supplied lock pointer */
650 if (lock)
651 q->queue_lock = lock;
652
653 /*
654 * This also sets hw/phys segments, boundary and size
655 */
656 blk_queue_make_request(q, blk_queue_bio);
657
658 q->sg_reserved_size = INT_MAX;
659
660 /*
661 * all done
662 */
663 if (!elevator_init(q, NULL)) {
664 blk_queue_congestion_threshold(q);
665 return q;
666 }
667
668 return NULL;
669 }
670 EXPORT_SYMBOL(blk_init_allocated_queue);
671
672 bool blk_get_queue(struct request_queue *q)
673 {
674 if (likely(!blk_queue_dead(q))) {
675 __blk_get_queue(q);
676 return true;
677 }
678
679 return false;
680 }
681 EXPORT_SYMBOL(blk_get_queue);
682
683 static inline void blk_free_request(struct request_queue *q, struct request *rq)
684 {
685 if (rq->cmd_flags & REQ_ELVPRIV) {
686 elv_put_request(q, rq);
687 if (rq->elv.icq)
688 put_io_context(rq->elv.icq->ioc);
689 }
690
691 mempool_free(rq, q->rq.rq_pool);
692 }
693
694 static struct request *
695 blk_alloc_request(struct request_queue *q, struct io_cq *icq,
696 unsigned int flags, gfp_t gfp_mask)
697 {
698 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
699
700 if (!rq)
701 return NULL;
702
703 blk_rq_init(q, rq);
704
705 rq->cmd_flags = flags | REQ_ALLOCED;
706
707 if (flags & REQ_ELVPRIV) {
708 rq->elv.icq = icq;
709 if (unlikely(elv_set_request(q, rq, gfp_mask))) {
710 mempool_free(rq, q->rq.rq_pool);
711 return NULL;
712 }
713 /* @rq->elv.icq holds on to io_context until @rq is freed */
714 if (icq)
715 get_io_context(icq->ioc);
716 }
717
718 return rq;
719 }
720
721 /*
722 * ioc_batching returns true if the ioc is a valid batching request and
723 * should be given priority access to a request.
724 */
725 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
726 {
727 if (!ioc)
728 return 0;
729
730 /*
731 * Make sure the process is able to allocate at least 1 request
732 * even if the batch times out, otherwise we could theoretically
733 * lose wakeups.
734 */
735 return ioc->nr_batch_requests == q->nr_batching ||
736 (ioc->nr_batch_requests > 0
737 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
738 }
739
740 /*
741 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
742 * will cause the process to be a "batcher" on all queues in the system. This
743 * is the behaviour we want though - once it gets a wakeup it should be given
744 * a nice run.
745 */
746 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
747 {
748 if (!ioc || ioc_batching(q, ioc))
749 return;
750
751 ioc->nr_batch_requests = q->nr_batching;
752 ioc->last_waited = jiffies;
753 }
754
755 static void __freed_request(struct request_queue *q, int sync)
756 {
757 struct request_list *rl = &q->rq;
758
759 if (rl->count[sync] < queue_congestion_off_threshold(q))
760 blk_clear_queue_congested(q, sync);
761
762 if (rl->count[sync] + 1 <= q->nr_requests) {
763 if (waitqueue_active(&rl->wait[sync]))
764 wake_up(&rl->wait[sync]);
765
766 blk_clear_queue_full(q, sync);
767 }
768 }
769
770 /*
771 * A request has just been released. Account for it, update the full and
772 * congestion status, wake up any waiters. Called under q->queue_lock.
773 */
774 static void freed_request(struct request_queue *q, unsigned int flags)
775 {
776 struct request_list *rl = &q->rq;
777 int sync = rw_is_sync(flags);
778
779 rl->count[sync]--;
780 if (flags & REQ_ELVPRIV)
781 rl->elvpriv--;
782
783 __freed_request(q, sync);
784
785 if (unlikely(rl->starved[sync ^ 1]))
786 __freed_request(q, sync ^ 1);
787 }
788
789 /*
790 * Determine if elevator data should be initialized when allocating the
791 * request associated with @bio.
792 */
793 static bool blk_rq_should_init_elevator(struct bio *bio)
794 {
795 if (!bio)
796 return true;
797
798 /*
799 * Flush requests do not use the elevator so skip initialization.
800 * This allows a request to share the flush and elevator data.
801 */
802 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
803 return false;
804
805 return true;
806 }
807
808 /**
809 * get_request - get a free request
810 * @q: request_queue to allocate request from
811 * @rw_flags: RW and SYNC flags
812 * @bio: bio to allocate request for (can be %NULL)
813 * @gfp_mask: allocation mask
814 *
815 * Get a free request from @q. This function may fail under memory
816 * pressure or if @q is dead.
817 *
818 * Must be called with @q->queue_lock held and,
819 * Returns %NULL on failure, with @q->queue_lock held.
820 * Returns !%NULL on success, with @q->queue_lock *not held*.
821 */
822 static struct request *get_request(struct request_queue *q, int rw_flags,
823 struct bio *bio, gfp_t gfp_mask)
824 {
825 struct request *rq = NULL;
826 struct request_list *rl = &q->rq;
827 struct elevator_type *et;
828 struct io_context *ioc;
829 struct io_cq *icq = NULL;
830 const bool is_sync = rw_is_sync(rw_flags) != 0;
831 bool retried = false;
832 int may_queue;
833 retry:
834 et = q->elevator->type;
835 ioc = current->io_context;
836
837 if (unlikely(blk_queue_dead(q)))
838 return NULL;
839
840 may_queue = elv_may_queue(q, rw_flags);
841 if (may_queue == ELV_MQUEUE_NO)
842 goto rq_starved;
843
844 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
845 if (rl->count[is_sync]+1 >= q->nr_requests) {
846 /*
847 * We want ioc to record batching state. If it's
848 * not already there, creating a new one requires
849 * dropping queue_lock, which in turn requires
850 * retesting conditions to avoid queue hang.
851 */
852 if (!ioc && !retried) {
853 spin_unlock_irq(q->queue_lock);
854 create_io_context(current, gfp_mask, q->node);
855 spin_lock_irq(q->queue_lock);
856 retried = true;
857 goto retry;
858 }
859
860 /*
861 * The queue will fill after this allocation, so set
862 * it as full, and mark this process as "batching".
863 * This process will be allowed to complete a batch of
864 * requests, others will be blocked.
865 */
866 if (!blk_queue_full(q, is_sync)) {
867 ioc_set_batching(q, ioc);
868 blk_set_queue_full(q, is_sync);
869 } else {
870 if (may_queue != ELV_MQUEUE_MUST
871 && !ioc_batching(q, ioc)) {
872 /*
873 * The queue is full and the allocating
874 * process is not a "batcher", and not
875 * exempted by the IO scheduler
876 */
877 goto out;
878 }
879 }
880 }
881 blk_set_queue_congested(q, is_sync);
882 }
883
884 /*
885 * Only allow batching queuers to allocate up to 50% over the defined
886 * limit of requests, otherwise we could have thousands of requests
887 * allocated with any setting of ->nr_requests
888 */
889 if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
890 goto out;
891
892 rl->count[is_sync]++;
893 rl->starved[is_sync] = 0;
894
895 /*
896 * Decide whether the new request will be managed by elevator. If
897 * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will
898 * prevent the current elevator from being destroyed until the new
899 * request is freed. This guarantees icq's won't be destroyed and
900 * makes creating new ones safe.
901 *
902 * Also, lookup icq while holding queue_lock. If it doesn't exist,
903 * it will be created after releasing queue_lock.
904 */
905 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
906 rw_flags |= REQ_ELVPRIV;
907 rl->elvpriv++;
908 if (et->icq_cache && ioc)
909 icq = ioc_lookup_icq(ioc, q);
910 }
911
912 if (blk_queue_io_stat(q))
913 rw_flags |= REQ_IO_STAT;
914 spin_unlock_irq(q->queue_lock);
915
916 /* create icq if missing */
917 if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
918 icq = ioc_create_icq(q, gfp_mask);
919 if (!icq)
920 goto fail_icq;
921 }
922
923 rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
924
925 fail_icq:
926 if (unlikely(!rq)) {
927 /*
928 * Allocation failed presumably due to memory. Undo anything
929 * we might have messed up.
930 *
931 * Allocating task should really be put onto the front of the
932 * wait queue, but this is pretty rare.
933 */
934 spin_lock_irq(q->queue_lock);
935 freed_request(q, rw_flags);
936
937 /*
938 * in the very unlikely event that allocation failed and no
939 * requests for this direction were pending, mark us starved
940 * so that freeing of a request in the other direction will
941 * notice us. Another possible fix would be to split the
942 * rq mempool into READ and WRITE
943 */
944 rq_starved:
945 if (unlikely(rl->count[is_sync] == 0))
946 rl->starved[is_sync] = 1;
947
948 goto out;
949 }
950
951 /*
952 * ioc may be NULL here, and ioc_batching will be false. That's
953 * OK, if the queue is under the request limit then requests need
954 * not count toward the nr_batch_requests limit. There will always
955 * be some limit enforced by BLK_BATCH_TIME.
956 */
957 if (ioc_batching(q, ioc))
958 ioc->nr_batch_requests--;
959
960 trace_block_getrq(q, bio, rw_flags & 1);
961 out:
962 return rq;
963 }
964
965 /**
966 * get_request_wait - get a free request with retry
967 * @q: request_queue to allocate request from
968 * @rw_flags: RW and SYNC flags
969 * @bio: bio to allocate request for (can be %NULL)
970 *
971 * Get a free request from @q. This function keeps retrying under memory
972 * pressure and fails iff @q is dead.
973 *
974 * Must be called with @q->queue_lock held and,
975 * Returns %NULL on failure, with @q->queue_lock held.
976 * Returns !%NULL on success, with @q->queue_lock *not held*.
977 */
978 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
979 struct bio *bio)
980 {
981 const bool is_sync = rw_is_sync(rw_flags) != 0;
982 struct request *rq;
983
984 rq = get_request(q, rw_flags, bio, GFP_NOIO);
985 while (!rq) {
986 DEFINE_WAIT(wait);
987 struct request_list *rl = &q->rq;
988
989 if (unlikely(blk_queue_dead(q)))
990 return NULL;
991
992 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
993 TASK_UNINTERRUPTIBLE);
994
995 trace_block_sleeprq(q, bio, rw_flags & 1);
996
997 spin_unlock_irq(q->queue_lock);
998 io_schedule();
999
1000 /*
1001 * After sleeping, we become a "batching" process and
1002 * will be able to allocate at least one request, and
1003 * up to a big batch of them for a small period time.
1004 * See ioc_batching, ioc_set_batching
1005 */
1006 create_io_context(current, GFP_NOIO, q->node);
1007 ioc_set_batching(q, current->io_context);
1008
1009 spin_lock_irq(q->queue_lock);
1010 finish_wait(&rl->wait[is_sync], &wait);
1011
1012 rq = get_request(q, rw_flags, bio, GFP_NOIO);
1013 }
1014
1015 return rq;
1016 }
1017
1018 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1019 {
1020 struct request *rq;
1021
1022 BUG_ON(rw != READ && rw != WRITE);
1023
1024 spin_lock_irq(q->queue_lock);
1025 if (gfp_mask & __GFP_WAIT)
1026 rq = get_request_wait(q, rw, NULL);
1027 else
1028 rq = get_request(q, rw, NULL, gfp_mask);
1029 if (!rq)
1030 spin_unlock_irq(q->queue_lock);
1031 /* q->queue_lock is unlocked at this point */
1032
1033 return rq;
1034 }
1035 EXPORT_SYMBOL(blk_get_request);
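
/*
 * Example (illustrative sketch, not part of this file): allocating a request
 * for a driver-internal command.  With a __GFP_WAIT mask the call may sleep
 * but only fails if the queue is dead; here the request is executed
 * synchronously via blk_execute_rq(), though a driver could also insert it
 * itself.  my_send_internal_cmd() is a made-up name.
 */
static int my_send_internal_cmd(struct request_queue *q, struct gendisk *disk)
{
        struct request *rq;
        int err;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)
                return -ENODEV;

        rq->cmd_type = REQ_TYPE_SPECIAL;        /* driver-private command */
        err = blk_execute_rq(q, disk, rq, 0);   /* wait for completion */
        blk_put_request(rq);
        return err;
}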
1036
1037 /**
1038 * blk_make_request - given a bio, allocate a corresponding struct request.
1039 * @q: target request queue
1040 * @bio: The bio describing the memory mappings that will be submitted for IO.
1041 * It may be a chained bio properly constructed by the block/bio layer.
1042 * @gfp_mask: gfp flags to be used for memory allocation
1043 *
1044 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
1045 * type commands, where the struct request needs to be further initialized by
1046 * the caller. It is passed a &struct bio, which describes the memory info of
1047 * the I/O transfer.
1048 *
1049 * The caller of blk_make_request must make sure that bi_io_vec is
1050 * set to describe the memory buffers, that bio_data_dir() will return
1051 * the needed direction of the request, and that all bios in the passed
1052 * bio chain are set up accordingly.
1053 *
1054 * If called under non-sleepable conditions, mapped bio buffers must not
1055 * need bouncing, by calling the appropriate masked or flagged allocator,
1056 * suitable for the target device. Otherwise the call to blk_queue_bounce will
1057 * BUG.
1058 *
1059 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
1060 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
1061 * anything but the first bio in the chain. Otherwise you risk waiting for IO
1062 * completion of a bio that hasn't been submitted yet, thus resulting in a
1063 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
1064 * of bio_alloc(), as that avoids the mempool deadlock.
1065 * If possible a big IO should be split into smaller parts when allocation
1066 * fails. Partial allocation should not be an error, or you risk a live-lock.
1067 */
1068 struct request *blk_make_request(struct request_queue *q, struct bio *bio,
1069 gfp_t gfp_mask)
1070 {
1071 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
1072
1073 if (unlikely(!rq))
1074 return ERR_PTR(-ENOMEM);
1075
1076 for_each_bio(bio) {
1077 struct bio *bounce_bio = bio;
1078 int ret;
1079
1080 blk_queue_bounce(q, &bounce_bio);
1081 ret = blk_rq_append_bio(q, rq, bounce_bio);
1082 if (unlikely(ret)) {
1083 blk_put_request(rq);
1084 return ERR_PTR(ret);
1085 }
1086 }
1087
1088 return rq;
1089 }
1090 EXPORT_SYMBOL(blk_make_request);
1091
1092 /**
1093 * blk_requeue_request - put a request back on queue
1094 * @q: request queue where request should be inserted
1095 * @rq: request to be inserted
1096 *
1097 * Description:
1098 * Drivers often keep queueing requests until the hardware cannot accept
1099 * more, when that condition happens we need to put the request back
1100 * on the queue. Must be called with queue lock held.
1101 */
1102 void blk_requeue_request(struct request_queue *q, struct request *rq)
1103 {
1104 blk_delete_timer(rq);
1105 blk_clear_rq_complete(rq);
1106 trace_block_rq_requeue(q, rq);
1107
1108 if (blk_rq_tagged(rq))
1109 blk_queue_end_tag(q, rq);
1110
1111 BUG_ON(blk_queued_rq(rq));
1112
1113 elv_requeue_request(q, rq);
1114 }
1115 EXPORT_SYMBOL(blk_requeue_request);
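
/*
 * Example (illustrative sketch, not part of this file): a request_fn giving a
 * started request back to the queue when the hardware rejects it, then
 * stopping the queue until the completion path restarts it.  my_hw_submit()
 * is a hypothetical helper returning 0 on success or -EBUSY.
 */
static void my_requeue_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                if (my_hw_submit(rq) != 0) {            /* hypothetical */
                        blk_requeue_request(q, rq);     /* queue lock is held */
                        blk_stop_queue(q);              /* wait for completions */
                        break;
                }
        }
}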
1116
1117 static void add_acct_request(struct request_queue *q, struct request *rq,
1118 int where)
1119 {
1120 drive_stat_acct(rq, 1);
1121 __elv_add_request(q, rq, where);
1122 }
1123
1124 static void part_round_stats_single(int cpu, struct hd_struct *part,
1125 unsigned long now)
1126 {
1127 if (now == part->stamp)
1128 return;
1129
1130 if (part_in_flight(part)) {
1131 __part_stat_add(cpu, part, time_in_queue,
1132 part_in_flight(part) * (now - part->stamp));
1133 __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1134 }
1135 part->stamp = now;
1136 }
1137
1138 /**
1139 * part_round_stats() - Round off the performance stats on a struct disk_stats.
1140 * @cpu: cpu number for stats access
1141 * @part: target partition
1142 *
1143 * The average IO queue length and utilisation statistics are maintained
1144 * by observing the current state of the queue length and the amount of
1145 * time it has been in this state for.
1146 *
1147 * Normally, that accounting is done on IO completion, but that can result
1148 * in more than a second's worth of IO being accounted for within any one
1149 * second, leading to >100% utilisation. To deal with that, we call this
1150 * function to do a round-off before returning the results when reading
1151 * /proc/diskstats. This accounts immediately for all queue usage up to
1152 * the current jiffies and restarts the counters again.
1153 */
1154 void part_round_stats(int cpu, struct hd_struct *part)
1155 {
1156 unsigned long now = jiffies;
1157
1158 if (part->partno)
1159 part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1160 part_round_stats_single(cpu, part, now);
1161 }
1162 EXPORT_SYMBOL_GPL(part_round_stats);
1163
1164 /*
1165 * queue lock must be held
1166 */
1167 void __blk_put_request(struct request_queue *q, struct request *req)
1168 {
1169 if (unlikely(!q))
1170 return;
1171 if (unlikely(--req->ref_count))
1172 return;
1173
1174 elv_completed_request(q, req);
1175
1176 /* this is a bio leak */
1177 WARN_ON(req->bio != NULL);
1178
1179 /*
1180 * Request may not have originated from ll_rw_blk. if not,
1181 * it didn't come out of our reserved rq pools
1182 */
1183 if (req->cmd_flags & REQ_ALLOCED) {
1184 unsigned int flags = req->cmd_flags;
1185
1186 BUG_ON(!list_empty(&req->queuelist));
1187 BUG_ON(!hlist_unhashed(&req->hash));
1188
1189 blk_free_request(q, req);
1190 freed_request(q, flags);
1191 }
1192 }
1193 EXPORT_SYMBOL_GPL(__blk_put_request);
1194
1195 void blk_put_request(struct request *req)
1196 {
1197 unsigned long flags;
1198 struct request_queue *q = req->q;
1199
1200 spin_lock_irqsave(q->queue_lock, flags);
1201 __blk_put_request(q, req);
1202 spin_unlock_irqrestore(q->queue_lock, flags);
1203 }
1204 EXPORT_SYMBOL(blk_put_request);
1205
1206 /**
1207 * blk_add_request_payload - add a payload to a request
1208 * @rq: request to update
1209 * @page: page backing the payload
1210 * @len: length of the payload.
1211 *
1212 * This allows a block driver to later add a payload to an already
1213 * submitted request. The driver needs to take care of freeing the
1214 * payload itself.
1215 *
1216 * Note that this is a quite horrible hack and nothing but handling of
1217 * discard requests should ever use it.
1218 */
1219 void blk_add_request_payload(struct request *rq, struct page *page,
1220 unsigned int len)
1221 {
1222 struct bio *bio = rq->bio;
1223
1224 bio->bi_io_vec->bv_page = page;
1225 bio->bi_io_vec->bv_offset = 0;
1226 bio->bi_io_vec->bv_len = len;
1227
1228 bio->bi_size = len;
1229 bio->bi_vcnt = 1;
1230 bio->bi_phys_segments = 1;
1231
1232 rq->__data_len = rq->resid_len = len;
1233 rq->nr_phys_segments = 1;
1234 rq->buffer = bio_data(bio);
1235 }
1236 EXPORT_SYMBOL_GPL(blk_add_request_payload);
1237
1238 static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1239 struct bio *bio)
1240 {
1241 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1242
1243 if (!ll_back_merge_fn(q, req, bio))
1244 return false;
1245
1246 trace_block_bio_backmerge(q, bio);
1247
1248 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1249 blk_rq_set_mixed_merge(req);
1250
1251 req->biotail->bi_next = bio;
1252 req->biotail = bio;
1253 req->__data_len += bio->bi_size;
1254 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1255
1256 drive_stat_acct(req, 0);
1257 return true;
1258 }
1259
1260 static bool bio_attempt_front_merge(struct request_queue *q,
1261 struct request *req, struct bio *bio)
1262 {
1263 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1264
1265 if (!ll_front_merge_fn(q, req, bio))
1266 return false;
1267
1268 trace_block_bio_frontmerge(q, bio);
1269
1270 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1271 blk_rq_set_mixed_merge(req);
1272
1273 bio->bi_next = req->bio;
1274 req->bio = bio;
1275
1276 /*
1277 * may not be valid. if the low level driver said
1278 * it didn't need a bounce buffer then it better
1279 * not touch req->buffer either...
1280 */
1281 req->buffer = bio_data(bio);
1282 req->__sector = bio->bi_sector;
1283 req->__data_len += bio->bi_size;
1284 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1285
1286 drive_stat_acct(req, 0);
1287 return true;
1288 }
1289
1290 /**
1291 * attempt_plug_merge - try to merge with %current's plugged list
1292 * @q: request_queue new bio is being queued at
1293 * @bio: new bio being queued
1294 * @request_count: out parameter for number of traversed plugged requests
1295 *
1296 * Determine whether @bio being queued on @q can be merged with a request
1297 * on %current's plugged list. Returns %true if merge was successful,
1298 * otherwise %false.
1299 *
1300 * Plugging coalesces IOs from the same issuer for the same purpose without
1301 * going through @q->queue_lock. As such it's more of an issuing mechanism
1302 * than scheduling, and the request, while it may have elvpriv data, is not
1303 * added on the elevator at this point. In addition, we don't have
1304 * reliable access to the elevator outside queue lock. Only check basic
1305 * merging parameters without querying the elevator.
1306 */
1307 static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
1308 unsigned int *request_count)
1309 {
1310 struct blk_plug *plug;
1311 struct request *rq;
1312 bool ret = false;
1313
1314 plug = current->plug;
1315 if (!plug)
1316 goto out;
1317 *request_count = 0;
1318
1319 list_for_each_entry_reverse(rq, &plug->list, queuelist) {
1320 int el_ret;
1321
1322 (*request_count)++;
1323
1324 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
1325 continue;
1326
1327 el_ret = blk_try_merge(rq, bio);
1328 if (el_ret == ELEVATOR_BACK_MERGE) {
1329 ret = bio_attempt_back_merge(q, rq, bio);
1330 if (ret)
1331 break;
1332 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1333 ret = bio_attempt_front_merge(q, rq, bio);
1334 if (ret)
1335 break;
1336 }
1337 }
1338 out:
1339 return ret;
1340 }
1341
1342 void init_request_from_bio(struct request *req, struct bio *bio)
1343 {
1344 req->cmd_type = REQ_TYPE_FS;
1345
1346 req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
1347 if (bio->bi_rw & REQ_RAHEAD)
1348 req->cmd_flags |= REQ_FAILFAST_MASK;
1349
1350 req->errors = 0;
1351 req->__sector = bio->bi_sector;
1352 req->ioprio = bio_prio(bio);
1353 blk_rq_bio_prep(req->q, req, bio);
1354 }
1355
1356 void blk_queue_bio(struct request_queue *q, struct bio *bio)
1357 {
1358 const bool sync = !!(bio->bi_rw & REQ_SYNC);
1359 struct blk_plug *plug;
1360 int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
1361 struct request *req;
1362 unsigned int request_count = 0;
1363
1364 /*
1365 * low level driver can indicate that it wants pages above a
1366 * certain limit bounced to low memory (ie for highmem, or even
1367 * ISA dma in theory)
1368 */
1369 blk_queue_bounce(q, &bio);
1370
1371 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
1372 spin_lock_irq(q->queue_lock);
1373 where = ELEVATOR_INSERT_FLUSH;
1374 goto get_rq;
1375 }
1376
1377 /*
1378 * Check if we can merge with the plugged list before grabbing
1379 * any locks.
1380 */
1381 if (attempt_plug_merge(q, bio, &request_count))
1382 return;
1383
1384 spin_lock_irq(q->queue_lock);
1385
1386 el_ret = elv_merge(q, &req, bio);
1387 if (el_ret == ELEVATOR_BACK_MERGE) {
1388 if (bio_attempt_back_merge(q, req, bio)) {
1389 elv_bio_merged(q, req, bio);
1390 if (!attempt_back_merge(q, req))
1391 elv_merged_request(q, req, el_ret);
1392 goto out_unlock;
1393 }
1394 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1395 if (bio_attempt_front_merge(q, req, bio)) {
1396 elv_bio_merged(q, req, bio);
1397 if (!attempt_front_merge(q, req))
1398 elv_merged_request(q, req, el_ret);
1399 goto out_unlock;
1400 }
1401 }
1402
1403 get_rq:
1404 /*
1405 * This sync check and mask will be re-done in init_request_from_bio(),
1406 * but we need to set it earlier to expose the sync flag to the
1407 * rq allocator and io schedulers.
1408 */
1409 rw_flags = bio_data_dir(bio);
1410 if (sync)
1411 rw_flags |= REQ_SYNC;
1412
1413 /*
1414 * Grab a free request. This might sleep but cannot fail.
1415 * Returns with the queue unlocked.
1416 */
1417 req = get_request_wait(q, rw_flags, bio);
1418 if (unlikely(!req)) {
1419 bio_endio(bio, -ENODEV); /* @q is dead */
1420 goto out_unlock;
1421 }
1422
1423 /*
1424 * After dropping the lock and possibly sleeping here, our request
1425 * may now be mergeable after it had proven unmergeable (above).
1426 * We don't worry about that case for efficiency. It won't happen
1427 * often, and the elevators are able to handle it.
1428 */
1429 init_request_from_bio(req, bio);
1430
1431 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
1432 req->cpu = raw_smp_processor_id();
1433
1434 plug = current->plug;
1435 if (plug) {
1436 /*
1437 * If this is the first request added after a plug, fire
1438 * off a plug trace. If others have been added before, check
1439 * if we have multiple devices in this plug. If so, make a
1440 * note to sort the list before dispatch.
1441 */
1442 if (list_empty(&plug->list))
1443 trace_block_plug(q);
1444 else {
1445 if (!plug->should_sort) {
1446 struct request *__rq;
1447
1448 __rq = list_entry_rq(plug->list.prev);
1449 if (__rq->q != q)
1450 plug->should_sort = 1;
1451 }
1452 if (request_count >= BLK_MAX_REQUEST_COUNT) {
1453 blk_flush_plug_list(plug, false);
1454 trace_block_plug(q);
1455 }
1456 }
1457 list_add_tail(&req->queuelist, &plug->list);
1458 drive_stat_acct(req, 1);
1459 } else {
1460 spin_lock_irq(q->queue_lock);
1461 add_acct_request(q, req, where);
1462 __blk_run_queue(q);
1463 out_unlock:
1464 spin_unlock_irq(q->queue_lock);
1465 }
1466 }
1467 EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
1468
1469 /*
1470 * If bio->bi_bdev points to a partition, remap the location
1471 */
1472 static inline void blk_partition_remap(struct bio *bio)
1473 {
1474 struct block_device *bdev = bio->bi_bdev;
1475
1476 if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1477 struct hd_struct *p = bdev->bd_part;
1478
1479 bio->bi_sector += p->start_sect;
1480 bio->bi_bdev = bdev->bd_contains;
1481
1482 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1483 bdev->bd_dev,
1484 bio->bi_sector - p->start_sect);
1485 }
1486 }
1487
1488 static void handle_bad_sector(struct bio *bio)
1489 {
1490 char b[BDEVNAME_SIZE];
1491
1492 printk(KERN_INFO "attempt to access beyond end of device\n");
1493 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1494 bdevname(bio->bi_bdev, b),
1495 bio->bi_rw,
1496 (unsigned long long)bio->bi_sector + bio_sectors(bio),
1497 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1498
1499 set_bit(BIO_EOF, &bio->bi_flags);
1500 }
1501
1502 #ifdef CONFIG_FAIL_MAKE_REQUEST
1503
1504 static DECLARE_FAULT_ATTR(fail_make_request);
1505
1506 static int __init setup_fail_make_request(char *str)
1507 {
1508 return setup_fault_attr(&fail_make_request, str);
1509 }
1510 __setup("fail_make_request=", setup_fail_make_request);
1511
1512 static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
1513 {
1514 return part->make_it_fail && should_fail(&fail_make_request, bytes);
1515 }
1516
1517 static int __init fail_make_request_debugfs(void)
1518 {
1519 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1520 NULL, &fail_make_request);
1521
1522 return IS_ERR(dir) ? PTR_ERR(dir) : 0;
1523 }
1524
1525 late_initcall(fail_make_request_debugfs);
1526
1527 #else /* CONFIG_FAIL_MAKE_REQUEST */
1528
1529 static inline bool should_fail_request(struct hd_struct *part,
1530 unsigned int bytes)
1531 {
1532 return false;
1533 }
1534
1535 #endif /* CONFIG_FAIL_MAKE_REQUEST */
1536
1537 /*
1538 * Check whether this bio extends beyond the end of the device.
1539 */
1540 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1541 {
1542 sector_t maxsector;
1543
1544 if (!nr_sectors)
1545 return 0;
1546
1547 /* Test device or partition size, when known. */
1548 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1549 if (maxsector) {
1550 sector_t sector = bio->bi_sector;
1551
1552 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1553 /*
1554 * This may well happen - the kernel calls bread()
1555 * without checking the size of the device, e.g., when
1556 * mounting a device.
1557 */
1558 handle_bad_sector(bio);
1559 return 1;
1560 }
1561 }
1562
1563 return 0;
1564 }
1565
1566 static noinline_for_stack bool
1567 generic_make_request_checks(struct bio *bio)
1568 {
1569 struct request_queue *q;
1570 int nr_sectors = bio_sectors(bio);
1571 int err = -EIO;
1572 char b[BDEVNAME_SIZE];
1573 struct hd_struct *part;
1574
1575 might_sleep();
1576
1577 if (bio_check_eod(bio, nr_sectors))
1578 goto end_io;
1579
1580 q = bdev_get_queue(bio->bi_bdev);
1581 if (unlikely(!q)) {
1582 printk(KERN_ERR
1583 "generic_make_request: Trying to access "
1584 "nonexistent block-device %s (%Lu)\n",
1585 bdevname(bio->bi_bdev, b),
1586 (long long) bio->bi_sector);
1587 goto end_io;
1588 }
1589
1590 if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
1591 nr_sectors > queue_max_hw_sectors(q))) {
1592 printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1593 bdevname(bio->bi_bdev, b),
1594 bio_sectors(bio),
1595 queue_max_hw_sectors(q));
1596 goto end_io;
1597 }
1598
1599 part = bio->bi_bdev->bd_part;
1600 if (should_fail_request(part, bio->bi_size) ||
1601 should_fail_request(&part_to_disk(part)->part0,
1602 bio->bi_size))
1603 goto end_io;
1604
1605 /*
1606 * If this device has partitions, remap block n
1607 * of partition p to block n+start(p) of the disk.
1608 */
1609 blk_partition_remap(bio);
1610
1611 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
1612 goto end_io;
1613
1614 if (bio_check_eod(bio, nr_sectors))
1615 goto end_io;
1616
1617 /*
1618 * Filter flush bio's early so that make_request based
1619 * drivers without flush support don't have to worry
1620 * about them.
1621 */
1622 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
1623 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
1624 if (!nr_sectors) {
1625 err = 0;
1626 goto end_io;
1627 }
1628 }
1629
1630 if ((bio->bi_rw & REQ_DISCARD) &&
1631 (!blk_queue_discard(q) ||
1632 ((bio->bi_rw & REQ_SECURE) &&
1633 !blk_queue_secdiscard(q)))) {
1634 err = -EOPNOTSUPP;
1635 goto end_io;
1636 }
1637
1638 if (blk_throtl_bio(q, bio))
1639 return false; /* throttled, will be resubmitted later */
1640
1641 trace_block_bio_queue(q, bio);
1642 return true;
1643
1644 end_io:
1645 bio_endio(bio, err);
1646 return false;
1647 }
1648
1649 /**
1650 * generic_make_request - hand a buffer to its device driver for I/O
1651 * @bio: The bio describing the location in memory and on the device.
1652 *
1653 * generic_make_request() is used to make I/O requests of block
1654 * devices. It is passed a &struct bio, which describes the I/O that needs
1655 * to be done.
1656 *
1657 * generic_make_request() does not return any status. The
1658 * success/failure status of the request, along with notification of
1659 * completion, is delivered asynchronously through the bio->bi_end_io
1660 * function described (one day) elsewhere.
1661 *
1662 * The caller of generic_make_request must make sure that bi_io_vec
1663 * are set to describe the memory buffer, and that bi_dev and bi_sector are
1664 * set to describe the device address, and the
1665 * bi_end_io and optionally bi_private are set to describe how
1666 * completion notification should be signaled.
1667 *
1668 * generic_make_request and the drivers it calls may use bi_next if this
1669 * bio happens to be merged with someone else, and may resubmit the bio to
1670 * a lower device by calling into generic_make_request recursively, which
1671 * means the bio should NOT be touched after the call to ->make_request_fn.
1672 */
1673 void generic_make_request(struct bio *bio)
1674 {
1675 struct bio_list bio_list_on_stack;
1676
1677 if (!generic_make_request_checks(bio))
1678 return;
1679
1680 /*
1681 * We only want one ->make_request_fn to be active at a time, else
1682 * stack usage with stacked devices could be a problem. So use
1683 * current->bio_list to keep a list of requests submitted by a
1684 * make_request_fn function. current->bio_list is also used as a
1685 * flag to say if generic_make_request is currently active in this
1686 * task or not. If it is NULL, then no make_request is active. If
1687 * it is non-NULL, then a make_request is active, and new requests
1688 * should be added at the tail
1689 */
1690 if (current->bio_list) {
1691 bio_list_add(current->bio_list, bio);
1692 return;
1693 }
1694
1695 /* following loop may be a bit non-obvious, and so deserves some
1696 * explanation.
1697 * Before entering the loop, bio->bi_next is NULL (as all callers
1698 * ensure that) so we have a list with a single bio.
1699 * We pretend that we have just taken it off a longer list, so
1700 * we assign bio_list to a pointer to the bio_list_on_stack,
1701 * thus initialising the bio_list of new bios to be
1702 * added. ->make_request() may indeed add some more bios
1703 * through a recursive call to generic_make_request. If it
1704 * did, we find a non-NULL value in bio_list and re-enter the loop
1705 * from the top. In this case we really did just take the bio
1706 * off the top of the list (no pretending) and so remove it from
1707 * bio_list, and call into ->make_request() again.
1708 */
1709 BUG_ON(bio->bi_next);
1710 bio_list_init(&bio_list_on_stack);
1711 current->bio_list = &bio_list_on_stack;
1712 do {
1713 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1714
1715 q->make_request_fn(q, bio);
1716
1717 bio = bio_list_pop(current->bio_list);
1718 } while (bio);
1719 current->bio_list = NULL; /* deactivate */
1720 }
1721 EXPORT_SYMBOL(generic_make_request);
1722
1723 /**
1724 * submit_bio - submit a bio to the block device layer for I/O
1725 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1726 * @bio: The &struct bio which describes the I/O
1727 *
1728 * submit_bio() is very similar in purpose to generic_make_request(), and
1729 * uses that function to do most of the work. Both are fairly rough
1730 * interfaces; @bio must be set up and ready for I/O.
1731 *
1732 */
1733 void submit_bio(int rw, struct bio *bio)
1734 {
1735 int count = bio_sectors(bio);
1736
1737 bio->bi_rw |= rw;
1738
1739 /*
1740 * If it's a regular read/write or a barrier with data attached,
1741 * go through the normal accounting stuff before submission.
1742 */
1743 if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
1744 if (rw & WRITE) {
1745 count_vm_events(PGPGOUT, count);
1746 } else {
1747 task_io_account_read(bio->bi_size);
1748 count_vm_events(PGPGIN, count);
1749 }
1750
1751 if (unlikely(block_dump)) {
1752 char b[BDEVNAME_SIZE];
1753 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
1754 current->comm, task_pid_nr(current),
1755 (rw & WRITE) ? "WRITE" : "READ",
1756 (unsigned long long)bio->bi_sector,
1757 bdevname(bio->bi_bdev, b),
1758 count);
1759 }
1760 }
1761
1762 generic_make_request(bio);
1763 }
1764 EXPORT_SYMBOL(submit_bio);
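
/*
 * Example (illustrative sketch, not part of this file): building a one-page
 * bio and submitting it for read.  my_end_io() and my_read_page() are made-up
 * names and error handling is trimmed to the essentials; the usual kernel
 * headers (bio, completion) are assumed.
 */
static void my_end_io(struct bio *bio, int error)
{
        struct completion *done = bio->bi_private;

        complete(done);                 /* wake up the submitter */
        bio_put(bio);
}

static int my_read_page(struct block_device *bdev, sector_t sector,
                        struct page *page)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct bio *bio;

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio)
                return -ENOMEM;

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio->bi_end_io = my_end_io;
        bio->bi_private = &done;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        submit_bio(READ, bio);          /* asynchronous; wait below */
        wait_for_completion(&done);
        return 0;
}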
1765
1766 /**
1767 * blk_rq_check_limits - Helper function to check a request for the queue limit
1768 * @q: the queue
1769 * @rq: the request being checked
1770 *
1771 * Description:
1772 * @rq may have been made based on weaker limitations of upper-level queues
1773 * in request stacking drivers, and it may violate the limitation of @q.
1774 * Since the block layer and the underlying device driver trust @rq
1775 * after it is inserted to @q, it should be checked against @q before
1776 * the insertion using this generic function.
1777 *
1778 * This function should also be useful for request stacking drivers
1779 * in some cases below, so export this function.
1780 * Request stacking drivers like request-based dm may change the queue
1781 * limits while requests are in the queue (e.g. dm's table swapping).
1782 * Such request stacking drivers should check those requests against
1783 * the new queue limits again when they dispatch those requests,
1784 * although such checks are also done against the old queue limits
1785 * when submitting requests.
1786 */
1787 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1788 {
1789 if (rq->cmd_flags & REQ_DISCARD)
1790 return 0;
1791
1792 if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
1793 blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
1794 printk(KERN_ERR "%s: over max size limit.\n", __func__);
1795 return -EIO;
1796 }
1797
1798 /*
1799 * queue's settings related to segment counting like q->bounce_pfn
1800 * may differ from that of other stacking queues.
1801 * Recalculate it to check the request correctly on this queue's
1802 * limitation.
1803 */
1804 blk_recalc_rq_segments(rq);
1805 if (rq->nr_phys_segments > queue_max_segments(q)) {
1806 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1807 return -EIO;
1808 }
1809
1810 return 0;
1811 }
1812 EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1813
1814 /**
1815 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1816 * @q: the queue to submit the request
1817 * @rq: the request being queued
1818 */
1819 int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1820 {
1821 unsigned long flags;
1822 int where = ELEVATOR_INSERT_BACK;
1823
1824 if (blk_rq_check_limits(q, rq))
1825 return -EIO;
1826
1827 if (rq->rq_disk &&
1828 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
1829 return -EIO;
1830
1831 spin_lock_irqsave(q->queue_lock, flags);
1832 if (unlikely(blk_queue_dead(q))) {
1833 spin_unlock_irqrestore(q->queue_lock, flags);
1834 return -ENODEV;
1835 }
1836
1837 /*
1838 * Submitting request must be dequeued before calling this function
1839 * because it will be linked to another request_queue
1840 */
1841 BUG_ON(blk_queued_rq(rq));
1842
1843 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
1844 where = ELEVATOR_INSERT_FLUSH;
1845
1846 add_acct_request(q, rq, where);
1847 if (where == ELEVATOR_INSERT_FLUSH)
1848 __blk_run_queue(q);
1849 spin_unlock_irqrestore(q->queue_lock, flags);
1850
1851 return 0;
1852 }
1853 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1854
1855 /**
1856 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1857 * @rq: request to examine
1858 *
1859 * Description:
1860 * A request could be a merge of IOs which require different failure
1861 * handling. This function determines the number of bytes which
1862 * can be failed from the beginning of the request without
1863 * crossing into area which need to be retried further.
1864 *
1865 * Return:
1866 * The number of bytes to fail.
1867 *
1868 * Context:
1869 * queue_lock must be held.
1870 */
1871 unsigned int blk_rq_err_bytes(const struct request *rq)
1872 {
1873 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1874 unsigned int bytes = 0;
1875 struct bio *bio;
1876
1877 if (!(rq->cmd_flags & REQ_MIXED_MERGE))
1878 return blk_rq_bytes(rq);
1879
1880 /*
1881 * Currently the only 'mixing' which can happen is between
1882 * different failfast types. We can safely fail portions
1883 * which have all the failfast bits that the first one has -
1884 * the ones which are at least as eager to fail as the first
1885 * one.
1886 */
1887 for (bio = rq->bio; bio; bio = bio->bi_next) {
1888 if ((bio->bi_rw & ff) != ff)
1889 break;
1890 bytes += bio->bi_size;
1891 }
1892
1893 /* this could lead to infinite loop */
1894 BUG_ON(blk_rq_bytes(rq) && !bytes);
1895 return bytes;
1896 }
1897 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
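
/*
 * A minimal usage sketch (illustrative only, not from an in-tree driver):
 * failing only the portion of a mixed-merge request that shares the first
 * bio's failfast policy, then requeueing the remainder for a full retry.
 * The example_ name is an assumption; the caller is assumed to own @rq
 * (already started) and to not hold q->queue_lock.
 */
static void example_fail_failfast_portion(struct request_queue *q,
					   struct request *rq)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* Fail the prefix that is at least as eager to fail as the first bio... */
	if (__blk_end_request(rq, -EIO, blk_rq_err_bytes(rq)))
		/* ...and requeue whatever must be retried without failfast. */
		blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);
}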
1898
1899 static void blk_account_io_completion(struct request *req, unsigned int bytes)
1900 {
1901 if (blk_do_io_stat(req)) {
1902 const int rw = rq_data_dir(req);
1903 struct hd_struct *part;
1904 int cpu;
1905
1906 cpu = part_stat_lock();
1907 part = req->part;
1908 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1909 part_stat_unlock();
1910 }
1911 }
1912
1913 static void blk_account_io_done(struct request *req)
1914 {
1915 /*
1916 * Account IO completion. flush_rq isn't accounted as a
1917 * normal IO either at queueing or at completion. Accounting the
1918 * containing request is enough.
1919 */
1920 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
1921 unsigned long duration = jiffies - req->start_time;
1922 const int rw = rq_data_dir(req);
1923 struct hd_struct *part;
1924 int cpu;
1925
1926 cpu = part_stat_lock();
1927 part = req->part;
1928
1929 part_stat_inc(cpu, part, ios[rw]);
1930 part_stat_add(cpu, part, ticks[rw], duration);
1931 part_round_stats(cpu, part);
1932 part_dec_in_flight(part, rw);
1933
1934 hd_struct_put(part);
1935 part_stat_unlock();
1936 }
1937 }
1938
1939 /**
1940 * blk_peek_request - peek at the top of a request queue
1941 * @q: request queue to peek at
1942 *
1943 * Description:
1944 * Return the request at the top of @q. The returned request
1945 * should be started using blk_start_request() before LLD starts
1946 * processing it.
1947 *
1948 * Return:
1949 * Pointer to the request at the top of @q if available. %NULL
1950 * otherwise.
1951 *
1952 * Context:
1953 * queue_lock must be held.
1954 */
1955 struct request *blk_peek_request(struct request_queue *q)
1956 {
1957 struct request *rq;
1958 int ret;
1959
1960 while ((rq = __elv_next_request(q)) != NULL) {
1961 if (!(rq->cmd_flags & REQ_STARTED)) {
1962 /*
1963 * This is the first time the device driver
1964 * sees this request (possibly after
1965 * requeueing). Notify IO scheduler.
1966 */
1967 if (rq->cmd_flags & REQ_SORTED)
1968 elv_activate_rq(q, rq);
1969
1970 /*
1971 * Just mark it as started even if we don't start
1972 * it yet; a request that has been delayed should
1973 * not be passed by new incoming requests.
1974 */
1975 rq->cmd_flags |= REQ_STARTED;
1976 trace_block_rq_issue(q, rq);
1977 }
1978
1979 if (!q->boundary_rq || q->boundary_rq == rq) {
1980 q->end_sector = rq_end_sector(rq);
1981 q->boundary_rq = NULL;
1982 }
1983
1984 if (rq->cmd_flags & REQ_DONTPREP)
1985 break;
1986
1987 if (q->dma_drain_size && blk_rq_bytes(rq)) {
1988 /*
1989 * make sure space for the drain appears; we
1990 * know we can do this because max_hw_segments
1991 * has been adjusted to be one fewer than the
1992 * device can handle
1993 */
1994 rq->nr_phys_segments++;
1995 }
1996
1997 if (!q->prep_rq_fn)
1998 break;
1999
2000 ret = q->prep_rq_fn(q, rq);
2001 if (ret == BLKPREP_OK) {
2002 break;
2003 } else if (ret == BLKPREP_DEFER) {
2004 /*
2005 * the request may have been (partially) prepped.
2006 * we need to keep this request in the front to
2007 * avoid resource deadlock. REQ_STARTED will
2008 * prevent other fs requests from passing this one.
2009 */
2010 if (q->dma_drain_size && blk_rq_bytes(rq) &&
2011 !(rq->cmd_flags & REQ_DONTPREP)) {
2012 /*
2013 * remove the space for the drain we added
2014 * so that we don't add it again
2015 */
2016 --rq->nr_phys_segments;
2017 }
2018
2019 rq = NULL;
2020 break;
2021 } else if (ret == BLKPREP_KILL) {
2022 rq->cmd_flags |= REQ_QUIET;
2023 /*
2024 * Mark this request as started so we don't trigger
2025 * any debug logic in the end I/O path.
2026 */
2027 blk_start_request(rq);
2028 __blk_end_request_all(rq, -EIO);
2029 } else {
2030 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2031 break;
2032 }
2033 }
2034
2035 return rq;
2036 }
2037 EXPORT_SYMBOL(blk_peek_request);
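
/*
 * A minimal usage sketch (illustrative only, not from an in-tree driver):
 * the peek-then-start pattern described above inside a ->request_fn, which
 * is invoked with q->queue_lock held. example_hw_busy stands in for a real
 * controller-busy test; both example_ names are made up for this sketch.
 */
static bool example_hw_busy;

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (example_hw_busy)
			break;	/* leave rq queued; peeking did not dequeue it */

		blk_start_request(rq);		/* dequeue and arm the timeout */
		__blk_end_request_all(rq, 0);	/* pretend the hardware finished */
	}
}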
2038
2039 void blk_dequeue_request(struct request *rq)
2040 {
2041 struct request_queue *q = rq->q;
2042
2043 BUG_ON(list_empty(&rq->queuelist));
2044 BUG_ON(ELV_ON_HASH(rq));
2045
2046 list_del_init(&rq->queuelist);
2047
2048 /*
2049 * the time frame between a request being removed from the lists
2050 * and when it is freed is accounted as I/O in progress at
2051 * the driver side.
2052 */
2053 if (blk_account_rq(rq)) {
2054 q->in_flight[rq_is_sync(rq)]++;
2055 set_io_start_time_ns(rq);
2056 }
2057 }
2058
2059 /**
2060 * blk_start_request - start request processing on the driver
2061 * @req: request to dequeue
2062 *
2063 * Description:
2064 * Dequeue @req and start timeout timer on it. This hands off the
2065 * request to the driver.
2066 *
2067 * Block internal functions which don't want to start timer should
2068 * call blk_dequeue_request().
2069 *
2070 * Context:
2071 * queue_lock must be held.
2072 */
2073 void blk_start_request(struct request *req)
2074 {
2075 blk_dequeue_request(req);
2076
2077 /*
2078 * We are now handing the request to the hardware, initialize
2079 * resid_len to full count and add the timeout handler.
2080 */
2081 req->resid_len = blk_rq_bytes(req);
2082 if (unlikely(blk_bidi_rq(req)))
2083 req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
2084
2085 blk_add_timer(req);
2086 }
2087 EXPORT_SYMBOL(blk_start_request);
2088
2089 /**
2090 * blk_fetch_request - fetch a request from a request queue
2091 * @q: request queue to fetch a request from
2092 *
2093 * Description:
2094 * Return the request at the top of @q. The request is started on
2095 * return and LLD can start processing it immediately.
2096 *
2097 * Return:
2098 * Pointer to the request at the top of @q if available. %NULL
2099 * otherwise.
2100 *
2101 * Context:
2102 * queue_lock must be held.
2103 */
2104 struct request *blk_fetch_request(struct request_queue *q)
2105 {
2106 struct request *rq;
2107
2108 rq = blk_peek_request(q);
2109 if (rq)
2110 blk_start_request(rq);
2111 return rq;
2112 }
2113 EXPORT_SYMBOL(blk_fetch_request);
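
/*
 * A minimal usage sketch (illustrative only, not from an in-tree driver):
 * when no busy check is needed between peeking and starting, a ->request_fn
 * can simply fetch and complete. The example_ name is made up; this mimics
 * a trivial "null" device that completes everything immediately.
 */
static void example_null_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* ->request_fn runs with q->queue_lock held. */
	while ((rq = blk_fetch_request(q)) != NULL)
		__blk_end_request_all(rq, 0);
}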
2114
2115 /**
2116 * blk_update_request - Special helper function for request stacking drivers
2117 * @req: the request being processed
2118 * @error: %0 for success, < %0 for error
2119 * @nr_bytes: number of bytes to complete @req
2120 *
2121 * Description:
2122 * Ends I/O on a number of bytes attached to @req, but doesn't complete
2123 * the request structure even if @req has no data left.
2124 * If @req has leftover data, it is set up for the next range of segments.
2125 *
2126 * This special helper function is only for request stacking drivers
2127 * (e.g. request-based dm) so that they can handle partial completion.
2128 * Actual device drivers should use blk_end_request instead.
2129 *
2130 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2131 * %false return from this function.
2132 *
2133 * Return:
2134 * %false - this request doesn't have any more data
2135 * %true - this request has more data
2136 **/
2137 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2138 {
2139 int total_bytes, bio_nbytes, next_idx = 0;
2140 struct bio *bio;
2141
2142 if (!req->bio)
2143 return false;
2144
2145 trace_block_rq_complete(req->q, req);
2146
2147 /*
2148 * For fs requests, rq is just carrier of independent bio's
2149 * and each partial completion should be handled separately.
2150 * Reset per-request error on each partial completion.
2151 *
2152 * TODO: tj: This is too subtle. It would be better to let
2153 * low level drivers do what they see fit.
2154 */
2155 if (req->cmd_type == REQ_TYPE_FS)
2156 req->errors = 0;
2157
2158 if (error && req->cmd_type == REQ_TYPE_FS &&
2159 !(req->cmd_flags & REQ_QUIET)) {
2160 char *error_type;
2161
2162 switch (error) {
2163 case -ENOLINK:
2164 error_type = "recoverable transport";
2165 break;
2166 case -EREMOTEIO:
2167 error_type = "critical target";
2168 break;
2169 case -EBADE:
2170 error_type = "critical nexus";
2171 break;
2172 case -EIO:
2173 default:
2174 error_type = "I/O";
2175 break;
2176 }
2177 printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
2178 error_type, req->rq_disk ? req->rq_disk->disk_name : "?",
2179 (unsigned long long)blk_rq_pos(req));
2180 }
2181
2182 blk_account_io_completion(req, nr_bytes);
2183
2184 total_bytes = bio_nbytes = 0;
2185 while ((bio = req->bio) != NULL) {
2186 int nbytes;
2187
2188 if (nr_bytes >= bio->bi_size) {
2189 req->bio = bio->bi_next;
2190 nbytes = bio->bi_size;
2191 req_bio_endio(req, bio, nbytes, error);
2192 next_idx = 0;
2193 bio_nbytes = 0;
2194 } else {
2195 int idx = bio->bi_idx + next_idx;
2196
2197 if (unlikely(idx >= bio->bi_vcnt)) {
2198 blk_dump_rq_flags(req, "__end_that");
2199 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
2200 __func__, idx, bio->bi_vcnt);
2201 break;
2202 }
2203
2204 nbytes = bio_iovec_idx(bio, idx)->bv_len;
2205 BIO_BUG_ON(nbytes > bio->bi_size);
2206
2207 /*
2208 * not a complete bvec done
2209 */
2210 if (unlikely(nbytes > nr_bytes)) {
2211 bio_nbytes += nr_bytes;
2212 total_bytes += nr_bytes;
2213 break;
2214 }
2215
2216 /*
2217 * advance to the next vector
2218 */
2219 next_idx++;
2220 bio_nbytes += nbytes;
2221 }
2222
2223 total_bytes += nbytes;
2224 nr_bytes -= nbytes;
2225
2226 bio = req->bio;
2227 if (bio) {
2228 /*
2229 * end more in this run, or just return 'not-done'
2230 */
2231 if (unlikely(nr_bytes <= 0))
2232 break;
2233 }
2234 }
2235
2236 /*
2237 * completely done
2238 */
2239 if (!req->bio) {
2240 /*
2241 * Reset counters so that the request stacking driver
2242 * can find how many bytes remain in the request
2243 * later.
2244 */
2245 req->__data_len = 0;
2246 return false;
2247 }
2248
2249 /*
2250 * if the request wasn't completed, update state
2251 */
2252 if (bio_nbytes) {
2253 req_bio_endio(req, bio, bio_nbytes, error);
2254 bio->bi_idx += next_idx;
2255 bio_iovec(bio)->bv_offset += nr_bytes;
2256 bio_iovec(bio)->bv_len -= nr_bytes;
2257 }
2258
2259 req->__data_len -= total_bytes;
2260 req->buffer = bio_data(req->bio);
2261
2262 /* update sector only for requests with clear definition of sector */
2263 if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
2264 req->__sector += total_bytes >> 9;
2265
2266 /* mixed attributes always follow the first bio */
2267 if (req->cmd_flags & REQ_MIXED_MERGE) {
2268 req->cmd_flags &= ~REQ_FAILFAST_MASK;
2269 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2270 }
2271
2272 /*
2273 * If the total number of sectors is less than the first segment
2274 * size, something has gone terribly wrong.
2275 */
2276 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2277 blk_dump_rq_flags(req, "request botched");
2278 req->__data_len = blk_rq_cur_bytes(req);
2279 }
2280
2281 /* recalculate the number of segments */
2282 blk_recalc_rq_segments(req);
2283
2284 return true;
2285 }
2286 EXPORT_SYMBOL_GPL(blk_update_request);
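
/*
 * A minimal usage sketch (illustrative only): how a request stacking driver,
 * in the spirit of request-based dm, might propagate a partial completion
 * from a clone back onto the original request without finishing it. The
 * example_ name, @orig and @bytes_done are assumptions for this sketch.
 */
static bool example_partial_complete(struct request *orig,
				     unsigned int bytes_done)
{
	/*
	 * Advance @orig by @bytes_done without completing the request
	 * structure. Returns true while data is still outstanding; once it
	 * returns false the stacking driver finishes @orig itself.
	 */
	return blk_update_request(orig, 0, bytes_done);
}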
2287
2288 static bool blk_update_bidi_request(struct request *rq, int error,
2289 unsigned int nr_bytes,
2290 unsigned int bidi_bytes)
2291 {
2292 if (blk_update_request(rq, error, nr_bytes))
2293 return true;
2294
2295 /* Bidi request must be completed as a whole */
2296 if (unlikely(blk_bidi_rq(rq)) &&
2297 blk_update_request(rq->next_rq, error, bidi_bytes))
2298 return true;
2299
2300 if (blk_queue_add_random(rq->q))
2301 add_disk_randomness(rq->rq_disk);
2302
2303 return false;
2304 }
2305
2306 /**
2307 * blk_unprep_request - unprepare a request
2308 * @req: the request
2309 *
2310 * This function makes a request ready for complete resubmission (or
2311 * completion). It happens only after all error handling is complete,
2312 * so it represents the appropriate moment to deallocate any resources
2313 * that were allocated to the request in the prep_rq_fn. The queue
2314 * lock is held when calling this.
2315 */
2316 void blk_unprep_request(struct request *req)
2317 {
2318 struct request_queue *q = req->q;
2319
2320 req->cmd_flags &= ~REQ_DONTPREP;
2321 if (q->unprep_rq_fn)
2322 q->unprep_rq_fn(q, req);
2323 }
2324 EXPORT_SYMBOL_GPL(blk_unprep_request);
2325
2326 /*
2327 * queue lock must be held
2328 */
2329 static void blk_finish_request(struct request *req, int error)
2330 {
2331 if (blk_rq_tagged(req))
2332 blk_queue_end_tag(req->q, req);
2333
2334 BUG_ON(blk_queued_rq(req));
2335
2336 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
2337 laptop_io_completion(&req->q->backing_dev_info);
2338
2339 blk_delete_timer(req);
2340
2341 if (req->cmd_flags & REQ_DONTPREP)
2342 blk_unprep_request(req);
2343
2344
2345 blk_account_io_done(req);
2346
2347 if (req->end_io)
2348 req->end_io(req, error);
2349 else {
2350 if (blk_bidi_rq(req))
2351 __blk_put_request(req->next_rq->q, req->next_rq);
2352
2353 __blk_put_request(req->q, req);
2354 }
2355 }
2356
2357 /**
2358 * blk_end_bidi_request - Complete a bidi request
2359 * @rq: the request to complete
2360 * @error: %0 for success, < %0 for error
2361 * @nr_bytes: number of bytes to complete @rq
2362 * @bidi_bytes: number of bytes to complete @rq->next_rq
2363 *
2364 * Description:
2365 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2366 * Drivers that support bidi can safely call this function for any
2367 * type of request, bidi or uni. In the latter case @bidi_bytes is
2368 * just ignored.
2369 *
2370 * Return:
2371 * %false - we are done with this request
2372 * %true - still buffers pending for this request
2373 **/
2374 static bool blk_end_bidi_request(struct request *rq, int error,
2375 unsigned int nr_bytes, unsigned int bidi_bytes)
2376 {
2377 struct request_queue *q = rq->q;
2378 unsigned long flags;
2379
2380 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2381 return true;
2382
2383 spin_lock_irqsave(q->queue_lock, flags);
2384 blk_finish_request(rq, error);
2385 spin_unlock_irqrestore(q->queue_lock, flags);
2386
2387 return false;
2388 }
2389
2390 /**
2391 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2392 * @rq: the request to complete
2393 * @error: %0 for success, < %0 for error
2394 * @nr_bytes: number of bytes to complete @rq
2395 * @bidi_bytes: number of bytes to complete @rq->next_rq
2396 *
2397 * Description:
2398 * Identical to blk_end_bidi_request() except that queue lock is
2399 * assumed to be locked on entry and remains so on return.
2400 *
2401 * Return:
2402 * %false - we are done with this request
2403 * %true - still buffers pending for this request
2404 **/
2405 bool __blk_end_bidi_request(struct request *rq, int error,
2406 unsigned int nr_bytes, unsigned int bidi_bytes)
2407 {
2408 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2409 return true;
2410
2411 blk_finish_request(rq, error);
2412
2413 return false;
2414 }
2415
2416 /**
2417 * blk_end_request - Helper function for drivers to complete the request.
2418 * @rq: the request being processed
2419 * @error: %0 for success, < %0 for error
2420 * @nr_bytes: number of bytes to complete
2421 *
2422 * Description:
2423 * Ends I/O on a number of bytes attached to @rq.
2424 * If @rq has leftover, sets it up for the next range of segments.
2425 *
2426 * Return:
2427 * %false - we are done with this request
2428 * %true - still buffers pending for this request
2429 **/
2430 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2431 {
2432 return blk_end_bidi_request(rq, error, nr_bytes, 0);
2433 }
2434 EXPORT_SYMBOL(blk_end_request);
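
/*
 * A minimal usage sketch (illustrative only, not from an in-tree driver):
 * a completion path for a hypothetical driver that finishes one chunk of a
 * request per hardware interrupt. blk_end_request() takes the queue_lock
 * itself, so the caller must not hold it. The example_ name and @done value
 * (bytes reported by the hardware) are assumptions for this sketch.
 */
static void example_complete_chunk(struct request *rq, unsigned int done)
{
	if (blk_end_request(rq, 0, done)) {
		/* More data remains; rq is set up for the next segment. */
		return;
	}
	/* rq is fully completed and has been released by the block layer. */
}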
2435
2436 /**
2437 * blk_end_request_all - Helper function for drivers to finish the request.
2438 * @rq: the request to finish
2439 * @error: %0 for success, < %0 for error
2440 *
2441 * Description:
2442 * Completely finish @rq.
2443 */
2444 void blk_end_request_all(struct request *rq, int error)
2445 {
2446 bool pending;
2447 unsigned int bidi_bytes = 0;
2448
2449 if (unlikely(blk_bidi_rq(rq)))
2450 bidi_bytes = blk_rq_bytes(rq->next_rq);
2451
2452 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2453 BUG_ON(pending);
2454 }
2455 EXPORT_SYMBOL(blk_end_request_all);
2456
2457 /**
2458 * blk_end_request_cur - Helper function to finish the current request chunk.
2459 * @rq: the request to finish the current chunk for
2460 * @error: %0 for success, < %0 for error
2461 *
2462 * Description:
2463 * Complete the current consecutively mapped chunk from @rq.
2464 *
2465 * Return:
2466 * %false - we are done with this request
2467 * %true - still buffers pending for this request
2468 */
2469 bool blk_end_request_cur(struct request *rq, int error)
2470 {
2471 return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2472 }
2473 EXPORT_SYMBOL(blk_end_request_cur);
2474
2475 /**
2476 * blk_end_request_err - Finish a request till the next failure boundary.
2477 * @rq: the request to finish till the next failure boundary for
2478 * @error: must be negative errno
2479 *
2480 * Description:
2481 * Complete @rq till the next failure boundary.
2482 *
2483 * Return:
2484 * %false - we are done with this request
2485 * %true - still buffers pending for this request
2486 */
2487 bool blk_end_request_err(struct request *rq, int error)
2488 {
2489 WARN_ON(error >= 0);
2490 return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2491 }
2492 EXPORT_SYMBOL_GPL(blk_end_request_err);
2493
2494 /**
2495 * __blk_end_request - Helper function for drivers to complete the request.
2496 * @rq: the request being processed
2497 * @error: %0 for success, < %0 for error
2498 * @nr_bytes: number of bytes to complete
2499 *
2500 * Description:
2501 * Must be called with queue lock held unlike blk_end_request().
2502 *
2503 * Return:
2504 * %false - we are done with this request
2505 * %true - still buffers pending for this request
2506 **/
2507 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2508 {
2509 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2510 }
2511 EXPORT_SYMBOL(__blk_end_request);
2512
2513 /**
2514 * __blk_end_request_all - Helper function for drivers to finish the request.
2515 * @rq: the request to finish
2516 * @error: %0 for success, < %0 for error
2517 *
2518 * Description:
2519 * Completely finish @rq. Must be called with queue lock held.
2520 */
2521 void __blk_end_request_all(struct request *rq, int error)
2522 {
2523 bool pending;
2524 unsigned int bidi_bytes = 0;
2525
2526 if (unlikely(blk_bidi_rq(rq)))
2527 bidi_bytes = blk_rq_bytes(rq->next_rq);
2528
2529 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2530 BUG_ON(pending);
2531 }
2532 EXPORT_SYMBOL(__blk_end_request_all);
2533
2534 /**
2535 * __blk_end_request_cur - Helper function to finish the current request chunk.
2536 * @rq: the request to finish the current chunk for
2537 * @error: %0 for success, < %0 for error
2538 *
2539 * Description:
2540 * Complete the current consecutively mapped chunk from @rq. Must
2541 * be called with queue lock held.
2542 *
2543 * Return:
2544 * %false - we are done with this request
2545 * %true - still buffers pending for this request
2546 */
2547 bool __blk_end_request_cur(struct request *rq, int error)
2548 {
2549 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2550 }
2551 EXPORT_SYMBOL(__blk_end_request_cur);
2552
2553 /**
2554 * __blk_end_request_err - Finish a request till the next failure boundary.
2555 * @rq: the request to finish till the next failure boundary for
2556 * @error: must be negative errno
2557 *
2558 * Description:
2559 * Complete @rq till the next failure boundary. Must be called
2560 * with queue lock held.
2561 *
2562 * Return:
2563 * %false - we are done with this request
2564 * %true - still buffers pending for this request
2565 */
2566 bool __blk_end_request_err(struct request *rq, int error)
2567 {
2568 WARN_ON(error >= 0);
2569 return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2570 }
2571 EXPORT_SYMBOL_GPL(__blk_end_request_err);
2572
2573 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2574 struct bio *bio)
2575 {
2576 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
2577 rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
2578
2579 if (bio_has_data(bio)) {
2580 rq->nr_phys_segments = bio_phys_segments(q, bio);
2581 rq->buffer = bio_data(bio);
2582 }
2583 rq->__data_len = bio->bi_size;
2584 rq->bio = rq->biotail = bio;
2585
2586 if (bio->bi_bdev)
2587 rq->rq_disk = bio->bi_bdev->bd_disk;
2588 }
2589
2590 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2591 /**
2592 * rq_flush_dcache_pages - Helper function to flush all pages in a request
2593 * @rq: the request to be flushed
2594 *
2595 * Description:
2596 * Flush all pages in @rq.
2597 */
2598 void rq_flush_dcache_pages(struct request *rq)
2599 {
2600 struct req_iterator iter;
2601 struct bio_vec *bvec;
2602
2603 rq_for_each_segment(bvec, rq, iter)
2604 flush_dcache_page(bvec->bv_page);
2605 }
2606 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2607 #endif
2608
2609 /**
2610 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2611 * @q : the queue of the device being checked
2612 *
2613 * Description:
2614 * Check if underlying low-level drivers of a device are busy.
2615 * If the drivers want to export their busy state, they must set own
2616 * exporting function using blk_queue_lld_busy() first.
2617 *
2618 * Basically, this function is used only by request stacking drivers
2619 * to stop dispatching requests to underlying devices when underlying
2620 * devices are busy. This behavior improves I/O merging on the queue
2621 * of the request stacking driver and prevents I/O throughput regressions
2622 * under bursty I/O load.
2623 *
2624 * Return:
2625 * 0 - Not busy (The request stacking driver should dispatch request)
2626 * 1 - Busy (The request stacking driver should stop dispatching request)
2627 */
2628 int blk_lld_busy(struct request_queue *q)
2629 {
2630 if (q->lld_busy_fn)
2631 return q->lld_busy_fn(q);
2632
2633 return 0;
2634 }
2635 EXPORT_SYMBOL_GPL(blk_lld_busy);
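
/*
 * A minimal usage sketch (illustrative only, not from an in-tree driver):
 * a low-level driver exporting its busy state via blk_queue_lld_busy(), and
 * a stacking driver consulting blk_lld_busy() before dispatching. The
 * example_ names and the static busy flag are assumptions for this sketch.
 */
static int example_lld_busy_flag;

static int example_lld_busy_fn(struct request_queue *q)
{
	return example_lld_busy_flag;
}

static void example_register_busy_fn(struct request_queue *q)
{
	blk_queue_lld_busy(q, example_lld_busy_fn);
}

static bool example_should_dispatch(struct request_queue *bottom_q)
{
	/* A stacking driver holds back while the bottom device is busy. */
	return !blk_lld_busy(bottom_q);
}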
2636
2637 /**
2638 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2639 * @rq: the clone request to be cleaned up
2640 *
2641 * Description:
2642 * Free all bios in @rq for a cloned request.
2643 */
2644 void blk_rq_unprep_clone(struct request *rq)
2645 {
2646 struct bio *bio;
2647
2648 while ((bio = rq->bio) != NULL) {
2649 rq->bio = bio->bi_next;
2650
2651 bio_put(bio);
2652 }
2653 }
2654 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2655
2656 /*
2657 * Copy attributes of the original request to the clone request.
2658 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
2659 */
2660 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2661 {
2662 dst->cpu = src->cpu;
2663 dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
2664 dst->cmd_type = src->cmd_type;
2665 dst->__sector = blk_rq_pos(src);
2666 dst->__data_len = blk_rq_bytes(src);
2667 dst->nr_phys_segments = src->nr_phys_segments;
2668 dst->ioprio = src->ioprio;
2669 dst->extra_len = src->extra_len;
2670 }
2671
2672 /**
2673 * blk_rq_prep_clone - Helper function to setup clone request
2674 * @rq: the request to be setup
2675 * @rq_src: original request to be cloned
2676 * @bs: bio_set that bios for clone are allocated from
2677 * @gfp_mask: memory allocation mask for bio
2678 * @bio_ctr: setup function to be called for each clone bio.
2679 * Returns %0 for success, non %0 for failure.
2680 * @data: private data to be passed to @bio_ctr
2681 *
2682 * Description:
2683 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2684 * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
2685 * are not copied, and copying such parts is the caller's responsibility.
2686 * Also, pages which the original bios are pointing to are not copied
2687 * and the cloned bios just point to the same pages.
2688 * So cloned bios must be completed before original bios, which means
2689 * the caller must complete @rq before @rq_src.
2690 */
2691 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2692 struct bio_set *bs, gfp_t gfp_mask,
2693 int (*bio_ctr)(struct bio *, struct bio *, void *),
2694 void *data)
2695 {
2696 struct bio *bio, *bio_src;
2697
2698 if (!bs)
2699 bs = fs_bio_set;
2700
2701 blk_rq_init(NULL, rq);
2702
2703 __rq_for_each_bio(bio_src, rq_src) {
2704 bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
2705 if (!bio)
2706 goto free_and_out;
2707
2708 __bio_clone(bio, bio_src);
2709
2710 if (bio_integrity(bio_src) &&
2711 bio_integrity_clone(bio, bio_src, gfp_mask, bs))
2712 goto free_and_out;
2713
2714 if (bio_ctr && bio_ctr(bio, bio_src, data))
2715 goto free_and_out;
2716
2717 if (rq->bio) {
2718 rq->biotail->bi_next = bio;
2719 rq->biotail = bio;
2720 } else
2721 rq->bio = rq->biotail = bio;
2722 }
2723
2724 __blk_rq_prep_clone(rq, rq_src);
2725
2726 return 0;
2727
2728 free_and_out:
2729 if (bio)
2730 bio_free(bio, bs);
2731 blk_rq_unprep_clone(rq);
2732
2733 return -ENOMEM;
2734 }
2735 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
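
/*
 * A minimal usage sketch (illustrative only, not from an in-tree driver):
 * the clone-and-dispatch pattern of a request stacking driver, condensed.
 * A driver-allocated clone is populated from the original request, then
 * inserted into the bottom queue; on failure the cloned bios are freed
 * again. No bio_set or per-bio constructor is passed, so the defaults
 * apply. The example_ name is an assumption for this sketch.
 */
static int example_clone_and_dispatch(struct request *clone,
				      struct request *orig,
				      struct request_queue *bottom_q)
{
	int ret;

	ret = blk_rq_prep_clone(clone, orig, NULL, GFP_ATOMIC, NULL, NULL);
	if (ret)
		return ret;	/* -ENOMEM: bios could not be cloned */

	ret = blk_insert_cloned_request(bottom_q, clone);
	if (ret)
		blk_rq_unprep_clone(clone);	/* free the cloned bios */

	return ret;
}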
2736
2737 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2738 {
2739 return queue_work(kblockd_workqueue, work);
2740 }
2741 EXPORT_SYMBOL(kblockd_schedule_work);
2742
2743 int kblockd_schedule_delayed_work(struct request_queue *q,
2744 struct delayed_work *dwork, unsigned long delay)
2745 {
2746 return queue_delayed_work(kblockd_workqueue, dwork, delay);
2747 }
2748 EXPORT_SYMBOL(kblockd_schedule_delayed_work);
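
/*
 * A minimal usage sketch (illustrative only, not from an in-tree driver):
 * deferring queue processing to kblockd instead of running it in the
 * current context. The example_ work items and handler are assumptions;
 * a real driver would embed the work_struct in its own per-queue state.
 */
static void example_kick_fn(struct work_struct *work)
{
	/* Runs in kblockd context; safe to take queue locks and dispatch. */
}

static DECLARE_WORK(example_kick_work, example_kick_fn);
static DECLARE_DELAYED_WORK(example_kick_dwork, example_kick_fn);

static void example_kick_later(struct request_queue *q)
{
	kblockd_schedule_work(q, &example_kick_work);		/* run soon */
	kblockd_schedule_delayed_work(q, &example_kick_dwork, HZ); /* in ~1s */
}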
2749
2750 #define PLUG_MAGIC 0x91827364
2751
2752 /**
2753 * blk_start_plug - initialize blk_plug and track it inside the task_struct
2754 * @plug: The &struct blk_plug that needs to be initialized
2755 *
2756 * Description:
2757 * Tracking blk_plug inside the task_struct will help with auto-flushing the
2758 * pending I/O should the task end up blocking between blk_start_plug() and
2759 * blk_finish_plug(). This is important from a performance perspective, but
2760 * also ensures that we don't deadlock. For instance, if the task is blocking
2761 * for a memory allocation, memory reclaim could end up wanting to free a
2762 * page belonging to a request that is currently residing in our private
2763 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
2764 * this kind of deadlock.
2765 */
2766 void blk_start_plug(struct blk_plug *plug)
2767 {
2768 struct task_struct *tsk = current;
2769
2770 plug->magic = PLUG_MAGIC;
2771 INIT_LIST_HEAD(&plug->list);
2772 INIT_LIST_HEAD(&plug->cb_list);
2773 plug->should_sort = 0;
2774
2775 /*
2776 * If this is a nested plug, don't actually assign it. It will be
2777 * flushed on its own.
2778 */
2779 if (!tsk->plug) {
2780 /*
2781 * Store ordering should not be needed here, since a potential
2782 * preempt will imply a full memory barrier
2783 */
2784 tsk->plug = plug;
2785 }
2786 }
2787 EXPORT_SYMBOL(blk_start_plug);
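
/*
 * A minimal usage sketch (illustrative only, not from an in-tree driver):
 * plugging around a batch of submissions so the requests collect in the
 * on-stack plug and are flushed together by blk_finish_plug() (defined
 * further below). The example_ name and the bios array are assumptions.
 */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]->bi_rw, bios[i]);
	blk_finish_plug(&plug);	/* flush everything we plugged */
}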
2788
2789 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
2790 {
2791 struct request *rqa = container_of(a, struct request, queuelist);
2792 struct request *rqb = container_of(b, struct request, queuelist);
2793
2794 return !(rqa->q <= rqb->q);
2795 }
2796
2797 /*
2798 * If 'from_schedule' is true, then postpone the dispatch of requests
2799 * until a safe kblockd context. We do this to avoid accidentally large
2800 * additional stack usage in driver dispatch, in places where the original
2801 * plugger did not intend it.
2802 */
2803 static void queue_unplugged(struct request_queue *q, unsigned int depth,
2804 bool from_schedule)
2805 __releases(q->queue_lock)
2806 {
2807 trace_block_unplug(q, depth, !from_schedule);
2808
2809 /*
2810 * Don't mess with dead queue.
2811 */
2812 if (unlikely(blk_queue_dead(q))) {
2813 spin_unlock(q->queue_lock);
2814 return;
2815 }
2816
2817 /*
2818 * If we are punting this to kblockd, then we can safely drop
2819 * the queue_lock before waking kblockd (which needs to take
2820 * this lock).
2821 */
2822 if (from_schedule) {
2823 spin_unlock(q->queue_lock);
2824 blk_run_queue_async(q);
2825 } else {
2826 __blk_run_queue(q);
2827 spin_unlock(q->queue_lock);
2828 }
2829
2830 }
2831
2832 static void flush_plug_callbacks(struct blk_plug *plug)
2833 {
2834 LIST_HEAD(callbacks);
2835
2836 if (list_empty(&plug->cb_list))
2837 return;
2838
2839 list_splice_init(&plug->cb_list, &callbacks);
2840
2841 while (!list_empty(&callbacks)) {
2842 struct blk_plug_cb *cb = list_first_entry(&callbacks,
2843 struct blk_plug_cb,
2844 list);
2845 list_del(&cb->list);
2846 cb->callback(cb);
2847 }
2848 }
2849
2850 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2851 {
2852 struct request_queue *q;
2853 unsigned long flags;
2854 struct request *rq;
2855 LIST_HEAD(list);
2856 unsigned int depth;
2857
2858 BUG_ON(plug->magic != PLUG_MAGIC);
2859
2860 flush_plug_callbacks(plug);
2861 if (list_empty(&plug->list))
2862 return;
2863
2864 list_splice_init(&plug->list, &list);
2865
2866 if (plug->should_sort) {
2867 list_sort(NULL, &list, plug_rq_cmp);
2868 plug->should_sort = 0;
2869 }
2870
2871 q = NULL;
2872 depth = 0;
2873
2874 /*
2875 * Save and disable interrupts here, to avoid doing it for every
2876 * queue lock we have to take.
2877 */
2878 local_irq_save(flags);
2879 while (!list_empty(&list)) {
2880 rq = list_entry_rq(list.next);
2881 list_del_init(&rq->queuelist);
2882 BUG_ON(!rq->q);
2883 if (rq->q != q) {
2884 /*
2885 * This drops the queue lock
2886 */
2887 if (q)
2888 queue_unplugged(q, depth, from_schedule);
2889 q = rq->q;
2890 depth = 0;
2891 spin_lock(q->queue_lock);
2892 }
2893
2894 /*
2895 * Short-circuit if @q is dead
2896 */
2897 if (unlikely(blk_queue_dead(q))) {
2898 __blk_end_request_all(rq, -ENODEV);
2899 continue;
2900 }
2901
2902 /*
2903 * rq is already accounted, so use raw insert
2904 */
2905 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
2906 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
2907 else
2908 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
2909
2910 depth++;
2911 }
2912
2913 /*
2914 * This drops the queue lock
2915 */
2916 if (q)
2917 queue_unplugged(q, depth, from_schedule);
2918
2919 local_irq_restore(flags);
2920 }
2921
2922 void blk_finish_plug(struct blk_plug *plug)
2923 {
2924 blk_flush_plug_list(plug, false);
2925
2926 if (plug == current->plug)
2927 current->plug = NULL;
2928 }
2929 EXPORT_SYMBOL(blk_finish_plug);
2930
2931 int __init blk_dev_init(void)
2932 {
2933 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
2934 sizeof(((struct request *)0)->cmd_flags));
2935
2936 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
2937 kblockd_workqueue = alloc_workqueue("kblockd",
2938 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2939 if (!kblockd_workqueue)
2940 panic("Failed to create kblockd\n");
2941
2942 request_cachep = kmem_cache_create("blkdev_requests",
2943 sizeof(struct request), 0, SLAB_PANIC, NULL);
2944
2945 blk_requestq_cachep = kmem_cache_create("blkdev_queue",
2946 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
2947
2948 return 0;
2949 }