/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"
#include "blk-mq.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep = NULL;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
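/*
 * Worked example (editorial addition, not in the original file): with the
 * common default of q->nr_requests = 128 (BLKDEV_MAX_RQ), the thresholds
 * come out as:
 *
 *	nr_congestion_on  = 128 - 128/8 + 1          = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103
 *
 * The ~10-request gap between the two provides hysteresis so the
 * congested state does not flap on every allocation/free.
 */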
/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info.  This function can only be called if @bdev is opened
 * and the return value is never NULL.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return &q->backing_dev_info;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio, error);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time. Queue lock must be held.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
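/*
 * Illustrative sketch (editorial addition): how a request_fn might use
 * blk_delay_queue() when the hardware is temporarily out of resources.
 * The example_* helpers are hypothetical; the callback already runs with
 * q->queue_lock held, as blk_delay_queue() requires.
 *
 *	static void example_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (!example_hw_can_queue()) {
 *				blk_requeue_request(q, rq);
 *				blk_delay_queue(q, 3);	// retry in ~3ms
 *				break;
 *			}
 *			example_dispatch(rq);
 *		}
 *	}
 */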
/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *    blk_start_queue() will clear the stop flag on the queue, and call
 *    the request_fn for the queue if it was in a stopped state when
 *    entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
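/*
 * Illustrative sketch (editorial addition): the stop/start protocol from
 * the kernel-doc above, as a driver might apply it.  Both calls need the
 * queue lock; 'flags' is a local unsigned long.
 *
 *	// in the request_fn, after a 'queue full' response from hardware
 *	// (queue lock is already held here):
 *	blk_stop_queue(q);
 *
 *	// later, from the completion path, once the device can accept
 *	// more work:
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_start_queue(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */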
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		queue_for_each_hw_ctx(q, hctx, i) {
			cancel_delayed_work_sync(&hctx->run_work);
			cancel_delayed_work_sync(&hctx->delay_work);
		}
	} else {
		cancel_delayed_work_sync(&q->delay_work);
	}
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us. The caller must hold the queue lock.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active yet.  Some drivers (e.g. fd and loop) get unhappy
		 * in such cases.  Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained.  Check all the queues and counters.
		 */
		if (drain_all) {
			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				if (fq)
					drain |= !list_empty(&fq->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Queues start drained.  Skip actual draining till init is
	 * complete.  This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
	if (blk_queue_init_done(q)) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that q->request_fn() gets invoked after draining finished.
	 */
	if (q->mq_ops) {
		blk_mq_freeze_queue(q);
		spin_lock_irq(lock);
	} else {
		spin_lock_irq(lock);
		__blk_drain_queue(q, true);
	}
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					  mempool_free_slab, request_cachep,
					  gfp_mask, q->node);
	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

void blk_exit_rl(struct request_list *rl)
{
	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	q->backing_dev_info.name = "block";
	q->node = node_id;

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_id;

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	init_waitqueue_head(&q->mq_freeze_wq);

	if (blkcg_init_queue(q))
		goto fail_bdi;

	return q;

fail_bdi:
	bdi_destroy(&q->backing_dev_info);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);
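/*
 * Illustrative sketch (editorial addition): the minimal setup/teardown
 * pairing the Note above asks for.  The example_* names are hypothetical.
 *
 *	static int __init example_init(void)
 *	{
 *		spin_lock_init(&example_lock);
 *		example_q = blk_init_queue(example_request_fn, &example_lock);
 *		if (!example_q)
 *			return -ENOMEM;
 *		// ... set up gendisk, queue limits, etc. ...
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		blk_cleanup_queue(example_q);	// pairs with blk_init_queue()
 *	}
 */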
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!uninit_q)
		return NULL;

	q = blk_init_allocated_queue(uninit_q, rfn, lock);
	if (!q)
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
			 spinlock_t *lock)
{
	if (!q)
		return NULL;

	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
	if (!q->fq)
		return NULL;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto fail;

	q->request_fn = rfn;
	q->prep_rq_fn = NULL;
	q->unprep_rq_fn = NULL;
	q->queue_flags |= QUEUE_FLAG_DEFAULT;

	/* Override internal queue lock with supplied lock pointer */
	if (lock)
		q->queue_lock = lock;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* Protect q->elevator from elevator_change */
	mutex_lock(&q->sysfs_lock);

	/* init elevator */
	if (elevator_init(q, NULL)) {
		mutex_unlock(&q->sysfs_lock);
		goto fail;
	}

	mutex_unlock(&q->sysfs_lock);

	return q;

fail:
	blk_free_flush_queue(q->fq);
	return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, rl->rq_pool);
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	/*
	 * bdi isn't aware of blkcg yet.  As all async IOs end up root
	 * blkcg anyway, just use root blkcg state.
	 */
	if (rl == &q->root_rl &&
	    rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(struct request_list *rl, unsigned int flags)
{
	struct request_queue *q = rl->q;
	int sync = rw_is_sync(flags);

	q->nr_rqs[sync]--;
	rl->count[sync]--;
	if (flags & REQ_ELVPRIV)
		q->nr_rqs_elvpriv--;

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
}

int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct request_list *rl;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	/* congestion isn't cgroup aware and follows root blkcg for now */
	rl = &q->root_rl;

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_SYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_SYNC);
			wake_up(&rl->wait[BLK_RW_SYNC]);
		}

		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_ASYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_ASYNC);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}

	spin_unlock_irq(q->queue_lock);
	return 0;
}

/*
 * Determine if elevator data should be initialized when allocating the
 * request associated with @bio.
 */
static bool blk_rq_should_init_elevator(struct bio *bio)
{
	if (!bio)
		return true;

	/*
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 */
	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
		return false;

	return true;
}

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio.  May return
 * %NULL if %current->io_context doesn't exist.
 */
static struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, int rw_flags,
				     struct bio *bio, gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	int may_queue;

	if (unlikely(blk_queue_dying(q)))
		return ERR_PTR(-ENODEV);

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return ERR_PTR(-ENOMEM);
				}
			}
		}
		/*
		 * bdi isn't aware of blkcg yet.  As all async IOs end up
		 * root blkcg anyway, just use root blkcg state.
		 */
		if (rl == &q->root_rl)
			blk_set_queue_congested(q, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return ERR_PTR(-ENOMEM);

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator.  If
	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed.  This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
		rw_flags |= REQ_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = rw_flags | REQ_ALLOCED;

	/* init elvpriv */
	if (rw_flags & REQ_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
			   __func__, dev_name(q->backing_dev_info.dev));

	rq->cmd_flags &= ~REQ_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, rw_flags);

	/*
	 * in the very unlikely event that allocation failed and no
	 * requests for this direction was pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. another possible fix would be to split the rq mempool into
	 * READ and WRITE
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return ERR_PTR(-ENOMEM);
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this
 * function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, rw_flags, bio, gfp_mask);
	if (!IS_ERR(rq))
		return rq;

	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return rq;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, rw_flags & 1);

	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period of time.  See ioc_batching, ioc_set_batching
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}

static struct request *blk_old_get_request(struct request_queue *q, int rw,
		gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, rw, NULL, gfp_mask);
	if (IS_ERR(rq))
		spin_unlock_irq(q->queue_lock);
	/* q->queue_lock is unlocked at this point */

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	if (q->mq_ops)
		return blk_mq_alloc_request(q, rw, gfp_mask, false);
	else
		return blk_old_get_request(q, rw, gfp_mask);
}
EXPORT_SYMBOL(blk_get_request);
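/*
 * Illustrative sketch (editorial addition): direct request allocation,
 * e.g. for a driver-internal command.  With GFP_KERNEL (which includes
 * __GFP_WAIT) the legacy path sleeps instead of failing under memory
 * pressure, so an ERR_PTR here means the queue is dying.
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	// ... fill in and issue rq ...
 *	blk_put_request(rq);
 */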
/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained-bio properly constructed by block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands. Where the struct request needs to be further initialized by
 * the caller. It is passed a &struct bio, which describes the memory info of
 * the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers. That bio_data_dir() will return
 * the needed direction of the request. (And all bio's in the passed bio-chain
 * are properly set accordingly)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device. Otherwise the call to blk_queue_bounce will
 * BUG.
 *
 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 * anything but the first bio in the chain. Otherwise you risk waiting for IO
 * completion of a bio that hasn't been submitted yet, thus resulting in a
 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 * of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (IS_ERR(rq))
		return rq;

	blk_rq_set_block_pc(rq);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);
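/*
 * Illustrative sketch (editorial addition): wrapping a prepared bio chain
 * in a BLOCK_PC request and finishing the initialization ourselves, as
 * the kernel-doc above requires.  'cdb' and 'cdb_len' stand for a
 * caller-owned SCSI command buffer and are hypothetical in this sketch.
 *
 *	struct request *rq;
 *
 *	rq = blk_make_request(q, bio, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->cmd_len = cdb_len;
 *	memcpy(rq->cmd, cdb, cdb_len);
 *	// ... set timeout and sense buffer, then execute the request ...
 */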
/**
 * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
 * @rq:		request to be initialized
 *
 */
void blk_rq_set_block_pc(struct request *rq)
{
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	memset(rq->__cmd, 0, sizeof(rq->__cmd));
}
EXPORT_SYMBOL(blk_rq_set_block_pc);

/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	blk_account_io_start(rq, true);
	__elv_add_request(q, rq, where);
}

static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	int inflight;

	if (now == part->stamp)
		return;

	inflight = part_in_flight(part);
	if (inflight) {
		__part_stat_add(cpu, part, time_in_queue,
				inflight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(int cpu, struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (part->partno)
		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);
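/*
 * Worked example (editorial addition): if part->stamp is 50 jiffies old
 * and 3 requests are in flight when part_round_stats() runs,
 * time_in_queue grows by 3 * 50 = 150 and io_ticks by 50; part->stamp is
 * then reset to the current jiffies so the same interval is never
 * accounted twice.
 */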
#ifdef CONFIG_PM_RUNTIME
static void blk_pm_put_request(struct request *rq)
{
	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
		pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_put_request(struct request *rq) {}
#endif

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;

	if (q->mq_ops) {
		blk_mq_free_request(req);
		return;
	}

	blk_pm_put_request(req);

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	/*
	 * Request may not have originated from ll_rw_blk. if not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		unsigned int flags = req->cmd_flags;
		struct request_list *rl = blk_rq_rl(req);

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(ELV_ON_HASH(req));

		blk_free_request(rl, req);
		freed_request(rl, flags);
		blk_put_rl(rl);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	struct request_queue *q = req->q;

	if (q->mq_ops)
		blk_mq_free_request(req);
	else {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_put_request);

/**
 * blk_add_request_payload - add a payload to a request
 * @rq: request to update
 * @page: page backing the payload
 * @len: length of the payload.
 *
 * This allows to later add a payload to an already submitted request by
 * a block driver.  The driver needs to take care of freeing the payload
 * itself.
 *
 * Note that this is a quite horrible hack and nothing but handling of
 * discard requests should ever use it.
 */
void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len)
{
	struct bio *bio = rq->bio;

	bio->bi_io_vec->bv_page = page;
	bio->bi_io_vec->bv_offset = 0;
	bio->bi_io_vec->bv_len = len;

	bio->bi_iter.bi_size = len;
	bio->bi_vcnt = 1;
	bio->bi_phys_segments = 1;

	rq->__data_len = rq->resid_len = len;
	rq->nr_phys_segments = 1;
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);
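/*
 * Illustrative sketch (editorial addition): attaching a single-page
 * payload to an already set up request, the way discard handling might.
 * The size and the freeing strategy are the driver's business, per the
 * kernel-doc above.
 *
 *	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	blk_add_request_payload(rq, page, 512);
 *	// ... issue rq; free the page from the completion handler ...
 */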
1422
320ae51f
JA
1423bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1424 struct bio *bio)
73c10101
JA
1425{
1426 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1427
73c10101
JA
1428 if (!ll_back_merge_fn(q, req, bio))
1429 return false;
1430
8c1cf6bb 1431 trace_block_bio_backmerge(q, req, bio);
73c10101
JA
1432
1433 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1434 blk_rq_set_mixed_merge(req);
1435
1436 req->biotail->bi_next = bio;
1437 req->biotail = bio;
4f024f37 1438 req->__data_len += bio->bi_iter.bi_size;
73c10101
JA
1439 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1440
320ae51f 1441 blk_account_io_start(req, false);
73c10101
JA
1442 return true;
1443}
1444
320ae51f
JA
1445bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1446 struct bio *bio)
73c10101
JA
1447{
1448 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
73c10101 1449
73c10101
JA
1450 if (!ll_front_merge_fn(q, req, bio))
1451 return false;
1452
8c1cf6bb 1453 trace_block_bio_frontmerge(q, req, bio);
73c10101
JA
1454
1455 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1456 blk_rq_set_mixed_merge(req);
1457
73c10101
JA
1458 bio->bi_next = req->bio;
1459 req->bio = bio;
1460
4f024f37
KO
1461 req->__sector = bio->bi_iter.bi_sector;
1462 req->__data_len += bio->bi_iter.bi_size;
73c10101
JA
1463 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1464
320ae51f 1465 blk_account_io_start(req, false);
73c10101
JA
1466 return true;
1467}
1468
bd87b589 1469/**
320ae51f 1470 * blk_attempt_plug_merge - try to merge with %current's plugged list
bd87b589
TH
1471 * @q: request_queue new bio is being queued at
1472 * @bio: new bio being queued
1473 * @request_count: out parameter for number of traversed plugged requests
1474 *
1475 * Determine whether @bio being queued on @q can be merged with a request
1476 * on %current's plugged list. Returns %true if merge was successful,
1477 * otherwise %false.
1478 *
07c2bd37
TH
1479 * Plugging coalesces IOs from the same issuer for the same purpose without
1480 * going through @q->queue_lock. As such it's more of an issuing mechanism
1481 * than scheduling, and the request, while may have elvpriv data, is not
1482 * added on the elevator at this point. In addition, we don't have
1483 * reliable access to the elevator outside queue lock. Only check basic
1484 * merging parameters without querying the elevator.
da41a589
RE
1485 *
1486 * Caller must ensure !blk_queue_nomerges(q) beforehand.
73c10101 1487 */
320ae51f
JA
1488bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1489 unsigned int *request_count)
73c10101
JA
1490{
1491 struct blk_plug *plug;
1492 struct request *rq;
1493 bool ret = false;
92f399c7 1494 struct list_head *plug_list;
73c10101 1495
bd87b589 1496 plug = current->plug;
73c10101
JA
1497 if (!plug)
1498 goto out;
56ebdaf2 1499 *request_count = 0;
73c10101 1500
92f399c7
SL
1501 if (q->mq_ops)
1502 plug_list = &plug->mq_list;
1503 else
1504 plug_list = &plug->list;
1505
1506 list_for_each_entry_reverse(rq, plug_list, queuelist) {
73c10101
JA
1507 int el_ret;
1508
1b2e19f1
SL
1509 if (rq->q == q)
1510 (*request_count)++;
56ebdaf2 1511
07c2bd37 1512 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
73c10101
JA
1513 continue;
1514
050c8ea8 1515 el_ret = blk_try_merge(rq, bio);
73c10101
JA
1516 if (el_ret == ELEVATOR_BACK_MERGE) {
1517 ret = bio_attempt_back_merge(q, rq, bio);
1518 if (ret)
1519 break;
1520 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1521 ret = bio_attempt_front_merge(q, rq, bio);
1522 if (ret)
1523 break;
1524 }
1525 }
1526out:
1527 return ret;
1528}
1529
86db1e29 1530void init_request_from_bio(struct request *req, struct bio *bio)
52d9e675 1531{
4aff5e23 1532 req->cmd_type = REQ_TYPE_FS;
52d9e675 1533
7b6d91da
CH
1534 req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
1535 if (bio->bi_rw & REQ_RAHEAD)
a82afdfc 1536 req->cmd_flags |= REQ_FAILFAST_MASK;
b31dc66a 1537
52d9e675 1538 req->errors = 0;
4f024f37 1539 req->__sector = bio->bi_iter.bi_sector;
52d9e675 1540 req->ioprio = bio_prio(bio);
bc1c56fd 1541 blk_rq_bio_prep(req->q, req, bio);
52d9e675
TH
1542}
1543
5a7bbad2 1544void blk_queue_bio(struct request_queue *q, struct bio *bio)
1da177e4 1545{
5e00d1b5 1546 const bool sync = !!(bio->bi_rw & REQ_SYNC);
73c10101
JA
1547 struct blk_plug *plug;
1548 int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
1549 struct request *req;
56ebdaf2 1550 unsigned int request_count = 0;
1da177e4 1551
1da177e4
LT
1552 /*
1553 * low level driver can indicate that it wants pages above a
1554 * certain limit bounced to low memory (ie for highmem, or even
1555 * ISA dma in theory)
1556 */
1557 blk_queue_bounce(q, &bio);
1558
ffecfd1a
DW
1559 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1560 bio_endio(bio, -EIO);
1561 return;
1562 }
1563
4fed947c 1564 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
73c10101 1565 spin_lock_irq(q->queue_lock);
ae1b1539 1566 where = ELEVATOR_INSERT_FLUSH;
28e7d184
TH
1567 goto get_rq;
1568 }
1569
73c10101
JA
1570 /*
1571 * Check if we can merge with the plugged list before grabbing
1572 * any locks.
1573 */
da41a589
RE
1574 if (!blk_queue_nomerges(q) &&
1575 blk_attempt_plug_merge(q, bio, &request_count))
5a7bbad2 1576 return;
1da177e4 1577
73c10101 1578 spin_lock_irq(q->queue_lock);
2056a782 1579
73c10101
JA
1580 el_ret = elv_merge(q, &req, bio);
1581 if (el_ret == ELEVATOR_BACK_MERGE) {
73c10101 1582 if (bio_attempt_back_merge(q, req, bio)) {
07c2bd37 1583 elv_bio_merged(q, req, bio);
73c10101
JA
1584 if (!attempt_back_merge(q, req))
1585 elv_merged_request(q, req, el_ret);
1586 goto out_unlock;
1587 }
1588 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
73c10101 1589 if (bio_attempt_front_merge(q, req, bio)) {
07c2bd37 1590 elv_bio_merged(q, req, bio);
73c10101
JA
1591 if (!attempt_front_merge(q, req))
1592 elv_merged_request(q, req, el_ret);
1593 goto out_unlock;
80a761fd 1594 }
1da177e4
LT
1595 }
1596
450991bc 1597get_rq:
7749a8d4
JA
1598 /*
1599 * This sync check and mask will be re-done in init_request_from_bio(),
1600 * but we need to set it earlier to expose the sync flag to the
1601 * rq allocator and io schedulers.
1602 */
1603 rw_flags = bio_data_dir(bio);
1604 if (sync)
7b6d91da 1605 rw_flags |= REQ_SYNC;
7749a8d4 1606
1da177e4 1607 /*
450991bc 1608 * Grab a free request. This is might sleep but can not fail.
d6344532 1609 * Returns with the queue unlocked.
450991bc 1610 */
a06e05e6 1611 req = get_request(q, rw_flags, bio, GFP_NOIO);
a492f075
JL
1612 if (IS_ERR(req)) {
1613 bio_endio(bio, PTR_ERR(req)); /* @q is dead */
da8303c6
TH
1614 goto out_unlock;
1615 }
d6344532 1616
450991bc
NP
1617 /*
1618 * After dropping the lock and possibly sleeping here, our request
1619 * may now be mergeable after it had proven unmergeable (above).
1620 * We don't worry about that case for efficiency. It won't happen
1621 * often, and the elevators are able to handle it.
1da177e4 1622 */
52d9e675 1623 init_request_from_bio(req, bio);
1da177e4 1624
9562ad9a 1625 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
11ccf116 1626 req->cpu = raw_smp_processor_id();
73c10101
JA
1627
1628 plug = current->plug;
721a9602 1629 if (plug) {
dc6d36c9
JA
1630 /*
1631 * If this is the first request added after a plug, fire
7aef2e78 1632 * off a plug trace.
dc6d36c9 1633 */
7aef2e78 1634 if (!request_count)
dc6d36c9 1635 trace_block_plug(q);
3540d5e8 1636 else {
019ceb7d 1637 if (request_count >= BLK_MAX_REQUEST_COUNT) {
3540d5e8 1638 blk_flush_plug_list(plug, false);
019ceb7d
SL
1639 trace_block_plug(q);
1640 }
73c10101 1641 }
73c10101 1642 list_add_tail(&req->queuelist, &plug->list);
320ae51f 1643 blk_account_io_start(req, true);
73c10101
JA
1644 } else {
1645 spin_lock_irq(q->queue_lock);
1646 add_acct_request(q, req, where);
24ecfbe2 1647 __blk_run_queue(q);
73c10101
JA
1648out_unlock:
1649 spin_unlock_irq(q->queue_lock);
1650 }
1da177e4 1651}
c20e8de2 1652EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
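
/*
 * Example (illustrative sketch, not from the kernel sources): blk_queue_bio()
 * above is the default make_request_fn installed by blk_init_queue().  A
 * bio-based driver can bypass the request layer entirely by installing its
 * own handler with blk_queue_make_request().  All "example_" names are
 * hypothetical.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_make_request(struct request_queue *q, struct bio *bio)
{
	/*
	 * A real driver would walk the bio with bio_for_each_segment()
	 * and move the data; here every bio simply completes successfully,
	 * and no struct request is ever allocated.
	 */
	bio_endio(bio, 0);
}

static struct request_queue *example_alloc_queue(void)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (q)
		blk_queue_make_request(q, example_make_request);
	return q;
}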
1da177e4
LT
1653
1654/*
1655 * If bio->bi_dev is a partition, remap the location
1656 */
1657static inline void blk_partition_remap(struct bio *bio)
1658{
1659 struct block_device *bdev = bio->bi_bdev;
1660
bf2de6f5 1661 if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1da177e4
LT
1662 struct hd_struct *p = bdev->bd_part;
1663
4f024f37 1664 bio->bi_iter.bi_sector += p->start_sect;
1da177e4 1665 bio->bi_bdev = bdev->bd_contains;
c7149d6b 1666
d07335e5
MS
1667 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1668 bdev->bd_dev,
4f024f37 1669 bio->bi_iter.bi_sector - p->start_sect);
1da177e4
LT
1670 }
1671}
1672
1da177e4
LT
1673static void handle_bad_sector(struct bio *bio)
1674{
1675 char b[BDEVNAME_SIZE];
1676
1677 printk(KERN_INFO "attempt to access beyond end of device\n");
1678 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1679 bdevname(bio->bi_bdev, b),
1680 bio->bi_rw,
f73a1c7d 1681 (unsigned long long)bio_end_sector(bio),
77304d2a 1682 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1da177e4
LT
1683
1684 set_bit(BIO_EOF, &bio->bi_flags);
1685}
1686
c17bb495
AM
1687#ifdef CONFIG_FAIL_MAKE_REQUEST
1688
1689static DECLARE_FAULT_ATTR(fail_make_request);
1690
1691static int __init setup_fail_make_request(char *str)
1692{
1693 return setup_fault_attr(&fail_make_request, str);
1694}
1695__setup("fail_make_request=", setup_fail_make_request);
1696
b2c9cd37 1697static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
c17bb495 1698{
b2c9cd37 1699 return part->make_it_fail && should_fail(&fail_make_request, bytes);
c17bb495
AM
1700}
1701
1702static int __init fail_make_request_debugfs(void)
1703{
dd48c085
AM
1704 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1705 NULL, &fail_make_request);
1706
21f9fcd8 1707 return PTR_ERR_OR_ZERO(dir);
c17bb495
AM
1708}
1709
1710late_initcall(fail_make_request_debugfs);
1711
1712#else /* CONFIG_FAIL_MAKE_REQUEST */
1713
b2c9cd37
AM
1714static inline bool should_fail_request(struct hd_struct *part,
1715 unsigned int bytes)
c17bb495 1716{
b2c9cd37 1717 return false;
c17bb495
AM
1718}
1719
1720#endif /* CONFIG_FAIL_MAKE_REQUEST */
1721
c07e2b41
JA
1722/*
1723 * Check whether this bio extends beyond the end of the device.
1724 */
1725static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1726{
1727 sector_t maxsector;
1728
1729 if (!nr_sectors)
1730 return 0;
1731
1732 /* Test device or partition size, when known. */
77304d2a 1733 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
c07e2b41 1734 if (maxsector) {
4f024f37 1735 sector_t sector = bio->bi_iter.bi_sector;
c07e2b41
JA
1736
1737 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1738 /*
1739 * This may well happen - the kernel calls bread()
1740 * without checking the size of the device, e.g., when
1741 * mounting a device.
1742 */
1743 handle_bad_sector(bio);
1744 return 1;
1745 }
1746 }
1747
1748 return 0;
1749}
1750
27a84d54
CH
1751static noinline_for_stack bool
1752generic_make_request_checks(struct bio *bio)
1da177e4 1753{
165125e1 1754 struct request_queue *q;
5a7bbad2 1755 int nr_sectors = bio_sectors(bio);
51fd77bd 1756 int err = -EIO;
5a7bbad2
CH
1757 char b[BDEVNAME_SIZE];
1758 struct hd_struct *part;
1da177e4
LT
1759
1760 might_sleep();
1da177e4 1761
c07e2b41
JA
1762 if (bio_check_eod(bio, nr_sectors))
1763 goto end_io;
1da177e4 1764
5a7bbad2
CH
1765 q = bdev_get_queue(bio->bi_bdev);
1766 if (unlikely(!q)) {
1767 printk(KERN_ERR
1768 "generic_make_request: Trying to access "
1769 "nonexistent block-device %s (%Lu)\n",
1770 bdevname(bio->bi_bdev, b),
4f024f37 1771 (long long) bio->bi_iter.bi_sector);
5a7bbad2
CH
1772 goto end_io;
1773 }
c17bb495 1774
e2a60da7
MP
1775 if (likely(bio_is_rw(bio) &&
1776 nr_sectors > queue_max_hw_sectors(q))) {
5a7bbad2
CH
1777 printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1778 bdevname(bio->bi_bdev, b),
1779 bio_sectors(bio),
1780 queue_max_hw_sectors(q));
1781 goto end_io;
1782 }
1da177e4 1783
5a7bbad2 1784 part = bio->bi_bdev->bd_part;
4f024f37 1785 if (should_fail_request(part, bio->bi_iter.bi_size) ||
5a7bbad2 1786 should_fail_request(&part_to_disk(part)->part0,
4f024f37 1787 bio->bi_iter.bi_size))
5a7bbad2 1788 goto end_io;
2056a782 1789
5a7bbad2
CH
1790 /*
1791 * If this device has partitions, remap block n
1792 * of partition p to block n+start(p) of the disk.
1793 */
1794 blk_partition_remap(bio);
2056a782 1795
5a7bbad2
CH
1796 if (bio_check_eod(bio, nr_sectors))
1797 goto end_io;
1e87901e 1798
5a7bbad2
CH
1799 /*
1800 * Filter flush bios early so that make_request-based
1801 * drivers without flush support don't have to worry
1802 * about them.
1803 */
1804 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
1805 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
1806 if (!nr_sectors) {
1807 err = 0;
51fd77bd
JA
1808 goto end_io;
1809 }
5a7bbad2 1810 }
5ddfe969 1811
5a7bbad2
CH
1812 if ((bio->bi_rw & REQ_DISCARD) &&
1813 (!blk_queue_discard(q) ||
e2a60da7 1814 ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
5a7bbad2
CH
1815 err = -EOPNOTSUPP;
1816 goto end_io;
1817 }
01edede4 1818
4363ac7c 1819 if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
5a7bbad2
CH
1820 err = -EOPNOTSUPP;
1821 goto end_io;
1822 }
01edede4 1823
7f4b35d1
TH
1824 /*
1825 * Various block parts want %current->io_context and lazy ioc
1826 * allocation ends up trading a lot of pain for a small amount of
1827 * memory. Just allocate it upfront. This may fail and the block
1828 * layer knows how to live with it.
1829 */
1830 create_io_context(GFP_ATOMIC, q->node);
1831
bc16a4f9
TH
1832 if (blk_throtl_bio(q, bio))
1833 return false; /* throttled, will be resubmitted later */
27a84d54 1834
5a7bbad2 1835 trace_block_bio_queue(q, bio);
27a84d54 1836 return true;
a7384677
TH
1837
1838end_io:
1839 bio_endio(bio, err);
27a84d54 1840 return false;
1da177e4
LT
1841}
1842
27a84d54
CH
1843/**
1844 * generic_make_request - hand a buffer to its device driver for I/O
1845 * @bio: The bio describing the location in memory and on the device.
1846 *
1847 * generic_make_request() is used to make I/O requests of block
1848 * devices. It is passed a &struct bio, which describes the I/O that needs
1849 * to be done.
1850 *
1851 * generic_make_request() does not return any status. The
1852 * success/failure status of the request, along with notification of
1853 * completion, is delivered asynchronously through the bio->bi_end_io
1854 * function described (one day) elsewhere.
1855 *
1856 * The caller of generic_make_request must make sure that bi_io_vec
1857 * is set to describe the memory buffer, that bi_bdev and bi_iter.bi_sector
1858 * are set to describe the device address, and that
1859 * bi_end_io and optionally bi_private are set to describe how
1860 * completion notification should be signaled.
1861 *
1862 * generic_make_request and the drivers it calls may use bi_next if this
1863 * bio happens to be merged with someone else, and may resubmit the bio to
1864 * a lower device by calling into generic_make_request recursively, which
1865 * means the bio should NOT be touched after the call to ->make_request_fn.
d89d8796
NB
1866 */
1867void generic_make_request(struct bio *bio)
1868{
bddd87c7
AM
1869 struct bio_list bio_list_on_stack;
1870
27a84d54
CH
1871 if (!generic_make_request_checks(bio))
1872 return;
1873
1874 /*
1875 * We only want one ->make_request_fn to be active at a time, else
1876 * stack usage with stacked devices could be a problem. So use
1877 * current->bio_list to keep a list of requests submitted by a
1878 * make_request_fn function. current->bio_list is also used as a
1879 * flag to say if generic_make_request is currently active in this
1880 * task or not. If it is NULL, then no make_request is active. If
1881 * it is non-NULL, then a make_request is active, and new requests
1882 * should be added at the tail
1883 */
bddd87c7 1884 if (current->bio_list) {
bddd87c7 1885 bio_list_add(current->bio_list, bio);
d89d8796
NB
1886 return;
1887 }
27a84d54 1888
d89d8796
NB
1889 /* following loop may be a bit non-obvious, and so deserves some
1890 * explanation.
1891 * Before entering the loop, bio->bi_next is NULL (as all callers
1892 * ensure that) so we have a list with a single bio.
1893 * We pretend that we have just taken it off a longer list, so
bddd87c7
AM
1894 * we assign bio_list to a pointer to the bio_list_on_stack,
1895 * thus initialising the bio_list of new bios to be
27a84d54 1896 * added. ->make_request() may indeed add some more bios
d89d8796
NB
1897 * through a recursive call to generic_make_request. If it
1898 * did, we find a non-NULL value in bio_list and re-enter the loop
1899 * from the top. In this case we really did just take the bio
bddd87c7 1900 * off the top of the list (no pretending) and so remove it from
27a84d54 1901 * bio_list, and call into ->make_request() again.
d89d8796
NB
1902 */
1903 BUG_ON(bio->bi_next);
bddd87c7
AM
1904 bio_list_init(&bio_list_on_stack);
1905 current->bio_list = &bio_list_on_stack;
d89d8796 1906 do {
27a84d54
CH
1907 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1908
1909 q->make_request_fn(q, bio);
1910
bddd87c7 1911 bio = bio_list_pop(current->bio_list);
d89d8796 1912 } while (bio);
bddd87c7 1913 current->bio_list = NULL; /* deactivate */
d89d8796 1914}
1da177e4
LT
1915EXPORT_SYMBOL(generic_make_request);
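
/*
 * Example (illustrative sketch, not from the kernel sources): the recursion
 * the comment above describes.  A stacking driver's make_request_fn can
 * remap @bio and feed it back through generic_make_request(); the
 * resubmitted bio lands on current->bio_list and is dispatched iteratively
 * after this handler returns, so the stack does not grow.  The target type
 * is hypothetical.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

struct example_linear_target {
	struct block_device *lower_bdev;	/* backing device */
	sector_t start;				/* offset on it */
};

static void example_stacked_make_request(struct request_queue *q,
					 struct bio *bio)
{
	struct example_linear_target *t = q->queuedata;

	bio->bi_bdev = t->lower_bdev;		/* redirect ... */
	bio->bi_iter.bi_sector += t->start;	/* ... and remap */

	/* must not touch @bio after this call, as documented above */
	generic_make_request(bio);
}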
1916
1917/**
710027a4 1918 * submit_bio - submit a bio to the block device layer for I/O
1da177e4
LT
1919 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1920 * @bio: The &struct bio which describes the I/O
1921 *
1922 * submit_bio() is very similar in purpose to generic_make_request(), and
1923 * uses that function to do most of the work. Both are fairly rough
710027a4 1924 * interfaces; @bio must be set up in advance and ready for I/O.
1da177e4
LT
1925 *
1926 */
1927void submit_bio(int rw, struct bio *bio)
1928{
22e2c507 1929 bio->bi_rw |= rw;
1da177e4 1930
bf2de6f5
JA
1931 /*
1932 * If it's a regular read/write or a barrier with data attached,
1933 * go through the normal accounting stuff before submission.
1934 */
e2a60da7 1935 if (bio_has_data(bio)) {
4363ac7c
MP
1936 unsigned int count;
1937
1938 if (unlikely(rw & REQ_WRITE_SAME))
1939 count = bdev_logical_block_size(bio->bi_bdev) >> 9;
1940 else
1941 count = bio_sectors(bio);
1942
bf2de6f5
JA
1943 if (rw & WRITE) {
1944 count_vm_events(PGPGOUT, count);
1945 } else {
4f024f37 1946 task_io_account_read(bio->bi_iter.bi_size);
bf2de6f5
JA
1947 count_vm_events(PGPGIN, count);
1948 }
1949
1950 if (unlikely(block_dump)) {
1951 char b[BDEVNAME_SIZE];
8dcbdc74 1952 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
ba25f9dc 1953 current->comm, task_pid_nr(current),
bf2de6f5 1954 (rw & WRITE) ? "WRITE" : "READ",
4f024f37 1955 (unsigned long long)bio->bi_iter.bi_sector,
8dcbdc74
SM
1956 bdevname(bio->bi_bdev, b),
1957 count);
bf2de6f5 1958 }
1da177e4
LT
1959 }
1960
1961 generic_make_request(bio);
1962}
1da177e4
LT
1963EXPORT_SYMBOL(submit_bio);
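
/*
 * Example (illustrative sketch, not from the kernel sources): a synchronous
 * one-page read built on submit_bio().  Error handling is minimal; all
 * "example_" names are hypothetical and the caller is assumed to hold a
 * reference on @bdev.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

struct example_sync_io {
	struct completion done;
	int error;
};

static void example_sync_end_io(struct bio *bio, int error)
{
	struct example_sync_io *io = bio->bi_private;

	io->error = error;	/* stash the result for the waiter */
	complete(&io->done);
}

static int example_read_page(struct block_device *bdev, sector_t sector,
			     struct page *page)
{
	struct example_sync_io io = { .error = 0 };
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	if (!bio)
		return -ENOMEM;
	init_completion(&io.done);

	bio->bi_bdev = bdev;			/* device address */
	bio->bi_iter.bi_sector = sector;	/* location on it */
	bio->bi_end_io = example_sync_end_io;	/* completion callback */
	bio->bi_private = &io;
	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
		bio_put(bio);
		return -EIO;
	}

	submit_bio(READ, bio);		/* accounting + generic_make_request() */
	wait_for_completion(&io.done);
	bio_put(bio);
	return io.error;
}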
1964
82124d60
KU
1965/**
1966 * blk_rq_check_limits - Helper function to check a request for the queue limit
1967 * @q: the queue
1968 * @rq: the request being checked
1969 *
1970 * Description:
1971 * @rq may have been made based on weaker limitations of upper-level queues
1972 * in request stacking drivers, and it may violate the limitation of @q.
1973 * Since the block layer and the underlying device driver trust @rq
1974 * after it is inserted to @q, it should be checked against @q before
1975 * the insertion using this generic function.
1976 *
1977 * This function should also be useful for request stacking drivers
eef35c2d 1978 * in some cases below, so export this function.
82124d60
KU
1979 * Request stacking drivers like request-based dm may change the queue
1980 * limits while requests are in the queue (e.g. dm's table swapping).
e227867f 1981 * Such request stacking drivers should check those requests against
82124d60
KU
1982 * the new queue limits again when they dispatch those requests,
1983 * although such checks are also done against the old queue limits
1984 * when submitting requests.
1985 */
1986int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1987{
e2a60da7 1988 if (!rq_mergeable(rq))
3383977f
S
1989 return 0;
1990
f31dc1cd 1991 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
82124d60
KU
1992 printk(KERN_ERR "%s: over max size limit.\n", __func__);
1993 return -EIO;
1994 }
1995
1996 /*
1997 * queue's settings related to segment counting like q->bounce_pfn
1998 * may differ from that of other stacking queues.
1999 * Recalculate it to check the request correctly on this queue's
2000 * limitation.
2001 */
2002 blk_recalc_rq_segments(rq);
8a78362c 2003 if (rq->nr_phys_segments > queue_max_segments(q)) {
82124d60
KU
2004 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
2005 return -EIO;
2006 }
2007
2008 return 0;
2009}
2010EXPORT_SYMBOL_GPL(blk_rq_check_limits);
2011
2012/**
2013 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2014 * @q: the queue to submit the request
2015 * @rq: the request being queued
2016 */
2017int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
2018{
2019 unsigned long flags;
4853abaa 2020 int where = ELEVATOR_INSERT_BACK;
82124d60
KU
2021
2022 if (blk_rq_check_limits(q, rq))
2023 return -EIO;
2024
b2c9cd37
AM
2025 if (rq->rq_disk &&
2026 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
82124d60 2027 return -EIO;
82124d60
KU
2028
2029 spin_lock_irqsave(q->queue_lock, flags);
3f3299d5 2030 if (unlikely(blk_queue_dying(q))) {
8ba61435
TH
2031 spin_unlock_irqrestore(q->queue_lock, flags);
2032 return -ENODEV;
2033 }
82124d60
KU
2034
2035 /*
2036 * Submitting request must be dequeued before calling this function
2037 * because it will be linked to another request_queue
2038 */
2039 BUG_ON(blk_queued_rq(rq));
2040
4853abaa
JM
2041 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
2042 where = ELEVATOR_INSERT_FLUSH;
2043
2044 add_acct_request(q, rq, where);
e67b77c7
JM
2045 if (where == ELEVATOR_INSERT_FLUSH)
2046 __blk_run_queue(q);
82124d60
KU
2047 spin_unlock_irqrestore(q->queue_lock, flags);
2048
2049 return 0;
2050}
2051EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
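
/*
 * Example (illustrative sketch, not from the kernel sources): how a
 * request-stacking driver such as request-based dm hands a prepared clone
 * to a lower queue.  @clone is assumed to have been built with
 * blk_rq_prep_clone() (further below) and already dequeued from the upper
 * queue.
 */
#include <linux/blkdev.h>

static int example_dispatch_clone(struct request_queue *lower_q,
				  struct request *clone)
{
	/*
	 * blk_insert_cloned_request() re-runs blk_rq_check_limits()
	 * against @lower_q, so a clone built under the upper queue's
	 * weaker limits is rejected here with -EIO; -ENODEV means the
	 * lower queue is dying and the caller should fail or requeue.
	 */
	return blk_insert_cloned_request(lower_q, clone);
}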
2052
80a761fd
TH
2053/**
2054 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
2055 * @rq: request to examine
2056 *
2057 * Description:
2058 * A request could be a merge of IOs which require different failure
2059 * handling. This function determines the number of bytes which
2060 * can be failed from the beginning of the request without
2061 * crossing into an area which needs to be retried further.
2062 *
2063 * Return:
2064 * The number of bytes to fail.
2065 *
2066 * Context:
2067 * queue_lock must be held.
2068 */
2069unsigned int blk_rq_err_bytes(const struct request *rq)
2070{
2071 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
2072 unsigned int bytes = 0;
2073 struct bio *bio;
2074
2075 if (!(rq->cmd_flags & REQ_MIXED_MERGE))
2076 return blk_rq_bytes(rq);
2077
2078 /*
2079 * Currently the only 'mixing' which can happen is between
2080 * different failfast types. We can safely fail portions
2081 * which have all the failfast bits that the first one has -
2082 * the ones which are at least as eager to fail as the first
2083 * one.
2084 */
2085 for (bio = rq->bio; bio; bio = bio->bi_next) {
2086 if ((bio->bi_rw & ff) != ff)
2087 break;
4f024f37 2088 bytes += bio->bi_iter.bi_size;
80a761fd
TH
2089 }
2090
2091 /* this could lead to infinite loop */
2092 BUG_ON(blk_rq_bytes(rq) && !bytes);
2093 return bytes;
2094}
2095EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
2096
320ae51f 2097void blk_account_io_completion(struct request *req, unsigned int bytes)
bc58ba94 2098{
c2553b58 2099 if (blk_do_io_stat(req)) {
bc58ba94
JA
2100 const int rw = rq_data_dir(req);
2101 struct hd_struct *part;
2102 int cpu;
2103
2104 cpu = part_stat_lock();
09e099d4 2105 part = req->part;
bc58ba94
JA
2106 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
2107 part_stat_unlock();
2108 }
2109}
2110
320ae51f 2111void blk_account_io_done(struct request *req)
bc58ba94 2112{
bc58ba94 2113 /*
dd4c133f
TH
2114 * Account IO completion. flush_rq isn't accounted as a
2115 * normal IO on queueing nor completion. Accounting the
2116 * containing request is enough.
bc58ba94 2117 */
414b4ff5 2118 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
bc58ba94
JA
2119 unsigned long duration = jiffies - req->start_time;
2120 const int rw = rq_data_dir(req);
2121 struct hd_struct *part;
2122 int cpu;
2123
2124 cpu = part_stat_lock();
09e099d4 2125 part = req->part;
bc58ba94
JA
2126
2127 part_stat_inc(cpu, part, ios[rw]);
2128 part_stat_add(cpu, part, ticks[rw], duration);
2129 part_round_stats(cpu, part);
316d315b 2130 part_dec_in_flight(part, rw);
bc58ba94 2131
6c23a968 2132 hd_struct_put(part);
bc58ba94
JA
2133 part_stat_unlock();
2134 }
2135}
2136
c8158819
LM
2137#ifdef CONFIG_PM_RUNTIME
2138/*
2139 * Don't process normal requests when queue is suspended
2140 * or in the process of suspending/resuming
2141 */
2142static struct request *blk_pm_peek_request(struct request_queue *q,
2143 struct request *rq)
2144{
2145 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
2146 (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
2147 return NULL;
2148 else
2149 return rq;
2150}
2151#else
2152static inline struct request *blk_pm_peek_request(struct request_queue *q,
2153 struct request *rq)
2154{
2155 return rq;
2156}
2157#endif
2158
320ae51f
JA
2159void blk_account_io_start(struct request *rq, bool new_io)
2160{
2161 struct hd_struct *part;
2162 int rw = rq_data_dir(rq);
2163 int cpu;
2164
2165 if (!blk_do_io_stat(rq))
2166 return;
2167
2168 cpu = part_stat_lock();
2169
2170 if (!new_io) {
2171 part = rq->part;
2172 part_stat_inc(cpu, part, merges[rw]);
2173 } else {
2174 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
2175 if (!hd_struct_try_get(part)) {
2176 /*
2177 * The partition is already being removed,
2178 * the request will be accounted on the disk only
2179 *
2180 * We take a reference on disk->part0 although that
2181 * partition will never be deleted, so we can treat
2182 * it as any other partition.
2183 */
2184 part = &rq->rq_disk->part0;
2185 hd_struct_get(part);
2186 }
2187 part_round_stats(cpu, part);
2188 part_inc_in_flight(part, rw);
2189 rq->part = part;
2190 }
2191
2192 part_stat_unlock();
2193}
2194
3bcddeac 2195/**
9934c8c0
TH
2196 * blk_peek_request - peek at the top of a request queue
2197 * @q: request queue to peek at
2198 *
2199 * Description:
2200 * Return the request at the top of @q. The returned request
2201 * should be started using blk_start_request() before LLD starts
2202 * processing it.
2203 *
2204 * Return:
2205 * Pointer to the request at the top of @q if available. Null
2206 * otherwise.
2207 *
2208 * Context:
2209 * queue_lock must be held.
2210 */
2211struct request *blk_peek_request(struct request_queue *q)
158dbda0
TH
2212{
2213 struct request *rq;
2214 int ret;
2215
2216 while ((rq = __elv_next_request(q)) != NULL) {
c8158819
LM
2217
2218 rq = blk_pm_peek_request(q, rq);
2219 if (!rq)
2220 break;
2221
158dbda0
TH
2222 if (!(rq->cmd_flags & REQ_STARTED)) {
2223 /*
2224 * This is the first time the device driver
2225 * sees this request (possibly after
2226 * requeueing). Notify IO scheduler.
2227 */
33659ebb 2228 if (rq->cmd_flags & REQ_SORTED)
158dbda0
TH
2229 elv_activate_rq(q, rq);
2230
2231 /*
2232 * just mark as started even if we don't start
2233 * it; a request that has been delayed should
2234 * not be passed by new incoming requests
2235 */
2236 rq->cmd_flags |= REQ_STARTED;
2237 trace_block_rq_issue(q, rq);
2238 }
2239
2240 if (!q->boundary_rq || q->boundary_rq == rq) {
2241 q->end_sector = rq_end_sector(rq);
2242 q->boundary_rq = NULL;
2243 }
2244
2245 if (rq->cmd_flags & REQ_DONTPREP)
2246 break;
2247
2e46e8b2 2248 if (q->dma_drain_size && blk_rq_bytes(rq)) {
158dbda0
TH
2249 /*
2250 * make sure space for the drain appears; we
2251 * know we can do this because max_hw_segments
2252 * has been adjusted to be one fewer than the
2253 * device can handle
2254 */
2255 rq->nr_phys_segments++;
2256 }
2257
2258 if (!q->prep_rq_fn)
2259 break;
2260
2261 ret = q->prep_rq_fn(q, rq);
2262 if (ret == BLKPREP_OK) {
2263 break;
2264 } else if (ret == BLKPREP_DEFER) {
2265 /*
2266 * the request may have been (partially) prepped.
2267 * we need to keep this request in the front to
2268 * avoid resource deadlock. REQ_STARTED will
2269 * prevent other fs requests from passing this one.
2270 */
2e46e8b2 2271 if (q->dma_drain_size && blk_rq_bytes(rq) &&
158dbda0
TH
2272 !(rq->cmd_flags & REQ_DONTPREP)) {
2273 /*
2274 * remove the space for the drain we added
2275 * so that we don't add it again
2276 */
2277 --rq->nr_phys_segments;
2278 }
2279
2280 rq = NULL;
2281 break;
2282 } else if (ret == BLKPREP_KILL) {
2283 rq->cmd_flags |= REQ_QUIET;
c143dc90
JB
2284 /*
2285 * Mark this request as started so we don't trigger
2286 * any debug logic in the end I/O path.
2287 */
2288 blk_start_request(rq);
40cbbb78 2289 __blk_end_request_all(rq, -EIO);
158dbda0
TH
2290 } else {
2291 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2292 break;
2293 }
2294 }
2295
2296 return rq;
2297}
9934c8c0 2298EXPORT_SYMBOL(blk_peek_request);
158dbda0 2299
9934c8c0 2300void blk_dequeue_request(struct request *rq)
158dbda0 2301{
9934c8c0
TH
2302 struct request_queue *q = rq->q;
2303
158dbda0
TH
2304 BUG_ON(list_empty(&rq->queuelist));
2305 BUG_ON(ELV_ON_HASH(rq));
2306
2307 list_del_init(&rq->queuelist);
2308
2309 /*
2310 * the time frame between a request being removed from the lists
2311 * and when it is freed is accounted as IO that is in progress at
2312 * the driver side.
2313 */
9195291e 2314 if (blk_account_rq(rq)) {
0a7ae2ff 2315 q->in_flight[rq_is_sync(rq)]++;
9195291e
DS
2316 set_io_start_time_ns(rq);
2317 }
158dbda0
TH
2318}
2319
9934c8c0
TH
2320/**
2321 * blk_start_request - start request processing on the driver
2322 * @req: request to dequeue
2323 *
2324 * Description:
2325 * Dequeue @req and start timeout timer on it. This hands off the
2326 * request to the driver.
2327 *
2328 * Block internal functions which don't want to start timer should
2329 * call blk_dequeue_request().
2330 *
2331 * Context:
2332 * queue_lock must be held.
2333 */
2334void blk_start_request(struct request *req)
2335{
2336 blk_dequeue_request(req);
2337
2338 /*
5f49f631
TH
2339 * We are now handing the request to the hardware, initialize
2340 * resid_len to full count and add the timeout handler.
9934c8c0 2341 */
5f49f631 2342 req->resid_len = blk_rq_bytes(req);
dbb66c4b
FT
2343 if (unlikely(blk_bidi_rq(req)))
2344 req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
2345
4912aa6c 2346 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
9934c8c0
TH
2347 blk_add_timer(req);
2348}
2349EXPORT_SYMBOL(blk_start_request);
2350
2351/**
2352 * blk_fetch_request - fetch a request from a request queue
2353 * @q: request queue to fetch a request from
2354 *
2355 * Description:
2356 * Return the request at the top of @q. The request is started on
2357 * return and LLD can start processing it immediately.
2358 *
2359 * Return:
2360 * Pointer to the request at the top of @q if available. Null
2361 * otherwise.
2362 *
2363 * Context:
2364 * queue_lock must be held.
2365 */
2366struct request *blk_fetch_request(struct request_queue *q)
2367{
2368 struct request *rq;
2369
2370 rq = blk_peek_request(q);
2371 if (rq)
2372 blk_start_request(rq);
2373 return rq;
2374}
2375EXPORT_SYMBOL(blk_fetch_request);
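
/*
 * Example (illustrative sketch, not from the kernel sources): the canonical
 * request_fn loop built on blk_fetch_request(), as registered with
 * blk_init_queue() and entered with the queue lock held.  The data transfer
 * is stubbed out; a real driver would drop the lock around anything slow.
 */
#include <linux/blkdev.h>

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (rq->cmd_type != REQ_TYPE_FS) {
			/* not a filesystem request; reject it */
			__blk_end_request_all(rq, -EIO);
			continue;
		}
		/*
		 * Move blk_rq_bytes(rq) bytes at sector blk_rq_pos(rq)
		 * here, then complete the whole request in one go.
		 */
		__blk_end_request_all(rq, 0);
	}
}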
2376
3bcddeac 2377/**
2e60e022 2378 * blk_update_request - Special helper function for request stacking drivers
8ebf9756 2379 * @req: the request being processed
710027a4 2380 * @error: %0 for success, < %0 for error
8ebf9756 2381 * @nr_bytes: number of bytes to complete @req
3bcddeac
KU
2382 *
2383 * Description:
8ebf9756
RD
2384 * Ends I/O on a number of bytes attached to @req, but doesn't complete
2385 * the request structure even if @req doesn't have leftover.
2386 * If @req has leftover, sets it up for the next range of segments.
2e60e022
TH
2387 *
2388 * This special helper function is only for request stacking drivers
2389 * (e.g. request-based dm) so that they can handle partial completion.
2390 * Actual device drivers should use blk_end_request instead.
2391 *
2392 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2393 * %false return from this function.
3bcddeac
KU
2394 *
2395 * Return:
2e60e022
TH
2396 * %false - this request doesn't have any more data
2397 * %true - this request has more data
3bcddeac 2398 **/
2e60e022 2399bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
1da177e4 2400{
f79ea416 2401 int total_bytes;
1da177e4 2402
4a0efdc9
HR
2403 trace_block_rq_complete(req->q, req, nr_bytes);
2404
2e60e022
TH
2405 if (!req->bio)
2406 return false;
2407
1da177e4 2408 /*
6f41469c
TH
2409 * For fs requests, rq is just a carrier of independent bios
2410 * and each partial completion should be handled separately.
2411 * Reset per-request error on each partial completion.
2412 *
2413 * TODO: tj: This is too subtle. It would be better to let
2414 * low level drivers do what they see fit.
1da177e4 2415 */
33659ebb 2416 if (req->cmd_type == REQ_TYPE_FS)
1da177e4
LT
2417 req->errors = 0;
2418
33659ebb
CH
2419 if (error && req->cmd_type == REQ_TYPE_FS &&
2420 !(req->cmd_flags & REQ_QUIET)) {
79775567
HR
2421 char *error_type;
2422
2423 switch (error) {
2424 case -ENOLINK:
2425 error_type = "recoverable transport";
2426 break;
2427 case -EREMOTEIO:
2428 error_type = "critical target";
2429 break;
2430 case -EBADE:
2431 error_type = "critical nexus";
2432 break;
d1ffc1f8
HR
2433 case -ETIMEDOUT:
2434 error_type = "timeout";
2435 break;
a9d6ceb8
HR
2436 case -ENOSPC:
2437 error_type = "critical space allocation";
2438 break;
7e782af5
HR
2439 case -ENODATA:
2440 error_type = "critical medium";
2441 break;
79775567
HR
2442 case -EIO:
2443 default:
2444 error_type = "I/O";
2445 break;
2446 }
ef3ecb66
RE
2447 printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
2448 __func__, error_type, req->rq_disk ?
37d7b34f
YZ
2449 req->rq_disk->disk_name : "?",
2450 (unsigned long long)blk_rq_pos(req));
2451
1da177e4
LT
2452 }
2453
bc58ba94 2454 blk_account_io_completion(req, nr_bytes);
d72d904a 2455
f79ea416
KO
2456 total_bytes = 0;
2457 while (req->bio) {
2458 struct bio *bio = req->bio;
4f024f37 2459 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
1da177e4 2460
4f024f37 2461 if (bio_bytes == bio->bi_iter.bi_size)
1da177e4 2462 req->bio = bio->bi_next;
1da177e4 2463
f79ea416 2464 req_bio_endio(req, bio, bio_bytes, error);
1da177e4 2465
f79ea416
KO
2466 total_bytes += bio_bytes;
2467 nr_bytes -= bio_bytes;
1da177e4 2468
f79ea416
KO
2469 if (!nr_bytes)
2470 break;
1da177e4
LT
2471 }
2472
2473 /*
2474 * completely done
2475 */
2e60e022
TH
2476 if (!req->bio) {
2477 /*
2478 * Reset counters so that the request stacking driver
2479 * can find how many bytes remain in the request
2480 * later.
2481 */
a2dec7b3 2482 req->__data_len = 0;
2e60e022
TH
2483 return false;
2484 }
1da177e4 2485
a2dec7b3 2486 req->__data_len -= total_bytes;
2e46e8b2
TH
2487
2488 /* update sector only for requests with clear definition of sector */
e2a60da7 2489 if (req->cmd_type == REQ_TYPE_FS)
a2dec7b3 2490 req->__sector += total_bytes >> 9;
2e46e8b2 2491
80a761fd
TH
2492 /* mixed attributes always follow the first bio */
2493 if (req->cmd_flags & REQ_MIXED_MERGE) {
2494 req->cmd_flags &= ~REQ_FAILFAST_MASK;
2495 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2496 }
2497
2e46e8b2
TH
2498 /*
2499 * If total number of sectors is less than the first segment
2500 * size, something has gone terribly wrong.
2501 */
2502 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
8182924b 2503 blk_dump_rq_flags(req, "request botched");
a2dec7b3 2504 req->__data_len = blk_rq_cur_bytes(req);
2e46e8b2
TH
2505 }
2506
2507 /* recalculate the number of segments */
1da177e4 2508 blk_recalc_rq_segments(req);
2e46e8b2 2509
2e60e022 2510 return true;
1da177e4 2511}
2e60e022 2512EXPORT_SYMBOL_GPL(blk_update_request);
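
/*
 * Example (illustrative sketch, not from the kernel sources): partial
 * completion as a request-stacking driver would do it.  Completing @done
 * bytes with blk_update_request() leaves the remainder set up in @rq; only
 * a %false return means the request is finished and may be released via
 * blk_finish_request(), which needs the queue lock.
 */
#include <linux/blkdev.h>

static void example_complete_bytes(struct request *rq, int error,
				   unsigned int done)
{
	if (blk_update_request(rq, error, done))
		return;	/* bytes remain; @rq was re-prepped for them */

	/* fully done: finish under the queue lock */
	spin_lock_irq(rq->q->queue_lock);
	blk_finish_request(rq, error);
	spin_unlock_irq(rq->q->queue_lock);
}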
1da177e4 2513
2e60e022
TH
2514static bool blk_update_bidi_request(struct request *rq, int error,
2515 unsigned int nr_bytes,
2516 unsigned int bidi_bytes)
5efccd17 2517{
2e60e022
TH
2518 if (blk_update_request(rq, error, nr_bytes))
2519 return true;
5efccd17 2520
2e60e022
TH
2521 /* Bidi request must be completed as a whole */
2522 if (unlikely(blk_bidi_rq(rq)) &&
2523 blk_update_request(rq->next_rq, error, bidi_bytes))
2524 return true;
5efccd17 2525
e2e1a148
JA
2526 if (blk_queue_add_random(rq->q))
2527 add_disk_randomness(rq->rq_disk);
2e60e022
TH
2528
2529 return false;
1da177e4
LT
2530}
2531
28018c24
JB
2532/**
2533 * blk_unprep_request - unprepare a request
2534 * @req: the request
2535 *
2536 * This function makes a request ready for complete resubmission (or
2537 * completion). It happens only after all error handling is complete,
2538 * so represents the appropriate moment to deallocate any resources
2539 * that were allocated to the request in the prep_rq_fn. The queue
2540 * lock is held when calling this.
2541 */
2542void blk_unprep_request(struct request *req)
2543{
2544 struct request_queue *q = req->q;
2545
2546 req->cmd_flags &= ~REQ_DONTPREP;
2547 if (q->unprep_rq_fn)
2548 q->unprep_rq_fn(q, req);
2549}
2550EXPORT_SYMBOL_GPL(blk_unprep_request);
2551
1da177e4
LT
2552/*
2553 * queue lock must be held
2554 */
12120077 2555void blk_finish_request(struct request *req, int error)
1da177e4 2556{
b8286239
KU
2557 if (blk_rq_tagged(req))
2558 blk_queue_end_tag(req->q, req);
2559
ba396a6c 2560 BUG_ON(blk_queued_rq(req));
1da177e4 2561
33659ebb 2562 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
31373d09 2563 laptop_io_completion(&req->q->backing_dev_info);
1da177e4 2564
e78042e5
MA
2565 blk_delete_timer(req);
2566
28018c24
JB
2567 if (req->cmd_flags & REQ_DONTPREP)
2568 blk_unprep_request(req);
2569
bc58ba94 2570 blk_account_io_done(req);
b8286239 2571
1da177e4 2572 if (req->end_io)
8ffdc655 2573 req->end_io(req, error);
b8286239
KU
2574 else {
2575 if (blk_bidi_rq(req))
2576 __blk_put_request(req->next_rq->q, req->next_rq);
2577
1da177e4 2578 __blk_put_request(req->q, req);
b8286239 2579 }
1da177e4 2580}
12120077 2581EXPORT_SYMBOL(blk_finish_request);
1da177e4 2582
3b11313a 2583/**
2e60e022
TH
2584 * blk_end_bidi_request - Complete a bidi request
2585 * @rq: the request to complete
2586 * @error: %0 for success, < %0 for error
2587 * @nr_bytes: number of bytes to complete @rq
2588 * @bidi_bytes: number of bytes to complete @rq->next_rq
a0cd1285
JA
2589 *
2590 * Description:
e3a04fe3 2591 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2e60e022
TH
2592 * Drivers that support bidi can safely call this member for any
2593 * type of request, bidi or uni. In the latter case @bidi_bytes is
2594 * just ignored.
336cdb40
KU
2595 *
2596 * Return:
2e60e022
TH
2597 * %false - we are done with this request
2598 * %true - still buffers pending for this request
a0cd1285 2599 **/
b1f74493 2600static bool blk_end_bidi_request(struct request *rq, int error,
32fab448
KU
2601 unsigned int nr_bytes, unsigned int bidi_bytes)
2602{
336cdb40 2603 struct request_queue *q = rq->q;
2e60e022 2604 unsigned long flags;
32fab448 2605
2e60e022
TH
2606 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2607 return true;
32fab448 2608
336cdb40 2609 spin_lock_irqsave(q->queue_lock, flags);
2e60e022 2610 blk_finish_request(rq, error);
336cdb40
KU
2611 spin_unlock_irqrestore(q->queue_lock, flags);
2612
2e60e022 2613 return false;
32fab448
KU
2614}
2615
336cdb40 2616/**
2e60e022
TH
2617 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2618 * @rq: the request to complete
710027a4 2619 * @error: %0 for success, < %0 for error
e3a04fe3
KU
2620 * @nr_bytes: number of bytes to complete @rq
2621 * @bidi_bytes: number of bytes to complete @rq->next_rq
336cdb40
KU
2622 *
2623 * Description:
2e60e022
TH
2624 * Identical to blk_end_bidi_request() except that the queue lock is
2625 * assumed to be locked on entry and remains so on return.
336cdb40
KU
2626 *
2627 * Return:
2e60e022
TH
2628 * %false - we are done with this request
2629 * %true - still buffers pending for this request
336cdb40 2630 **/
4853abaa 2631bool __blk_end_bidi_request(struct request *rq, int error,
b1f74493 2632 unsigned int nr_bytes, unsigned int bidi_bytes)
336cdb40 2633{
2e60e022
TH
2634 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2635 return true;
336cdb40 2636
2e60e022 2637 blk_finish_request(rq, error);
336cdb40 2638
2e60e022 2639 return false;
336cdb40 2640}
e19a3ab0
KU
2641
2642/**
2643 * blk_end_request - Helper function for drivers to complete the request.
2644 * @rq: the request being processed
710027a4 2645 * @error: %0 for success, < %0 for error
e19a3ab0
KU
2646 * @nr_bytes: number of bytes to complete
2647 *
2648 * Description:
2649 * Ends I/O on a number of bytes attached to @rq.
2650 * If @rq has leftover, sets it up for the next range of segments.
2651 *
2652 * Return:
b1f74493
FT
2653 * %false - we are done with this request
2654 * %true - still buffers pending for this request
e19a3ab0 2655 **/
b1f74493 2656bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
e19a3ab0 2657{
b1f74493 2658 return blk_end_bidi_request(rq, error, nr_bytes, 0);
e19a3ab0 2659}
56ad1740 2660EXPORT_SYMBOL(blk_end_request);
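
/*
 * Example (illustrative sketch, not from the kernel sources): chunk-by-chunk
 * completion from a hardware interrupt.  blk_end_request() takes the queue
 * lock itself, so no lock is held here; a %false return means the request
 * fully completed.  The device structure is hypothetical.
 */
#include <linux/blkdev.h>
#include <linux/interrupt.h>

struct example_hw_dev {
	struct request *current_rq;	/* request being serviced */
};

static irqreturn_t example_irq(int irq, void *data)
{
	struct example_hw_dev *dev = data;
	struct request *rq = dev->current_rq;

	/* complete the chunk the hardware just finished */
	if (!blk_end_request(rq, 0, blk_rq_cur_bytes(rq)))
		dev->current_rq = NULL;	/* all bytes done */
	return IRQ_HANDLED;
}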
336cdb40
KU
2661
2662/**
b1f74493
FT
2663 * blk_end_request_all - Helper function for drivers to finish the request.
2664 * @rq: the request to finish
8ebf9756 2665 * @error: %0 for success, < %0 for error
336cdb40
KU
2666 *
2667 * Description:
b1f74493
FT
2668 * Completely finish @rq.
2669 */
2670void blk_end_request_all(struct request *rq, int error)
336cdb40 2671{
b1f74493
FT
2672 bool pending;
2673 unsigned int bidi_bytes = 0;
336cdb40 2674
b1f74493
FT
2675 if (unlikely(blk_bidi_rq(rq)))
2676 bidi_bytes = blk_rq_bytes(rq->next_rq);
336cdb40 2677
b1f74493
FT
2678 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2679 BUG_ON(pending);
2680}
56ad1740 2681EXPORT_SYMBOL(blk_end_request_all);
336cdb40 2682
b1f74493
FT
2683/**
2684 * blk_end_request_cur - Helper function to finish the current request chunk.
2685 * @rq: the request to finish the current chunk for
8ebf9756 2686 * @error: %0 for success, < %0 for error
b1f74493
FT
2687 *
2688 * Description:
2689 * Complete the current consecutively mapped chunk from @rq.
2690 *
2691 * Return:
2692 * %false - we are done with this request
2693 * %true - still buffers pending for this request
2694 */
2695bool blk_end_request_cur(struct request *rq, int error)
2696{
2697 return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
336cdb40 2698}
56ad1740 2699EXPORT_SYMBOL(blk_end_request_cur);
336cdb40 2700
80a761fd
TH
2701/**
2702 * blk_end_request_err - Finish a request till the next failure boundary.
2703 * @rq: the request to finish till the next failure boundary for
2704 * @error: must be negative errno
2705 *
2706 * Description:
2707 * Complete @rq till the next failure boundary.
2708 *
2709 * Return:
2710 * %false - we are done with this request
2711 * %true - still buffers pending for this request
2712 */
2713bool blk_end_request_err(struct request *rq, int error)
2714{
2715 WARN_ON(error >= 0);
2716 return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2717}
2718EXPORT_SYMBOL_GPL(blk_end_request_err);
2719
e3a04fe3 2720/**
b1f74493
FT
2721 * __blk_end_request - Helper function for drivers to complete the request.
2722 * @rq: the request being processed
2723 * @error: %0 for success, < %0 for error
2724 * @nr_bytes: number of bytes to complete
e3a04fe3
KU
2725 *
2726 * Description:
b1f74493 2727 * Must be called with queue lock held unlike blk_end_request().
e3a04fe3
KU
2728 *
2729 * Return:
b1f74493
FT
2730 * %false - we are done with this request
2731 * %true - still buffers pending for this request
e3a04fe3 2732 **/
b1f74493 2733bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
e3a04fe3 2734{
b1f74493 2735 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
e3a04fe3 2736}
56ad1740 2737EXPORT_SYMBOL(__blk_end_request);
e3a04fe3 2738
32fab448 2739/**
b1f74493
FT
2740 * __blk_end_request_all - Helper function for drivers to finish the request.
2741 * @rq: the request to finish
8ebf9756 2742 * @error: %0 for success, < %0 for error
32fab448
KU
2743 *
2744 * Description:
b1f74493 2745 * Completely finish @rq. Must be called with queue lock held.
32fab448 2746 */
b1f74493 2747void __blk_end_request_all(struct request *rq, int error)
32fab448 2748{
b1f74493
FT
2749 bool pending;
2750 unsigned int bidi_bytes = 0;
2751
2752 if (unlikely(blk_bidi_rq(rq)))
2753 bidi_bytes = blk_rq_bytes(rq->next_rq);
2754
2755 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2756 BUG_ON(pending);
32fab448 2757}
56ad1740 2758EXPORT_SYMBOL(__blk_end_request_all);
32fab448 2759
e19a3ab0 2760/**
b1f74493
FT
2761 * __blk_end_request_cur - Helper function to finish the current request chunk.
2762 * @rq: the request to finish the current chunk for
8ebf9756 2763 * @error: %0 for success, < %0 for error
e19a3ab0
KU
2764 *
2765 * Description:
b1f74493
FT
2766 * Complete the current consecutively mapped chunk from @rq. Must
2767 * be called with queue lock held.
e19a3ab0
KU
2768 *
2769 * Return:
b1f74493
FT
2770 * %false - we are done with this request
2771 * %true - still buffers pending for this request
2772 */
2773bool __blk_end_request_cur(struct request *rq, int error)
e19a3ab0 2774{
b1f74493 2775 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
e19a3ab0 2776}
56ad1740 2777EXPORT_SYMBOL(__blk_end_request_cur);
e19a3ab0 2778
80a761fd
TH
2779/**
2780 * __blk_end_request_err - Finish a request till the next failure boundary.
2781 * @rq: the request to finish till the next failure boundary for
2782 * @error: must be negative errno
2783 *
2784 * Description:
2785 * Complete @rq till the next failure boundary. Must be called
2786 * with queue lock held.
2787 *
2788 * Return:
2789 * %false - we are done with this request
2790 * %true - still buffers pending for this request
2791 */
2792bool __blk_end_request_err(struct request *rq, int error)
2793{
2794 WARN_ON(error >= 0);
2795 return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2796}
2797EXPORT_SYMBOL_GPL(__blk_end_request_err);
2798
86db1e29
JA
2799void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2800 struct bio *bio)
1da177e4 2801{
a82afdfc 2802 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
7b6d91da 2803 rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
1da177e4 2804
b4f42e28 2805 if (bio_has_data(bio))
fb2dce86 2806 rq->nr_phys_segments = bio_phys_segments(q, bio);
b4f42e28 2807
4f024f37 2808 rq->__data_len = bio->bi_iter.bi_size;
1da177e4 2809 rq->bio = rq->biotail = bio;
1da177e4 2810
66846572
N
2811 if (bio->bi_bdev)
2812 rq->rq_disk = bio->bi_bdev->bd_disk;
2813}
1da177e4 2814
2d4dc890
IL
2815#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2816/**
2817 * rq_flush_dcache_pages - Helper function to flush all pages in a request
2818 * @rq: the request to be flushed
2819 *
2820 * Description:
2821 * Flush all pages in @rq.
2822 */
2823void rq_flush_dcache_pages(struct request *rq)
2824{
2825 struct req_iterator iter;
7988613b 2826 struct bio_vec bvec;
2d4dc890
IL
2827
2828 rq_for_each_segment(bvec, rq, iter)
7988613b 2829 flush_dcache_page(bvec.bv_page);
2d4dc890
IL
2830}
2831EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2832#endif
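
/*
 * Example (illustrative sketch, not from the kernel sources): a PIO driver
 * that filled a read request's pages by CPU copy flushes the D-cache before
 * completing, so userspace mappings see the new data.  On architectures
 * that don't need it, rq_flush_dcache_pages() is a no-op.
 */
static void example_pio_read_done(struct request *rq)
{
	rq_flush_dcache_pages(rq);	/* CPU wrote the data */
	__blk_end_request_all(rq, 0);	/* caller holds the queue lock */
}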
2833
ef9e3fac
KU
2834/**
2835 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2836 * @q : the queue of the device being checked
2837 *
2838 * Description:
2839 * Check if underlying low-level drivers of a device are busy.
2840 * If the drivers want to export their busy state, they must set own
2841 * exporting function using blk_queue_lld_busy() first.
2842 *
2843 * Basically, this function is used only by request stacking drivers
2844 * to stop dispatching requests to underlying devices when underlying
2845 * devices are busy. This behavior encourages more I/O merging on the queue
2846 * of the request stacking driver and prevents I/O throughput regression
2847 * on burst I/O load.
2848 *
2849 * Return:
2850 * 0 - Not busy (The request stacking driver should dispatch request)
2851 * 1 - Busy (The request stacking driver should stop dispatching request)
2852 */
2853int blk_lld_busy(struct request_queue *q)
2854{
2855 if (q->lld_busy_fn)
2856 return q->lld_busy_fn(q);
2857
2858 return 0;
2859}
2860EXPORT_SYMBOL_GPL(blk_lld_busy);
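
/*
 * Example (illustrative sketch, not from the kernel sources): exporting a
 * busy state for blk_lld_busy().  The hook is installed with
 * blk_queue_lld_busy(); the tag-exhaustion test and device structure here
 * are hypothetical.
 */
#include <linux/blkdev.h>

struct example_busy_dev {
	int active_cmds;
	int max_cmds;
};

static int example_lld_busy(struct request_queue *q)
{
	struct example_busy_dev *dev = q->queuedata;

	/* 1 tells the stacking driver to stop dispatching for now */
	return dev->active_cmds >= dev->max_cmds;
}

/* at init time: blk_queue_lld_busy(q, example_lld_busy); */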
2861
b0fd271d
KU
2862/**
2863 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2864 * @rq: the clone request to be cleaned up
2865 *
2866 * Description:
2867 * Free all bios in @rq for a cloned request.
2868 */
2869void blk_rq_unprep_clone(struct request *rq)
2870{
2871 struct bio *bio;
2872
2873 while ((bio = rq->bio) != NULL) {
2874 rq->bio = bio->bi_next;
2875
2876 bio_put(bio);
2877 }
2878}
2879EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2880
2881/*
2882 * Copy attributes of the original request to the clone request.
b4f42e28 2883 * The actual data parts (e.g. ->cmd, ->sense) are not copied.
b0fd271d
KU
2884 */
2885static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2886{
2887 dst->cpu = src->cpu;
3a2edd0d 2888 dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
b0fd271d
KU
2889 dst->cmd_type = src->cmd_type;
2890 dst->__sector = blk_rq_pos(src);
2891 dst->__data_len = blk_rq_bytes(src);
2892 dst->nr_phys_segments = src->nr_phys_segments;
2893 dst->ioprio = src->ioprio;
2894 dst->extra_len = src->extra_len;
2895}
2896
2897/**
2898 * blk_rq_prep_clone - Helper function to setup clone request
2899 * @rq: the request to be setup
2900 * @rq_src: original request to be cloned
2901 * @bs: bio_set that bios for clone are allocated from
2902 * @gfp_mask: memory allocation mask for bio
2903 * @bio_ctr: setup function to be called for each clone bio.
2904 * Returns %0 for success, non %0 for failure.
2905 * @data: private data to be passed to @bio_ctr
2906 *
2907 * Description:
2908 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
b4f42e28 2909 * The actual data parts of @rq_src (e.g. ->cmd, ->sense)
b0fd271d
KU
2910 * are not copied, and copying such parts is the caller's responsibility.
2911 * Also, pages which the original bios are pointing to are not copied
2912 * and the cloned bios just point to the same pages.
2913 * So cloned bios must be completed before original bios, which means
2914 * the caller must complete @rq before @rq_src.
2915 */
2916int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2917 struct bio_set *bs, gfp_t gfp_mask,
2918 int (*bio_ctr)(struct bio *, struct bio *, void *),
2919 void *data)
2920{
2921 struct bio *bio, *bio_src;
2922
2923 if (!bs)
2924 bs = fs_bio_set;
2925
2926 blk_rq_init(NULL, rq);
2927
2928 __rq_for_each_bio(bio_src, rq_src) {
11dfce50 2929 bio = bio_clone_fast(bio_src, gfp_mask, bs);
b0fd271d
KU
2930 if (!bio)
2931 goto free_and_out;
2932
b0fd271d
KU
2933 if (bio_ctr && bio_ctr(bio, bio_src, data))
2934 goto free_and_out;
2935
2936 if (rq->bio) {
2937 rq->biotail->bi_next = bio;
2938 rq->biotail = bio;
2939 } else
2940 rq->bio = rq->biotail = bio;
2941 }
2942
2943 __blk_rq_prep_clone(rq, rq_src);
2944
2945 return 0;
2946
2947free_and_out:
2948 if (bio)
4254bba1 2949 bio_put(bio);
b0fd271d
KU
2950 blk_rq_unprep_clone(rq);
2951
2952 return -ENOMEM;
2953}
2954EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
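
/*
 * Example (illustrative sketch, not from the kernel sources): building a
 * clone the way request-based dm does.  The bio_ctr callback tags each
 * cloned bio with per-target context (@ctx is hypothetical); on success
 * the clone is ready for blk_insert_cloned_request(), and per the rules
 * above it must complete before @orig does.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_bio_ctr(struct bio *clone, struct bio *orig, void *data)
{
	clone->bi_private = data;	/* stash per-clone context */
	return 0;			/* non-zero would abort the clone */
}

static int example_setup_clone(struct request *clone, struct request *orig,
			       void *ctx)
{
	/* NULL bio_set means fall back to fs_bio_set */
	return blk_rq_prep_clone(clone, orig, NULL, GFP_ATOMIC,
				 example_bio_ctr, ctx);
}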
2955
59c3d45e 2956int kblockd_schedule_work(struct work_struct *work)
1da177e4
LT
2957{
2958 return queue_work(kblockd_workqueue, work);
2959}
1da177e4
LT
2960EXPORT_SYMBOL(kblockd_schedule_work);
2961
59c3d45e
JA
2962int kblockd_schedule_delayed_work(struct delayed_work *dwork,
2963 unsigned long delay)
e43473b7
VG
2964{
2965 return queue_delayed_work(kblockd_workqueue, dwork, delay);
2966}
2967EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2968
8ab14595
JA
2969int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
2970 unsigned long delay)
2971{
2972 return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
2973}
2974EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
2975
75df7136
SJ
2976/**
2977 * blk_start_plug - initialize blk_plug and track it inside the task_struct
2978 * @plug: The &struct blk_plug that needs to be initialized
2979 *
2980 * Description:
2981 * Tracking blk_plug inside the task_struct will help with auto-flushing the
2982 * pending I/O should the task end up blocking between blk_start_plug() and
2983 * blk_finish_plug(). This is important from a performance perspective, but
2984 * also ensures that we don't deadlock. For instance, if the task is blocking
2985 * for a memory allocation, memory reclaim could end up wanting to free a
2986 * page belonging to that request that is currently residing in our private
2987 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
2988 * this kind of deadlock.
2989 */
73c10101
JA
2990void blk_start_plug(struct blk_plug *plug)
2991{
2992 struct task_struct *tsk = current;
2993
73c10101 2994 INIT_LIST_HEAD(&plug->list);
320ae51f 2995 INIT_LIST_HEAD(&plug->mq_list);
048c9374 2996 INIT_LIST_HEAD(&plug->cb_list);
73c10101
JA
2997
2998 /*
2999 * If this is a nested plug, don't actually assign it. It will be
3000 * flushed on its own.
3001 */
3002 if (!tsk->plug) {
3003 /*
3004 * Store ordering should not be needed here, since a potential
3005 * preempt will imply a full memory barrier
3006 */
3007 tsk->plug = plug;
3008 }
3009}
3010EXPORT_SYMBOL(blk_start_plug);
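
/*
 * Example (illustrative sketch, not from the kernel sources): batching
 * submissions under one plug.  The bios queue up on the task's plug list
 * and are sorted, merged and dispatched in one go by blk_finish_plug()
 * (or earlier if the task schedules or the plug overflows).  These are
 * fire-and-forget writes, so the end_io just drops the bio reference.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_batch_end_io(struct bio *bio, int error)
{
	bio_put(bio);
}

static void example_write_batch(struct block_device *bdev,
				struct page **pages, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		struct bio *bio = bio_alloc(GFP_NOIO, 1);

		if (!bio)
			break;
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = i * (PAGE_SIZE >> 9);
		bio->bi_end_io = example_batch_end_io;
		bio_add_page(bio, pages[i], PAGE_SIZE, 0);
		submit_bio(WRITE, bio);	/* parked on the plug */
	}
	blk_finish_plug(&plug);		/* flush the whole batch */
}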
3011
3012static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
3013{
3014 struct request *rqa = container_of(a, struct request, queuelist);
3015 struct request *rqb = container_of(b, struct request, queuelist);
3016
975927b9
JM
3017 return !(rqa->q < rqb->q ||
3018 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
73c10101
JA
3019}
3020
49cac01e
JA
3021/*
3022 * If 'from_schedule' is true, then postpone the dispatch of requests
3023 * until a safe kblockd context. We do this to avoid accidental large
3024 * additional stack usage in driver dispatch, in places where the original
3025 * plugger did not intend it.
3026 */
f6603783 3027static void queue_unplugged(struct request_queue *q, unsigned int depth,
49cac01e 3028 bool from_schedule)
99e22598 3029 __releases(q->queue_lock)
94b5eb28 3030{
49cac01e 3031 trace_block_unplug(q, depth, !from_schedule);
99e22598 3032
70460571 3033 if (from_schedule)
24ecfbe2 3034 blk_run_queue_async(q);
70460571 3035 else
24ecfbe2 3036 __blk_run_queue(q);
70460571 3037 spin_unlock(q->queue_lock);
94b5eb28
JA
3038}
3039
74018dc3 3040static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
048c9374
N
3041{
3042 LIST_HEAD(callbacks);
3043
2a7d5559
SL
3044 while (!list_empty(&plug->cb_list)) {
3045 list_splice_init(&plug->cb_list, &callbacks);
048c9374 3046
2a7d5559
SL
3047 while (!list_empty(&callbacks)) {
3048 struct blk_plug_cb *cb = list_first_entry(&callbacks,
048c9374
N
3049 struct blk_plug_cb,
3050 list);
2a7d5559 3051 list_del(&cb->list);
74018dc3 3052 cb->callback(cb, from_schedule);
2a7d5559 3053 }
048c9374
N
3054 }
3055}
3056
9cbb1750
N
3057struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
3058 int size)
3059{
3060 struct blk_plug *plug = current->plug;
3061 struct blk_plug_cb *cb;
3062
3063 if (!plug)
3064 return NULL;
3065
3066 list_for_each_entry(cb, &plug->cb_list, list)
3067 if (cb->callback == unplug && cb->data == data)
3068 return cb;
3069
3070 /* Not currently on the callback list */
3071 BUG_ON(size < sizeof(*cb));
3072 cb = kzalloc(size, GFP_ATOMIC);
3073 if (cb) {
3074 cb->data = data;
3075 cb->callback = unplug;
3076 list_add(&cb->list, &plug->cb_list);
3077 }
3078 return cb;
3079}
3080EXPORT_SYMBOL(blk_check_plugged);
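
/*
 * Example (illustrative sketch, not from the kernel sources): md/raid-style
 * use of blk_check_plugged().  A driver parks bios on per-plug private
 * state (the struct is sized via the @size argument and kzalloc'd by
 * blk_check_plugged()); the callback runs at unplug time and must free the
 * allocation itself.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

struct example_plug_cb {
	struct blk_plug_cb cb;		/* must be embedded first */
	struct bio_list pending;	/* extra per-plug state (zeroed) */
};

static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct example_plug_cb *ecb =
		container_of(cb, struct example_plug_cb, cb);
	struct bio *bio;

	while ((bio = bio_list_pop(&ecb->pending)) != NULL)
		generic_make_request(bio);	/* dispatch the batch */
	kfree(ecb);
}

static bool example_queue_on_plug(struct bio *bio)
{
	struct blk_plug_cb *cb = blk_check_plugged(example_unplug, NULL,
					sizeof(struct example_plug_cb));
	struct example_plug_cb *ecb;

	if (!cb)
		return false;	/* no plug active: caller submits directly */
	ecb = container_of(cb, struct example_plug_cb, cb);
	bio_list_add(&ecb->pending, bio);
	return true;
}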
3081
49cac01e 3082void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
73c10101
JA
3083{
3084 struct request_queue *q;
3085 unsigned long flags;
3086 struct request *rq;
109b8129 3087 LIST_HEAD(list);
94b5eb28 3088 unsigned int depth;
73c10101 3089
74018dc3 3090 flush_plug_callbacks(plug, from_schedule);
320ae51f
JA
3091
3092 if (!list_empty(&plug->mq_list))
3093 blk_mq_flush_plug_list(plug, from_schedule);
3094
73c10101
JA
3095 if (list_empty(&plug->list))
3096 return;
3097
109b8129
N
3098 list_splice_init(&plug->list, &list);
3099
422765c2 3100 list_sort(NULL, &list, plug_rq_cmp);
73c10101
JA
3101
3102 q = NULL;
94b5eb28 3103 depth = 0;
18811272
JA
3104
3105 /*
3106 * Save and disable interrupts here, to avoid doing it for every
3107 * queue lock we have to take.
3108 */
73c10101 3109 local_irq_save(flags);
109b8129
N
3110 while (!list_empty(&list)) {
3111 rq = list_entry_rq(list.next);
73c10101 3112 list_del_init(&rq->queuelist);
73c10101
JA
3113 BUG_ON(!rq->q);
3114 if (rq->q != q) {
99e22598
JA
3115 /*
3116 * This drops the queue lock
3117 */
3118 if (q)
49cac01e 3119 queue_unplugged(q, depth, from_schedule);
73c10101 3120 q = rq->q;
94b5eb28 3121 depth = 0;
73c10101
JA
3122 spin_lock(q->queue_lock);
3123 }
8ba61435
TH
3124
3125 /*
3126 * Short-circuit if @q is dead
3127 */
3f3299d5 3128 if (unlikely(blk_queue_dying(q))) {
8ba61435
TH
3129 __blk_end_request_all(rq, -ENODEV);
3130 continue;
3131 }
3132
73c10101
JA
3133 /*
3134 * rq is already accounted, so use raw insert
3135 */
401a18e9
JA
3136 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
3137 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3138 else
3139 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
94b5eb28
JA
3140
3141 depth++;
73c10101
JA
3142 }
3143
99e22598
JA
3144 /*
3145 * This drops the queue lock
3146 */
3147 if (q)
49cac01e 3148 queue_unplugged(q, depth, from_schedule);
73c10101 3149
73c10101
JA
3150 local_irq_restore(flags);
3151}
73c10101
JA
3152
3153void blk_finish_plug(struct blk_plug *plug)
3154{
f6603783 3155 blk_flush_plug_list(plug, false);
73c10101 3156
88b996cd
CH
3157 if (plug == current->plug)
3158 current->plug = NULL;
73c10101 3159}
88b996cd 3160EXPORT_SYMBOL(blk_finish_plug);
73c10101 3161
6c954667
LM
3162#ifdef CONFIG_PM_RUNTIME
3163/**
3164 * blk_pm_runtime_init - Block layer runtime PM initialization routine
3165 * @q: the queue of the device
3166 * @dev: the device the queue belongs to
3167 *
3168 * Description:
3169 * Initialize runtime-PM-related fields for @q and start auto suspend for
3170 * @dev. Drivers that want to take advantage of request-based runtime PM
3171 * should call this function after @dev has been initialized, and its
3172 * request queue @q has been allocated, and runtime PM for it cannot happen
3173 * yet (either because it is disabled/forbidden or its usage_count > 0). In
3174 * most cases, the driver should call this function before any I/O has taken place.
3175 *
3176 * This function takes care of setting up using auto suspend for the device,
3177 * the autosuspend delay is set to -1 to make runtime suspend impossible
3178 * until an updated value is set by either the user or the driver. Drivers do
3179 * not need to touch other autosuspend settings.
3180 *
3181 * The block layer runtime PM is request based, so it only works for drivers
3182 * that use requests as their IO unit instead of those that directly use bios.
3183 */
3184void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3185{
3186 q->dev = dev;
3187 q->rpm_status = RPM_ACTIVE;
3188 pm_runtime_set_autosuspend_delay(q->dev, -1);
3189 pm_runtime_use_autosuspend(q->dev);
3190}
3191EXPORT_SYMBOL(blk_pm_runtime_init);
3192
3193/**
3194 * blk_pre_runtime_suspend - Pre runtime suspend check
3195 * @q: the queue of the device
3196 *
3197 * Description:
3198 * This function will check if runtime suspend is allowed for the device
3199 * by examining if there are any requests pending in the queue. If there
3200 * are requests pending, the device cannot be runtime suspended; otherwise,
3201 * the queue's status will be updated to SUSPENDING and the driver can
3202 * proceed to suspend the device.
3203 *
3204 * For the not allowed case, we mark last busy for the device so that
3205 * runtime PM core will try to autosuspend it some time later.
3206 *
3207 * This function should be called near the start of the device's
3208 * runtime_suspend callback.
3209 *
3210 * Return:
3211 * 0 - OK to runtime suspend the device
3212 * -EBUSY - Device should not be runtime suspended
3213 */
3214int blk_pre_runtime_suspend(struct request_queue *q)
3215{
3216 int ret = 0;
3217
3218 spin_lock_irq(q->queue_lock);
3219 if (q->nr_pending) {
3220 ret = -EBUSY;
3221 pm_runtime_mark_last_busy(q->dev);
3222 } else {
3223 q->rpm_status = RPM_SUSPENDING;
3224 }
3225 spin_unlock_irq(q->queue_lock);
3226 return ret;
3227}
3228EXPORT_SYMBOL(blk_pre_runtime_suspend);
3229
3230/**
3231 * blk_post_runtime_suspend - Post runtime suspend processing
3232 * @q: the queue of the device
3233 * @err: return value of the device's runtime_suspend function
3234 *
3235 * Description:
3236 * Update the queue's runtime status according to the return value of the
3237 * device's runtime suspend function and mark last busy for the device so
3238 * that PM core will try to auto suspend the device at a later time.
3239 *
3240 * This function should be called near the end of the device's
3241 * runtime_suspend callback.
3242 */
3243void blk_post_runtime_suspend(struct request_queue *q, int err)
3244{
3245 spin_lock_irq(q->queue_lock);
3246 if (!err) {
3247 q->rpm_status = RPM_SUSPENDED;
3248 } else {
3249 q->rpm_status = RPM_ACTIVE;
3250 pm_runtime_mark_last_busy(q->dev);
3251 }
3252 spin_unlock_irq(q->queue_lock);
3253}
3254EXPORT_SYMBOL(blk_post_runtime_suspend);
3255
3256/**
3257 * blk_pre_runtime_resume - Pre runtime resume processing
3258 * @q: the queue of the device
3259 *
3260 * Description:
3261 * Update the queue's runtime status to RESUMING in preparation for the
3262 * runtime resume of the device.
3263 *
3264 * This function should be called near the start of the device's
3265 * runtime_resume callback.
3266 */
3267void blk_pre_runtime_resume(struct request_queue *q)
3268{
3269 spin_lock_irq(q->queue_lock);
3270 q->rpm_status = RPM_RESUMING;
3271 spin_unlock_irq(q->queue_lock);
3272}
3273EXPORT_SYMBOL(blk_pre_runtime_resume);
3274
3275/**
3276 * blk_post_runtime_resume - Post runtime resume processing
3277 * @q: the queue of the device
3278 * @err: return value of the device's runtime_resume function
3279 *
3280 * Description:
3281 * Update the queue's runtime status according to the return value of the
3282 * device's runtime_resume function. If it is successfully resumed, process
3283 * the requests that are queued into the device's queue when it is resuming
3284 * and then mark last busy and initiate autosuspend for it.
3285 *
3286 * This function should be called near the end of the device's
3287 * runtime_resume callback.
3288 */
3289void blk_post_runtime_resume(struct request_queue *q, int err)
3290{
3291 spin_lock_irq(q->queue_lock);
3292 if (!err) {
3293 q->rpm_status = RPM_ACTIVE;
3294 __blk_run_queue(q);
3295 pm_runtime_mark_last_busy(q->dev);
c60855cd 3296 pm_request_autosuspend(q->dev);
6c954667
LM
3297 } else {
3298 q->rpm_status = RPM_SUSPENDED;
3299 }
3300 spin_unlock_irq(q->queue_lock);
3301}
3302EXPORT_SYMBOL(blk_post_runtime_resume);
3303#endif
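
/*
 * Example (illustrative sketch, not from the kernel sources; assumes
 * CONFIG_PM_RUNTIME): a driver's runtime PM callbacks bracketing its own
 * hardware handling with the four helpers above.  The drvdata convention
 * and the stubbed-out hardware calls are hypothetical.
 */
#include <linux/blkdev.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);
	int err;

	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;	/* -EBUSY: requests still pending */
	err = 0;		/* err = example_hw_quiesce(dev); */
	blk_post_runtime_suspend(q, err);
	return err;
}

static int example_runtime_resume(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);
	int err;

	blk_pre_runtime_resume(q);
	err = 0;		/* err = example_hw_wake(dev); */
	blk_post_runtime_resume(q, err);	/* restarts queued requests */
	return err;
}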
3304
1da177e4
LT
3305int __init blk_dev_init(void)
3306{
9eb55b03
NK
3307 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
3308 sizeof(((struct request *)0)->cmd_flags));
3309
89b90be2
TH
3310 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3311 kblockd_workqueue = alloc_workqueue("kblockd",
28747fcd 3312 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1da177e4
LT
3313 if (!kblockd_workqueue)
3314 panic("Failed to create kblockd\n");
3315
3316 request_cachep = kmem_cache_create("blkdev_requests",
20c2df83 3317 sizeof(struct request), 0, SLAB_PANIC, NULL);
1da177e4 3318
8324aa91 3319 blk_requestq_cachep = kmem_cache_create("blkdev_queue",
165125e1 3320 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1da177e4 3321
d38ecf93 3322 return 0;
1da177e4 3323}