/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	clear_wb_congested(rl->blkg->wb_congested, sync);
#else
	/*
	 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
	 * flip its congestion state for events on other blkcgs.
	 */
	if (rl == &rl->q->root_rl)
		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	set_wb_congested(rl->blkg->wb_congested, sync);
#else
	/* see blk_clear_congested() */
	if (rl == &rl->q->root_rl)
		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

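/*
 * Worked example (illustrative, not part of the original source): with the
 * default q->nr_requests = 128, the formulas above give
 *
 *	nr_congestion_on  = 128 - 128/8 + 1          = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103
 *
 * so a request list is flagged congested once 113 requests are allocated,
 * and the flag is only cleared again when the count drops below 103. The
 * gap between the two thresholds provides hysteresis so the congestion
 * state does not flap on every allocation and free.
 */
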
void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

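/*
 * Example (illustrative, not part of the original source): for errnos that
 * appear in the blk_errors[] table the two conversions are inverses, and
 * unknown errnos degrade to the catch-all BLK_STS_IOERR:
 *
 *	errno_to_blk_status(-ENOSPC)       == BLK_STS_NOSPC
 *	blk_status_to_errno(BLK_STS_NOSPC) == -ENOSPC
 *	errno_to_blk_status(-EINVAL)       == BLK_STS_IOERR  (no table entry)
 */
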
int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

static void print_req_error(struct request *req, blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
			   __func__, blk_errors[idx].name, req->rq_disk ?
			   req->rq_disk->disk_name : "?",
			   (unsigned long long)blk_rq_pos(req));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);

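/*
 * Usage sketch (hypothetical driver code, not from this file): a legacy
 * request_fn that runs out of device resources can back off briefly
 * instead of busy-polling. my_dev_busy() is an assumed driver helper.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		if (my_dev_busy(q->queuedata)) {
 *			blk_delay_queue(q, 3);	// queue_lock is held here,
 *			return;			// rerun the queue in ~3 ms
 *		}
 *		// otherwise fetch and dispatch requests as usual
 *	}
 */
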
/**
 * blk_start_queue_async - asynchronously restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue_async() will clear the stop flag on the queue, and
 *   ensure that the request_fn for the queue is run from an async
 *   context.
 **/
void blk_start_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	blk_run_queue_async(q);
}
EXPORT_SYMBOL(blk_start_queue_async);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue().
 **/
void blk_start_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON(!in_interrupt() && !irqs_disabled());
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations.
 **/
void blk_stop_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

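/*
 * Usage sketch (hypothetical driver code, not from this file): the
 * stop/start pairing described above. my_dev_full() is an assumed
 * driver helper; the completion path runs in irq context.
 *
 *	// in the request_fn, with q->queue_lock held:
 *	if (my_dev_full(q->queuedata)) {
 *		blk_stop_queue(q);	// request_fn won't be called again
 *		return;
 *	}
 *
 *	// later, from the driver's command-completion handler:
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_start_queue(q);		// clears STOPPED and reruns the queue
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */
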
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	} else {
		cancel_delayed_work_sync(&q->delay_work);
	}
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}
EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue.
 */
void __blk_run_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us.
 *
 * Note:
 *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
 *    has canceled q->delay_work, callers must hold the queue lock to avoid
 *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
 */
void blk_run_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q. If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained. The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active yet. Some drivers (e.g. fd and loop) get unhappy
		 * in such cases. Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained. Check all the queues and counters.
		 */
		if (drain_all) {
			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				if (fq)
					drain |= !list_empty(&fq->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used. This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before. On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irq(q->queue_lock);
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Queues start drained. Skip actual draining till init is
	 * complete. This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
	if (blk_queue_init_done(q)) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 *
 * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
 * this function is called for both blk-sq and blk-mq queues.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

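/*
 * Example (illustrative, not part of the original source): because
 * bypass_depth is a counter, start/end pairs nest, and two independent
 * users may overlap safely:
 *
 *	blk_queue_bypass_start(q);	// depth 0 -> 1, BYPASS set
 *	blk_queue_bypass_start(q);	// depth 1 -> 2
 *	blk_queue_bypass_end(q);	// depth 2 -> 1, still bypassing
 *	blk_queue_bypass_end(q);	// depth 1 -> 0, BYPASS cleared
 */
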
void blk_set_queue_dying(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (q->mq_ops)
		blk_mq_wake_waiters(q);
	else {
		struct request_list *rl;

		spin_lock_irq(q->queue_lock);
		blk_queue_for_each_rl(rl, q) {
			if (rl->rq_pool) {
				wake_up(&rl->wait[BLK_RW_SYNC]);
				wake_up(&rl->wait[BLK_RW_ASYNC]);
			}
		}
		spin_unlock_irq(q->queue_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it. All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	blk_set_queue_dying(q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released. Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing. This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that q->request_fn() gets invoked after draining finished.
	 */
	blk_freeze_queue(q);
	spin_lock_irq(lock);
	if (!q->mq_ops)
		__blk_drain_queue(q, true);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (q->mq_ops)
		blk_mq_free_queue(q);
	percpu_ref_exit(&q->q_usage_counter);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/* Allocate memory local to the request queue */
static void *alloc_request_simple(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;

	return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
}

static void free_request_simple(void *element, void *data)
{
	kmem_cache_free(request_cachep, element);
}

static void *alloc_request_size(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;
	struct request *rq;

	rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
			q->node);
	if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
		kfree(rq);
		rq = NULL;
	}
	return rq;
}

static void free_request_size(void *element, void *data)
{
	struct request_queue *q = data;

	if (q->exit_rq_fn)
		q->exit_rq_fn(q, element);
	kfree(element);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	if (q->cmd_size) {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_size, free_request_size,
				q, gfp_mask, q->node);
	} else {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_simple, free_request_simple,
				q, gfp_mask, q->node);
	}
	if (!rl->rq_pool)
		return -ENOMEM;

	if (rl != &q->root_rl)
		WARN_ON_ONCE(!blk_get_queue(q));

	return 0;
}

void blk_exit_rl(struct request_queue *q, struct request_list *rl)
{
	if (rl->rq_pool) {
		mempool_destroy(rl->rq_pool);
		if (rl != &q->root_rl)
			blk_put_queue(q);
	}
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

int blk_queue_enter(struct request_queue *q, bool nowait)
{
	while (true) {
		int ret;

		if (percpu_ref_tryget_live(&q->q_usage_counter))
			return 0;

		if (nowait)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(),
		 * we need to order reading __PERCPU_REF_DEAD flag of
		 * .q_usage_counter and reading .mq_freeze_depth or
		 * queue dying flag, otherwise the following wait may
		 * never return if the two reads are reordered.
		 */
		smp_rmb();

		ret = wait_event_interruptible(q->mq_freeze_wq,
				!atomic_read(&q->mq_freeze_depth) ||
				blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
		if (ret)
			return ret;
	}
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

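/*
 * Usage sketch (illustrative, not part of the original source):
 * q_usage_counter brackets every submission so the queue can be frozen
 * or killed safely. generic_make_request() follows essentially this
 * pattern when sending down a bio:
 *
 *	if (blk_queue_enter(q, nowait) == 0) {
 *		ret = q->make_request_fn(q, bio);	// queue can't be
 *		blk_queue_exit(q);			// frozen meanwhile
 *	} else {
 *		bio_io_error(bio);	// dying, or frozen with nowait set
 *	}
 */
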
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;

	kblockd_schedule_work(&q->timeout_work);
}

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->bio_split = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (!q->bio_split)
		goto fail_id;

	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->backing_dev_info->ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info->name = "block";
	q->node = node_id;

	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

#ifdef CONFIG_BLK_DEV_IO_TRACE
	mutex_init(&q->blk_trace_mutex);
#endif
	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init. The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	init_waitqueue_head(&q->mq_freeze_wq);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_free(q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue(). The function @rfn will be called when there
 *    are requests on the queue that need to be processed. If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time. If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);

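/*
 * Usage sketch (hypothetical driver code, not from this file): the
 * blk_init_queue()/blk_cleanup_queue() pairing required by the note
 * above. my_request_fn and my_lock are assumed driver-side names.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	// at probe / module init:
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *
 *	// at remove / module exit:
 *	blk_cleanup_queue(q);
 */
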
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!q)
		return NULL;

	q->request_fn = rfn;
	if (lock)
		q->queue_lock = lock;
	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);

int blk_init_allocated_queue(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
	if (!q->fq)
		return -ENOMEM;

	if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
		goto out_free_flush_queue;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto out_exit_flush_rq;

	INIT_WORK(&q->timeout_work, blk_timeout_work);
	q->queue_flags |= QUEUE_FLAG_DEFAULT;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* Protect q->elevator from elevator_change */
	mutex_lock(&q->sysfs_lock);

	/* init elevator */
	if (elevator_init(q, NULL)) {
		mutex_unlock(&q->sysfs_lock);
		goto out_exit_flush_rq;
	}

	mutex_unlock(&q->sysfs_lock);
	return 0;

out_exit_flush_rq:
	if (q->exit_rq_fn)
		q->exit_rq_fn(q, q->fq->flush_rq);
out_free_flush_queue:
	blk_free_flush_queue(q->fq);
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->rq_flags & RQF_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, rl->rq_pool);
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_congested(rl, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}

/*
 * A request has just been released. Account for it, update the full and
 * congestion status, wake up any waiters. Called under q->queue_lock.
 */
static void freed_request(struct request_list *rl, bool sync,
		req_flags_t rq_flags)
{
	struct request_queue *q = rl->q;

	q->nr_rqs[sync]--;
	rl->count[sync]--;
	if (rq_flags & RQF_ELVPRIV)
		q->nr_rqs_elvpriv--;

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
}

int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct request_list *rl;
	int on_thresh, off_thresh;

	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);
	on_thresh = queue_congestion_on_threshold(q);
	off_thresh = queue_congestion_off_threshold(q);

	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_SYNC);
		else if (rl->count[BLK_RW_SYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_SYNC);

		if (rl->count[BLK_RW_ASYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_ASYNC);
		else if (rl->count[BLK_RW_ASYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_ASYNC);

		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_SYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_SYNC);
			wake_up(&rl->wait[BLK_RW_SYNC]);
		}

		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_ASYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_ASYNC);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}

	spin_unlock_irq(q->queue_lock);
	return 0;
}

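/*
 * Context note (illustrative, not part of the original source): for
 * legacy queues this is the handler behind the nr_requests sysfs
 * attribute, e.g.
 *
 *	# echo 256 > /sys/block/sda/queue/nr_requests
 *
 * which should reach blk_update_nr_requests(q, 256) via
 * queue_requests_store() in blk-sysfs.c, re-evaluating every
 * request_list against the new congestion and "full" thresholds.
 */
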
/**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q. This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, unsigned int op,
		struct bio *bio, gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = op_is_sync(op);
	int may_queue;
	req_flags_t rq_flags = RQF_ALLOCED;

	lockdep_assert_held(q->queue_lock);

	if (unlikely(blk_queue_dying(q)))
		return ERR_PTR(-ENODEV);

	may_queue = elv_may_queue(q, op);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return ERR_PTR(-ENOMEM);
				}
			}
		}
		blk_set_congested(rl, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return ERR_PTR(-ENOMEM);

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator. If
	 * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed. This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 *
	 * Also, lookup icq while holding queue_lock. If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (!op_is_flush(op) && !blk_queue_bypass(q)) {
		rq_flags |= RQF_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rq_flags |= RQF_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = op;
	rq->rq_flags = rq_flags;

	/* init elvpriv */
	if (rq_flags & RQF_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, op);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO. Treat this request as !elvpriv. This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
			   __func__, dev_name(q->backing_dev_info->dev));

	rq->rq_flags &= ~RQF_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, is_sync, rq_flags);

	/*
	 * In the very unlikely event that allocation failed and no
	 * requests for this direction were pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. Another possible fix would be to split the rq mempool into
	 * READ and WRITE.
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return ERR_PTR(-ENOMEM);
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q. If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
 * this function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, unsigned int op,
		struct bio *bio, gfp_t gfp_mask)
{
	const bool is_sync = op_is_sync(op);
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, op, bio, gfp_mask);
	if (!IS_ERR(rq))
		return rq;

	if (op & REQ_NOWAIT) {
		blk_put_rl(rl);
		return ERR_PTR(-EAGAIN);
	}

	if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return rq;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, op);

	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period of time. See ioc_batching, ioc_set_batching.
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}

static struct request *blk_old_get_request(struct request_queue *q,
					   unsigned int op, gfp_t gfp_mask)
{
	struct request *rq;

	WARN_ON_ONCE(q->mq_ops);

	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, op, NULL, gfp_mask);
	if (IS_ERR(rq)) {
		spin_unlock_irq(q->queue_lock);
		return rq;
	}

	/* q->queue_lock is unlocked at this point */
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}

struct request *blk_get_request(struct request_queue *q, unsigned int op,
				gfp_t gfp_mask)
{
	struct request *req;

	if (q->mq_ops) {
		req = blk_mq_alloc_request(q, op,
			(gfp_mask & __GFP_DIRECT_RECLAIM) ?
			0 : BLK_MQ_REQ_NOWAIT);
		if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
			q->mq_ops->initialize_rq_fn(req);
	} else {
		req = blk_old_get_request(q, op, gfp_mask);
		if (!IS_ERR(req) && q->initialize_rq_fn)
			q->initialize_rq_fn(req);
	}

	return req;
}
EXPORT_SYMBOL(blk_get_request);

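/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * caller allocates a request, executes it, and drops its reference.
 * REQ_OP_DRV_IN is just an example opcode:
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	// set up the request payload here, then:
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */
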
/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, &rq->issue_stat);

	if (rq->rq_flags & RQF_QUEUED)
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	blk_account_io_start(rq, true);
	__elv_add_request(q, rq, where);
}

static void part_round_stats_single(struct request_queue *q, int cpu,
				    struct hd_struct *part, unsigned long now,
				    unsigned int inflight)
{
	if (inflight) {
		__part_stat_add(cpu, part, time_in_queue,
				inflight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @q: target block queue
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation. To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats. This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
{
	struct hd_struct *part2 = NULL;
	unsigned long now = jiffies;
	unsigned int inflight[2];
	int stats = 0;

	if (part->stamp != now)
		stats |= 1;

	if (part->partno) {
		part2 = &part_to_disk(part)->part0;
		if (part2->stamp != now)
			stats |= 2;
	}

	if (!stats)
		return;

	part_in_flight(q, part, inflight);

	if (stats & 2)
		part_round_stats_single(q, cpu, part2, now, inflight[1]);
	if (stats & 1)
		part_round_stats_single(q, cpu, part, now, inflight[0]);
}
EXPORT_SYMBOL_GPL(part_round_stats);

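/*
 * Worked example (illustrative, not part of the original source): if a
 * partition has had 4 requests in flight since part->stamp and 10 jiffies
 * have passed, part_round_stats_single() adds
 *
 *	time_in_queue += 4 * 10 = 40	// request-jiffies spent queued
 *	io_ticks      += 10		// the device was busy for 10 jiffies
 *
 * and resets part->stamp to "now", so the same interval is never
 * accounted twice.
 */
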
47fafbc7 1532#ifdef CONFIG_PM
c8158819
LM
1533static void blk_pm_put_request(struct request *rq)
1534{
e8064021 1535 if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
c8158819
LM
1536 pm_runtime_mark_last_busy(rq->q->dev);
1537}
1538#else
1539static inline void blk_pm_put_request(struct request *rq) {}
1540#endif
1541
165125e1 1542void __blk_put_request(struct request_queue *q, struct request *req)
1da177e4 1543{
e8064021
CH
1544 req_flags_t rq_flags = req->rq_flags;
1545
1da177e4
LT
1546 if (unlikely(!q))
1547 return;
1da177e4 1548
6f5ba581
CH
1549 if (q->mq_ops) {
1550 blk_mq_free_request(req);
1551 return;
1552 }
1553
2fff8a92
BVA
1554 lockdep_assert_held(q->queue_lock);
1555
c8158819
LM
1556 blk_pm_put_request(req);
1557
8922e16c
TH
1558 elv_completed_request(q, req);
1559
1cd96c24
BH
1560 /* this is a bio leak */
1561 WARN_ON(req->bio != NULL);
1562
87760e5e
JA
1563 wbt_done(q->rq_wb, &req->issue_stat);
1564
1da177e4
LT
1565 /*
1566 * Request may not have originated from ll_rw_blk. if not,
1567 * it didn't come out of our reserved rq pools
1568 */
e8064021 1569 if (rq_flags & RQF_ALLOCED) {
a051661c 1570 struct request_list *rl = blk_rq_rl(req);
ef295ecf 1571 bool sync = op_is_sync(req->cmd_flags);
1da177e4 1572
1da177e4 1573 BUG_ON(!list_empty(&req->queuelist));
360f92c2 1574 BUG_ON(ELV_ON_HASH(req));
1da177e4 1575
a051661c 1576 blk_free_request(rl, req);
e8064021 1577 freed_request(rl, sync, rq_flags);
a051661c 1578 blk_put_rl(rl);
1da177e4
LT
1579 }
1580}
6e39b69e
MC
1581EXPORT_SYMBOL_GPL(__blk_put_request);
1582
1da177e4
LT
1583void blk_put_request(struct request *req)
1584{
165125e1 1585 struct request_queue *q = req->q;
8922e16c 1586
320ae51f
JA
1587 if (q->mq_ops)
1588 blk_mq_free_request(req);
1589 else {
1590 unsigned long flags;
1591
1592 spin_lock_irqsave(q->queue_lock, flags);
1593 __blk_put_request(q, req);
1594 spin_unlock_irqrestore(q->queue_lock, flags);
1595 }
1da177e4 1596}
1da177e4
LT
1597EXPORT_SYMBOL(blk_put_request);
1598
320ae51f
JA
1599bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1600 struct bio *bio)
73c10101 1601{
1eff9d32 1602 const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
73c10101 1603
73c10101
JA
1604 if (!ll_back_merge_fn(q, req, bio))
1605 return false;
1606
8c1cf6bb 1607 trace_block_bio_backmerge(q, req, bio);
73c10101
JA
1608
1609 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1610 blk_rq_set_mixed_merge(req);
1611
1612 req->biotail->bi_next = bio;
1613 req->biotail = bio;
4f024f37 1614 req->__data_len += bio->bi_iter.bi_size;
73c10101
JA
1615 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1616
320ae51f 1617 blk_account_io_start(req, false);
73c10101
JA
1618 return true;
1619}
1620
320ae51f
JA
1621bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1622 struct bio *bio)
73c10101 1623{
1eff9d32 1624 const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
73c10101 1625
73c10101
JA
1626 if (!ll_front_merge_fn(q, req, bio))
1627 return false;
1628
8c1cf6bb 1629 trace_block_bio_frontmerge(q, req, bio);
73c10101
JA
1630
1631 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1632 blk_rq_set_mixed_merge(req);
1633
73c10101
JA
1634 bio->bi_next = req->bio;
1635 req->bio = bio;
1636
4f024f37
KO
1637 req->__sector = bio->bi_iter.bi_sector;
1638 req->__data_len += bio->bi_iter.bi_size;
73c10101
JA
1639 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1640
320ae51f 1641 blk_account_io_start(req, false);
73c10101
JA
1642 return true;
1643}
1644
1e739730
CH
1645bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
1646 struct bio *bio)
1647{
1648 unsigned short segments = blk_rq_nr_discard_segments(req);
1649
1650 if (segments >= queue_max_discard_segments(q))
1651 goto no_merge;
1652 if (blk_rq_sectors(req) + bio_sectors(bio) >
1653 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1654 goto no_merge;
1655
1656 req->biotail->bi_next = bio;
1657 req->biotail = bio;
1658 req->__data_len += bio->bi_iter.bi_size;
1659 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1660 req->nr_phys_segments = segments + 1;
1661
1662 blk_account_io_start(req, false);
1663 return true;
1664no_merge:
1665 req_set_nomerge(q, req);
1666 return false;
1667}
1668
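/*
 * Illustrative summary (editorial sketch): how the three helpers above
 * grow a request's bio chain, with bioN being the newly queued bio.
 *
 *	back merge:    [bio0 -> bio1] + bioN  =>  [bio0 -> bio1 -> bioN]
 *	front merge:   bioN + [bio0 -> bio1]  =>  [bioN -> bio0 -> bio1],
 *	               and req->__sector moves back to bioN's sector
 *	discard merge: chained like a back merge, but bounded by the
 *	               queue's discard segment limit rather than contiguity
 */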
bd87b589 1669/**
320ae51f 1670 * blk_attempt_plug_merge - try to merge with %current's plugged list
bd87b589
TH
1671 * @q: request_queue new bio is being queued at
1672 * @bio: new bio being queued
1673 * @request_count: out parameter for number of traversed plugged requests
ccc2600b
RD
1674 * @same_queue_rq: pointer to &struct request that gets filled in when
1675 * another request associated with @q is found on the plug list
1676 * (optional, may be %NULL)
bd87b589
TH
1677 *
1678 * Determine whether @bio being queued on @q can be merged with a request
1679 * on %current's plugged list. Returns %true if merge was successful,
1680 * otherwise %false.
1681 *
07c2bd37
TH
1682 * Plugging coalesces IOs from the same issuer for the same purpose without
1683 * going through @q->queue_lock. As such it's more of an issuing mechanism
 1684 * than scheduling, and the request, while it may have elvpriv data, is
 1685 * not added to the elevator at this point. In addition, we don't have
 1686 * reliable access to the elevator outside the queue lock. Only check basic
1687 * merging parameters without querying the elevator.
da41a589
RE
1688 *
1689 * Caller must ensure !blk_queue_nomerges(q) beforehand.
73c10101 1690 */
320ae51f 1691bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
5b3f341f
SL
1692 unsigned int *request_count,
1693 struct request **same_queue_rq)
73c10101
JA
1694{
1695 struct blk_plug *plug;
1696 struct request *rq;
92f399c7 1697 struct list_head *plug_list;
73c10101 1698
bd87b589 1699 plug = current->plug;
73c10101 1700 if (!plug)
34fe7c05 1701 return false;
56ebdaf2 1702 *request_count = 0;
73c10101 1703
92f399c7
SL
1704 if (q->mq_ops)
1705 plug_list = &plug->mq_list;
1706 else
1707 plug_list = &plug->list;
1708
1709 list_for_each_entry_reverse(rq, plug_list, queuelist) {
34fe7c05 1710 bool merged = false;
73c10101 1711
5b3f341f 1712 if (rq->q == q) {
1b2e19f1 1713 (*request_count)++;
5b3f341f
SL
1714 /*
 1715 * Only the blk-mq multiple-hardware-queues case checks for an rq
 1716 * on the same queue; there should be at most one such rq per
 1717 * queue.
 1718 */
1719 if (same_queue_rq)
1720 *same_queue_rq = rq;
1721 }
56ebdaf2 1722
07c2bd37 1723 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
73c10101
JA
1724 continue;
1725
34fe7c05
CH
1726 switch (blk_try_merge(rq, bio)) {
1727 case ELEVATOR_BACK_MERGE:
1728 merged = bio_attempt_back_merge(q, rq, bio);
1729 break;
1730 case ELEVATOR_FRONT_MERGE:
1731 merged = bio_attempt_front_merge(q, rq, bio);
1732 break;
1e739730
CH
1733 case ELEVATOR_DISCARD_MERGE:
1734 merged = bio_attempt_discard_merge(q, rq, bio);
1735 break;
34fe7c05
CH
1736 default:
1737 break;
73c10101 1738 }
34fe7c05
CH
1739
1740 if (merged)
1741 return true;
73c10101 1742 }
34fe7c05
CH
1743
1744 return false;
73c10101
JA
1745}
1746
0809e3ac
JM
1747unsigned int blk_plug_queued_count(struct request_queue *q)
1748{
1749 struct blk_plug *plug;
1750 struct request *rq;
1751 struct list_head *plug_list;
1752 unsigned int ret = 0;
1753
1754 plug = current->plug;
1755 if (!plug)
1756 goto out;
1757
1758 if (q->mq_ops)
1759 plug_list = &plug->mq_list;
1760 else
1761 plug_list = &plug->list;
1762
1763 list_for_each_entry(rq, plug_list, queuelist) {
1764 if (rq->q == q)
1765 ret++;
1766 }
1767out:
1768 return ret;
1769}
1770
da8d7f07 1771void blk_init_request_from_bio(struct request *req, struct bio *bio)
52d9e675 1772{
0be0dee6
BVA
1773 struct io_context *ioc = rq_ioc(bio);
1774
1eff9d32 1775 if (bio->bi_opf & REQ_RAHEAD)
a82afdfc 1776 req->cmd_flags |= REQ_FAILFAST_MASK;
b31dc66a 1777
4f024f37 1778 req->__sector = bio->bi_iter.bi_sector;
5dc8b362
AM
1779 if (ioprio_valid(bio_prio(bio)))
1780 req->ioprio = bio_prio(bio);
0be0dee6
BVA
1781 else if (ioc)
1782 req->ioprio = ioc->ioprio;
1783 else
1784 req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
cb6934f8 1785 req->write_hint = bio->bi_write_hint;
bc1c56fd 1786 blk_rq_bio_prep(req->q, req, bio);
52d9e675 1787}
da8d7f07 1788EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
52d9e675 1789
dece1635 1790static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1da177e4 1791{
73c10101 1792 struct blk_plug *plug;
34fe7c05 1793 int where = ELEVATOR_INSERT_SORT;
e4d750c9 1794 struct request *req, *free;
56ebdaf2 1795 unsigned int request_count = 0;
87760e5e 1796 unsigned int wb_acct;
1da177e4 1797
1da177e4
LT
1798 /*
 1799 * The low-level driver can indicate that it wants pages above a
 1800 * certain limit bounced to low memory (i.e., for highmem, or even
 1801 * ISA DMA in theory).
1802 */
1803 blk_queue_bounce(q, &bio);
1804
af67c31f 1805 blk_queue_split(q, &bio);
23688bf4 1806
e23947bd 1807 if (!bio_integrity_prep(bio))
dece1635 1808 return BLK_QC_T_NONE;
ffecfd1a 1809
f73f44eb 1810 if (op_is_flush(bio->bi_opf)) {
73c10101 1811 spin_lock_irq(q->queue_lock);
ae1b1539 1812 where = ELEVATOR_INSERT_FLUSH;
28e7d184
TH
1813 goto get_rq;
1814 }
1815
73c10101
JA
1816 /*
1817 * Check if we can merge with the plugged list before grabbing
1818 * any locks.
1819 */
0809e3ac
JM
1820 if (!blk_queue_nomerges(q)) {
1821 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
dece1635 1822 return BLK_QC_T_NONE;
0809e3ac
JM
1823 } else
1824 request_count = blk_plug_queued_count(q);
1da177e4 1825
73c10101 1826 spin_lock_irq(q->queue_lock);
2056a782 1827
34fe7c05
CH
1828 switch (elv_merge(q, &req, bio)) {
1829 case ELEVATOR_BACK_MERGE:
1830 if (!bio_attempt_back_merge(q, req, bio))
1831 break;
1832 elv_bio_merged(q, req, bio);
1833 free = attempt_back_merge(q, req);
1834 if (free)
1835 __blk_put_request(q, free);
1836 else
1837 elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
1838 goto out_unlock;
1839 case ELEVATOR_FRONT_MERGE:
1840 if (!bio_attempt_front_merge(q, req, bio))
1841 break;
1842 elv_bio_merged(q, req, bio);
1843 free = attempt_front_merge(q, req);
1844 if (free)
1845 __blk_put_request(q, free);
1846 else
1847 elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
1848 goto out_unlock;
1849 default:
1850 break;
1da177e4
LT
1851 }
1852
450991bc 1853get_rq:
87760e5e
JA
1854 wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
1855
1da177e4 1856 /*
450991bc 1857 * Grab a free request. This might sleep but cannot fail.
d6344532 1858 * Returns with the queue unlocked.
450991bc 1859 */
ef295ecf 1860 req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
a492f075 1861 if (IS_ERR(req)) {
87760e5e 1862 __wbt_done(q->rq_wb, wb_acct);
4e4cbee9
CH
1863 if (PTR_ERR(req) == -ENOMEM)
1864 bio->bi_status = BLK_STS_RESOURCE;
1865 else
1866 bio->bi_status = BLK_STS_IOERR;
4246a0b6 1867 bio_endio(bio);
da8303c6
TH
1868 goto out_unlock;
1869 }
d6344532 1870
87760e5e
JA
1871 wbt_track(&req->issue_stat, wb_acct);
1872
450991bc
NP
1873 /*
1874 * After dropping the lock and possibly sleeping here, our request
1875 * may now be mergeable after it had proven unmergeable (above).
1876 * We don't worry about that case for efficiency. It won't happen
1877 * often, and the elevators are able to handle it.
1da177e4 1878 */
da8d7f07 1879 blk_init_request_from_bio(req, bio);
1da177e4 1880
9562ad9a 1881 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
11ccf116 1882 req->cpu = raw_smp_processor_id();
73c10101
JA
1883
1884 plug = current->plug;
721a9602 1885 if (plug) {
dc6d36c9
JA
1886 /*
1887 * If this is the first request added after a plug, fire
7aef2e78 1888 * off a plug trace.
0a6219a9
ML
1889 *
 1890 * @request_count may become stale if the task is scheduled
 1891 * out, so check the plug list again.
dc6d36c9 1892 */
0a6219a9 1893 if (!request_count || list_empty(&plug->list))
dc6d36c9 1894 trace_block_plug(q);
3540d5e8 1895 else {
50d24c34
SL
1896 struct request *last = list_entry_rq(plug->list.prev);
1897 if (request_count >= BLK_MAX_REQUEST_COUNT ||
1898 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
3540d5e8 1899 blk_flush_plug_list(plug, false);
019ceb7d
SL
1900 trace_block_plug(q);
1901 }
73c10101 1902 }
73c10101 1903 list_add_tail(&req->queuelist, &plug->list);
320ae51f 1904 blk_account_io_start(req, true);
73c10101
JA
1905 } else {
1906 spin_lock_irq(q->queue_lock);
1907 add_acct_request(q, req, where);
24ecfbe2 1908 __blk_run_queue(q);
73c10101
JA
1909out_unlock:
1910 spin_unlock_irq(q->queue_lock);
1911 }
dece1635
JA
1912
1913 return BLK_QC_T_NONE;
1da177e4
LT
1914}
1915
1da177e4
LT
1916static void handle_bad_sector(struct bio *bio)
1917{
1918 char b[BDEVNAME_SIZE];
1919
1920 printk(KERN_INFO "attempt to access beyond end of device\n");
6296b960 1921 printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
74d46992 1922 bio_devname(bio, b), bio->bi_opf,
f73a1c7d 1923 (unsigned long long)bio_end_sector(bio),
74d46992 1924 (long long)get_capacity(bio->bi_disk));
1da177e4
LT
1925}
1926
c17bb495
AM
1927#ifdef CONFIG_FAIL_MAKE_REQUEST
1928
1929static DECLARE_FAULT_ATTR(fail_make_request);
1930
1931static int __init setup_fail_make_request(char *str)
1932{
1933 return setup_fault_attr(&fail_make_request, str);
1934}
1935__setup("fail_make_request=", setup_fail_make_request);
1936
b2c9cd37 1937static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
c17bb495 1938{
b2c9cd37 1939 return part->make_it_fail && should_fail(&fail_make_request, bytes);
c17bb495
AM
1940}
1941
1942static int __init fail_make_request_debugfs(void)
1943{
dd48c085
AM
1944 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1945 NULL, &fail_make_request);
1946
21f9fcd8 1947 return PTR_ERR_OR_ZERO(dir);
c17bb495
AM
1948}
1949
1950late_initcall(fail_make_request_debugfs);
1951
1952#else /* CONFIG_FAIL_MAKE_REQUEST */
1953
b2c9cd37
AM
1954static inline bool should_fail_request(struct hd_struct *part,
1955 unsigned int bytes)
c17bb495 1956{
b2c9cd37 1957 return false;
c17bb495
AM
1958}
1959
1960#endif /* CONFIG_FAIL_MAKE_REQUEST */
1961
74d46992
CH
1962/*
1963 * Remap block n of partition p to block n+start(p) of the disk.
1964 */
1965static inline int blk_partition_remap(struct bio *bio)
1966{
1967 struct hd_struct *p;
1968 int ret = 0;
1969
1970 /*
1971 * Zone reset does not include bi_size so bio_sectors() is always 0.
1972 * Include a test for the reset op code and perform the remap if needed.
1973 */
1974 if (!bio->bi_partno ||
1975 (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET))
1976 return 0;
1977
1978 rcu_read_lock();
1979 p = __disk_get_part(bio->bi_disk, bio->bi_partno);
1980 if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) {
1981 bio->bi_iter.bi_sector += p->start_sect;
1982 bio->bi_partno = 0;
1983 trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
1984 bio->bi_iter.bi_sector - p->start_sect);
1985 } else {
1986 printk("%s: fail for partition %d\n", __func__, bio->bi_partno);
1987 ret = -EIO;
1988 }
1989 rcu_read_unlock();
1990
1991 return ret;
1992}
1993
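/*
 * Worked example (illustrative): for a partition whose start_sect is
 * 2048, a bio aimed at sector 100 of that partition is remapped to
 * absolute sector 100 + 2048 = 2148 on the whole disk, and bi_partno
 * is cleared to 0 so the bio cannot be remapped twice.
 */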
c07e2b41
JA
1994/*
1995 * Check whether this bio extends beyond the end of the device.
1996 */
1997static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1998{
1999 sector_t maxsector;
2000
2001 if (!nr_sectors)
2002 return 0;
2003
2004 /* Test device or partition size, when known. */
74d46992 2005 maxsector = get_capacity(bio->bi_disk);
c07e2b41 2006 if (maxsector) {
4f024f37 2007 sector_t sector = bio->bi_iter.bi_sector;
c07e2b41
JA
2008
2009 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
2010 /*
2011 * This may well happen - the kernel calls bread()
2012 * without checking the size of the device, e.g., when
2013 * mounting a device.
2014 */
2015 handle_bad_sector(bio);
2016 return 1;
2017 }
2018 }
2019
2020 return 0;
2021}
2022
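/*
 * Worked example (illustrative): with maxsector = 1000, a bio with
 * sector = 990 and nr_sectors = 16 is rejected because
 * maxsector - nr_sectors = 984 < 990. Writing the test this way also
 * avoids computing sector + nr_sectors, which could overflow for
 * sectors near the top of the sector_t range.
 */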
27a84d54
CH
2023static noinline_for_stack bool
2024generic_make_request_checks(struct bio *bio)
1da177e4 2025{
165125e1 2026 struct request_queue *q;
5a7bbad2 2027 int nr_sectors = bio_sectors(bio);
4e4cbee9 2028 blk_status_t status = BLK_STS_IOERR;
5a7bbad2 2029 char b[BDEVNAME_SIZE];
1da177e4
LT
2030
2031 might_sleep();
1da177e4 2032
c07e2b41
JA
2033 if (bio_check_eod(bio, nr_sectors))
2034 goto end_io;
1da177e4 2035
74d46992 2036 q = bio->bi_disk->queue;
5a7bbad2
CH
2037 if (unlikely(!q)) {
2038 printk(KERN_ERR
2039 "generic_make_request: Trying to access "
2040 "nonexistent block-device %s (%Lu)\n",
74d46992 2041 bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
5a7bbad2
CH
2042 goto end_io;
2043 }
c17bb495 2044
03a07c92
GR
2045 /*
2046 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
2047 * if queue is not a request based queue.
2048 */
2049
2050 if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
2051 goto not_supported;
2052
74d46992 2053 if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
5a7bbad2 2054 goto end_io;
2056a782 2055
74d46992
CH
2056 if (blk_partition_remap(bio))
2057 goto end_io;
2056a782 2058
5a7bbad2
CH
2059 if (bio_check_eod(bio, nr_sectors))
2060 goto end_io;
1e87901e 2061
5a7bbad2
CH
2062 /*
2063 * Filter flush bio's early so that make_request based
2064 * drivers without flush support don't have to worry
2065 * about them.
2066 */
f3a8ab7d 2067 if (op_is_flush(bio->bi_opf) &&
c888a8f9 2068 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
1eff9d32 2069 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
5a7bbad2 2070 if (!nr_sectors) {
4e4cbee9 2071 status = BLK_STS_OK;
51fd77bd
JA
2072 goto end_io;
2073 }
5a7bbad2 2074 }
5ddfe969 2075
288dab8a
CH
2076 switch (bio_op(bio)) {
2077 case REQ_OP_DISCARD:
2078 if (!blk_queue_discard(q))
2079 goto not_supported;
2080 break;
2081 case REQ_OP_SECURE_ERASE:
2082 if (!blk_queue_secure_erase(q))
2083 goto not_supported;
2084 break;
2085 case REQ_OP_WRITE_SAME:
74d46992 2086 if (!q->limits.max_write_same_sectors)
288dab8a 2087 goto not_supported;
58886785 2088 break;
2d253440
ST
2089 case REQ_OP_ZONE_REPORT:
2090 case REQ_OP_ZONE_RESET:
74d46992 2091 if (!blk_queue_is_zoned(q))
2d253440 2092 goto not_supported;
288dab8a 2093 break;
a6f0788e 2094 case REQ_OP_WRITE_ZEROES:
74d46992 2095 if (!q->limits.max_write_zeroes_sectors)
a6f0788e
CK
2096 goto not_supported;
2097 break;
288dab8a
CH
2098 default:
2099 break;
5a7bbad2 2100 }
01edede4 2101
7f4b35d1
TH
2102 /*
2103 * Various block parts want %current->io_context and lazy ioc
2104 * allocation ends up trading a lot of pain for a small amount of
 2105 * memory. Just allocate it upfront. This may fail and the block
2106 * layer knows how to live with it.
2107 */
2108 create_io_context(GFP_ATOMIC, q->node);
2109
ae118896
TH
2110 if (!blkcg_bio_issue_check(q, bio))
2111 return false;
27a84d54 2112
fbbaf700
N
2113 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
2114 trace_block_bio_queue(q, bio);
2115 /* Now that enqueuing has been traced, we need to trace
2116 * completion as well.
2117 */
2118 bio_set_flag(bio, BIO_TRACE_COMPLETION);
2119 }
27a84d54 2120 return true;
a7384677 2121
288dab8a 2122not_supported:
4e4cbee9 2123 status = BLK_STS_NOTSUPP;
a7384677 2124end_io:
4e4cbee9 2125 bio->bi_status = status;
4246a0b6 2126 bio_endio(bio);
27a84d54 2127 return false;
1da177e4
LT
2128}
2129
27a84d54
CH
2130/**
2131 * generic_make_request - hand a buffer to its device driver for I/O
2132 * @bio: The bio describing the location in memory and on the device.
2133 *
2134 * generic_make_request() is used to make I/O requests of block
2135 * devices. It is passed a &struct bio, which describes the I/O that needs
2136 * to be done.
2137 *
2138 * generic_make_request() does not return any status. The
2139 * success/failure status of the request, along with notification of
2140 * completion, is delivered asynchronously through the bio->bi_end_io
 2141 * function described (one day) elsewhere.
2142 *
 2143 * The caller of generic_make_request must make sure that bi_io_vec
 2144 * is set to describe the memory buffer, and that bi_dev and bi_sector are
2145 * set to describe the device address, and the
2146 * bi_end_io and optionally bi_private are set to describe how
2147 * completion notification should be signaled.
2148 *
2149 * generic_make_request and the drivers it calls may use bi_next if this
2150 * bio happens to be merged with someone else, and may resubmit the bio to
2151 * a lower device by calling into generic_make_request recursively, which
2152 * means the bio should NOT be touched after the call to ->make_request_fn.
d89d8796 2153 */
dece1635 2154blk_qc_t generic_make_request(struct bio *bio)
d89d8796 2155{
f5fe1b51
N
2156 /*
2157 * bio_list_on_stack[0] contains bios submitted by the current
2158 * make_request_fn.
2159 * bio_list_on_stack[1] contains bios that were submitted before
2160 * the current make_request_fn, but that haven't been processed
2161 * yet.
2162 */
2163 struct bio_list bio_list_on_stack[2];
dece1635 2164 blk_qc_t ret = BLK_QC_T_NONE;
bddd87c7 2165
27a84d54 2166 if (!generic_make_request_checks(bio))
dece1635 2167 goto out;
27a84d54
CH
2168
2169 /*
2170 * We only want one ->make_request_fn to be active at a time, else
2171 * stack usage with stacked devices could be a problem. So use
 2172 * current->bio_list to keep a list of requests submitted by a
2173 * make_request_fn function. current->bio_list is also used as a
2174 * flag to say if generic_make_request is currently active in this
2175 * task or not. If it is NULL, then no make_request is active. If
2176 * it is non-NULL, then a make_request is active, and new requests
2177 * should be added at the tail
2178 */
bddd87c7 2179 if (current->bio_list) {
f5fe1b51 2180 bio_list_add(&current->bio_list[0], bio);
dece1635 2181 goto out;
d89d8796 2182 }
27a84d54 2183
d89d8796
NB
 2184 /* The following loop may be a bit non-obvious and so deserves some
2185 * explanation.
2186 * Before entering the loop, bio->bi_next is NULL (as all callers
2187 * ensure that) so we have a list with a single bio.
2188 * We pretend that we have just taken it off a longer list, so
bddd87c7
AM
2189 * we assign bio_list to a pointer to the bio_list_on_stack,
2190 * thus initialising the bio_list of new bios to be
27a84d54 2191 * added. ->make_request() may indeed add some more bios
d89d8796
NB
2192 * through a recursive call to generic_make_request. If it
2193 * did, we find a non-NULL value in bio_list and re-enter the loop
2194 * from the top. In this case we really did just take the bio
bddd87c7 2195 * off the top of the list (no pretending) and so remove it from
27a84d54 2196 * bio_list, and call into ->make_request() again.
d89d8796
NB
2197 */
2198 BUG_ON(bio->bi_next);
f5fe1b51
N
2199 bio_list_init(&bio_list_on_stack[0]);
2200 current->bio_list = bio_list_on_stack;
d89d8796 2201 do {
74d46992 2202 struct request_queue *q = bio->bi_disk->queue;
27a84d54 2203
03a07c92 2204 if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
79bd9959
N
2205 struct bio_list lower, same;
2206
2207 /* Create a fresh bio_list for all subordinate requests */
f5fe1b51
N
2208 bio_list_on_stack[1] = bio_list_on_stack[0];
2209 bio_list_init(&bio_list_on_stack[0]);
dece1635 2210 ret = q->make_request_fn(q, bio);
3ef28e83
DW
2211
2212 blk_queue_exit(q);
27a84d54 2213
79bd9959
N
2214 /* sort new bios into those for a lower level
2215 * and those for the same level
2216 */
2217 bio_list_init(&lower);
2218 bio_list_init(&same);
f5fe1b51 2219 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
74d46992 2220 if (q == bio->bi_disk->queue)
79bd9959
N
2221 bio_list_add(&same, bio);
2222 else
2223 bio_list_add(&lower, bio);
2224 /* now assemble so we handle the lowest level first */
f5fe1b51
N
2225 bio_list_merge(&bio_list_on_stack[0], &lower);
2226 bio_list_merge(&bio_list_on_stack[0], &same);
2227 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
3ef28e83 2228 } else {
03a07c92
GR
2229 if (unlikely(!blk_queue_dying(q) &&
2230 (bio->bi_opf & REQ_NOWAIT)))
2231 bio_wouldblock_error(bio);
2232 else
2233 bio_io_error(bio);
3ef28e83 2234 }
f5fe1b51 2235 bio = bio_list_pop(&bio_list_on_stack[0]);
d89d8796 2236 } while (bio);
bddd87c7 2237 current->bio_list = NULL; /* deactivate */
dece1635
JA
2238
2239out:
2240 return ret;
d89d8796 2241}
1da177e4
LT
2242EXPORT_SYMBOL(generic_make_request);
2243
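/*
 * Worked example (illustrative): suppose a bio submitted to dm0 causes
 * dm0's ->make_request_fn to emit one bio for the lower device sd0 and
 * to resubmit one more bio to dm0 itself. The sorting in
 * generic_make_request() places the sd0 bio ahead of the resubmitted
 * dm0 bio, so lower devices are serviced first and the traversal of
 * the device stack stays iterative instead of growing the call stack.
 */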
2244/**
710027a4 2245 * submit_bio - submit a bio to the block device layer for I/O
1da177e4
LT
2246 * @bio: The &struct bio which describes the I/O
2247 *
2248 * submit_bio() is very similar in purpose to generic_make_request(), and
2249 * uses that function to do most of the work. Both are fairly rough
710027a4 2250 * interfaces; @bio must be presetup and ready for I/O.
1da177e4
LT
2251 *
2252 */
4e49ea4a 2253blk_qc_t submit_bio(struct bio *bio)
1da177e4 2254{
bf2de6f5
JA
2255 /*
2256 * If it's a regular read/write or a barrier with data attached,
2257 * go through the normal accounting stuff before submission.
2258 */
e2a60da7 2259 if (bio_has_data(bio)) {
4363ac7c
MP
2260 unsigned int count;
2261
95fe6c1a 2262 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
74d46992 2263 count = queue_logical_block_size(bio->bi_disk->queue);
4363ac7c
MP
2264 else
2265 count = bio_sectors(bio);
2266
a8ebb056 2267 if (op_is_write(bio_op(bio))) {
bf2de6f5
JA
2268 count_vm_events(PGPGOUT, count);
2269 } else {
4f024f37 2270 task_io_account_read(bio->bi_iter.bi_size);
bf2de6f5
JA
2271 count_vm_events(PGPGIN, count);
2272 }
2273
2274 if (unlikely(block_dump)) {
2275 char b[BDEVNAME_SIZE];
8dcbdc74 2276 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
ba25f9dc 2277 current->comm, task_pid_nr(current),
a8ebb056 2278 op_is_write(bio_op(bio)) ? "WRITE" : "READ",
4f024f37 2279 (unsigned long long)bio->bi_iter.bi_sector,
74d46992 2280 bio_devname(bio, b), count);
bf2de6f5 2281 }
1da177e4
LT
2282 }
2283
dece1635 2284 return generic_make_request(bio);
1da177e4 2285}
1da177e4
LT
2286EXPORT_SYMBOL(submit_bio);
2287
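/*
 * Minimal submission sketch (illustrative; error handling omitted,
 * and my_end_io is an assumed caller-supplied completion handler):
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */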
82124d60 2288/**
bf4e6b4e
HR
2289 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 2290 * for the new queue limits
82124d60
KU
2291 * @q: the queue
2292 * @rq: the request being checked
2293 *
2294 * Description:
2295 * @rq may have been made based on weaker limitations of upper-level queues
2296 * in request stacking drivers, and it may violate the limitation of @q.
2297 * Since the block layer and the underlying device driver trust @rq
2298 * after it is inserted to @q, it should be checked against @q before
2299 * the insertion using this generic function.
2300 *
82124d60 2301 * Request stacking drivers like request-based dm may change the queue
bf4e6b4e
HR
2302 * limits when retrying requests on other queues. Those requests need
2303 * to be checked against the new queue limits again during dispatch.
82124d60 2304 */
bf4e6b4e
HR
2305static int blk_cloned_rq_check_limits(struct request_queue *q,
2306 struct request *rq)
82124d60 2307{
8fe0d473 2308 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
82124d60
KU
2309 printk(KERN_ERR "%s: over max size limit.\n", __func__);
2310 return -EIO;
2311 }
2312
2313 /*
 2314 * The queue's settings related to segment counting, like q->bounce_pfn,
 2315 * may differ from those of other stacking queues.
 2316 * Recalculate it to check the request correctly against this queue's
 2317 * limits.
2318 */
2319 blk_recalc_rq_segments(rq);
8a78362c 2320 if (rq->nr_phys_segments > queue_max_segments(q)) {
82124d60
KU
2321 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
2322 return -EIO;
2323 }
2324
2325 return 0;
2326}
82124d60
KU
2327
2328/**
2329 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2330 * @q: the queue to submit the request
2331 * @rq: the request being queued
2332 */
2a842aca 2333blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
82124d60
KU
2334{
2335 unsigned long flags;
4853abaa 2336 int where = ELEVATOR_INSERT_BACK;
82124d60 2337
bf4e6b4e 2338 if (blk_cloned_rq_check_limits(q, rq))
2a842aca 2339 return BLK_STS_IOERR;
82124d60 2340
b2c9cd37
AM
2341 if (rq->rq_disk &&
2342 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
2a842aca 2343 return BLK_STS_IOERR;
82124d60 2344
7fb4898e
KB
2345 if (q->mq_ops) {
2346 if (blk_queue_io_stat(q))
2347 blk_account_io_start(rq, true);
157f377b
JA
2348 /*
2349 * Since we have a scheduler attached on the top device,
2350 * bypass a potential scheduler on the bottom device for
2351 * insert.
2352 */
2353 blk_mq_request_bypass_insert(rq);
2a842aca 2354 return BLK_STS_OK;
7fb4898e
KB
2355 }
2356
82124d60 2357 spin_lock_irqsave(q->queue_lock, flags);
3f3299d5 2358 if (unlikely(blk_queue_dying(q))) {
8ba61435 2359 spin_unlock_irqrestore(q->queue_lock, flags);
2a842aca 2360 return BLK_STS_IOERR;
8ba61435 2361 }
82124d60
KU
2362
2363 /*
 2364 * The submitted request must be dequeued before calling this function
2365 * because it will be linked to another request_queue
2366 */
2367 BUG_ON(blk_queued_rq(rq));
2368
f73f44eb 2369 if (op_is_flush(rq->cmd_flags))
4853abaa
JM
2370 where = ELEVATOR_INSERT_FLUSH;
2371
2372 add_acct_request(q, rq, where);
e67b77c7
JM
2373 if (where == ELEVATOR_INSERT_FLUSH)
2374 __blk_run_queue(q);
82124d60
KU
2375 spin_unlock_irqrestore(q->queue_lock, flags);
2376
2a842aca 2377 return BLK_STS_OK;
82124d60
KU
2378}
2379EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
2380
80a761fd
TH
2381/**
2382 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
2383 * @rq: request to examine
2384 *
2385 * Description:
 2386 * A request could be a merge of IOs which require different failure
 2387 * handling. This function determines the number of bytes which
 2388 * can be failed from the beginning of the request without
 2389 * crossing into an area which needs to be retried further.
2390 *
2391 * Return:
2392 * The number of bytes to fail.
80a761fd
TH
2393 */
2394unsigned int blk_rq_err_bytes(const struct request *rq)
2395{
2396 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
2397 unsigned int bytes = 0;
2398 struct bio *bio;
2399
e8064021 2400 if (!(rq->rq_flags & RQF_MIXED_MERGE))
80a761fd
TH
2401 return blk_rq_bytes(rq);
2402
2403 /*
2404 * Currently the only 'mixing' which can happen is between
2405 * different fastfail types. We can safely fail portions
2406 * which have all the failfast bits that the first one has -
2407 * the ones which are at least as eager to fail as the first
2408 * one.
2409 */
2410 for (bio = rq->bio; bio; bio = bio->bi_next) {
1eff9d32 2411 if ((bio->bi_opf & ff) != ff)
80a761fd 2412 break;
4f024f37 2413 bytes += bio->bi_iter.bi_size;
80a761fd
TH
2414 }
2415
2416 /* this could lead to infinite loop */
2417 BUG_ON(blk_rq_bytes(rq) && !bytes);
2418 return bytes;
2419}
2420EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
2421
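/*
 * Worked example (illustrative): a mixed-merge request whose first
 * bio A (4096 bytes) carries REQ_FAILFAST_DEV while the second bio B
 * (4096 bytes) does not yields blk_rq_err_bytes() == 4096: only the
 * leading run of bios at least as eager to fail as the first one may
 * be failed without a retry.
 */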
320ae51f 2422void blk_account_io_completion(struct request *req, unsigned int bytes)
bc58ba94 2423{
c2553b58 2424 if (blk_do_io_stat(req)) {
bc58ba94
JA
2425 const int rw = rq_data_dir(req);
2426 struct hd_struct *part;
2427 int cpu;
2428
2429 cpu = part_stat_lock();
09e099d4 2430 part = req->part;
bc58ba94
JA
2431 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
2432 part_stat_unlock();
2433 }
2434}
2435
320ae51f 2436void blk_account_io_done(struct request *req)
bc58ba94 2437{
bc58ba94 2438 /*
dd4c133f
TH
2439 * Account IO completion. flush_rq isn't accounted as a
2440 * normal IO on queueing nor completion. Accounting the
2441 * containing request is enough.
bc58ba94 2442 */
e8064021 2443 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
bc58ba94
JA
2444 unsigned long duration = jiffies - req->start_time;
2445 const int rw = rq_data_dir(req);
2446 struct hd_struct *part;
2447 int cpu;
2448
2449 cpu = part_stat_lock();
09e099d4 2450 part = req->part;
bc58ba94
JA
2451
2452 part_stat_inc(cpu, part, ios[rw]);
2453 part_stat_add(cpu, part, ticks[rw], duration);
d62e26b3
JA
2454 part_round_stats(req->q, cpu, part);
2455 part_dec_in_flight(req->q, part, rw);
bc58ba94 2456
6c23a968 2457 hd_struct_put(part);
bc58ba94
JA
2458 part_stat_unlock();
2459 }
2460}
2461
47fafbc7 2462#ifdef CONFIG_PM
c8158819
LM
2463/*
2464 * Don't process normal requests when queue is suspended
2465 * or in the process of suspending/resuming
2466 */
2467static struct request *blk_pm_peek_request(struct request_queue *q,
2468 struct request *rq)
2469{
2470 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
e8064021 2471 (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
c8158819
LM
2472 return NULL;
2473 else
2474 return rq;
2475}
2476#else
2477static inline struct request *blk_pm_peek_request(struct request_queue *q,
2478 struct request *rq)
2479{
2480 return rq;
2481}
2482#endif
2483
320ae51f
JA
2484void blk_account_io_start(struct request *rq, bool new_io)
2485{
2486 struct hd_struct *part;
2487 int rw = rq_data_dir(rq);
2488 int cpu;
2489
2490 if (!blk_do_io_stat(rq))
2491 return;
2492
2493 cpu = part_stat_lock();
2494
2495 if (!new_io) {
2496 part = rq->part;
2497 part_stat_inc(cpu, part, merges[rw]);
2498 } else {
2499 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
2500 if (!hd_struct_try_get(part)) {
2501 /*
2502 * The partition is already being removed,
2503 * the request will be accounted on the disk only
2504 *
2505 * We take a reference on disk->part0 although that
2506 * partition will never be deleted, so we can treat
2507 * it as any other partition.
2508 */
2509 part = &rq->rq_disk->part0;
2510 hd_struct_get(part);
2511 }
d62e26b3
JA
2512 part_round_stats(rq->q, cpu, part);
2513 part_inc_in_flight(rq->q, part, rw);
320ae51f
JA
2514 rq->part = part;
2515 }
2516
2517 part_stat_unlock();
2518}
2519
3bcddeac 2520/**
9934c8c0
TH
2521 * blk_peek_request - peek at the top of a request queue
2522 * @q: request queue to peek at
2523 *
2524 * Description:
2525 * Return the request at the top of @q. The returned request
2526 * should be started using blk_start_request() before LLD starts
2527 * processing it.
2528 *
2529 * Return:
2530 * Pointer to the request at the top of @q if available. Null
2531 * otherwise.
9934c8c0
TH
2532 */
2533struct request *blk_peek_request(struct request_queue *q)
158dbda0
TH
2534{
2535 struct request *rq;
2536 int ret;
2537
2fff8a92 2538 lockdep_assert_held(q->queue_lock);
332ebbf7 2539 WARN_ON_ONCE(q->mq_ops);
2fff8a92 2540
158dbda0 2541 while ((rq = __elv_next_request(q)) != NULL) {
c8158819
LM
2542
2543 rq = blk_pm_peek_request(q, rq);
2544 if (!rq)
2545 break;
2546
e8064021 2547 if (!(rq->rq_flags & RQF_STARTED)) {
158dbda0
TH
2548 /*
2549 * This is the first time the device driver
2550 * sees this request (possibly after
2551 * requeueing). Notify IO scheduler.
2552 */
e8064021 2553 if (rq->rq_flags & RQF_SORTED)
158dbda0
TH
2554 elv_activate_rq(q, rq);
2555
2556 /*
 2557 * Just mark it as started even if we don't start
 2558 * it: a request that has been delayed should
 2559 * not be passed by new incoming requests.
2560 */
e8064021 2561 rq->rq_flags |= RQF_STARTED;
158dbda0
TH
2562 trace_block_rq_issue(q, rq);
2563 }
2564
2565 if (!q->boundary_rq || q->boundary_rq == rq) {
2566 q->end_sector = rq_end_sector(rq);
2567 q->boundary_rq = NULL;
2568 }
2569
e8064021 2570 if (rq->rq_flags & RQF_DONTPREP)
158dbda0
TH
2571 break;
2572
2e46e8b2 2573 if (q->dma_drain_size && blk_rq_bytes(rq)) {
158dbda0
TH
2574 /*
 2575 * Make sure space for the drain appears. We
 2576 * know we can do this because max_hw_segments
 2577 * has been adjusted to be one fewer than the
 2578 * device can handle.
2579 */
2580 rq->nr_phys_segments++;
2581 }
2582
2583 if (!q->prep_rq_fn)
2584 break;
2585
2586 ret = q->prep_rq_fn(q, rq);
2587 if (ret == BLKPREP_OK) {
2588 break;
2589 } else if (ret == BLKPREP_DEFER) {
2590 /*
2591 * the request may have been (partially) prepped.
2592 * we need to keep this request in the front to
e8064021 2593 * avoid resource deadlock. RQF_STARTED will
158dbda0
TH
2594 * prevent other fs requests from passing this one.
2595 */
2e46e8b2 2596 if (q->dma_drain_size && blk_rq_bytes(rq) &&
e8064021 2597 !(rq->rq_flags & RQF_DONTPREP)) {
158dbda0
TH
2598 /*
2599 * remove the space for the drain we added
2600 * so that we don't add it again
2601 */
2602 --rq->nr_phys_segments;
2603 }
2604
2605 rq = NULL;
2606 break;
0fb5b1fb 2607 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
e8064021 2608 rq->rq_flags |= RQF_QUIET;
c143dc90
JB
2609 /*
2610 * Mark this request as started so we don't trigger
2611 * any debug logic in the end I/O path.
2612 */
2613 blk_start_request(rq);
2a842aca
CH
2614 __blk_end_request_all(rq, ret == BLKPREP_INVALID ?
2615 BLK_STS_TARGET : BLK_STS_IOERR);
158dbda0
TH
2616 } else {
2617 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2618 break;
2619 }
2620 }
2621
2622 return rq;
2623}
9934c8c0 2624EXPORT_SYMBOL(blk_peek_request);
158dbda0 2625
5034435c 2626static void blk_dequeue_request(struct request *rq)
158dbda0 2627{
9934c8c0
TH
2628 struct request_queue *q = rq->q;
2629
158dbda0
TH
2630 BUG_ON(list_empty(&rq->queuelist));
2631 BUG_ON(ELV_ON_HASH(rq));
2632
2633 list_del_init(&rq->queuelist);
2634
2635 /*
 2636 * The time frame between a request being removed from the lists
 2637 * and when it is freed is accounted as I/O that is in progress on
 2638 * the driver side.
2639 */
9195291e 2640 if (blk_account_rq(rq)) {
0a7ae2ff 2641 q->in_flight[rq_is_sync(rq)]++;
9195291e
DS
2642 set_io_start_time_ns(rq);
2643 }
158dbda0
TH
2644}
2645
9934c8c0
TH
2646/**
2647 * blk_start_request - start request processing on the driver
2648 * @req: request to dequeue
2649 *
2650 * Description:
2651 * Dequeue @req and start timeout timer on it. This hands off the
2652 * request to the driver.
9934c8c0
TH
2653 */
2654void blk_start_request(struct request *req)
2655{
2fff8a92 2656 lockdep_assert_held(req->q->queue_lock);
332ebbf7 2657 WARN_ON_ONCE(req->q->mq_ops);
2fff8a92 2658
9934c8c0
TH
2659 blk_dequeue_request(req);
2660
cf43e6be 2661 if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
88eeca49 2662 blk_stat_set_issue(&req->issue_stat, blk_rq_sectors(req));
cf43e6be 2663 req->rq_flags |= RQF_STATS;
87760e5e 2664 wbt_issue(req->q->rq_wb, &req->issue_stat);
cf43e6be
JA
2665 }
2666
4912aa6c 2667 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
9934c8c0
TH
2668 blk_add_timer(req);
2669}
2670EXPORT_SYMBOL(blk_start_request);
2671
2672/**
2673 * blk_fetch_request - fetch a request from a request queue
2674 * @q: request queue to fetch a request from
2675 *
2676 * Description:
2677 * Return the request at the top of @q. The request is started on
2678 * return and LLD can start processing it immediately.
2679 *
2680 * Return:
2681 * Pointer to the request at the top of @q if available. Null
2682 * otherwise.
9934c8c0
TH
2683 */
2684struct request *blk_fetch_request(struct request_queue *q)
2685{
2686 struct request *rq;
2687
2fff8a92 2688 lockdep_assert_held(q->queue_lock);
332ebbf7 2689 WARN_ON_ONCE(q->mq_ops);
2fff8a92 2690
9934c8c0
TH
2691 rq = blk_peek_request(q);
2692 if (rq)
2693 blk_start_request(rq);
2694 return rq;
2695}
2696EXPORT_SYMBOL(blk_fetch_request);
2697
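/*
 * Typical legacy request_fn shape (sketch; my_handle_rq is an assumed
 * driver helper, and the queue lock is held on entry as required):
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL)
 *			my_handle_rq(rq);
 *	}
 */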
3bcddeac 2698/**
2e60e022 2699 * blk_update_request - Special helper function for request stacking drivers
8ebf9756 2700 * @req: the request being processed
2a842aca 2701 * @error: block status code
8ebf9756 2702 * @nr_bytes: number of bytes to complete @req
3bcddeac
KU
2703 *
2704 * Description:
8ebf9756
RD
2705 * Ends I/O on a number of bytes attached to @req, but doesn't complete
2706 * the request structure even if @req doesn't have leftover.
2707 * If @req has leftover, sets it up for the next range of segments.
2e60e022
TH
2708 *
2709 * This special helper function is only for request stacking drivers
2710 * (e.g. request-based dm) so that they can handle partial completion.
2711 * Actual device drivers should use blk_end_request instead.
2712 *
2713 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2714 * %false return from this function.
3bcddeac
KU
2715 *
2716 * Return:
2e60e022
TH
2717 * %false - this request doesn't have any more data
2718 * %true - this request has more data
3bcddeac 2719 **/
2a842aca
CH
2720bool blk_update_request(struct request *req, blk_status_t error,
2721 unsigned int nr_bytes)
1da177e4 2722{
f79ea416 2723 int total_bytes;
1da177e4 2724
2a842aca 2725 trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
4a0efdc9 2726
2e60e022
TH
2727 if (!req->bio)
2728 return false;
2729
2a842aca
CH
2730 if (unlikely(error && !blk_rq_is_passthrough(req) &&
2731 !(req->rq_flags & RQF_QUIET)))
2732 print_req_error(req, error);
1da177e4 2733
bc58ba94 2734 blk_account_io_completion(req, nr_bytes);
d72d904a 2735
f79ea416
KO
2736 total_bytes = 0;
2737 while (req->bio) {
2738 struct bio *bio = req->bio;
4f024f37 2739 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
1da177e4 2740
4f024f37 2741 if (bio_bytes == bio->bi_iter.bi_size)
1da177e4 2742 req->bio = bio->bi_next;
1da177e4 2743
fbbaf700
N
2744 /* Completion has already been traced */
2745 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
f79ea416 2746 req_bio_endio(req, bio, bio_bytes, error);
1da177e4 2747
f79ea416
KO
2748 total_bytes += bio_bytes;
2749 nr_bytes -= bio_bytes;
1da177e4 2750
f79ea416
KO
2751 if (!nr_bytes)
2752 break;
1da177e4
LT
2753 }
2754
2755 /*
2756 * completely done
2757 */
2e60e022
TH
2758 if (!req->bio) {
2759 /*
2760 * Reset counters so that the request stacking driver
2761 * can find how many bytes remain in the request
2762 * later.
2763 */
a2dec7b3 2764 req->__data_len = 0;
2e60e022
TH
2765 return false;
2766 }
1da177e4 2767
a2dec7b3 2768 req->__data_len -= total_bytes;
2e46e8b2
TH
2769
2770 /* update sector only for requests with clear definition of sector */
57292b58 2771 if (!blk_rq_is_passthrough(req))
a2dec7b3 2772 req->__sector += total_bytes >> 9;
2e46e8b2 2773
80a761fd 2774 /* mixed attributes always follow the first bio */
e8064021 2775 if (req->rq_flags & RQF_MIXED_MERGE) {
80a761fd 2776 req->cmd_flags &= ~REQ_FAILFAST_MASK;
1eff9d32 2777 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
80a761fd
TH
2778 }
2779
ed6565e7
CH
2780 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
2781 /*
2782 * If total number of sectors is less than the first segment
2783 * size, something has gone terribly wrong.
2784 */
2785 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2786 blk_dump_rq_flags(req, "request botched");
2787 req->__data_len = blk_rq_cur_bytes(req);
2788 }
2e46e8b2 2789
ed6565e7
CH
2790 /* recalculate the number of segments */
2791 blk_recalc_rq_segments(req);
2792 }
2e46e8b2 2793
2e60e022 2794 return true;
1da177e4 2795}
2e60e022 2796EXPORT_SYMBOL_GPL(blk_update_request);
1da177e4 2797
2a842aca 2798static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
2e60e022
TH
2799 unsigned int nr_bytes,
2800 unsigned int bidi_bytes)
5efccd17 2801{
2e60e022
TH
2802 if (blk_update_request(rq, error, nr_bytes))
2803 return true;
5efccd17 2804
2e60e022
TH
2805 /* Bidi request must be completed as a whole */
2806 if (unlikely(blk_bidi_rq(rq)) &&
2807 blk_update_request(rq->next_rq, error, bidi_bytes))
2808 return true;
5efccd17 2809
e2e1a148
JA
2810 if (blk_queue_add_random(rq->q))
2811 add_disk_randomness(rq->rq_disk);
2e60e022
TH
2812
2813 return false;
1da177e4
LT
2814}
2815
28018c24
JB
2816/**
2817 * blk_unprep_request - unprepare a request
2818 * @req: the request
2819 *
2820 * This function makes a request ready for complete resubmission (or
2821 * completion). It happens only after all error handling is complete,
2822 * so represents the appropriate moment to deallocate any resources
2823 * that were allocated to the request in the prep_rq_fn. The queue
2824 * lock is held when calling this.
2825 */
2826void blk_unprep_request(struct request *req)
2827{
2828 struct request_queue *q = req->q;
2829
e8064021 2830 req->rq_flags &= ~RQF_DONTPREP;
28018c24
JB
2831 if (q->unprep_rq_fn)
2832 q->unprep_rq_fn(q, req);
2833}
2834EXPORT_SYMBOL_GPL(blk_unprep_request);
2835
2a842aca 2836void blk_finish_request(struct request *req, blk_status_t error)
1da177e4 2837{
cf43e6be
JA
2838 struct request_queue *q = req->q;
2839
2fff8a92 2840 lockdep_assert_held(req->q->queue_lock);
332ebbf7 2841 WARN_ON_ONCE(q->mq_ops);
2fff8a92 2842
cf43e6be 2843 if (req->rq_flags & RQF_STATS)
34dbad5d 2844 blk_stat_add(req);
cf43e6be 2845
e8064021 2846 if (req->rq_flags & RQF_QUEUED)
cf43e6be 2847 blk_queue_end_tag(q, req);
b8286239 2848
ba396a6c 2849 BUG_ON(blk_queued_rq(req));
1da177e4 2850
57292b58 2851 if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
dc3b17cc 2852 laptop_io_completion(req->q->backing_dev_info);
1da177e4 2853
e78042e5
MA
2854 blk_delete_timer(req);
2855
e8064021 2856 if (req->rq_flags & RQF_DONTPREP)
28018c24
JB
2857 blk_unprep_request(req);
2858
bc58ba94 2859 blk_account_io_done(req);
b8286239 2860
87760e5e
JA
2861 if (req->end_io) {
2862 wbt_done(req->q->rq_wb, &req->issue_stat);
8ffdc655 2863 req->end_io(req, error);
87760e5e 2864 } else {
b8286239
KU
2865 if (blk_bidi_rq(req))
2866 __blk_put_request(req->next_rq->q, req->next_rq);
2867
cf43e6be 2868 __blk_put_request(q, req);
b8286239 2869 }
1da177e4 2870}
12120077 2871EXPORT_SYMBOL(blk_finish_request);
1da177e4 2872
3b11313a 2873/**
2e60e022
TH
2874 * blk_end_bidi_request - Complete a bidi request
2875 * @rq: the request to complete
2a842aca 2876 * @error: block status code
2e60e022
TH
2877 * @nr_bytes: number of bytes to complete @rq
2878 * @bidi_bytes: number of bytes to complete @rq->next_rq
a0cd1285
JA
2879 *
2880 * Description:
e3a04fe3 2881 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2e60e022
TH
 2882 * Drivers that support bidi can safely call this function for any
 2883 * type of request, bidi or uni. In the latter case @bidi_bytes is
2884 * just ignored.
336cdb40
KU
2885 *
2886 * Return:
2e60e022
TH
2887 * %false - we are done with this request
2888 * %true - still buffers pending for this request
a0cd1285 2889 **/
2a842aca 2890static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
32fab448
KU
2891 unsigned int nr_bytes, unsigned int bidi_bytes)
2892{
336cdb40 2893 struct request_queue *q = rq->q;
2e60e022 2894 unsigned long flags;
32fab448 2895
332ebbf7
BVA
2896 WARN_ON_ONCE(q->mq_ops);
2897
2e60e022
TH
2898 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2899 return true;
32fab448 2900
336cdb40 2901 spin_lock_irqsave(q->queue_lock, flags);
2e60e022 2902 blk_finish_request(rq, error);
336cdb40
KU
2903 spin_unlock_irqrestore(q->queue_lock, flags);
2904
2e60e022 2905 return false;
32fab448
KU
2906}
2907
336cdb40 2908/**
2e60e022
TH
2909 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2910 * @rq: the request to complete
2a842aca 2911 * @error: block status code
e3a04fe3
KU
2912 * @nr_bytes: number of bytes to complete @rq
2913 * @bidi_bytes: number of bytes to complete @rq->next_rq
336cdb40
KU
2914 *
2915 * Description:
2e60e022
TH
2916 * Identical to blk_end_bidi_request() except that queue lock is
2917 * assumed to be locked on entry and remains so on return.
336cdb40
KU
2918 *
2919 * Return:
2e60e022
TH
2920 * %false - we are done with this request
2921 * %true - still buffers pending for this request
336cdb40 2922 **/
2a842aca 2923static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
b1f74493 2924 unsigned int nr_bytes, unsigned int bidi_bytes)
336cdb40 2925{
2fff8a92 2926 lockdep_assert_held(rq->q->queue_lock);
332ebbf7 2927 WARN_ON_ONCE(rq->q->mq_ops);
2fff8a92 2928
2e60e022
TH
2929 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2930 return true;
336cdb40 2931
2e60e022 2932 blk_finish_request(rq, error);
336cdb40 2933
2e60e022 2934 return false;
336cdb40 2935}
e19a3ab0
KU
2936
2937/**
2938 * blk_end_request - Helper function for drivers to complete the request.
2939 * @rq: the request being processed
2a842aca 2940 * @error: block status code
e19a3ab0
KU
2941 * @nr_bytes: number of bytes to complete
2942 *
2943 * Description:
2944 * Ends I/O on a number of bytes attached to @rq.
2945 * If @rq has leftover, sets it up for the next range of segments.
2946 *
2947 * Return:
b1f74493
FT
2948 * %false - we are done with this request
2949 * %true - still buffers pending for this request
e19a3ab0 2950 **/
2a842aca
CH
2951bool blk_end_request(struct request *rq, blk_status_t error,
2952 unsigned int nr_bytes)
e19a3ab0 2953{
332ebbf7 2954 WARN_ON_ONCE(rq->q->mq_ops);
b1f74493 2955 return blk_end_bidi_request(rq, error, nr_bytes, 0);
e19a3ab0 2956}
56ad1740 2957EXPORT_SYMBOL(blk_end_request);
336cdb40
KU
2958
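/*
 * Partial-completion sketch (illustrative): a driver that has moved
 * "done" bytes of a request can complete just that much. A %true
 * return means the request is still live with its remaining range
 * set up, so it must not be reused or freed yet.
 *
 *	if (blk_end_request(rq, BLK_STS_OK, done))
 *		...rq still has bytes pending, keep driving it...
 */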
2959/**
b1f74493
FT
 2960 * blk_end_request_all - Helper function for drivers to finish the request.
2961 * @rq: the request to finish
2a842aca 2962 * @error: block status code
336cdb40
KU
2963 *
2964 * Description:
b1f74493
FT
2965 * Completely finish @rq.
2966 */
2a842aca 2967void blk_end_request_all(struct request *rq, blk_status_t error)
336cdb40 2968{
b1f74493
FT
2969 bool pending;
2970 unsigned int bidi_bytes = 0;
336cdb40 2971
b1f74493
FT
2972 if (unlikely(blk_bidi_rq(rq)))
2973 bidi_bytes = blk_rq_bytes(rq->next_rq);
336cdb40 2974
b1f74493
FT
2975 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2976 BUG_ON(pending);
2977}
56ad1740 2978EXPORT_SYMBOL(blk_end_request_all);
336cdb40 2979
e3a04fe3 2980/**
b1f74493
FT
2981 * __blk_end_request - Helper function for drivers to complete the request.
2982 * @rq: the request being processed
2a842aca 2983 * @error: block status code
b1f74493 2984 * @nr_bytes: number of bytes to complete
e3a04fe3
KU
2985 *
2986 * Description:
b1f74493 2987 * Must be called with queue lock held unlike blk_end_request().
e3a04fe3
KU
2988 *
2989 * Return:
b1f74493
FT
2990 * %false - we are done with this request
2991 * %true - still buffers pending for this request
e3a04fe3 2992 **/
2a842aca
CH
2993bool __blk_end_request(struct request *rq, blk_status_t error,
2994 unsigned int nr_bytes)
e3a04fe3 2995{
2fff8a92 2996 lockdep_assert_held(rq->q->queue_lock);
332ebbf7 2997 WARN_ON_ONCE(rq->q->mq_ops);
2fff8a92 2998
b1f74493 2999 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
e3a04fe3 3000}
56ad1740 3001EXPORT_SYMBOL(__blk_end_request);
e3a04fe3 3002
32fab448 3003/**
b1f74493
FT
 3004 * __blk_end_request_all - Helper function for drivers to finish the request.
3005 * @rq: the request to finish
2a842aca 3006 * @error: block status code
32fab448
KU
3007 *
3008 * Description:
b1f74493 3009 * Completely finish @rq. Must be called with queue lock held.
32fab448 3010 */
2a842aca 3011void __blk_end_request_all(struct request *rq, blk_status_t error)
32fab448 3012{
b1f74493
FT
3013 bool pending;
3014 unsigned int bidi_bytes = 0;
3015
2fff8a92 3016 lockdep_assert_held(rq->q->queue_lock);
332ebbf7 3017 WARN_ON_ONCE(rq->q->mq_ops);
2fff8a92 3018
b1f74493
FT
3019 if (unlikely(blk_bidi_rq(rq)))
3020 bidi_bytes = blk_rq_bytes(rq->next_rq);
3021
3022 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
3023 BUG_ON(pending);
32fab448 3024}
56ad1740 3025EXPORT_SYMBOL(__blk_end_request_all);
32fab448 3026
e19a3ab0 3027/**
b1f74493
FT
3028 * __blk_end_request_cur - Helper function to finish the current request chunk.
3029 * @rq: the request to finish the current chunk for
2a842aca 3030 * @error: block status code
e19a3ab0
KU
3031 *
3032 * Description:
b1f74493
FT
3033 * Complete the current consecutively mapped chunk from @rq. Must
3034 * be called with queue lock held.
e19a3ab0
KU
3035 *
3036 * Return:
b1f74493
FT
3037 * %false - we are done with this request
3038 * %true - still buffers pending for this request
3039 */
2a842aca 3040bool __blk_end_request_cur(struct request *rq, blk_status_t error)
e19a3ab0 3041{
b1f74493 3042 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
e19a3ab0 3043}
56ad1740 3044EXPORT_SYMBOL(__blk_end_request_cur);
e19a3ab0 3045
86db1e29
JA
3046void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
3047 struct bio *bio)
1da177e4 3048{
b4f42e28 3049 if (bio_has_data(bio))
fb2dce86 3050 rq->nr_phys_segments = bio_phys_segments(q, bio);
b4f42e28 3051
4f024f37 3052 rq->__data_len = bio->bi_iter.bi_size;
1da177e4 3053 rq->bio = rq->biotail = bio;
1da177e4 3054
74d46992
CH
3055 if (bio->bi_disk)
3056 rq->rq_disk = bio->bi_disk;
66846572 3057}
1da177e4 3058
2d4dc890
IL
3059#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
3060/**
3061 * rq_flush_dcache_pages - Helper function to flush all pages in a request
3062 * @rq: the request to be flushed
3063 *
3064 * Description:
3065 * Flush all pages in @rq.
3066 */
3067void rq_flush_dcache_pages(struct request *rq)
3068{
3069 struct req_iterator iter;
7988613b 3070 struct bio_vec bvec;
2d4dc890
IL
3071
3072 rq_for_each_segment(bvec, rq, iter)
7988613b 3073 flush_dcache_page(bvec.bv_page);
2d4dc890
IL
3074}
3075EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
3076#endif
3077
ef9e3fac
KU
3078/**
3079 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
3080 * @q : the queue of the device being checked
3081 *
3082 * Description:
3083 * Check if underlying low-level drivers of a device are busy.
 3084 * If the drivers want to export their busy state, they must set their own
3085 * exporting function using blk_queue_lld_busy() first.
3086 *
3087 * Basically, this function is used only by request stacking drivers
3088 * to stop dispatching requests to underlying devices when underlying
 3089 * devices are busy. This behavior allows more I/O merging on the queue
 3090 * of the request stacking driver and prevents I/O throughput regressions
 3091 * under burst I/O load.
3092 *
3093 * Return:
3094 * 0 - Not busy (The request stacking driver should dispatch request)
3095 * 1 - Busy (The request stacking driver should stop dispatching request)
3096 */
3097int blk_lld_busy(struct request_queue *q)
3098{
3099 if (q->lld_busy_fn)
3100 return q->lld_busy_fn(q);
3101
3102 return 0;
3103}
3104EXPORT_SYMBOL_GPL(blk_lld_busy);
3105
78d8e58a
MS
3106/**
3107 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
3108 * @rq: the clone request to be cleaned up
3109 *
3110 * Description:
3111 * Free all bios in @rq for a cloned request.
3112 */
3113void blk_rq_unprep_clone(struct request *rq)
3114{
3115 struct bio *bio;
3116
3117 while ((bio = rq->bio) != NULL) {
3118 rq->bio = bio->bi_next;
3119
3120 bio_put(bio);
3121 }
3122}
3123EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3124
3125/*
3126 * Copy attributes of the original request to the clone request.
3127 * The actual data parts (e.g. ->cmd, ->sense) are not copied.
3128 */
3129static void __blk_rq_prep_clone(struct request *dst, struct request *src)
b0fd271d
KU
3130{
3131 dst->cpu = src->cpu;
b0fd271d
KU
3132 dst->__sector = blk_rq_pos(src);
3133 dst->__data_len = blk_rq_bytes(src);
3134 dst->nr_phys_segments = src->nr_phys_segments;
3135 dst->ioprio = src->ioprio;
3136 dst->extra_len = src->extra_len;
78d8e58a
MS
3137}
3138
3139/**
3140 * blk_rq_prep_clone - Helper function to setup clone request
3141 * @rq: the request to be setup
3142 * @rq_src: original request to be cloned
3143 * @bs: bio_set that bios for clone are allocated from
3144 * @gfp_mask: memory allocation mask for bio
3145 * @bio_ctr: setup function to be called for each clone bio.
3146 * Returns %0 for success, non %0 for failure.
3147 * @data: private data to be passed to @bio_ctr
3148 *
3149 * Description:
3150 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3151 * The actual data parts of @rq_src (e.g. ->cmd, ->sense)
3152 * are not copied, and copying such parts is the caller's responsibility.
3153 * Also, pages which the original bios are pointing to are not copied
 3154 * and the cloned bios just point to the same pages.
3155 * So cloned bios must be completed before original bios, which means
3156 * the caller must complete @rq before @rq_src.
3157 */
3158int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3159 struct bio_set *bs, gfp_t gfp_mask,
3160 int (*bio_ctr)(struct bio *, struct bio *, void *),
3161 void *data)
3162{
3163 struct bio *bio, *bio_src;
3164
3165 if (!bs)
3166 bs = fs_bio_set;
3167
3168 __rq_for_each_bio(bio_src, rq_src) {
3169 bio = bio_clone_fast(bio_src, gfp_mask, bs);
3170 if (!bio)
3171 goto free_and_out;
3172
3173 if (bio_ctr && bio_ctr(bio, bio_src, data))
3174 goto free_and_out;
3175
3176 if (rq->bio) {
3177 rq->biotail->bi_next = bio;
3178 rq->biotail = bio;
3179 } else
3180 rq->bio = rq->biotail = bio;
3181 }
3182
3183 __blk_rq_prep_clone(rq, rq_src);
3184
3185 return 0;
3186
3187free_and_out:
3188 if (bio)
3189 bio_put(bio);
3190 blk_rq_unprep_clone(rq);
3191
3192 return -ENOMEM;
b0fd271d
KU
3193}
3194EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
3195
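/*
 * Cloning sketch (illustrative; allocation of "clone" from the lower
 * device's queue and error handling are omitted): a request stacking
 * driver clones @rq_src, dispatches the clone to the lower device,
 * and completes the clone before @rq_src, as required above.
 *
 *	blk_rq_prep_clone(clone, rq_src, bs, GFP_ATOMIC, NULL, NULL);
 *	blk_insert_cloned_request(clone->q, clone);
 */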
59c3d45e 3196int kblockd_schedule_work(struct work_struct *work)
1da177e4
LT
3197{
3198 return queue_work(kblockd_workqueue, work);
3199}
1da177e4
LT
3200EXPORT_SYMBOL(kblockd_schedule_work);
3201
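/*
 * Usage sketch (illustrative; my_work_fn is an assumed handler): work
 * punted to kblockd runs in process context on the block layer's
 * shared workqueue, which is what the async queue-run paths use.
 *
 *	static void my_work_fn(struct work_struct *work) { ... }
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	kblockd_schedule_work(&my_work);
 */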
ee63cfa7
JA
3202int kblockd_schedule_work_on(int cpu, struct work_struct *work)
3203{
3204 return queue_work_on(cpu, kblockd_workqueue, work);
3205}
3206EXPORT_SYMBOL(kblockd_schedule_work_on);
3207
818cd1cb
JA
3208int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
3209 unsigned long delay)
3210{
3211 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
3212}
3213EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
3214
59c3d45e
JA
3215int kblockd_schedule_delayed_work(struct delayed_work *dwork,
3216 unsigned long delay)
e43473b7
VG
3217{
3218 return queue_delayed_work(kblockd_workqueue, dwork, delay);
3219}
3220EXPORT_SYMBOL(kblockd_schedule_delayed_work);
3221
8ab14595
JA
3222int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
3223 unsigned long delay)
3224{
3225 return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
3226}
3227EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
3228
75df7136
SJ
3229/**
3230 * blk_start_plug - initialize blk_plug and track it inside the task_struct
3231 * @plug: The &struct blk_plug that needs to be initialized
3232 *
3233 * Description:
3234 * Tracking blk_plug inside the task_struct will help with auto-flushing the
3235 * pending I/O should the task end up blocking between blk_start_plug() and
3236 * blk_finish_plug(). This is important from a performance perspective, but
3237 * also ensures that we don't deadlock. For instance, if the task is blocking
3238 * for a memory allocation, memory reclaim could end up wanting to free a
3239 * page belonging to that request that is currently residing in our private
3240 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
3241 * this kind of deadlock.
3242 */
73c10101
JA
3243void blk_start_plug(struct blk_plug *plug)
3244{
3245 struct task_struct *tsk = current;
3246
dd6cf3e1
SL
3247 /*
3248 * If this is a nested plug, don't actually assign it.
3249 */
3250 if (tsk->plug)
3251 return;
3252
73c10101 3253 INIT_LIST_HEAD(&plug->list);
320ae51f 3254 INIT_LIST_HEAD(&plug->mq_list);
048c9374 3255 INIT_LIST_HEAD(&plug->cb_list);
73c10101 3256 /*
dd6cf3e1
SL
3257 * Store ordering should not be needed here, since a potential
3258 * preempt will imply a full memory barrier
73c10101 3259 */
dd6cf3e1 3260 tsk->plug = plug;
73c10101
JA
3261}
3262EXPORT_SYMBOL(blk_start_plug);
3263
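/*
 * Usage sketch (illustrative): a submitter batches bios under a plug;
 * nothing is dispatched until the plug is finished or the task
 * schedules out, which is what enables the sorting and merging done
 * in blk_flush_plug_list() below.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */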
3264static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
3265{
3266 struct request *rqa = container_of(a, struct request, queuelist);
3267 struct request *rqb = container_of(b, struct request, queuelist);
3268
975927b9
JM
3269 return !(rqa->q < rqb->q ||
3270 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
73c10101
JA
3271}
3272
49cac01e
JA
3273/*
3274 * If 'from_schedule' is true, then postpone the dispatch of requests
 3275 * until a safe kblockd context. We do this to avoid accidental big
 3276 * additional stack usage in driver dispatch, in places where the original
3277 * plugger did not intend it.
3278 */
f6603783 3279static void queue_unplugged(struct request_queue *q, unsigned int depth,
49cac01e 3280 bool from_schedule)
99e22598 3281 __releases(q->queue_lock)
94b5eb28 3282{
2fff8a92
BVA
3283 lockdep_assert_held(q->queue_lock);
3284
49cac01e 3285 trace_block_unplug(q, depth, !from_schedule);
99e22598 3286
70460571 3287 if (from_schedule)
24ecfbe2 3288 blk_run_queue_async(q);
70460571 3289 else
24ecfbe2 3290 __blk_run_queue(q);
70460571 3291 spin_unlock(q->queue_lock);
94b5eb28
JA
3292}
3293
74018dc3 3294static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
048c9374
N
3295{
3296 LIST_HEAD(callbacks);
3297
2a7d5559
SL
3298 while (!list_empty(&plug->cb_list)) {
3299 list_splice_init(&plug->cb_list, &callbacks);
048c9374 3300
2a7d5559
SL
3301 while (!list_empty(&callbacks)) {
3302 struct blk_plug_cb *cb = list_first_entry(&callbacks,
048c9374
N
3303 struct blk_plug_cb,
3304 list);
2a7d5559 3305 list_del(&cb->list);
74018dc3 3306 cb->callback(cb, from_schedule);
2a7d5559 3307 }
048c9374
N
3308 }
3309}
3310
9cbb1750
N
3311struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
3312 int size)
3313{
3314 struct blk_plug *plug = current->plug;
3315 struct blk_plug_cb *cb;
3316
3317 if (!plug)
3318 return NULL;
3319
3320 list_for_each_entry(cb, &plug->cb_list, list)
3321 if (cb->callback == unplug && cb->data == data)
3322 return cb;
3323
3324 /* Not currently on the callback list */
3325 BUG_ON(size < sizeof(*cb));
3326 cb = kzalloc(size, GFP_ATOMIC);
3327 if (cb) {
3328 cb->data = data;
3329 cb->callback = unplug;
3330 list_add(&cb->list, &plug->cb_list);
3331 }
3332 return cb;
3333}
3334EXPORT_SYMBOL(blk_check_plugged);
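
/*
 * Example usage (sketch, modeled on how stacking drivers such as md use
 * this API; the example_* names are hypothetical): embed blk_plug_cb in a
 * private structure and let blk_check_plugged() find-or-allocate it once
 * per plug. The callback owns the cb and must free it.
 */
struct example_plug_cb {
	struct blk_plug_cb cb;
	int pending;
};

static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct example_plug_cb *ecb =
		container_of(cb, struct example_plug_cb, cb);

	/* issue the ecb->pending deferred units of work here */
	kfree(ecb);
}

static bool example_defer_io(void *data)
{
	struct blk_plug_cb *cb = blk_check_plugged(example_unplug, data,
					sizeof(struct example_plug_cb));

	if (!cb)
		return false;	/* no plug active: caller issues directly */
	container_of(cb, struct example_plug_cb, cb)->pending++;
	return true;		/* deferred until the plug is flushed */
}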
3335
49cac01e 3336void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
73c10101
JA
3337{
3338 struct request_queue *q;
3339 unsigned long flags;
3340 struct request *rq;
109b8129 3341 LIST_HEAD(list);
94b5eb28 3342 unsigned int depth;
73c10101 3343
74018dc3 3344 flush_plug_callbacks(plug, from_schedule);
320ae51f
JA
3345
3346 if (!list_empty(&plug->mq_list))
3347 blk_mq_flush_plug_list(plug, from_schedule);
3348
73c10101
JA
3349 if (list_empty(&plug->list))
3350 return;
3351
109b8129
N
3352 list_splice_init(&plug->list, &list);
3353
422765c2 3354 list_sort(NULL, &list, plug_rq_cmp);
73c10101
JA
3355
3356 q = NULL;
94b5eb28 3357 depth = 0;
18811272
JA
3358
3359 /*
3360 * Save and disable interrupts here, to avoid doing it for every
3361 * queue lock we have to take.
3362 */
73c10101 3363 local_irq_save(flags);
109b8129
N
3364 while (!list_empty(&list)) {
3365 rq = list_entry_rq(list.next);
73c10101 3366 list_del_init(&rq->queuelist);
73c10101
JA
3367 BUG_ON(!rq->q);
3368 if (rq->q != q) {
99e22598
JA
3369 /*
3370 * This drops the queue lock
3371 */
3372 if (q)
49cac01e 3373 queue_unplugged(q, depth, from_schedule);
73c10101 3374 q = rq->q;
94b5eb28 3375 depth = 0;
73c10101
JA
3376 spin_lock(q->queue_lock);
3377 }
8ba61435
TH
3378
3379 /*
3380 * Short-circuit if @q is dead
3381 */
3f3299d5 3382 if (unlikely(blk_queue_dying(q))) {
2a842aca 3383 __blk_end_request_all(rq, BLK_STS_IOERR);
8ba61435
TH
3384 continue;
3385 }
3386
73c10101
JA
3387 /*
3388 * rq is already accounted, so use raw insert
3389 */
f73f44eb 3390 if (op_is_flush(rq->cmd_flags))
401a18e9
JA
3391 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3392 else
3393 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
94b5eb28
JA
3394
3395 depth++;
73c10101
JA
3396 }
3397
99e22598
JA
3398 /*
3399 * This drops the queue lock
3400 */
3401 if (q)
49cac01e 3402 queue_unplugged(q, depth, from_schedule);
73c10101 3403
73c10101
JA
3404 local_irq_restore(flags);
3405}
73c10101
JA
3406
3407void blk_finish_plug(struct blk_plug *plug)
3408{
dd6cf3e1
SL
3409 if (plug != current->plug)
3410 return;
f6603783 3411 blk_flush_plug_list(plug, false);
73c10101 3412
dd6cf3e1 3413 current->plug = NULL;
73c10101 3414}
88b996cd 3415EXPORT_SYMBOL(blk_finish_plug);
73c10101 3416
47fafbc7 3417#ifdef CONFIG_PM
6c954667
LM
3418/**
3419 * blk_pm_runtime_init - Block layer runtime PM initialization routine
3420 * @q: the queue of the device
3421 * @dev: the device the queue belongs to
3422 *
3423 * Description:
3424 * Initialize runtime-PM-related fields for @q and start auto suspend for
3425 * @dev. Drivers that want to take advantage of request-based runtime PM
3426 * should call this function after @dev has been initialized, and its
 3427 * request queue @q has been allocated, and runtime PM for it cannot happen
 3428 * yet (either because it is disabled/forbidden or its usage_count > 0). In
 3429 * most cases, the driver should call this function before any I/O has taken
 3430 * place.
3430 *
 3431 * This function takes care of setting up autosuspend for the device;
 3432 * the autosuspend delay is set to -1 to make runtime suspend impossible
 3433 * until an updated value is set by either the user or the driver. Drivers
 3434 * do not need to touch other autosuspend settings.
3435 *
 3436 * The block layer runtime PM is request based, so it only works for drivers
 3437 * that use requests as their I/O unit, not for those that use bios directly.
3438 */
3439void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3440{
765e40b6
CH
3441	/* no support for RQF_PM and ->rpm_status in blk-mq yet */
3442 if (q->mq_ops)
3443 return;
3444
6c954667
LM
3445 q->dev = dev;
3446 q->rpm_status = RPM_ACTIVE;
3447 pm_runtime_set_autosuspend_delay(q->dev, -1);
3448 pm_runtime_use_autosuspend(q->dev);
3449}
3450EXPORT_SYMBOL(blk_pm_runtime_init);
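
/*
 * Example usage (sketch; the example_* names and the 5s delay are
 * hypothetical): a legacy request-based driver wires up runtime PM at
 * probe time. The -1 default set above blocks autosuspend until the
 * driver (or the user, via sysfs) overrides it.
 */
static void example_setup_runtime_pm(struct request_queue *q,
				     struct device *dev)
{
	blk_pm_runtime_init(q, dev);
	pm_runtime_set_autosuspend_delay(dev, 5000);	/* 5s of idleness */
	pm_runtime_allow(dev);	/* or leave this to userspace policy */
}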
3451
3452/**
3453 * blk_pre_runtime_suspend - Pre runtime suspend check
3454 * @q: the queue of the device
3455 *
3456 * Description:
3457 * This function will check if runtime suspend is allowed for the device
 3458 * by examining whether there are any requests pending in the queue. If there
 3459 * are requests pending, the device cannot be runtime suspended; otherwise,
3460 * the queue's status will be updated to SUSPENDING and the driver can
3461 * proceed to suspend the device.
3462 *
 3463 * If suspend is not allowed, we mark last busy for the device so that
 3464 * the runtime PM core will try to autosuspend it some time later.
3465 *
3466 * This function should be called near the start of the device's
3467 * runtime_suspend callback.
3468 *
3469 * Return:
3470 * 0 - OK to runtime suspend the device
3471 * -EBUSY - Device should not be runtime suspended
3472 */
3473int blk_pre_runtime_suspend(struct request_queue *q)
3474{
3475 int ret = 0;
3476
4fd41a85
KX
3477 if (!q->dev)
3478 return ret;
3479
6c954667
LM
3480 spin_lock_irq(q->queue_lock);
3481 if (q->nr_pending) {
3482 ret = -EBUSY;
3483 pm_runtime_mark_last_busy(q->dev);
3484 } else {
3485 q->rpm_status = RPM_SUSPENDING;
3486 }
3487 spin_unlock_irq(q->queue_lock);
3488 return ret;
3489}
3490EXPORT_SYMBOL(blk_pre_runtime_suspend);
3491
3492/**
3493 * blk_post_runtime_suspend - Post runtime suspend processing
3494 * @q: the queue of the device
3495 * @err: return value of the device's runtime_suspend function
3496 *
3497 * Description:
3498 * Update the queue's runtime status according to the return value of the
3499 * device's runtime suspend function and mark last busy for the device so
3500 * that PM core will try to auto suspend the device at a later time.
3501 *
3502 * This function should be called near the end of the device's
3503 * runtime_suspend callback.
3504 */
3505void blk_post_runtime_suspend(struct request_queue *q, int err)
3506{
4fd41a85
KX
3507 if (!q->dev)
3508 return;
3509
6c954667
LM
3510 spin_lock_irq(q->queue_lock);
3511 if (!err) {
3512 q->rpm_status = RPM_SUSPENDED;
3513 } else {
3514 q->rpm_status = RPM_ACTIVE;
3515 pm_runtime_mark_last_busy(q->dev);
3516 }
3517 spin_unlock_irq(q->queue_lock);
3518}
3519EXPORT_SYMBOL(blk_post_runtime_suspend);
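
/*
 * Example usage (sketch): a driver's runtime_suspend callback brackets the
 * hardware power-down with the two hooks above. example_hw_suspend() is a
 * hypothetical stand-in for the device-specific suspend path.
 */
static int example_hw_suspend(struct device *dev)
{
	return 0;	/* power the device down here */
}

static int example_runtime_suspend(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);
	int err;

	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;	/* -EBUSY: requests pending, stay active */
	err = example_hw_suspend(dev);
	blk_post_runtime_suspend(q, err);
	return err;
}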
3520
3521/**
3522 * blk_pre_runtime_resume - Pre runtime resume processing
3523 * @q: the queue of the device
3524 *
3525 * Description:
3526 * Update the queue's runtime status to RESUMING in preparation for the
3527 * runtime resume of the device.
3528 *
3529 * This function should be called near the start of the device's
3530 * runtime_resume callback.
3531 */
3532void blk_pre_runtime_resume(struct request_queue *q)
3533{
4fd41a85
KX
3534 if (!q->dev)
3535 return;
3536
6c954667
LM
3537 spin_lock_irq(q->queue_lock);
3538 q->rpm_status = RPM_RESUMING;
3539 spin_unlock_irq(q->queue_lock);
3540}
3541EXPORT_SYMBOL(blk_pre_runtime_resume);
3542
3543/**
3544 * blk_post_runtime_resume - Post runtime resume processing
3545 * @q: the queue of the device
3546 * @err: return value of the device's runtime_resume function
3547 *
3548 * Description:
3549 * Update the queue's runtime status according to the return value of the
 3550 * device's runtime_resume function. If it resumed successfully, process
 3551 * the requests that were queued up while the device was resuming, and
 3552 * then mark last busy and initiate autosuspend for it.
3553 *
3554 * This function should be called near the end of the device's
3555 * runtime_resume callback.
3556 */
3557void blk_post_runtime_resume(struct request_queue *q, int err)
3558{
4fd41a85
KX
3559 if (!q->dev)
3560 return;
3561
6c954667
LM
3562 spin_lock_irq(q->queue_lock);
3563 if (!err) {
3564 q->rpm_status = RPM_ACTIVE;
3565 __blk_run_queue(q);
3566 pm_runtime_mark_last_busy(q->dev);
c60855cd 3567 pm_request_autosuspend(q->dev);
6c954667
LM
3568 } else {
3569 q->rpm_status = RPM_SUSPENDED;
3570 }
3571 spin_unlock_irq(q->queue_lock);
3572}
3573EXPORT_SYMBOL(blk_post_runtime_resume);
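
/*
 * Example usage (sketch): the matching runtime_resume callback.
 * example_hw_resume() is a hypothetical device-specific resume path.
 */
static int example_hw_resume(struct device *dev)
{
	return 0;	/* power the device up here */
}

static int example_runtime_resume(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);
	int err;

	blk_pre_runtime_resume(q);
	err = example_hw_resume(dev);
	blk_post_runtime_resume(q, err);	/* restarts the queue on success */
	return err;
}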
d07ab6d1
MW
3574
3575/**
3576 * blk_set_runtime_active - Force runtime status of the queue to be active
3577 * @q: the queue of the device
3578 *
 3579 * If the device is left runtime suspended during system suspend, the resume
 3580 * hook typically resumes the device and corrects its runtime status
 3581 * accordingly. However, that does not affect the queue runtime PM status,
 3582 * which is still "suspended". This prevents processing requests from the
3583 * queue.
3584 *
 3585 * This function can be used in a driver's resume hook to correct the queue's
 3586 * runtime PM status and re-enable peeking at requests from the queue. It
 3587 * should be called before the first request is added to the queue.
3588 */
3589void blk_set_runtime_active(struct request_queue *q)
3590{
3591 spin_lock_irq(q->queue_lock);
3592 q->rpm_status = RPM_ACTIVE;
3593 pm_runtime_mark_last_busy(q->dev);
3594 pm_request_autosuspend(q->dev);
3595 spin_unlock_irq(q->queue_lock);
3596}
3597EXPORT_SYMBOL(blk_set_runtime_active);
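
/*
 * Example usage (sketch; a common but driver-specific pattern, shown here
 * under assumption): in a system resume hook, power the device up, set its
 * runtime PM status back to active, then realign the queue status so that
 * request processing can restart.
 */
static int example_system_resume(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);

	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	blk_set_runtime_active(q);
	return 0;
}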
6c954667
LM
3598#endif
3599
1da177e4
LT
3600int __init blk_dev_init(void)
3601{
ef295ecf
CH
3602 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
3603 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
0762b23d 3604 FIELD_SIZEOF(struct request, cmd_flags));
ef295ecf
CH
3605 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3606 FIELD_SIZEOF(struct bio, bi_opf));
9eb55b03 3607
89b90be2
TH
3608 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3609 kblockd_workqueue = alloc_workqueue("kblockd",
28747fcd 3610 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1da177e4
LT
3611 if (!kblockd_workqueue)
3612 panic("Failed to create kblockd\n");
3613
3614 request_cachep = kmem_cache_create("blkdev_requests",
20c2df83 3615 sizeof(struct request), 0, SLAB_PANIC, NULL);
1da177e4 3616
c2789bd4 3617 blk_requestq_cachep = kmem_cache_create("request_queue",
165125e1 3618 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1da177e4 3619
18fbda91
OS
3620#ifdef CONFIG_DEBUG_FS
3621 blk_debugfs_root = debugfs_create_dir("block", NULL);
3622#endif
3623
d38ecf93 3624 return 0;
1da177e4 3625}