1da177e4 1/*
1da177e4
LT
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6728cb0e
JA
6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7 * - July 2000
1da177e4
LT
8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
9 */
10
11/*
12 * This handles all read/write requests to block devices
13 */
1da177e4
LT
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/backing-dev.h>
17#include <linux/bio.h>
18#include <linux/blkdev.h>
19#include <linux/highmem.h>
20#include <linux/mm.h>
21#include <linux/kernel_stat.h>
22#include <linux/string.h>
23#include <linux/init.h>
1da177e4
LT
24#include <linux/completion.h>
25#include <linux/slab.h>
26#include <linux/swap.h>
27#include <linux/writeback.h>
faccbd4b 28#include <linux/task_io_accounting_ops.h>
ff856bad
JA
29#include <linux/interrupt.h>
30#include <linux/cpu.h>
2056a782 31#include <linux/blktrace_api.h>
c17bb495 32#include <linux/fault-inject.h>
1da177e4 33
8324aa91
JA
34#include "blk.h"
35
165125e1 36static int __make_request(struct request_queue *q, struct bio *bio);
1da177e4
LT
37
38/*
39 * For the allocated request tables
40 */
5ece6c52 41static struct kmem_cache *request_cachep;
1da177e4
LT
42
43/*
44 * For queue allocation
45 */
6728cb0e 46struct kmem_cache *blk_requestq_cachep;
1da177e4 47
1da177e4
LT
48/*
49 * Controlling structure to kblockd
50 */
ff856bad 51static struct workqueue_struct *kblockd_workqueue;
1da177e4 52
ff856bad
JA
53static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
54
26b8256e
JA
55static void drive_stat_acct(struct request *rq, int new_io)
56{
28f13702 57 struct hd_struct *part;
26b8256e
JA
58 int rw = rq_data_dir(rq);
59
60 if (!blk_fs_request(rq) || !rq->rq_disk)
61 return;
62
28f13702
JA
63 part = get_part(rq->rq_disk, rq->sector);
64 if (!new_io)
65 __all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
66 else {
26b8256e
JA
67 disk_round_stats(rq->rq_disk);
68 rq->rq_disk->in_flight++;
6f2576af
JM
69 if (part) {
70 part_round_stats(part);
71 part->in_flight++;
72 }
26b8256e
JA
73 }
74}
75
8324aa91 76void blk_queue_congestion_threshold(struct request_queue *q)
1da177e4
LT
77{
78 int nr;
79
80 nr = q->nr_requests - (q->nr_requests / 8) + 1;
81 if (nr > q->nr_requests)
82 nr = q->nr_requests;
83 q->nr_congestion_on = nr;
84
85 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
86 if (nr < 1)
87 nr = 1;
88 q->nr_congestion_off = nr;
89}
90
1da177e4
LT
91/**
92 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
93 * @bdev: device
94 *
95 * Locates the passed device's request queue and returns the address of its
96 * backing_dev_info
97 *
98 * Will return NULL if the request queue cannot be located.
99 */
100struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
101{
102 struct backing_dev_info *ret = NULL;
165125e1 103 struct request_queue *q = bdev_get_queue(bdev);
1da177e4
LT
104
105 if (q)
106 ret = &q->backing_dev_info;
107 return ret;
108}
1da177e4
LT
109EXPORT_SYMBOL(blk_get_backing_dev_info);
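/*
 * Editor's illustration (not part of the original file, kept compiled out):
 * a typical caller of blk_get_backing_dev_info(). The readahead tweak is
 * only an example of what filesystems do with the returned backing_dev_info;
 * the helper name is hypothetical.
 */
#if 0
static void example_tune_readahead(struct block_device *bdev)
{
        struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);

        /* bdi is NULL when the device has no request queue */
        if (bdi)
                bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
}
#endif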
110
2a4aa30c 111void blk_rq_init(struct request_queue *q, struct request *rq)
1da177e4 112{
1afb20f3
FT
113 memset(rq, 0, sizeof(*rq));
114
1da177e4 115 INIT_LIST_HEAD(&rq->queuelist);
ff856bad 116 INIT_LIST_HEAD(&rq->donelist);
63a71386
JA
117 rq->q = q;
118 rq->sector = rq->hard_sector = (sector_t) -1;
2e662b65
JA
119 INIT_HLIST_NODE(&rq->hash);
120 RB_CLEAR_NODE(&rq->rb_node);
d7e3c324 121 rq->cmd = rq->__cmd;
63a71386 122 rq->tag = -1;
1da177e4 123 rq->ref_count = 1;
1da177e4 124}
2a4aa30c 125EXPORT_SYMBOL(blk_rq_init);
1da177e4 126
5bb23a68
N
127static void req_bio_endio(struct request *rq, struct bio *bio,
128 unsigned int nbytes, int error)
1da177e4 129{
165125e1 130 struct request_queue *q = rq->q;
797e7dbb 131
5bb23a68
N
132 if (&q->bar_rq != rq) {
133 if (error)
134 clear_bit(BIO_UPTODATE, &bio->bi_flags);
135 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
136 error = -EIO;
797e7dbb 137
5bb23a68 138 if (unlikely(nbytes > bio->bi_size)) {
6728cb0e 139 printk(KERN_ERR "%s: want %u bytes done, %u left\n",
24c03d47 140 __func__, nbytes, bio->bi_size);
5bb23a68
N
141 nbytes = bio->bi_size;
142 }
797e7dbb 143
5bb23a68
N
144 bio->bi_size -= nbytes;
145 bio->bi_sector += (nbytes >> 9);
146 if (bio->bi_size == 0)
6712ecf8 147 bio_endio(bio, error);
5bb23a68
N
148 } else {
149
150 /*
151 * Okay, this is the barrier request in progress, just
152 * record the error;
153 */
154 if (error && !q->orderr)
155 q->orderr = error;
156 }
1da177e4 157}
1da177e4 158
1da177e4
LT
159void blk_dump_rq_flags(struct request *rq, char *msg)
160{
161 int bit;
162
6728cb0e 163 printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
4aff5e23
JA
164 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
165 rq->cmd_flags);
1da177e4 166
6728cb0e
JA
167 printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n",
168 (unsigned long long)rq->sector,
169 rq->nr_sectors,
170 rq->current_nr_sectors);
171 printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n",
172 rq->bio, rq->biotail,
173 rq->buffer, rq->data,
174 rq->data_len);
1da177e4 175
4aff5e23 176 if (blk_pc_request(rq)) {
6728cb0e 177 printk(KERN_INFO " cdb: ");
d34c87e4 178 for (bit = 0; bit < BLK_MAX_CDB; bit++)
1da177e4
LT
179 printk("%02x ", rq->cmd[bit]);
180 printk("\n");
181 }
182}
1da177e4
LT
183EXPORT_SYMBOL(blk_dump_rq_flags);
184
1da177e4
LT
185/*
186 * "plug" the device if there are no outstanding requests: this will
187 * force the transfer to start only after we have put all the requests
188 * on the list.
189 *
190 * This is called with interrupts off and no requests on the queue and
191 * with the queue lock held.
192 */
165125e1 193void blk_plug_device(struct request_queue *q)
1da177e4
LT
194{
195 WARN_ON(!irqs_disabled());
196
197 /*
198 * don't plug a stopped queue, it must be paired with blk_start_queue()
199 * which will restart the queueing
200 */
7daac490 201 if (blk_queue_stopped(q))
1da177e4
LT
202 return;
203
75ad23bc
NP
204 if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
205 __set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
1da177e4 206 mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
2056a782
JA
207 blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
208 }
1da177e4 209}
1da177e4
LT
210EXPORT_SYMBOL(blk_plug_device);
211
212/*
213 * remove the queue from the plugged list, if present. called with
214 * queue lock held and interrupts disabled.
215 */
165125e1 216int blk_remove_plug(struct request_queue *q)
1da177e4
LT
217{
218 WARN_ON(!irqs_disabled());
219
75ad23bc 220 if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
1da177e4
LT
221 return 0;
222
75ad23bc 223 queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
1da177e4
LT
224 del_timer(&q->unplug_timer);
225 return 1;
226}
1da177e4
LT
227EXPORT_SYMBOL(blk_remove_plug);
228
229/*
230 * remove the plug and let it rip..
231 */
165125e1 232void __generic_unplug_device(struct request_queue *q)
1da177e4 233{
7daac490 234 if (unlikely(blk_queue_stopped(q)))
1da177e4
LT
235 return;
236
237 if (!blk_remove_plug(q))
238 return;
239
22e2c507 240 q->request_fn(q);
1da177e4
LT
241}
242EXPORT_SYMBOL(__generic_unplug_device);
243
244/**
245 * generic_unplug_device - fire a request queue
165125e1 246 * @q: The &struct request_queue in question
1da177e4
LT
247 *
248 * Description:
249 * Linux uses plugging to build bigger request queues before letting
250 * the device have at them. If a queue is plugged, the I/O scheduler
251 * is still adding and merging requests on the queue. Once the queue
252 * gets unplugged, the request_fn defined for the queue is invoked and
253 * transfers started.
254 **/
165125e1 255void generic_unplug_device(struct request_queue *q)
1da177e4 256{
dbaf2c00
JA
257 if (blk_queue_plugged(q)) {
258 spin_lock_irq(q->queue_lock);
259 __generic_unplug_device(q);
260 spin_unlock_irq(q->queue_lock);
261 }
1da177e4
LT
262}
263EXPORT_SYMBOL(generic_unplug_device);
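/*
 * Editor's sketch (not in the original source, compiled out): the usual
 * plug/unplug pattern as seen from a caller. A submitter plugs the queue
 * while it queues several requests, then relies on the unplug timer or an
 * explicit unplug to kick ->request_fn. Only the locking rules documented
 * above are the point here.
 */
#if 0
static void example_flush_pending_io(struct request_queue *q)
{
        /*
         * generic_unplug_device() takes the queue lock itself, so call it
         * without the lock held and with interrupts enabled.
         */
        generic_unplug_device(q);

        /*
         * Alternatively, blk_unplug(q) goes through ->unplug_fn, which is
         * what readers waiting on a plugged queue end up using.
         */
        blk_unplug(q);
}
#endif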
264
265static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
266 struct page *page)
267{
165125e1 268 struct request_queue *q = bdi->unplug_io_data;
1da177e4 269
2ad8b1ef 270 blk_unplug(q);
1da177e4
LT
271}
272
86db1e29 273void blk_unplug_work(struct work_struct *work)
1da177e4 274{
165125e1
JA
275 struct request_queue *q =
276 container_of(work, struct request_queue, unplug_work);
1da177e4 277
2056a782
JA
278 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
279 q->rq.count[READ] + q->rq.count[WRITE]);
280
1da177e4
LT
281 q->unplug_fn(q);
282}
283
86db1e29 284void blk_unplug_timeout(unsigned long data)
1da177e4 285{
165125e1 286 struct request_queue *q = (struct request_queue *)data;
1da177e4 287
2056a782
JA
288 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
289 q->rq.count[READ] + q->rq.count[WRITE]);
290
1da177e4
LT
291 kblockd_schedule_work(&q->unplug_work);
292}
293
2ad8b1ef
AB
294void blk_unplug(struct request_queue *q)
295{
296 /*
297 * devices don't necessarily have an ->unplug_fn defined
298 */
299 if (q->unplug_fn) {
300 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
301 q->rq.count[READ] + q->rq.count[WRITE]);
302
303 q->unplug_fn(q);
304 }
305}
306EXPORT_SYMBOL(blk_unplug);
307
1da177e4
LT
308/**
309 * blk_start_queue - restart a previously stopped queue
165125e1 310 * @q: The &struct request_queue in question
1da177e4
LT
311 *
312 * Description:
313 * blk_start_queue() will clear the stop flag on the queue, and call
314 * the request_fn for the queue if it was in a stopped state when
315 * entered. Also see blk_stop_queue(). Queue lock must be held.
316 **/
165125e1 317void blk_start_queue(struct request_queue *q)
1da177e4 318{
a038e253
PBG
319 WARN_ON(!irqs_disabled());
320
75ad23bc 321 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
1da177e4
LT
322
323 /*
324 * one level of recursion is ok and is much faster than kicking
325 * the unplug handling
326 */
75ad23bc
NP
327 if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
328 queue_flag_set(QUEUE_FLAG_REENTER, q);
1da177e4 329 q->request_fn(q);
75ad23bc 330 queue_flag_clear(QUEUE_FLAG_REENTER, q);
1da177e4
LT
331 } else {
332 blk_plug_device(q);
333 kblockd_schedule_work(&q->unplug_work);
334 }
335}
1da177e4
LT
336EXPORT_SYMBOL(blk_start_queue);
337
338/**
339 * blk_stop_queue - stop a queue
165125e1 340 * @q: The &struct request_queue in question
1da177e4
LT
341 *
342 * Description:
343 * The Linux block layer assumes that a block driver will consume all
344 * entries on the request queue when the request_fn strategy is called.
345 * Often this will not happen, because of hardware limitations (queue
346 * depth settings). If a device driver gets a 'queue full' response,
347 * or if it simply chooses not to queue more I/O at one point, it can
348 * call this function to prevent the request_fn from being called until
349 * the driver has signalled it's ready to go again. This happens by calling
350 * blk_start_queue() to restart queue operations. Queue lock must be held.
351 **/
165125e1 352void blk_stop_queue(struct request_queue *q)
1da177e4
LT
353{
354 blk_remove_plug(q);
75ad23bc 355 queue_flag_set(QUEUE_FLAG_STOPPED, q);
1da177e4
LT
356}
357EXPORT_SYMBOL(blk_stop_queue);
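/*
 * Editor's sketch, compiled out: how a driver typically pairs
 * blk_stop_queue() with blk_start_queue(). my_hw_busy() and the "hand rq to
 * the hardware" step are placeholders, not real APIs; only the queue
 * start/stop and locking rules are the point.
 */
#if 0
static void example_busy_aware_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                if (my_hw_busy()) {
                        /* device full: stop dispatch until the IRQ handler restarts us */
                        blk_stop_queue(q);
                        break;
                }
                blkdev_dequeue_request(rq);
                /* hand rq to the hardware here */
        }
}

static void example_restart_from_irq(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);             /* queue lock must be held */
        spin_unlock_irqrestore(q->queue_lock, flags);
}
#endif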
358
359/**
360 * blk_sync_queue - cancel any pending callbacks on a queue
361 * @q: the queue
362 *
363 * Description:
364 * The block layer may perform asynchronous callback activity
365 * on a queue, such as calling the unplug function after a timeout.
366 * A block device may call blk_sync_queue to ensure that any
367 * such activity is cancelled, thus allowing it to release resources
59c51591 368 * that the callbacks might use. The caller must already have made sure
1da177e4
LT
369 * that its ->make_request_fn will not re-add plugging prior to calling
370 * this function.
371 *
372 */
373void blk_sync_queue(struct request_queue *q)
374{
375 del_timer_sync(&q->unplug_timer);
abbeb88d 376 kblockd_flush_work(&q->unplug_work);
1da177e4
LT
377}
378EXPORT_SYMBOL(blk_sync_queue);
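/*
 * Editor's sketch, compiled out: blk_sync_queue() in a teardown path. A
 * driver about to free per-queue resources first makes sure no unplug timer
 * or kblockd work can still run against the queue.
 */
#if 0
static void example_shutdown_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_stop_queue(q);              /* queue lock must be held */
        spin_unlock_irqrestore(q->queue_lock, flags);

        blk_sync_queue(q);      /* wait out the unplug timer and kblockd work */
        blk_cleanup_queue(q);   /* finally drop the queue reference */
}
#endif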
379
380/**
381 * blk_run_queue - run a single device queue
382 * @q: The queue to run
383 */
75ad23bc 384void __blk_run_queue(struct request_queue *q)
1da177e4 385{
1da177e4 386 blk_remove_plug(q);
dac07ec1
JA
387
388 /*
389 * Only recurse once to avoid overrunning the stack, let the unplug
390 * handling reinvoke the handler shortly if we already got there.
391 */
392 if (!elv_queue_empty(q)) {
75ad23bc
NP
393 if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
394 queue_flag_set(QUEUE_FLAG_REENTER, q);
dac07ec1 395 q->request_fn(q);
75ad23bc 396 queue_flag_clear(QUEUE_FLAG_REENTER, q);
dac07ec1
JA
397 } else {
398 blk_plug_device(q);
399 kblockd_schedule_work(&q->unplug_work);
400 }
401 }
75ad23bc
NP
402}
403EXPORT_SYMBOL(__blk_run_queue);
dac07ec1 404
75ad23bc
NP
405/**
406 * blk_run_queue - run a single device queue
407 * @q: The queue to run
408 */
409void blk_run_queue(struct request_queue *q)
410{
411 unsigned long flags;
412
413 spin_lock_irqsave(q->queue_lock, flags);
414 __blk_run_queue(q);
1da177e4
LT
415 spin_unlock_irqrestore(q->queue_lock, flags);
416}
417EXPORT_SYMBOL(blk_run_queue);
418
165125e1 419void blk_put_queue(struct request_queue *q)
483f4afc
AV
420{
421 kobject_put(&q->kobj);
422}
483f4afc 423
6728cb0e 424void blk_cleanup_queue(struct request_queue *q)
483f4afc
AV
425{
426 mutex_lock(&q->sysfs_lock);
75ad23bc 427 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
483f4afc
AV
428 mutex_unlock(&q->sysfs_lock);
429
430 if (q->elevator)
431 elevator_exit(q->elevator);
432
433 blk_put_queue(q);
434}
1da177e4
LT
435EXPORT_SYMBOL(blk_cleanup_queue);
436
165125e1 437static int blk_init_free_list(struct request_queue *q)
1da177e4
LT
438{
439 struct request_list *rl = &q->rq;
440
441 rl->count[READ] = rl->count[WRITE] = 0;
442 rl->starved[READ] = rl->starved[WRITE] = 0;
cb98fc8b 443 rl->elvpriv = 0;
1da177e4
LT
444 init_waitqueue_head(&rl->wait[READ]);
445 init_waitqueue_head(&rl->wait[WRITE]);
1da177e4 446
1946089a
CL
447 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
448 mempool_free_slab, request_cachep, q->node);
1da177e4
LT
449
450 if (!rl->rq_pool)
451 return -ENOMEM;
452
453 return 0;
454}
455
165125e1 456struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
1da177e4 457{
1946089a
CL
458 return blk_alloc_queue_node(gfp_mask, -1);
459}
460EXPORT_SYMBOL(blk_alloc_queue);
1da177e4 461
165125e1 462struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1946089a 463{
165125e1 464 struct request_queue *q;
e0bf68dd 465 int err;
1946089a 466
8324aa91 467 q = kmem_cache_alloc_node(blk_requestq_cachep,
94f6030c 468 gfp_mask | __GFP_ZERO, node_id);
1da177e4
LT
469 if (!q)
470 return NULL;
471
e0bf68dd
PZ
472 q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
473 q->backing_dev_info.unplug_io_data = q;
474 err = bdi_init(&q->backing_dev_info);
475 if (err) {
8324aa91 476 kmem_cache_free(blk_requestq_cachep, q);
e0bf68dd
PZ
477 return NULL;
478 }
479
1da177e4 480 init_timer(&q->unplug_timer);
483f4afc 481
8324aa91 482 kobject_init(&q->kobj, &blk_queue_ktype);
1da177e4 483
483f4afc
AV
484 mutex_init(&q->sysfs_lock);
485
1da177e4
LT
486 return q;
487}
1946089a 488EXPORT_SYMBOL(blk_alloc_queue_node);
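/*
 * Editor's sketch, compiled out: a bio-based (stacking-style) driver uses
 * blk_alloc_queue() plus blk_queue_make_request() instead of
 * blk_init_queue(); it never sees struct request at all. The redirection
 * step is only hinted at in a comment.
 */
#if 0
static int example_make_request(struct request_queue *q, struct bio *bio)
{
        if (bio_sectors(bio) == 0) {
                bio_endio(bio, 0);      /* nothing to do, complete directly */
                return 0;
        }

        /* redirect bio->bi_bdev / bio->bi_sector here, then resubmit */
        generic_make_request(bio);
        return 0;                       /* 0 tells the core the bio was consumed */
}

static struct request_queue *example_create_bio_queue(void)
{
        struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

        if (!q)
                return NULL;

        blk_queue_make_request(q, example_make_request);
        return q;
}
#endif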
1da177e4
LT
489
490/**
491 * blk_init_queue - prepare a request queue for use with a block device
492 * @rfn: The function to be called to process requests that have been
493 * placed on the queue.
494 * @lock: Request queue spin lock
495 *
496 * Description:
497 * If a block device wishes to use the standard request handling procedures,
498 * which sorts requests and coalesces adjacent requests, then it must
499 * call blk_init_queue(). The function @rfn will be called when there
500 * are requests on the queue that need to be processed. If the device
501 * supports plugging, then @rfn may not be called immediately when requests
502 * are available on the queue, but may be called at some time later instead.
503 * Plugged queues are generally unplugged when a buffer belonging to one
504 * of the requests on the queue is needed, or due to memory pressure.
505 *
506 * @rfn is not required, or even expected, to remove all requests off the
507 * queue, but only as many as it can handle at a time. If it does leave
508 * requests on the queue, it is responsible for arranging that the requests
509 * get dealt with eventually.
510 *
511 * The queue spin lock must be held while manipulating the requests on the
a038e253
PBG
512 * request queue; this lock will be taken also from interrupt context, so irq
513 * disabling is needed for it.
1da177e4
LT
514 *
515 * Function returns a pointer to the initialized request queue, or NULL if
516 * it didn't succeed.
517 *
518 * Note:
519 * blk_init_queue() must be paired with a blk_cleanup_queue() call
520 * when the block device is deactivated (such as at module unload).
521 **/
1946089a 522
165125e1 523struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
1da177e4 524{
1946089a
CL
525 return blk_init_queue_node(rfn, lock, -1);
526}
527EXPORT_SYMBOL(blk_init_queue);
528
165125e1 529struct request_queue *
1946089a
CL
530blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
531{
165125e1 532 struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
1da177e4
LT
533
534 if (!q)
535 return NULL;
536
1946089a 537 q->node = node_id;
8669aafd 538 if (blk_init_free_list(q)) {
8324aa91 539 kmem_cache_free(blk_requestq_cachep, q);
8669aafd
AV
540 return NULL;
541 }
1da177e4 542
152587de
JA
543 /*
544 * if caller didn't supply a lock, they get per-queue locking with
545 * our embedded lock
546 */
547 if (!lock) {
548 spin_lock_init(&q->__queue_lock);
549 lock = &q->__queue_lock;
550 }
551
1da177e4 552 q->request_fn = rfn;
1da177e4
LT
553 q->prep_rq_fn = NULL;
554 q->unplug_fn = generic_unplug_device;
555 q->queue_flags = (1 << QUEUE_FLAG_CLUSTER);
556 q->queue_lock = lock;
557
558 blk_queue_segment_boundary(q, 0xffffffff);
559
560 blk_queue_make_request(q, __make_request);
561 blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
562
563 blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
564 blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
565
44ec9542
AS
566 q->sg_reserved_size = INT_MAX;
567
1da177e4
LT
568 /*
569 * all done
570 */
571 if (!elevator_init(q, NULL)) {
572 blk_queue_congestion_threshold(q);
573 return q;
574 }
575
8669aafd 576 blk_put_queue(q);
1da177e4
LT
577 return NULL;
578}
1946089a 579EXPORT_SYMBOL(blk_init_queue_node);
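/*
 * Editor's sketch, compiled out: the classic request-based driver setup the
 * kernel-doc above describes. example_lock and the "program the hardware"
 * step are placeholders; the blk_init_queue()/elv_next_request() usage is
 * the point here.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);

static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        /* called with example_lock held and interrupts disabled */
        while ((rq = elv_next_request(q)) != NULL) {
                blkdev_dequeue_request(rq);
                /* program the hardware from rq->sector / rq->buffer here */
                __blk_end_request(rq, 0, blk_rq_bytes(rq));
        }
}

static struct request_queue *example_init(void)
{
        struct request_queue *q;

        q = blk_init_queue(example_request_fn, &example_lock);
        if (!q)
                return NULL;

        blk_queue_max_sectors(q, 128);  /* optional per-device limits */
        return q;
}
#endif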
1da177e4 580
165125e1 581int blk_get_queue(struct request_queue *q)
1da177e4 582{
fde6ad22 583 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
483f4afc 584 kobject_get(&q->kobj);
1da177e4
LT
585 return 0;
586 }
587
588 return 1;
589}
1da177e4 590
165125e1 591static inline void blk_free_request(struct request_queue *q, struct request *rq)
1da177e4 592{
4aff5e23 593 if (rq->cmd_flags & REQ_ELVPRIV)
cb98fc8b 594 elv_put_request(q, rq);
1da177e4
LT
595 mempool_free(rq, q->rq.rq_pool);
596}
597
1ea25ecb 598static struct request *
165125e1 599blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
1da177e4
LT
600{
601 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
602
603 if (!rq)
604 return NULL;
605
2a4aa30c 606 blk_rq_init(q, rq);
1afb20f3 607
1da177e4 608 /*
4aff5e23 609 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
1da177e4
LT
610 * see bio.h and blkdev.h
611 */
49171e5c 612 rq->cmd_flags = rw | REQ_ALLOCED;
1da177e4 613
cb98fc8b 614 if (priv) {
cb78b285 615 if (unlikely(elv_set_request(q, rq, gfp_mask))) {
cb98fc8b
TH
616 mempool_free(rq, q->rq.rq_pool);
617 return NULL;
618 }
4aff5e23 619 rq->cmd_flags |= REQ_ELVPRIV;
cb98fc8b 620 }
1da177e4 621
cb98fc8b 622 return rq;
1da177e4
LT
623}
624
625/*
626 * ioc_batching returns true if the ioc is a valid batching request and
627 * should be given priority access to a request.
628 */
165125e1 629static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
1da177e4
LT
630{
631 if (!ioc)
632 return 0;
633
634 /*
635 * Make sure the process is able to allocate at least 1 request
636 * even if the batch times out, otherwise we could theoretically
637 * lose wakeups.
638 */
639 return ioc->nr_batch_requests == q->nr_batching ||
640 (ioc->nr_batch_requests > 0
641 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
642}
643
644/*
645 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
646 * will cause the process to be a "batcher" on all queues in the system. This
647 * is the behaviour we want though - once it gets a wakeup it should be given
648 * a nice run.
649 */
165125e1 650static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
1da177e4
LT
651{
652 if (!ioc || ioc_batching(q, ioc))
653 return;
654
655 ioc->nr_batch_requests = q->nr_batching;
656 ioc->last_waited = jiffies;
657}
658
165125e1 659static void __freed_request(struct request_queue *q, int rw)
1da177e4
LT
660{
661 struct request_list *rl = &q->rq;
662
663 if (rl->count[rw] < queue_congestion_off_threshold(q))
79e2de4b 664 blk_clear_queue_congested(q, rw);
1da177e4
LT
665
666 if (rl->count[rw] + 1 <= q->nr_requests) {
1da177e4
LT
667 if (waitqueue_active(&rl->wait[rw]))
668 wake_up(&rl->wait[rw]);
669
670 blk_clear_queue_full(q, rw);
671 }
672}
673
674/*
675 * A request has just been released. Account for it, update the full and
676 * congestion status, wake up any waiters. Called under q->queue_lock.
677 */
165125e1 678static void freed_request(struct request_queue *q, int rw, int priv)
1da177e4
LT
679{
680 struct request_list *rl = &q->rq;
681
682 rl->count[rw]--;
cb98fc8b
TH
683 if (priv)
684 rl->elvpriv--;
1da177e4
LT
685
686 __freed_request(q, rw);
687
688 if (unlikely(rl->starved[rw ^ 1]))
689 __freed_request(q, rw ^ 1);
1da177e4
LT
690}
691
692#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
693/*
d6344532
NP
694 * Get a free request, queue_lock must be held.
695 * Returns NULL on failure, with queue_lock held.
696 * Returns !NULL on success, with queue_lock *not held*.
1da177e4 697 */
165125e1 698static struct request *get_request(struct request_queue *q, int rw_flags,
7749a8d4 699 struct bio *bio, gfp_t gfp_mask)
1da177e4
LT
700{
701 struct request *rq = NULL;
702 struct request_list *rl = &q->rq;
88ee5ef1 703 struct io_context *ioc = NULL;
7749a8d4 704 const int rw = rw_flags & 0x01;
88ee5ef1
JA
705 int may_queue, priv;
706
7749a8d4 707 may_queue = elv_may_queue(q, rw_flags);
88ee5ef1
JA
708 if (may_queue == ELV_MQUEUE_NO)
709 goto rq_starved;
710
711 if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
712 if (rl->count[rw]+1 >= q->nr_requests) {
b5deef90 713 ioc = current_io_context(GFP_ATOMIC, q->node);
88ee5ef1
JA
714 /*
715 * The queue will fill after this allocation, so set
716 * it as full, and mark this process as "batching".
717 * This process will be allowed to complete a batch of
718 * requests, others will be blocked.
719 */
720 if (!blk_queue_full(q, rw)) {
721 ioc_set_batching(q, ioc);
722 blk_set_queue_full(q, rw);
723 } else {
724 if (may_queue != ELV_MQUEUE_MUST
725 && !ioc_batching(q, ioc)) {
726 /*
727 * The queue is full and the allocating
728 * process is not a "batcher", and not
729 * exempted by the IO scheduler
730 */
731 goto out;
732 }
733 }
1da177e4 734 }
79e2de4b 735 blk_set_queue_congested(q, rw);
1da177e4
LT
736 }
737
082cf69e
JA
738 /*
739 * Only allow batching queuers to allocate up to 50% over the defined
740 * limit of requests, otherwise we could have thousands of requests
741 * allocated with any setting of ->nr_requests
742 */
fd782a4a 743 if (rl->count[rw] >= (3 * q->nr_requests / 2))
082cf69e 744 goto out;
fd782a4a 745
1da177e4
LT
746 rl->count[rw]++;
747 rl->starved[rw] = 0;
cb98fc8b 748
64521d1a 749 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
cb98fc8b
TH
750 if (priv)
751 rl->elvpriv++;
752
1da177e4
LT
753 spin_unlock_irq(q->queue_lock);
754
7749a8d4 755 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
88ee5ef1 756 if (unlikely(!rq)) {
1da177e4
LT
757 /*
758 * Allocation failed presumably due to memory. Undo anything
759 * we might have messed up.
760 *
761 * Allocating task should really be put onto the front of the
762 * wait queue, but this is pretty rare.
763 */
764 spin_lock_irq(q->queue_lock);
cb98fc8b 765 freed_request(q, rw, priv);
1da177e4
LT
766
767 /*
768 * in the very unlikely event that allocation failed and no
 769 * requests for this direction were pending, mark us starved
 770 * so that freeing of a request in the other direction will
 771 * notice us. Another possible fix would be to split the
772 * rq mempool into READ and WRITE
773 */
774rq_starved:
775 if (unlikely(rl->count[rw] == 0))
776 rl->starved[rw] = 1;
777
1da177e4
LT
778 goto out;
779 }
780
88ee5ef1
JA
781 /*
782 * ioc may be NULL here, and ioc_batching will be false. That's
783 * OK, if the queue is under the request limit then requests need
784 * not count toward the nr_batch_requests limit. There will always
785 * be some limit enforced by BLK_BATCH_TIME.
786 */
1da177e4
LT
787 if (ioc_batching(q, ioc))
788 ioc->nr_batch_requests--;
6728cb0e 789
2056a782 790 blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
1da177e4 791out:
1da177e4
LT
792 return rq;
793}
794
795/*
796 * No available requests for this queue, unplug the device and wait for some
797 * requests to become available.
d6344532
NP
798 *
799 * Called with q->queue_lock held, and returns with it unlocked.
1da177e4 800 */
165125e1 801static struct request *get_request_wait(struct request_queue *q, int rw_flags,
22e2c507 802 struct bio *bio)
1da177e4 803{
7749a8d4 804 const int rw = rw_flags & 0x01;
1da177e4
LT
805 struct request *rq;
806
7749a8d4 807 rq = get_request(q, rw_flags, bio, GFP_NOIO);
450991bc
NP
808 while (!rq) {
809 DEFINE_WAIT(wait);
1da177e4
LT
810 struct request_list *rl = &q->rq;
811
812 prepare_to_wait_exclusive(&rl->wait[rw], &wait,
813 TASK_UNINTERRUPTIBLE);
814
7749a8d4 815 rq = get_request(q, rw_flags, bio, GFP_NOIO);
1da177e4
LT
816
817 if (!rq) {
818 struct io_context *ioc;
819
2056a782
JA
820 blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
821
d6344532
NP
822 __generic_unplug_device(q);
823 spin_unlock_irq(q->queue_lock);
1da177e4
LT
824 io_schedule();
825
826 /*
827 * After sleeping, we become a "batching" process and
828 * will be able to allocate at least one request, and
 829 * up to a big batch of them for a small period of time.
830 * See ioc_batching, ioc_set_batching
831 */
b5deef90 832 ioc = current_io_context(GFP_NOIO, q->node);
1da177e4 833 ioc_set_batching(q, ioc);
d6344532
NP
834
835 spin_lock_irq(q->queue_lock);
1da177e4
LT
836 }
837 finish_wait(&rl->wait[rw], &wait);
450991bc 838 }
1da177e4
LT
839
840 return rq;
841}
842
165125e1 843struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1da177e4
LT
844{
845 struct request *rq;
846
847 BUG_ON(rw != READ && rw != WRITE);
848
d6344532
NP
849 spin_lock_irq(q->queue_lock);
850 if (gfp_mask & __GFP_WAIT) {
22e2c507 851 rq = get_request_wait(q, rw, NULL);
d6344532 852 } else {
22e2c507 853 rq = get_request(q, rw, NULL, gfp_mask);
d6344532
NP
854 if (!rq)
855 spin_unlock_irq(q->queue_lock);
856 }
857 /* q->queue_lock is unlocked at this point */
1da177e4
LT
858
859 return rq;
860}
1da177e4
LT
861EXPORT_SYMBOL(blk_get_request);
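/*
 * Editor's sketch, compiled out: blk_get_request()/blk_put_request() as used
 * by callers that push a prebuilt command through the queue (the SCSI ioctl
 * paths do roughly this). The CDB contents and timeout are arbitrary example
 * values.
 */
#if 0
static int example_send_test_unit_ready(struct request_queue *q,
                                        struct gendisk *disk)
{
        struct request *rq;
        int err;

        rq = blk_get_request(q, READ, GFP_KERNEL);      /* may sleep */
        if (!rq)
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd[0] = 0x00;              /* TEST UNIT READY */
        rq->cmd_len = 6;
        rq->timeout = 10 * HZ;

        blk_execute_rq(q, disk, rq, 0); /* waits for completion */
        err = rq->errors ? -EIO : 0;

        blk_put_request(rq);
        return err;
}
#endif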
862
dc72ef4a
JA
863/**
864 * blk_start_queueing - initiate dispatch of requests to device
865 * @q: request queue to kick into gear
866 *
867 * This is basically a helper to remove the need to know whether a queue
868 * is plugged or not if someone just wants to initiate dispatch of requests
869 * for this queue.
870 *
871 * The queue lock must be held with interrupts disabled.
872 */
165125e1 873void blk_start_queueing(struct request_queue *q)
dc72ef4a
JA
874{
875 if (!blk_queue_plugged(q))
876 q->request_fn(q);
877 else
878 __generic_unplug_device(q);
879}
880EXPORT_SYMBOL(blk_start_queueing);
881
1da177e4
LT
882/**
883 * blk_requeue_request - put a request back on queue
884 * @q: request queue where request should be inserted
885 * @rq: request to be inserted
886 *
887 * Description:
888 * Drivers often keep queueing requests until the hardware cannot accept
 889 * more. When that condition happens we need to put the request back
890 * on the queue. Must be called with queue lock held.
891 */
165125e1 892void blk_requeue_request(struct request_queue *q, struct request *rq)
1da177e4 893{
2056a782
JA
894 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
895
1da177e4
LT
896 if (blk_rq_tagged(rq))
897 blk_queue_end_tag(q, rq);
898
899 elv_requeue_request(q, rq);
900}
1da177e4
LT
901EXPORT_SYMBOL(blk_requeue_request);
902
903/**
 904 * blk_insert_request - insert a special request into a request queue
905 * @q: request queue where request should be inserted
906 * @rq: request to be inserted
907 * @at_head: insert request at head or tail of queue
908 * @data: private data
1da177e4
LT
909 *
910 * Description:
911 * Many block devices need to execute commands asynchronously, so they don't
912 * block the whole kernel from preemption during request execution. This is
 913 * accomplished normally by inserting artificial requests tagged as
 914 * REQ_SPECIAL into the corresponding request queue, and letting them be
915 * scheduled for actual execution by the request queue.
916 *
917 * We have the option of inserting the head or the tail of the queue.
918 * Typically we use the tail for new ioctls and so forth. We use the head
919 * of the queue for things like a QUEUE_FULL message from a device, or a
920 * host that is unable to accept a particular command.
921 */
165125e1 922void blk_insert_request(struct request_queue *q, struct request *rq,
867d1191 923 int at_head, void *data)
1da177e4 924{
867d1191 925 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
1da177e4
LT
926 unsigned long flags;
927
928 /*
929 * tell I/O scheduler that this isn't a regular read/write (ie it
930 * must not attempt merges on this) and that it acts as a soft
931 * barrier
932 */
4aff5e23
JA
933 rq->cmd_type = REQ_TYPE_SPECIAL;
934 rq->cmd_flags |= REQ_SOFTBARRIER;
1da177e4
LT
935
936 rq->special = data;
937
938 spin_lock_irqsave(q->queue_lock, flags);
939
940 /*
941 * If command is tagged, release the tag
942 */
867d1191
TH
943 if (blk_rq_tagged(rq))
944 blk_queue_end_tag(q, rq);
1da177e4 945
b238b3d4 946 drive_stat_acct(rq, 1);
867d1191 947 __elv_add_request(q, rq, where, 0);
dc72ef4a 948 blk_start_queueing(q);
1da177e4
LT
949 spin_unlock_irqrestore(q->queue_lock, flags);
950}
1da177e4
LT
951EXPORT_SYMBOL(blk_insert_request);
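/*
 * Editor's sketch, compiled out: queueing a driver-private request with
 * blk_insert_request(), as described above. The request carries only a
 * ->special cookie; what the driver does with it in its request_fn is up to
 * the driver.
 */
#if 0
static int example_insert_special(struct request_queue *q, void *cookie)
{
        struct request *rq;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;

        /*
         * at_head = 1: jump the queue, e.g. to deliver an "unthrottle"
         * style message ahead of the normal reads and writes.
         */
        blk_insert_request(q, rq, 1, cookie);
        return 0;
}
#endif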
952
1da177e4
LT
953/*
954 * add-request adds a request to the linked list.
955 * queue lock is held and interrupts disabled, as we muck with the
956 * request queue list.
957 */
6728cb0e 958static inline void add_request(struct request_queue *q, struct request *req)
1da177e4 959{
b238b3d4 960 drive_stat_acct(req, 1);
1da177e4 961
1da177e4
LT
962 /*
963 * elevator indicated where it wants this request to be
964 * inserted at elevator_merge time
965 */
966 __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
967}
6728cb0e 968
1da177e4
LT
969/*
970 * disk_round_stats() - Round off the performance stats on a struct
971 * disk_stats.
972 *
973 * The average IO queue length and utilisation statistics are maintained
974 * by observing the current state of the queue length and the amount of
975 * time it has been in this state for.
976 *
977 * Normally, that accounting is done on IO completion, but that can result
978 * in more than a second's worth of IO being accounted for within any one
979 * second, leading to >100% utilisation. To deal with that, we call this
980 * function to do a round-off before returning the results when reading
981 * /proc/diskstats. This accounts immediately for all queue usage up to
982 * the current jiffies and restarts the counters again.
983 */
984void disk_round_stats(struct gendisk *disk)
985{
986 unsigned long now = jiffies;
987
b2982649
KC
988 if (now == disk->stamp)
989 return;
1da177e4 990
20e5c81f
KC
991 if (disk->in_flight) {
992 __disk_stat_add(disk, time_in_queue,
993 disk->in_flight * (now - disk->stamp));
994 __disk_stat_add(disk, io_ticks, (now - disk->stamp));
995 }
1da177e4 996 disk->stamp = now;
1da177e4 997}
3eaf840e
JNN
998EXPORT_SYMBOL_GPL(disk_round_stats);
999
6f2576af
JM
1000void part_round_stats(struct hd_struct *part)
1001{
1002 unsigned long now = jiffies;
1003
1004 if (now == part->stamp)
1005 return;
1006
1007 if (part->in_flight) {
1008 __part_stat_add(part, time_in_queue,
1009 part->in_flight * (now - part->stamp));
1010 __part_stat_add(part, io_ticks, (now - part->stamp));
1011 }
1012 part->stamp = now;
1013}
1014
1da177e4
LT
1015/*
1016 * queue lock must be held
1017 */
165125e1 1018void __blk_put_request(struct request_queue *q, struct request *req)
1da177e4 1019{
1da177e4
LT
1020 if (unlikely(!q))
1021 return;
1022 if (unlikely(--req->ref_count))
1023 return;
1024
8922e16c
TH
1025 elv_completed_request(q, req);
1026
1da177e4
LT
1027 /*
1028 * Request may not have originated from ll_rw_blk. if not,
1029 * it didn't come out of our reserved rq pools
1030 */
49171e5c 1031 if (req->cmd_flags & REQ_ALLOCED) {
1da177e4 1032 int rw = rq_data_dir(req);
4aff5e23 1033 int priv = req->cmd_flags & REQ_ELVPRIV;
1da177e4 1034
1da177e4 1035 BUG_ON(!list_empty(&req->queuelist));
9817064b 1036 BUG_ON(!hlist_unhashed(&req->hash));
1da177e4
LT
1037
1038 blk_free_request(q, req);
cb98fc8b 1039 freed_request(q, rw, priv);
1da177e4
LT
1040 }
1041}
6e39b69e
MC
1042EXPORT_SYMBOL_GPL(__blk_put_request);
1043
1da177e4
LT
1044void blk_put_request(struct request *req)
1045{
8922e16c 1046 unsigned long flags;
165125e1 1047 struct request_queue *q = req->q;
8922e16c 1048
1da177e4 1049 /*
8922e16c
TH
1050 * Gee, IDE calls in w/ NULL q. Fix IDE and remove the
1051 * following if (q) test.
1da177e4 1052 */
8922e16c 1053 if (q) {
1da177e4
LT
1054 spin_lock_irqsave(q->queue_lock, flags);
1055 __blk_put_request(q, req);
1056 spin_unlock_irqrestore(q->queue_lock, flags);
1057 }
1058}
1da177e4
LT
1059EXPORT_SYMBOL(blk_put_request);
1060
86db1e29 1061void init_request_from_bio(struct request *req, struct bio *bio)
52d9e675 1062{
4aff5e23 1063 req->cmd_type = REQ_TYPE_FS;
52d9e675
TH
1064
1065 /*
1066 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
1067 */
1068 if (bio_rw_ahead(bio) || bio_failfast(bio))
4aff5e23 1069 req->cmd_flags |= REQ_FAILFAST;
52d9e675
TH
1070
1071 /*
1072 * REQ_BARRIER implies no merging, but lets make it explicit
1073 */
1074 if (unlikely(bio_barrier(bio)))
4aff5e23 1075 req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
52d9e675 1076
b31dc66a 1077 if (bio_sync(bio))
4aff5e23 1078 req->cmd_flags |= REQ_RW_SYNC;
5404bc7a
JA
1079 if (bio_rw_meta(bio))
1080 req->cmd_flags |= REQ_RW_META;
b31dc66a 1081
52d9e675
TH
1082 req->errors = 0;
1083 req->hard_sector = req->sector = bio->bi_sector;
52d9e675 1084 req->ioprio = bio_prio(bio);
52d9e675 1085 req->start_time = jiffies;
bc1c56fd 1086 blk_rq_bio_prep(req->q, req, bio);
52d9e675
TH
1087}
1088
165125e1 1089static int __make_request(struct request_queue *q, struct bio *bio)
1da177e4 1090{
450991bc 1091 struct request *req;
51da90fc
JA
1092 int el_ret, nr_sectors, barrier, err;
1093 const unsigned short prio = bio_prio(bio);
1094 const int sync = bio_sync(bio);
7749a8d4 1095 int rw_flags;
1da177e4 1096
1da177e4 1097 nr_sectors = bio_sectors(bio);
1da177e4
LT
1098
1099 /*
1100 * low level driver can indicate that it wants pages above a
1101 * certain limit bounced to low memory (ie for highmem, or even
1102 * ISA dma in theory)
1103 */
1104 blk_queue_bounce(q, &bio);
1105
1da177e4 1106 barrier = bio_barrier(bio);
797e7dbb 1107 if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
1da177e4
LT
1108 err = -EOPNOTSUPP;
1109 goto end_io;
1110 }
1111
1da177e4
LT
1112 spin_lock_irq(q->queue_lock);
1113
450991bc 1114 if (unlikely(barrier) || elv_queue_empty(q))
1da177e4
LT
1115 goto get_rq;
1116
1117 el_ret = elv_merge(q, &req, bio);
1118 switch (el_ret) {
6728cb0e
JA
1119 case ELEVATOR_BACK_MERGE:
1120 BUG_ON(!rq_mergeable(req));
1da177e4 1121
6728cb0e
JA
1122 if (!ll_back_merge_fn(q, req, bio))
1123 break;
1da177e4 1124
6728cb0e 1125 blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
2056a782 1126
6728cb0e
JA
1127 req->biotail->bi_next = bio;
1128 req->biotail = bio;
1129 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1130 req->ioprio = ioprio_best(req->ioprio, prio);
1131 drive_stat_acct(req, 0);
1132 if (!attempt_back_merge(q, req))
1133 elv_merged_request(q, req, el_ret);
1134 goto out;
1da177e4 1135
6728cb0e
JA
1136 case ELEVATOR_FRONT_MERGE:
1137 BUG_ON(!rq_mergeable(req));
1da177e4 1138
6728cb0e
JA
1139 if (!ll_front_merge_fn(q, req, bio))
1140 break;
1da177e4 1141
6728cb0e 1142 blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
2056a782 1143
6728cb0e
JA
1144 bio->bi_next = req->bio;
1145 req->bio = bio;
1da177e4 1146
6728cb0e
JA
1147 /*
1148 * may not be valid. if the low level driver said
1149 * it didn't need a bounce buffer then it better
1150 * not touch req->buffer either...
1151 */
1152 req->buffer = bio_data(bio);
1153 req->current_nr_sectors = bio_cur_sectors(bio);
1154 req->hard_cur_sectors = req->current_nr_sectors;
1155 req->sector = req->hard_sector = bio->bi_sector;
1156 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1157 req->ioprio = ioprio_best(req->ioprio, prio);
1158 drive_stat_acct(req, 0);
1159 if (!attempt_front_merge(q, req))
1160 elv_merged_request(q, req, el_ret);
1161 goto out;
1162
1163 /* ELV_NO_MERGE: elevator says don't/can't merge. */
1164 default:
1165 ;
1da177e4
LT
1166 }
1167
450991bc 1168get_rq:
7749a8d4
JA
1169 /*
1170 * This sync check and mask will be re-done in init_request_from_bio(),
1171 * but we need to set it earlier to expose the sync flag to the
1172 * rq allocator and io schedulers.
1173 */
1174 rw_flags = bio_data_dir(bio);
1175 if (sync)
1176 rw_flags |= REQ_RW_SYNC;
1177
1da177e4 1178 /*
450991bc 1179 * Grab a free request. This might sleep but cannot fail.
d6344532 1180 * Returns with the queue unlocked.
450991bc 1181 */
7749a8d4 1182 req = get_request_wait(q, rw_flags, bio);
d6344532 1183
450991bc
NP
1184 /*
1185 * After dropping the lock and possibly sleeping here, our request
1186 * may now be mergeable after it had proven unmergeable (above).
1187 * We don't worry about that case for efficiency. It won't happen
1188 * often, and the elevators are able to handle it.
1da177e4 1189 */
52d9e675 1190 init_request_from_bio(req, bio);
1da177e4 1191
450991bc
NP
1192 spin_lock_irq(q->queue_lock);
1193 if (elv_queue_empty(q))
1194 blk_plug_device(q);
1da177e4
LT
1195 add_request(q, req);
1196out:
4a534f93 1197 if (sync)
1da177e4
LT
1198 __generic_unplug_device(q);
1199
1200 spin_unlock_irq(q->queue_lock);
1201 return 0;
1202
1203end_io:
6712ecf8 1204 bio_endio(bio, err);
1da177e4
LT
1205 return 0;
1206}
1207
1208/*
1209 * If bio->bi_dev is a partition, remap the location
1210 */
1211static inline void blk_partition_remap(struct bio *bio)
1212{
1213 struct block_device *bdev = bio->bi_bdev;
1214
bf2de6f5 1215 if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1da177e4
LT
1216 struct hd_struct *p = bdev->bd_part;
1217
1da177e4
LT
1218 bio->bi_sector += p->start_sect;
1219 bio->bi_bdev = bdev->bd_contains;
c7149d6b
AB
1220
1221 blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
1222 bdev->bd_dev, bio->bi_sector,
1223 bio->bi_sector - p->start_sect);
1da177e4
LT
1224 }
1225}
1226
1da177e4
LT
1227static void handle_bad_sector(struct bio *bio)
1228{
1229 char b[BDEVNAME_SIZE];
1230
1231 printk(KERN_INFO "attempt to access beyond end of device\n");
1232 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1233 bdevname(bio->bi_bdev, b),
1234 bio->bi_rw,
1235 (unsigned long long)bio->bi_sector + bio_sectors(bio),
1236 (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
1237
1238 set_bit(BIO_EOF, &bio->bi_flags);
1239}
1240
c17bb495
AM
1241#ifdef CONFIG_FAIL_MAKE_REQUEST
1242
1243static DECLARE_FAULT_ATTR(fail_make_request);
1244
1245static int __init setup_fail_make_request(char *str)
1246{
1247 return setup_fault_attr(&fail_make_request, str);
1248}
1249__setup("fail_make_request=", setup_fail_make_request);
1250
1251static int should_fail_request(struct bio *bio)
1252{
1253 if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
1254 (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
1255 return should_fail(&fail_make_request, bio->bi_size);
1256
1257 return 0;
1258}
1259
1260static int __init fail_make_request_debugfs(void)
1261{
1262 return init_fault_attr_dentries(&fail_make_request,
1263 "fail_make_request");
1264}
1265
1266late_initcall(fail_make_request_debugfs);
1267
1268#else /* CONFIG_FAIL_MAKE_REQUEST */
1269
1270static inline int should_fail_request(struct bio *bio)
1271{
1272 return 0;
1273}
1274
1275#endif /* CONFIG_FAIL_MAKE_REQUEST */
1276
c07e2b41
JA
1277/*
1278 * Check whether this bio extends beyond the end of the device.
1279 */
1280static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1281{
1282 sector_t maxsector;
1283
1284 if (!nr_sectors)
1285 return 0;
1286
1287 /* Test device or partition size, when known. */
1288 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1289 if (maxsector) {
1290 sector_t sector = bio->bi_sector;
1291
1292 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1293 /*
1294 * This may well happen - the kernel calls bread()
1295 * without checking the size of the device, e.g., when
1296 * mounting a device.
1297 */
1298 handle_bad_sector(bio);
1299 return 1;
1300 }
1301 }
1302
1303 return 0;
1304}
1305
1da177e4
LT
1306/**
1307 * generic_make_request: hand a buffer to its device driver for I/O
1308 * @bio: The bio describing the location in memory and on the device.
1309 *
1310 * generic_make_request() is used to make I/O requests of block
1311 * devices. It is passed a &struct bio, which describes the I/O that needs
1312 * to be done.
1313 *
1314 * generic_make_request() does not return any status. The
1315 * success/failure status of the request, along with notification of
1316 * completion, is delivered asynchronously through the bio->bi_end_io
 1317 * function described (one day) elsewhere.
1318 *
1319 * The caller of generic_make_request must make sure that bi_io_vec
1320 * are set to describe the memory buffer, and that bi_dev and bi_sector are
1321 * set to describe the device address, and the
1322 * bi_end_io and optionally bi_private are set to describe how
1323 * completion notification should be signaled.
1324 *
1325 * generic_make_request and the drivers it calls may use bi_next if this
1326 * bio happens to be merged with someone else, and may change bi_dev and
1327 * bi_sector for remaps as it sees fit. So the values of these fields
1328 * should NOT be depended on after the call to generic_make_request.
1329 */
d89d8796 1330static inline void __generic_make_request(struct bio *bio)
1da177e4 1331{
165125e1 1332 struct request_queue *q;
5ddfe969 1333 sector_t old_sector;
1da177e4 1334 int ret, nr_sectors = bio_sectors(bio);
2056a782 1335 dev_t old_dev;
51fd77bd 1336 int err = -EIO;
1da177e4
LT
1337
1338 might_sleep();
1da177e4 1339
c07e2b41
JA
1340 if (bio_check_eod(bio, nr_sectors))
1341 goto end_io;
1da177e4
LT
1342
1343 /*
1344 * Resolve the mapping until finished. (drivers are
1345 * still free to implement/resolve their own stacking
1346 * by explicitly returning 0)
1347 *
1348 * NOTE: we don't repeat the blk_size check for each new device.
1349 * Stacking drivers are expected to know what they are doing.
1350 */
5ddfe969 1351 old_sector = -1;
2056a782 1352 old_dev = 0;
1da177e4
LT
1353 do {
1354 char b[BDEVNAME_SIZE];
1355
1356 q = bdev_get_queue(bio->bi_bdev);
1357 if (!q) {
1358 printk(KERN_ERR
1359 "generic_make_request: Trying to access "
1360 "nonexistent block-device %s (%Lu)\n",
1361 bdevname(bio->bi_bdev, b),
1362 (long long) bio->bi_sector);
1363end_io:
51fd77bd 1364 bio_endio(bio, err);
1da177e4
LT
1365 break;
1366 }
1367
4fa253f3 1368 if (unlikely(nr_sectors > q->max_hw_sectors)) {
6728cb0e 1369 printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1da177e4
LT
1370 bdevname(bio->bi_bdev, b),
1371 bio_sectors(bio),
1372 q->max_hw_sectors);
1373 goto end_io;
1374 }
1375
fde6ad22 1376 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
1da177e4
LT
1377 goto end_io;
1378
c17bb495
AM
1379 if (should_fail_request(bio))
1380 goto end_io;
1381
1da177e4
LT
1382 /*
1383 * If this device has partitions, remap block n
1384 * of partition p to block n+start(p) of the disk.
1385 */
1386 blk_partition_remap(bio);
1387
5ddfe969 1388 if (old_sector != -1)
4fa253f3 1389 blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
5ddfe969 1390 old_sector);
2056a782
JA
1391
1392 blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
1393
5ddfe969 1394 old_sector = bio->bi_sector;
2056a782
JA
1395 old_dev = bio->bi_bdev->bd_dev;
1396
c07e2b41
JA
1397 if (bio_check_eod(bio, nr_sectors))
1398 goto end_io;
51fd77bd
JA
1399 if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
1400 err = -EOPNOTSUPP;
1401 goto end_io;
1402 }
5ddfe969 1403
1da177e4
LT
1404 ret = q->make_request_fn(q, bio);
1405 } while (ret);
1406}
1407
d89d8796
NB
1408/*
1409 * We only want one ->make_request_fn to be active at a time,
1410 * else stack usage with stacked devices could be a problem.
1411 * So use current->bio_{list,tail} to keep a list of requests
 1412 * submitted by a make_request_fn function.
1413 * current->bio_tail is also used as a flag to say if
1414 * generic_make_request is currently active in this task or not.
1415 * If it is NULL, then no make_request is active. If it is non-NULL,
1416 * then a make_request is active, and new requests should be added
1417 * at the tail
1418 */
1419void generic_make_request(struct bio *bio)
1420{
1421 if (current->bio_tail) {
1422 /* make_request is active */
1423 *(current->bio_tail) = bio;
1424 bio->bi_next = NULL;
1425 current->bio_tail = &bio->bi_next;
1426 return;
1427 }
1428 /* following loop may be a bit non-obvious, and so deserves some
1429 * explanation.
1430 * Before entering the loop, bio->bi_next is NULL (as all callers
1431 * ensure that) so we have a list with a single bio.
1432 * We pretend that we have just taken it off a longer list, so
1433 * we assign bio_list to the next (which is NULL) and bio_tail
1434 * to &bio_list, thus initialising the bio_list of new bios to be
1435 * added. __generic_make_request may indeed add some more bios
1436 * through a recursive call to generic_make_request. If it
1437 * did, we find a non-NULL value in bio_list and re-enter the loop
1438 * from the top. In this case we really did just take the bio
 1439 * off the top of the list (no pretending) and so fix up bio_list and
1440 * bio_tail or bi_next, and call into __generic_make_request again.
1441 *
1442 * The loop was structured like this to make only one call to
1443 * __generic_make_request (which is important as it is large and
1444 * inlined) and to keep the structure simple.
1445 */
1446 BUG_ON(bio->bi_next);
1447 do {
1448 current->bio_list = bio->bi_next;
1449 if (bio->bi_next == NULL)
1450 current->bio_tail = &current->bio_list;
1451 else
1452 bio->bi_next = NULL;
1453 __generic_make_request(bio);
1454 bio = current->bio_list;
1455 } while (bio);
1456 current->bio_tail = NULL; /* deactivate */
1457}
1da177e4
LT
1458EXPORT_SYMBOL(generic_make_request);
1459
1460/**
1461 * submit_bio: submit a bio to the block device layer for I/O
1462 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1463 * @bio: The &struct bio which describes the I/O
1464 *
1465 * submit_bio() is very similar in purpose to generic_make_request(), and
1466 * uses that function to do most of the work. Both are fairly rough
1467 * interfaces, @bio must be presetup and ready for I/O.
1468 *
1469 */
1470void submit_bio(int rw, struct bio *bio)
1471{
1472 int count = bio_sectors(bio);
1473
22e2c507 1474 bio->bi_rw |= rw;
1da177e4 1475
bf2de6f5
JA
1476 /*
1477 * If it's a regular read/write or a barrier with data attached,
1478 * go through the normal accounting stuff before submission.
1479 */
1480 if (!bio_empty_barrier(bio)) {
1481
1482 BIO_BUG_ON(!bio->bi_size);
1483 BIO_BUG_ON(!bio->bi_io_vec);
1484
1485 if (rw & WRITE) {
1486 count_vm_events(PGPGOUT, count);
1487 } else {
1488 task_io_account_read(bio->bi_size);
1489 count_vm_events(PGPGIN, count);
1490 }
1491
1492 if (unlikely(block_dump)) {
1493 char b[BDEVNAME_SIZE];
1494 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
ba25f9dc 1495 current->comm, task_pid_nr(current),
bf2de6f5
JA
1496 (rw & WRITE) ? "WRITE" : "READ",
1497 (unsigned long long)bio->bi_sector,
6728cb0e 1498 bdevname(bio->bi_bdev, b));
bf2de6f5 1499 }
1da177e4
LT
1500 }
1501
1502 generic_make_request(bio);
1503}
1da177e4
LT
1504EXPORT_SYMBOL(submit_bio);
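/*
 * Editor's sketch, compiled out: a minimal synchronous read of one page via
 * submit_bio(), ending up in generic_make_request() as described above. The
 * end_io callback plus on-stack completion is the standard pattern; the
 * sector (0) and size (one page) are arbitrary.
 */
#if 0
static void example_read_end_io(struct bio *bio, int error)
{
        complete((struct completion *)bio->bi_private);
}

static int example_read_page(struct block_device *bdev, struct page *page)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int err;

        if (!bio)
                return -ENOMEM;

        bio->bi_bdev = bdev;
        bio->bi_sector = 0;                     /* first sector of the device */
        bio_add_page(bio, page, PAGE_SIZE, 0);
        bio->bi_end_io = example_read_end_io;
        bio->bi_private = &done;

        submit_bio(READ, bio);
        wait_for_completion(&done);

        err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
        bio_put(bio);
        return err;
}
#endif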
1505
3bcddeac
KU
1506/**
1507 * __end_that_request_first - end I/O on a request
1508 * @req: the request being processed
5450d3e1 1509 * @error: 0 for success, < 0 for error
3bcddeac
KU
1510 * @nr_bytes: number of bytes to complete
1511 *
1512 * Description:
1513 * Ends I/O on a number of bytes attached to @req, and sets it up
1514 * for the next range of segments (if any) in the cluster.
1515 *
1516 * Return:
1517 * 0 - we are done with this request, call end_that_request_last()
1518 * 1 - still buffers pending for this request
1519 **/
5450d3e1 1520static int __end_that_request_first(struct request *req, int error,
1da177e4
LT
1521 int nr_bytes)
1522{
5450d3e1 1523 int total_bytes, bio_nbytes, next_idx = 0;
1da177e4
LT
1524 struct bio *bio;
1525
2056a782
JA
1526 blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
1527
1da177e4
LT
1528 /*
1529 * for a REQ_BLOCK_PC request, we want to carry any eventual
1530 * sense key with us all the way through
1531 */
1532 if (!blk_pc_request(req))
1533 req->errors = 0;
1534
6728cb0e
JA
1535 if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
1536 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
1da177e4
LT
1537 req->rq_disk ? req->rq_disk->disk_name : "?",
1538 (unsigned long long)req->sector);
1539 }
1540
d72d904a 1541 if (blk_fs_request(req) && req->rq_disk) {
28f13702 1542 struct hd_struct *part = get_part(req->rq_disk, req->sector);
a362357b
JA
1543 const int rw = rq_data_dir(req);
1544
28f13702
JA
1545 all_stat_add(req->rq_disk, part, sectors[rw],
1546 nr_bytes >> 9, req->sector);
d72d904a
JA
1547 }
1548
1da177e4
LT
1549 total_bytes = bio_nbytes = 0;
1550 while ((bio = req->bio) != NULL) {
1551 int nbytes;
1552
bf2de6f5
JA
1553 /*
1554 * For an empty barrier request, the low level driver must
1555 * store a potential error location in ->sector. We pass
1556 * that back up in ->bi_sector.
1557 */
1558 if (blk_empty_barrier(req))
1559 bio->bi_sector = req->sector;
1560
1da177e4
LT
1561 if (nr_bytes >= bio->bi_size) {
1562 req->bio = bio->bi_next;
1563 nbytes = bio->bi_size;
5bb23a68 1564 req_bio_endio(req, bio, nbytes, error);
1da177e4
LT
1565 next_idx = 0;
1566 bio_nbytes = 0;
1567 } else {
1568 int idx = bio->bi_idx + next_idx;
1569
1570 if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
1571 blk_dump_rq_flags(req, "__end_that");
6728cb0e 1572 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
24c03d47 1573 __func__, bio->bi_idx, bio->bi_vcnt);
1da177e4
LT
1574 break;
1575 }
1576
1577 nbytes = bio_iovec_idx(bio, idx)->bv_len;
1578 BIO_BUG_ON(nbytes > bio->bi_size);
1579
1580 /*
1581 * not a complete bvec done
1582 */
1583 if (unlikely(nbytes > nr_bytes)) {
1584 bio_nbytes += nr_bytes;
1585 total_bytes += nr_bytes;
1586 break;
1587 }
1588
1589 /*
1590 * advance to the next vector
1591 */
1592 next_idx++;
1593 bio_nbytes += nbytes;
1594 }
1595
1596 total_bytes += nbytes;
1597 nr_bytes -= nbytes;
1598
6728cb0e
JA
1599 bio = req->bio;
1600 if (bio) {
1da177e4
LT
1601 /*
1602 * end more in this run, or just return 'not-done'
1603 */
1604 if (unlikely(nr_bytes <= 0))
1605 break;
1606 }
1607 }
1608
1609 /*
1610 * completely done
1611 */
1612 if (!req->bio)
1613 return 0;
1614
1615 /*
1616 * if the request wasn't completed, update state
1617 */
1618 if (bio_nbytes) {
5bb23a68 1619 req_bio_endio(req, bio, bio_nbytes, error);
1da177e4
LT
1620 bio->bi_idx += next_idx;
1621 bio_iovec(bio)->bv_offset += nr_bytes;
1622 bio_iovec(bio)->bv_len -= nr_bytes;
1623 }
1624
1625 blk_recalc_rq_sectors(req, total_bytes >> 9);
1626 blk_recalc_rq_segments(req);
1627 return 1;
1628}
1629
ff856bad
JA
1630/*
1631 * splice the completion data to a local structure and hand off to
1632 * process_completion_queue() to complete the requests
1633 */
1634static void blk_done_softirq(struct softirq_action *h)
1635{
626ab0e6 1636 struct list_head *cpu_list, local_list;
ff856bad
JA
1637
1638 local_irq_disable();
1639 cpu_list = &__get_cpu_var(blk_cpu_done);
626ab0e6 1640 list_replace_init(cpu_list, &local_list);
ff856bad
JA
1641 local_irq_enable();
1642
1643 while (!list_empty(&local_list)) {
6728cb0e 1644 struct request *rq;
ff856bad 1645
6728cb0e 1646 rq = list_entry(local_list.next, struct request, donelist);
ff856bad
JA
1647 list_del_init(&rq->donelist);
1648 rq->q->softirq_done_fn(rq);
1649 }
1650}
1651
6728cb0e
JA
1652static int __cpuinit blk_cpu_notify(struct notifier_block *self,
1653 unsigned long action, void *hcpu)
ff856bad
JA
1654{
1655 /*
1656 * If a CPU goes away, splice its entries to the current CPU
1657 * and trigger a run of the softirq
1658 */
8bb78442 1659 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
ff856bad
JA
1660 int cpu = (unsigned long) hcpu;
1661
1662 local_irq_disable();
1663 list_splice_init(&per_cpu(blk_cpu_done, cpu),
1664 &__get_cpu_var(blk_cpu_done));
1665 raise_softirq_irqoff(BLOCK_SOFTIRQ);
1666 local_irq_enable();
1667 }
1668
1669 return NOTIFY_OK;
1670}
1671
1672
db47d475 1673static struct notifier_block blk_cpu_notifier __cpuinitdata = {
ff856bad
JA
1674 .notifier_call = blk_cpu_notify,
1675};
1676
ff856bad
JA
1677/**
1678 * blk_complete_request - end I/O on a request
1679 * @req: the request being processed
1680 *
1681 * Description:
1682 * Ends all I/O on a request. It does not handle partial completions,
d6e05edc 1683 * unless the driver actually implements this in its completion callback
4fa253f3 1684 * through requeueing. The actual completion happens out-of-order,
ff856bad
JA
1685 * through a softirq handler. The user must have registered a completion
1686 * callback through blk_queue_softirq_done().
1687 **/
1688
1689void blk_complete_request(struct request *req)
1690{
1691 struct list_head *cpu_list;
1692 unsigned long flags;
1693
1694 BUG_ON(!req->q->softirq_done_fn);
6728cb0e 1695
ff856bad
JA
1696 local_irq_save(flags);
1697
1698 cpu_list = &__get_cpu_var(blk_cpu_done);
1699 list_add_tail(&req->donelist, cpu_list);
1700 raise_softirq_irqoff(BLOCK_SOFTIRQ);
1701
1702 local_irq_restore(flags);
1703}
ff856bad 1704EXPORT_SYMBOL(blk_complete_request);
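/*
 * Editor's sketch, compiled out: the split completion scheme this function
 * implements. The driver registers a softirq completion handler at init time
 * and calls blk_complete_request() from its hard IRQ handler; the heavy
 * lifting then runs later in BLOCK_SOFTIRQ context.
 */
#if 0
static void example_softirq_done(struct request *rq)
{
        /* runs in softirq context, no queue lock held */
        blk_end_request(rq, 0, blk_rq_bytes(rq));
}

static void example_setup_completion(struct request_queue *q)
{
        blk_queue_softirq_done(q, example_softirq_done);
}

static void example_hw_irq(struct request *rq)
{
        blk_complete_request(rq);       /* cheap; just raises BLOCK_SOFTIRQ */
}
#endif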
6728cb0e 1705
1da177e4
LT
1706/*
1707 * queue lock must be held
1708 */
5450d3e1 1709static void end_that_request_last(struct request *req, int error)
1da177e4
LT
1710{
1711 struct gendisk *disk = req->rq_disk;
8ffdc655 1712
b8286239
KU
1713 if (blk_rq_tagged(req))
1714 blk_queue_end_tag(req->q, req);
1715
1716 if (blk_queued_rq(req))
1717 blkdev_dequeue_request(req);
1da177e4
LT
1718
1719 if (unlikely(laptop_mode) && blk_fs_request(req))
1720 laptop_io_completion();
1721
fd0ff8aa
JA
1722 /*
1723 * Account IO completion. bar_rq isn't accounted as a normal
1724 * IO on queueing nor completion. Accounting the containing
1725 * request is enough.
1726 */
1727 if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
1da177e4 1728 unsigned long duration = jiffies - req->start_time;
a362357b 1729 const int rw = rq_data_dir(req);
6f2576af 1730 struct hd_struct *part = get_part(disk, req->sector);
a362357b 1731
28f13702
JA
1732 __all_stat_inc(disk, part, ios[rw], req->sector);
1733 __all_stat_add(disk, part, ticks[rw], duration, req->sector);
1da177e4
LT
1734 disk_round_stats(disk);
1735 disk->in_flight--;
6f2576af
JM
1736 if (part) {
1737 part_round_stats(part);
1738 part->in_flight--;
1739 }
1da177e4 1740 }
b8286239 1741
1da177e4 1742 if (req->end_io)
8ffdc655 1743 req->end_io(req, error);
b8286239
KU
1744 else {
1745 if (blk_bidi_rq(req))
1746 __blk_put_request(req->next_rq->q, req->next_rq);
1747
1da177e4 1748 __blk_put_request(req->q, req);
b8286239 1749 }
1da177e4
LT
1750}
1751
a0cd1285 1752static inline void __end_request(struct request *rq, int uptodate,
9e6e39f2 1753 unsigned int nr_bytes)
1da177e4 1754{
9e6e39f2
KU
1755 int error = 0;
1756
1757 if (uptodate <= 0)
1758 error = uptodate ? uptodate : -EIO;
1759
1760 __blk_end_request(rq, error, nr_bytes);
1da177e4
LT
1761}
1762
3b11313a
KU
1763/**
1764 * blk_rq_bytes - Returns bytes left to complete in the entire request
5d87a052 1765 * @rq: the request being processed
3b11313a
KU
1766 **/
1767unsigned int blk_rq_bytes(struct request *rq)
a0cd1285
JA
1768{
1769 if (blk_fs_request(rq))
1770 return rq->hard_nr_sectors << 9;
1771
1772 return rq->data_len;
1773}
3b11313a
KU
1774EXPORT_SYMBOL_GPL(blk_rq_bytes);
1775
1776/**
1777 * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
5d87a052 1778 * @rq: the request being processed
3b11313a
KU
1779 **/
1780unsigned int blk_rq_cur_bytes(struct request *rq)
1781{
1782 if (blk_fs_request(rq))
1783 return rq->current_nr_sectors << 9;
1784
1785 if (rq->bio)
1786 return rq->bio->bi_size;
1787
1788 return rq->data_len;
1789}
1790EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
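/*
 * Example (not part of blk-core.c): these two helpers typically provide the
 * byte count handed to the completion helpers further down. A sketch only,
 * assuming a hypothetical driver that completes one segment at a time.
 */
#include <linux/blkdev.h>

static int mydrv_complete_segment(struct request *rq)
{
	unsigned int seg = blk_rq_cur_bytes(rq);	/* current segment only */

	pr_debug("completing %u of %u bytes\n", seg, blk_rq_bytes(rq));

	/* returns 1 while the request still has pending buffers */
	return blk_end_request(rq, 0, seg);
}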
a0cd1285
JA
1791
1792/**
1793 * end_queued_request - end all I/O on a queued request
1794 * @rq: the request being processed
 1795 * @uptodate: 1 for success, 0 for a generic failure (-EIO), < 0 for a specific error
1796 *
1797 * Description:
1798 * Ends all I/O on a request, and removes it from the block layer queues.
1799 * Not suitable for normal IO completion, unless the driver still has
1800 * the request attached to the block layer.
1801 *
1802 **/
1803void end_queued_request(struct request *rq, int uptodate)
1804{
9e6e39f2 1805 __end_request(rq, uptodate, blk_rq_bytes(rq));
a0cd1285
JA
1806}
1807EXPORT_SYMBOL(end_queued_request);
1808
1809/**
1810 * end_dequeued_request - end all I/O on a dequeued request
1811 * @rq: the request being processed
 1812 * @uptodate: 1 for success, 0 for a generic failure (-EIO), < 0 for a specific error
1813 *
1814 * Description:
1815 * Ends all I/O on a request. The request must already have been
1816 * dequeued using blkdev_dequeue_request(), as is normally the case
1817 * for most drivers.
1818 *
1819 **/
1820void end_dequeued_request(struct request *rq, int uptodate)
1821{
9e6e39f2 1822 __end_request(rq, uptodate, blk_rq_bytes(rq));
a0cd1285
JA
1823}
1824EXPORT_SYMBOL(end_dequeued_request);
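/*
 * Example (not part of blk-core.c): a hedged sketch of the difference
 * between the two helpers above. The mydrv_* error paths are hypothetical;
 * the caller is assumed to hold the queue lock, since both helpers end up
 * in __blk_end_request().
 */

/* rq is still sitting on the queue: this ends it and dequeues it */
static void mydrv_fail_queued(struct request *rq)
{
	end_queued_request(rq, -EIO);
}

/* rq was already taken off the queue with blkdev_dequeue_request() */
static void mydrv_fail_inflight(struct request *rq)
{
	end_dequeued_request(rq, 0);	/* 0 == not uptodate, becomes -EIO */
}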
1825
1826
1827/**
1828 * end_request - end I/O on the current segment of the request
8f731f7d 1829 * @req: the request being processed
a0cd1285
JA
 1830 * @uptodate: 1 for success, 0 for a generic failure (-EIO), < 0 for a specific error
1831 *
1832 * Description:
1833 * Ends I/O on the current segment of a request. If that is the only
1834 * remaining segment, the request is also completed and freed.
1835 *
1836 * This is a remnant of how older block drivers handled IO completions.
1837 * Modern drivers typically end IO on the full request in one go, unless
1838 * they have a residual value to account for. For that case this function
1839 * isn't really useful, unless the residual just happens to be the
1840 * full current segment. In other words, don't use this function in new
1841 * code. Either use end_request_completely(), or the
1842 * end_that_request_chunk() (along with end_that_request_last()) for
1843 * partial completions.
1844 *
1845 **/
1846void end_request(struct request *req, int uptodate)
1847{
9e6e39f2 1848 __end_request(req, uptodate, req->hard_cur_sectors << 9);
a0cd1285 1849}
1da177e4
LT
1850EXPORT_SYMBOL(end_request);
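/*
 * Example (not part of blk-core.c): the legacy per-segment pattern the
 * description above refers to, roughly as an old-style request_fn driver
 * would have written it. A sketch only: mydrv_transfer_segment() is
 * hypothetical, and the queue lock is already held when a request_fn runs.
 */
static void mydrv_request(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		int ok = mydrv_transfer_segment(rq);	/* hypothetical, 1 on success */

		/* ends the current segment; frees rq after the last one */
		end_request(rq, ok);
	}
}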
1851
336cdb40 1852/**
e19a3ab0
KU
1853 * blk_end_io - Generic end_io function to complete a request.
1854 * @rq: the request being processed
1855 * @error: 0 for success, < 0 for error
e3a04fe3
KU
1856 * @nr_bytes: number of bytes to complete @rq
1857 * @bidi_bytes: number of bytes to complete @rq->next_rq
e19a3ab0
KU
1858 * @drv_callback: function called between completion of bios in the request
1859 * and completion of the request.
 1860 * If the callback returns non-zero, this helper returns without
1861 * completion of the request.
336cdb40
KU
1862 *
1863 * Description:
e3a04fe3 1864 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
336cdb40
KU
 1865 * If @rq has data left over, it is set up for the next range of segments.
1866 *
1867 * Return:
1868 * 0 - we are done with this request
e19a3ab0 1869 * 1 - this request is not freed yet, it still has pending buffers.
336cdb40 1870 **/
22b13210
JA
1871static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1872 unsigned int bidi_bytes,
1873 int (drv_callback)(struct request *))
336cdb40
KU
1874{
1875 struct request_queue *q = rq->q;
1876 unsigned long flags = 0UL;
336cdb40
KU
1877
1878 if (blk_fs_request(rq) || blk_pc_request(rq)) {
5450d3e1 1879 if (__end_that_request_first(rq, error, nr_bytes))
336cdb40 1880 return 1;
e3a04fe3
KU
1881
1882 /* Bidi request must be completed as a whole */
1883 if (blk_bidi_rq(rq) &&
5450d3e1 1884 __end_that_request_first(rq->next_rq, error, bidi_bytes))
e3a04fe3 1885 return 1;
336cdb40
KU
1886 }
1887
e19a3ab0
KU
1888 /* Special feature for tricky drivers */
1889 if (drv_callback && drv_callback(rq))
1890 return 1;
1891
336cdb40
KU
1892 add_disk_randomness(rq->rq_disk);
1893
1894 spin_lock_irqsave(q->queue_lock, flags);
b8286239 1895 end_that_request_last(rq, error);
336cdb40
KU
1896 spin_unlock_irqrestore(q->queue_lock, flags);
1897
1898 return 0;
1899}
e19a3ab0
KU
1900
1901/**
1902 * blk_end_request - Helper function for drivers to complete the request.
1903 * @rq: the request being processed
1904 * @error: 0 for success, < 0 for error
1905 * @nr_bytes: number of bytes to complete
1906 *
1907 * Description:
1908 * Ends I/O on a number of bytes attached to @rq.
 1909 * If @rq has data left over, it is set up for the next range of segments.
1910 *
1911 * Return:
1912 * 0 - we are done with this request
1913 * 1 - still buffers pending for this request
1914 **/
22b13210 1915int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
e19a3ab0 1916{
e3a04fe3 1917 return blk_end_io(rq, error, nr_bytes, 0, NULL);
e19a3ab0 1918}
336cdb40
KU
1919EXPORT_SYMBOL_GPL(blk_end_request);
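/*
 * Example (not part of blk-core.c): a sketch of the leftover case mentioned
 * in the description, assuming a hypothetical driver where 'bytes' is
 * whatever the hardware reports as transferred and mydrv_queue_remainder()
 * resubmits the rest.
 */
static void mydrv_partial_done(struct request *rq, unsigned int bytes)
{
	/* no queue lock needed: blk_end_request() takes it internally */
	if (blk_end_request(rq, 0, bytes)) {
		/*
		 * Leftover: rq is not freed and has been set up for the
		 * remaining segments, so hand the rest back to the hardware.
		 */
		mydrv_queue_remainder(rq);	/* hypothetical */
	}
}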
1920
1921/**
1922 * __blk_end_request - Helper function for drivers to complete the request.
1923 * @rq: the request being processed
1924 * @error: 0 for success, < 0 for error
1925 * @nr_bytes: number of bytes to complete
1926 *
1927 * Description:
 1928 * Must be called with the queue lock held, unlike blk_end_request().
1929 *
1930 * Return:
1931 * 0 - we are done with this request
1932 * 1 - still buffers pending for this request
1933 **/
22b13210 1934int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
336cdb40 1935{
336cdb40 1936 if (blk_fs_request(rq) || blk_pc_request(rq)) {
5450d3e1 1937 if (__end_that_request_first(rq, error, nr_bytes))
336cdb40
KU
1938 return 1;
1939 }
1940
1941 add_disk_randomness(rq->rq_disk);
1942
b8286239 1943 end_that_request_last(rq, error);
336cdb40
KU
1944
1945 return 0;
1946}
1947EXPORT_SYMBOL_GPL(__blk_end_request);
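/*
 * Example (not part of blk-core.c): a sketch of the lock-held variant,
 * e.g. failing unsupported requests straight from a request_fn, where the
 * queue lock is already held. mydrv_start_io() is hypothetical.
 */
static void mydrv_do_request(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			/* fail it in place; the queue lock is already held */
			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
			continue;
		}
		blkdev_dequeue_request(rq);
		mydrv_start_io(rq);		/* hypothetical */
	}
}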
1948
e3a04fe3
KU
1949/**
1950 * blk_end_bidi_request - Helper function for drivers to complete bidi request.
1951 * @rq: the bidi request being processed
1952 * @error: 0 for success, < 0 for error
1953 * @nr_bytes: number of bytes to complete @rq
1954 * @bidi_bytes: number of bytes to complete @rq->next_rq
1955 *
1956 * Description:
1957 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1958 *
1959 * Return:
1960 * 0 - we are done with this request
1961 * 1 - still buffers pending for this request
1962 **/
22b13210
JA
1963int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
1964 unsigned int bidi_bytes)
e3a04fe3
KU
1965{
1966 return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
1967}
1968EXPORT_SYMBOL_GPL(blk_end_bidi_request);
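/*
 * Example (not part of blk-core.c): a sketch for the bidirectional case.
 * The reply direction lives in rq->next_rq, and end_that_request_last()
 * drops that request as part of the completion. Hypothetical driver.
 */
static void mydrv_bidi_done(struct request *rq, int error)
{
	unsigned int out_bytes = blk_rq_bytes(rq);		/* data-out side */
	unsigned int in_bytes  = blk_rq_bytes(rq->next_rq);	/* data-in side */

	/* both directions must be completed as a whole */
	blk_end_bidi_request(rq, error, out_bytes, in_bytes);
}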
1969
e19a3ab0
KU
1970/**
1971 * blk_end_request_callback - Special helper function for tricky drivers
1972 * @rq: the request being processed
1973 * @error: 0 for success, < 0 for error
1974 * @nr_bytes: number of bytes to complete
1975 * @drv_callback: function called between completion of bios in the request
1976 * and completion of the request.
 1977 * If the callback returns non-zero, this helper returns without
1978 * completion of the request.
1979 *
1980 * Description:
1981 * Ends I/O on a number of bytes attached to @rq.
 1982 * If @rq has data left over, it is set up for the next range of segments.
1983 *
1984 * This special helper function is used only for existing tricky drivers.
1985 * (e.g. cdrom_newpc_intr() of ide-cd)
1986 * This interface will be removed when such drivers are rewritten.
 1988 * Don't use this interface anywhere else.
1988 *
1989 * Return:
1990 * 0 - we are done with this request
1991 * 1 - this request is not freed yet.
1992 * this request still has pending buffers or
1993 * the driver doesn't want to finish this request yet.
1994 **/
22b13210
JA
1995int blk_end_request_callback(struct request *rq, int error,
1996 unsigned int nr_bytes,
e19a3ab0
KU
1997 int (drv_callback)(struct request *))
1998{
e3a04fe3 1999 return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
e19a3ab0
KU
2000}
2001EXPORT_SYMBOL_GPL(blk_end_request_callback);
2002
86db1e29
JA
2003void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2004 struct bio *bio)
1da177e4 2005{
4aff5e23
JA
2006 /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
2007 rq->cmd_flags |= (bio->bi_rw & 3);
1da177e4
LT
2008
2009 rq->nr_phys_segments = bio_phys_segments(q, bio);
2010 rq->nr_hw_segments = bio_hw_segments(q, bio);
2011 rq->current_nr_sectors = bio_cur_sectors(bio);
2012 rq->hard_cur_sectors = rq->current_nr_sectors;
2013 rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
2014 rq->buffer = bio_data(bio);
0e75f906 2015 rq->data_len = bio->bi_size;
1da177e4
LT
2016
2017 rq->bio = rq->biotail = bio;
1da177e4 2018
66846572
N
2019 if (bio->bi_bdev)
2020 rq->rq_disk = bio->bi_bdev->bd_disk;
2021}
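/*
 * Example (not part of blk-core.c): an illustration of what this helper is
 * for, sketching a blk_rq_map_kern()-style call sequence. Error handling is
 * omitted and the helper name is hypothetical; this is not a verbatim copy
 * of that code.
 */
static struct request *mydrv_build_kern_rq(struct request_queue *q,
					    void *buffer, unsigned int len)
{
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
	struct bio *bio = bio_map_kern(q, buffer, len, GFP_KERNEL);

	/* derive rq's sector/segment/length fields from the bio */
	blk_rq_bio_prep(q, rq, bio);
	/* rq->buffer, rq->data_len, rq->nr_sectors etc. now mirror the bio */
	return rq;
}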
1da177e4
LT
2022
2023int kblockd_schedule_work(struct work_struct *work)
2024{
2025 return queue_work(kblockd_workqueue, work);
2026}
1da177e4
LT
2027EXPORT_SYMBOL(kblockd_schedule_work);
2028
19a75d83 2029void kblockd_flush_work(struct work_struct *work)
1da177e4 2030{
28e53bdd 2031 cancel_work_sync(work);
1da177e4 2032}
19a75d83 2033EXPORT_SYMBOL(kblockd_flush_work);
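/*
 * Example (not part of blk-core.c): deferring work to the kblockd
 * workqueue, assuming a hypothetical mydrv work item.
 */
#include <linux/workqueue.h>

static void mydrv_background(struct work_struct *work)
{
	/* runs in process context on a kblockd worker thread */
}

static DECLARE_WORK(mydrv_work, mydrv_background);

/* from atomic context:	kblockd_schedule_work(&mydrv_work);	*/
/* on teardown:		kblockd_flush_work(&mydrv_work);	*/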
1da177e4
LT
2034
2035int __init blk_dev_init(void)
2036{
ff856bad
JA
2037 int i;
2038
1da177e4
LT
2039 kblockd_workqueue = create_workqueue("kblockd");
2040 if (!kblockd_workqueue)
2041 panic("Failed to create kblockd\n");
2042
2043 request_cachep = kmem_cache_create("blkdev_requests",
20c2df83 2044 sizeof(struct request), 0, SLAB_PANIC, NULL);
1da177e4 2045
8324aa91 2046 blk_requestq_cachep = kmem_cache_create("blkdev_queue",
165125e1 2047 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1da177e4 2048
0a945022 2049 for_each_possible_cpu(i)
ff856bad
JA
2050 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
2051
2052 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
5a67e4c5 2053 register_hotcpu_notifier(&blk_cpu_notifier);
ff856bad 2054
d38ecf93 2055 return 0;
1da177e4 2056}
1da177e4 2057