/* block/blk-exec.c */
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>
19 /**
20 * blk_end_sync_rq - executes a completion event on a request
21 * @rq: request to complete
22 * @error: end I/O status of the request
23 */
24 static void blk_end_sync_rq(struct request *rq, int error)
25 {
26 struct completion *waiting = rq->end_io_data;
27
28 rq->end_io_data = NULL;
29
30 /*
31 * complete last, if this is a stack request the process (and thus
32 * the rq pointer) could be invalid right after this complete()
33 */
34 complete(waiting);
35 }
36
/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 * @done: I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;

	/* We take q->queue_lock with spin_lock_irq() below. */
	WARN_ON(irqs_disabled());
	/* Only passthrough (non-filesystem) requests may be executed here. */
	WARN_ON(rq->cmd_type == REQ_TYPE_FS);

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	if (q->mq_ops) {
		/* Do not touch rq after this call; completion may run at once. */
		blk_mq_sched_insert_request(rq, at_head, true, false, false);
		return;
	}

	spin_lock_irq(q->queue_lock);

	/*
	 * Check dying under queue_lock so the flag cannot flip between the
	 * test and the insertion below.
	 */
	if (unlikely(blk_queue_dying(q))) {
		/*
		 * Queue teardown: fail the request right here.  RQF_QUIET
		 * suppresses error logging; ending the request with -ENXIO
		 * invokes @done (per the Note above).
		 */
		rq->rq_flags |= RQF_QUIET;
		rq->errors = -ENXIO;
		__blk_end_request_all(rq, rq->errors);
		spin_unlock_irq(q->queue_lock);
		return;
	}

	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
87 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
88
/**
 * blk_execute_rq - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 *
 * Return: 0 on success, -EIO if the request completed with errors.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;
	unsigned long hang_check;

	/*
	 * Lend the request a zeroed on-stack sense buffer if the caller
	 * didn't supply one; it is detached again before we return so no
	 * pointer into this stack frame can escape.
	 */
	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	/* blk_end_sync_rq() will complete(&wait) when the request finishes. */
	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		/* Wake at half the watchdog period so we never look hung. */
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	if (rq->errors)
		err = -EIO;

	/* Undo the on-stack sense buffer substitution made above. */
	if (rq->sense == sense) {
		rq->sense = NULL;
		rq->sense_len = 0;
	}

	return err;
}
EXPORT_SYMBOL(blk_execute_rq);
134 EXPORT_SYMBOL(blk_execute_rq);