/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"
#include "blk-mq.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);
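
/*
 * The boot string follows the generic fault-attr format parsed by
 * setup_fault_attr(), "<interval>,<probability>,<space>,<times>"
 * (see Documentation/fault-injection). For example, booting with
 *
 *	fail_io_timeout=1,100,0,-1
 *
 * should fake a timeout on every eligible request, once the per-disk
 * flag below has been set.
 */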
21 | ||
22 | int blk_should_fake_timeout(struct request_queue *q) | |
23 | { | |
24 | if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) | |
25 | return 0; | |
26 | ||
27 | return should_fail(&fail_io_timeout, 1); | |
28 | } | |
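
/*
 * Callers on the completion path (e.g. blk_complete_request()) check
 * this and simply drop the completion when it returns true, so the
 * request is left to hit its timeout as if the hardware had never
 * answered.
 */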
29 | ||
30 | static int __init fail_io_timeout_debugfs(void) | |
31 | { | |
32 | struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout", | |
33 | NULL, &fail_io_timeout); | |
34 | ||
35 | return PTR_ERR_OR_ZERO(dir); | |
36 | } | |
37 | ||
38 | late_initcall(fail_io_timeout_debugfs); | |
39 | ||
40 | ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr, | |
41 | char *buf) | |
42 | { | |
43 | struct gendisk *disk = dev_to_disk(dev); | |
44 | int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags); | |
45 | ||
46 | return sprintf(buf, "%d\n", set != 0); | |
47 | } | |
48 | ||
49 | ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr, | |
50 | const char *buf, size_t count) | |
51 | { | |
52 | struct gendisk *disk = dev_to_disk(dev); | |
53 | int val; | |
54 | ||
55 | if (count) { | |
56 | struct request_queue *q = disk->queue; | |
57 | char *p = (char *) buf; | |
58 | ||
59 | val = simple_strtoul(p, &p, 10); | |
60 | spin_lock_irq(q->queue_lock); | |
61 | if (val) | |
62 | queue_flag_set(QUEUE_FLAG_FAIL_IO, q); | |
63 | else | |
64 | queue_flag_clear(QUEUE_FLAG_FAIL_IO, q); | |
65 | spin_unlock_irq(q->queue_lock); | |
66 | } | |
67 | ||
68 | return count; | |
69 | } | |
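
/*
 * Usage sketch: these show/store handlers are wired up elsewhere (in
 * genhd.c) as the per-disk "io-timeout-fail" sysfs attribute, so fault
 * injection is typically armed with something like:
 *
 *	echo 1 > /sys/block/<disk>/io-timeout-fail
 */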
70 | ||
71 | #endif /* CONFIG_FAIL_IO_TIMEOUT */ | |
72 | ||
73 | /* | |
74 | * blk_delete_timer - Delete/cancel timer for a given function. | |
75 | * @req: request that we are canceling timer for | |
76 | * | |
77 | */ | |
78 | void blk_delete_timer(struct request *req) | |
79 | { | |
80 | list_del_init(&req->timeout_list); | |
81 | } | |
82 | ||
static void blk_rq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	if (q->rq_timed_out_fn)
		ret = q->rq_timed_out_fn(req);
	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		/*
		 * The LLD handles this for now, but in the future we
		 * could send a request message to abort the command,
		 * and move more of the generic SCSI EH code into the
		 * block layer.
		 */
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}
111 | ||
112 | static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, | |
113 | unsigned int *next_set) | |
114 | { | |
115 | if (time_after_eq(jiffies, rq->deadline)) { | |
116 | list_del_init(&rq->timeout_list); | |
117 | ||
118 | /* | |
119 | * Check if we raced with end io completion | |
120 | */ | |
121 | if (!blk_mark_rq_complete(rq)) | |
122 | blk_rq_timed_out(rq); | |
123 | } else if (!*next_set || time_after(*next_timeout, rq->deadline)) { | |
124 | *next_timeout = rq->deadline; | |
125 | *next_set = 1; | |
126 | } | |
127 | } | |
128 | ||
void blk_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long flags, next = 0;
	struct request *rq, *tmp;
	int next_set = 0;

	spin_lock_irqsave(q->queue_lock, flags);

	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
		blk_rq_check_expired(rq, &next, &next_set);

	if (next_set)
		mod_timer(&q->timeout, round_jiffies_up(next));

	spin_unlock_irqrestore(q->queue_lock, flags);
}
147 | ||
148 | /** | |
149 | * blk_abort_request -- Request request recovery for the specified command | |
150 | * @req: pointer to the request of interest | |
151 | * | |
152 | * This function requests that the block layer start recovery for the | |
153 | * request by deleting the timer and calling the q's timeout function. | |
154 | * LLDDs who implement their own error recovery MAY ignore the timeout | |
155 | * event if they generated blk_abort_req. Must hold queue lock. | |
156 | */ | |
157 | void blk_abort_request(struct request *req) | |
158 | { | |
159 | if (blk_mark_rq_complete(req)) | |
160 | return; | |
161 | ||
162 | if (req->q->mq_ops) { | |
163 | blk_mq_rq_timed_out(req, false); | |
164 | } else { | |
165 | blk_delete_timer(req); | |
166 | blk_rq_timed_out(req); | |
167 | } | |
168 | } | |
169 | EXPORT_SYMBOL_GPL(blk_abort_request); | |
170 | ||
unsigned long blk_rq_timeout(unsigned long timeout)
{
	unsigned long maxt;

	maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
	if (time_after(timeout, maxt))
		timeout = maxt;

	return timeout;
}

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req: request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	if (!q->mq_ops)
		lockdep_assert_held(q->queue_lock);

	/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
	if (!q->mq_ops && !q->rq_timed_out_fn)
		return;

	BUG_ON(!list_empty(&req->timeout_list));

	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	WRITE_ONCE(req->deadline, jiffies + req->timeout);

	/*
	 * Only the non-mq case needs to add the request to a protected list.
	 * For the mq case we simply scan the tag map.
	 */
	if (!q->mq_ops)
		list_add_tail(&req->timeout_list, &req->q->timeout_list);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to the next
	 * nearest second.
	 */
	expiry = blk_rq_timeout(round_jiffies_up(req->deadline));

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires)) {
		unsigned long diff = q->timeout.expires - expiry;

		/*
		 * Due to the timer slack added to group timers, the timer
		 * will often be a little in front of what we asked for.
		 * So apply some tolerance here too, otherwise we keep
		 * modifying the timer because the expiry for value X
		 * will be X + something.
		 */
		if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
			mod_timer(&q->timeout, expiry);
	}
}
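
/*
 * Usage sketch (assumes the legacy request_fn API, which lives outside
 * this file): a non-mq driver typically opts in to this machinery at
 * init time with something like
 *
 *	blk_queue_rq_timeout(q, 30 * HZ);
 *	blk_queue_rq_timed_out(q, my_timed_out_fn);
 *
 * where my_timed_out_fn is the driver's handler (hypothetical name)
 * returning one of the BLK_EH_* values dispatched in blk_rq_timed_out()
 * above.
 */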