/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	int cpu;
	cpu = part_stat_lock();
	part_round_stats(cpu, &mdev->vdisk->part0);
	part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
	(void) cpu; /* The macro invocations above want the cpu argument, but I do not
		       like the compiler warning about cpu being assigned yet never used... */
	part_inc_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
	int rw = bio_data_dir(req->master_bio);
	unsigned long duration = jiffies - req->start_time;
	int cpu;
	cpu = part_stat_lock();
	part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration);
	part_round_stats(cpu, &mdev->vdisk->part0);
	part_dec_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}
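
/* Note: the part_stat_*() calls above update the per-cpu statistics of the
 * virtual disk's part0, which is what user space later reads back through
 * /proc/diskstats and /sys/block/<dev>/stat; tools like iostat work on a
 * drbd device because of this accounting. */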

static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
					 struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (!req)
		return NULL;

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
	req->w.mdev = mdev;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_sector;
	req->i.size = bio_src->bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);

	return req;
}

static void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}
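
/* Note: in this kernel generation, mempool_alloc() with GFP_NOIO (which
 * includes __GFP_WAIT) sleeps until a pool element becomes available rather
 * than return NULL, so the NULL check in drbd_req_new() is mostly defensive.
 * GFP_NOIO matters here: we are on the block I/O submission path and must
 * not recurse into I/O to satisfy the allocation. */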

/* rw is bio_data_dir(), only READ or WRITE */
static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
{
	const unsigned long s = req->rq_state;

	/* remove it from the transfer log.
	 * well, only if it had been there in the first
	 * place... if it had not (local only or conflicting
	 * and never sent), it should still be "empty" as
	 * initialized in drbd_req_new(), so we can list_del() it
	 * here unconditionally */
	list_del_init(&req->tl_requests);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (rw == WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */
		if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
			drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);

		if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
			drbd_set_in_sync(mdev, req->i.sector, req->i.size);

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_LOCAL_MASK) {
			if (get_ldev_if_state(mdev, D_FAILED)) {
				if (s & RQ_IN_ACT_LOG)
					drbd_al_complete_io(mdev, &req->i);
				put_ldev(mdev);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
					 "but my Disk seems to have failed :(\n",
					 (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	if (s & RQ_POSTPONED)
		drbd_restart_request(req);
	else
		drbd_req_free(req);
}

static void wake_all_senders(struct drbd_tconn *tconn) {
	wake_up(&tconn->sender_work.q_wait);
}

/* must hold resource->req_lock */
static void start_new_tl_epoch(struct drbd_tconn *tconn)
{
	tconn->current_tle_writes = 0;
	atomic_inc(&tconn->current_tle_nr);
	wake_all_senders(tconn);
}

void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m)
{
	bio_endio(m->bio, m->error);
	dec_ap_bio(mdev);
}
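
/* Background sketch: the transfer log is organized in "epochs", sets of
 * writes separated by P_BARRIER packets which the peer acknowledges as a
 * whole with P_BARRIER_ACK. Incrementing current_tle_nr above closes the
 * current epoch; the woken sender is then expected to emit a barrier before
 * transmitting any request tagged with the new epoch number (see the
 * BARRIER_ACKED handling in __req_mod() below). */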


static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_conf *mdev = req->w.mdev;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete.  */
	if (i->waiting)
		wake_up(&mdev->misc_wait);
}

static void maybe_wakeup_conflicting_requests(struct drbd_request *req)
{
	const unsigned long s = req->rq_state;
	if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
		return;
	if (req->i.waiting)
		/* Retry all conflicting peer requests.  */
		wake_up(&req->w.mdev->misc_wait);
}
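
/* For orientation: mdev->read_requests and mdev->write_requests are rb-tree
 * based interval trees (see drbd_interval.c) keyed by sector, holding every
 * request with a network part. They let us verify which request an answer
 * packet refers to, and detect writes that overlap (conflict) with writes
 * arriving from the peer. */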

static
void req_may_be_done(struct drbd_request *req)
{
	const unsigned long s = req->rq_state;
	struct drbd_conf *mdev = req->w.mdev;
	int rw = req->rq_state & RQ_WRITE ? WRITE : READ;

	/* req->master_bio still present means: Not yet completed.
	 *
	 * Unless this is RQ_POSTPONED, which will cause _req_is_done() to
	 * queue it on the retry workqueue instead of destroying it.
	 */
	if (req->master_bio && !(s & RQ_POSTPONED))
		return;

	/* Local still pending, even though master_bio is already completed?
	 * may happen for RQ_LOCAL_ABORTED requests. */
	if (s & RQ_LOCAL_PENDING)
		return;

	if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
		/* this is disconnected (local only) operation,
		 * or protocol A, B, or C P_BARRIER_ACK,
		 * or killed from the transfer log due to connection loss. */
		_req_is_done(mdev, req, rw);
	}
	/* else: network part and not DONE yet. that is
	 * protocol A, B, or C, barrier ack still pending... */
}

/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
static
void req_may_be_completed(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned long s = req->rq_state;
	struct drbd_conf *mdev = req->w.mdev;

	/* we must not complete the master bio, while it is
	 * still being processed by _drbd_send_zc_bio (drbd_send_dblock):
	 * not yet acknowledged by the peer,
	 * not yet completed by the local io subsystem.
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
		return;
	if (s & RQ_NET_QUEUED)
		return;
	if (s & RQ_NET_PENDING)
		return;

	if (req->master_bio) {
		int rw = bio_rw(req->master_bio);

		/* this is DATA_RECEIVED (remote read)
		 * or protocol C P_WRITE_ACK
		 * or protocol B P_RECV_ACK
		 * or protocol A "HANDED_OVER_TO_NETWORK" (SendAck)
		 * or canceled or failed,
		 * or killed from the transfer log due to connection loss.
		 */

		/*
		 * figure out whether to report success or failure.
		 *
		 * report success when at least one of the operations succeeded.
		 * or, to put it the other way,
		 * only report failure when both operations failed.
		 *
		 * what to do about the failures is handled elsewhere.
		 * what we need to do here is just: complete the master_bio.
		 *
		 * local completion error, if any, has been stored as ERR_PTR
		 * in private_bio within drbd_request_endio.
		 */
		int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
		int error = PTR_ERR(req->private_bio);

		/* remove the request from the conflict detection
		 * respective block_id verification hash */
		if (!drbd_interval_empty(&req->i)) {
			struct rb_root *root;

			if (rw == WRITE)
				root = &mdev->write_requests;
			else
				root = &mdev->read_requests;
			drbd_remove_request_interval(root, req);
		} else if (!(s & RQ_POSTPONED))
			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);

		/* Before we can signal completion to the upper layers,
		 * we may need to close the current transfer log epoch.
		 * We are within the request lock, so we can simply compare
		 * the request epoch number with the current transfer log
		 * epoch number.  If they match, increase the current_tle_nr,
		 * and reset the transfer log epoch write_cnt.
		 */
		if (rw == WRITE &&
		    req->epoch == atomic_read(&mdev->tconn->current_tle_nr))
			start_new_tl_epoch(mdev->tconn);

		/* Update disk stats */
		_drbd_end_io_acct(mdev, req);

		/* if READ failed,
		 * have it be pushed back to the retry work queue,
		 * so it will re-enter __drbd_make_request,
		 * and be re-assigned to a suitable local or remote path,
		 * or failed if we do not have access to good data anymore.
		 * READA may fail.
		 * WRITE should have used all available paths already.
		 */
		if (!ok && rw == READ)
			req->rq_state |= RQ_POSTPONED;

		if (!(req->rq_state & RQ_POSTPONED)) {
			m->error = ok ? 0 : (error ?: -EIO);
			m->bio = req->master_bio;
			req->master_bio = NULL;
		} else {
			/* Assert that this will be _req_is_done()
			 * with this very invocation. */
			/* FIXME:
			 * what about (RQ_LOCAL_PENDING | RQ_LOCAL_ABORTED)?
			 */
			D_ASSERT(!(s & RQ_LOCAL_PENDING));
			D_ASSERT((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE));
		}
	}
	req_may_be_done(req);
}

static void req_may_be_completed_not_susp(struct drbd_request *req, struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->w.mdev;

	if (!drbd_suspended(mdev))
		req_may_be_completed(req, m);
}

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 *  enforces that it is all in this one place, where it is easier to audit,
 *  it makes it obvious that whatever "event" "happens" to a request should
 *  happen "atomically" within the req_lock,
 *  and it enforces that we have to think in a very structured manner
 *  about the "events" that may happen to a request during its lifetime ...
 */
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->w.mdev;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case CREATED:
		break;
	 */

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request
		 * and from w_read_retry_remote */
		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
		req->rq_state |= RQ_NET_PENDING;
		rcu_read_lock();
		nc = rcu_dereference(mdev->tconn->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		inc_ap_pending(mdev);
		break;

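	/* For the flags just set: with replication protocol C, a write counts
	 * as stable only once the peer's P_WRITE_ACK arrives; with protocol B,
	 * the peer's P_RECV_ACK (data reached peer RAM) suffices; protocol A
	 * expects no ack at all, so handing the data to the TCP stack already
	 * "completes" the remote part, see HANDED_OVER_TO_NETWORK below. */
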
	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
		req->rq_state |= RQ_LOCAL_PENDING;
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			mdev->writ_cnt += req->i.size >> 9;
		else
			mdev->read_cnt += req->i.size >> 9;

		req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		req->rq_state &= ~RQ_LOCAL_PENDING;

		maybe_wakeup_conflicting_requests(req);
		req_may_be_completed_not_susp(req, m);
		break;

	case ABORT_DISK_IO:
		req->rq_state |= RQ_LOCAL_ABORTED;
		req_may_be_completed_not_susp(req, m);
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;

		__drbd_chk_io_error(mdev, false);
		maybe_wakeup_conflicting_requests(req);
		req_may_be_completed_not_susp(req, m);
		break;

	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail READA */
		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;
		req_may_be_completed_not_susp(req, m);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);

		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;

		D_ASSERT(!(req->rq_state & RQ_NET_MASK));

		__drbd_chk_io_error(mdev, false);
		req_may_be_completed_not_susp(req, m);
		break;

	case QUEUE_FOR_NET_READ:
		/* READ or READA, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* So we can verify the handle in the answer packet.
		 * Corresponding drbd_remove_request_interval is in
		 * req_may_be_completed() */
		D_ASSERT(drbd_interval_empty(&req->i));
		drbd_insert_interval(&mdev->read_requests, &req->i);

		set_bit(UNPLUG_REMOTE, &mdev->flags);

		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb = w_send_read_req;
		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* assert something? */
		/* from __drbd_make_request only */

		/* Corresponding drbd_remove_request_interval is in
		 * req_may_be_completed() */
		D_ASSERT(drbd_interval_empty(&req->i));
		drbd_insert_interval(&mdev->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * __drbd_make_request, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &mdev->flags);

		/* queue work item to send data */
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb = w_send_dblock;
		drbd_queue_work(&mdev->tconn->sender_work, &req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(mdev->tconn->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (mdev->tconn->current_tle_writes >= p)
			start_new_tl_epoch(mdev->tconn);

		break;

	case QUEUE_FOR_SEND_OOS:
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb = w_send_out_of_sync;
		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
		break;

	case READ_RETRY_REMOTE_CANCELED:
	case SEND_CANCELED:
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the worker queue */
		req->rq_state &= ~RQ_NET_QUEUED;
		/* if we did it right, tl_clear should be scheduled only after
		 * this, so this should not be necessary! */
		req_may_be_completed_not_susp(req, m);
		break;

	case HANDED_OVER_TO_NETWORK:
		/* assert something? */
		if (bio_data_dir(req->master_bio) == WRITE)
			atomic_add(req->i.size >> 9, &mdev->ap_in_flight);

		if (bio_data_dir(req->master_bio) == WRITE &&
		    !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) {
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			if (req->rq_state & RQ_NET_PENDING) {
				dec_ap_pending(mdev);
				req->rq_state &= ~RQ_NET_PENDING;
				req->rq_state |= RQ_NET_OK;
			} /* else: neg-ack was faster... */
			/* it is still not yet RQ_NET_DONE until the
			 * corresponding epoch barrier got acked as well,
			 * so we know what to dirty on connection loss */
		}
		req->rq_state &= ~RQ_NET_QUEUED;
		req->rq_state |= RQ_NET_SENT;
		req_may_be_completed_not_susp(req, m);
		break;

	case OOS_HANDED_TO_NETWORK:
		/* Was not set PENDING, no longer QUEUED, so is now DONE
		 * as far as this connection is concerned. */
		req->rq_state &= ~RQ_NET_QUEUED;
		req->rq_state |= RQ_NET_DONE;
		req_may_be_completed_not_susp(req, m);
		break;

	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		/* assert something? */
		if (req->rq_state & RQ_NET_PENDING)
			dec_ap_pending(mdev);

		p = !(req->rq_state & RQ_WRITE) && req->rq_state & RQ_NET_PENDING;

		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
		req->rq_state |= RQ_NET_DONE;
		if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
			atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);

		req_may_be_completed(req, m); /* Allowed while state.susp */
		break;

	case DISCARD_WRITE:
		/* for discarded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log. */
		req->rq_state |= RQ_NET_DONE;
		/* fall through */
	case WRITE_ACKED_BY_PEER_AND_SIS:
	case WRITE_ACKED_BY_PEER:
		if (what == WRITE_ACKED_BY_PEER_AND_SIS)
			req->rq_state |= RQ_NET_SIS;
		D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
		/* protocol C; successfully written on peer.
		 * Nothing more to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices. */

		goto ack_common;
	case RECV_ACKED_BY_PEER:
		D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		req->rq_state |= RQ_NET_OK;
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		dec_ap_pending(mdev);
		atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
		req->rq_state &= ~RQ_NET_PENDING;
		maybe_wakeup_conflicting_requests(req);
		req_may_be_completed_not_susp(req, m);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait.  Wake it up once this
		 * request has completed locally.
		 */
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		maybe_wakeup_conflicting_requests(req);
		req_may_be_completed_not_susp(req, m);
		break;

	case NEG_ACKED:
		/* assert something? */
		if (req->rq_state & RQ_NET_PENDING) {
			dec_ap_pending(mdev);
			if (req->rq_state & RQ_WRITE)
				atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
		}
		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);

		req->rq_state |= RQ_NET_DONE;

		maybe_wakeup_conflicting_requests(req);
		req_may_be_completed_not_susp(req, m);
		/* else: done by HANDED_OVER_TO_NETWORK */
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		req_may_be_completed(req, m); /* Allowed while state.susp */
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		req->rq_state &= ~RQ_LOCAL_COMPLETED;

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(mdev);
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
		break;

	case RESEND:
		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK was missing.
		   Throw them out of the TL here by pretending we got a BARRIER_ACK.
		   During connection handshake, we ensure that the peer was not rebooted. */
		if (!(req->rq_state & RQ_NET_OK)) {
			if (req->w.cb) {
				/* w.cb expected to be w_send_dblock, or w_send_read_req */
				drbd_queue_work(&mdev->tconn->sender_work, &req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			}
			break;
		}
		/* else, fall through to BARRIER_ACKED */

	case BARRIER_ACKED:
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests were acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
		}
		if ((req->rq_state & RQ_NET_MASK) != 0) {
			req->rq_state |= RQ_NET_DONE;
			if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)))
				atomic_sub(req->i.size>>9, &mdev->ap_in_flight);
		}
		req_may_be_done(req); /* Allowed while state.susp */
		break;

	case DATA_RECEIVED:
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		dec_ap_pending(mdev);
		req->rq_state &= ~RQ_NET_PENDING;
		req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
		req_may_be_completed_not_susp(req, m);
		break;
	}

	return rv;
}
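
/* For reference: most callers do not invoke __req_mod() directly but go
 * through the req_mod() wrapper in drbd_req.h, which roughly does:
 *
 *	struct bio_and_error m;
 *
 *	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 *	rv = __req_mod(req, what, &m);
 *	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 *	if (m.bio)
 *		complete_master_bio(mdev, &m);
 *
 * i.e. the state transition happens under the req_lock, while the master
 * bio is completed outside of it, via the bio_and_error cookie. */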

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (mdev->state.disk == D_UP_TO_DATE)
		return true;
	if (mdev->state.disk != D_INCONSISTENT)
		return false;
	esector = sector + (size >> 9) - 1;
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	D_ASSERT(sector < nr_sectors);
	D_ASSERT(esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
}
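
/* Example: with the usual 4 KiB bitmap granularity (BM_BLOCK_SIZE),
 * BM_SECT_TO_BIT(s) is s >> 3 for 512-byte sectors. A 32 KiB read at
 * sector 0 thus spans bitmap bits 0..7, and is served locally only if
 * drbd_bm_count_bits() finds none of them set (nothing out of sync). */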

static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector)
{
	enum drbd_read_balancing rbm;
	struct backing_dev_info *bdi;
	int stripe_shift;

	if (mdev->state.pdsk < D_UP_TO_DATE)
		return false;

	rcu_read_lock();
	rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
	rcu_read_unlock();

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
		bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
		return bdi_read_congested(bdi);
	case RB_LEAST_PENDING:
		return atomic_read(&mdev->local_cnt) >
			atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
	case RB_32K_STRIPING:  /* stripe_shift = 15 */
	case RB_64K_STRIPING:
	case RB_128K_STRIPING:
	case RB_256K_STRIPING:
	case RB_512K_STRIPING:
	case RB_1M_STRIPING:  /* stripe_shift = 20 */
		stripe_shift = (rbm - RB_32K_STRIPING + 15);
		return (sector >> (stripe_shift - 9)) & 1;
	case RB_ROUND_ROBIN:
		return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
	case RB_PREFER_REMOTE:
		return true;
	case RB_PREFER_LOCAL:
	default:
		return false;
	}
}
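
/* Worked example for the striping modes: RB_64K_STRIPING gives
 * stripe_shift = 16, so the test becomes (sector >> 7) & 1. With 512-byte
 * sectors, 128 sectors are 64 KiB, i.e. reads alternate between local and
 * remote for every 64 KiB stripe of the device offset. */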

/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 *
 * Only way out: remove the conflicting intervals from the tree.
 */
static void complete_conflicting_writes(struct drbd_request *req)
{
	DEFINE_WAIT(wait);
	struct drbd_conf *mdev = req->w.mdev;
	struct drbd_interval *i;
	sector_t sector = req->i.sector;
	int size = req->i.size;

	i = drbd_find_overlap(&mdev->write_requests, sector, size);
	if (!i)
		return;

	for (;;) {
		prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		i = drbd_find_overlap(&mdev->write_requests, sector, size);
		if (!i)
			break;
		/* Indicate to wake up device->misc_wait on progress.  */
		i->waiting = true;
		spin_unlock_irq(&mdev->tconn->req_lock);
		schedule();
		spin_lock_irq(&mdev->tconn->req_lock);
	}
	finish_wait(&mdev->misc_wait, &wait);
}
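
/* The loop above is the classic lost-wakeup-safe wait pattern: the task is
 * put on mdev->misc_wait by prepare_to_wait() *before* req_lock is dropped,
 * so a wake_up() from drbd_remove_request_interval() cannot slip in between
 * the overlap check and schedule() unnoticed. */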

int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
	const int rw = bio_rw(bio);
	const int size = bio->bi_size;
	const sector_t sector = bio->bi_sector;
	struct drbd_request *req;
	struct net_conf *nc;
	int local, remote, send_oos = 0;
	int err = 0;
	int ret = 0;
	union drbd_dev_state s;

	/* allocate outside of all locks; */
	req = drbd_req_new(mdev, bio);
	if (!req) {
		dec_ap_bio(mdev);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		dev_err(DEV, "could not kmalloc() req\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	req->start_time = start_time;

	local = get_ldev(mdev);
	if (!local) {
		bio_put(req->private_bio); /* or we get a bio leak */
		req->private_bio = NULL;
	}
	if (rw == WRITE) {
		remote = 1;
	} else {
		/* READ || READA */
		if (local) {
			if (!drbd_may_do_local_read(mdev, sector, size) ||
			    remote_due_to_read_balancing(mdev, sector)) {
				/* we could kick the syncer to
				 * sync this extent asap, wait for
				 * it, then continue locally.
				 * Or just issue the request remotely.
				 */
				local = 0;
				bio_put(req->private_bio);
				req->private_bio = NULL;
				put_ldev(mdev);
			}
		}
		remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
	}

	/* If we have a disk, but a READA request is mapped to remote,
	 * we are R_PRIMARY, D_INCONSISTENT, SyncTarget.
	 * Just fail that READA request right here.
	 *
	 * THINK: maybe fail all READA when not local?
	 *        or make this configurable...
	 *        if network is slow, READA won't do any good.
	 */
	if (rw == READA && mdev->state.disk >= D_INCONSISTENT && !local) {
		err = -EWOULDBLOCK;
		goto fail_and_free_req;
	}

	/* For WRITES going to the local disk, grab a reference on the target
	 * extent.  This waits for any resync activity in the corresponding
	 * resync extent to finish, and, if necessary, pulls in the target
	 * extent into the activity log, which involves further disk io because
	 * of transactional on-disk meta data updates. */
	if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) {
		req->rq_state |= RQ_IN_ACT_LOG;
		drbd_al_begin_io(mdev, &req->i);
	}
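
	/* (The activity log is DRBD's on-disk journal of "hot" 4 MiB extents:
	 * after a primary crash, only extents recorded there need to be
	 * resynced instead of the whole device. Pulling a new extent in, as
	 * drbd_al_begin_io() may do above, costs a synchronous meta data
	 * write, which is the "further disk io" the comment refers to.) */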

	s = mdev->state;
	remote = remote && drbd_should_do_remote(s);
	send_oos = rw == WRITE && drbd_should_send_out_of_sync(s);
	D_ASSERT(!(remote && send_oos));

	if (!(local || remote) && !drbd_suspended(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
		err = -EIO;
		goto fail_free_complete;
	}

	/* GOOD, everything prepared, grab the spin_lock */
	spin_lock_irq(&mdev->tconn->req_lock);

	if (rw == WRITE) {
		/* This may temporarily give up the req_lock,
		 * but will re-acquire it before it returns here.
		 * Needs to be before the check on drbd_suspended() */
		complete_conflicting_writes(req);
	}

	if (drbd_suspended(mdev)) {
		/* If we got suspended, use the retry mechanism in
		   drbd_make_request() to restart processing of this
		   bio. In the next call to drbd_make_request
		   we sleep in inc_ap_bio() */
		ret = 1;
		spin_unlock_irq(&mdev->tconn->req_lock);
		goto fail_free_complete;
	}

	if (remote || send_oos) {
		remote = drbd_should_do_remote(mdev->state);
		send_oos = rw == WRITE && drbd_should_send_out_of_sync(mdev->state);
		D_ASSERT(!(remote && send_oos));

		if (!(remote || send_oos))
			dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
		if (!(local || remote)) {
			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
			spin_unlock_irq(&mdev->tconn->req_lock);
			err = -EIO;
			goto fail_free_complete;
		}
	}

	/* Update disk stats */
	_drbd_start_io_acct(mdev, req, bio);

	/* NOTE
	 * Actually, 'local' may be wrong here already, since we may have failed
	 * to write to the meta data, and may become wrong anytime because of
	 * local io-error for some other request, which would lead to us
	 * "detaching" the local disk.
	 *
	 * 'remote' may become wrong any time because the network could fail.
	 *
	 * This is a harmless race condition, though, since it is handled
	 * correctly at the appropriate places; so it just defers the failure
	 * of the respective operation.
	 */

	/* mark them early for readability.
	 * this just sets some state flags. */
	if (remote)
		_req_mod(req, TO_BE_SENT);
	if (local)
		_req_mod(req, TO_BE_SUBMITTED);

	/* which transfer log epoch does this belong to? */
	req->epoch = atomic_read(&mdev->tconn->current_tle_nr);
	if (rw == WRITE)
		mdev->tconn->current_tle_writes++;

	list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log);

	/* NOTE remote first: to get the concurrent write detection right,
	 * we must register the request before start of local IO.  */
	if (remote) {
		/* either WRITE and C_CONNECTED,
		 * or READ, and no local disk,
		 * or READ, but not in sync.
		 */
		_req_mod(req, (rw == WRITE)
				? QUEUE_FOR_NET_WRITE
				: QUEUE_FOR_NET_READ);
	}
	if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
		_req_mod(req, QUEUE_FOR_SEND_OOS);

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (remote &&
	    nc->on_congestion != OC_BLOCK && mdev->tconn->agreed_pro_version >= 96) {
		int congested = 0;

		if (nc->cong_fill &&
		    atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) {
			dev_info(DEV, "Congestion-fill threshold reached\n");
			congested = 1;
		}

		if (mdev->act_log->used >= nc->cong_extents) {
			dev_info(DEV, "Congestion-extents threshold reached\n");
			congested = 1;
		}

		if (congested) {
			if (mdev->tconn->current_tle_writes)
				/* start a new epoch for non-mirrored writes */
				start_new_tl_epoch(mdev->tconn);

			if (nc->on_congestion == OC_PULL_AHEAD)
				_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
			else  /* nc->on_congestion == OC_DISCONNECT */
				_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
		}
	}
	rcu_read_unlock();

	spin_unlock_irq(&mdev->tconn->req_lock);

	if (local) {
		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;

		/* State may have changed since we grabbed our reference on the
		 * mdev->ldev member. Double check, and short-circuit to endio.
		 * In case the last activity log transaction failed to get on
		 * stable storage, and this is a WRITE, we may not even submit
		 * this bio. */
		if (get_ldev(mdev)) {
			if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
					    : rw == READ  ? DRBD_FAULT_DT_RD
					    :               DRBD_FAULT_DT_RA))
				bio_endio(req->private_bio, -EIO);
			else
				generic_make_request(req->private_bio);
			put_ldev(mdev);
		} else
			bio_endio(req->private_bio, -EIO);
	}

	return 0;

fail_free_complete:
	if (req->rq_state & RQ_IN_ACT_LOG)
		drbd_al_complete_io(mdev, &req->i);
fail_and_free_req:
	if (local) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
		put_ldev(mdev);
	}
	if (!ret)
		bio_endio(bio, err);

	drbd_req_free(req);
	dec_ap_bio(mdev);

	return ret;
}

int drbd_make_request(struct request_queue *q, struct bio *bio)
{
	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
	unsigned long start_time;

	start_time = jiffies;

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(bio->bi_size > 0);
	D_ASSERT(IS_ALIGNED(bio->bi_size, 512));

	do {
		inc_ap_bio(mdev);
	} while (__drbd_make_request(mdev, bio, start_time));

	return 0;
}
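
/* Note the retry loop above: __drbd_make_request() returns nonzero only when
 * the device got suspended while the request was being prepared (the
 * "ret = 1" path); the bio has not been completed in that case, so we simply
 * take another pass, sleeping in inc_ap_bio() until I/O is resumed. */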

/* This is called by bio_add_page().
 *
 * q->max_hw_sectors and other global limits are already enforced there.
 *
 * We need to call down to our lower level device,
 * in case it has special restrictions.
 *
 * We also may need to enforce configured max-bio-bvecs limits.
 *
 * As long as the BIO is empty we have to allow at least one bvec,
 * regardless of size and offset, so no need to ask lower levels.
 */
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
	unsigned int bio_size = bvm->bi_size;
	int limit = DRBD_MAX_BIO_SIZE;
	int backing_limit;

	if (bio_size && get_ldev(mdev)) {
		struct request_queue * const b =
			mdev->ldev->backing_bdev->bd_disk->queue;
		if (b->merge_bvec_fn) {
			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
			limit = min(limit, backing_limit);
		}
		put_ldev(mdev);
	}
	return limit;
}
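
/* The merge_bvec_fn contract (in this kernel generation): bio_add_page()
 * asks how many bytes may be appended to the bio described by bvm; a return
 * value smaller than bvec->bv_len makes the caller stop growing the bio.
 * Returning DRBD_MAX_BIO_SIZE when the backing device imposes no limit of
 * its own effectively caps request size at DRBD's own maximum. */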

struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
{
	/* Walk the transfer log,
	 * and find the oldest not yet completed request */
	struct drbd_request *r;
	list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
		if (r->rq_state & (RQ_NET_PENDING|RQ_LOCAL_PENDING))
			return r;
	}
	return NULL;
}

void request_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;
	struct drbd_tconn *tconn = mdev->tconn;
	struct drbd_request *req; /* oldest request */
	struct net_conf *nc;
	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
	unsigned long now;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (nc && mdev->state.conn >= C_WF_REPORT_PARAMS)
		ent = nc->timeout * HZ/10 * nc->ko_count;
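	/* nc->timeout is configured in units of 0.1 seconds, hence the HZ/10
	 * conversion to jiffies; e.g. timeout=60 (6 seconds) together with
	 * ko-count=7 yields an effective network timeout of 42 seconds. */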

	if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
		dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10;
		put_ldev(mdev);
	}
	rcu_read_unlock();

	et = min_not_zero(dt, ent);

	if (!et)
		return; /* Recurring timer stopped */

	now = jiffies;

	spin_lock_irq(&tconn->req_lock);
	req = find_oldest_request(tconn);
	if (!req) {
		spin_unlock_irq(&tconn->req_lock);
		mod_timer(&mdev->request_timer, now + et);
		return;
	}

	/* The request is considered timed out, if
	 * - we have some effective timeout from the configuration,
	 *   with above state restrictions applied,
	 * - the oldest request is waiting for a response from the network
	 *   resp. the local disk,
	 * - the oldest request is in fact older than the effective timeout,
	 * - the connection was established (resp. disk was attached)
	 *   for longer than the timeout already.
	 * Note that for 32bit jiffies and very stable connections/disks,
	 * we may have a wrap around, which is caught by
	 * !time_in_range(now, last_..._jif, last_..._jif + timeout).
	 *
	 * Side effect: once per 32bit wrap-around interval, which means every
	 * ~198 days with 250 HZ, we have a window where the timeout would need
	 * to expire twice (worst case) to become effective. Good enough.
	 */
	if (ent && req->rq_state & RQ_NET_PENDING &&
		 time_after(now, req->start_time + ent) &&
		!time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) {
		dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
		_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
	}
	if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev &&
		 time_after(now, req->start_time + dt) &&
		!time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
		dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
		__drbd_chk_io_error(mdev, 1);
	}
	nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
	spin_unlock_irq(&tconn->req_lock);
	mod_timer(&mdev->request_timer, nt);
}